How to use the cma.CMAEvolutionStrategy class in cma

To help you get started, we've selected a few cma examples based on popular ways it is used in public projects.

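Every example below follows the same basic pattern: build a CMAEvolutionStrategy from an initial point and step size, then drive it with ask/tell until it stops. A minimal, self-contained sketch (the quadratic objective is just a placeholder):

import cma

# Minimize a toy quadratic with the standard ask/tell loop.
es = cma.CMAEvolutionStrategy(8 * [0.0], 0.5, {'popsize': 16, 'verbose': -9})
while not es.stop():
    solutions = es.ask()               # sample candidates from the current distribution
    fitnesses = [sum(x ** 2 for x in s) for s in solutions]
    es.tell(solutions, fitnesses)      # update mean, covariance, and step size
print(es.result.xbest)                 # best solution found (recent pycma)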

github manoharan-lab / holopy / holopy / inference / cmaes.py
    popsize = len(initial_population)
    stds = [par.sd if isinstance(par, prior.Gaussian) 
                    else par.interval/4 for par in parameters]
    weights = [weight_function(i, popsize) for i in range(popsize)]
    if weights[-1] > 0:
        weights[-1] = 0
        warnings.warn('Setting weight of worst parent to 0')
    with tempfile.TemporaryDirectory() as tempdir:
        cmaoptions = {'CMA_stds':stds, 'CMA_recombination_weights':weights,
                      'verb_filenameprefix':tempdir, 'verbose':-3}
        cmaoptions.update(tols)
        if seed is not None:
            cmaoptions.update({'seed':seed})
        guess = [par.guess for par in parameters]
        cma_strategy = cma.CMAEvolutionStrategy(guess, 1, cmaoptions)
        cma_strategy.inject(initial_population, force=True)
        solutions = np.zeros((popsize, len(parameters)))
        func_vals = np.zeros(popsize)
        pool = choose_pool(parallel)
        while not cma_strategy.stop():
            invalid = np.ones(popsize, dtype=bool)
            inf_replace_counter = 0
            while invalid.any() and inf_replace_counter < 10:
                attempts = cma_strategy.ask(np.sum(invalid))
                solutions[invalid, :] = attempts
                func_vals[invalid] = list(pool.map(obj_func, attempts))
                invalid = ~np.isfinite(func_vals)
                inf_replace_counter += 1 # catches case where all are inf
            cma_strategy.tell(solutions, func_vals)
            cma_strategy.logger.add()
        cma_strategy.logger.load()
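
The holopy snippet above leans on two pycma options, CMA_stds (per-coordinate scaling of the single sigma) and CMA_recombination_weights, and on inject() to force a user-supplied initial population into the first generation. A stripped-down sketch of that pattern, with made-up numbers:

import cma

x0 = [0.5, 10.0]                       # initial guess for two parameters
opts = {'CMA_stds': [0.1, 2.0],        # per-parameter scaling of sigma
        'verbose': -3}                 # keep console output quiet
es = cma.CMAEvolutionStrategy(x0, 1, opts)

# Force known candidate solutions into the next generation.
es.inject([[0.4, 9.0], [0.6, 11.0]], force=True)

while not es.stop():
    xs = es.ask()
    es.tell(xs, [sum((xi - 1.0) ** 2 for xi in x) for x in xs])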
github zuoxingdong / lagom / lagom / es.py
def __init__(self, x0, sigma0, opts=None):
        import cma
        self.es = cma.CMAEvolutionStrategy(x0, sigma0, opts)
        
        self.x0 = self.es.x0
        self.sigma0 = self.es.sigma0
        self.popsize = self.es.popsize
github CompuCell3D / CompuCell3D / CompuCell3D / optimization / optimization.py
        starting_params = optim_param_mgr.get_starting_points()
        print('starting_params (mapped to [0,1])=', starting_params)
        print('remapped (true) starting params=', optim_param_mgr.params_from_0_1(starting_params))
        print('dictionary of remapped parameters labeled by parameter name=',
              optim_param_mgr.param_from_0_1_dict(starting_params))

        print('simulation_name=', simulation_name)
        self.workload_dict = self.prepare_optimization_run(simulation_name=simulation_name)
        workload_dict = self.workload_dict

        print(workload_dict)

        std_dev = optim_param_mgr.std_dev
        default_bounds = optim_param_mgr.default_bounds

        optim = CMAEvolutionStrategy(starting_params, std_dev, {'bounds': list(default_bounds)})

        while not optim.stop():  # iterate
            # get candidate solutions
            # param_set_list = optim.ask(number=self.num_workers)
            # param_set_list = optim.ask(number=1)
            param_set_list = optim.ask(number=population_size)

            # set param_set_list for run_task to iterate over
            self.set_param_set_list(param_set_list=param_set_list)

            # #debug
            # return_result_vec = [self.fcn(optim_param_mgr.params_from_0_1(X)) for X in param_set_list]

            # evaluate target function values at the candidate solutions
            return_result_vec = np.array([], dtype=float)
            for param_set in self.param_generator(self.num_workers):
github zhouyanasd / DL-NC / Brian2_scripts / sim_brian_scratch / sim_brian_KHT / sim_brian_KTH_v1_SAES_v1.py
def __init__(self, f, acquisition, x0, sigma, kappa=2.576, xi=0.0, **opts):
        self.f = f
        self.optimizer = BayesianOptimization_(
            f=f,
            pbounds=opts['bounds'],
            random_state=1,
        )
        self.util = UtilityFunction(kind=acquisition, kappa=kappa, xi=xi)
        opts['bounds'] = self.optimizer._space._bounds.T.tolist()
        self.es = cma.CMAEvolutionStrategy(x0, sigma, opts)
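
Note the bounds conversion just before the constructor call: bayes_opt stores bounds as an (n, 2) array of (low, high) rows, while cma's 'bounds' option expects [list_of_lower_bounds, list_of_upper_bounds], which is exactly what the transpose produces:

import numpy as np

# (n, 2) rows of (low, high), as bayes_opt keeps them internally...
bounds = np.array([[0.0, 1.0], [-5.0, 5.0]])
# ...transposed into cma's [lowers, uppers] format.
opts_bounds = bounds.T.tolist()        # [[0.0, -5.0], [1.0, 5.0]]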
github pints-team / pints / pints / _optimisers / _cmaes.py
        # Tell cma-es to be quiet
        options.set('verbose', -9)

        # Set population size
        options.set('popsize', self._population_size)

        # CMAES always seeds np.random, whether you ask it to or not, so to
        # get consistent debugging output, we should always pass in a seed.
        # Instead of using a fixed number (which would be bad), we can use a
        # randomly generated number: This will ensure pseudo-randomness, but
        # produce consistent results if np.random has been seeded before
        # calling.
        options.set('seed', np.random.randint(2**31))

        # Search
        self._es = cma.CMAEvolutionStrategy(self._x0, self._sigma0, options)

        # Update optimiser state
        self._running = True
github CCSI-Toolset / FOQUS / foqus_lib / framework / optimizer / OptCMA.py
                it = es.result()[4]  # iteration index
                self.msgQueue.put("Reloaded " + str(it) + " iterations")
                bestSoFar = [es.result()[1]]
                bestCoord = es.result()[0]
                self.resQueue.put(["BEST", bestSoFar, bestCoord])
                self.resQueue.put(["IT", it - 1, bestSoFar[0]])
            except:
                logging.getLogger("foqus." + __name__).exception(
                    "Failed to load restart file")
                self.msgQueue.put(traceback.format_exc())
                self.msgQueue.put("Couldn't open restart file: "+pickIn)
                self.msgQueue.put("Not proceeding with optimization")
                self.msgQueue.put("Check the solver settings")
                return
        else: # if no restart file create new CMA-ES object
            es = cma.CMAEvolutionStrategy(xinit, sd0, opts)
            it = 0 # the iteration index
            bestSoFar = numpy.array(numpy.inf)
        #
        # Put initial progress message out; just says no samples have run
        # and on first iteration (or whatever iteration from restart)
        #
        self.resQueue.put(["PROG", 0, popsize, 0, it, 0, 0])
        #
        # setup the problem object to share information with solver
        # when calculating objective and running flowsheet samples
        #
        self.prob.initSolverParameters()
        self.prob.solverStart = start
        self.prob.maxSolverTime = maxTime
        if storeRes:
            self.prob.storeResults = setName
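
The integer indices into es.result() above map to fixed positions: 0 is the best solution found, 1 its objective value, and 4 the iteration count. In recent pycma versions, result is a property returning a namedtuple, which reads more clearly:

import cma

es = cma.CMAEvolutionStrategy(3 * [0.0], 0.5, {'verbose': -9})
es.optimize(cma.ff.sphere)             # built-in sphere test function

res = es.result                        # namedtuple in recent pycma
print(res.xbest)                       # best solution        (index 0)
print(res.fbest)                       # best function value  (index 1)
print(res.iterations)                  # iteration count      (index 4)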
github zuoxingdong / lagom / lagom / contrib / es.py
    def __init__(self, mu0, std0, popsize):
        """
        Args:
            mu0 (list or ndarray): initial mean
            std0 (float): initial standard deviation
            popsize (int): population size
        """
        self.mu0 = mu0
        self.std0 = std0
        self.popsize = popsize
        
        # Create CMA-ES instance
        import cma
        self.es = cma.CMAEvolutionStrategy(self.mu0, 
                                           self.std0, 
                                           {'popsize': self.popsize})
        
        self.solutions = None
github dariocazzani / World-Models-TensorFlow / stateless_agent / train-agents.py
def train():
	es = cma.CMAEvolutionStrategy(_NUM_PARAMS * [0], 0.1, {'popsize': 16})
	rewards_through_gens = []
	generation = 1
	try:
		while not es.stop():
			solutions = es.ask()
			with mp.Pool(mp.cpu_count()) as p:
				rewards = list(tqdm.tqdm(p.imap(play, list(solutions)), total=len(solutions)))

			es.tell(solutions, rewards)

			rewards = np.array(rewards) * (-1.)
			print("\n**************")
			print("Generation: {}".format(generation))
			print("Min reward: {:.3f}\nMax reward: {:.3f}".format(np.min(rewards), np.max(rewards)))
			print("Avg reward: {:.3f}".format(np.mean(rewards)))
			print("**************\n")
github rlworkgroup / garage / src / garage / np / algos / cma_es.py
def train(self, runner):
        """Initialize variables and start training.

        Args:
            runner (LocalRunner): LocalRunner is passed to give algorithm
                the access to runner.step_epochs(), which provides services
                such as snapshotting and sampler control.

        Returns:
            The average return in the last epoch cycle.

        """
        init_mean = self.policy.get_param_values()
        self.es = cma.CMAEvolutionStrategy(init_mean, self.sigma0,
                                           {'popsize': self.n_samples})
        self.all_params = self._sample_params()
        self.cur_params = self.all_params[0]
        self.policy.set_param_values(self.cur_params)
        self.all_returns = []

        return super().train(runner)