How to use the hyperactive.optimizers.local.HillClimbingPositioner class in hyperactive

To help you get started, we’ve selected a few hyperactive examples based on popular ways it is used in public projects.
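
Every snippet on this page follows the same pattern: HillClimbingPositioner is subclassed so that each optimizer gets its own positioner type, with extra per-positioner state attached on top of the base class. Below is a minimal sketch of that pattern; the import path is inferred from the title above, and MyPositioner and its attribute are illustrative names, not part of Hyperactive.

from hyperactive.optimizers.local import HillClimbingPositioner

class MyPositioner(HillClimbingPositioner):
    def __init__(self, _opt_args_):
        # _opt_args_ is the optimizer-argument object that Hyperactive
        # passes to every positioner (see the snippets below)
        super().__init__(_opt_args_)
        self.my_state = []  # illustrative extra attribute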

Example from SimonBlanke/Hyperactive: hyperactive/optimizers/monte_carlo/parallel_tempering.py (View on Github)

        return _cand_

    def _init_iteration(self, _cand_):
        p = self._init_annealer(_cand_)

        self._optimizer_eval(_cand_, p)
        self._update_pos(_cand_, p)

        return p

    def _finish_search(self):
        self._pbar_.close_p_bar()


class System(HillClimbingPositioner):
    def __init__(self, _opt_args_, temp):
        super().__init__(_opt_args_)
        self.temp = temp
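
In parallel_tempering.py, each System is a hill-climbing positioner that also carries a temperature; _init_annealers (shown in the second parallel_tempering.py snippet further down) builds one such positioner per temperature level, and _swap_pos exchanges positions between them every n_iter_swap iterations. Here is a hedged sketch of how such a temperature ladder could be set up; the temperature values and variable names are illustrative:

# one System per temperature level; hotter systems accept worse moves
# more readily and therefore explore more broadly
temps = [0.1, 1.0, 10.0]
systems = [System(_opt_args_, temp) for temp in temps]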

Example from SimonBlanke/Hyperactive: hyperactive/optimizers/population/evolution_strategy.py (View on Github)

        self._move_positioners(_cand_)
        self._optimizer_eval(_cand_, _p_current)
        self._update_pos(_cand_, _p_current)

        return _cand_

    def _init_iteration(self, _cand_):
        p = self._init_individual(_cand_)

        self._optimizer_eval(_cand_, p)
        self._update_pos(_cand_, p)

        return p


class Individual(HillClimbingPositioner):
    def __init__(self, _opt_args_):
        super().__init__(_opt_args_)
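
In evolution_strategy.py, Individual adds no state of its own; it exists so that the evolution strategy can manage a whole population of hill-climbing positioners, each one created, evaluated, and position-updated through _init_iteration as shown above. A hedged sketch of the population idea; n_individuals is an illustrative name:

# a population of independent hill-climbing individuals
population = [Individual(_opt_args_) for _ in range(n_individuals)]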

Example from SimonBlanke/Hyperactive: hyperactive/optimizers/local/tabu_search.py (View on Github)

        if self.p_list[0].score_new < _cand_.score_best:
            self.p_list[0] = self._tabu_pos(self.p_list[0].pos_new, self.p_list[0])

        return _cand_

    def _init_iteration(self, _cand_):
        p = super()._init_base_positioner(_cand_, positioner=TabuPositioner)

        self._optimizer_eval(_cand_, p)
        self._update_pos(_cand_, p)

        return p


class TabuPositioner(HillClimbingPositioner):
    def __init__(self, _opt_args_):
        super().__init__(_opt_args_)
        self.tabus = []
        self.tabu_memory = _opt_args_.tabu_memory

    def add_tabu(self, tabu):
        self.tabus.append(tabu)

        if len(self.tabus) > self.tabu_memory:
            self.tabus.pop(0)

    def move_climb(self, _cand_, pos, epsilon_mod=1):
        sigma = 1 + _cand_._space_.dim * self.epsilon * epsilon_mod
        pos_normal = np.random.normal(pos, sigma, pos.shape)
        pos_new_int = np.rint(pos_normal)
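
The tabu_search.py snippet is cut off inside move_climb; up to this point it mirrors the base hill-climbing step, drawing a new position from a normal distribution around pos and rounding it to integer grid coordinates. Conceptually, the tabu variant then avoids positions already stored in self.tabus. The sketch below is a hypothetical illustration of that idea, not the library's actual continuation (np is numpy, as in the snippet above):

import numpy as np

def tabu_aware_step(pos, sigma, tabus, max_retries=10):
    # hypothetical helper: draw a hill-climbing step as in move_climb,
    # but redraw it while the rounded position is still on the tabu list
    pos_new_int = np.rint(np.random.normal(pos, sigma, pos.shape))
    for _ in range(max_retries):
        if not any(np.array_equal(pos_new_int, tabu) for tabu in tabus):
            break
        pos_new_int = np.rint(np.random.normal(pos, sigma, pos.shape))
    return pos_new_int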

Example from SimonBlanke/Hyperactive: hyperactive/optimizers/monte_carlo/parallel_tempering.py (View on Github)

    def _iterate(self, i, _cand_, _p_list_):
        _cand_ = self._annealing_systems(_cand_, _p_list_)

        if self.n_iter_swap != 0 and i % self.n_iter_swap == 0:
            self._swap_pos(_cand_, _p_list_)

        return _cand_

    def _init_opt_positioner(self, _cand_):
        _p_list_ = self._init_annealers(_cand_)

        return _p_list_


class System(HillClimbingPositioner):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.temp = kwargs["temp"]
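
Taken together, the snippets show a single reuse pattern: parallel tempering, evolution strategies, and tabu search all derive their per-walker positioners from HillClimbingPositioner and attach optimizer-specific state to it (a temperature or a bounded tabu list), while the base class supplies the epsilon-scaled local stepping. Note that this last System variant reads the temperature from kwargs["temp"] rather than taking it as a positional parameter as in the first parallel_tempering.py snippet, which suggests the two snippets were taken from different versions of the module.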