How to use the hyperactive.optimizers.local.HillClimbingOptimizer class in hyperactive

To help you get started, we’ve selected a few hyperactive examples based on popular ways HillClimbingOptimizer is used in public projects.
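All of the snippets below subclass HillClimbingOptimizer and override its iteration step. As background, here is a minimal, library-independent sketch of plain hill climbing, the strategy these subclasses build on. The objective function, bounds, and step rule are illustrative assumptions, not Hyperactive code.

import random

def objective(x):
    # Toy objective with a single maximum at x = 5.
    return -(x - 5) ** 2

def hill_climb(n_iter=100, epsilon=0.5):
    current = random.uniform(-10, 10)
    score = objective(current)
    for _ in range(n_iter):
        # Propose a nearby position and keep it only if it scores better.
        neighbor = current + random.uniform(-epsilon, epsilon)
        new_score = objective(neighbor)
        if new_score > score:
            current, score = neighbor, new_score
    return current, score

print(hill_climb())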


From SimonBlanke/Hyperactive, hyperactive/optimizers/random/random_annealing.py (view on GitHub):
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License


from ..local import HillClimbingOptimizer


class RandomAnnealingOptimizer(HillClimbingOptimizer):
    def __init__(self, _opt_args_):
        super().__init__(_opt_args_)
        self.temp = _opt_args_.start_temp

    def _iterate(self, i, _cand_):
        # Take a hill-climbing step whose neighborhood size is scaled by the
        # current temperature.
        self.p_list[0].move_climb(
            _cand_, self.p_list[0].pos_current, epsilon_mod=self.temp / 10
        )
        self._optimizer_eval(_cand_, self.p_list[0])
        self._update_pos(_cand_, self.p_list[0])

        # Cool down: later iterations take smaller, more local steps.
        self.temp = self.temp * self._opt_args_.annealing_rate

        return _cand_
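The pattern above scales the step size by a temperature that shrinks every iteration, so the search starts broad and narrows over time. A minimal, library-independent sketch of that idea (the objective and parameter values here are illustrative, not Hyperactive's API):

import random

def objective(x):
    return -(x - 5) ** 2

def random_annealing(n_iter=100, epsilon=5.0, start_temp=1.0, annealing_rate=0.99):
    temp = start_temp
    current = random.uniform(-10, 10)
    score = objective(current)
    for _ in range(n_iter):
        # Step size is scaled by the temperature (cf. epsilon_mod=self.temp / 10).
        step = epsilon * temp
        neighbor = current + random.uniform(-step, step)
        new_score = objective(neighbor)
        if new_score > score:
            current, score = neighbor, new_score
        # Cool down (cf. self.temp = self.temp * self._opt_args_.annealing_rate).
        temp *= annealing_rate
    return current, score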
From SimonBlanke/Hyperactive, hyperactive/optimizers/local/tabu_search.py (view on GitHub):
import numpy as np
from scipy.spatial.distance import euclidean

from . import HillClimbingOptimizer, HillClimbingPositioner


def gaussian(distance, sig, sigma_factor=1):
    # Gaussian-shaped weight that decays with distance, scaled by sig and sigma_factor.
    return (
        sigma_factor
        * sig
        * np.exp(-np.power(distance, 2.0) / (sigma_factor * np.power(sig, 2.0)))
    )


class TabuOptimizer(HillClimbingOptimizer):
    def __init__(self, _opt_args_):
        super().__init__(_opt_args_)

    def _tabu_pos(self, pos, _p_):
        # Mark a position as tabu so the positioner avoids revisiting it.
        _p_.add_tabu(pos)

        return _p_

    def _iterate(self, i, _cand_):
        self._hill_climb_iter(i, _cand_)

        # If the new position scored worse than the best found so far, make it tabu.
        if self.p_list[0].score_new < _cand_.score_best:
            self.p_list[0] = self._tabu_pos(self.p_list[0].pos_new, self.p_list[0])

        return _cand_
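Tabu search extends hill climbing with a memory of disappointing positions that the search then avoids. A minimal, library-independent sketch of that idea (the objective, tabu_radius, and proximity test are illustrative assumptions, not Hyperactive code):

import random

def objective(x):
    return -(x - 5) ** 2

def tabu_search(n_iter=200, epsilon=1.0, tabu_radius=0.1):
    tabu = []
    current = random.uniform(-10, 10)
    score = objective(current)
    best = score
    for _ in range(n_iter):
        neighbor = current + random.uniform(-epsilon, epsilon)
        # Skip candidates that fall too close to a remembered tabu position.
        if any(abs(neighbor - t) < tabu_radius for t in tabu):
            continue
        new_score = objective(neighbor)
        # Positions that score worse than the best so far become tabu
        # (cf. the score_new < score_best check in _iterate above).
        if new_score < best:
            tabu.append(neighbor)
        if new_score > score:
            current, score = neighbor, new_score
        best = max(best, score)
    return current, score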
From SimonBlanke/Hyperactive, hyperactive/optimizers/local/stochastic_hill_climbing.py (view on GitHub):
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License

import random
import numpy as np

from . import HillClimbingOptimizer


class StochasticHillClimbingOptimizer(HillClimbingOptimizer):
    def __init__(self, _opt_args_):
        super().__init__(_opt_args_)
        self.norm_factor = _opt_args_.norm_factor

        # With adaptive normalization, track the largest score difference seen
        # and use the adaptive acceptance rule; otherwise use the default rule.
        if self.norm_factor == "adaptive":
            self._accept = self._accept_adapt
            self.diff_max = 0
        else:
            self._accept = self._accept_default

    def _consider(self, _p_, p_accept):
        rand = random.uniform(0, self._opt_args_.p_down)

        # Accept the (possibly worse) new position if it beats the random draw.
        if p_accept > rand:
            _p_.score_current = _p_.score_new
            _p_.pos_current = _p_.pos_new
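Stochastic hill climbing sometimes accepts a worse neighbor, which lets the search escape local optima. The snippet above only shows the acceptance step, not how p_accept is computed, so the exponential rule in this minimal, library-independent sketch is an assumption for illustration:

import math
import random

def objective(x):
    return -(x - 5) ** 2

def stochastic_hill_climb(n_iter=100, epsilon=0.5, norm_factor=1.0, p_down=1.0):
    current = random.uniform(-10, 10)
    score = objective(current)
    for _ in range(n_iter):
        neighbor = current + random.uniform(-epsilon, epsilon)
        new_score = objective(neighbor)
        if new_score > score:
            current, score = neighbor, new_score
        else:
            # Assumed acceptance rule: probability decays with the score drop.
            p_accept = math.exp(-(score - new_score) / norm_factor)
            # cf. _consider: accept if p_accept beats a uniform draw in [0, p_down).
            if p_accept > random.uniform(0, p_down):
                current, score = neighbor, new_score
    return current, score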
From SimonBlanke/Hyperactive, hyperactive/optimizers/random/random_restart_hill_climbing.py (view on GitHub):
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License


from ..local import HillClimbingOptimizer


class RandomRestartHillClimbingOptimizer(HillClimbingOptimizer):
    def __init__(self, _opt_args_):
        super().__init__(_opt_args_)
        self.n_iter_restart = _opt_args_.n_iter_restart

    def _iterate(self, i, _cand_):
        self._hill_climb_iter(i, _cand_)

        # On the restart schedule, jump to a random position to escape local optima.
        if self.n_iter_restart != 0 and i % self.n_iter_restart == 0:
            self.p_list[0].move_random(_cand_)

        return _cand_
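Random-restart hill climbing periodically abandons the current position for a random one while remembering the best result found so far. A minimal, library-independent sketch of that idea (the objective, bounds, and best-so-far bookkeeping are illustrative assumptions, not Hyperactive code):

import random

def objective(x):
    return -(x - 5) ** 2

def random_restart_hill_climb(n_iter=100, epsilon=0.5, n_iter_restart=20):
    current = random.uniform(-10, 10)
    score = objective(current)
    best_pos, best_score = current, score
    for i in range(1, n_iter + 1):
        neighbor = current + random.uniform(-epsilon, epsilon)
        new_score = objective(neighbor)
        if new_score > score:
            current, score = neighbor, new_score
        if score > best_score:
            best_pos, best_score = current, score
        # cf. _iterate: jump to a random position on the restart schedule.
        if n_iter_restart != 0 and i % n_iter_restart == 0:
            current = random.uniform(-10, 10)
            score = objective(current)
    return best_pos, best_score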