How to use the hyperactive.base_optimizer.BaseOptimizer class in hyperactive

To help you get started, we’ve selected a few hyperactive examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github SimonBlanke / Hyperactive / hyperactive / optimizers / sequence_model / sbom.py View on Github external
for data in data_list:
            print("data", data.shape)
            data_sorted = data[data[:, dim].argsort()]

            subdata = np.array_split(data_sorted, 2, axis=0)

            subdata_list = subdata_list + subdata

        data_list = subdata_list

        print("subdata_list", len(subdata_list))

    return subcubes


class SBOM(BaseOptimizer):
    def __init__(self, _opt_args_):
        """Initialize the sequence-based optimizer.

        Args:
            _opt_args_: optimizer argument container, forwarded to BaseOptimizer.
        """
        super().__init__(_opt_args_)
        # SBOM maintains a single search positioner (sequential, not population-based).
        self.n_positioners = 1

    def get_random_sample(self):
        """Draw a random subset of rows from the precomputed position grid.

        The draw is without replacement, and the sample size is clamped to the
        number of available rows so `np.random.choice` never over-requests.

        Returns:
            ndarray: randomly selected rows of ``self.all_pos_comb``.
        """
        n_rows = self.all_pos_comb.shape[0]
        n_draw = min(self._sample_size(), n_rows)

        idx = np.random.choice(n_rows, size=(n_draw,), replace=False)
        return self.all_pos_comb[idx]

    def _sample_size(self):
        n = self._opt_args_.max_sample_size
github SimonBlanke / Hyperactive / hyperactive / optimizers / population / particle_swarm_optimization.py View on Github external
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License


import random

import numpy as np

from ...base_optimizer import BaseOptimizer
from ...base_positioner import BasePositioner


class ParticleSwarmOptimizer(BaseOptimizer):
    def __init__(self, _opt_args_):
        """Initialize the particle-swarm optimizer.

        Args:
            _opt_args_: optimizer argument container, forwarded to BaseOptimizer.
        """
        super().__init__(_opt_args_)
        # One positioner per particle in the swarm.
        self.n_positioners = self._opt_args_.n_particles

    def _init_particle(self, _cand_):
        """Create one particle at a random position with zero velocity.

        The particle is immediately evaluated and its best-known position
        updated before being returned.

        Args:
            _cand_: candidate object exposing the search space.

        Returns:
            Particle: the freshly initialized, evaluated particle.
        """
        particle = Particle()
        particle.pos_new = _cand_._space_.get_random_pos()
        particle.velo = np.zeros(len(_cand_._space_.search_space))

        self._optimizer_eval(_cand_, particle)
        self._update_pos(_cand_, particle)

        return particle

    def _move_positioner(self, _cand_, _p_):
        r1, r2 = random.random(), random.random()
github SimonBlanke / Hyperactive / hyperactive / optimizers / local / hill_climbing_optimizer.py View on Github external
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License

import numpy as np

from ...base_optimizer import BaseOptimizer
from ...base_positioner import BasePositioner


class HillClimbingOptimizer(BaseOptimizer):
    def __init__(self, _opt_args_):
        """Initialize the hill-climbing optimizer.

        Args:
            _opt_args_: optimizer argument container, forwarded to BaseOptimizer.
        """
        super().__init__(_opt_args_)
        # Hill climbing follows a single trajectory, so one positioner suffices.
        self.n_positioners = 1

    def _hill_climb_iter(self, i, _cand_):
        score_new = -np.inf
        pos_new = None

        self.p_list[0].move_climb(_cand_, self.p_list[0].pos_current)
        self._optimizer_eval(_cand_, self.p_list[0])

        if self.p_list[0].score_new > score_new:
            score_new = self.p_list[0].score_new
            pos_new = self.p_list[0].pos_new

        if i % self._opt_args_.n_neighbours == 0:
github SimonBlanke / Hyperactive / hyperactive / optimizers / sequence_model / tree_of_parzen_estimators.py View on Github external
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License


import tqdm

from ...base_optimizer import BaseOptimizer

"""
TODO
"""


class TPEOptimizer(BaseOptimizer):
    def __init__(
        self,
        search_config,
        n_iter,
        metric="accuracy",
        n_jobs=1,
        cv=5,
        verbosity=1,
        random_state=None,
        warm_start=False,
        memory=True,
        scatter_init=False,
        eps=1,
    ):
        super().__init__(
            search_config,
github SimonBlanke / Hyperactive / hyperactive / optimizers / random / random_search.py View on Github external
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License


from ...base_optimizer import BaseOptimizer


class RandomSearchOptimizer(BaseOptimizer):
    def __init__(self, _opt_args_):
        """Initialize the random-search optimizer.

        Args:
            _opt_args_: optimizer argument container, forwarded to BaseOptimizer.
        """
        super().__init__(_opt_args_)
        # Random search samples one position per iteration; a single positioner is enough.
        self.n_positioners = 1

    def _iterate(self, i, _cand_):
        """Run one iteration of pure random search.

        The very first call (i == 0) initializes the positioner; every later
        call moves it to a fresh random position, evaluates it, and records
        the result.

        Args:
            i: iteration counter.
            _cand_: candidate object holding the search space and scores.

        Returns:
            The (possibly updated) candidate object.
        """
        if i < 1:
            self._init_iteration(_cand_)
            return _cand_

        positioner = self.p_list[0]
        positioner.move_random(_cand_)
        self._optimizer_eval(_cand_, positioner)
        self._update_pos(_cand_, positioner)

        return _cand_

    def _init_iteration(self, _cand_):
github SimonBlanke / Hyperactive / hyperactive / optimizers / population / evolution_strategy.py View on Github external
# Author: Simon Blanke
# Email: simon.blanke@yahoo.com
# License: MIT License

from math import floor, ceil
import numpy as np
import random

from ...base_optimizer import BaseOptimizer
from ..local import HillClimbingPositioner


class EvolutionStrategyOptimizer(BaseOptimizer):
    def __init__(self, _opt_args_):
        """Initialize the evolution-strategy optimizer.

        Derives the per-generation mutation and crossover counts from the
        population size and the configured rates (mutations rounded down,
        crossovers rounded up).

        Args:
            _opt_args_: optimizer argument container, forwarded to BaseOptimizer.
        """
        super().__init__(_opt_args_)
        self.n_positioners = self._opt_args_.individuals

        rate_mut = self._opt_args_.mutation_rate
        rate_cross = self._opt_args_.crossover_rate
        self.n_mutations = floor(self.n_positioners * rate_mut)
        self.n_crossovers = ceil(self.n_positioners * rate_cross)

    def _init_individual(self, _cand_):
        """Spawn one individual at a random position in the search space.

        Args:
            _cand_: candidate object exposing the search space.

        Returns:
            Individual: the newly created individual.
        """
        individual = Individual(self._opt_args_)
        individual.move_random(_cand_)
        return individual

    def _mutate_individuals(self, _cand_, mutate_idx):
        p_list_mutate = [self.p_list[i] for i in mutate_idx]
        for _p_ in p_list_mutate: