How to use the emukit.bayesian_optimization.acquisitions.ExpectedImprovement class in emukit

To help you get started, we've selected a few emukit examples showing popular ways ExpectedImprovement is used in public projects.
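
ExpectedImprovement scores a candidate point by how much, in expectation, it improves on the best observation seen so far. Before the project snippets, here is a minimal, self-contained sketch of the pattern most of them follow (the objective function and data are invented for illustration):

import numpy as np
import GPy

from emukit.core import ContinuousParameter, ParameterSpace
from emukit.core.loop import UserFunctionWrapper
from emukit.model_wrappers import GPyModelWrapper
from emukit.bayesian_optimization.acquisitions import ExpectedImprovement
from emukit.bayesian_optimization.loops import BayesianOptimizationLoop

# Hypothetical 1-d objective; emukit user functions take and return 2-d arrays
def objective(x):
    return np.sin(3.0 * x) + x ** 2

x_init = np.random.rand(5, 1)
y_init = objective(x_init)

# Fit a GPy model and wrap it so emukit can use it
model = GPyModelWrapper(GPy.models.GPRegression(x_init, y_init))

space = ParameterSpace([ContinuousParameter('x', 0, 1)])
acquisition = ExpectedImprovement(model)

loop = BayesianOptimizationLoop(space=space, model=model, acquisition=acquisition)
loop.run_loop(UserFunctionWrapper(objective), 10)  # 10 iterations
print(loop.get_results().minimum_location)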

From amzn/emukit: tests/emukit/test_acquisitions.py (view on GitHub)
def integrated_expected_improvement_acquisition(gpy_model_mcmc):
    return IntegratedHyperParameterAcquisition(gpy_model_mcmc, ExpectedImprovement, 10)
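
IntegratedHyperParameterAcquisition averages an acquisition over posterior samples of the model hyper-parameters. Note that the acquisition class itself is passed, not an instance: the integrator constructs one ExpectedImprovement per hyper-parameter sample, and the third argument is the number of samples. A hedged sketch (the import path is an assumption; the model must support hyper-parameter sampling, e.g. a GPy model with priors set, wrapped in GPyModelWrapper):

from emukit.core.acquisition import IntegratedHyperParameterAcquisition
from emukit.bayesian_optimization.acquisitions import ExpectedImprovement

# Pass the class, not an instance: one EI is built per hyper-parameter sample
acquisition = IntegratedHyperParameterAcquisition(model, ExpectedImprovement, 10)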
From amzn/emukit: tests/emukit/bayesian_optimization/test_bayesian_optimization_loop.py (view on GitHub)
def test_batch_loop_fails_without_gradients_implemented():
    parameter_space = ParameterSpace([ContinuousParameter('x', 0, 1)])

    model = mock.create_autospec(IModel)

    base_acquisition = ExpectedImprovement(model)

    batch_size = 10

    with pytest.raises(ValueError):
        BayesianOptimizationLoop(parameter_space, model, base_acquisition, batch_size)
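
The mocked model above deliberately lacks gradients, which is why the loop raises. With a model that does implement IDifferentiable, such as GPyModelWrapper, the same construction succeeds and batches are collected via local penalization. A sketch, assuming x_init and y_init are existing (n, 1) arrays:

import GPy
from emukit.model_wrappers import GPyModelWrapper

model = GPyModelWrapper(GPy.models.GPRegression(x_init, y_init))  # implements IDifferentiable
loop = BayesianOptimizationLoop(parameter_space, model, ExpectedImprovement(model), batch_size=10)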
From amzn/emukit: tests/emukit/test_acquisitions.py (view on GitHub)
def expected_improvement_acquisition(gpy_model):
    return ExpectedImprovement(gpy_model)
From amzn/emukit: integration_tests/emukit/bayesian_optimization/test_optimization_with_categorical_variables.py (view on GitHub)
    parameter_space = ParameterSpace([  # opening line reconstructed; the snippet is truncated above
        ContinuousParameter('real_param', 0.0, 1.0),
        CategoricalParameter('categorical_param', encoding)  # 'encoding' is defined earlier in the test
    ])

    x_init = parameter_space.sample_uniform(10)

    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:3] == 0.0, x_init[:, 1:3] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    acquisition = ExpectedImprovement(model)

    loop = BayesianOptimizationLoop(parameter_space, model, acquisition)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(np.logical_or(loop.loop_state.X[:, 1:3] == 0.0, loop.loop_state.X[:, 1:3] == 1.0))
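
The encoding referenced above is defined earlier in the test; a one-hot encoding explains both the (10, 4) shape (one continuous column plus three 0/1 columns) and the 0/1 assertions. A sketch of what that setup might look like (the category labels are invented):

from emukit.core import CategoricalParameter, ContinuousParameter, OneHotEncoding, ParameterSpace

encoding = OneHotEncoding(['cat_a', 'cat_b', 'cat_c'])  # three categories -> three 0/1 columns
parameter_space = ParameterSpace([
    ContinuousParameter('real_param', 0.0, 1.0),
    CategoricalParameter('categorical_param', encoding)
])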
From amzn/emukit: tests/emukit/bayesian_optimization/test_bayesian_optimization_loop.py (view on GitHub)
def test_loop():
    n_iterations = 5

    x_init = np.random.rand(5, 1)
    y_init = np.random.rand(5, 1)

    # Make GPy model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    space = ParameterSpace([ContinuousParameter('x', 0, 1)])
    acquisition = ExpectedImprovement(model)

    # Make loop and collect points
    bo = BayesianOptimizationLoop(model=model, space=space, acquisition=acquisition)
    bo.run_loop(UserFunctionWrapper(f), FixedIterationsStoppingCondition(n_iterations))

    # Check we got the correct number of points
    assert bo.loop_state.X.shape[0] == n_iterations + 5

    # Check the obtained results
    results = bo.get_results()

    assert results.minimum_location.shape[0] == 1
    assert results.best_found_value_per_iteration.shape[0] == n_iterations + 5
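
The f passed to UserFunctionWrapper is defined elsewhere in the test module. Any function mapping an (n, 1) input array to an (n, 1) output array fits; a hypothetical stand-in:

import numpy as np

def f(x: np.ndarray) -> np.ndarray:
    # emukit user functions receive and return 2-d arrays of shape (n_points, 1)
    return np.sin(3.0 * x) + x ** 2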
From amzn/emukit: emukit/examples/gp_bayesian_optimization/single_objective_bayesian_optimization.py (view on GitHub)
def _acquisition_chooser(self):
        """ Select the acquisition function used in the optimization """
        if self.acquisition_type is AcquisitionType.EI:
            self.acquisition = ExpectedImprovement(self.model)
        elif self.acquisition_type is AcquisitionType.PI:
            self.acquisition = ProbabilityOfImprovement(self.model)
        elif self.acquisition_type is AcquisitionType.NLCB:
            self.acquisition = NegativeLowerConfidenceBound(self.model)
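
All three acquisitions in this chooser are built the same way, from the model alone. For reference, the equivalent direct construction (a sketch):

from emukit.bayesian_optimization.acquisitions import (ExpectedImprovement,
                                                       NegativeLowerConfidenceBound,
                                                       ProbabilityOfImprovement)

ei = ExpectedImprovement(model)             # AcquisitionType.EI
pi = ProbabilityOfImprovement(model)        # AcquisitionType.PI
nlcb = NegativeLowerConfidenceBound(model)  # AcquisitionType.NLCB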
From amzn/emukit: emukit/bayesian_optimization/loops/bayesian_optimization_loop.py (view on GitHub)
"""
        Emukit class that implements a loop for building modular Bayesian optimization.

        :param space: Input space where the optimization is carried out.
        :param model: The model that approximates the underlying function
        :param acquisition: The acquisition function that will be used to collect new points (default: EI). If batch
                            size is greater than one, this acquisition must output positive values only.
        :param update_interval: Number of iterations between optimization of model hyper-parameters. Defaults to 1.
        :param batch_size: How many points to evaluate in one iteration of the optimization loop. Defaults to 1.
        """

        self.model = model

        if acquisition is None:
            acquisition = ExpectedImprovement(model)

        model_updaters = FixedIntervalUpdater(model, update_interval)

        acquisition_optimizer = AcquisitionOptimizer(space)
        if batch_size == 1:
            candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
        else:
            if not isinstance(model, IDifferentiable):
                raise ValueError('Model must implement ' + str(IDifferentiable) +
                                 ' for use with Local Penalization batch method.')
            log_acquisition = LogAcquisition(acquisition)
            candidate_point_calculator = LocalPenalizationPointCalculator(log_acquisition, acquisition_optimizer, model,
                                                                          space, batch_size)

        loop_state = create_loop_state(model.X, model.Y)
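
Because acquisition defaults to None, ExpectedImprovement is the implicit default here: constructing the loop without an acquisition is equivalent to passing one explicitly (sketch):

# These two constructions behave identically
loop_default = BayesianOptimizationLoop(space, model)
loop_explicit = BayesianOptimizationLoop(space, model, ExpectedImprovement(model))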
From amzn/emukit: emukit/bayesian_optimization/acquisitions/entropy_search.py (view on GitHub)
if not isinstance(model, IEntropySearchModel):
            raise RuntimeError("Model is not supported for Entropy Search")

        self.model = model
        self.num_representer_points = num_representer_points
        self.burn_in_steps = burn_in_steps

        if sampler is None:
            self.sampler = AffineInvariantEnsembleSampler(space)
        else:
            self.sampler = sampler

        # (unnormalized) density from which to sample the representer points to approximate pmin
        if proposal_function is None:

            ei = ExpectedImprovement(model)

            def prop_func(x):
                if len(x.shape) == 1:
                    x_ = x[None, :]
                else:
                    x_ = x

                if space.check_points_in_domain(x_):
                    return np.log(np.clip(ei.evaluate(x_)[0], 0., np.inf))
                else:
                    return np.array([-np.inf])

            self.proposal_function = prop_func
        else:
            self.proposal_function = proposal_function
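
In other words, when no proposal function is supplied, Entropy Search samples its representer points from the log of clipped Expected Improvement. Constructing the acquisition with those defaults might look like this (a sketch; everything beyond model and space is left at assumed defaults):

from emukit.bayesian_optimization.acquisitions import EntropySearch

# model must implement IEntropySearchModel; representer points default to log-EI samples
entropy_search = EntropySearch(model, space)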
From amzn/emukit: emukit/bayesian_optimization/loops/cost_sensitive_bayesian_optimization_loop.py (view on GitHub)
"""
        Emukit class that implements a loop for building modular cost-sensitive Bayesian optimization.

        :param space: Input space where the optimization is carried out.
        :param model_objective: The model that approximates the underlying objective function
        :param model_cost: The model that approximates the cost of evaluating the objective function
        :param acquisition: The acquisition function that will be used to collect new points (default: EI).
        :param update_interval: Number of iterations between optimization of model hyper-parameters. Defaults to 1.
        """

        if not np.all(np.isclose(model_objective.X, model_cost.X)):
            raise ValueError('Emukit currently only supports identical '
                             'training inputs for the cost and objective model')

        if acquisition is None:
            expected_improvement = ExpectedImprovement(model_objective)
            acquisition = acquisition_per_expected_cost(expected_improvement, model_cost)

        model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
        model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)

        acquisition_optimizer = AcquisitionOptimizer(space)
        candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)

        loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)

        super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
                                                                    [model_updater_objective, model_updater_cost],
                                                                    loop_state)
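
Putting the excerpt to use: with objective and cost models trained on identical inputs, the loop builds the EI-per-unit-cost acquisition automatically. A sketch (model_objective and model_cost are assumed to be already-fitted emukit model wrappers):

from emukit.bayesian_optimization.loops import CostSensitiveBayesianOptimizationLoop

# acquisition is omitted, so ExpectedImprovement divided by the expected
# cost is constructed by default, as in the excerpt above
loop = CostSensitiveBayesianOptimizationLoop(space=parameter_space,
                                             model_objective=model_objective,
                                             model_cost=model_cost)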