How to use the emukit.core.loop.SequentialPointCalculator class in emukit

To help you get started, we’ve selected a few emukit examples based on popular ways SequentialPointCalculator is used in public projects.
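
Across these examples the pattern is identical: a SequentialPointCalculator wraps an acquisition function and an acquisition optimizer, and each call to compute_next_points optimizes the acquisition to propose exactly one new point. Below is a minimal, self-contained sketch of that pattern; the SumOfSquares acquisition is a toy stand-in mirroring the mocks used in the tests further down.

import numpy as np

from emukit.core import ContinuousParameter, ParameterSpace
from emukit.core.acquisition import Acquisition
from emukit.core.loop import SequentialPointCalculator, create_loop_state
from emukit.core.optimization import GradientAcquisitionOptimizer


class SumOfSquares(Acquisition):
    """Toy acquisition, for illustration only."""

    @property
    def has_gradients(self):
        return False

    def evaluate(self, x):
        return np.sum(x ** 2, axis=1)[:, None]


space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
calculator = SequentialPointCalculator(SumOfSquares(), GradientAcquisitionOptimizer(space))

# A LoopState is required by the interface, though this calculator does not consult it.
loop_state = create_loop_state(np.array([[0.5, 0.5]]), np.array([[0.5]]))
next_point = calculator.compute_next_points(loop_state)
print(next_point.shape)  # (1, 2): always exactly one point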

github amzn / emukit / tests / emukit / core / test_loop_steps.py
def test_sequential_with_context():
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25})

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Context value should be what we set
    assert np.isclose(next_points[0, 0], 0.25)
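
As this test shows, the context argument pins named parameters to fixed values: the optimizer searches only the remaining dimensions and splices the fixed values back into the returned point. Continuing the sketch from the top of the page (reusing its calculator and loop_state, an assumption made for brevity):

# Pin 'x' to 0.25 and optimize the acquisition over 'y' alone. Columns follow
# the ParameterSpace order, so column 0 carries the context value verbatim.
next_point = calculator.compute_next_points(loop_state, context={'x': 0.25})
assert np.isclose(next_point[0, 0], 0.25)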
github amzn / emukit / tests / emukit / core / test_multi_source_optimizer.py
def test_multi_source_sequential_with_context():
    # Check that we can fix a non-information source parameter with context
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1),
                            ContinuousParameter('y', 0, 1),
                            InformationSourceParameter(2)])
    acquisition_optimizer = AcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, multi_source_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25})

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Context value should be what we set
    assert np.isclose(next_points[0, 0], 0.25)
github amzn / emukit / tests / emukit / core / test_multi_source_optimizer.py
def test_multi_source_sequential_with_source_context():
    # Check that we can fix the information source parameter with context
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1),
                            ContinuousParameter('y', 0, 1),
                            InformationSourceParameter(2)])
    acquisition_optimizer = AcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)

    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, multi_source_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'source': 1.0})

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Context value should be what we set
    assert np.isclose(next_points[0, 2], 1.)
github amzn / emukit / tests / emukit / core / test_outer_loop.py
    x_init = np.linspace(0, 1, 5)[:, None]
    y_init = user_function(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    mse = []

    # Registered on iteration_end_event below; emukit invokes it as
    # (loop, loop_state), so the first argument is the OuterLoop instance.
    def compute_mse(self, loop_state):
        mse.append(np.mean(np.square(model.predict(x_test)[0] - y_test)))

    loop_state = create_loop_state(x_init, y_init)

    acquisition = ModelVariance(model)
    acquisition_optimizer = AcquisitionOptimizer(space)
    candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
    model_updater = FixedIntervalUpdater(model)

    loop = OuterLoop(candidate_point_calculator, model_updater, loop_state)
    loop.iteration_end_event.append(compute_mse)
    loop.run_loop(user_function, 5)

    assert len(mse) == 5
github amzn / emukit / tests / emukit / core / test_loop_steps.py
def test_sequential_evaluator():
    # SequentialPointCalculator should just return result of the acquisition optimizer
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition_optimizer = mock.create_autospec(GradientAcquisitionOptimizer)
    mock_acquisition_optimizer.optimize.return_value = (np.array([[0.]]), None)
    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, mock_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock)

    # "SequentialPointCalculator" should only ever return 1 value
    assert(len(next_points) == 1)
    # Value should be result of acquisition optimization
    assert(np.equal(np.array([[0.]]), next_points[0]))
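
This mock works because the calculator relies on a small contract, as the test comment notes: the optimizer's optimize method returns a (point, acquisition_value) tuple, and SequentialPointCalculator passes the point through unchanged. Any object honouring that contract can stand in; a hedged sketch (FixedPointOptimizer is invented here for illustration):

import numpy as np

from emukit.core.loop import SequentialPointCalculator


class FixedPointOptimizer:
    """Toy stand-in that always 'finds' the same point."""

    def optimize(self, acquisition, context=None):
        # Same (point, acquisition_value) tuple shape the mock above returns;
        # only the first element is used by the calculator.
        return np.array([[0.]]), None


seq = SequentialPointCalculator(None, FixedPointOptimizer())  # acquisition unused by the toy optimizer
next_points = seq.compute_next_points(None)  # loop state is not consulted either
assert next_points.shape == (1, 1)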
github amzn / emukit / emukit / quadrature / loop / quadrature_loop.py
        :param model: The vanilla Bayesian quadrature method
        :param acquisition: The acquisition function that is used to collect new points.
                            Defaults to IntegralVarianceReduction.
        :param model_updater: Defines how and when the quadrature model is updated if new data arrives.
                              Defaults to updating hyper-parameters every iteration.
        """

        if acquisition is None:
            acquisition = IntegralVarianceReduction(model)

        if model_updater is None:
            model_updater = FixedIntervalUpdater(model, 1)

        space = ParameterSpace(model.integral_bounds.convert_to_list_of_continuous_parameters())
        acquisition_optimizer = AcquisitionOptimizer(space)
        candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
        loop_state = create_loop_state(model.X, model.Y)

        super().__init__(candidate_point_calculator, model_updater, loop_state)

        self.model = model
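
This constructor belongs to VanillaBayesianQuadratureLoop (per the file above) and shows that, with no arguments given, it assembles an IntegralVarianceReduction acquisition and a SequentialPointCalculator automatically. A hedged usage fragment; vbq_model is an assumed, pre-built VanillaBayesianQuadrature model, omitted here because its kernel-wrapper setup varies across emukit versions:

from emukit.quadrature.loop import VanillaBayesianQuadratureLoop

# `vbq_model` (a VanillaBayesianQuadrature instance) is assumed to exist.
loop = VanillaBayesianQuadratureLoop(model=vbq_model)
loop.run_loop(user_function, 20)  # defaults wire up IntegralVarianceReduction + SequentialPointCalculator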
github amzn / emukit / emukit / examples / fabolas / fabolas_loop.py
        if marginalize_hypers:
            acquisition_generator = lambda model: ContinuousFidelityEntropySearch(model_objective, space=extended_space,
                                                                                  target_fidelity_index=len(
                                                                                      extended_space.parameters) - 1)
            entropy_search = IntegratedHyperParameterAcquisition(model_objective, acquisition_generator)
        else:
            entropy_search = ContinuousFidelityEntropySearch(model_objective, space=extended_space,
                                                             target_fidelity_index=len(extended_space.parameters) - 1)

        acquisition = acquisition_per_expected_cost(entropy_search, model_cost)

        model_updater_objective = FixedIntervalUpdater(model_objective, update_interval)
        model_updater_cost = FixedIntervalUpdater(model_cost, update_interval, lambda state: state.cost)

        acquisition_optimizer = RandomSearchAcquisitionOptimizer(extended_space, num_eval_points=num_eval_points)
        candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)

        loop_state = create_loop_state(model_objective.X, model_objective.Y, model_cost.Y)

        super(CostSensitiveBayesianOptimizationLoop, self).__init__(candidate_point_calculator,
                                                                    [model_updater_objective, model_updater_cost],
                                                                    loop_state)
github amzn / emukit / emukit / experimental_design / model_based / experimental_design_loop.py
        :param space: Definition of domain bounds to collect points within
        :param model: The model that approximates the underlying function
        :param acquisition: experimental design acquisition function object. Default: ModelVariance acquisition
        :param update_interval: How many iterations pass before next model optimization
        :param batch_size: Number of points to collect in a batch. Defaults to one.
        """

        if acquisition is None:
            acquisition = ModelVariance(model)

        # This AcquisitionOptimizer object deals with optimizing the acquisition to find the next point to collect
        acquisition_optimizer = AcquisitionOptimizer(space)

        # Construct emukit classes
        if batch_size == 1:
            candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
        elif batch_size > 1:
            candidate_point_calculator = \
                GreedyBatchPointCalculator(model, acquisition, acquisition_optimizer, batch_size)
        else:
            raise ValueError('Batch size value of ' + str(batch_size) + ' is invalid.')

        model_updater = FixedIntervalUpdater(model, update_interval)
        loop_state = create_loop_state(model.X, model.Y)

        super().__init__(candidate_point_calculator, model_updater, loop_state)

        self.model = model
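
This constructor belongs to ExperimentalDesignLoop (per the file above); the default batch_size of one takes exactly the SequentialPointCalculator branch. A minimal usage sketch, assuming a GPy backend and a placeholder sine objective:

import GPy
import numpy as np

from emukit.core import ContinuousParameter, ParameterSpace
from emukit.experimental_design.model_based.experimental_design_loop import ExperimentalDesignLoop
from emukit.model_wrappers import GPyModelWrapper


def user_function(x):
    return np.sin(3 * x)  # placeholder objective; maps (n, 1) -> (n, 1)


space = ParameterSpace([ContinuousParameter('x', 0, 1)])
x_init = np.linspace(0, 1, 5)[:, None]
y_init = user_function(x_init)

model = GPyModelWrapper(GPy.models.GPRegression(x_init, y_init))

loop = ExperimentalDesignLoop(space, model)  # batch_size=1 -> SequentialPointCalculator
loop.run_loop(user_function, 5)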
github amzn / emukit / emukit / bayesian_optimization / loops / bayesian_optimization_loop.py
        :param acquisition: The acquisition function that will be used to collect new points (default, EI). If batch
                            size is greater than one, this acquisition must output positive values only.
        :param update_interval: Number of iterations between optimization of model hyper-parameters. Defaults to 1.
        :param batch_size: How many points to evaluate in one iteration of the optimization loop. Defaults to 1.
        """

        self.model = model

        if acquisition is None:
            acquisition = ExpectedImprovement(model)

        model_updaters = FixedIntervalUpdater(model, update_interval)

        acquisition_optimizer = AcquisitionOptimizer(space)
        if batch_size == 1:
            candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
        else:
            if not isinstance(model, IDifferentiable):
                raise ValueError('Model must implement ' + str(IDifferentiable) +
                                 ' for use with Local Penalization batch method.')
            log_acquisition = LogAcquisition(acquisition)
            candidate_point_calculator = LocalPenalizationPointCalculator(log_acquisition, acquisition_optimizer, model,
                                                                          space, batch_size)

        loop_state = create_loop_state(model.X, model.Y)

        super().__init__(candidate_point_calculator, model_updaters, loop_state)
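
The Bayesian optimization analogue, BayesianOptimizationLoop (per the file above), takes the same (space, model) pair, and batch_size=1 again selects SequentialPointCalculator. A minimal sketch under the same GPy assumptions, with a placeholder quadratic objective:

import GPy
import numpy as np

from emukit.bayesian_optimization.loops import BayesianOptimizationLoop
from emukit.core import ContinuousParameter, ParameterSpace
from emukit.model_wrappers import GPyModelWrapper


def objective(x):
    return (x - 0.3) ** 2  # placeholder objective to minimize


space = ParameterSpace([ContinuousParameter('x', 0, 1)])
x_init = np.array([[0.1], [0.5], [0.9]])
model = GPyModelWrapper(GPy.models.GPRegression(x_init, objective(x_init)))

bo_loop = BayesianOptimizationLoop(space=space, model=model)  # acquisition defaults to ExpectedImprovement
bo_loop.run_loop(objective, 10)
print(bo_loop.loop_state.X.shape)  # initial points plus one per iteration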
github amzn / emukit / emukit / examples / gp_bayesian_optimization / optimization_loops.py
        model = Bohamiann(x_init, y_init, **model_kwargs)
    else:
        raise ValueError('Unrecognised model type: ' + str(model_type))

    # Create acquisition
    if acquisition_type is AcquisitionType.EI:
        acquisition = ExpectedImprovement(model)
    elif acquisition_type is AcquisitionType.PI:
        acquisition = ProbabilityOfImprovement(model)
    elif acquisition_type is AcquisitionType.NLCB:
        acquisition = NegativeLowerConfidenceBound(model)
    else:
        raise ValueError('Unrecognised acquisition type: ' + str(acquisition_type))

    acquisition_optimizer = AcquisitionOptimizer(parameter_space)
    candidate_point_calculator = SequentialPointCalculator(acquisition, acquisition_optimizer)
    loop_state = create_loop_state(x_init, y_init)
    model_updater = FixedIntervalUpdater(model, 1)
    return OuterLoop(candidate_point_calculator, model_updater, loop_state)
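
However the loop is assembled, driving it is uniform: run_loop accepts either a plain iteration count (as in the test_outer_loop example above) or a stopping-condition object. A hedged fragment, where loop is an OuterLoop such as the one returned here and f is the function under evaluation (both assumed):

from emukit.core.loop import FixedIterationsStoppingCondition

# `loop` (an OuterLoop) and `f` (the objective) are assumed to exist.
loop.run_loop(f, 10)                                    # plain iteration count
loop.run_loop(f, FixedIterationsStoppingCondition(10))  # explicit stopping-condition form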