import mock
import numpy as np
import pytest

from emukit.core.acquisition import Acquisition, IntegratedHyperParameterAcquisition
from emukit.core.interfaces import IPriorHyperparameters


class DummyAcquisition(Acquisition):
    def __init__(self):
        pass

    def evaluate(self, x):
        return np.ones(x.shape[0])

    @property
    def has_gradients(self):
        return False


class DummyAcquisitionWithGradients(Acquisition):
    def __init__(self):
        pass

    def evaluate(self, x):
        return np.ones(x.shape[0])

    def evaluate_with_gradients(self, x):
        return np.ones(x.shape[0]), -np.ones(x.shape[0])

    @property
    def has_gradients(self):
        return True
from emukit.core import ParameterSpace
from emukit.core.optimization.anchor_points_generator import ObjectiveAnchorPointsGenerator


def test_objective_anchor_point_generator():
    num_samples = 5
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.evaluate.return_value = np.arange(num_samples)[:, None]
    space = mock.create_autospec(ParameterSpace)
    space.sample_uniform.return_value = np.arange(num_samples)[:, None]
    space.constraints = []
    generator = ObjectiveAnchorPointsGenerator(space, mock_acquisition, num_samples=num_samples)
    anchor_points = generator.get(1)

    # Check that the x that is picked corresponds to the highest acquisition value
    assert np.array_equal(anchor_points, np.array([[num_samples - 1]]))
def test_acquisition_adding():
    acquisition_sum = DummyAcquisition() + DummyAcquisition()
    # Each DummyAcquisition evaluates to 1, so the summed acquisition should evaluate to 2 everywhere
    assert np.all(acquisition_sum.evaluate(np.zeros((2, 1))) == 2)
from emukit.core.loop import LoopState, SequentialPointCalculator
from emukit.core.optimization import GradientAcquisitionOptimizer


def test_sequential_evaluator():
    # SequentialPointCalculator should just return the result of the acquisition optimizer
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition_optimizer = mock.create_autospec(GradientAcquisitionOptimizer)
    mock_acquisition_optimizer.optimize.return_value = (np.array([[0.]]), None)
    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, mock_acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock)

    # SequentialPointCalculator should only ever return one point
    assert len(next_points) == 1
    # That point should be the result of the acquisition optimization
    assert np.array_equal(np.array([[0.]]), next_points)
from emukit.core import ContinuousParameter


def test_sequential_with_context():
    mock_acquisition = mock.create_autospec(Acquisition)
    mock_acquisition.has_gradients = False
    mock_acquisition.evaluate = lambda x: np.sum(x**2, axis=1)[:, None]
    space = ParameterSpace([ContinuousParameter('x', 0, 1), ContinuousParameter('y', 0, 1)])
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    loop_state_mock = mock.create_autospec(LoopState)
    seq = SequentialPointCalculator(mock_acquisition, acquisition_optimizer)
    next_points = seq.compute_next_points(loop_state_mock, context={'x': 0.25})

    # SequentialPointCalculator should only ever return one point
    assert len(next_points) == 1
    # The context dimension should stay fixed at the value we set
    assert np.isclose(next_points[0, 0], 0.25)
from typing import Tuple

import numpy as np

from ...core.acquisition import Acquisition


class LogAcquisition(Acquisition):
    """
    Takes the log of an acquisition function.
    """
    def __init__(self, acquisition: Acquisition):
        """
        :param acquisition: Base acquisition function that is log transformed. This acquisition function must
                            output positive values only.
        """
        self.acquisition = acquisition

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        :param x: Input location
        :return: log of original acquisition function at input location(s)
        """
        return np.log(self.acquisition.evaluate(x))

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        # Chain rule: d/dx log f(x) = f'(x) / f(x)
        value, gradient = self.acquisition.evaluate_with_gradients(x)
        return np.log(value), gradient / value

    @property
    def has_gradients(self) -> bool:
        return self.acquisition.has_gradients
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

from typing import Tuple, Union

import numpy as np
from GPyOpt.util.general import get_quantiles

from ...core.interfaces import IModel, IDifferentiable
from ...core.acquisition import Acquisition


class ProbabilityOfImprovement(Acquisition):
    def __init__(self, model: Union[IModel, IDifferentiable], jitter: np.float64 = np.float64(0)) -> None:
        """
        This acquisition computes, for a given input point, the probability of improving over the
        best function value observed so far. For more information see:

        Efficient Global Optimization of Expensive Black-Box Functions
        Jones, Donald R. and Schonlau, Matthias and Welch, William J.
        Journal of Global Optimization

        :param model: The underlying model that provides the predictive mean and variance for the given test points
        :param jitter: Jitter to balance exploration / exploitation
        """
        self.model = model
        self.jitter = jitter

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        :param x: Input locations at which to evaluate the acquisition
        :return: Probability of improving over the best observed value at each input
        """
        mean, variance = self.model.predict(x)
        standard_deviation = np.sqrt(variance)
        y_minimum = np.min(self.model.Y, axis=0)
        # get_quantiles returns (pdf, cdf, u); the cdf term is the probability of improvement
        _, cdf, _ = get_quantiles(self.jitter, y_minimum, mean, standard_deviation)
        return cdf
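# Usage sketch, assuming a GPy-backed emukit model; GPyModelWrapper is emukit's
# standard GPy wrapper, and the training data below is made up for illustration.
import GPy
import numpy as np
from emukit.model_wrappers import GPyModelWrapper

X = np.random.rand(10, 1)
Y = np.sin(3 * X) + 0.05 * np.random.randn(10, 1)

model = GPyModelWrapper(GPy.models.GPRegression(X, Y))
pi = ProbabilityOfImprovement(model, jitter=np.float64(0.1))

x_new = np.linspace(0, 1, 5)[:, None]
print(pi.evaluate(x_new))  # probability of improving on min(Y) at each candidate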
import numpy as np

from ..interfaces import IDifferentiable, IModel
# Assumes this module sits alongside acquisition.py inside emukit.core.acquisition
from .acquisition import Acquisition


def acquisition_per_expected_cost(acquisition: Acquisition, cost_model: IModel, min_cost: float = 1e-4) -> Acquisition:
    """
    Creates an acquisition function that is the original acquisition scaled by the expected value of the evaluation
    cost of the user function.

    :param acquisition: Base acquisition function
    :param cost_model: Model of the evaluation cost. Should return positive values only.
    :param min_cost: Lower bound used to clip the cost prediction, guarding against division by very small values
    :return: Scaled acquisition function
    """
    return acquisition / CostAcquisition(cost_model, min_cost)


class CostAcquisition(Acquisition):
    """
    Acquisition that simply returns the expected value from the cost model
    """
    def __init__(self, cost_model: IModel, min_cost: float = 1e-4):
        """
        :param cost_model: Model of cost. Should return only positive predictions
        :param min_cost: A minimum value for the cost. The cost model prediction will be clipped to this value if
                         required
        """
        self.cost_model = cost_model
        self.min_cost = min_cost

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        Evaluate the acquisition function

        :param x: Input locations
        :return: Predicted mean cost at x, clipped from below at min_cost
        """
        # predict returns (mean, variance); only the mean cost is needed here
        return np.maximum(self.cost_model.predict(x)[0], self.min_cost)
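# Usage sketch: scaling an acquisition by expected evaluation cost. Both models are
# GPy-backed emukit wrappers fit to made-up data, and ProbabilityOfImprovement from
# the snippet above serves as the base acquisition (assumed export path below).
import GPy
import numpy as np
from emukit.model_wrappers import GPyModelWrapper
from emukit.bayesian_optimization.acquisitions import ProbabilityOfImprovement

X = np.random.rand(10, 2)
Y = np.sum(X, axis=1, keepdims=True)
cost = 1.0 + X[:, [0]]  # strictly positive costs that grow with the first input

objective_model = GPyModelWrapper(GPy.models.GPRegression(X, Y))
cost_model = GPyModelWrapper(GPy.models.GPRegression(X, cost))

base = ProbabilityOfImprovement(objective_model)
per_cost = acquisition_per_expected_cost(base, cost_model)
print(per_cost.evaluate(np.random.rand(3, 2)))  # acquisition value per unit expected cost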
from typing import Union, Callable, Tuple

import numpy as np

from emukit.core.acquisition import Acquisition
from emukit.core.interfaces import IModel, IPriorHyperparameters


class IntegratedHyperParameterAcquisition(Acquisition):
    """
    This acquisition class provides functionality for integrating any acquisition function over model hyper-parameters
    """
    def __init__(self, model: Union[IModel, IPriorHyperparameters], acquisition_generator: Callable, n_samples: int = 10,
                 n_burnin: int = 100, subsample_interval: int = 10, step_size: float = 1e-1, leapfrog_steps: int = 20):
        """
        :param model: An emukit model that implements IPriorHyperparameters
        :param acquisition_generator: Function that returns an acquisition object when given the model as its only argument
        :param n_samples: Number of hyper-parameter samples
        :param n_burnin: Number of initial samples not used.
        :param subsample_interval: Interval of subsampling from HMC samples.
        :param step_size: Size of the gradient steps in the HMC sampler.
        :param leapfrog_steps: Number of gradient steps before each Metropolis Hastings step.
        """
        self.model = model
        self.acquisition_generator = acquisition_generator
        # Remaining HMC settings are stored for use when hyper-parameter samples are drawn
        self.n_samples = n_samples
        self.n_burnin = n_burnin
        self.subsample_interval = subsample_interval
        self.step_size = step_size
        self.leapfrog_steps = leapfrog_steps
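# Construction sketch: acquisition_generator is any callable mapping a model to an
# acquisition, so a lambda suffices. The GPy prior calls below are assumptions made
# so the wrapped model satisfies IPriorHyperparameters when HMC sampling is run;
# ProbabilityOfImprovement is reused from the earlier snippet.
import GPy
import numpy as np
from emukit.model_wrappers import GPyModelWrapper
from emukit.bayesian_optimization.acquisitions import ProbabilityOfImprovement

X = np.random.rand(10, 1)
Y = np.sin(3 * X)

gpy_model = GPy.models.GPRegression(X, Y)
gpy_model.kern.set_prior(GPy.priors.Gamma.from_EV(1.0, 1.0))  # priors enable HMC sampling
model = GPyModelWrapper(gpy_model)

integrated = IntegratedHyperParameterAcquisition(
    model,
    acquisition_generator=lambda m: ProbabilityOfImprovement(m),
    n_samples=5,
)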
from typing import Callable, Union

import numpy as np
import scipy

from ...core import InformationSourceParameter
from ...core.acquisition import Acquisition
from ...core.interfaces import IModel
from ...core.parameter_space import ParameterSpace
from ...samplers import AffineInvariantEnsembleSampler, McmcSampler

from ..acquisitions import ExpectedImprovement
from ..interfaces import IEntropySearchModel
from .. import epmgp


class EntropySearch(Acquisition):
    def __init__(self, model: Union[IModel, IEntropySearchModel], space: ParameterSpace, sampler: McmcSampler = None,
                 num_samples: int = 100, num_representer_points: int = 50,
                 proposal_function: Callable = None, burn_in_steps: int = 50) -> None:
        """
        Entropy Search acquisition function approximates the distribution of the global
        minimum and tries to decrease its entropy. See this paper for more details:

        P. Hennig and C. J. Schuler
        Entropy search for information-efficient global optimization
        Journal of Machine Learning Research, 13, 2012

        :param model: GP model used to compute the distribution of the minimum, dubbed p_min.
        :param space: Domain space which we need for the sampling of the representer points
        :param sampler: MCMC sampler for the representer points