# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_local_penalization_gradients_with_single_point_in_batch():
    # Gradient check for the penalizer when exactly one point is in the batch.
    np.random.seed(123)
    mock_model = MockModel()
    penalizer = LocalPenalization(mock_model)
    penalizer.update_batches(np.zeros((1, 1)), 1, -0.1)
    start_point = np.array([0.5])
    _check_grad(penalizer, TOL, start_point)
def test_local_penalization_gradients_with_no_points_in_batch():
    # Gradient check for the penalizer before any points have been collected.
    # BUG FIX: the original passed np.zeros((1, 1)) to update_batches, which made
    # this test an exact duplicate of the single-point test and left the
    # empty-batch code path untested. Pass None so the batch is genuinely empty.
    np.random.seed(123)
    model = MockModel()
    lp = LocalPenalization(model)
    lp.update_batches(None, 1, -0.1)
    x0 = np.array([0.5])
    _check_grad(lp, TOL, x0)
def test_local_penalization_gradients_with_multiple_points_in_batch():
    # Gradient check with five random points already in the batch.
    # Note: the RNG call order (seed -> model -> rand) is kept as in the
    # sibling tests so the drawn batch is reproducible.
    np.random.seed(123)
    mock_model = MockModel()
    penalizer = LocalPenalization(mock_model)
    penalizer.update_batches(np.random.rand(5, 1), 1, -0.1)
    _check_grad(penalizer, TOL, np.array([0.5]))
def test_local_penaliztion_at_batch_point():
    # Edge case: evaluating the local penalization exactly at points already in
    # the batch. A naive implementation divides by zero there, producing NaN
    # gradients, so we assert none appear.
    # (Function name's "penaliztion" typo is preserved for test-ID stability.)
    np.random.seed(123)
    penalizer = LocalPenalization(MockModel())
    batch_points = np.random.rand(5, 1)
    penalizer.update_batches(batch_points, 1, -0.1)
    _, gradients = penalizer.evaluate_with_gradients(batch_points)
    assert not np.any(np.isnan(gradients))
def test_penalization_function_gradients_shape():
    # evaluate_with_gradients on 10 two-dimensional inputs must yield a
    # (10, 1) value array and a (10, 2) gradient array.
    penalizer = LocalPenalization(MockModel())
    penalizer.update_batches(np.zeros((5, 2)), 1, -0.1)
    values, gradients = penalizer.evaluate_with_gradients(np.random.rand(10, 2))
    assert gradients.shape == (10, 2)
    assert values.shape == (10, 1)
def test_penalization_function_shape():
    # evaluate() should return one penalization value per input row: (10, 1).
    penalizer = LocalPenalization(MockModel())
    penalizer.update_batches(np.zeros((5, 1)), 1, -0.1)
    result = penalizer.evaluate(np.random.rand(10, 1))
    assert result.shape == (10, 1)
def compute_next_points(self, loop_state: LoopState, context: dict = None) -> np.ndarray:
    """
    Computes a batch of points using local penalization.

    :param loop_state: Object containing the current state of the loop
    :param context: Contains variables to fix through optimization of acquisition function. The dictionary key is
                    the parameter name and the value is the value to fix the parameter to.
    :return: Array of shape (batch_size, n_dims) containing the collected batch of points
    """
    self.acquisition.update_parameters()

    # Initialize local penalization acquisition
    local_penalization_acquisition = LocalPenalization(self.model)

    # Everything done in log space so addition here is same as multiplying acquisition with local penalization
    # function.
    acquisition = self.acquisition + local_penalization_acquisition

    # PERF FIX: f_min and the Lipschitz estimate depend only on the model, which is
    # not retrained inside the loop below, so compute them once instead of once per
    # collected point (the Lipschitz estimation is comparatively expensive).
    f_min = np.min(self.model.Y)
    lipschitz_constant = _estimate_lipschitz_constant(self.parameter_space, self.model)

    x_batch = []
    for _ in range(self.batch_size):
        # Collect next point by optimizing the penalized acquisition
        x_next, _ = self.acquisition_optimizer.optimize(acquisition, context)
        x_batch.append(x_next)

        # Update local penalization acquisition with all points collected so far
        local_penalization_acquisition.update_batches(np.concatenate(x_batch, axis=0), lipschitz_constant, f_min)
    return np.concatenate(x_batch, axis=0)