"""Run the optimization and return the best record.
Args:
    merit: Function to minimize (default is r.value)
    filter: Predicate to use for filtering candidates
    reraise: Flag indicating whether exceptions in the
        objective function evaluations should be re-raised,
        terminating the optimization.
Returns:
    Record minimizing merit() and satisfying filter();
    or None if nothing satisfies the filter
"""
try:
return self._run(merit=merit, filter=filter, reraise=reraise)
finally:
self.call_term_callbacks()
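# Illustrative sketch (not from the original source): run() accepts a custom
# merit function and a filter predicate. Record objects expose .value and
# .params as used in the examples below; any other attribute would be an
# assumption.
best = controller.run(
    merit=lambda r: r.value,                # minimize the raw objective value
    filter=lambda r: r.value is not None)   # skip records without a result
if best is not None:
    print('Best value found: {0}'.format(best.value))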
class ThreadController(Controller):
"""Thread-based optimization controller.
The optimizer dispatches work to a queue of workers.
Each worker has methods of the form
worker.eval(record)
worker.kill(record)
These methods are asynchronous: they start a function evaluation
or termination, but do not necessarily complete it. The worker
must respond to eval requests, but may ignore kill requests. On
eval requests, the worker should either attempt the evaluation or
mark the record as killed. The worker sends status updates back
to the controller in terms of lambdas (executed at the controller)
that update the relevant record. When the worker becomes
available again, it should use add_worker to add itself back to
the worker queue.
"""
def run(self, merit=None, filter=None):
    """Run the optimization and return the best record.
    Args:
        merit: Function to minimize (default is r.value)
        filter: Predicate to use for filtering candidates
    Returns:
        Record minimizing merit() and satisfying filter();
        or None if nothing satisfies the filter
    """
    try:
        return self._run(merit=merit, filter=filter)
    finally:
        self.call_term_callbacks()
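# Minimal usage sketch (assumed, based on the examples later in this file):
# worker threads wrap a plain objective function and the controller dispatches
# proposed evaluations to whichever worker is free. `my_strategy` is a
# placeholder for any POAP strategy, e.g. the FixedSampleStrategy below.
def objective(x):
    return (x - 0.3) ** 2

controller = ThreadController()
controller.strategy = my_strategy
for _ in range(4):  # four concurrent worker threads
    controller.launch_worker(BasicWorkerThread(controller, objective))
result = controller.run()
print('Best value found: {0}'.format(result.value))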
class ScriptedController(Controller):
"""Run a test script of actions from the controller.
The ScriptedController is meant to test that a strategy adheres
to an expected sequence of proposed actions in a given scenario.
Attributes:
strategy: Strategy for choosing optimization actions.
fevals: Database of function evaluations
"""
def __init__(self):
Controller.__init__(self)
self._can_work = True
def add_timer(self, timeout, callback):
"Add timer."
def _pop(self, key):
"Pop a retry proposal draft from the in-flight dictionary"
return self._in_flight.pop(id(key))
def _rekey(self, old_key, new_key):
"Change the key on a retry proposal draft."
logger.debug("Rekey retry proposal")
self._set(new_key, self._pop(old_key))
def _resubmit(self, key):
"Recycle a previously-submitted retry proposal."
logger.debug("Resubmitting retry proposal")
self.rput(self._pop(key))
class FixedSampleStrategy(BaseStrategy):
"""Sample at a fixed set of points.
The fixed sampling strategy is appropriate for any non-adaptive
sampling scheme. Since the strategy is non-adaptive, we can
employ as many available workers as we have points to process. We
keep trying any evaluation that fails, and suggest termination
only when all evaluations are complete. The points in the
experimental design can be provided as any iterable object (e.g. a
list or a generator function). One can use a generator for an
infinite sequence if the fixed sampling strategy is used in
combination with a strategy that provides a termination criterion.
"""
def __init__(self, points):
"""Initialize the sampling scheme.
data = TorchOptim(seed=seed, server=server)
# Create a strategy and a controller
controller = ThreadController()
controller.strategy = \
SyncStrategyNoConstraints(
worker_id=0, data=data,
maxeval=maxeval, nsamples=nsamples,
exp_design=LatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim))
# Launch the threads and give them access to the objective function
for _ in range(nthreads):
worker = BasicWorkerThread(controller, data.objfunction)
controller.launch_worker(worker)
# Run the optimization strategy
result = controller.run()
print('Best value found: {0}'.format(result.value))
print('Best solution found: {0}\n'.format(
np.array_str(result.params[0], max_line_width=np.inf,
precision=5, suppress_small=True)))
millis = int(round(time.time() * 1000))
print('Ended: ' + str(datetime.now()) + ' (' + str(millis) + ')')
controller.strategy = SRBFStrategy(max_evals=self.max_iter,
opt_prob=self.problem,
exp_design=sampler,
surrogate=surrogate_model,
asynchronous=True,
batch_size=num_threads)
print("Number of threads: {}".format(num_threads))
print("Maximum number of evaluations: {}".format(self.max_iter))
print("Strategy: {}".format(controller.strategy.__class__.__name__))
print("Experimental design: {}".format(sampler.__class__.__name__))
print("Surrogate: {}".format(surrogate_model.__class__.__name__))
# Launch the threads and give them access to the objective function
for _ in range(num_threads):
worker = BasicWorkerThread(controller, self.problem.eval)
controller.launch_worker(worker)
# Run the optimization strategy
result = controller.run()
print('Best value found: {0}'.format(result.value))
print('Best solution found: {0}\n'.format(np.array_str(result.params[0],
max_line_width=np.inf,
precision=4, suppress_small=True)))
self.solution = result.params[0]
# Extract function values from the controller
self.optimization_values = np.array([o.value for o in controller.fevals])
# send the finish signal
data = TorchOptim(seed=seed, server=server)
# Create a strategy and a controller
controller = ThreadController()
controller.strategy = \
SyncStrategyNoConstraints(
worker_id=0, data=data,
maxeval=maxeval, nsamples=nsamples,
exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
sampling_method=CandidateDYCORS(data=data, numcand=500*data.dim))
# Launch the threads and give them access to the objective function
for _ in range(nthreads):
worker = BasicWorkerThread(controller, data.objfunction)
controller.launch_worker(worker)
# Run the optimization strategy
result = controller.run()
print('Best value found: {0}'.format(result.value))
print('Best solution found: {0}\n'.format(
np.array_str(result.params[0], max_line_width=np.inf,
precision=5, suppress_small=True)))
millis = int(round(time.time() * 1000))
print('Ended: ' + str(datetime.now()) + ' (' + str(millis) + ')')
def __init__(self, controller, objective):
"Initialize the worker."
super(BasicWorkerThread, self).__init__(controller)
self.objective = objective
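# Driver-script snippet: seed, nthreads, and maxeval are presumably parsed
# from earlier command-line arguments that are not shown here.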
server = sys.argv[4]
np.random.seed(int(seed))
print("\nNumber of threads: "+str(nthreads))
print("Maximum number of evaluations: "+str(maxeval))
print("Search strategy: Candidate DyCORS")
print("Experimental design: Latin Hypercube")
print("Surrogate: Cubic RBF")
nsamples = nthreads
data = TorchOptim(seed=seed, server=server)
# Create a strategy and a controller
controller = ThreadController()
controller.strategy = \
SyncStrategyNoConstraints(
worker_id=0, data=data,
maxeval=maxeval, nsamples=nsamples,
exp_design=LatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim))
# Launch the threads and give them access to the objective function
for _ in range(nthreads):
worker = BasicWorkerThread(controller, data.objfunction)
controller.launch_worker(worker)
# Run the optimization strategy
result = controller.run()