import sys

import numpy as np
from poap.controller import BasicWorkerThread, ThreadController
# pySOT 0.1.x-style imports; later pySOT versions renamed these classes.
from pySOT import (CandidateDYCORS, CubicRBFSurface, LatinHypercube,
                   RBFInterpolant, SymmetricLatinHypercube,
                   SyncStrategyNoConstraints)

# seed, nthreads and maxeval are parsed from the command line elsewhere in
# the original script; only the server argument position is shown here.
server = sys.argv[4]
np.random.seed(int(seed))

print("\nNumber of threads: " + str(nthreads))
print("Maximum number of evaluations: " + str(maxeval))
print("Search strategy: Candidate DyCORS")
print("Experimental design: Latin Hypercube")
print("Surrogate: Cubic RBF")
print('best\tf_eval_time\tresult\ttestset_result\tf_eval_count\twallclock_time\thyper-parameters')

nsamples = nthreads
data = TorchOptim(seed=seed, server=server)

# Create a strategy and a controller
controller = ThreadController()
controller.strategy = SyncStrategyNoConstraints(
    worker_id=0, data=data,
    maxeval=maxeval, nsamples=nsamples,
    exp_design=LatinHypercube(dim=data.dim, npts=2 * (data.dim + 1)),
    response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
    sampling_method=CandidateDYCORS(data=data, numcand=100 * data.dim))

# Launch the threads and give them access to the objective function
for _ in range(nthreads):
    worker = BasicWorkerThread(controller, data.objfunction)
    controller.launch_worker(worker)

# Run the optimization strategy
result = controller.run()
# Print the final result
print('Best value found: {0}'.format(result.value))
print('Best solution found: {0}'.format(
    np.array_str(result.params[0], max_line_width=np.inf, precision=5,
                 suppress_small=True)))
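# TorchOptim itself is not shown. A minimal sketch of the problem interface
# that these old-style pySOT strategies expect; the dimension, bounds and
# objective body below are placeholders, not the real hyper-parameter space:
class TorchOptim:
    def __init__(self, seed=1, server=None):
        self.seed = seed
        self.server = server
        self.dim = 3                              # number of hyper-parameters
        self.xlow = np.array([1e-5, 0.0, 16.0])   # lower bounds (assumed)
        self.xup = np.array([1e-1, 0.9, 512.0])   # upper bounds (assumed)
        self.integer = np.array([2])              # indices of integer variables
        self.continuous = np.array([0, 1])        # indices of continuous variables

    def objfunction(self, x):
        # Train a model with hyper-parameters x (possibly on the remote
        # server) and return the validation loss to minimize.
        raise NotImplementedError("placeholder for the real training run")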
from poap.controller import BasicWorkerThread, ThreadController
from pySOT.experimental_design import SymmetricLatinHypercube
from pySOT.strategy import SRBFStrategy
from pySOT.surrogate import GPRegressor

# Newer (pySOT >= 0.2) API; this snippet runs inside a method, so the
# problem and the iteration budget come from self.
num_threads = 4
surrogate_model = GPRegressor(dim=self.problem.dim)
sampler = SymmetricLatinHypercube(dim=self.problem.dim, num_pts=2 * (self.problem.dim + 1))

# Create a strategy and a controller
controller = ThreadController()
controller.strategy = SRBFStrategy(max_evals=self.max_iter,
                                   opt_prob=self.problem,
                                   exp_design=sampler,
                                   surrogate=surrogate_model,
                                   asynchronous=True,
                                   batch_size=num_threads)

print("Number of threads: {}".format(num_threads))
print("Maximum number of evaluations: {}".format(self.max_iter))
print("Strategy: {}".format(controller.strategy.__class__.__name__))
print("Experimental design: {}".format(sampler.__class__.__name__))
print("Surrogate: {}".format(surrogate_model.__class__.__name__))

# Launch the threads and give them access to the objective function;
# the original snippet created the workers but never launched them or
# ran the controller.
for _ in range(num_threads):
    worker = BasicWorkerThread(controller, self.problem.eval)
    controller.launch_worker(worker)

# Run the optimization strategy
result = controller.run()
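# self.problem must implement pySOT's newer OptimizationProblem interface.
# A minimal sketch under assumed bounds and a placeholder objective:
import numpy as np
from pySOT.optimization_problems import OptimizationProblem

class MyProblem(OptimizationProblem):
    def __init__(self, dim=5):
        self.dim = dim
        self.lb = np.zeros(dim)          # lower bounds (assumed)
        self.ub = np.ones(dim)           # upper bounds (assumed)
        self.int_var = np.array([])      # indices of integer variables
        self.cont_var = np.arange(dim)   # indices of continuous variables

    def eval(self, x):
        # Placeholder objective; replace with the real evaluation.
        return float(np.sum(x ** 2))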
# Variant of the first script: a symmetric Latin hypercube design and a
# larger DYCORS candidate pool (500 points per dimension instead of 100).
np.random.seed(int(seed))

print("\nNumber of threads: " + str(nthreads))
print("Maximum number of evaluations: " + str(maxeval))
print("Search strategy: Candidate DyCORS")
print("Experimental design: Symmetric Latin Hypercube")
print("Surrogate: Cubic RBF")
print('best\tf_eval_time\tresult\ttestset_result\tf_eval_count\twallclock_time\thyper-parameters')

nsamples = nthreads
data = TorchOptim(seed=seed, server=server)

# Create a strategy and a controller
controller = ThreadController()
controller.strategy = SyncStrategyNoConstraints(
    worker_id=0, data=data,
    maxeval=maxeval, nsamples=nsamples,
    exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2 * (data.dim + 1)),
    response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
    sampling_method=CandidateDYCORS(data=data, numcand=500 * data.dim))

# Launch the threads and give them access to the objective function
for _ in range(nthreads):
    worker = BasicWorkerThread(controller, data.objfunction)
    controller.launch_worker(worker)

# Run the optimization strategy
result = controller.run()
def __init__(self, sockname=("localhost", 0), strategy=None, handlers=None):
    """Initialize the controller on the given (host, port) address.

    Args:
        sockname: Address on which to serve worker connections
        strategy: Strategy object to drive the underlying controller
        handlers: Dictionary of specialized message handlers
    """
    super(ThreadedTCPServer, self).__init__(sockname, SocketWorkerHandler)
    # Avoid a shared mutable default by creating the dict per instance.
    self.message_handlers = {} if handlers is None else handlers
    self.controller = ThreadController()
    self.controller.strategy = strategy
    self.controller.add_term_callback(self.shutdown)
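# A hedged usage sketch for the server above; my_strategy is a hypothetical
# strategy object, and only serve_forever()/server_address come from the
# standard socketserver base class that super().__init__ wires up.
import threading

server = ThreadedTCPServer(sockname=("localhost", 0), strategy=my_strategy)
host, port = server.server_address  # tell remote workers where to connect

# Serve worker connections in the background while the controller runs; the
# add_term_callback(self.shutdown) above stops the server once the
# optimization strategy terminates.
threading.Thread(target=server.serve_forever, daemon=True).start()
result = server.controller.run()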