How to use the poap.controller.ThreadController function in POAP

To help you get started, we've selected a few poap.controller.ThreadController examples, based on popular ways POAP is used in public projects.
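
Before the project snippets below, here is a minimal, self-contained sketch of the basic pattern: a ThreadController paired with a strategy and a pool of BasicWorkerThread workers. It uses POAP's FixedSampleStrategy; the toy objective and the worker count are illustrative, not taken from the projects below.

    import math
    from poap.strategy import FixedSampleStrategy
    from poap.controller import ThreadController, BasicWorkerThread

    def objective(x):
        # Toy objective; in practice this is an expensive evaluation
        return math.sin(x) + 0.1 * x

    # A fixed list of sample points for the strategy to propose
    samples = [0.1 * i for i in range(50)]

    controller = ThreadController()
    controller.strategy = FixedSampleStrategy(samples)

    # Launch worker threads that pull evaluations from the controller
    for _ in range(4):
        controller.launch_worker(BasicWorkerThread(controller, objective))

    result = controller.run()
    print("Best value {0} at x = {1}".format(result.value, result.params[0]))

The controller mediates between the strategy (which proposes evaluations) and the workers (which perform them); run() returns the record of the best evaluation.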

github ilija139 / HORD / 19-CNN / pySOT_runner.py
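
The HORD runners all follow the same recipe: a ThreadController whose strategy is pySOT's synchronous SyncStrategyNoConstraints, with one BasicWorkerThread per thread evaluating the repo's TorchOptim objective.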

    # Presumed imports (old pySOT 0.x and POAP APIs; TorchOptim is the
    # repo's own objective wrapper, so its module path is assumed here):
    import sys
    import numpy as np
    from pySOT import *
    from poap.controller import ThreadController, BasicWorkerThread
    from torch_optim import TorchOptim  # assumed module name

    # seed, nthreads and maxeval are parsed from sys.argv earlier in the script
    server = sys.argv[4]

    np.random.seed(int(seed))

    print("\nNumber of threads: " + str(nthreads))
    print("Maximum number of evaluations: " + str(maxeval))
    print("Search strategy: Candidate DyCORS")
    print("Experimental design: Latin Hypercube")
    print("Surrogate: Cubic RBF")
    nsamples = nthreads

    data = TorchOptim(seed=seed, server=server)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SyncStrategyNoConstraints(
        worker_id=0, data=data,
        maxeval=maxeval, nsamples=nsamples,
        exp_design=LatinHypercube(dim=data.dim, npts=2 * (data.dim + 1)),
        response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
        sampling_method=CandidateDYCORS(data=data, numcand=100 * data.dim))

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()
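
The record returned by run() carries the best objective value and the point that achieved it, so a runner would typically finish by reporting it:

    print('Best value found: {0}'.format(result.value))
    print('Best solution found: {0}'.format(result.params[0]))
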
github ilija139 / HORD / 6-MLP / pySOT_runner.py
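
The 6-MLP runner is the same pattern, plus a tab-separated header line for its per-evaluation log.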

    np.random.seed(int(seed))

    print("\nNumber of threads: " + str(nthreads))
    print("Maximum number of evaluations: " + str(maxeval))
    print("Search strategy: Candidate DyCORS")
    print("Experimental design: Latin Hypercube")
    print("Surrogate: Cubic RBF")
    print('best\tf_eval_time\tresult\ttestset_result\tf_eval_count\twallclock_time\thyper-parameters')
    nsamples = nthreads

    data = TorchOptim(seed=seed, server=server)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SyncStrategyNoConstraints(
        worker_id=0, data=data,
        maxeval=maxeval, nsamples=nsamples,
        exp_design=LatinHypercube(dim=data.dim, npts=2 * (data.dim + 1)),
        response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
        sampling_method=CandidateDYCORS(data=data, numcand=100 * data.dim))

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

github ilija139 / HORD / 8-CNN / pySOT_runner.py

(This runner is identical to the 19-CNN snippet above.)

github SanPen / GridCal / src / GridCal / Engine / Simulations / Optimization / optimization_driver.py
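
GridCal uses the newer pySOT API: still a ThreadController, but with an SRBFStrategy running asynchronously over a Gaussian-process surrogate (GPRegressor). The fragment comes from a method of GridCal's optimization driver, so self refers to that driver.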

    # Presumed imports (pySOT >= 0.2 API):
    #   from pySOT.experimental_design import SymmetricLatinHypercube
    #   from pySOT.strategy import SRBFStrategy
    #   from pySOT.surrogate import GPRegressor
    #   from poap.controller import ThreadController, BasicWorkerThread

    # # Run the optimization strategy
    # result = controller.run()
    #
    # # Print the final result
    # print('Best value found: {0}'.format(result.value))
    # print('Best solution found: {0}'.format(np.array_str(result.params[0], max_line_width=np.inf, precision=5,
    #                                                      suppress_small=True)))

    num_threads = 4

    surrogate_model = GPRegressor(dim=self.problem.dim)
    sampler = SymmetricLatinHypercube(dim=self.problem.dim, num_pts=2 * (self.problem.dim + 1))

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SRBFStrategy(max_evals=self.max_iter,
                                       opt_prob=self.problem,
                                       exp_design=sampler,
                                       surrogate=surrogate_model,
                                       asynchronous=True,
                                       batch_size=num_threads)

    print("Number of threads: {}".format(num_threads))
    print("Maximum number of evaluations: {}".format(self.max_iter))
    print("Strategy: {}".format(controller.strategy.__class__.__name__))
    print("Experimental design: {}".format(sampler.__class__.__name__))
    print("Surrogate: {}".format(surrogate_model.__class__.__name__))

    # Launch the threads and give them access to the objective function
    for _ in range(num_threads):
        worker = BasicWorkerThread(controller, self.problem.eval)
        controller.launch_worker(worker)

    # Run the optimization strategy (truncated in the original excerpt)
    result = controller.run()

github ilija139 / HORD / 15-CNN / pySOT_runner.py
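
This runner differs from the HORD runners above only in its experimental design (SymmetricLatinHypercube instead of LatinHypercube) and a larger DyCORS candidate set (numcand=500*data.dim).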

    np.random.seed(int(seed))

    print("\nNumber of threads: " + str(nthreads))
    print("Maximum number of evaluations: " + str(maxeval))
    print("Search strategy: Candidate DyCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")
    print('best\tf_eval_time\tresult\ttestset_result\tf_eval_count\twallclock_time\thyper-parameters')
    nsamples = nthreads

    data = TorchOptim(seed=seed, server=server)

    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = SyncStrategyNoConstraints(
        worker_id=0, data=data,
        maxeval=maxeval, nsamples=nsamples,
        exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2 * (data.dim + 1)),
        response_surface=RBFInterpolant(surftype=CubicRBFSurface, maxp=maxeval),
        sampling_method=CandidateDYCORS(data=data, numcand=500 * data.dim))

    # Launch the threads and give them access to the objective function
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)

    # Run the optimization strategy
    result = controller.run()

github dbindel / POAP / poap / tcpserve.py
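
POAP itself composes a ThreadController into its TCP server: each remote socket worker is bridged to the embedded controller, and controller termination shuts the server down.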
    def __init__(self, sockname=("localhost", 0), strategy=None, handlers={}):
        """Initialize the server on the given (host, port) address.

        Args:
            sockname: Socket on which to serve workers
            strategy: Strategy object to attach to the embedded controller
            handlers: Dictionary of specialized message handlers
        """
        super(ThreadedTCPServer, self).__init__(sockname, SocketWorkerHandler)
        self.message_handlers = handlers
        # The server owns the controller; SocketWorkerHandler adapts each
        # incoming connection into a controller worker.
        self.controller = ThreadController()
        self.controller.strategy = strategy
        self.controller.add_term_callback(self.shutdown)
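
A rough server-side usage sketch. The constructor call matches the signature above; the sockname accessor and run() method are assumed from POAP's tcpserve API, and the fixed-sample strategy is purely illustrative:

    from poap.strategy import FixedSampleStrategy
    from poap.tcpserve import ThreadedTCPServer

    # Any POAP strategy object can be served; this one is a placeholder
    strategy = FixedSampleStrategy([0.1 * i for i in range(20)])

    server = ThreadedTCPServer(sockname=("localhost", 0), strategy=strategy)
    host, port = server.sockname  # assumed accessor: the address workers dial
    server.run()                  # assumed: serves socket workers and runs the controller

Remote processes then connect with a socket worker (POAP ships SimpleSocketWorker in poap.tcpserve) that evaluates the objective and reports results back over the wire.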