How to use the emukit.core.initial_designs.RandomDesign class in emukit

To help you get started, we’ve selected a few emukit examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github amzn / emukit / tests / emukit / test_functions / test_multi_fidelity_functions.py View on Github external
def test_multi_fidelity_function_shapes(fcn):
    """Smoke-test a multi-fidelity test function.

    Draws random samples from the function's parameter space, overwrites the
    last column with valid fidelity indices, and evaluates the function
    through its wrapper.

    :param fcn: factory that, when called, returns a tuple of
                (function wrapper, parameter space)
    """
    n_points = 10
    # fcn is a factory: calling it yields the wrapper and its space
    fcn, space = fcn()
    random = RandomDesign(space)
    samples = random.get_samples(n_points)

    # There are only 2 or 3 fidelity functions in set of functions we are testing
    # The last parameter is treated as the fidelity index; its domain size
    # gives the number of fidelity levels.
    n_fidelities = len(space.parameters[-1].domain)
    if n_fidelities == 2:
        samples[:5, -1] = 0
        samples[5:, -1] = 1
    elif n_fidelities == 3:
        samples[:5, -1] = 0
        samples[5:8, -1] = 1
        samples[8:, -1] = 2
    else:
        raise ValueError('Please add a case for functions with {:.0f} fidelity levels'.format(n_fidelities))

    # Check shapes when calling through function wrapper
    results = fcn.evaluate(samples)
    # NOTE(review): this snippet appears truncated here in the listing —
    # the shape assertions on `results` presumably follow in the original file.
github amzn / emukit / tests / emukit / core / test_model_free_designs.py View on Github external
def create_model_free_designs(space: ParameterSpace):
    """Instantiate every model-free initial design over the given parameter space.

    :param space: parameter space the designs will sample from
    :return: list containing one RandomDesign and one LatinDesign instance
    """
    design_classes = (RandomDesign, LatinDesign)
    return [design_cls(space) for design_cls in design_classes]
github amzn / emukit / integration_tests / emukit / experimental_design / test_multi_source_experimental_design.py View on Github external
def test_multi_source_batch_experimental_design():
    """Integration test setting up a multi-source batch experimental design
    loop on the multi-fidelity Forrester function.
    """
    objective, space = multi_fidelity_forrester_function()

    # Create initial data
    random_design = RandomDesign(space)
    x_init = random_design.get_samples(10)
    # NOTE(review): 'intiial_results' is a typo for 'initial_results'
    intiial_results = objective.evaluate(x_init)
    # Collect the Y output of each evaluation result into one array
    y_init = np.array([res.Y for res in intiial_results])

    # Create multi source acquisition optimizer
    # The multi-source optimizer wraps the gradient optimizer so that the
    # information-source (fidelity) parameter is handled as well.
    acquisition_optimizer = GradientAcquisitionOptimizer(space)
    multi_source_acquisition_optimizer = MultiSourceAcquisitionOptimizer(acquisition_optimizer, space)

    # Create GP model
    gpy_model = GPy.models.GPRegression(x_init, y_init)
    model = GPyModelWrapper(gpy_model)

    # Create acquisition
    # ModelVariance selects points where the model is most uncertain
    acquisition = ModelVariance(model)

    # Create batch candidate point calculator
    # NOTE(review): this snippet is truncated here in the listing; the batch
    # calculator construction and loop execution follow in the original file.
github amzn / emukit / emukit / bayesian_optimization / acquisitions / max_value_entropy_search.py View on Github external
def update_parameters(self):
        """Re-draw approximate samples of the optimum value y* (Gumbel sampling)."""
        # apply gumbel sampling to obtain samples from y*
        # NOTE(review): the comment and code below are truncated/garbled by the
        # page extraction; see emukit's max_value_entropy_search.py for the
        # full method body — do not rely on this fragment.
        # we approximate Pr(y*^hat 0.25:
github amzn / emukit / emukit / benchmarking / loop_benchmarking / benchmarker.py View on Github external
:param metrics: List of metric objects that assess the performance of the loop at every iteration
        :param initial_design: An object that returns a set of samples in the input domain that are used as the initial
                               data set
        """

        self.loop_names = [loop[0] for loop in loops_with_names]
        self.loops = [loop[1] for loop in loops_with_names]

        if isinstance(test_function, UserFunction):
            self.test_function = test_function
        else:
            self.test_function = UserFunctionWrapper(test_function)
        self.parameter_space = parameter_space

        if initial_design is None:
            initial_design = RandomDesign(parameter_space)
        self.initial_design = initial_design
        self.metrics = metrics
        self.metric_names = [metric.name for metric in metrics]

        if len(set(self.metric_names)) != len(self.metric_names):
            raise ValueError('Names of metrics are not unique')