How to use sherpa - 10 common examples

To help you get started, we've selected ten sherpa examples, based on popular ways the library is used in public projects.

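All of the examples below follow the same basic pattern: define a search space with parameter objects, pick an algorithm, create a sherpa.Study, then loop over suggested trials and report observations back. Here is a minimal sketch of that pattern (the parameter name and toy objective are made up for illustration):

import sherpa

# Search space: a single learning-rate parameter sampled on a log scale
# (the name 'lr' is illustrative).
parameters = [sherpa.Continuous('lr', [1e-4, 1e-1], 'log')]

algorithm = sherpa.algorithms.RandomSearch(max_num_trials=20)

study = sherpa.Study(parameters=parameters,
                     algorithm=algorithm,
                     lower_is_better=True,
                     disable_dashboard=True)

for trial in study:
    # Toy objective standing in for a real training loss.
    loss = trial.parameters['lr']
    study.add_observation(trial=trial, iteration=1, objective=loss)
    study.finalize(trial)

print(study.get_best_result())

The examples that follow exercise this same loop with different algorithms: sequential testing, Bayesian optimization via GPyOpt, grid search, chained algorithms, and population based training.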

Example 1: tests/test_sequential_testing.py (sherpa-ai/sherpa)

# Assumed imports for this snippet (module paths may differ between sherpa versions).
import numpy
import sherpa
from sherpa.algorithms.sequential_testing import SequentialTesting

def test_wait():
    parameters = [sherpa.Continuous('myparam', [0, 1])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(10*3 - 1):
        trial = study.get_suggestion()
        print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
              "{}/{}".format(gs.t, gs.T))
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters['myparam']
                              + numpy.random.normal(scale=1.0))
        # The noise scale and the finalize call are reconstructed; the
        # original snippet was truncated at this point.
        study.finalize(trial)

Example 2: tests/test_bayesian_optimization.py (sherpa-ai/sherpa)

# Assumed imports (BayesianOptimization's exact module path may vary by sherpa version).
import math
import sherpa
from sherpa.algorithms.bayesian_optimization import BayesianOptimization

def test_branin():
    def branin(x1, x2):
        # Global minimum 0.397887 at (-pi, 12.275), (pi, 2.275),
        # and (9.42478, 2.475)
        a = 1
        b = 5.1/(4*math.pi**2)
        c = 5/math.pi
        r = 6
        s = 10
        t = 1/(8*math.pi)
        return a*(x2 - b*x1**2 + c*x1 - r)**2 + s*(1-t)*math.cos(x1)+s

    parameters = [sherpa.Continuous('x1', [-5., 10.]),
                  sherpa.Continuous('x2', [0., 15.])]

    bayesian_optimization = BayesianOptimization(num_grid_points=2,
                                                 max_num_trials=50,
                                                 fine_tune=True)
    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = branin(trial.parameters['x1'], trial.parameters['x2'])
        print("Branin-Hoo: {}".format(fval))
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=fval)
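
The Branin-Hoo function is a standard two-dimensional benchmark for global optimization, which makes it a convenient smoke test here: max_num_trials=50 caps the study at 50 suggestions, and each trial reports a single observation before the next one is requested.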

Example 3: tests/test_gpyopt.py (sherpa-ai/sherpa)

import sherpa

def parameters():
    parameters = [sherpa.Continuous('dropout', [0., 0.5]),
                  sherpa.Continuous('lr', [1e-7, 1e-1], 'log'),
                  sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid']),
                  sherpa.Discrete('num_hidden', [100, 300])
                  ]
    return parameters
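
Given its name and its location in tests/test_gpyopt.py, this helper is presumably registered as a pytest fixture. A sketch of how such a fixture could be declared and consumed (the decorator and the test below are illustrative, not taken from the sherpa repo):

import pytest
import sherpa

@pytest.fixture
def parameters():
    return [sherpa.Continuous('dropout', [0., 0.5]),
            sherpa.Continuous('lr', [1e-7, 1e-1], 'log'),
            sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid']),
            sherpa.Discrete('num_hidden', [100, 300])]

def test_parameter_names(parameters):
    # pytest injects the fixture's return value by matching the argument name.
    assert [p.name for p in parameters] == ['dropout', 'lr', 'activation', 'num_hidden']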

Example 4: tests/test_algorithms.py (sherpa-ai/sherpa)

import sherpa

def test_grid_search_log_continuous():
    parameters = [sherpa.Continuous('log-continuous', [1e-4, 1e-2], 'log')]

    alg = sherpa.algorithms.GridSearch(num_grid_points=3)

    suggestion = alg.get_suggestion(parameters)
    seen = set()

    while suggestion != sherpa.AlgorithmState.DONE:
        seen.add(suggestion['log-continuous'])
        suggestion = alg.get_suggestion(parameters)

    assert seen == {1e-4, 1e-3, 1e-2}
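
With a 'log'-scaled Continuous parameter, GridSearch spaces its points evenly in log space rather than linearly, which is why three grid points over [1e-4, 1e-2] land exactly on the decades. The equivalent computation (illustrative, not sherpa's internal code):

import numpy as np

low, high, num_grid_points = 1e-4, 1e-2, 3
grid = 10 ** np.linspace(np.log10(low), np.log10(high), num_grid_points)
print(grid)  # [1.e-04 1.e-03 1.e-02]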

Example 5: tests/test_sequential_testing.py (sherpa-ai/sherpa)

# Assumed imports (module paths may differ between sherpa versions).
import sherpa
from sherpa.algorithms.sequential_testing import SequentialTesting

def test_overall_larger_is_better():
    parameters = [sherpa.Continuous('myparam', [0, 10]),
                  sherpa.Discrete('myparam2', [0, 10])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
              "{}/{}".format(gs.t, gs.T))

        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters['myparam'])
        # The objective and finalize call are reconstructed; the original
        # snippet was truncated at this point.
        study.finalize(trial)

Example 6: tests/test_sequential_testing.py (sherpa-ai/sherpa)

# Assumed imports (module paths may differ between sherpa versions).
import numpy
import sherpa
from sherpa.algorithms import GPyOpt
from sherpa.algorithms.sequential_testing import SequentialTesting

def test_bayes_opt():
    def f(x, sd=1):
        y = (x - 3) ** 2 + 10.
        if sd == 0:
            return y
        else:
            return y + numpy.random.normal(loc=0., scale=sd,
                                           size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x', [1, 6])]

    alg = GPyOpt(max_num_trials=10)
    gs = SequentialTesting(algorithm=alg,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=f(trial.parameters['x']))
        study.finalize(trial)
    print(study.get_best_result())
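
As the noisy objective f and the stage sizes n=(3, 6, 9) suggest, SequentialTesting appears to take candidate configurations from the wrapped algorithm (GPyOpt here) and schedule repeated evaluations of them in stages, so that candidates can be compared reliably despite noise; the gs.k/gs.K and gs.t/gs.T values printed in Examples 1 and 5 track the configuration and stage counters.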

Example 7: tests/test_gpyopt.py (sherpa-ai/sherpa)

# Assumed imports; the parametrize decorator below is reconstructed, since
# the original snippet was truncated just before it.
import pytest
import sherpa
from sherpa.algorithms import GPyOpt

@pytest.mark.parametrize("parameters",
                         [([sherpa.Continuous('a', [0, 1]), sherpa.Continuous('b', [10., 100])])])
def test_transformation_to_gpyopt_domain_continuous(parameters):
    domain = GPyOpt._initialize_domain(parameters)
    for p, d in zip(parameters, domain):
        assert d['name'] == p.name
        assert d['type'] == 'continuous'
        assert d['domain'] == tuple(p.range)
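
The assertions document the shape of a GPyOpt domain entry: each sherpa parameter becomes a dict with 'name', 'type', and 'domain' keys, where a Continuous parameter maps to type 'continuous' and its range becomes the domain tuple.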

Example 8: tests/test_bayesian_optimization.py (sherpa-ai/sherpa)

# Assumed imports (BayesianOptimization's exact module path may vary by sherpa version).
import numpy as np
import sherpa
from sherpa.algorithms.bayesian_optimization import BayesianOptimization

def test_transformers():
    parameter = sherpa.Choice('choice', ['a', 'b', 'c', 'd'])
    transformer = BayesianOptimization.ChoiceTransformer(parameter)
    assert np.all(transformer.transform(['d', 'c', 'b', 'a'])
                  == np.flip(np.eye(4), axis=0))

    assert all(transformer.reverse(transformer.transform(['d', 'c', 'b', 'a']))
               == np.array(['d', 'c', 'b', 'a']))

    parameter = sherpa.Continuous('continuous', [0., 0.4])
    transformer = BayesianOptimization.ContinuousTransformer(parameter)
    assert np.all(transformer.transform([0.2, 0.4, 0.]) == np.array([0.5, 1.0, 0.0]))
    assert np.all(transformer.reverse(transformer.transform([0.2, 0.4, 0.]))
                  == np.array([0.2, 0.4, 0.]))

    parameter = sherpa.Continuous('continuous-log', [0.00001, 0.1], 'log')
    transformer = BayesianOptimization.ContinuousTransformer(parameter)
    print(transformer.transform([0.01]))
    assert np.all(transformer.transform([0.0001, 0.001, 0.01]) == np.array(
        [0.25, 0.5, 0.75]))
    print(transformer.reverse(
        transformer.transform([0.0001, 0.001, 0.01])))
    assert np.all(transformer.reverse(
        transformer.transform([0.0001, 0.001, 0.01])) == np.array(
        [0.0001, 0.001, 0.01]))

    parameter = sherpa.Discrete('discrete', [0, 12])
    transformer = BayesianOptimization.DiscreteTransformer(parameter)
    assert np.all(transformer.transform([3, 6, 9])
                  == np.array([0.25, 0.5, 0.75]))
    assert np.all(
        transformer.reverse(transformer.transform([3, 6, 9]))
        == np.array([3, 6, 9]))  # expected values reconstructed; the original snippet was truncated here
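
All three transformers follow the same contract: transform maps raw parameter values onto the unit interval, linearly for Continuous and Discrete parameters and in log space for 'log'-scaled ones, while one-hot encoding Choice parameters; reverse inverts the mapping so the optimizer can work in a normalized space.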

Example 9: tests/test_sequential_testing.py (sherpa-ai/sherpa)

# Assumed imports (module paths may differ between sherpa versions).
import sherpa
from sherpa.algorithms.sequential_testing import SequentialTesting

def test_chain_gs():
    parameters = [sherpa.Continuous('myparam', [0, 1])]

    alg = sherpa.algorithms.RandomSearch()
    chain = sherpa.algorithms.Chain([SequentialTesting(algorithm=alg, K=5,
                                                       n=(3, 6, 9), P=0.5),
                                     SequentialTesting(algorithm=alg, K=5,
                                                       n=(3, 6, 9), P=0.5),
                                     SequentialTesting(algorithm=alg, K=5,
                                                       n=(3, 6, 9), P=0.5)])
    study = sherpa.Study(algorithm=chain,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters['myparam'])
        # The objective and finalize call are reconstructed; the original
        # snippet was truncated at this point.
        study.finalize(trial)

Example 10: tests/test_algorithms.py (sherpa-ai/sherpa)

import sherpa

def test_pbt():
    parameters = [sherpa.Continuous(name='param_a', range=[0, 1])]

    algorithm = sherpa.algorithms.PopulationBasedTraining(num_generations=3,
                                                          population_size=20,
                                                          parameter_range={'param_a': [0., 1.2]})

    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(20):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        study.add_observation(trial=trial, iteration=1, objective=trial.id)
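
Two details worth noting here: objective=trial.id is a stand-in objective, enough to exercise PopulationBasedTraining's generation bookkeeping without any real training, and parameter_range={'param_a': [0., 1.2]} appears to widen the range that perturbed values may explore beyond the initial sampling range of [0, 1].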