How to use the sherpa.Study class in sherpa

To help you get started, we've selected a few sherpa examples based on popular ways the sherpa.Study class is used in public projects.
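
Before working through the project snippets below, here is a minimal, self-contained sketch of the typical sherpa.Study workflow: define a parameter space, pick an algorithm, iterate over the study, and report an objective for each trial. The RandomSearch algorithm, the parameter names, and the toy objective in this sketch are illustrative choices, not taken from any of the projects below.

import sherpa
import sherpa.algorithms

# Illustrative parameter space (names and ranges are made up for this sketch).
parameters = [sherpa.Continuous('lr', [1e-4, 1e-1]),
              sherpa.Choice('activation', ['relu', 'tanh'])]

# RandomSearch is one of the algorithms bundled with sherpa.
algorithm = sherpa.algorithms.RandomSearch(max_num_trials=20)

study = sherpa.Study(parameters=parameters,
                     algorithm=algorithm,
                     lower_is_better=True,     # we are minimizing the objective
                     disable_dashboard=True)   # skip the web dashboard in scripts and tests

for trial in study:
    # Toy objective; in a real project this would be a validation loss or score.
    objective = trial.parameters['lr']
    study.add_observation(trial=trial, iteration=1, objective=objective)
    study.finalize(trial, status='COMPLETED')

print(study.get_best_result())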

From sherpa-ai/sherpa: tests/test_algorithms.py (view on GitHub)
import sherpa
import sherpa.algorithms


def test_pbt():
    parameters = [sherpa.Continuous(name='param_a', range=[0, 1])]

    algorithm = sherpa.algorithms.PopulationBasedTraining(num_generations=3,
                                                          population_size=20,
                                                          parameter_range={'param_a': [0., 1.2]})

    study = sherpa.Study(parameters=parameters,
                         algorithm=algorithm,
                         lower_is_better=True,
                         disable_dashboard=True)

    for _ in range(20):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))
        print(trial.parameters)
        print()
        study.add_observation(trial=trial, iteration=1, objective=trial.id)
        study.finalize(trial=trial,
                       status='COMPLETED')

    for _ in range(16):
        trial = study.get_suggestion()
        print("Trial-ID={}".format(trial.id))

From sherpa-ai/sherpa: tests/test_bayesian_optimization.py (view on GitHub)
import numpy as np
import sherpa
# Import path assumed for the BayesianOptimization algorithm used below:
from sherpa.algorithms.bayesian_optimization import BayesianOptimization


def test_1d():
    def obj_func(x):
        # Global maximum of 4 is at x=4
        return 4. * np.exp(-(x - 4.) ** 2 / 10.) * np.cos(1.5 * (x - 4.)) ** 2

    parameters = [sherpa.Continuous('x1', [0., 7.])]

    bayesian_optimization = BayesianOptimization(num_grid_points=5,
                                                 max_num_trials=50,
                                                 fine_tune=False)
    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=False,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(trial.parameters['x1'])
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    assert np.isclose(rval['Objective'], 4.)

From sherpa-ai/sherpa: tests/test_sequential_testing.py (view on GitHub)
        # Excerpt: get_suggestion is a method of the custom algorithm class MyAlg instantiated below.
        def get_suggestion(self, parameters, results, lower_is_better):
            if results is not None and len(results) > 0:
                print(results)
                assert 'ObjectiveStdErr' in results.columns
                assert (results.loc[:, 'Objective'] == 0.).all()
                exp_std_err = numpy.sqrt(numpy.var([-1,0,1],ddof=1)/(3-1))
                assert (numpy.isclose(results.loc[:, 'ObjectiveStdErr'], exp_std_err)).all()
            return {'myparam': numpy.random.random()}


    alg = MyAlg()
    gs = SequentialTesting(algorithm=alg,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=trial.id%3-1)
        study.finalize(trial)
        print(study.results)

From sherpa-ai/sherpa: tests/test_gpyopt.py (view on GitHub)
    def obj_func(x, y, z):  # signature inferred from the obj_func(**trial.parameters) call below
        assert isinstance(y, str)
        assert isinstance(z, int)
        # The exp/cos bump peaks at 4 when x=4; it is negated and shifted by -int(y) * z,
        # so lower values are better (lower_is_better=True below).
        return -4. * numpy.exp(-(x - 4.) ** 2 / 10.) * numpy.cos(
            1.5 * (x - 4.)) ** 2 - int(y) * z

    parameters = [sherpa.Continuous('x', [0., 7.]),
                  sherpa.Choice('y', ["-1", "0", "1"]),
                  sherpa.Discrete('z', [1, 5])]

    bayesian_optimization = GPyOpt(max_concurrent=1,
                                   max_num_trials=100,
                                   model_type='GP',
                                   acquisition_type='EI')

    study = sherpa.Study(algorithm=bayesian_optimization,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print("Trial {}:\t{}".format(trial.id, trial.parameters))

        fval = obj_func(**trial.parameters)
        print("Function Value: {}".format(fval))
        study.add_observation(trial=trial,
                              iteration=1,
                              objective=fval)
        study.finalize(trial, status='COMPLETED')
    rval = study.get_best_result()
    print(rval)

From sherpa-ai/sherpa: tests/test_sequential_testing.py (view on GitHub)
import sherpa
import sherpa.algorithms
# Import path assumed for SequentialTesting:
from sherpa.algorithms.sequential_testing import SequentialTesting


def test_get_suggestion():
    for _ in range(10):
        parameters = [sherpa.Continuous('myparam', [0, 1]),
                      sherpa.Discrete('myparam2', [0, 10])]
        rs = sherpa.algorithms.RandomSearch()
        gs = SequentialTesting(algorithm=rs,
                               K=10,
                               n=(3, 6, 9),
                               P=0.5)
        study = sherpa.Study(algorithm=gs,
                             parameters=parameters,
                             lower_is_better=True,
                             disable_dashboard=True)
        seen_configs = []
        last_config = {}
        config_count = 3
        for trial in study:
            print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
                  "{}/{}".format(gs.t, gs.T))
            if trial.parameters == last_config:
                config_count += 1
                assert config_count <= 3
            elif trial.parameters == "DONE":
                assert gs.K[gs.t] == 1 or gs.t == 3
                break
            else:

From sherpa-ai/sherpa: tests/testing_utils.py (view on GitHub)
import sherpa
from unittest import mock  # the original tests may use the standalone `mock` package instead


def get_mock_study():
    mock_algorithm = mock.MagicMock()
    mock_algorithm.get_suggestion.return_value = {'a': 1, 'b': 2}
    mock_stopping_rule = mock.MagicMock()

    return sherpa.Study(parameters=[sherpa.Discrete('a', [1,2]),
                                    sherpa.Choice('b', [2,5,7])],
                        algorithm=mock_algorithm,
                        stopping_rule=mock_stopping_rule,
                        lower_is_better=True,
                        disable_dashboard=True)

From sherpa-ai/sherpa: tests/test_sequential_testing.py (view on GitHub)
def test_overall_lower_is_better():
    parameters = [sherpa.Continuous('myparam', [0, 10]),
                  sherpa.Discrete('myparam2', [0, 10])]
    rs = sherpa.algorithms.RandomSearch()
    gs = SequentialTesting(algorithm=rs,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)

    for trial in study:
        print(trial.id, trial.parameters, "{}/{}".format(gs.k, gs.K[gs.t]),
              "{}/{}".format(gs.t, gs.T))

        study.add_observation(trial,
                              iteration=1,
                              objective=trial.parameters['myparam']
                                        + numpy.random.normal(scale=1.))
        study.finalize(trial)

    completed = study.results.query("Status == 'COMPLETED'")

From sherpa-ai/sherpa: tests/test_sequential_testing.py (view on GitHub)
def test_chain_gs():
    parameters = [sherpa.Continuous('myparam', [0, 1])]


    alg = sherpa.algorithms.RandomSearch()
    chain = sherpa.algorithms.Chain([SequentialTesting(algorithm=alg, K=5,
                                                       n=(3, 6, 9), P=0.5),
                                     SequentialTesting(algorithm=alg, K=5,
                                                       n=(3, 6, 9), P=0.5),
                                     SequentialTesting(algorithm=alg, K=5,
                                                       n=(3, 6, 9), P=0.5)])
    study = sherpa.Study(algorithm=chain,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=numpy.random.random())
        study.finalize(trial)
    print(study.results.query("Status == 'COMPLETED'"))
    assert len(study.results.query("Status == 'COMPLETED'")) == 135

From sherpa-ai/sherpa: tests/test_sequential_testing.py (view on GitHub)
    def f(x, sd=1):
        # Noisy quadratic: minimum value 10 at x=3; sd sets the observation noise.
        y = (x - 3) ** 2 + 10.
        if sd == 0:
            return y
        else:
            return y + numpy.random.normal(loc=0., scale=sd,
                                           size=numpy.array(x).shape)

    parameters = [sherpa.Continuous('x', [1, 6])]

    alg = GPyOpt(max_num_trials=10)
    gs = SequentialTesting(algorithm=alg,
                           K=10,
                           n=(3, 6, 9),
                           P=0.5)
    study = sherpa.Study(algorithm=gs,
                         parameters=parameters,
                         lower_is_better=True,
                         disable_dashboard=True)
    for trial in study:
        study.add_observation(trial,
                              iteration=1,
                              objective=f(trial.parameters['x']))
        study.finalize(trial)
    print(study.get_best_result())

From sherpa-ai/sherpa: examples/randomforest.py (view on GitHub)
import time
import sherpa
import sherpa.algorithms.bayesian_optimization as bayesian_optimization
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score


parameters = [sherpa.Discrete('n_estimators', [2, 50]),
              sherpa.Choice('criterion', ['gini', 'entropy']),
              sherpa.Continuous('max_features', [0.1, 0.9])]

algorithm = bayesian_optimization.GPyOpt(max_concurrent=1,
                                         model_type='GP',
                                         acquisition_type='EI',
                                         max_num_trials=100)

X, y = load_breast_cancer(return_X_y=True)
study = sherpa.Study(parameters=parameters,
                     algorithm=algorithm,
                     lower_is_better=False)

for trial in study:
    print("Trial ", trial.id, " with parameters ", trial.parameters)
    clf = RandomForestClassifier(criterion=trial.parameters['criterion'],
                                 max_features=trial.parameters['max_features'],
                                 n_estimators=trial.parameters['n_estimators'],
                                 random_state=0)
    scores = cross_val_score(clf, X, y, cv=5)
    print("Score: ", scores.mean())
    study.add_observation(trial, iteration=1, objective=scores.mean())
    study.finalize(trial)
print(study.get_best_result())