How to use the optuna.study.create_study function in optuna

To help you get started, we’ve selected a few optuna examples based on popular ways it is used in public projects.

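Before the test snippets below, here is a minimal end-to-end sketch of the usual create_study workflow. The objective function and search space are illustrative, not taken from any of the projects shown here:

import optuna

def objective(trial):
    # Illustrative search space: a single float in [-10, 10].
    x = trial.suggest_uniform('x', -10.0, 10.0)
    return (x - 2) ** 2

study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=20)
print(study.best_params)  # e.g. {'x': 2.04}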

github optuna / optuna / tests / test_visualization.py
    assert len(figure.data) == 2
    assert figure.data[0].x == (0, 1, 2)
    assert figure.data[0].y == (1.0, 2.0, 0.0)
    assert figure.data[1].x == (0, 1, 2)
    if direction == 'minimize':
        assert figure.data[1].y == (1.0, 1.0, 0.0)
    else:
        assert figure.data[1].y == (1.0, 2.0, 2.0)

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study(direction=direction)
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))

    figure = _get_optimization_history_plot(study)
    assert len(figure.data) == 0
github optuna / optuna / tests / test_trial.py
def test_datetime_start(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    trial_datetime_start = [None]  # type: typing.List[typing.Optional[datetime]]

    def objective(trial):
        # type: (Trial) -> float

        trial_datetime_start[0] = trial.datetime_start
        return 1.0

    study = create_study(storage_init_func())
    study.optimize(objective, n_trials=1)

    assert study.trials[0].datetime_start == trial_datetime_start[0]
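
The snippet above passes a storage object as the first positional argument of create_study. Outside of tests, the same hook is usually given a database URL so trials persist across processes; a hedged sketch (the SQLite path and study name are illustrative):

import optuna

# Trials are written to the given relational database instead of in-memory storage.
study = optuna.create_study(storage='sqlite:///example.db', study_name='example')
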
github optuna / optuna / tests / test_visualization.py
    figure = _get_slice_plot(study, params=['param_a'])
    assert len(figure.data) == 1
    assert figure.data[0]['x'] == (1.0, 2.5)
    assert figure.data[0]['y'] == (0.0, 1.0)

    # Test with wrong parameters.
    with pytest.raises(ValueError):
        _get_slice_plot(study, params=['optuna'])

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = _get_slice_plot(study)
    assert len(figure.data) == 0
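
_get_slice_plot is an internal helper exercised by this test; the public counterpart is optuna.visualization.plot_slice. A minimal sketch, assuming `study` already holds completed trials with a 'param_a' parameter:

from optuna.visualization import plot_slice

# Renders one subplot per requested parameter; unknown parameter names
# raise ValueError, as the test above asserts.
figure = plot_slice(study, params=['param_a'])
figure.show()
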
github optuna / optuna / tests / test_trial.py
def test_suggest_int(storage_init_func):
    # type: (typing.Callable[[], storages.BaseStorage]) -> None

    mock = Mock()
    mock.side_effect = [1, 2, 3]
    sampler = samplers.RandomSampler()

    with patch.object(sampler, 'sample_independent', mock) as mock_object:
        study = create_study(storage_init_func(), sampler=sampler)
        trial = Trial(study, study._storage.create_new_trial(study._study_id))
        distribution = distributions.IntUniformDistribution(low=0, high=3)

        assert trial._suggest('x', distribution) == 1  # Test suggesting a param.
        assert trial._suggest('x', distribution) == 1  # Test suggesting the same param.
        assert trial._suggest('y', distribution) == 3  # Test suggesting a different param.
        assert trial.params == {'x': 1, 'y': 3}
        assert mock_object.call_count == 3
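
The test drives the private Trial._suggest path directly; in an objective function the same IntUniformDistribution is reached through the public suggest_int API. A brief sketch:

def objective(trial):
    # Public equivalent of IntUniformDistribution(low=0, high=3).
    x = trial.suggest_int('x', 0, 3)
    return float(x)
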
github optuna / optuna / tests / test_visualization.py
def test_get_optimization_history_plot(direction):
    # type: (str) -> None

    # Test with no trial.
    study = create_study(direction=direction)
    figure = _get_optimization_history_plot(study)
    assert len(figure.data) == 0

    def objective(trial):
        # type: (Trial) -> float

        if trial.number == 0:
            return 1.0
        elif trial.number == 1:
            return 2.0
        elif trial.number == 2:
            return 0.0
        return 0.0

    # Test with a trial.
    study = create_study(direction=direction)
github optuna / optuna / tests / samplers_tests / test_samplers.py
def test_categorical(sampler_class, choices):
    # type: (typing.Callable[[], BaseSampler], typing.Tuple[T, ...]) -> None

    distribution = CategoricalDistribution(choices)

    study = optuna.study.create_study(sampler=sampler_class())

    def sample():
        # type: () -> float

        trial = _create_new_trial(study)
        param_value = study.sampler.sample_independent(study, trial, 'x', distribution)
        return distribution.to_internal_repr(param_value)

    points = np.array([sample() for _ in range(100)])

    # The 'x' value corresponds to an index into distribution.choices.
    assert np.all(points >= 0)
    assert np.all(points <= len(distribution.choices) - 1)
    round_points = np.round(points)
    np.testing.assert_almost_equal(round_points, points)
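
In user code the same categorical sampling is reached through trial.suggest_categorical; the choices below are illustrative:

def objective(trial):
    # Returns one of the given choices; internally each choice is encoded
    # as an index into the list, which is what the assertions above check.
    kernel = trial.suggest_categorical('kernel', ['linear', 'rbf', 'poly'])
    return 0.0 if kernel == 'rbf' else 1.0
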
github optuna / optuna / tests / test_visualization.py
    Args:
        no_trials: If ``True``, create a study with no trials.
        less_than_two: If ``True``, create a study with two/four hyperparameters where
            'param_a' (and 'param_c') appear(s) only once while 'param_b' (and 'param_d')
            appear(s) twice in `study.trials`.
        with_c_d: If ``True``, the study has four hyperparameters named 'param_a',
            'param_b', 'param_c', and 'param_d'. Otherwise, there are only two
            hyperparameters ('param_a' and 'param_b').

    Returns:
        :class:`~optuna.study.Study`

    """

    study = create_study()
    if no_trials:
        return study
    study._append_trial(
        value=0.0,
        params={
            'param_a': 1.0,
            'param_b': 2.0,
            'param_c': 3.0,
            'param_d': 4.0,
        } if with_c_d else {
            'param_a': 1.0,
            'param_b': 2.0,
        },
        distributions={
            'param_a': UniformDistribution(0.0, 3.0),
            'param_b': UniformDistribution(0.0, 3.0),
github optuna / optuna / tests / pruners_tests / test_percentile.py
    def setup_study(trial_num, _intermediate_values):
        # type: (int, List[List[float]]) -> Study

        _study = optuna.study.create_study(direction="minimize")
        trial_ids = [_study._storage.create_new_trial(
            _study._study_id) for _ in range(trial_num)]

        for step, values in enumerate(_intermediate_values):
            # The study has no complete trials yet, so the percentile lookup raises.
            with pytest.raises(ValueError):
                _all_trials = _study._storage.get_all_trials(_study._study_id)
                _direction = _study._storage.get_study_direction(_study._study_id)
                percentile._get_percentile_intermediate_result_over_trials(
                    _all_trials, _direction, step, 25)

            for i in range(trial_num):
                trial_id = trial_ids[i]
                value = values[i]
                _study._storage.set_trial_intermediate_value(trial_id, step, value)
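
The helper above writes intermediate values into storage by hand to set up pruner tests. In application code, the same pruner is usually attached at create_study time and fed via trial.report; a hedged sketch (loop length and percentile are illustrative):

import optuna

def objective(trial):
    value = 1.0
    for step in range(10):
        value *= 0.9  # stand-in for a real training loop
        trial.report(value, step)
        if trial.should_prune():
            raise optuna.exceptions.TrialPruned()
    return value

study = optuna.create_study(
    direction='minimize',
    pruner=optuna.pruners.PercentilePruner(25.0),
)
study.optimize(objective, n_trials=20)
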
github optuna / optuna / tests / visualization_tests / test_optimization_history.py
def test_plot_optimization_history(direction):
    # type: (str) -> None

    # Test with no trial.
    study = create_study(direction=direction)
    figure = plot_optimization_history(study)
    assert len(figure.data) == 0

    def objective(trial):
        # type: (Trial) -> float

        if trial.number == 0:
            return 1.0
        elif trial.number == 1:
            return 2.0
        elif trial.number == 2:
            return 0.0
        return 0.0

    # Test with a trial.
    study = create_study(direction=direction)
github optuna / optuna / tests / visualization_tests / test_intermediate_plot.py
    assert len(figure.data) == 1
    assert figure.data[0].x == (0, 1)
    assert figure.data[0].y == (1.0, 2.0)

    # Test a study with one trial with intermediate values and
    # one trial without intermediate values.
    # Expect the trial with no intermediate values to be ignored.
    study.optimize(lambda t: objective(t, False), n_trials=1)
    assert len(study.trials) == 2
    figure = plot_intermediate_values(study)
    assert len(figure.data) == 1
    assert figure.data[0].x == (0, 1)
    assert figure.data[0].y == (1.0, 2.0)

    # Test a study of only one trial that has no intermediate values.
    study = create_study()
    study.optimize(lambda t: objective(t, False), n_trials=1)
    figure = plot_intermediate_values(study)
    assert not figure.data

    # Ignore failed trials.
    def fail_objective(_):
        # type: (Trial) -> float

        raise ValueError

    study = create_study()
    study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
    figure = plot_intermediate_values(study)
    assert not figure.data
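
For reference, an end-to-end sketch that produces both plots tested above from user code (the objective is illustrative):

import optuna
from optuna.visualization import plot_intermediate_values, plot_optimization_history

def objective(trial):
    x = trial.suggest_uniform('x', 0.0, 1.0)
    for step in range(5):
        trial.report(x * (step + 1), step)  # intermediate values feed the plot
    return x

study = optuna.create_study()
study.optimize(objective, n_trials=10)
plot_optimization_history(study).show()
plot_intermediate_values(study).show()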