How to use the optuna.distributions.LogUniformDistribution class in optuna

To help you get started, we’ve selected a few optuna examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github optuna / optuna / optuna / integration / skopt.py View on Github external
def __init__(self, search_space, skopt_kwargs):
        # type: (Dict[str, BaseDistribution], Dict[str, Any]) -> None
        """Translate an optuna search space into a list of skopt dimensions.

        NOTE(review): as shown, ``skopt_kwargs`` and the final ``dimensions``
        list are never consumed after the loop -- this snippet looks
        truncated; confirm against the full source file.
        """

        self._search_space = search_space

        # Iterate in sorted parameter-name order so the dimension list has a
        # deterministic layout across runs.
        dimensions = []
        for param_name, dist in sorted(self._search_space.items()):
            if isinstance(dist, distributions.UniformDistribution):
                # optuna's upper bound is exclusive while skopt's is
                # inclusive: step down to the nearest smaller float.
                upper = np.nextafter(dist.high, float('-inf'))
                dim = space.Real(dist.low, upper)
            elif isinstance(dist, distributions.LogUniformDistribution):
                # Same exclusive-to-inclusive upper-bound conversion, with a
                # log-uniform prior on the skopt side.
                upper = np.nextafter(dist.high, float('-inf'))
                dim = space.Real(dist.low, upper, prior='log-uniform')
            elif isinstance(dist, distributions.IntUniformDistribution):
                dim = space.Integer(dist.low, dist.high)
            elif isinstance(dist, distributions.DiscreteUniformDistribution):
                # Map the discretized range onto integer indices 0..count.
                count = (dist.high - dist.low) // dist.q
                dim = space.Integer(0, count)
            elif isinstance(dist, distributions.CategoricalDistribution):
                dim = space.Categorical(dist.choices)
            else:
                raise NotImplementedError(
                    "The distribution {} is not implemented.".format(dist))

            dimensions.append(dim)
github optuna / optuna / tests / integration_tests / test_sklearn.py View on Github external
def test_optuna_search(enable_pruning):
    # type: (bool) -> None
    """Smoke-test OptunaSearchCV: unfitted guard, fit, and trial export."""

    X, y = make_blobs(n_samples=10)
    estimator = SGDClassifier(max_iter=5, tol=1e-03)
    search_spaces = {'alpha': distributions.LogUniformDistribution(1e-04, 1e+03)}
    optuna_search = integration.OptunaSearchCV(
        estimator,
        search_spaces,
        cv=3,
        enable_pruning=enable_pruning,
        error_score='raise',
        max_iter=5,
        random_state=0,
        return_train_score=True,
    )

    # Touching fitted state before ``fit`` must raise.
    with pytest.raises(NotFittedError):
        optuna_search._check_is_fitted()

    # Fitting and exporting the trial history must both succeed.
    optuna_search.fit(X, y)
    optuna_search.trials_dataframe()
github optuna / optuna / tests / visualization_tests / test_slice.py View on Github external
def test_plot_slice_log_scale():
    # type: () -> None
    """Log-distributed parameters get a log x-axis; linear ones do not."""

    study = create_study()
    study._append_trial(
        value=0.0,
        params={'x_linear': 1.0, 'y_log': 1e-3},
        distributions={
            'x_linear': UniformDistribution(0.0, 3.0),
            'y_log': LogUniformDistribution(1e-5, 1.),
        },
    )

    # Single-parameter plots.
    figure = plot_slice(study, params=['y_log'])
    assert figure.layout['xaxis_type'] == 'log'
    figure = plot_slice(study, params=['x_linear'])
    assert figure.layout['xaxis_type'] is None

    # All parameters at once: the second subplot (presumably 'y_log')
    # carries the log scale while the first stays linear.
    figure = plot_slice(study)
    assert figure.layout['xaxis_type'] is None
    assert figure.layout['xaxis2_type'] == 'log'
github optuna / optuna / tests / test_distributions.py View on Github external
def test_empty_range_contains():
    # type: () -> None
    """A degenerate range (low == high) contains exactly its single point."""

    # The three float-valued distributions share the same expectations.
    for dist in (
            distributions.UniformDistribution(low=1.0, high=1.0),
            distributions.LogUniformDistribution(low=1.0, high=1.0),
            distributions.DiscreteUniformDistribution(low=1.0, high=1.0, q=2.0),
    ):
        assert not dist._contains(0.9)
        assert dist._contains(1.0)
        assert not dist._contains(1.1)

    # Integer variant: only the single endpoint is inside.
    int_dist = distributions.IntUniformDistribution(low=1, high=1)
    assert not int_dist._contains(0)
    assert int_dist._contains(1)
    assert not int_dist._contains(2)
github optuna / optuna / tests / storages_tests / test_storages.py View on Github external
def test_set_and_get_trial_param(storage_init_func):
    # type: (Callable[[], BaseStorage]) -> None
    """Round-trip trial parameters through a storage backend.

    NOTE(review): ``trial_id_3`` and ``dist_y_2`` are unused in the visible
    portion -- the snippet looks truncated; confirm against the full test.
    """

    storage = storage_init_func()

    # Two trials in one study, plus a third trial in a separate study.
    study_id = storage.create_new_study()
    trial_id_1 = storage.create_new_trial(study_id)
    trial_id_2 = storage.create_new_trial(study_id)
    trial_id_3 = storage.create_new_trial(storage.create_new_study())

    dist_x = UniformDistribution(low=1.0, high=2.0)
    dist_y_1 = CategoricalDistribution(choices=('Shibuya', 'Ebisu', 'Meguro'))
    dist_y_2 = CategoricalDistribution(choices=('Shibuya', 'Shinsen'))
    dist_z = LogUniformDistribution(low=1.0, high=100.0)

    # Fresh parameters on trial_1 are accepted.
    assert storage.set_trial_param(trial_id_1, 'x', 0.5, dist_x)
    assert storage.set_trial_param(trial_id_1, 'y', 2, dist_y_1)

    # Internal representations round-trip unchanged...
    assert storage.get_trial_param(trial_id_1, 'x') == 0.5
    assert storage.get_trial_param(trial_id_1, 'y') == 2
    # ...while the external repr maps the categorical index to its choice.
    assert storage.get_trial(trial_id_1).params == {'x': 0.5, 'y': 'Meguro'}
    # Re-setting an already-set parameter name is rejected.
    assert not storage.set_trial_param(trial_id_1, 'x', 0.6, dist_x)

    # A sibling trial in the same study accepts its own parameters.
    assert storage.set_trial_param(trial_id_2, 'x', 0.3, dist_x)
    assert storage.set_trial_param(trial_id_2, 'z', 0.1, dist_z)
github optuna / optuna / tests / integration_tests / test_chainermn.py View on Github external
def test_relative_sampling(storage_mode, cache_mode, comm):
        # type: (str, bool, CommunicatorBase) -> None
        """Run a ChainerMN study with a deterministic relative sampler.

        NOTE(review): the body ends at a dangling ``# Assert trial counts.``
        comment -- the snippet appears truncated; the assertions themselves
        are not visible here.
        """

        # Fixed search space covering uniform, log-uniform, and categorical.
        relative_search_space = {
            'x': distributions.UniformDistribution(low=-10, high=10),
            'y': distributions.LogUniformDistribution(low=20, high=30),
            'z': distributions.CategoricalDistribution(choices=(-1.0, 1.0)),
        }
        relative_params = {'x': 1.0, 'y': 25.0, 'z': -1.0}
        # Presumably always yields ``relative_params`` for this space -- confirm.
        sampler = DeterministicRelativeSampler(relative_search_space,  # type: ignore
                                               relative_params)

        with MultiNodeStorageSupplier(storage_mode, cache_mode, comm) as storage:
            study = TestChainerMNStudy._create_shared_study(storage, comm, sampler=sampler)
            mn_study = ChainerMNStudy(study, comm)

            # Invoke optimize.
            n_trials = 20
            func = Func()
            mn_study.optimize(func, n_trials=n_trials)

            # Assert trial counts.
github optuna / optuna / tests / integration_tests / test_cma.py View on Github external
def search_space():
        # type: () -> Dict[str, BaseDistribution]
        """Return one distribution of every supported kind, keyed by a short tag."""

        space = {
            'c': CategoricalDistribution(('a', 'b')),
            'd': DiscreteUniformDistribution(-1, 9, 2),
            'i': IntUniformDistribution(-1, 1),
            'l': LogUniformDistribution(0.001, 0.1),
            'u': UniformDistribution(-2, 2),
        }
        return space
github optuna / optuna / optuna / trial.py View on Github external
>>>     ...

        Args:
            name:
                A parameter name.
            low:
                Lower endpoint of the range of suggested values. ``low`` is included in the range.
            high:
                Upper endpoint of the range of suggested values. ``high`` is excluded from the
                range.

        Returns:
            A suggested float value.
        """

        distribution = distributions.LogUniformDistribution(low=low, high=high)
        if low == high:
            return self._set_new_param_or_get_existing(name, low, distribution)

        return self._suggest(name, distribution)
github optuna / optuna / optuna / samplers / tpe / sampler.py View on Github external
return self._sample_uniform(param_distribution, below_param_values, above_param_values)
        elif isinstance(param_distribution, distributions.LogUniformDistribution):
            return self._sample_loguniform(param_distribution, below_param_values,
                                           above_param_values)
        elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
            return self._sample_discrete_uniform(param_distribution, below_param_values,
                                                 above_param_values)
        elif isinstance(param_distribution, distributions.IntUniformDistribution):
            return self._sample_int(param_distribution, below_param_values, above_param_values)
        elif isinstance(param_distribution, distributions.CategoricalDistribution):
            return self._sample_categorical(param_distribution, below_param_values,
                                            above_param_values)
        else:
            distribution_list = [
                distributions.UniformDistribution.__name__,
                distributions.LogUniformDistribution.__name__,
                distributions.DiscreteUniformDistribution.__name__,
                distributions.IntUniformDistribution.__name__,
                distributions.CategoricalDistribution.__name__
            ]
            raise NotImplementedError("The distribution {} is not implemented. "
                                      "The parameter distribution should be one of the {}".format(
                                          param_distribution, distribution_list))