How to use the optuna.distributions.UniformDistribution class in optuna

To help you get started, we’ve selected a few optuna examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github optuna / optuna / tests / test_visualization.py View on Github external
study._append_trial(
        value=0.0,
        params={
            'param_a': 1.0,
            'param_b': 2.0,
            'param_c': 3.0,
            'param_d': 4.0,
        } if with_c_d else {
            'param_a': 1.0,
            'param_b': 2.0,
        },
        distributions={
            'param_a': UniformDistribution(0.0, 3.0),
            'param_b': UniformDistribution(0.0, 3.0),
            'param_c': UniformDistribution(2.0, 5.0),
            'param_d': UniformDistribution(2.0, 5.0),
        } if with_c_d else {
            'param_a': UniformDistribution(0.0, 3.0),
            'param_b': UniformDistribution(0.0, 3.0),
        }
    )
    study._append_trial(
        value=2.0,
        params={
            'param_b': 0.0,
            'param_d': 4.0,
        } if with_c_d else {
            'param_b': 0.0,
        },
        distributions={
            'param_b': UniformDistribution(0.0, 3.0),
            'param_d': UniformDistribution(2.0, 5.0),
github optuna / optuna / tests / visualization_tests / test_utils.py View on Github external
def test_is_log_scale():
    # type: () -> None
    """_is_log_scale should flag a param sampled from a log-uniform distribution."""

    study = create_study()

    # First trial: only the linearly distributed parameter.
    study._append_trial(
        value=0.0,
        params={'param_linear': 1.0},
        distributions={'param_linear': UniformDistribution(0.0, 3.0)}
    )

    # Second trial: adds a log-uniform parameter alongside the linear one.
    study._append_trial(
        value=2.0,
        params={'param_linear': 2.0, 'param_log': 1e-3},
        distributions={
            'param_linear': UniformDistribution(0.0, 3.0),
            'param_log': LogUniformDistribution(1e-5, 1.),
        }
    )

    assert _is_log_scale(study.trials, 'param_log')
    assert not _is_log_scale(study.trials, 'param_linear')
github optuna / optuna / tests / test_distributions.py View on Github external
def test_contains():
    # type: () -> None
    """``_contains`` accepts ``low`` but rejects ``high`` for the tested ranges."""

    uniform = distributions.UniformDistribution(low=1., high=2.)
    assert not uniform._contains(0.9)
    assert uniform._contains(1)
    assert uniform._contains(1.5)
    assert not uniform._contains(2)

    log_uniform = distributions.LogUniformDistribution(low=0.001, high=100)
    assert not log_uniform._contains(0.0)
    assert log_uniform._contains(0.001)
    assert log_uniform._contains(12.3)
    assert not log_uniform._contains(100)

    # NOTE: 3.5 is off the q=2 grid but still accepted, so only the range
    # (not the step) appears to be checked here.
    discrete = distributions.DiscreteUniformDistribution(low=1., high=10., q=2.)
    assert not discrete._contains(0.9)
    assert discrete._contains(1.0)
    assert discrete._contains(3.5)
    assert discrete._contains(6)
github optuna / optuna / tests / visualization_tests / test_slice.py View on Github external
def test_plot_slice_log_scale():
    # type: () -> None
    """Slice plots should use a log axis only for log-distributed parameters."""

    study = create_study()
    study._append_trial(
        value=0.0,
        params={'x_linear': 1.0, 'y_log': 1e-3},
        distributions={
            'x_linear': UniformDistribution(0.0, 3.0),
            'y_log': LogUniformDistribution(1e-5, 1.),
        }
    )

    # Single-parameter plots: the x-axis type follows the distribution.
    assert plot_slice(study, params=['y_log']).layout['xaxis_type'] == 'log'
    assert plot_slice(study, params=['x_linear']).layout['xaxis_type'] is None

    # Multi-parameter plot: each subplot keeps its own axis type.
    combined = plot_slice(study)
    assert combined.layout['xaxis_type'] is None
    assert combined.layout['xaxis2_type'] == 'log'
github optuna / optuna / tests / test_visualization.py View on Github external
'param_b': UniformDistribution(0.0, 3.0),
        }
    )
    study._append_trial(
        value=2.0,
        params={
            'param_b': 0.0,
            'param_d': 4.0,
        } if with_c_d else {
            'param_b': 0.0,
        },
        distributions={
            'param_b': UniformDistribution(0.0, 3.0),
            'param_d': UniformDistribution(2.0, 5.0),
        } if with_c_d else {
            'param_b': UniformDistribution(0.0, 3.0),
        }
    )
    if less_than_two:
        return study

    study._append_trial(
        value=1.0,
        params={
            'param_a': 2.5,
            'param_b': 1.0,
            'param_c': 4.5,
            'param_d': 2.0,
        } if with_c_d else {
            'param_a': 2.5,
            'param_b': 1.0,
        },
github optuna / optuna / tests / storages_tests / test_storages.py View on Github external
def test_set_and_get_trial_param(storage_init_func):
    # type: (Callable[[], BaseStorage]) -> None
    """Round-trip trial parameters through a storage backend.

    NOTE(review): this excerpt only exercises trial_1; ``trial_id_2`` and
    ``trial_id_3`` are created but never used in the visible portion —
    presumably the full test continues beyond this snippet.
    """

    storage = storage_init_func()

    # Setup test across multiple studies and trials.
    study_id = storage.create_new_study()
    trial_id_1 = storage.create_new_trial(study_id)
    trial_id_2 = storage.create_new_trial(study_id)
    trial_id_3 = storage.create_new_trial(storage.create_new_study())

    # Setup Distributions.
    distribution_x = UniformDistribution(low=1.0, high=2.0)
    distribution_y_1 = CategoricalDistribution(choices=('Shibuya', 'Ebisu', 'Meguro'))
    distribution_y_2 = CategoricalDistribution(choices=('Shibuya', 'Shinsen'))
    distribution_z = LogUniformDistribution(low=1.0, high=100.0)

    # Test trial_1: setting new params.
    # 'y' is stored as the internal repr 2, i.e. the index into `choices`.
    assert storage.set_trial_param(trial_id_1, 'x', 0.5, distribution_x)
    assert storage.set_trial_param(trial_id_1, 'y', 2, distribution_y_1)

    # Test trial_1: getting params.
    assert storage.get_trial_param(trial_id_1, 'x') == 0.5
    assert storage.get_trial_param(trial_id_1, 'y') == 2
    # Test trial_1: checking all params and external repr.
    # Externally, index 2 maps back to the choice 'Meguro'.
    assert storage.get_trial(trial_id_1).params == {'x': 0.5, 'y': 'Meguro'}
    # Test trial_1: setting existing name.
    # Re-setting an already-set param returns False and keeps the old value.
    assert not storage.set_trial_param(trial_id_1, 'x', 0.6, distribution_x)
github optuna / optuna / tests / test_distributions.py View on Github external
def test_eq_ne_hash():
    # type: () -> None
    """Equality is field-wise; unequal distributions also hash differently here."""

    # A deep copy has identical field values, so it must compare and hash equal.
    for original in EXAMPLE_DISTRIBUTIONS.values():
        clone = copy.deepcopy(original)
        assert original == clone
        assert not original != clone
        assert hash(original) == hash(clone)

    # Same class, different field values.
    narrow = distributions.UniformDistribution(low=1, high=2)
    wide = distributions.UniformDistribution(low=1, high=3)
    assert narrow != wide
    assert not narrow == wide
    assert hash(narrow) != hash(wide)

    # Same field values but a different distribution class.
    int_narrow = distributions.IntUniformDistribution(low=1, high=2)
    assert narrow != int_narrow
    assert not narrow == int_narrow
    assert hash(narrow) != hash(int_narrow)

    # Comparisons against unrelated types are never equal.
    for other in (1, 'foo'):
        assert narrow != other
        assert not narrow == other
github optuna / optuna / tests / integration_tests / test_skopt.py View on Github external
{'p0': distributions.UniformDistribution(low=0, high=100)})
    assert optimizer._is_compatible(trial)

    # Compatible.
    trial = _create_frozen_trial({
        'p0': 5,
        'p1': 7
    }, {
        'p0': distributions.UniformDistribution(low=0, high=10),
        'p1': distributions.UniformDistribution(low=0, high=10)
    })
    assert optimizer._is_compatible(trial)

    # Incompatible ('p0' doesn't exist).
    trial = _create_frozen_trial({'p1': 5},
                                 {'p1': distributions.UniformDistribution(low=0, high=10)})
    assert not optimizer._is_compatible(trial)

    # Incompatible (the value of 'p0' is out of range).
    trial = _create_frozen_trial({'p0': 20},
                                 {'p0': distributions.UniformDistribution(low=0, high=100)})
    assert not optimizer._is_compatible(trial)

    # Error (different distribution class).
    trial = _create_frozen_trial({'p0': 5},
                                 {'p0': distributions.IntUniformDistribution(low=0, high=10)})
    with pytest.raises(ValueError):
        optimizer._is_compatible(trial)
github optuna / optuna / examples / samplers / simulated_annealing_sampler.py View on Github external
def _sample_neighbor_params(self, search_space):
        """Draw a new point uniformly from a neighborhood of the current trial.

        For each parameter the neighborhood extends
        ``self.neighbor_range_factor * (high - low)`` on either side of the
        current value, clipped to the distribution's bounds.  Only
        ``UniformDistribution`` is supported; anything else raises
        ``NotImplementedError``.
        """
        neighbor = {}
        for name, dist in search_space.items():
            if not isinstance(dist, distributions.UniformDistribution):
                raise NotImplementedError(
                    'Unsupported distribution {}.'.format(dist))
            center = self._current_trial.params[name]
            half_width = (dist.high - dist.low) * self.neighbor_range_factor
            lower = max(center - half_width, dist.low)
            upper = min(center + half_width, dist.high)
            neighbor[name] = self._rng.uniform(lower, upper)

        return neighbor
github optuna / optuna / optuna / integration / cma.py View on Github external
def _initialize_sigma0(search_space):
        # type: (Dict[str, BaseDistribution]) -> float
        """Compute the initial step size (sigma0) for CMA-ES.

        Each distribution contributes one sixth of its range (in log space
        for log-uniform, and ``len(choices) - 1`` for categorical); the
        smallest — most conservative — value wins.

        Raises:
            NotImplementedError: If ``search_space`` contains an unsupported
                distribution type.
            ValueError: If ``search_space`` is empty (``min`` of no values).
        """
        sigma0s = []
        # The keys are not needed here, only the distributions themselves.
        for distribution in search_space.values():
            if isinstance(distribution,
                          (UniformDistribution,
                           DiscreteUniformDistribution,
                           IntUniformDistribution)):
                # All three linear-scale distributions share the same rule,
                # so a single branch replaces three identical ones.
                sigma0s.append((distribution.high - distribution.low) / 6)
            elif isinstance(distribution, LogUniformDistribution):
                # Log-uniform: measure the range in log space.
                log_high = math.log(distribution.high)
                log_low = math.log(distribution.low)
                sigma0s.append((log_high - log_low) / 6)
            elif isinstance(distribution, CategoricalDistribution):
                # Categorical: the spread of the choice-index range.
                sigma0s.append((len(distribution.choices) - 1) / 6)
            else:
                raise NotImplementedError('The distribution {} is not implemented.'.format(
                    distribution))
        return min(sigma0s)