def __init__(self, search_space, skopt_kwargs):
    # type: (Dict[str, BaseDistribution], Dict[str, Any]) -> None

    self._search_space = search_space

    dimensions = []
    for name, distribution in sorted(self._search_space.items()):
        if isinstance(distribution, distributions.UniformDistribution):
            # Convert the upper bound from exclusive (optuna) to inclusive (skopt).
            high = np.nextafter(distribution.high, float('-inf'))
            dimension = space.Real(distribution.low, high)
        elif isinstance(distribution, distributions.LogUniformDistribution):
            # Convert the upper bound from exclusive (optuna) to inclusive (skopt).
            high = np.nextafter(distribution.high, float('-inf'))
            dimension = space.Real(distribution.low, high, prior='log-uniform')
        elif isinstance(distribution, distributions.IntUniformDistribution):
            dimension = space.Integer(distribution.low, distribution.high)
        elif isinstance(distribution, distributions.DiscreteUniformDistribution):
            count = (distribution.high - distribution.low) // distribution.q
            dimension = space.Integer(0, count)
        elif isinstance(distribution, distributions.CategoricalDistribution):
            dimension = space.Categorical(distribution.choices)
        else:
            raise NotImplementedError(
                "The distribution {} is not implemented.".format(distribution))
        dimensions.append(dimension)
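For context, a standalone illustration (not from the original source) of what the Integer(0, count) encoding above implies for a DiscreteUniformDistribution: skopt presumably searches over the step index, and the corresponding optuna parameter value would be recovered as low + index * q.

low, high, q = -5.0, 5.0, 2.5
count = (high - low) // q        # 4.0: skopt searches space.Integer(0, 4)
index = 3                        # an index skopt might suggest
value = low + index * q          # 2.5: the value handed back to the optuna trial
assert 0 <= index <= count and low <= value <= high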
def _adjust_discrete_uniform_high(name, low, high, q):
    # type: (str, float, float, float) -> float

    r = high - low
    if math.fmod(r, q) != 0:
        high = (r // q) * q + low
        logger = logging.get_logger(__name__)
        logger.warning('The range of parameter `{}` is not divisible by `q`, and is '
                       'replaced by [{}, {}].'.format(name, low, high))

    return high
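A small standalone example (not part of the original source) of the adjustment above: with low=-5.0, high=5.0 and q=3.0, the range is not divisible by q, so high is pulled down to the largest value reachable from low in steps of q.

import math

low, high, q = -5.0, 5.0, 3.0
r = high - low                       # 10.0
assert math.fmod(r, q) != 0          # triggers the adjustment branch
adjusted_high = (r // q) * q + low   # 3 * 3.0 + (-5.0) = 4.0
assert adjusted_high == 4.0          # grid becomes {-5.0, -2.0, 1.0, 4.0}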
def _initialize_x0(search_space):
    # type: (Dict[str, BaseDistribution]) -> Dict[str, Any]

    x0 = {}
    for name, distribution in search_space.items():
        if isinstance(distribution, UniformDistribution):
            x0[name] = numpy.mean([distribution.high, distribution.low])
        elif isinstance(distribution, DiscreteUniformDistribution):
            x0[name] = numpy.mean([distribution.high, distribution.low])
        elif isinstance(distribution, IntUniformDistribution):
            x0[name] = int(numpy.mean([distribution.high, distribution.low]))
        elif isinstance(distribution, LogUniformDistribution):
            log_high = math.log(distribution.high)
            log_low = math.log(distribution.low)
            x0[name] = math.exp(numpy.mean([log_high, log_low]))
        elif isinstance(distribution, CategoricalDistribution):
            index = (len(distribution.choices) - 1) // 2
            x0[name] = distribution.choices[index]
        else:
            raise NotImplementedError('The distribution {} is not implemented.'.format(
                distribution))

    return x0
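A hedged usage sketch (not from the original source) of _initialize_x0, assuming the distribution classes are importable from optuna.distributions with the constructor arguments shown; it illustrates that the starting point is the arithmetic midpoint of each distribution, the geometric midpoint for log-uniform, and the middle choice for categorical parameters.

from optuna.distributions import (
    CategoricalDistribution, LogUniformDistribution, UniformDistribution)

search_space = {
    'dropout': UniformDistribution(low=0.0, high=0.8),
    'lr': LogUniformDistribution(low=1e-4, high=1e0),
    'optimizer': CategoricalDistribution(choices=('sgd', 'adam', 'rmsprop')),
}
x0 = _initialize_x0(search_space)
# Expected: x0['dropout'] == 0.4, x0['lr'] == 1e-2 (geometric midpoint),
# and x0['optimizer'] == 'adam' (the middle choice).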
def objective(trial):
    # type: (optuna.trial.Trial) -> float

    # Minimal stand-in model for the (16, 20) dummy inputs used below; the
    # original model construction is not shown in this excerpt.
    model = Sequential()
    model.add(Dense(1, activation='sigmoid', input_dim=20))
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

    model.fit(
        np.zeros((16, 20), np.float32),
        np.zeros((16, ), np.int32),
        batch_size=1,
        epochs=1,
        callbacks=[KerasPruningCallback(trial, 'accuracy')],
        verbose=0)

    return 1.0

study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.PRUNED

study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.COMPLETE
assert study.trials[0].value == 1.0
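The pruning tests above and below rely on a DeterministicPruner helper that is not shown in these excerpts. A minimal sketch of what such a helper could look like, assuming the older BasePruner.prune(storage, study_id, trial_id, step) interface used by this code base; the actual test utility may differ.

import optuna

class DeterministicPruner(optuna.pruners.BasePruner):
    """Prunes every trial (or none), so tests can assert a deterministic trial state."""

    def __init__(self, is_pruning):
        # type: (bool) -> None
        self.is_pruning = is_pruning

    def prune(self, storage, study_id, trial_id, step):
        # type: (BaseStorage, int, int, int) -> bool
        return self.is_pruning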
def objective(trial):
    # type: (optuna.trial.Trial) -> float

    model = nn.Sequential(nn.Linear(20, 1), nn.Sigmoid())
    learn = Learner(data_bunch, model, metrics=[accuracy], callback_fns=[
        partial(FastAIPruningCallback, trial=trial, monitor='valid_loss')
    ])
    learn.fit(1)

    return 1.0

study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.PRUNED

study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.COMPLETE
assert study.trials[0].value == 1.0
def objective(trial):
    # type: (optuna.trial.Trial) -> float

    trainer = pl.Trainer(
        # The remaining Trainer arguments, including the pruning callback, are
        # truncated in this excerpt.
        max_nb_epochs=2,
    )
    trainer.checkpoint_callback = None  # Disable unrelated checkpoint callbacks.

    model = Model()
    trainer.fit(model)

    return 1.0

study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.PRUNED

study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.COMPLETE
assert study.trials[0].value == 1.0
def test_all(session):
    # type: (Session) -> None

    study = StudyModel(study_id=1, study_name='test-study', direction=StudyDirection.MINIMIZE)
    trial = TrialModel(trial_id=1, study_id=study.study_id, state=TrialState.COMPLETE)

    session.add(
        TrialSystemAttributeModel(trial_id=trial.trial_id, key='sample-key', value_json='1'))
    session.commit()

    system_attributes = TrialSystemAttributeModel.all(session)
    assert 1 == len(system_attributes)
    assert 'sample-key' == system_attributes[0].key
    assert '1' == system_attributes[0].value_json
def test_count(session):
    # type: (Session) -> None

    study_1 = StudyModel(study_id=1, study_name='test-study-1')
    study_2 = StudyModel(study_id=2, study_name='test-study-2')

    session.add(TrialModel(study_id=study_1.study_id, state=TrialState.COMPLETE))
    session.add(TrialModel(study_id=study_1.study_id, state=TrialState.RUNNING))
    session.add(TrialModel(study_id=study_2.study_id, state=TrialState.RUNNING))
    session.commit()

    assert 3 == TrialModel.count(session)
    assert 2 == TrialModel.count(session, study=study_1)
    assert 1 == TrialModel.count(session, state=TrialState.COMPLETE)
def test_cascade_delete_on_study(session):
    # type: (Session) -> None

    study_id = 1
    study = StudyModel(study_id=study_id, study_name='test-study',
                       direction=StudyDirection.MINIMIZE)
    study.trials.append(TrialModel(study_id=study.study_id, state=TrialState.COMPLETE))
    study.trials.append(TrialModel(study_id=study.study_id, state=TrialState.RUNNING))
    session.add(study)
    session.commit()

    assert 2 == len(TrialModel.where_study(study, session))

    session.delete(study)
    session.commit()

    assert 0 == len(TrialModel.where_study(study, session))