def objective(trial):
    # type: (optuna.trial.Trial) -> float

    model = nn.Sequential(nn.Linear(20, 1), nn.Sigmoid())
    learn = Learner(data_bunch, model, metrics=[accuracy], callback_fns=[
        partial(FastAIPruningCallback, trial=trial, monitor='valid_loss')
    ])
    learn.fit(1)
    return 1.0


# The pruner is activated: the trial should end up pruned.
study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.PRUNED

# The pruner is deactivated: the trial should complete with the returned value.
study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.COMPLETE
assert study.trials[0].value == 1.0
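
# `DeterministicPruner` is not defined in these snippets. A minimal sketch of such a
# test helper, assuming the structs-era `BasePruner` interface in which `prune` takes
# `(storage, study_id, trial_id, step)` and returns whether to prune:
import optuna


class DeterministicPruner(optuna.pruners.BasePruner):
    """A pruner that always (or never) prunes, regardless of reported values."""

    def __init__(self, is_pruning):
        # type: (bool) -> None
        self.is_pruning = is_pruning

    def prune(self, storage, study_id, trial_id, step):
        # type: (...) -> bool
        return self.is_pruning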
def objective(trial):
    # type: (optuna.trial.Trial) -> float

    # The snippet is truncated above this call; the trainer is assumed to be a
    # pytorch_lightning.Trainer given a PyTorchLightningPruningCallback, e.g.:
    trainer = pl.Trainer(
        early_stop_callback=PyTorchLightningPruningCallback(trial, monitor='accuracy'),
        max_nb_epochs=2,
    )
    trainer.checkpoint_callback = None  # Disable unrelated checkpoint callbacks.

    model = Model()
    trainer.fit(model)
    return 1.0


# The pruner is activated: the trial should end up pruned.
study = optuna.create_study(pruner=DeterministicPruner(True))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.PRUNED

# The pruner is deactivated: the trial should complete with the returned value.
study = optuna.create_study(pruner=DeterministicPruner(False))
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.COMPLETE
assert study.trials[0].value == 1.0
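
# The core of such a Lightning pruning callback can be sketched independently of the
# Lightning hook signatures (a hypothetical helper, not the library's implementation):
# report the monitored metric every epoch and raise `TrialPruned` when the study's
# pruner decides to stop.
def report_and_maybe_prune(trial, epoch, current_score):
    trial.report(current_score, step=epoch)
    if trial.should_prune(epoch):
        raise optuna.structs.TrialPruned('Trial was pruned at epoch {}.'.format(epoch))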
# The snippet is truncated above; `env` is assumed to be an `xgb.core.CallbackEnv`
# carrying the last evaluation result, e.g.:
env = xgb.core.CallbackEnv(
    model=None,
    cvfolds=None,
    begin_iteration=0,
    end_iteration=1,
    rank=1,
    iteration=1,
    evaluation_result_list=[['validation-error', 1.]])

# The pruner is deactivated: calling the callback must not raise.
study = optuna.create_study(pruner=DeterministicPruner(False))
trial = create_running_trial(study, 1.0)
pruning_callback = XGBoostPruningCallback(trial, 'validation-error')
pruning_callback(env)

# The pruner is activated: calling the callback must raise `TrialPruned`.
study = optuna.create_study(pruner=DeterministicPruner(True))
trial = create_running_trial(study, 1.0)
pruning_callback = XGBoostPruningCallback(trial, 'validation-error')
with pytest.raises(optuna.structs.TrialPruned):
    pruning_callback(env)
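
# Outside of tests, the callback is passed to `xgb.train` and watches one of the
# `evals` metrics. A sketch (dtrain/dvalid and the parameters are placeholders):
pruning_callback = XGBoostPruningCallback(trial, 'validation-error')
bst = xgb.train(
    {'objective': 'binary:logistic'},
    dtrain,
    evals=[(dvalid, 'validation')],
    callbacks=[pruning_callback])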
def objective(trial):
    # type: (optuna.trial.Trial) -> float

    # The snippet is truncated above; `clf` is assumed to be a `tf.estimator.Estimator`
    # constructed just before this point.
    hook = TensorFlowPruningHook(
        trial=trial,
        estimator=clf,
        metric="accuracy",
        run_every_steps=5,
    )
    train_spec = tf.estimator.TrainSpec(
        input_fn=fixed_value_input_fn, max_steps=100, hooks=[hook])
    eval_spec = tf.estimator.EvalSpec(input_fn=fixed_value_input_fn, steps=1, hooks=[])
    tf.estimator.train_and_evaluate(estimator=clf, train_spec=train_spec, eval_spec=eval_spec)
    return 1.0


# The pruner is activated: the trial should end up pruned.
study = optuna.create_study(pruner=DeterministicPruner(True), direction='maximize')
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.PRUNED

# The pruner is deactivated: the trial should complete with the returned value.
study = optuna.create_study(pruner=DeterministicPruner(False), direction='maximize')
study.optimize(objective, n_trials=1)
assert study.trials[0].state == optuna.structs.TrialState.COMPLETE
assert study.trials[0].value == 1.0

# Check the case where `read_eval_metrics` returns None for the metric:
# the hook reports NaN as the intermediate value and the trial is still pruned.
value = OrderedDict([(10, {'accuracy': None})])
with patch('optuna.integration.tensorflow.read_eval_metrics', return_value=value) as mock_obj:
    study = optuna.create_study(pruner=DeterministicPruner(True), direction='maximize')
    study.optimize(objective, n_trials=1)
    assert mock_obj.call_count == 1
    assert math.isnan(study.trials[0].intermediate_values[10])
    assert study.trials[0].state == optuna.structs.TrialState.PRUNED
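
# `fixed_value_input_fn` is not shown above. A minimal sketch of such a helper,
# assuming it only needs to feed a constant batch so training is deterministic:
import numpy as np
import tensorflow as tf


def fixed_value_input_fn():
    features = {'x': np.ones((16, 20), dtype=np.float32)}
    labels = np.zeros((16,), dtype=np.int32)
    return tf.data.Dataset.from_tensor_slices((features, labels)).repeat().batch(8)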
    A pandas DataFrame_ of trials in the :class:`~optuna.study.Study`.

.. _DataFrame: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
.. _MultiIndex: https://pandas.pydata.org/pandas-docs/stable/advanced.html
"""

_check_pandas_availability()

trials = self.trials

# If no trials, return an empty dataframe.
if not len(trials):
    return pd.DataFrame()

assert all(isinstance(trial, structs.FrozenTrial) for trial in trials)
fields_to_df_columns = collections.OrderedDict()  # type: Dict[str, str]
for field in structs.FrozenTrial._ordered_fields:
    if field.startswith('_'):
        if not include_internal_fields:
            continue
        else:
            # Leading underscores (the Python convention for internal fields)
            # are stripped from the dataframe column names.
            df_column = field[1:]
    else:
        df_column = field
    fields_to_df_columns[field] = df_column

# `column_agg` is an aggregator of column names.
# Keys of `column_agg` are attributes of `FrozenTrial` such as 'trial_id' and 'params'.
# Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').
column_agg = collections.defaultdict(set)  # type: Dict[str, Set]
non_nested_field = ''
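
# Example use of the resulting dataframe (a sketch; 'n_layers' is the hypothetical
# parameter name from the comment above). Columns form a pandas MultiIndex_ such as
# ('value', '') and ('params', 'n_layers'):
df = study.trials_dataframe()
print(df['value'])
print(df['params']['n_layers'])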
def create_new_study_id(self, study_name=None):
    # type: (Optional[str]) -> int

    session = self.scoped_session()

    if study_name is None:
        study_name = self._create_unique_study_name(session)

    study = models.StudyModel(study_name=study_name, direction=structs.StudyDirection.NOT_SET)
    session.add(study)
    if not self._commit_with_integrity_check(session):
        session.close()
        raise structs.DuplicatedStudyError(
            "Another study with name '{}' already exists. "
            "Please specify a different name, or reuse the existing one "
            "by setting `load_if_exists` (for Python API) or "
            "`--skip-if-exists` flag (for CLI).".format(study_name))

    self.logger.info('A new study created with name: {}'.format(study.study_name))

    study_id = study.study_id
    session.close()
    return study_id
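
# A usage sketch (the SQLite URL and study name are placeholders):
storage = optuna.storages.RDBStorage('sqlite:///example.db')
study_id = storage.create_new_study_id('my-study')
# Calling it again with the same name raises `DuplicatedStudyError`.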
def main(unused_argv):
    study = optuna.create_study(direction='maximize')
    study.optimize(objective, n_trials=25)

    pruned_trials = [t for t in study.trials if t.state == optuna.structs.TrialState.PRUNED]
    complete_trials = [t for t in study.trials if t.state == optuna.structs.TrialState.COMPLETE]

    print('Study statistics: ')
    print('  Number of finished trials: ', len(study.trials))
    print('  Number of pruned trials: ', len(pruned_trials))
    print('  Number of complete trials: ', len(complete_trials))

    print('Best trial:')
    trial = study.best_trial

    print('  Value: ', trial.value)
    print('  Params: ')
    for key, value in trial.params.items():
        print('    {}: {}'.format(key, value))

    # Clean up the model directory used by the estimator.
    shutil.rmtree(MODEL_DIR)
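
# Such a `tf.estimator` example is typically launched through TensorFlow 1.x's app
# runner (standard boilerplate, assumed here):
if __name__ == '__main__':
    tf.app.run(main=main)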
def get_best_trial(self, study_id):
    # type: (int) -> structs.FrozenTrial

    all_trials = self.get_all_trials(study_id)
    all_trials = [t for t in all_trials if t.state is structs.TrialState.COMPLETE]

    if len(all_trials) == 0:
        raise ValueError('No trials are completed yet.')

    if self.get_study_direction(study_id) == structs.StudyDirection.MAXIMIZE:
        return max(all_trials, key=lambda t: t.value)
    return min(all_trials, key=lambda t: t.value)
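
# This ValueError is what surfaces when `study.best_trial` is read before any trial
# completes, e.g.:
study = optuna.create_study()
try:
    study.best_trial
except ValueError as e:
    print(e)  # -> No trials are completed yet.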
def _transition_probability(self, study, prev_trial):
    if self._current_trial is None:
        return 1.0

    prev_value = prev_trial.value
    current_value = self._current_trial.value

    # `prev_trial` is always accepted if it has a better value than the current trial.
    if study.direction == structs.StudyDirection.MINIMIZE and prev_value <= current_value:
        return 1.0
    elif study.direction == structs.StudyDirection.MAXIMIZE and prev_value >= current_value:
        return 1.0

    # Otherwise, accept `prev_trial` with the Metropolis probability used by
    # simulated annealing, which decays with the gap and with lower temperature.
    return np.exp(-abs(current_value - prev_value) / self._temperature)
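
# A worked example of the acceptance probability (hypothetical numbers): in a MINIMIZE
# study with temperature 2.0, a previous value of 3.0 is worse than a current value of
# 1.0, so it is accepted with probability exp(-|1.0 - 3.0| / 2.0):
print(np.exp(-abs(1.0 - 3.0) / 2.0))  # ~0.368, i.e. accepted roughly 37% of the time.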
def _get_last_complete_trial(study):
    complete_trials = [t for t in study.trials if t.state == structs.TrialState.COMPLETE]
    return complete_trials[-1]