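# The snippets below rely on the following names. This is a hedged import
# sketch, assuming the usual SMAC3 module layout; the exact paths (notably for
# StatusType, average_cost and SMAC4AC) differ between SMAC3 releases, so
# treat these as an assumption rather than the imports of the original files.
import os
from unittest import mock

import numpy as np

from smac.facade.smac_ac_facade import SMAC4AC
from smac.optimizer.objective import average_cost
from smac.runhistory.runhistory import RunHistory
from smac.scenario.scenario import Scenario
from smac.tae.execute_ta_run import StatusType
from smac.utils.io.traj_logging import TrajLogger
from smac.utils.validate import Validator, _Run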
def test_rng(self):
    scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
    # an int seed, no seed at all and an existing RandomState should all
    # end up as a np.random.RandomState on the validator
    validator = Validator(scen, self.trajectory, 42)
    self.assertTrue(isinstance(validator.rng, np.random.RandomState))
    validator = Validator(scen, self.trajectory)
    self.assertTrue(isinstance(validator.rng, np.random.RandomState))
    validator = Validator(scen, self.trajectory, np.random.RandomState())
    self.assertTrue(isinstance(validator.rng, np.random.RandomState))

def test_validation(self):
    with mock.patch.object(TrajLogger, "read_traj_aclib_format",
                           return_value=None) as traj_mock:
        self.scenario.output_dir = "test"
        smac = SMAC4AC(self.scenario)
        self.output_dirs.append(smac.output_dir)
        smbo = smac.solver
        with mock.patch.object(Validator, "validate", return_value=None) as validation_mock:
            smbo.validate(config_mode='inc', instance_mode='train+test',
                          repetitions=1, use_epm=False, n_jobs=-1, backend='threading')
            self.assertTrue(validation_mock.called)
        with mock.patch.object(Validator, "validate_epm", return_value=None) as epm_validation_mock:
            smbo.validate(config_mode='inc', instance_mode='train+test',
                          repetitions=1, use_epm=True, n_jobs=-1, backend='threading')
            self.assertTrue(epm_validation_mock.called)

def test_get_configs(self):
    scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
    validator = Validator(scen, self.trajectory, self.rng)
    self.assertEqual(1, len(validator._get_configs("def")))
    self.assertEqual(1, len(validator._get_configs("inc")))
    self.assertEqual(2, len(validator._get_configs("def+inc")))
    self.assertEqual(7, len(validator._get_configs("wallclock_time")))
    self.assertEqual(8, len(validator._get_configs("cpu_time")))
    self.assertEqual(10, len(validator._get_configs("all")))
    # Using maxtime
    validator.scen.wallclock_limit = 65
    validator.scen.algo_runs_timelimit = 33
    self.assertEqual(8, len(validator._get_configs("wallclock_time")))
    self.assertEqual(9, len(validator._get_configs("cpu_time")))
    # Exceptions
    self.assertRaises(ValueError, validator._get_configs, "notanoption")
    self.assertRaises(ValueError, validator._get_instances, "notanoption")

def test_inst_no_feat(self):
    ''' test that scenarios without instance features are handled correctly '''
    scen = Scenario(self.scen_fn,
                    cmd_options={'run_obj': 'quality',
                                 'train_insts': self.train_insts,
                                 'test_insts': self.test_insts})
    self.assertTrue(scen.feature_array is None)
    self.assertEqual(len(scen.feature_dict), 0)
    scen.instance_specific = self.inst_specs
    validator = Validator(scen, self.trajectory, self.rng)
    # Add a few runs and check that they are processed correctly
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory(average_cost)
    for config in old_configs[:int(len(old_configs) / 2)]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                   seed=127)
    rh = validator.validate_epm('all', 'train+test', 1, old_rh)
    self.assertEqual(len(old_rh.get_all_configs()), 4)
    self.assertEqual(len(rh.get_all_configs()), 10)

def test_nonexisting_output(self):
    # missing output directories should be created on demand
    scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
    validator = Validator(scen, self.trajectory)
    path = "test/test_files/validation/test/nonexisting/output"
    validator.validate(output_fn=path)
    self.assertTrue(os.path.exists(path))

def test_objective_runtime(self):
    ''' test that validation works when the run objective is runtime
        (which involves imputation) '''
    scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'runtime',
                                               'cutoff_time': 5})
    validator = Validator(scen, self.trajectory, self.rng)
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory(average_cost)
    for config in old_configs[:int(len(old_configs) / 2)]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0')
    validator.validate_epm('all', 'train', 1, old_rh)

def test_validate_epm(self):
    ''' test validation using the EPM '''
    scen = Scenario(self.scen_fn,
                    cmd_options={'run_obj': 'quality',
                                 'train_insts': self.train_insts,
                                 'test_insts': self.test_insts,
                                 'features': self.feature_dict})
    scen.instance_specific = self.inst_specs
    validator = Validator(scen, self.trajectory, self.rng)
    # Add a few runs and check that they are processed correctly
    old_configs = [entry["incumbent"] for entry in self.trajectory]
    old_rh = RunHistory(average_cost)
    for config in old_configs[:int(len(old_configs) / 2)]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                   seed=127)
    validator.validate_epm('all', 'train', 1, old_rh)

def test_get_runs_capped(self):
    ''' test that capped, crashed and aborted runs are ignored
        during runhistory recovery '''
    scen = Scenario(self.scen_fn,
                    cmd_options={'run_obj': 'quality',
                                 'instances': ['0']})
    validator = Validator(scen, self.trajectory, self.rng)
    # Build an old runhistory containing one run per status type
    old_configs = ['config1', 'config2', 'config3',
                   'config4', 'config5', 'config6']
    old_rh = RunHistory(average_cost)
    old_rh.add('config1', 1, 1, StatusType.SUCCESS, instance_id='0', seed=0)
    old_rh.add('config2', 1, 1, StatusType.TIMEOUT, instance_id='0', seed=0)
    old_rh.add('config3', 1, 1, StatusType.CRASHED, instance_id='0', seed=0)
    old_rh.add('config4', 1, 1, StatusType.ABORT, instance_id='0', seed=0)
    old_rh.add('config5', 1, 1, StatusType.MEMOUT, instance_id='0', seed=0)
    old_rh.add('config6', 1, 1, StatusType.CAPPED, instance_id='0', seed=0)
    # Only the crashed, aborted and capped runs are expected to be re-run
    expected = [_Run(inst_specs='0', seed=0, inst='0', config='config3'),
                _Run(inst_specs='0', seed=0, inst='0', config='config4'),
                _Run(inst_specs='0', seed=0, inst='0', config='config6')]
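    # The snippet ends before the actual check. Presumably the expected list is
    # compared against the runs the Validator still wants to execute. A hedged
    # sketch, assuming a private helper Validator._get_runs(configs, instances,
    # repetitions, runhistory) that returns the missing runs plus a runhistory
    # of reusable results; the helper name, signature and return shape are
    # assumptions, not shown in the snippet above:
    runs, validated_rh = validator._get_runs(old_configs, ['0'], repetitions=1,
                                             runhistory=old_rh)
    self.assertEqual(runs, expected)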

def test_validate(self):
    ''' test validation '''
    scen = Scenario(self.scen_fn,
                    cmd_options={'run_obj': 'quality',
                                 'train_insts': self.train_insts,
                                 'test_insts': self.test_insts})
    scen.instance_specific = self.inst_specs
    validator = Validator(scen, self.trajectory, self.rng)
    # Test basic usage
    rh = validator.validate(config_mode='def', instance_mode='test',
                            repetitions=3)
    self.assertEqual(len(rh.get_all_configs()), 1)
    self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 9)

    rh = validator.validate(config_mode='inc', instance_mode='train+test')
    self.assertEqual(len(rh.get_all_configs()), 1)
    self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 6)

    rh = validator.validate(config_mode='wallclock_time', instance_mode='train')
    self.assertEqual(len(rh.get_all_configs()), 7)
    self.assertEqual(sum([len(rh.get_runs_for_config(c)) for c in
                          rh.get_all_configs()]), 21)

    # Test with backend multiprocessing
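    # The snippet is cut off after this comment. A minimal sketch of what the
    # multiprocessing check might look like, mirroring the threading-backed
    # calls earlier in this file (the concrete calls and assertions of the
    # original test are not shown here and are assumed):
    rh = validator.validate(config_mode='def', instance_mode='test',
                            repetitions=3, n_jobs=-1, backend='multiprocessing')
    self.assertEqual(len(rh.get_all_configs()), 1)
    self.assertEqual(len(rh.get_runs_for_config(rh.get_all_configs()[0])), 9)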

# Tail of the validate() method of the SMBO solver object (called above as
# smbo.validate(...)). The original snippet starts in the middle of the
# docstring; the signature below is reconstructed from the keyword arguments
# used in test_validation(), and its default values are assumptions.
def validate(self, config_mode='inc', instance_mode='train+test', repetitions=1,
             use_epm=False, n_jobs=-1, backend='threading'):
    """
    Returns
    -------
    runhistory: RunHistory
        runhistory containing all specified runs
    """
    if isinstance(config_mode, str):
        # recover the trajectory written by this run so that string modes
        # such as 'def', 'inc' or 'wallclock_time' can be resolved
        traj_fn = os.path.join(self.scenario.output_dir_for_this_run, "traj_aclib2.json")
        trajectory = TrajLogger.read_traj_aclib_format(fn=traj_fn, cs=self.scenario.cs)
    else:
        trajectory = None
    if self.scenario.output_dir_for_this_run:
        new_rh_path = os.path.join(self.scenario.output_dir_for_this_run, "validated_runhistory.json")
    else:
        new_rh_path = None

    validator = Validator(self.scenario, trajectory, self.rng)
    if use_epm:
        # estimate the costs with the empirical performance model
        new_rh = validator.validate_epm(config_mode=config_mode,
                                        instance_mode=instance_mode,
                                        repetitions=repetitions,
                                        runhistory=self.runhistory,
                                        output_fn=new_rh_path)
    else:
        # actually execute the target algorithm for every requested run
        new_rh = validator.validate(config_mode, instance_mode, repetitions,
                                    n_jobs, backend, self.runhistory,
                                    self.intensifier.tae_runner,
                                    output_fn=new_rh_path)
    return new_rh
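
# A hedged end-to-end sketch (not part of the original snippets) of how the
# validate() method above is typically driven outside of a test. The scenario
# file path is a placeholder and the facade import path used at the top of
# this file is version-dependent.
scenario = Scenario("scenario.txt")  # hypothetical scenario file
smac = SMAC4AC(scenario)
smac.optimize()
validated_rh = smac.solver.validate(config_mode='inc', instance_mode='train+test',
                                    repetitions=1, use_epm=False,
                                    n_jobs=1, backend='threading')
for config in validated_rh.get_all_configs():
    print(config, validated_rh.get_cost(config))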