How to use the smac.tae.execute_ta_run.StatusType enum in smac

To help you get started, we’ve selected a few smac examples based on popular ways it is used in public projects.

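StatusType is the enum SMAC uses to report the outcome of a target algorithm run; its members include SUCCESS, TIMEOUT, CRASHED, ABORT, MEMOUT, and CAPPED. As a minimal sketch (assuming the SMAC 0.x import path used throughout the examples below):

from smac.tae.execute_ta_run import StatusType

# StatusType is an enum, so run outcomes compare cleanly against its members.
status = StatusType.SUCCESS
assert status == StatusType.SUCCESS
assert status != StatusType.TIMEOUT

# Typical pattern: branch on the status a target algorithm run returned.
if status in (StatusType.TIMEOUT, StatusType.CAPPED):
    print("run hit its cutoff")
elif status == StatusType.SUCCESS:
    print("run finished successfully")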

github automl / SMAC3 / test / test_tae / test_hydra_tae.py (View on GitHub)
eta = ExecuteTARunHydra(
    cost_oracle=self.oracle, tae=ExecuteTARunAClib,
    ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 1"),
    stats=stats)
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 0

print(status, cost, runtime)

eta = ExecuteTARunHydra(cost_oracle=self.oracle, tae=ExecuteTARunAClib,
                        ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
                        stats=stats)
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 0

print(status, cost, runtime)

eta = ExecuteTARunHydra(cost_oracle=self.oracle, tae=ExecuteTARunAClib,
                        ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
                        stats=stats, run_obj="quality")
status, cost, runtime, ar_info = eta.run(config={}, instance=None, cutoff=10)
assert status == StatusType.SUCCESS
assert cost == 0
assert runtime == 3.0

print(status, cost, runtime, ar_info)
github automl / SMAC3 / test / test_intensify / test_successive_halving.py (View on GitHub)
def test_top_k_1(self):
    """
    test _top_k() for configs with same instance-seed-budget keys
    """
    intensifier = SuccessiveHalving(
        tae_runner=None, stats=self.stats, traj_logger=None,
        rng=np.random.RandomState(12345),
        instances=[1], initial_budget=1)
    self.rh.add(config=self.config1, cost=1, time=1,
                status=StatusType.SUCCESS, instance_id=1, seed=None,
                additional_info=None)
    self.rh.add(config=self.config1, cost=1, time=1,
                status=StatusType.SUCCESS, instance_id=2, seed=None,
                additional_info=None)
    self.rh.add(config=self.config2, cost=2, time=2,
                status=StatusType.SUCCESS, instance_id=1, seed=None,
                additional_info=None)
    self.rh.add(config=self.config2, cost=2, time=2,
                status=StatusType.SUCCESS, instance_id=2, seed=None,
                additional_info=None)
    conf = intensifier._top_k(configs=[self.config2, self.config1],
                              k=1, run_history=self.rh)

    self.assertEqual(conf, [self.config1])
github automl / SMAC3 / test / test_tae / test_exec_tae_run.py (View on GitHub)
def test_start_tae_return_abort(self, test_run):
    """
    testing abort
    """
    # Patch the run function to return a custom (status, cost, runtime, info) tuple.
    test_run.return_value = StatusType.ABORT, 12345.0, 1.2345, {}

    scen = Scenario(scenario={'cs': ConfigurationSpace(),
                              'run_obj': 'quality',
                              'output_dir': ''}, cmd_options=None)
    stats = Stats(scen)
    stats.start_timing()
    eta = ExecuteTARun(ta=lambda *args: None, stats=stats)

    self.assertRaises(
        TAEAbortException, eta.start, config={}, instance=1)
github automl / SMAC3 / test / test_intensify / test_intensify.py (View on GitHub)
def test_compare_configs_no_joint_set(self):
    intensifier = Intensifier(
        tae_runner=None, stats=self.stats,
        traj_logger=TrajLogger(output_dir=None, stats=self.stats),
        rng=None, instances=[1])

    for i in range(2):
        self.rh.add(config=self.config1, cost=2, time=2,
                    status=StatusType.SUCCESS, instance_id=1,
                    seed=i, additional_info=None)

    for i in range(2, 5):
        self.rh.add(config=self.config2, cost=1, time=1,
                    status=StatusType.SUCCESS, instance_id=1,
                    seed=i, additional_info=None)

    # The sets for the incumbent are completely disjoint.
    conf = intensifier._compare_configs(incumbent=self.config1,
                                        challenger=self.config2,
                                        run_history=self.rh,
                                        aggregate_func=average_cost)
    self.assertIsNone(conf)

    # The incumbent still has one instance-seed pair left on which the
    # challenger was not run yet.
github automl / SMAC3 / test / test_utils / test_validate.py (View on GitHub)
def test_epm_reuse_rf(self):
    """If no runhistory is passed to the EPM but a model was trained
    before, that model should be reused (if the reuse_epm flag is set)."""
    scen = Scenario(self.scen_fn, cmd_options={'run_obj': 'quality'})
    scen.feature_array = None
    validator = Validator(scen, self.trajectory)
    old_rh = RunHistory(average_cost)
    for config in [e["incumbent"] for e in self.trajectory]:
        old_rh.add(config, 1, 1, StatusType.SUCCESS, instance_id='0',
                   seed=127)
    self.assertTrue(isinstance(validator.validate_epm(runhistory=old_rh),
                               RunHistory))
    self.assertTrue(isinstance(validator.validate_epm(
                                   output_fn="test/test_files/validation/"),
                               RunHistory))
    self.assertRaises(ValueError, validator.validate_epm, reuse_epm=False)
github automl / auto-sklearn / test / test_evaluation / test_test_evaluator.py (View on GitHub)
eval_t(queue=self.queue,
       backend=self.backend,
       config=self.configuration,
       metric=accuracy,
       seed=1, num_run=1,
       all_scoring_functions=False,
       output_y_hat_optimization=False,
       include=None,
       exclude=None,
       disable_file_output=False,
       instance=self.dataset_name)
rval = read_queue(self.queue)
self.assertEqual(len(rval), 1)
self.assertAlmostEqual(rval[0]['loss'], 0.08)
self.assertEqual(rval[0]['status'], StatusType.SUCCESS)
self.assertNotIn('bac_metric', rval[0]['additional_run_info'])
github automl / SMAC3 / smac / runhistory / runhistory.py (View on GitHub)
config_id = self.config_ids.get(config)
if config_id is None:
    self._n_id += 1
    self.config_ids[config] = self._n_id
    config_id = self.config_ids.get(config)
    self.ids_config[self._n_id] = config

k = RunKey(config_id, instance_id, seed)
v = RunValue(cost, time, status, additional_info)

# Each run key is supposed to be used only once. Repeated attempts to add
# the same run key are silently ignored unless the existing run was capped.
if self.overwrite_existing_runs or self.data.get(k) is None:
    self._add(k, v, status, origin)
elif status != StatusType.CAPPED and self.data[k].status == StatusType.CAPPED:
    # overwrite capped runs with uncapped runs
    self._add(k, v, status, origin)
elif status == StatusType.CAPPED and self.data[k].status == StatusType.CAPPED and cost > self.data[k].cost:
    # overwrite a capped run if the new run was censored with a larger cutoff
    self._add(k, v, status, origin)
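
The overwrite rules above mean that a second result for an existing (config, instance, seed) run key is normally ignored, but an uncapped run replaces a capped one. A minimal sketch exercising that behavior (assuming the SMAC 0.x API used in the snippets on this page; the empty ConfigurationSpace is only a placeholder):

from ConfigSpace import ConfigurationSpace
from smac.optimizer.objective import average_cost
from smac.runhistory.runhistory import RunHistory
from smac.tae.execute_ta_run import StatusType

cs = ConfigurationSpace()
config = cs.get_default_configuration()  # placeholder configuration

rh = RunHistory(average_cost)

# First result for this (config, instance, seed) key: a capped run.
rh.add(config=config, cost=5.0, time=5.0, status=StatusType.CAPPED,
       instance_id=1, seed=42, additional_info=None)

# A later uncapped result for the same key overwrites the capped entry.
rh.add(config=config, cost=3.0, time=3.0, status=StatusType.SUCCESS,
       instance_id=1, seed=42, additional_info=None)

assert next(iter(rh.data.values())).status == StatusType.SUCCESS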
github automl / ParameterImportance / pimp / importance / importance.py (View on GitHub)
        self.scenario.par_factor)
    model = RandomForestWithInstances(self.scenario.cs,
                                      self.types, self.bounds, 12345,
                                      instance_features=self.scenario.feature_array)

    imputor = RFRImputator(rng=self.rng,
                           cutoff=cutoff,
                           threshold=threshold,
                           model=model,
                           change_threshold=0.01,
                           max_iter=10)
    rh2EPM = RunHistory2EPM4LogCost(scenario=self.scenario,
                                    num_params=num_params,
                                    success_states=[StatusType.SUCCESS],
                                    impute_censored_data=self.impute,
                                    impute_state=[StatusType.TIMEOUT,
                                                  StatusType.CAPPED],
                                    imputor=imputor)
else:
    self.model = 'rfi'
    rh2EPM = RunHistory2EPM4Cost(scenario=self.scenario,
                                 num_params=num_params,
                                 success_states=None,
                                 impute_censored_data=self.impute,
                                 impute_state=None)
self.logger.info('Using model %s' % str(self.model))
X, Y = rh2EPM.transform(self.runhistory)

self.X = X
self.y = Y