How to use the smac.tae.execute_ta_run.StatusType.SUCCESS enum member in smac

To help you get started, we've selected a few smac examples that show how StatusType.SUCCESS is used in popular public projects.

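StatusType.SUCCESS is an enum member rather than a function: SMAC's target-algorithm executors return it as the first element of their (status, cost, runtime, additional_info) result tuple. A minimal sketch of the typical check, where run_target_algorithm is a hypothetical stand-in for an executor call:

from smac.tae.execute_ta_run import StatusType

# `run_target_algorithm` is a hypothetical stand-in for an executor method
# such as ExecuteTARunOld.run, and `config` for a smac Configuration;
# executors return a four-tuple of (status, cost, runtime, additional_info).
status, cost, runtime, additional_info = run_target_algorithm(config)

if status == StatusType.SUCCESS:
    print("run succeeded: cost=%f, runtime=%fs" % (cost, runtime))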

From automl/SMAC3, test/test_tae/test_tae_old.py: running a dummy target algorithm through ExecuteTARunOld and asserting StatusType.SUCCESS under both the runtime and quality objectives.
import shlex

# `stats` is a smac Stats object created in the test's setup.
eta = ExecuteTARunOld(
    ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 2"),
    stats=stats)
status, cost, runtime, ar_info = eta.run(config={})
assert status == StatusType.SUCCESS
assert cost == 2.0
assert runtime == 2.0

print(status, cost, runtime)

# With run_obj="quality", the reported cost is the quality value returned
# by the wrapper rather than the measured runtime.
eta = ExecuteTARunOld(
    ta=shlex.split("python test/test_tae/dummy_ta_wrapper.py 2"),
    stats=stats, run_obj="quality")
status, cost, runtime, ar_info = eta.run(config={})
assert status == StatusType.SUCCESS
assert cost == 4.0
assert runtime == 2.0

print(status, cost, runtime, ar_info)
From automl/SMAC3, test/test_tae/test_exec_tae_run.py: how the executor's start() maps infinite cost or runtime to a final status, depending on the run objective.
# TEST INF: `get_tae` builds an executor for the given run objective and
# `test_run` is the mocked run method patched in by the test.
eta = get_tae('runtime')
# Patch the run function for a custom return (obj = runtime, cost = inf)
test_run.return_value = StatusType.SUCCESS, np.inf, 1, {}
self.assertEqual(eta.start(config={}, cutoff=10, instance=1)[0], StatusType.SUCCESS)
# (obj = runtime, runtime = inf)
test_run.return_value = StatusType.SUCCESS, 1, np.inf, {}
self.assertEqual(eta.start(config={}, cutoff=10, instance=1)[0], StatusType.TIMEOUT)

eta = get_tae('quality')
# (obj = quality, cost = inf)
test_run.return_value = StatusType.SUCCESS, np.inf, 1, {}
self.assertEqual(eta.start(config={}, instance=1)[0], StatusType.CRASHED)
# (obj = quality, runtime = inf)
test_run.return_value = StatusType.SUCCESS, 1, np.inf, {}
self.assertEqual(eta.start(config={}, instance=1)[0], StatusType.SUCCESS)
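The assertions above pin down the mapping: under the runtime objective an infinite runtime becomes TIMEOUT, while under the quality objective a non-finite cost becomes CRASHED. A minimal sketch of that logic, as an illustration rather than SMAC's actual implementation (the real ExecuteTARun.start also does budget bookkeeping):

import numpy as np
from smac.tae.execute_ta_run import StatusType

def finalize_status(status, cost, runtime, run_obj, cutoff):
    # Illustrative mapping matching the test cases above.
    if run_obj == "runtime" and runtime >= cutoff:
        return StatusType.TIMEOUT   # over-cutoff or infinite runtime
    if run_obj == "quality" and not np.isfinite(cost):
        return StatusType.CRASHED   # unusable quality value
    return status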
From automl/auto-sklearn, test/test_evaluation/test_train_evaluator.py: checking that a holdout evaluation puts StatusType.SUCCESS results on the queue (the opening of the call these keyword arguments belong to is cut off in the snippet).
    resampling_strategy='holdout',
    resampling_strategy_args=None,
    seed=1,
    num_run=1,
    all_scoring_functions=False,
    output_y_hat_optimization=True,
    include=None,
    exclude=None,
    disable_file_output=False,
    instance=self.dataset_name,
    metric=accuracy,
)
rval = read_queue(self.queue)
self.assertEqual(len(rval), 7)
self.assertAlmostEqual(rval[-1]['loss'], 0.030303030303030276)
self.assertEqual(rval[0]['status'], StatusType.SUCCESS)
From automl/SMAC3, test/test_runhistory/test_runhistory.py: adding successful runs to a RunHistory and retrieving them per configuration.
rh = RunHistory(aggregate_func=average_cost)
cs = get_config_space()
config1 = Configuration(cs, values={'a': 1, 'b': 2})
config2 = Configuration(cs, values={'a': 1, 'b': 3})

rh.add(config=config1, cost=10, time=20,
       status=StatusType.SUCCESS, instance_id=1,
       seed=1)

rh.add(config=config2, cost=10, time=20,
       status=StatusType.SUCCESS, instance_id=1,
       seed=1)

rh.add(config=config1, cost=10, time=20,
       status=StatusType.SUCCESS, instance_id=2,
       seed=2)

# config1 was run on two instances, config2 only on one.
ist = rh.get_runs_for_config(config=config1)
self.assertEqual(len(ist), 2)
self.assertEqual(ist[0].instance, 1)
self.assertEqual(ist[1].instance, 2)
From automl/CAVE, cave/reader/conversion/csv2rh.py: filling a RunHistory from CSV rows, defaulting to StatusType.SUCCESS when the row carries no status column.
def add_to_rh(row):
    new_status = (self._interpret_status(row['status'])
                  if 'status' in row else StatusType.SUCCESS)
    rh.add(config=id_to_config[row['config_id']],
           cost=row['cost'],
           time=row['time'] if 'time' in row else -1,
           status=new_status,
           instance_id=row['instance_id'] if 'instance_id' in row else None,
           seed=row['seed'] if 'seed' in row else None,
           budget=row['budget'] if 'budget' in row else 0,
           additional_info=None,
           origin=DataOrigin.INTERNAL)
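_interpret_status maps the CSV's status strings back onto StatusType members. A hypothetical sketch of such a mapping, assuming upper-case status strings (CAVE's real implementation may accept more spellings):

from smac.tae.execute_ta_run import StatusType

# Hypothetical string-to-enum table; the StatusType members shown all
# exist in smac, but the exact set CAVE handles is an assumption.
STATUS_MAP = {
    'SUCCESS': StatusType.SUCCESS,
    'TIMEOUT': StatusType.TIMEOUT,
    'CRASHED': StatusType.CRASHED,
    'MEMOUT': StatusType.MEMOUT,
    'ABORT': StatusType.ABORT,
}

def interpret_status(raw):
    # Fall back to SUCCESS, mirroring the default in the snippet above.
    return STATUS_MAP.get(str(raw).strip().upper(), StatusType.SUCCESS)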
From automl/ParameterImportance, importance/importance/importance.py: building a RunHistory2EPM that treats only StatusType.SUCCESS runs as training data and marks timeouts for imputation.
    cutoff = np.log10(self.scenario.cutoff)
    threshold = np.log10(self.scenario.cutoff * self.scenario.par_factor)
    model = 'rfi'

    # The imputor is only consulted when impute_censored_data=True.
    imputor = RFRImputator(rs=np.random.RandomState(self.seed),
                           cutoff=cutoff,
                           threshold=threshold,
                           model=model,
                           change_threshold=0.01,
                           max_iter=10)
    # TODO: Adapt runhistory2EPM object based on scenario
    rh2EPM = RunHistory2EPM4LogCost(scenario=self.scenario,
                                    num_params=num_params,
                                    success_states=[StatusType.SUCCESS],
                                    impute_censored_data=False,
                                    impute_state=[StatusType.TIMEOUT],
                                    imputor=imputor)
else:
    rh2EPM = RunHistory2EPM4Cost(scenario=self.scenario,
                                 num_params=num_params,
                                 success_states=None,
                                 impute_censored_data=False,
                                 impute_state=None)
X, Y = rh2EPM.transform(self.runhistory)

self.X = X
self.y = Y
self.model.train(X, Y)
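The log10 values above define the imputation bounds on the log-cost scale that RunHistory2EPM4LogCost works in. A quick worked example with assumed scenario values:

import numpy as np

# Assumed values: a 300 s cutoff with a PAR10 penalty factor.
cutoff_s, par_factor = 300, 10
cutoff = np.log10(cutoff_s)                  # log10(300)  ~= 2.48
threshold = np.log10(cutoff_s * par_factor)  # log10(3000) ~= 3.48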
From automl/SMAC3, smac/optimizer/adaptive_component_selection.py: choosing an acquisition function and a runhistory-to-EPM transformation, counting both SUCCESS and CRASHED runs as usable training data.
    raise ValueError(conf['model'])

if conf["acq_func"] == "EI":
    acq = EI(model=model, par=conf.get("par_ei", 0))
elif conf["acq_func"] == "LCB":
    acq = LCB(model=model, par=conf.get("par_lcb", 0.05))
elif conf["acq_func"] == "PI":
    acq = PI(model=model, par=conf.get("par_pi", 0))
elif conf["acq_func"] == "LogEI":
    # par value should be in log-space
    acq = LogEI(model=model, par=conf.get("par_logei", 0))
else:
    raise ValueError(conf['acq_func'])

num_params = len(self.scenario.cs.get_hyperparameters())
success_states = [StatusType.SUCCESS, StatusType.CRASHED]
# TODO: only designed for black-box problems without instances
if conf["y_transform"] == "y":
    rh2epm = RunHistory2EPM4Cost(scenario=self.scenario,
                                 num_params=num_params,
                                 success_states=success_states,
                                 impute_censored_data=False,
                                 impute_state=None)
elif conf["y_transform"] == "log_scaled":
    rh2epm = RunHistory2EPM4LogScaledCost(scenario=self.scenario,
                                          num_params=num_params,
                                          success_states=success_states,
                                          impute_censored_data=False,
                                          impute_state=None)
elif conf["y_transform"] == "inv_scaled":
    rh2epm = RunHistory2EPM4InvScaledCost(scenario=self.scenario,
                                          num_params=num_params,
From automl/auto-sklearn, autosklearn/automl.py: creating dummy predictions through ExecuteTaFuncWithQueue and raising if the run does not end in StatusType.SUCCESS.
stats = Stats(scenario_mock)
stats.start_timing()
ta = ExecuteTaFuncWithQueue(backend=self._backend,
                            autosklearn_seed=self._seed,
                            resampling_strategy=self._resampling_strategy,
                            initial_num_run=num_run,
                            logger=self._logger,
                            stats=stats,
                            metric=self._metric,
                            memory_limit=memory_limit,
                            disable_file_output=self._disable_evaluator_output,
                            **self._resampling_strategy_arguments)

status, cost, runtime, additional_info = \
    ta.run(1, cutoff=self._time_for_task)
if status == StatusType.SUCCESS:
    self._logger.info("Finished creating dummy predictions.")
else:
    self._logger.error('Error creating dummy predictions: %s',
                       str(additional_info))
    # Fail if dummy prediction fails.
    raise ValueError("Dummy prediction failed: %s" % str(additional_info))

return ta.num_run
From automl/auto-sklearn, autosklearn/automl.py: a second dummy-prediction snippet that logs the error instead of raising.
scenario_mock.wallclock_limit = self._time_for_task
# This stats object is a hack - maybe the SMAC stats object should
# already be generated here!
stats = Stats(scenario_mock)
stats.start_timing()
ta = ExecuteTaFuncWithQueue(backend=self._backend,
                            autosklearn_seed=self._seed,
                            resampling_strategy=self._resampling_strategy,
                            initial_num_run=num_run,
                            logger=self._logger,
                            stats=stats,
                            **self._resampling_strategy_arguments)

status, cost, runtime, additional_info = \
    ta.run(1, cutoff=self._time_for_task, memory_limit=memory_limit)
if status == StatusType.SUCCESS:
    self._logger.info("Finished creating dummy predictions.")
else:
    self._logger.error('Error creating dummy predictions: %s',
                       additional_info)

return ta.num_run
From automl/CAVE, cave/feature_analysis/feature_imp.py: forward selection over instance features, counting SUCCESS, CAPPED, and CRASHED runs when building the EPM training data.
def run(self):
    """
    Implementation of the forward-selection loop.
    Uses SMAC's EPM (a random forest) over the feature space to minimize
    the out-of-bag (OOB) error.

    Returns
    -------
    feature_importance: OrderedDict
        dict_keys (first key -> most important) -> OOB error
    """
    parameters = [p.name for p in self.scenario.cs.get_hyperparameters()]
    self.logger.debug("Parameters: %s", parameters)

    rh2epm = RunHistory2EPM4Cost(scenario=self.scenario, num_params=len(parameters),
                                 success_states=[StatusType.SUCCESS,
                                                 StatusType.CAPPED,
                                                 StatusType.CRASHED],
                                 impute_censored_data=False, impute_state=None)

    X, y = rh2epm.transform(self.rh)

    # Reduce the sample size to speed up computation
    if X.shape[0] > self.MAX_SAMPLES:
        idx = np.random.choice(X.shape[0], size=self.MAX_SAMPLES, replace=False)
        X = X[idx, :]
        y = y[idx]

    self.logger.debug("Shape of X: %s, of y: %s, #parameters: %s, #feats: %s",
                      X.shape, y.shape,
                      len(parameters),
                      len(self.scenario.feature_names))
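The docstring above describes the forward-selection loop itself, which the snippet stops short of. A minimal sketch of such a loop, assuming a hypothetical evaluate_oob(features) helper that trains the RF EPM on the selected feature columns and returns its out-of-bag error:

from collections import OrderedDict

def forward_selection(feature_names, evaluate_oob):
    # Greedily add the feature that minimizes OOB error; record the error
    # reached after each addition (first key -> most important feature).
    selected, importance = [], OrderedDict()
    remaining = list(feature_names)
    while remaining:
        best = min(remaining, key=lambda f: evaluate_oob(selected + [f]))
        selected.append(best)
        remaining.remove(best)
        importance[best] = evaluate_oob(selected)
    return importance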