How to use smac.tae.execute_ta_run.StatusType.TIMEOUT in smac

To help you get started, we’ve selected a few smac examples based on popular ways StatusType.TIMEOUT is used in public projects.

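StatusType is the enum SMAC's target algorithm executors (TAEs) use to report how a run ended; StatusType.TIMEOUT marks a run that exhausted its cutoff. As a minimal, self-contained sketch against the SMAC3 0.x API used in the snippets below (slow_target is a made-up target function, not from any of the projects):

    import time

    from ConfigSpace import ConfigurationSpace
    from smac.scenario.scenario import Scenario
    from smac.stats.stats import Stats
    from smac.tae.execute_func import ExecuteTAFuncDict
    from smac.tae.execute_ta_run import StatusType

    def slow_target(config):
        time.sleep(5)  # deliberately overrun the cutoff
        return 1.0

    scen = Scenario({'cs': ConfigurationSpace(), 'run_obj': 'quality',
                     'output_dir': ''})
    taf = ExecuteTAFuncDict(ta=slow_target, stats=Stats(scen))
    status, cost, runtime, additional_info = taf.run(config=None, cutoff=1)
    assert status == StatusType.TIMEOUT  # killed by pynisher at the cutoff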

github automl / SMAC3 / test / test_tae / test_tae_aclib.py (View on GitHub)
def test_run(self):
        '''
            running some simple algo in aclib 2.0 style
        '''
        scen = Scenario(scenario={'cs': ConfigurationSpace(),
                                  'run_obj': 'quality',
                                  'output_dir': ''}, cmd_options=None)
        stats = Stats(scen)

        eta = ExecuteTARunAClib(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 1"),
            stats=stats)
        status, cost, runtime, ar_info = eta.run(config={})
        assert status == StatusType.TIMEOUT
        assert cost == 2.0
        assert runtime == 2.0

        print(status, cost, runtime)

        eta = ExecuteTARunAClib(
            ta=shlex.split("python test/test_tae/dummy_ta_wrapper_aclib.py 2"),
            stats=stats)
        status, cost, runtime, ar_info = eta.run(config={})
        assert status == StatusType.SUCCESS
        assert cost == 3.0
        assert runtime == 3.0

        print(status, cost, runtime)

        eta = ExecuteTARunAClib(ta=shlex.split(
github automl / SMAC3 / test / test_runhistory / test_rfr_imputor.py (View on GitHub)
    i = rs.randint(-10, 10)
    f = rs.rand(1)[0]
    seed = rs.randint(0, 10000)

    # 'a' occurs more often than 'b'
    c = 'a' if rs.binomial(1, 0.2) == 0 else 'b'

    # We have 100 instances, but prefer the middle ones
    instance_id = int(rs.normal(loc=50, scale=20, size=1)[0])
    instance_id = min(max(0, instance_id), 100)

    status = StatusType.SUCCESS
    runtime = 10**(numpy.sin(i)+f) + seed/10000 - numpy.sin(instance_id)

    if runtime > 40:
        status = StatusType.TIMEOUT
        runtime = 40
    elif instance_id > 50 and runtime > 15:
        # This is a timeout with probability 0.5
        status = StatusType.TIMEOUT
        runtime /= 2.0

    config = Configuration(cs, values={'cat_a_b': c, 'float_0_1': f,
                                       'integer_0_100': i})

    return config, seed, runtime, status, instance_id
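The helper above fabricates (config, seed, runtime, status, instance_id) tuples, censoring long runs as TIMEOUT so the imputer has censored data to work on. Feeding one such tuple into a run history follows the same pattern as the intensifier test further down; a sketch, where generate_run is a hypothetical name for the helper and the import paths follow SMAC3 0.x:

    from smac.optimizer.objective import average_cost
    from smac.runhistory.runhistory import RunHistory

    rh = RunHistory(aggregate_func=average_cost)
    # generate_run: hypothetical name for the generator shown above
    config, seed, runtime, status, instance_id = generate_run(cs, rs)
    rh.add(config=config, cost=runtime, time=runtime, status=status,
           instance_id=instance_id, seed=seed, additional_info=None)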
github automl / auto-sklearn / test / test_evaluation / test_evaluation.py (View on GitHub)
def test_eval_with_limits_holdout_fail_timeout(self, pynisher_mock):
        m1 = unittest.mock.Mock()
        m2 = unittest.mock.Mock()
        m1.return_value = m2
        pynisher_mock.return_value = m1
        m2.exit_status = pynisher.TimeoutException
        m2.wall_clock_time = 30
        ta = ExecuteTaFuncWithQueue(backend=BackendMock(), autosklearn_seed=1,
                                    resampling_strategy='holdout',
                                    logger=self.logger,
                                    stats=self.stats,
                                    memory_limit=3072,
                                    metric=accuracy)
        info = ta.start(config=None, instance=None, cutoff=30)
        self.assertEqual(info[0], StatusType.TIMEOUT)
        self.assertEqual(info[1], 1.0)
        self.assertIsInstance(info[2], float)
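pynisher never actually runs anything here: the mock's exit_status is set to pynisher.TimeoutException, and ExecuteTaFuncWithQueue is expected to translate it into StatusType.TIMEOUT with the worst cost for the accuracy metric (1.0). In outline the translation looks like this (a simplified sketch, not the verbatim auto-sklearn code, which also inspects the result queue):

    import pynisher
    from smac.tae.execute_ta_run import StatusType

    def exit_status_to_status_type(exit_status):
        # simplified mapping of pynisher exit statuses to SMAC statuses
        if exit_status is pynisher.TimeoutException:
            return StatusType.TIMEOUT
        if exit_status is pynisher.MemorylimitException:
            return StatusType.MEMOUT
        return StatusType.CRASHED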
github automl / SMAC3 / test / test_intensify / test_intensify.py (View on GitHub)
                    status=StatusType.SUCCESS, instance_id=1,
                    seed=12345,
                    additional_info=None)
        
        # config2 should have a timeout (due to adaptive capping)
        # and config1 should still be the incumbent
        inc = intensifier._race_challenger(challenger=self.config2,
                                           incumbent=self.config1,
                                           run_history=self.rh,
                                           aggregate_func=average_cost)
        # self.assertTrue(False)
        self.assertEqual(inc, self.config1)
        
        # further run for incumbent
        self.rh.add(config=self.config1, cost=2, time=2,
                    status=StatusType.TIMEOUT, instance_id=2,
                    seed=12345,
                    additional_info=None)
        
        # give config2 a second chance
        inc = intensifier._race_challenger(challenger=self.config2,
                               incumbent=self.config1,
                               run_history=self.rh,
                               aggregate_func=average_cost)      
        
        # the incumbent should still be config1 because config2 should get
        # a full timeout on inst 1, such that c(config1) = 1.25 and
        # c(config2) is close to 1.3
        self.assertEqual(inc, self.config1)
        # the capped run should not be counted in runs_per_config
        self.assertAlmostEqual(self.rh.runs_per_config[2], 2)
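Adaptive capping grants the challenger only as much runtime as the incumbent needed on the shared instances, so an over-budget challenger comes back early with a TIMEOUT instead of consuming the full cutoff. A simplified version of the cutoff rule (SMAC additionally applies a PAR multiplier and a slack factor, omitted here):

    def adapted_cutoff(incumbent_sum_cost, challenger_sum_cost, cutoff):
        # the challenger may spend at most what the incumbent spent on the
        # shared instances, minus what the challenger has already used
        return min(cutoff, incumbent_sum_cost - challenger_sum_cost)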
github automl / auto-sklearn / test / test_automl / test_automl.py (View on GitHub)
        # Case 2. Check that if the status type returned by ta.run() is not
        # SUCCESS, the function raises an error.
        ta_run_mock.return_value = StatusType.CRASHED, None, None, "test"
        self.assertRaisesRegex(ValueError,
                               'Dummy prediction failed: test',
                               auto._do_dummy_prediction,
                               D, 1,
                               )
        ta_run_mock.return_value = StatusType.ABORT, None, None, "test"
        self.assertRaisesRegex(ValueError,
                               'Dummy prediction failed: test',
                               auto._do_dummy_prediction,
                               D, 1,
                               )
        ta_run_mock.return_value = StatusType.TIMEOUT, None, None, "test"
        self.assertRaisesRegex(ValueError,
                               'Dummy prediction failed: test',
                               auto._do_dummy_prediction,
                               D, 1,
                               )
        ta_run_mock.return_value = StatusType.MEMOUT, None, None, "test"
        self.assertRaisesRegex(ValueError,
                               'Dummy prediction failed: test',
                               auto._do_dummy_prediction,
                               D, 1,
                               )
        ta_run_mock.return_value = StatusType.CAPPED, None, None, "test"
        self.assertRaisesRegex(ValueError,
                               'Dummy prediction failed: test',
                               auto._do_dummy_prediction,
                               D, 1,
                               )
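The guard these five assertions exercise presumably reduces to a single status check on the tuple returned by ta.run; reconstructed from the assertions, with hypothetical variable names:

    status, cost, runtime, additional_info = ta.run(config, cutoff=cutoff)
    if status != StatusType.SUCCESS:
        raise ValueError('Dummy prediction failed: %s' % str(additional_info))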
github automl / SMAC3 / test / test_tae / test_exec_func.py (View on GitHub)
def test_timeout(self):
        def run_over_time(*args):
            time.sleep(5)

        taf = ExecuteTAFuncDict(ta=run_over_time, stats=self.stats)
        rval = taf.run(config=None, cutoff=1)
        self.assertEqual(rval[0], StatusType.TIMEOUT)
        self.assertEqual(rval[1], 2147483647.0)
        self.assertGreaterEqual(rval[2], 0.0)
        self.assertEqual(rval[3], dict())
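The expected cost of 2147483647.0 is SMAC's MAXINT sentinel (2**31 - 1), imputed because the timed-out function never returned. For contrast, a function that finishes inside the cutoff comes back as SUCCESS with its return value as the cost; a sketch reusing scen from the first example above:

    def fast_target(config):
        return 0.5  # finishes well within the cutoff

    taf = ExecuteTAFuncDict(ta=fast_target, stats=Stats(scen))
    status, cost, runtime, additional_info = taf.run(config=None, cutoff=1)
    assert status == StatusType.SUCCESS and cost == 0.5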
github automl / SMAC3 / smac / tae / execute_ta_run_old.py (View on GitHub)
                fields = list(map(lambda x: x.strip(" "), fields))
                if len(fields) == 5:
                    status, runtime, runlength, quality, seed = fields
                    additional_info = {}
                else:
                    status, runtime, runlength, quality, seed, additional_info = fields
                    additional_info = {"additional_info": additional_info}

                runtime = min(float(runtime), cutoff)
                quality = float(quality)
                seed = int(seed)

        if status.upper() in ["SAT", "UNSAT", "SUCCESS"]:
            status = StatusType.SUCCESS
        elif status.upper() in ["TIMEOUT"]:
            status = StatusType.TIMEOUT
        elif status.upper() in ["CRASHED"]:
            status = StatusType.CRASHED
        elif status.upper() in ["ABORT"]:
            status = StatusType.ABORT
        elif status.upper() in ["MEMOUT"]:
            status = StatusType.MEMOUT
        else:
            self.logger.warn("Could not parse output of target algorithm. Expected format: "
                             "\"Result of this algorithm run: ,,,\"; "
                             "Treating as CRASHED run.")
            status = StatusType.CRASHED

        if status in [StatusType.CRASHED, StatusType.ABORT]:
            self.logger.warn(
                "Target algorithm crashed. Last 5 lines of stdout and stderr")
            self.logger.warn("\n".join(stdout_.split("\n")[-5:]))
github automl / auto-sklearn / autosklearn / automl.py (View on GitHub)
        params = []
        status = []
        for run_key in self.runhistory_.data:
            run_value = self.runhistory_.data[run_key]
            config_id = run_key.config_id
            config = self.runhistory_.ids_config[config_id]

            param_dict = config.get_dictionary()
            params.append(param_dict)
            mean_test_score.append(self._metric._optimum - \
                                  (self._metric._sign * run_value.cost))
            mean_fit_time.append(run_value.time)
            s = run_value.status
            if s == StatusType.SUCCESS:
                status.append('Success')
            elif s == StatusType.TIMEOUT:
                status.append('Timeout')
            elif s == StatusType.CRASHED:
                status.append('Crash')
            elif s == StatusType.ABORT:
                status.append('Abort')
            elif s == StatusType.MEMOUT:
                status.append('Memout')
            else:
                raise NotImplementedError(s)

            for hp_name in hp_names:
                if hp_name in param_dict:
                    hp_value = param_dict[hp_name]
                    mask_value = False
                else:
                    hp_value = np.NaN
github automl / auto-sklearn / autosklearn / smbo.py (View on GitHub)
    scenario_dict['input_psmac_dirs'] = backend.get_smac_output_glob(
        smac_run_id=seed if not scenario_dict['shared-model'] else '*',
    )
    scenario = Scenario(scenario_dict)
    if len(metalearning_configurations) > 0:
        default_config = scenario.cs.get_default_configuration()
        initial_configurations = [default_config] + metalearning_configurations
    else:
        initial_configurations = None
    rh2EPM = RunHistory2EPM4Cost(
        num_params=len(scenario.cs.get_hyperparameters()),
        scenario=scenario,
        success_states=[
            StatusType.SUCCESS,
            StatusType.MEMOUT,
            StatusType.TIMEOUT,
            # As long as we don't have a model for crashes yet!
            StatusType.CRASHED,
        ],
        impute_censored_data=False,
        impute_state=None,
    )
    return SMAC(
        scenario=scenario,
        rng=seed,
        runhistory2epm=rh2EPM,
        tae_runner=ta,
        initial_configurations=initial_configurations,
        runhistory=runhistory,
        run_id=seed,
    )
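Note the design choice: TIMEOUT (and even MEMOUT and CRASHED) runs are listed as success_states, so their costs still train the cost model instead of being discarded. A runtime-minimizing SMAC setup would more typically keep only SUCCESS and impute censored runs, roughly as follows (a sketch, not auto-sklearn code; imputor stands for a trained imputation model such as the RFRImputator exercised in the imputation test above):

    rh2epm_runtime = RunHistory2EPM4Cost(
        num_params=len(scenario.cs.get_hyperparameters()),
        scenario=scenario,
        success_states=[StatusType.SUCCESS],
        impute_censored_data=True,
        impute_state=[StatusType.CAPPED],
        imputor=imputor,  # hypothetical: e.g. an RFRImputator instance
    )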