How to use the smac.stats.stats.Stats class in smac

To help you get started, we’ve selected a few smac examples based on popular ways Stats is used in public projects.
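
Across these examples the pattern is the same: build a Scenario, wrap it in a Stats object, and call start_timing() before any target-algorithm runs. A minimal sketch of that flow, using the 0.x-era import paths the snippets below rely on (the empty ConfigurationSpace is a placeholder):

from ConfigSpace import ConfigurationSpace
from smac.scenario.scenario import Scenario
from smac.stats.stats import Stats

cs = ConfigurationSpace()  # placeholder; real runs define hyperparameters here
scenario = Scenario({'run_obj': 'quality', 'cs': cs, 'output_dir': ''})

stats = Stats(scenario)
stats.start_timing()  # record the wallclock start time
# ... run or evaluate configurations ...
stats.print_stats()   # print a summary of runs and time budgets used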


github automl / SMAC3 / test/test_intensify/test_intensify.py (View on Github)
    def setUp(self):
        unittest.TestCase.setUp(self)

        self.rh = RunHistory(aggregate_func=average_cost)
        self.cs = get_config_space()
        self.config1 = Configuration(self.cs,
                                     values={'a': 0, 'b': 100})
        self.config2 = Configuration(self.cs,
                                     values={'a': 100, 'b': 0})
        self.config3 = Configuration(self.cs,
                                     values={'a': 100, 'b': 100})

        self.scen = Scenario({"cutoff_time": 2, 'cs': self.cs,
                              "run_obj": 'runtime',
                              "output_dir": ''})
        self.stats = Stats(scenario=self.scen)
        self.stats.start_timing()

        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
github automl / SMAC3 / test/test_intensify/test_successive_halving.py (View on Github)
    def setUp(self):
        unittest.TestCase.setUp(self)

        self.rh = RunHistory()
        self.cs = get_config_space()
        self.config1 = Configuration(self.cs,
                                     values={'a': 0, 'b': 100})
        self.config2 = Configuration(self.cs,
                                     values={'a': 100, 'b': 0})
        self.config3 = Configuration(self.cs,
                                     values={'a': 100, 'b': 100})

        self.scen = Scenario({"cutoff_time": 2, 'cs': self.cs,
                              "run_obj": 'runtime',
                              "output_dir": ''})
        self.stats = Stats(scenario=self.scen)
        self.stats.start_timing()

        self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
github automl / SMAC3 / test/test_utils/io/test_traj_logging.py (View on Github)
    def test_oserror(self):
        scen = Scenario(scenario={'run_obj': 'quality', 'cs': self.cs,
                                  'output_dir': ''})
        stats = Stats(scen)
        # test OSError
        with patch('os.makedirs') as osMock:
            osMock.side_effect = OSError()
            self.assertRaises(OSError, TrajLogger, output_dir='./tmp_test_folder', stats=stats)
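
The test above exercises only the failure path: TrajLogger must surface the OSError raised when its output directory cannot be created. Normal construction looks the same with a writable directory; a minimal sketch, assuming the same 0.x-era API as these tests (stats and config stand in for objects built as in the setUp snippets above, and add_entry's signature may differ in newer versions):

from smac.utils.io.traj_logging import TrajLogger

traj_logger = TrajLogger(output_dir='./tmp_test_folder', stats=stats)
# each incumbent update is appended to the trajectory files in output_dir
traj_logger.add_entry(train_perf=0.9, incumbent_id=1, incumbent=config)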
github automl / auto-sklearn / autosklearn/automl.py (View on Github)
    def _do_dummy_prediction(self, datamanager, num_run):

        # When using partial-cv it makes no sense to do dummy predictions
        if self._resampling_strategy in ['partial-cv',
                                         'partial-cv-iterative-fit']:
            return num_run

        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = self._time_for_task
        # This stats object is a hack - maybe the SMAC stats object should
        # already be generated here!
        stats = Stats(scenario_mock)
        stats.start_timing()
        ta = ExecuteTaFuncWithQueue(backend=self._backend,
                                    autosklearn_seed=self._seed,
                                    resampling_strategy=self._resampling_strategy,
                                    initial_num_run=num_run,
                                    logger=self._logger,
                                    stats=stats,
                                    metric=self._metric,
                                    memory_limit=memory_limit,
                                    disable_file_output=self._disable_evaluator_output,
                                    **self._resampling_strategy_arguments)

        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
github automl / SMAC3 / examples/branin/restore_state.py (View on Github)
    #smac.solver.scenario.ta_run_limit = 50
    #smac.optimize()

    # Or, to show the whole process of recovering a SMAC-run from the output
    # directory, create a new scenario with an extended budget:
    new_scenario = Scenario(orig_scen_dict,
                            cmd_options={'runcount_limit': 50,  # overwrite these args
                                         'output_dir' : 'restored'})

    # We load the runhistory, ...
    rh_path = os.path.join(old_output_dir, "runhistory.json")
    runhistory = RunHistory(aggregate_func=None)
    runhistory.load_json(rh_path, new_scenario.cs)
    # ... stats, ...
    stats_path = os.path.join(old_output_dir, "stats.json")
    stats = Stats(new_scenario)
    stats.load(stats_path)
    # ... and trajectory.
    traj_path = os.path.join(old_output_dir, "traj_aclib2.json")
    trajectory = TrajLogger.read_traj_aclib_format(
        fn=traj_path, cs=new_scenario.cs)
    incumbent = trajectory[-1]["incumbent"]

    # Now we can initialize SMAC with the recovered objects and restore the
    # state where we left off. By providing stats and a restore_incumbent, SMAC
    # automatically detects the intention of restoring a state.
    smac = SMAC(scenario=new_scenario,
                runhistory=runhistory,
                stats=stats,
                restore_incumbent=incumbent,
                run_id=1)
    # Because we changed the output_dir, we might want to copy the old
    # run's trajectory over to the new location as well.
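
The stats.json loaded above is produced by the original run: Stats.save() writes counters and timing information into the run's output directory, and Stats.load() reads them back. A minimal sketch of both sides, assuming the 0.x API used in this example:

import os
from smac.stats.stats import Stats

# on the original run, before shutting down:
stats.save()  # writes stats.json into the run's output directory

# later, when restoring (mirroring the example above):
stats = Stats(new_scenario)
stats.load(os.path.join(old_output_dir, "stats.json"))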
github automl / auto-sklearn / scripts/run_auto-sklearn_for_metadata_generation.py (View on Github)
incumbent_id_to_performance = {}
validated_trajectory = []

if is_test:
    memory_limit_factor = 1
else:
    memory_limit_factor = 2

for entry in trajectory:
    incumbent_id = entry.incumbent_id
    train_performance = entry.train_perf
    if incumbent_id not in incumbent_id_to_model:
        config = entry.incumbent

        logger = logging.getLogger('Testing:)')
        stats = Stats(
            Scenario({
                'cutoff_time': per_run_time_limit * 2,
                'run_obj': 'quality',
            })
        )
        stats.start_timing()
        # To avoid the output "first run crashed"...
        stats.ta_runs += 1
        ta = ExecuteTaFuncWithQueue(backend=automl._automl._backend,
                                    autosklearn_seed=seed,
                                    resampling_strategy='test',
                                    memory_limit=memory_limit_factor * automl_arguments['ml_memory_limit'],
                                    disable_file_output=True,
                                    logger=logger,
                                    stats=stats,
                                    all_scoring_functions=True,
github automl / auto-sklearn / autosklearn/automl.py (View on Github)
    def _do_dummy_prediction(self, datamanager, num_run):

        # When using partial-cv it makes no sense to do dummy predictions
        if self._resampling_strategy in ['partial-cv',
                                         'partial-cv-iterative-fit']:
            return num_run

        self._logger.info("Starting to create dummy predictions.")
        memory_limit = int(self._ml_memory_limit)
        scenario_mock = unittest.mock.Mock()
        scenario_mock.wallclock_limit = self._time_for_task
        # This stats object is a hack - maybe the SMAC stats object should
        # already be generated here!
        stats = Stats(scenario_mock)
        stats.start_timing()
        ta = ExecuteTaFuncWithQueue(backend=self._backend,
                                    autosklearn_seed=self._seed,
                                    resampling_strategy=self._resampling_strategy,
                                    initial_num_run=num_run,
                                    logger=self._logger,
                                    stats=stats,
                                    **self._resampling_strategy_arguments)

        status, cost, runtime, additional_info = \
            ta.run(1, cutoff=self._time_for_task, memory_limit=memory_limit)
        if status == StatusType.SUCCESS:
            self._logger.info("Finished creating dummy predictions.")
        else:
            self._logger.error('Error creating dummy predictions: %s',
                               additional_info)
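
Both auto-sklearn snippets rely on the trick the inline comment calls a hack: Stats only reads a few attributes from the scenario it is given, so a unittest.mock.Mock with wallclock_limit set is enough to construct one. The pattern in isolation (the limit value is a placeholder):

import unittest.mock
from smac.stats.stats import Stats

scenario_mock = unittest.mock.Mock()
scenario_mock.wallclock_limit = 3600  # placeholder; the snippets use the task's time budget
stats = Stats(scenario_mock)
stats.start_timing()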
github automl / SMAC3 / smac/facade/experimental/epils_facade.py (View on Github)
                 stats: Stats = None,
                 rng: np.random.RandomState = None,
                 run_id: int = 1):
        """Constructor"""
        self.logger = logging.getLogger(
            self.__module__ + "." + self.__class__.__name__)

        aggregate_func = average_cost
        self.runhistory = None
        self.trajectory = None

        # initialize stats object
        if stats:
            self.stats = stats
        else:
            self.stats = Stats(scenario)

        self.output_dir = create_output_directory(scenario, run_id)
        scenario.write()

        # initialize empty runhistory
        if runhistory is None:
            runhistory = RunHistory(aggregate_func=aggregate_func)
        # inject aggr_func if necessary
        if runhistory.aggregate_func is None:
            runhistory.aggregate_func = aggregate_func

        # initialize random number generator
        num_run, rng = self._get_rng(rng=rng)

        # reset random number generator in config space to draw different
        # random configurations with each seed given to SMAC
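
The facade above shows the usual ownership rule for Stats: reuse a caller-supplied object (as when restoring a run, see the restore_state example) and only create a fresh one from the scenario otherwise. The same pattern in isolation, sketched against the 0.x API with a hypothetical helper name:

from smac.stats.stats import Stats

def resolve_stats(scenario, stats=None):
    # reuse caller-supplied stats (e.g. from a restored run),
    # otherwise start fresh from the scenario
    return stats if stats is not None else Stats(scenario)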