How to use the orion.core module in orion

To help you get started, we’ve selected a few orion.core examples drawn from popular ways it is used in public projects.
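
Most of the examples below drive the command-line entry point programmatically through orion.core.cli.main, which takes the same arguments as the orion command and, as the tests below show, returns a shell-style return code. A minimal sketch of that pattern (the experiment name, script path, and prior are illustrative, not taken from the examples):

import orion.core.cli

# Register an experiment without running it; equivalent to running
# `orion init_only -n my_exp ./my_script.py -x~uniform(0,1)` on the shell.
returncode = orion.core.cli.main(
    ['init_only', '-n', 'my_exp', './my_script.py', '-x~uniform(0,1)']
)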

Epistimio/orion: tests/functional/branching/test_branching.py (view on GitHub)
def test_init_w_version_from_parent_w_children(clean_db, monkeypatch):
    """Test that init of experiment from version with children fails."""
    monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
    orion.core.cli.main("init_only -n experiment ./black_box.py -x~normal(0,1)".split(" "))
    orion.core.cli.main("init_only -n experiment ./black_box.py -x~normal(0,1) "
                        "-y~+normal(0,1)".split(" "))

    with pytest.raises(ValueError) as exc:
        orion.core.cli.main("init_only -n experiment -v 1 ./black_box.py "
                            "-x~normal(0,1) -y~+normal(0,1) -z~normal(0,1)".split(" "))

    assert "Experiment name" in str(exc.value)

Epistimio/orion: tests/unittests/client/test_client.py (view on GitHub)
def test_create_experiment_new_default(self):
    """Test creating a new experiment with all defaults"""
    name = 'all_default'
    space = {'x': 'uniform(0, 10)'}
    with OrionState():
        experiment = create_experiment(name='all_default', space=space)

        assert experiment.name == name
        assert experiment.space.configuration == space

        assert experiment.max_trials == orion.core.config.experiment.max_trials
        assert experiment.working_dir == orion.core.config.experiment.working_dir
        assert experiment.algorithms.configuration == {'random': {'seed': None}}
        assert experiment.configuration['producer'] == {'strategy': 'MaxParallelStrategy'}
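
The test above relies on the library-wide defaults stored in orion.core.config.experiment. When you need different values, they can be passed directly to create_experiment. The sketch below is illustrative rather than taken from the test; the experiment name is hypothetical and the overrides simply replace the defaults asserted above:

from orion.client import create_experiment

# Override the defaults checked in the test above (values are illustrative).
experiment = create_experiment(
    name='my_experiment',
    space={'x': 'uniform(0, 10)'},
    max_trials=50,                       # instead of orion.core.config.experiment.max_trials
    algorithms={'random': {'seed': 1}},  # same structure as the default configuration
)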

Epistimio/orion: tests/functional/commands/test_init_only_command.py (view on GitHub)
def test_no_name(capsys):
    """Try to run the command without providing an experiment name"""
    returncode = orion.core.cli.main(["init_only", "--exp-max-trials", "10"])
    assert returncode == 1

    captured = capsys.readouterr().err

    assert captured == 'Error: No name provided for the experiment.\n'

Epistimio/orion: tests/functional/example/test_scikit_learn.py (view on GitHub)
def test_orion_runs_script(monkeypatch):
    """Verifies Oríon can execute the example script."""
    script = os.path.abspath("examples/scikitlearn-iris/main.py")
    monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
    config = "orion_config.yaml"

    orion.core.cli.main(["hunt", "--config", config, "python", script,
                         "orion~choices([0.1])"])

    experiment = create_experiment(name="scikit-iris-tutorial")
    assert experiment is not None
    assert experiment.version == 1

    keys = experiment.space.keys()
    assert len(keys) == 1
    assert '/_pos_2' in keys

    storage = get_storage()
    trials = storage.fetch_trials(uid=experiment.id)
    assert len(trials) == 1

    trial = trials[0]
    assert trial.status == 'completed'

Epistimio/orion: tests/functional/commands/test_setup_command.py (view on GitHub)
def test_creation_when_not_existing(monkeypatch, tmp_path):
    """Test if a configuration file is created when it does not exist."""
    config_path = str(tmp_path) + "/tmp_config.yaml"
    monkeypatch.setattr(orion.core, "DEF_CONFIG_FILES_PATHS", [config_path])
    monkeypatch.setattr(builtins, "input", _mock_input(['type', 'name', 'host']))

    try:
        os.remove(config_path)
    except FileNotFoundError:
        pass

    orion.core.cli.main(["db", "setup"])

    assert os.path.exists(config_path)

    with open(config_path, 'r') as output:
        content = yaml.safe_load(output)

    assert content == {"database": {"type": "type", "name": "name", "host": "host"}}
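
The setup wizard above writes a minimal YAML file containing a single database section, and the test makes Oríon look for it by monkeypatching orion.core.DEF_CONFIG_FILES_PATHS. You can also write an equivalent file yourself; the sketch below assumes a PickledDB backend and uses an illustrative path:

import yaml

# Same structure as the dict asserted in the test, with realistic values
# for a PickledDB backend (type and host are the relevant fields here).
config = {'database': {'type': 'pickleddb', 'host': '/tmp/orion_db.pkl'}}

with open('/tmp/orion_config.yaml', 'w') as output:
    yaml.safe_dump(config, output)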

Epistimio/orion: tests/functional/commands/test_list_command.py (view on GitHub)
def test_no_exp(monkeypatch, clean_db, capsys):
    """Test that nothing is printed when there are no experiments."""
    monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
    orion.core.cli.main(['list'])

    captured = capsys.readouterr().out

    assert captured == ""

Epistimio/orion: src/orion/client/experiment.py (view on GitHub)
        `BrokenExperiment`
            if too many trials failed to run and the experiment cannot continue.
            This is determined by ``max_broken`` in the configuration of the experiment.

        `SampleTimeout`
            if the algorithm of the experiment could not sample new unique points.

        """
        if self.is_broken:
            raise BrokenExperiment("Trials failed too many times")

        if self.is_done:
            return None

        try:
            trial = orion.core.worker.reserve_trial(self._experiment, self._producer)

        except WaitingForTrials as e:
            if self.is_broken:
                raise BrokenExperiment("Trials failed too many times") from e

            raise e

        except SampleTimeout as e:
            if self.is_broken:
                raise BrokenExperiment("Trials failed too many times") from e

            raise e

        if trial is not None:
            self._maintain_reservation(trial)
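
The docstring above documents when reserving a trial gives up: BrokenExperiment when too many trials failed, and SampleTimeout when the algorithm could not sample new unique points. A client-side loop typically catches these around suggest/observe. The sketch below is a minimal illustration; it assumes the exceptions are importable from orion.core.utils.exceptions and that evaluate is your own objective function:

from orion.client import create_experiment
from orion.core.utils.exceptions import BrokenExperiment, SampleTimeout

experiment = create_experiment(name='my_experiment', space={'x': 'uniform(0, 10)'})

while not experiment.is_done:
    try:
        trial = experiment.suggest()
    except (BrokenExperiment, SampleTimeout):
        break  # too many failed trials, or no new unique points could be sampled

    objective = evaluate(**trial.params)  # `evaluate` is your own objective function
    experiment.observe(trial, [dict(name='objective', type='objective', value=objective)])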

Epistimio/orion: src/orion/core/io/experiment_builder.py (view on GitHub)
    storage: dict, optional
        Configuration of the storage backend.

    """
    experiment = Experiment(name=name, version=version)
    experiment._id = kwargs.get('_id', None)  # pylint:disable=protected-access
    experiment.pool_size = kwargs.get('pool_size')
    if experiment.pool_size is None:
        experiment.pool_size = orion.core.config.experiment.get(
            'pool_size', deprecated='ignore')
    experiment.max_trials = kwargs.get('max_trials', orion.core.config.experiment.max_trials)
    experiment.space = _instantiate_space(space)
    experiment.algorithms = _instantiate_algo(experiment.space, kwargs.get('algorithms'))
    experiment.producer = kwargs.get('producer', {})
    experiment.producer['strategy'] = _instantiate_strategy(experiment.producer.get('strategy'))
    experiment.working_dir = kwargs.get('working_dir', orion.core.config.experiment.working_dir)
    experiment.metadata = kwargs.get('metadata', {'user': kwargs.get('user', getpass.getuser())})
    experiment.refers = kwargs.get('refers', {'parent_id': None, 'root_id': None, 'adapter': []})
    experiment.refers['adapter'] = _instantiate_adapters(experiment.refers.get('adapter', []))

    return experiment

Epistimio/orion: src/orion/core/utils/backward.py (view on GitHub)
def populate_priors(metadata):
    """Compute parser state and priors based on user_args and populate metadata."""
    if 'user_args' not in metadata:
        return

    parser = OrionCmdlineParser(orion.core.config.user_script_config)
    parser.parse(metadata["user_args"])
    metadata["parser"] = parser.get_state_dict()
    metadata["priors"] = dict(parser.priors)

Epistimio/orion: src/orion/core/io/database/pickleddb.py (view on GitHub)
from contextlib import contextmanager
import logging
import os
import pickle
from pickle import PicklingError

from filelock import FileLock, Timeout

import orion.core
from orion.core.io.database import AbstractDB, DatabaseTimeout
from orion.core.io.database.ephemeraldb import EphemeralDB

log = logging.getLogger(__name__)

DEFAULT_HOST = os.path.join(orion.core.DIRS.user_data_dir, 'orion', 'orion_db.pkl')

TIMEOUT_ERROR_MESSAGE = """\
Could not acquire lock for PickledDB after {} seconds.

This is likely due to one or more of the following scenarios:

1. There is a large number of workers and many simultaneous queries. This typically occurs
   when the task to optimize is short (a few minutes). Try to reduce the number of workers
   to below 50.

2. The database is growing large, with thousands of trials and many experiments.
   If so, you can use a different PickledDB (that is, a different file via a different `host`)
   for each experiment to alleviate this issue.

3. The filesystem is slow. Parallel filesystems on HPC often suffer from
   large pool of users generating frequent I/O. In this case try using a separate