How to use the sacred.Experiment class in sacred

To help you get started, we've selected a few examples of sacred.Experiment in use, drawn from popular open-source projects.

From github.com/IDSIA/sacred: examples/minimal.py (view on GitHub)
#!/usr/bin/env python
# coding=utf-8
"""
This is a very basic example of how to use sacred.
"""
from __future__ import division, print_function, unicode_literals
from sacred import Experiment

ex = Experiment()

@ex.config
def cfg():
    a = 10
    b = 17
    c = a + b


@ex.automain
def main(a, b, c, _log):
    print('a =', a)
    print('b =', b)
    print('c =', c)
    # _log is the logger that sacred injects into captured functions
    _log.debug('HA!')
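
Because of @ex.automain, this file doubles as a command-line program, and config values declared in cfg() can be overridden at launch time; sacred then re-evaluates the config function, so derived values such as c stay consistent. A minimal usage sketch, assuming the file is saved as minimal.py and is importable:

# From the command line:
#   python minimal.py                # a=10, b=17, c=27
#   python minimal.py with a=1       # c is recomputed as 1 + 17 = 18
#   python minimal.py print_config   # show the resolved configuration
#
# Or programmatically:
from minimal import ex

run = ex.run(config_updates={'a': 100})
print(run.config['c'])  # 117, because c = a + b is re-evaluated with a=100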

From github.com/arthurmensch/modl: examples/unmask/unmask_contrast_archi.py (view on GitHub)
from os.path import join

from nilearn.datasets import load_mni152_brain_mask
from sacred import Experiment
from sacred.observers import MongoObserver
from joblib import Memory  # sklearn.externals.joblib has been removed from recent scikit-learn

from modl.input_data.fmri.monkey import monkey_patch_nifti_image

monkey_patch_nifti_image()

from modl.datasets import get_data_dirs, fetch_hcp
from modl.datasets.archi import fetch_archi, INTERESTING_CONTRASTS
from modl.input_data.fmri.unmask import create_raw_contrast_data

unmask_task = Experiment('unmask_contrast_archi')
observer = MongoObserver.create(db_name='amensch', collection='runs')
unmask_task.observers.append(observer)


@unmask_task.config
def config():
    n_jobs = 30
    batch_size = 1200


@unmask_task.automain
def run(n_jobs, batch_size, _run):
    imgs = fetch_archi()
#    interesting_con = INTERESTING_CONTRASTS
#    imgs = imgs.loc[(slice(None), slice(None), interesting_con), :]
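
The _run object requested by run() above is the live Run instance; it is what the MongoObserver persists. A minimal sketch of recording metrics and metadata through it (the MongoDB URL and names here are assumptions, not values from the project):

from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment('unmask_demo')
# Recent sacred versions accept the constructor directly;
# MongoObserver.create() is the older spelling used in the snippet above.
ex.observers.append(MongoObserver(url='localhost:27017', db_name='amensch'))


@ex.config
def config():
    n_jobs = 4


@ex.automain
def run(n_jobs, _run):
    for step in range(3):
        _run.log_scalar('progress', step / 2, step)  # time series stored by the observer
    _run.info['n_jobs_used'] = n_jobs  # free-form metadata attached to the run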

From github.com/srianant/DNN_Hyperparameter_Optimization: Optimization/capstone/NeuralNetwork/nn_dist/optimizer.py (view on GitHub)
import os
import logging
import shutil
import psutil
import time
import select
import pickle
import sys
import numpy as np
from numpy.random import rand
from sacred import Experiment
from sacred.observers import MongoObserver
from pprint import pprint

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
python_version = sys.version_info.major

ex = Experiment()
mongo_observer = MongoObserver.create()
ex.observers.append(mongo_observer)
ex.add_config('optimizer_config.yaml')

# Configure your logger here
logger = logging.getLogger('hyper_opt')
ex.logger = logger

class Optimizer(object):
    """Neural Network Hyperparameter Optimizer Class.
    """

    def __init__(self, config):
        """Optimize class object initialization

        Args:
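
Two things this snippet relies on are worth spelling out: ex.add_config accepts either a dict or a YAML/JSON file, and assigning to ex.logger replaces sacred's default logger. A small sketch under those assumptions (file names and keys are illustrative):

import logging

from sacred import Experiment

ex = Experiment('hyper_opt_demo')

# Config can come from a dict, a YAML/JSON file, or both; later calls override earlier ones.
ex.add_config({'learning_rate': 0.01, 'epochs': 10})
# ex.add_config('optimizer_config.yaml')  # as in the snippet above

logger = logging.getLogger('hyper_opt')
logger.setLevel(logging.INFO)
ex.logger = logger  # this logger is handed to captured functions as _log


@ex.automain
def main(learning_rate, epochs, _log):
    _log.info('lr=%s, epochs=%s', learning_rate, epochs)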

From github.com/fgnt/sms_wsj: sms_wsj/train_baseline_asr.py (view on GitHub)
import os
from pathlib import Path

import sacred

from sms_wsj.kaldi.utils import run_process, pc2_environ

kaldi_root = Path(os.environ['KALDI_ROOT'])
assert kaldi_root.exists(), (
    f'The environment variable KALDI_ROOT has to point to a working kaldi'
    f' root; at the moment it points to {kaldi_root}'
)
assert (kaldi_root / 'src').exists(), (
    f'The environment variable KALDI_ROOT has to point to a working kaldi'
    f' root (no src/ directory was found); at the moment it points to'
    f' {kaldi_root}'
)
assert (kaldi_root / 'src' / 'base' / '.depend.mk').exists(), (
    'The kaldi installation your KALDI_ROOT points to has not been compiled;'
    ' please refer to the kaldi documentation for how to install it'
)
ex = sacred.Experiment('Kaldi ASR baseline training')


@ex.config
def config():
    egs_path = None
    json_path = None
    # only used for the paderborn parallel computing center
    if 'CCS_NODEFILE' in os.environ:
        num_jobs = len(list(
            Path(os.environ['CCS_NODEFILE']).read_text().strip().splitlines()
        ))
    else:
        num_jobs = os.cpu_count()
    stage = 0
    end_stage = 20
    kaldi_cmd = 'run.pl'
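
Values computed inside the config function (such as num_jobs derived from CCS_NODEFILE or os.cpu_count()) are only defaults; they can still be inspected and overridden when the experiment is launched. Hypothetical invocations, assuming the script is run as a module:

#   python -m sms_wsj.train_baseline_asr print_config
#   python -m sms_wsj.train_baseline_asr with stage=4 end_stage=10 num_jobs=8
#
# "with key=value" updates take precedence over anything the @ex.config function computes.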

From github.com/IDSIA/sacred: examples/07_magic.py (view on GitHub)
"""A standard machine learning task using sacred's magic."""
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn import svm, datasets, model_selection

ex = Experiment("svm")

ex.observers.append(FileStorageObserver("my_runs"))


@ex.config  # Configuration is defined through local variables.
def cfg():
    C = 1.0
    gamma = 0.7
    kernel = "rbf"
    seed = 42


@ex.capture
def get_model(C, gamma, kernel):
    return svm.SVC(C=C, kernel=kernel, gamma=gamma)
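
Because get_model is decorated with @ex.capture, it can be called with no arguments and sacred injects C, gamma and kernel from the active configuration. A hedged sketch of how the rest of this example typically continues (the exact body of 07_magic.py may differ):

@ex.automain
def run(_run):
    X, y = datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=0.2, random_state=0)
    clf = get_model()  # C, gamma and kernel are filled in from the config
    clf.fit(X_train, y_train)
    return clf.score(X_test, y_test)  # the return value is stored as the run's result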

From github.com/fgnt/sms_wsj: sms_wsj/database/create_json_for_written_files.py (view on GitHub)
in case of a change in the database location by using
the old sms_wsj.json as intermediate json.
However, this script does not change the speaker
and utterance combination, log weights, etc. which are
specified in the intermediate json.

"""

from sms_wsj.database.write_files import check_files, KEY_MAPPER
from sms_wsj.database.utils import _example_id_to_rng
import json
import sacred
from pathlib import Path
from lazy_dataset.database import JsonDatabase

ex = sacred.Experiment('Write SMS-WSJ json after wav files are written')


def create_json(db_dir, intermediate_json_path, write_all, snr_range=(20, 30)):
    db = JsonDatabase(intermediate_json_path)
    json_dict = dict(datasets=dict())
    database_dict = db.data['datasets']

    if write_all:
        key_mapper = KEY_MAPPER
    else:
        key_mapper = {'observation': 'observation'}

    for dataset_name, dataset in database_dict.items():
        dataset_dict = dict()
        for ex_id, ex in dataset.items():
            for key, data_type in key_mapper.items():
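
The snippet defines the experiment and a plain helper function, but the command wiring is cut off. One plausible, purely illustrative way to expose create_json through sacred (config names here are hypothetical, not the repository's actual code):

@ex.config
def config():
    db_dir = None                   # hypothetical config entries for illustration
    intermediate_json_path = None
    write_all = True


@ex.automain
def main(db_dir, intermediate_json_path, write_all):
    assert db_dir is not None and intermediate_json_path is not None
    json_dict = create_json(Path(db_dir), Path(intermediate_json_path), write_all)
    # the full script presumably writes json_dict back to disk afterwards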

From github.com/arthurmensch/cogspaces: examples/multiple/predict_logistic_multi.py (view on GitHub)
import os
import sys
from os import path
from os.path import join

import numpy as np
from cogspaces.pipeline import get_output_dir
from sacred import Experiment
from sacred.observers import FileStorageObserver
# joblib replaces the sklearn.externals.joblib vendored copy removed from scikit-learn
from joblib import Parallel, delayed
from sklearn.utils import check_random_state

# Add examples to known modules
sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))
from examples.predict import exp as single_exp

exp = Experiment('predict_logistic_multi')
basedir = join(get_output_dir(), 'predict_logistic_multi')
if not os.path.exists(basedir):
    os.makedirs(basedir)
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    n_jobs = 36
    n_seeds = 10
    seed = 2


@single_exp.config
def config():
    reduced_dir = join(get_output_dir(), 'reduced')
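
The reason for importing the single experiment is that a sacred Experiment can be launched programmatically with Experiment.run(). A hedged sketch of the driver loop such multi-run scripts typically use (the actual body is cut off above; run_single is a hypothetical helper):

def run_single(seed):
    # Launch one run of the imported experiment with a fixed seed.
    run = single_exp.run(config_updates={'seed': int(seed)})
    return run.result


@exp.automain
def main(n_jobs, n_seeds, seed):
    seeds = check_random_state(seed).randint(np.iinfo('int32').max, size=n_seeds)
    # The threading backend avoids pickling the Experiment object; a simplified choice.
    return Parallel(n_jobs=n_jobs, backend='threading')(
        delayed(run_single)(s) for s in seeds)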

From github.com/arthurmensch/modl: exps/multi_decompose_images.py (view on GitHub)
import os
import sys
from os import path
from os.path import join

import numpy as np
from sacred import Experiment
from sacred.observers import FileStorageObserver
# joblib replaces the sklearn.externals.joblib vendored copy removed from scikit-learn
from joblib import Parallel, delayed
from sklearn.utils import check_random_state

from modl.utils.system import get_output_dir

# Add examples to known modules
sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))
from exps.exp_decompose_images import exp as single_exp

exp = Experiment('multi_decompose_images')
basedir = join(get_output_dir(), 'multi_decompose_images')
if not os.path.exists(basedir):
    os.makedirs(basedir)
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    n_jobs = 15
    n_seeds = 1
    seed = 1


@single_exp.config
def config():
    batch_size = 200
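
Worth noting: the defaults added here via @single_exp.config apply to every child run launched from this wrapper, while explicit config_updates win for the keys they set. A hypothetical check of that merging behaviour:

run = single_exp.run(config_updates={'seed': 0})
assert run.config['batch_size'] == 200  # default injected by @single_exp.config above
assert run.config['seed'] == 0          # explicit update takes precedence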

From github.com/arthurmensch/cogspaces: sandbox/exps_old/old/exp_predict.py (view on GitHub)
import numpy as np
import pandas as pd
from cogspaces.pipeline import get_output_dir, make_data_frame, split_folds, \
    MultiDatasetTransformer
from joblib import load
from os.path import join
from sacred import Experiment
from sacred.observers import FileStorageObserver
from scipy.linalg import svd
from joblib import dump  # sklearn.externals.joblib has been removed from recent scikit-learn

from cogspaces.models.trace import TraceClassifier

idx = pd.IndexSlice

exp = Experiment('predict')
basedir = join(get_output_dir(), 'predict')
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    datasets = ['archi']
    reduced_dir = join(get_output_dir(), 'reduced')
    unmask_dir = join(get_output_dir(), 'unmasked')
    source = 'hcp_rs_concat'
    test_size = {'hcp': .1, 'archi': .5, 'brainomics': .5, 'camcan': .5,
                 'la5c': .5, 'full': .5}
    train_size = dict(hcp=None, archi=None, la5c=None, brainomics=None,
                      camcan=None,
                      human_voice=None)
    dataset_weights = {'brainomics': 1, 'archi': 1, 'hcp': 1}
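
Dictionary-valued config entries such as test_size can be overridden per key from the command line using dotted paths, so a single fraction can be changed without retyping the whole dict. A hypothetical invocation, assuming the script is named exp_predict.py:

#   python exp_predict.py with "test_size.archi=0.3"
#
# Only test_size['archi'] changes; the other dataset fractions keep their defaults.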

From github.com/svip-lab/PlanarReconstruction: main.py (view on GitHub)
import os

import torch.utils.data as data
import torchvision.transforms as tf

from sacred import Experiment

from models.baseline_same import Baseline as UNet
from utils.loss import hinge_embedding_loss, surface_normal_loss, parameter_loss, \
    class_balanced_cross_entropy_loss
from utils.misc import AverageMeter, get_optimizer
from utils.metric import eval_iou, eval_plane_prediction
from utils.disp import tensor_to_image
from utils.disp import colors_256 as colors
from bin_mean_shift import Bin_Mean_Shift
from modules import get_coordinate_map
from utils.loss import Q_loss
from instance_parameter_loss import InstanceParameterLoss
from match_segmentation import MatchSegmentation

ex = Experiment()


class PlaneDataset(data.Dataset):
    def __init__(self, subset='train', transform=None, root_dir=None):
        assert subset in ['train', 'val']
        self.subset = subset
        self.transform = transform
        self.root_dir = os.path.join(root_dir, subset)
        self.txt_file = os.path.join(root_dir, subset + '.txt')

        # Use a context manager so the list file is closed after reading.
        with open(self.txt_file, 'r') as f:
            self.data_list = [line.strip() for line in f]
        self.precompute_K_inv_dot_xy_1()

    def get_plane_parameters(self, plane, plane_nums, segmentation):
        valid_region = segmentation != 20
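
The dataset class receives root_dir as a plain constructor argument; one hedged sketch of how a training entry point in a repository like this might pass sacred config down to it (all names and defaults below are illustrative, not the repository's actual code):

@ex.config
def config():
    root_dir = 'data/planes'  # hypothetical default
    subset = 'train'
    batch_size = 16


@ex.main
def train(root_dir, subset, batch_size, _log):
    dataset = PlaneDataset(subset=subset, transform=tf.ToTensor(), root_dir=root_dir)
    loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
    _log.info('loaded %d samples from %s', len(dataset.data_list), root_dir)
    # ... build the UNet model and run the training loop over `loader` here


if __name__ == '__main__':
    ex.run_commandline()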