How to use sacred.SETTINGS in sacred

To help you get started, we’ve selected a few sacred.SETTINGS examples, based on popular ways it is used in public projects.
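Before the examples, one clarification: sacred.SETTINGS is not a function but a nested options object (a "dotted dict") that controls library-wide behaviour. A minimal sketch of the three settings that recur in the snippets below:

from sacred import SETTINGS

# Capture stdout/stderr by patching sys.stdout instead of the OS-level
# file descriptors (more robust under multiprocessing).
SETTINGS.CAPTURE_MODE = 'sys'

# Allow captured functions to mutate the injected config.
SETTINGS.CONFIG.READ_ONLY_CONFIG = False

# Record this environment variable with every run's host info.
SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')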


github IDSIA / sacred / sacred / config / config_scope.py View on Github
import re
from sacred import SETTINGS

def is_ignored(line):
    # True if the comment line matches any configured ignore pattern.
    for pattern in SETTINGS.CONFIG.IGNORED_COMMENTS:
        if re.match(pattern, line) is not None:
            return True
    return False
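This is how sacred itself consumes SETTINGS.CONFIG.IGNORED_COMMENTS: every entry is a regular expression tried against a comment line, and matching comments are excluded from the config documentation sacred collects. A hedged sketch of extending the list; the '^noqa' pattern is an illustrative addition, not a sacred default:

from sacred import SETTINGS

# Keep '# noqa' linter directives out of collected config doc-comments.
SETTINGS.CONFIG.IGNORED_COMMENTS.append('^noqa')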
github khui / copacrr / evals / docpairs.py View on Github
from utils.config import train_test_years, file2name, qrelfdir
from utils.eval_utils import read_run, jud_label, label_jud, year_label_jud, get_epoch_from_val, get_model_param
from utils.year_2_qids import qid_year, year_qids, get_qrelf
import numpy as np, matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pandas import DataFrame as df
import logging, warnings

import sacred
from sacred.utils import apply_backspaces_and_linefeeds

ex = sacred.Experiment('metrics')
ex.path = 'metrics' # name of the experiment
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('USER')
ex.captured_out_filter = apply_backspaces_and_linefeeds

from utils.config import default_params
default_params = ex.config(default_params)


def create_docpairs(qid_cwid_label, test_qids, qid_year):
    docpairs = {}
    for qid in qid_cwid_label:
        assert qid in test_qids
        year = qid_year[qid]
        docpairs.setdefault(year, {})
        
        label_cwids = {}
        for cwid, raw_label in qid_cwid_label[qid].items():
            jud = year_label_jud[year][raw_label]
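Two idioms from this snippet are worth isolating: HOST_INFO.CAPTURED_ENV names environment variables that sacred records with each run's host info, and captured_out_filter post-processes captured stdout (apply_backspaces_and_linefeeds collapses the carriage-return updates emitted by Keras-style progress bars). A minimal, self-contained sketch:

import sacred
from sacred.utils import apply_backspaces_and_linefeeds

# Store these environment variables with each run's host info, where
# observers (FileStorage, Mongo, ...) can record them.
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('USER')

ex = sacred.Experiment('demo')
# Collapse backspaces and carriage returns in the captured output.
ex.captured_out_filter = apply_backspaces_and_linefeeds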
github EdwinYam / J-Net / Training.py View on Github
import os

import Datasets
import Utils
import Models.UnetSpectrogramSeparator
import Models.UnetAudioSeparator
import Models.NestedUnetSpectrogramSeparator
import Models.NestedUnetAudioSeparator
import Test
import Evaluate

import functools
import tensorflow as tf
from tensorflow.contrib.signal import hann_window
from tensorflow.python.util import deprecation
from sacred import Experiment, SETTINGS
from Config import config_ingredient  # ingredient from the repo's Config.py (assumed)
# deprecation._PRINT_DEPRECATION_WARNINGS = False

# Allow the config to be modified after it has been created.
SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False

'''Alias Settings'''
tf.trainable_variables = tf.compat.v1.trainable_variables
tf.get_variable = tf.compat.v1.get_variable
tf.assign = tf.compat.v1.assign
tf.summary.FileWriter = tf.compat.v1.summary.FileWriter
tf.summary.scalar = tf.compat.v1.summary.scalar
tf.train.Saver = tf.compat.v1.train.Saver
tf.train.SaverDef.V2 = tf.compat.v1.train.SaverDef.V2
tf.train.AdamOptimizer = tf.compat.v1.train.AdamOptimizer


ex = Experiment('Nested WaveUnet Training', ingredients=[config_ingredient])

@ex.config
# Executed for training; adds the seed value to the Sacred config so that Sacred fixes the Python and NumPy RNGs to the same state every time.
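The snippet is cut off right after the @ex.config decorator; its comment tells us the missing function adds a seed entry to the config, which sacred treats specially by seeding Python's and NumPy's RNGs before each run. A hypothetical reconstruction of such a config function (name and value are illustrative, not the repo's):

@ex.config
def set_seed():
    # 'seed' is special-cased by sacred: it seeds random and
    # numpy.random before every run, making runs reproducible.
    seed = 1337  # illustrative value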
github khui / copacrr / pred_per_epoch.py View on Github
import matplotlib.pyplot as plt
import pickle
from keras.utils import plot_model
import tempfile, subprocess

import keras.backend as K
K.get_session()

from utils.utils import load_test_data, DumpWeight, dump_modelplot, pred_label
from utils.config import treceval, perlf, rawdoc_mat_dir, file2name, default_params, qrelfdir

import sacred
from sacred.utils import apply_backspaces_and_linefeeds
ex = sacred.Experiment('predict')
ex.path = 'predict'
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('CUDA_VISIBLE_DEVICES')
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('USER')
ex.captured_out_filter = apply_backspaces_and_linefeeds

from utils.config import default_params
default_params = ex.config(default_params)


def plot_curve(epoch_err_ndcg_loss, outdir, plot_id, p):
    epoches, errs, ndcgs, maps, losses = zip(*epoch_err_ndcg_loss)
    losses = [loss/10000.0 for loss in losses]
    fig, ax = plt.subplots()
    rects1 = ax.plot(epoches, ndcgs, 'b--')
    rects2 = ax.plot(epoches, maps, color='r')
    rects3 = ax.plot(epoches, errs, 'g.')
    axt = ax.twinx()
    rects0 = axt.plot(epoches, losses, 'k:')
github HumanCompatibleAI / human_aware_rl / human_aware_rl / ppo / ppo_rllib_from_params_client.py View on Github
import os

# Sacred setup (must be before rllib imports)
from sacred import Experiment
ex_fp = Experiment("PPO RLLib From Params")

# Necessary work-around to make sacred pickling compatible with rllib
from sacred import SETTINGS
SETTINGS.CONFIG.READ_ONLY_CONFIG = False

# Slack notification configuration
from sacred.observers import SlackObserver
# LOCAL_TESTING is a module-level flag defined elsewhere in the original file.
if os.path.exists('slack.json') and not LOCAL_TESTING:
    slack_obs = SlackObserver.from_config('slack.json')
    ex_fp.observers.append(slack_obs)

    # Necessary for capturing stdout in multiprocessing setting
    SETTINGS.CAPTURE_MODE = 'sys'

# rllib and rllib-dependent imports
# Note: tensorflow and tensorflow dependent imports must also come after rllib imports
# This is because rllib disables eager execution. Otherwise, it must be manually disabled
import ray
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.registry import register_env
from ray.rllib.models import ModelCatalog
from ray.rllib.agents.ppo.ppo import PPOTrainer
from human_aware_rl.ppo.ppo_rllib import RllibPPOModel, RllibLSTMPPOModel
from human_aware_rl.rllib.rllib import OvercookedMultiAgent, save_trainer, gen_trainer_from_params
from human_aware_rl.imitation.behavior_cloning_tf2 import BehaviorCloningPolicy, BC_SAVE_DIR


###################### Temp Documentation #######################
#   run the following command in order to train a PPO self-play #
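The read-only workaround above exists because sacred, by default, wraps the injected config in read-only containers, so code that later mutates it (as rllib does when it rebuilds pickled configs) raises an error. A minimal sketch of the failure mode and the fix:

from sacred import Experiment, SETTINGS

SETTINGS.CONFIG.READ_ONLY_CONFIG = False  # allow in-place mutation

ex = Experiment('demo')

@ex.config
def cfg():
    params = {'lr': 1e-3}

@ex.automain
def main(params):
    # With the default READ_ONLY_CONFIG = True this assignment raises,
    # because injected dicts are wrapped in read-only containers.
    params['lr'] = 3e-4
    print(params)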
github solivr / tf-crnn / training.py View on Github
import logging

logging.getLogger("tensorflow").setLevel(logging.INFO)

from tf_crnn.config import Params
from tf_crnn.model import get_model_train
from tf_crnn.preprocessing import data_preprocessing
from tf_crnn.data_handler import dataset_generator
from tf_crnn.callbacks import CustomLoaderCallback, CustomSavingCallback, LRTensorBoard, EPOCH_FILENAME, FOLDER_SAVED_MODEL
import tensorflow as tf
import numpy as np
import os
import json
import pickle
from glob import glob
from sacred import Experiment, SETTINGS

SETTINGS.CONFIG.READ_ONLY_CONFIG = False

ex = Experiment('crnn')

ex.add_config('config.json')

@ex.automain
def training(_config: dict):
    parameters = Params(**_config)

    export_config_filename = os.path.join(parameters.output_model_dir, 'config.json')
    saving_dir = os.path.join(parameters.output_model_dir, FOLDER_SAVED_MODEL)

    if not parameters.restore_model:
        # check if output folder already exists
        assert not os.path.isdir(parameters.output_model_dir), \
            '{} already exists, you cannot use it as output directory.'.format(parameters.output_model_dir)
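ex.add_config('config.json') merges the file's keys into the experiment configuration, and @ex.automain both registers training() as the main function and runs it when the script is executed, passing the merged result in as the special _config argument. A stripped-down sketch of the same pattern, with an inline dict standing in for the JSON file:

from sacred import Experiment

ex = Experiment('demo')
ex.add_config({'output_model_dir': './output', 'restore_model': False})

@ex.automain
def training(_config: dict):
    # _config is the fully merged configuration dictionary.
    print(_config['output_model_dir'])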
github bayesiains / nsf / experiments / images.py View on Github
import os

import torch
import sacred
from sacred import Experiment, observers

import autils  # helper module that lives alongside this script in the nsf repo

from data import load_num_batches
from torchvision.utils import make_grid, save_image

from nde import distributions, transforms, flows
import utils
import optim
import nn as nn_

import matplotlib
matplotlib.use('Agg')

import matplotlib.pyplot as plt

# Capture job id on the cluster
sacred.SETTINGS.HOST_INFO.CAPTURED_ENV.append('SLURM_JOB_ID')

runs_dir = os.path.join(utils.get_data_root(), 'runs/images')
ex = Experiment('decomposition-flows-images')

fso = observers.FileStorageObserver.create(runs_dir, priority=1)
# I don't like how sacred names run folders.
ex.observers.extend([fso, autils.NamingObserver(runs_dir, priority=2)])

# For num_workers > 0 and tensor datasets, bad things happen otherwise.
torch.multiprocessing.set_start_method("spawn", force=True)

# noinspection PyUnusedLocal
@ex.config
def config():
    # Dataset
    dataset = 'fashion-mnist'
github HumanCompatibleAI / adversarial-policies / src / aprl / multi / common_worker.py View on Github
import sacred

def fix_sacred_capture():
    """Workaround for Sacred stdout capture issue #195 and Ray issue #5718."""
    # TODO(adam): remove once Sacred issue #195 is closed
    sacred.SETTINGS.CAPTURE_MODE = "sys"
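CAPTURE_MODE selects how sacred grabs a run's output: 'fd' (the default on POSIX) duplicates the underlying file descriptors, so it also catches output from subprocesses and C extensions, while 'sys' only swaps sys.stdout/sys.stderr, which avoids the capture deadlocks seen with Ray workers. It must be set before the experiment starts:

import sacred

# 'sys' capture is less complete than 'fd' but safe under Ray/multiprocessing.
sacred.SETTINGS.CAPTURE_MODE = 'sys'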
github IDSIA / sacred / sacred / config / utils.py View on Github
make sure all keys are strings.
      * ENFORCE_VALID_PYTHON_IDENTIFIER_KEYS (default: False):
        make sure all keys are valid python identifiers.

    Parameters
    ----------
    key:
      The key that should be checked

    Raises
    ------
    KeyError:
      if the key violates any requirements

    """
    if SETTINGS.CONFIG.ENFORCE_KEYS_MONGO_COMPATIBLE and (
        isinstance(key, str) and ("." in key or key[0] == "$")
    ):
        raise KeyError(
            'Invalid key "{}". Config-keys cannot '
            'contain "." or start with "$"'.format(key)
        )

    if (
        SETTINGS.CONFIG.ENFORCE_KEYS_JSONPICKLE_COMPATIBLE
        and isinstance(key, str)
        and (key in jsonpickle.tags.RESERVED or key.startswith("json://"))
    ):
        raise KeyError(
            'Invalid key "{}". Config-keys cannot be one of the'
            "reserved jsonpickle tags: {}".format(key, jsonpickle.tags.RESERVED)
        )

    if SETTINGS.CONFIG.ENFORCE_STRING_KEYS and (not isinstance(key, str)):
        raise KeyError(
            'Invalid key "{}". Config-keys have to be strings, '
            "but was {}".format(key, type(key))
        )

    if SETTINGS.CONFIG.ENFORCE_VALID_PYTHON_IDENTIFIER_KEYS and (
        isinstance(key, str) and not PYTHON_IDENTIFIER.match(key)
    ):
        raise KeyError(
            'Invalid key "{}". Config-keys have to be valid '
            "python identifiers".format(key)
        )
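In practice these checks fire as soon as config entries are added, e.g. via add_config. A quick illustration of the Mongo-compatibility rule; disabling the check is shown only for illustration and only makes sense if you never use the MongoObserver:

from sacred import Experiment, SETTINGS

ex = Experiment('demo')

# ex.add_config({'a.b': 1})  # raises KeyError: keys must not contain '.'

SETTINGS.CONFIG.ENFORCE_KEYS_MONGO_COMPATIBLE = False
ex.add_config({'a.b': 1})    # accepted once the check is disabled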