How to use the sacred.Ingredient class in sacred

To help you get started, we’ve selected a few examples of sacred.Ingredient, drawn from popular ways it is used in public projects.
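
Before looking at the project code below, here is a minimal sketch of the typical pattern, written for this page rather than taken from any of the projects listed: create an Ingredient, attach a config and one or more captured functions to it, and pass it to an Experiment via ingredients=[...]. The names my_experiment, data.csv, and load_data are placeholders.

from sacred import Experiment, Ingredient

# Ingredients bundle their own config and captured functions.
data_ingredient = Ingredient("dataset")


@data_ingredient.config
def data_config():
    filename = "data.csv"  # placeholder path
    normalize = True


@data_ingredient.capture
def load_data(filename, normalize):
    # filename and normalize are filled in from the "dataset" config section
    print("loading", filename, "normalize:", normalize)
    return []


# The experiment picks up the ingredient's config under the key "dataset".
ex = Experiment("my_experiment", ingredients=[data_ingredient])


@ex.automain
def run():
    data = load_data()  # no arguments needed; capture injects them
    return len(data)

Because the ingredient is registered under the name "dataset", its options can be overridden from the command line with dotted keys, for example: python my_script.py with dataset.filename=other.csv (where my_script.py is whatever file holds the experiment).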


github IDSIA / sacred / tests / test_exceptions.py
import pytest

from sacred import Experiment, Ingredient
from sacred.utils import CircularDependencyError


def test_circular_dependency_raises():
    # create experiment with circular dependency
    ing = Ingredient("ing")
    ex = Experiment("exp", ingredients=[ing])
    ex.main(lambda: None)
    ing.ingredients.append(ex)

    # run and see if it raises
    with pytest.raises(CircularDependencyError, match="exp->ing->exp"):
        ex.run()

github atomistic-machine-learning / schnetpack / src / schnetpack / sacred / optimizer_ingredient.py
from sacred import Ingredient
from torch.optim import Adam


optimizer_ing = Ingredient("optimizer")


@optimizer_ing.config
def config():
    r"""
    settings for optimizer class
    """
    name = "adam"  # optimizer type
    learning_rate = 1e-4  # initial learning rate


@optimizer_ing.capture
def build_optimizer(name, learning_rate, trainable_params):
    """
    build optimizer object

github IDSIA / sacred / examples / modular.py
"""
This is a very basic example of how to use Sacred.
"""

from sacred import Experiment, Ingredient

# ============== Ingredient 0: settings =================
s = Ingredient("settings")


@s.config
def cfg1():
    verbose = True


# ============== Ingredient 1: dataset.paths =================
data_paths = Ingredient("dataset.paths", ingredients=[s])


@data_paths.config
def cfg2(settings):
    v = not settings["verbose"]
    base = "/home/sacred/"


# ============== Ingredient 2: dataset =======================
data = Ingredient("dataset", ingredients=[data_paths, s])


@data.config
def cfg3(paths):
    basepath = paths["base"] + "datasets/"
    filename = "foo.hdf5"

github atomistic-machine-learning / schnetpack / src / schnetpack / sacred / evaluation_data_ingredient.py
import os
from sacred import Ingredient
from schnetpack.data.parsing import generate_db
from schnetpack.data.atoms import AtomsData


eval_data_ing = Ingredient("dataset")


@eval_data_ing.config
def config():
    pass


@eval_data_ing.capture
def get_eval_data(path):
    """
    Build dataset that needs to be evaluated.

    Args:
        path (str): path to the input file

    Returns:

github arthurmensch / modl / examples / decompose_fmri.py
# Adapted from nilearn example

# Load ADHD
import time

import matplotlib.pyplot as plt
from sacred import Experiment
from sacred import Ingredient
from sklearn.externals.joblib import Memory

from modl.datasets.fmri import load_rest_func, load_atlas_init
from modl.fmri import fMRIDictFact
from modl.plotting.fmri import display_maps
from modl.utils.system import get_cache_dirs

data_ing = Ingredient('data')
init_ing = Ingredient('init')

decompose_ex = Experiment('decompose_fmri', ingredients=[data_ing, init_ing])


@init_ing.config
def config():
    n_components = 20
    source = None


@data_ing.config
def config():
    dataset = 'adhd'
    raw = False
    n_subjects = 40

github atomistic-machine-learning / schnetpack / src / schnetpack / sacred / trainer_ingredients.py
from sacred import Ingredient

from schnetpack.train.trainer import Trainer
from schnetpack.sacred.optimizer_ingredient import optimizer_ing, build_optimizer
from schnetpack.sacred.train_hook_ingredients import hooks_ing, build_hooks
from schnetpack.sacred.loss_ingredient import loss_ing, build_loss

train_ingredient = Ingredient(
    "trainer", ingredients=[optimizer_ing, hooks_ing, loss_ing]
)


@train_ingredient.config
def cfg():
    pass


@train_ingredient.capture
def setup_trainer(model, train_dir, train_loader, val_loader, property_map, exclude=[]):
    """
    build a trainer object

    Args:
        model (torch.nn.Module): model object

github kata-ai / indosum / ingredients / summarization.py

import json

from sacred import Ingredient

from ingredients.corpus import ing as corpus_ingredient, read_jsonl
from models import AbstractSummarizer


# TODO Putting corpus ingredient here does not feel right. When summarizing, we do not need
# the corpus. Any jsonl file will do. What we need here is the `read_jsonl` function and its
# preprocessing. That might be best put in a separate ingredient.
ing = Ingredient('summ', ingredients=[corpus_ingredient])


@ing.config
def cfg():
    # path to the JSONL file to summarize
    path = 'test.jsonl'
    # extract at most this number of sentences as summary
    size = 3


@ing.capture
def run_summarization(model: AbstractSummarizer, path, size=3):
    for doc in read_jsonl(path):
        summary = set(model.summarize(doc, size=size))
        sent_id = 0
        for para in doc.paragraphs:

github atomistic-machine-learning / schnetpack / src / schnetpack / sacred / trainer_ingredients.py
import os
from sacred import Ingredient
from torch.optim import Adam

from schnetpack.train.hooks import *
from schnetpack.train.trainer import Trainer
from schnetpack.metrics import *


train_ingredient = Ingredient('trainer')


@train_ingredient.config
def cfg():
    """configuration for the trainer ingredient"""
    optimizer = 'adam'
    schedule = None
    learning_rate = 1e-4
    max_epochs = None
    metrics = []
    max_steps = None
    early_stopping = False
    lr_schedule = None
    logging_hooks = []

github sjoerdvansteenkiste / Neural-EM / nem_model.py
#!/usr/bin/env python
# coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np
import tensorflow as tf

from tensorflow.contrib.rnn import RNNCell
from network import net, build_network
from sacred import Ingredient

nem = Ingredient('nem', ingredients=[net])


# noinspection PyUnusedLocal
@nem.config
def cfg():
    # general
    gradient_gamma = True       # whether to back-propagate a gradient through gamma

    # loss
    loss_inter_weight = 1.0     # weight for the inter-cluster loss
    loss_step_weights = 'last'  # all, last, or list of weights
    pixel_prior = {
        'p': 0.0,               # probability of success for pixel prior Bernoulli
        'mu': 0.0,              # mean of pixel prior Gaussian
        'sigma': 0.25           # std of pixel prior Gaussian
    }

github atomistic-machine-learning / schnetpack / src / schnetpack / sacred / dataset_ingredients.py
import os
from sacred import Ingredient
from schnetpack.datasets import ANI1, ISO17, QM9, MD17, MaterialsProject
from schnetpack.data.parsing import generate_db
from schnetpack.data import AtomsData, AtomsDataError
from schnetpack.atomistic import Properties

dataset_ingredient = Ingredient("dataset")


@dataset_ingredient.config
def cfg():
    """Settings for training dataset"""
    dbpath = None  # path to ase.db
    dataset = "CUSTOM"  # dataset name; use for pre-implemented datasets
    property_mapping = {}  # mapping from model properties to data properties


@dataset_ingredient.named_config
def qm9():
    """Default settings for QM9 dataset."""
    dbpath = "./data/qm9.db"
    dataset = "QM9"
    property_mapping = {