How to use the sacred.observers.FileStorageObserver class in sacred

To help you get started, we’ve selected a few examples showing how FileStorageObserver is commonly used in public projects that build on sacred.

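Before the project examples, here is a minimal sketch of the pattern they all follow (the experiment name 'demo', the config value, and the output directory 'my_runs' are made up for illustration): attach a FileStorageObserver to an Experiment, and every run is then written to a numbered subdirectory containing files such as config.json, run.json and cout.txt.

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('demo')
# Each run will be stored under ./my_runs/<run_id>/
ex.observers.append(FileStorageObserver('my_runs'))

@ex.config
def config():
    batch_size = 32  # illustrative config entry

@ex.automain
def main(batch_size):
    print('running with batch_size =', batch_size)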

github daviddiazvico / scikit-datasets / tests / utils / test_experiment.py View on Github
def _experiment(inner_cv, outer_cv):
    e = experiment(_dataset, _estimator)
    e.observers.append(FileStorageObserver('.results'))
    e.run(config_updates={'dataset': {'inner_cv': inner_cv,
                                      'outer_cv': outer_cv}})
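The example above passes the target directory straight to the FileStorageObserver constructor, while most of the snippets below use the older factory form FileStorageObserver.create(basedir=...). Recent sacred releases prefer the constructor and keep .create() around for backwards compatibility, so the two lines below (reusing the '.results' path from the example) should be interchangeable:

from sacred.observers import FileStorageObserver

observer = FileStorageObserver('.results')                         # current constructor form
legacy_observer = FileStorageObserver.create(basedir='.results')   # older factory form used in the snippets below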
github arthurmensch / cogspaces / examples / predict_contrast_l1.py View on Github
from sacred.observers import FileStorageObserver
from sklearn.externals.joblib import load, dump
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from sklearn.utils import gen_batches, check_random_state

from cogspaces.model import make_model, init_tensorflow, make_adversaries
from cogspaces.model_selection import StratifiedGroupShuffleSplit
from cogspaces.utils import get_output_dir

idx = pd.IndexSlice

predict_contrast_exp = Experiment('predict_contrast')

base_artifact_dir = join(get_output_dir(), 'predict')
observer = FileStorageObserver.create(basedir=base_artifact_dir)

predict_contrast_exp.observers.append(observer)


def scale(X, train, per_dataset_std):
    X_train = X.iloc[train]
    if per_dataset_std:
        standard_scaler = {}
        corr = np.sum(np.sqrt(
            X_train[0].groupby(level='dataset').aggregate('count').values))
        for dataset, this_X_train in X_train.groupby(level='dataset'):
            this_standard_scaler = StandardScaler()
            this_standard_scaler.fit(this_X_train)
            this_standard_scaler.scale_ /= sqrt(this_X_train.shape[0]) / corr
            standard_scaler[dataset] = this_standard_scaler
        new_X = []
github arthurmensch / cogspaces / examples / predict_contrast.py View on Github
from sacred.observers import FileStorageObserver
from sklearn.externals.joblib import load, dump
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from sklearn.utils import gen_batches, check_random_state

from cogspaces.utils import get_output_dir
from cogspaces.model import make_model, init_tensorflow, make_adversaries
from cogspaces.model_selection import StratifiedGroupShuffleSplit

idx = pd.IndexSlice

predict_contrast_exp = Experiment('predict_contrast')

base_artifact_dir = join(get_output_dir(), 'predict')
observer = FileStorageObserver.create(basedir=base_artifact_dir)

predict_contrast_exp.observers.append(observer)


def scale(X, train, per_dataset_std):
    X_train = X.iloc[train]
    if per_dataset_std:
        standard_scaler = {}
        corr = np.sum(np.sqrt(
            X_train[0].groupby(level='dataset').aggregate('count').values))
        for dataset, this_X_train in X_train.groupby(level='dataset'):
            this_standard_scaler = StandardScaler()
            this_standard_scaler.fit(this_X_train)
            this_standard_scaler.scale_ /= sqrt(this_X_train.shape[0]) / corr
            standard_scaler[dataset] = this_standard_scaler
        new_X = []
github arthurmensch / cogspaces / exps_old / single.py View on Github
from joblib import dump
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit

from cogspaces.model.non_convex_pytorch import TransferEstimator
from cogspaces.models.trace import TransferTraceNormEstimator
from cogspaces.pipeline import get_output_dir, make_data_frame, split_folds, \
    MultiDatasetTransformer

idx = pd.IndexSlice

exp = Experiment('single_exp')
basedir = join(get_output_dir(), 'single_exp')
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    datasets = ['brainomics', 'hcp']
    reduced_dir = join(get_output_dir(), 'reduced')
    unmask_dir = join(get_output_dir(), 'unmasked')
    # source = 'mix'
    source = 'hcp_new'
    test_size = {'hcp': .1, 'archi': .5, 'brainomics': .5, 'camcan': .5,
                 'la5c': .5, 'full': .5}
    train_size = dict(hcp=None, archi=None, la5c=None, brainomics=None,
                      camcan=None,
                      human_voice=None)
    dataset_weights = {'brainomics': 1, 'archi': 1, 'hcp': 1}
    model = 'trace'
github songlab-cal / tape-neurips2019 / tape / run_supervised_experiments.py View on Github
def run_single_experiment(dataset: str,
                          savedir: str,
                          named_configs: List,
                          config_updates: Dict[str, Any]):
    from tape.__main__ import proteins

    config_updates.update({
        'training': {'learning_rate': 1e-4, 'use_memory_saving_gradients': True},
        'num_epochs': 1000,
        'steps_per_epoch': 200,
        'tasks': dataset})

    if not os.path.exists(savedir):
        os.mkdir(savedir)
    shutil.rmtree(proteins.observers[0].basedir)
    proteins.observers[0] = FileStorageObserver.create(
        os.path.join(savedir, dataset))

    proteins.run(
        named_configs=named_configs,
        config_updates=config_updates)
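The snippet above tears down the experiment's existing file observer and replaces it with a fresh FileStorageObserver so that each dataset is logged to its own directory. A minimal sketch of the same pattern in isolation (the experiment object ex, savedir and dataset are placeholders, not names from the original project):

import os
import shutil
from sacred.observers import FileStorageObserver

def redirect_file_observer(ex, savedir, dataset):
    old = ex.observers[0]
    # Discard whatever the previous observer wrote ...
    if isinstance(old, FileStorageObserver) and os.path.exists(old.basedir):
        shutil.rmtree(old.basedir)
    # ... and point the experiment at a per-dataset output directory.
    ex.observers[0] = FileStorageObserver(os.path.join(savedir, dataset))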
github arthurmensch / cogspaces / sandbox / exps_old / old / nips / multi_predict_nips.py View on Github
def single_run(config_updates, rundir, _id):
    run = single_exp._create_run(config_updates=config_updates)
    observer = FileStorageObserver.create(basedir=rundir)
    run._id = _id
    run.observers = [observer]
    try:
        run()
    except:
        pass
github arthurmensch / cogspaces / examples / predict_trace_norm.py View on Github
from os.path import join

import pandas as pd
from sacred import Experiment
from sacred.observers import FileStorageObserver

from cogspaces.trace_norm import TraceNormEstimator
from cogspaces.utils import get_output_dir, make_data_frame, split_folds, \
    MultiDatasetTransformer

idx = pd.IndexSlice

exp = Experiment('Clean')
basedir = join(get_output_dir(), 'clean')
exp.observers.append(FileStorageObserver.create(basedir=basedir))


@exp.config
def config():
    datasets = ['brainomics']
    reduced_dir = join(get_output_dir(), 'reduced')
    unmask_dir = join(get_output_dir(), 'unmasked')
    source = 'hcp_rs'
    n_subjects = None
    test_size = {'hcp': .1, 'archi': .5, 'brainomics': .5, 'camcan': .5,
                 'la5c': .5}
    train_size = {'hcp': .9, 'archi': .5, 'brainomics': .5, 'camcan': .5,
                  'la5c': .5}
    alpha = 0
    beta = 0
    n_iter = 3000
github arthurmensch / modl / exps / multi_decompose_images.py View on Github
def single_run(config_updates, rundir, _id):
    for i in range(3):
        try:
            run = single_exp._create_run(config_updates=config_updates)
            observer = FileStorageObserver.create(basedir=rundir)
            run._id = _id
            run.observers = [observer]
            run()
            break
        except TypeError:
            if i < 2:
                print("Run %i failed at start, retrying..." % _id)
            else:
                print("Giving up %i" % _id)
            continue
github HumanCompatibleAI / adversarial-policies / src / aprl / train.py View on Github
def main():
    observer = FileStorageObserver(osp.join("data", "sacred", "train"))
    train_ex.observers.append(observer)
    train_ex.run_commandline()
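Because main() here hands control to train_ex.run_commandline(), a file observer can also be attached from the shell with sacred's -F / --file_storage option instead of in code, which is handy for ad-hoc runs. For example (assuming the script is invoked directly; the path simply mirrors the one used above):

python train.py -F data/sacred/train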
github HazyResearch / learning-circuits / learning_transforms / learning_transforms.py View on Github
matrices (using the largest logits), and re-optimize using L-BFGS to find
    the nearest local optima.
    """
    # Hack: create new instance without call __init__, since trainable.__init__
    # creates result_dir and log_dir in the wrong place (~/ray_results)
    trainable_cls = TrainableBP
    trainable = trainable_cls.__new__(trainable_cls)
    trainable._setup(trial.config)
    trainable.restore(str(Path(trial.logdir) / trial._checkpoint.value))
    loss = trainable.polish(N_LBFGS_STEPS, save_to_self_model=True)
    torch.save(trainable.model.state_dict(), str((Path(trial.logdir) / trial._checkpoint.value).parent / 'polished_model.pth'))
    return loss


ex = Experiment('Transform_factorization')
ex.observers.append(FileStorageObserver.create('logs_new'))
slack_config_path = Path('config/slack.json')  # Add webhook_url there for Slack notification
if slack_config_path.exists():
    ex.observers.append(SlackObserver.from_config(str(slack_config_path)))


@ex.config
def default_config():
    model = 'BP'
    target = 'dft'  # The target matrix to factor ('dft', 'idft', 'dct', 'hadamard')
    size = 8  # Size of matrix to factor, must be power of 2
    complex = True  # Whether to use complex factorization or real factorization
    fixed_order = True  # Whether the order of the factors are fixed
    ntrials = 20  # Number of trials for hyperparameter tuning
    nsteps = 400  # Number of steps per epoch
    nepochsvalid = 5  # Frequency of validation (polishing), in terms of epochs
    nmaxepochs = 200  # Maximum number of epochs
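The observers attached in these examples record whatever the run reports through sacred's logging APIs. A minimal sketch (the experiment name, metric name, and artifact file are made up) of logging a metric and saving an artifact, which a FileStorageObserver persists as metrics.json and a copied file inside the run directory:

from sacred import Experiment
from sacred.observers import FileStorageObserver

ex = Experiment('artifact_demo')
ex.observers.append(FileStorageObserver('logs_new'))

@ex.automain
def main(_run):
    for step in range(3):
        _run.log_scalar('loss', 1.0 / (step + 1), step)  # collected into metrics.json
    with open('weights.txt', 'w') as f:                   # hypothetical artifact produced by the run
        f.write('dummy weights')
    ex.add_artifact('weights.txt')                         # copied into the run's directory by the observer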