How to use the sacred.observers.MongoObserver.create function in sacred

To help you get started, we’ve selected a few sacred examples based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github Lab41 / altair / altair / experiments.py View on Github external
# NOTE(review): truncated excerpt — the opening `if` of this chain and the
# enclosing function definition are not visible here.
elif vectorizer_cls == LDAVectorizer:
        configs["vectorizer_string"] = 'lda'
    elif vectorizer_cls == Doc2VecVectorizer:
        configs["vectorizer_string"] = 'doc2vec'
    else:
        print("Unknown vectorizer; quitting")
        quit()

    # Monkey patch to avoid having to declare all our variables:
    # replace Scaffold's warning hook with a no-op so sacred does not
    # warn about config entries it did not see declared.
    def noop(item):
        pass

    Scaffold._warn_about_suspicious_changes = noop

    # Add mongo observer for Sacred; the connection URL is read from the
    # MONGO_DB_URI environment variable (KeyError if unset).
    ex.observers.append(MongoObserver.create(url=os.environ['MONGO_DB_URI'], db_name='altair_baseline_metrics'))

    # Define the entrypoint (wrapped in a lambda so run_model takes no
    # sacred-injected arguments).
    ex.main(lambda: run_model())

    # Tell sacred about config items so they are logged
    ex.run(config_updates=configs)
github arthurmensch / modl / examples / contrast / old / predict_contrast_hierarchical_dataset.py View on Github external
from sacred.optional import pymongo
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.model_selection import ParameterGrid
from sklearn.utils import check_random_state

sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))

from examples.contrast.predict_contrast_hierarchical\
    import predict_contrast_hierarchical

# Parent experiment that wraps predict_contrast_hierarchical as an ingredient.
multi_predict_task = Experiment('multi_predict_contrast_hierarchical',
                                ingredients=[predict_contrast_hierarchical])
# Use the experiment's dotted path as the MongoDB collection name so runs
# are grouped per experiment.
collection = multi_predict_task.path
# NOTE(review): MongoObserver.create is the legacy factory (deprecated in
# newer sacred releases in favor of MongoObserver(...)) — confirm version.
observer = MongoObserver.create(db_name='amensch', collection=collection)
multi_predict_task.observers.append(observer)


@multi_predict_task.config
def config():
    # Sacred config scope: each local variable below is captured as a config
    # entry, so the names (not just the values) are part of the interface.
    n_jobs = 36  # parallel workers for the grid
    dropout_latent_list = [0.5]  # grid values, one list per hyper-parameter
    latent_dim_list = [50]
    shared_supervised_list = [False]
    task_prob_list = [0.5]
    alpha_list = [1e-4]
    n_seeds = 10  # number of random seeds per grid point
    verbose = 0
    seed = 2  # master seed used to derive the per-run seeds
github arthurmensch / modl / examples / fmri_compare.py View on Github external
def single_run(config_updates=None, _seed=0):
    """Execute one fmri_decompose run with the given config overrides.

    Parameters
    ----------
    config_updates : dict or None
        Config entries to override for this run; a ``'seed'`` entry is
        always set from ``_seed``. ``None`` means no overrides.
    _seed : int
        Seed recorded into the run's config.
    """
    # Bug fix: the original subscripted config_updates even when the default
    # None was used, raising TypeError. Also copy so the caller's dict is
    # not mutated as a side effect.
    config_updates = {} if config_updates is None else dict(config_updates)
    config_updates['seed'] = _seed

    @fmri_decompose.capture
    def pre_run_hook(_run):
        # Link this child run to the parent fmri_compare run's Mongo entry.
        _run.info['parent_id'] = fmri_compare.observers[0].run_entry['_id']
        _run.info['updated_params'] = config_updates

    # Fresh observer per run so each child run gets its own Mongo record.
    single_observer = MongoObserver.create(url='mongo')
    fmri_decompose.pre_run_hooks = [pre_run_hook]
    fmri_decompose.observers = [single_observer]
    run = create_run(fmri_decompose, fmri_decompose_run.__name__,
                     config_updates)
    run()
github arthurmensch / modl / examples / components / multi_decompose_rest.py View on Github external
from os import path

from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.optional import pymongo
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed

sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))

from examples.components.decompose_rest import decompose_rest

# Parent experiment that wraps the decompose_rest ingredient.
multi_decompose_rest = Experiment('multi_decompose_rest',
                                  ingredients=[decompose_rest])
# All runs of this experiment land in the shared 'runs' collection.
observer = MongoObserver.create(db_name='amensch', collection='runs')
multi_decompose_rest.observers.append(observer)

@multi_decompose_rest.config
def config():
    # Sacred config scope: locals become tracked config entries, so the
    # variable names are part of the experiment's interface.
    n_jobs = 1  # sequential by default
    n_components_list = [256]  # grid over the number of components
    alpha_list = [1e-4]  # grid over the regularization strength


@decompose_rest.config
def config():
    # Default hyper-parameters injected into the decompose_rest ingredient
    # (sacred config scope: locals become config entries).
    batch_size = 100
    learning_rate = 0.92
    method = 'gram'
    reduction = 12
    alpha = 1e-4  # Overridden per grid point by the parent experiment
github arthurmensch / modl / examples / contrast / multi_predict_contrast_no_projection.py View on Github external
def single_run(config_updates, _id, master_id):
    # One child run of predict_contrast with its own Mongo observer.
    # `collection` comes from module scope.
    # NOTE(review): truncated excerpt — `run` is created but the call that
    # executes it is not visible in these lines.
    observer = MongoObserver.create(db_name='amensch',
                                    collection=collection)

    @predict_contrast.config
    def config():
        # Sacred config scope: overrides applied to the predict_contrast
        # ingredient for this run (locals become config entries).
        n_jobs = 1
        from_loadings = True
        projection = False
        factored = False
        loadings_dir = join(get_data_dirs()[0], 'pipeline', 'contrast',
                            'reduced')
        verbose = 2
        max_iter = 50

    predict_contrast.observers = [observer]

    # _create_run is a private sacred API that builds (but does not start)
    # the Run object.
    run = predict_contrast._create_run(config_updates=config_updates)
github arthurmensch / modl / examples / contrast / multi_predict_contrast_factored.py View on Github external
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed

import numpy as np
from sklearn.model_selection import ParameterGrid
from sklearn.utils import check_random_state

sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))

from examples.contrast.predict_contrast import predict_contrast

# Parent experiment that wraps predict_contrast as an ingredient.
multi_predict_task = Experiment('multi_predict_contrast_factored',
                                ingredients=[predict_contrast])
# Group runs in a Mongo collection named after the experiment's dotted path.
collection = multi_predict_task.path
observer = MongoObserver.create(db_name='amensch', collection=collection)
multi_predict_task.observers.append(observer)


@multi_predict_task.config
def config():
    # Sacred config scope: locals become tracked config entries
    # (names matter, not just values).
    n_jobs = 25  # parallel workers for the grid search
    dropout_list = [0., 0.3, 0.6, 0.9]  # dropout grid
    latent_dim_list = [200]
    alpha_list = [1e-4]
    beta_list = [0]
    fine_tune_list = [0]
    activation_list = ['linear']
    n_seeds = 10  # seeds per grid point
    early_stop = False
github arthurmensch / modl / examples / contrast / predict_contrast_train_size.py View on Github external
from sacred.observers import MongoObserver
from sacred.optional import pymongo
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.model_selection import ParameterGrid
from sklearn.utils import check_random_state, shuffle

sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))

from examples.contrast.predict_contrast import predict_contrast_exp

# Parent experiment that wraps predict_contrast_exp as an ingredient.
predict_contrast_multi_exp = Experiment('predict_contrast_train_size',
                                        ingredients=[predict_contrast_exp])
# Collection named after the experiment path so runs are grouped together.
collection = predict_contrast_multi_exp.path
observer = MongoObserver.create(db_name='amensch', collection=collection)
predict_contrast_multi_exp.observers.append(observer)


@predict_contrast_multi_exp.config
def config():
    # Sacred config scope: locals become config entries.
    n_jobs = 24  # parallel workers
    n_seeds = 10  # seeds per configuration
    seed = 2  # master seed


def single_run(config_updates, _id, master_id):
    # Attach a fresh per-run Mongo observer; `collection` is defined at
    # module scope above.
    observer = MongoObserver.create(db_name='amensch', collection=collection)
    predict_contrast_exp.observers = [observer]

    # NOTE(review): truncated excerpt — the body of this config override
    # continues beyond the visible lines.
    @predict_contrast_exp.config
    def config():
github arthurmensch / cogspaces / examples / multiple / predict_contrast_multi.py View on Github external
from sacred import Experiment
from sacred.observers import MongoObserver
from sacred.optional import pymongo
from sklearn.externals.joblib import Parallel
from sklearn.externals.joblib import delayed
from sklearn.utils import check_random_state, shuffle

sys.path.append(path.dirname(path.dirname
                             (path.dirname(path.abspath(__file__)))))

from examples.predict_contrast import predict_contrast_exp

# Parent experiment that wraps predict_contrast_exp as an ingredient.
predict_contrast_multi_exp = Experiment('predict_contrast_multi',
                                        ingredients=[predict_contrast_exp])
# Collection named after the experiment path so runs are grouped together.
collection = predict_contrast_multi_exp.path
observer = MongoObserver.create(db_name='amensch', collection=collection)
predict_contrast_multi_exp.observers.append(observer)


@predict_contrast_multi_exp.config
def config():
    # Sacred config scope: locals become config entries.
    n_jobs = 24  # parallel workers
    n_seeds = 10  # seeds per configuration
    seed = 2  # master seed


def single_run(config_updates, _id, master_id):
    # Fresh per-run Mongo observer; `collection` comes from module scope.
    observer = MongoObserver.create(db_name='amensch', collection=collection)
    predict_contrast_exp.observers = [observer]

    # NOTE(review): truncated excerpt — this config override's body
    # continues beyond the visible lines.
    @predict_contrast_exp.config
    def config():