How to use the emmental.init function in emmental

To help you get started, we’ve selected a few examples of emmental.init, based on popular ways it is used in public projects.

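Every example on this page follows the same basic pattern: optionally reset the global Meta state, call emmental.init (with or without a logging directory), then apply settings through Meta.update_config. The condensed sketch below pulls that pattern together; it sticks to calls that appear in the snippets, but the directory name and the particular config values are illustrative placeholders, not library defaults.

import emmental
from emmental import Meta

# Clear any global state left over from a previous run.
Meta.reset()

# Initialize emmental. The argument is a placeholder directory for logs and
# checkpoints; calling emmental.init() with no argument also works, as the
# logging examples below show.
emmental.init("temp_example_dir")

# Push settings into the global config. The keys mirror the ones used in the
# optimizer and logging tests on this page.
Meta.update_config(
    config={
        "learner_config": {"optimizer_config": {"optimizer": "adamax"}},
        "logging_config": {"counter_unit": "epoch", "evaluation_freq": 1},
    }
)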

github SenWu / emmental / tests / optimizers / test_adamax_optimizer.py View on Github
def test_adamax_optimizer(caplog):
    """Unit test of Adamax optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "adamax"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default Adamax setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "weight_decay": 0,
    }

    # Test new Adamax setting
    config = {
        "learner_config": {

github SenWu / emmental / tests / logging / test_logging_manager.py View on Github
def test_logging_manager_tensorboard(caplog):
    """Unit test of logging_manager (tensorboard)."""
    caplog.set_level(logging.INFO)

    emmental.init()
    Meta.update_config(
        config={
            "logging_config": {
                "counter_unit": "epoch",
                "evaluation_freq": 1,
                "checkpointing": False,
                "checkpointer_config": {"checkpoint_freq": 2},
                "writer_config": {"writer": "tensorboard"},
            }
        }
    )

    logging_manager = LoggingManager(n_batches_per_epoch=2)

    logging_manager.update(5)

github SenWu / emmental / tests / logging / test_logging_manager.py View on Github
def test_logging_manager_wrong_counter_unit(caplog):
    """Unit test of logging_manager (wrong counter_unit)."""
    caplog.set_level(logging.INFO)

    emmental.init()
    Meta.update_config(
        config={
            "logging_config": {
                "counter_unit": "epochs",
                "evaluation_freq": 1,
                "checkpointing": False,
                "checkpointer_config": {"checkpoint_freq": 2},
            }
        }
    )

    with pytest.raises(ValueError):
        logging_manager = LoggingManager(n_batches_per_epoch=2)
        logging_manager.update(5)

github SenWu / emmental / tests / optimizers / test_adamw_optimizer.py View on Github
def test_adamw_optimizer(caplog):
    """Unit test of AdamW optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "adamw"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default AdamW setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "amsgrad": False,
        "weight_decay": 0,
    }

    # Test new AdamW setting
    config = {

github SenWu / emmental / tests / test_e2e.py View on Github
def test_e2e(caplog):
    """Run an end-to-end test."""
    caplog.set_level(logging.INFO)

    dirpath = "temp_test_e2e"
    use_exact_log_path = False
    Meta.reset()
    emmental.init(dirpath, use_exact_log_path=use_exact_log_path)

    config = {
        "meta_config": {"seed": 0},
        "learner_config": {
            "n_epochs": 3,
            "optimizer_config": {"lr": 0.01, "grad_clip": 100},
        },
        "logging_config": {
            "counter_unit": "epoch",
            "evaluation_freq": 1,
            "writer_config": {"writer": "tensorboard", "verbose": True},
            "checkpointing": True,
            "checkpointer_config": {
                "checkpoint_path": None,
                "checkpoint_freq": 1,
                "checkpoint_metric": {"model/all/train/loss": "min"},

github SenWu / emmental / tests / optimizers / test_sparse_adam_optimizer.py View on Github
def test_sparse_adam_optimizer(caplog):
    """Unit test of SparseAdam optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "sparse_adam"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default SparseAdam setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
    }

    # Test new SparseAdam setting
    config = {
        "learner_config": {
            "optimizer_config": {

github SenWu / emmental / tests / logging / test_logging_manager.py View on Github
def test_logging_manager_no_writer(caplog):
    """Unit test of logging_manager (no writer)."""
    caplog.set_level(logging.INFO)

    emmental.init()
    Meta.update_config(
        config={
            "logging_config": {
                "counter_unit": "epoch",
                "evaluation_freq": 1,
                "checkpointing": False,
                "checkpointer_config": {"checkpoint_freq": 2},
                "writer_config": {"writer": None},
            }
        }
    )

    logging_manager = LoggingManager(n_batches_per_epoch=2)

    logging_manager.update(5)

github SenWu / emmental / tests / optimizers / test_r_prop_optimizer.py View on Github
def test_r_prop_optimizer(caplog):
    """Unit test of Rprop optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "r_prop"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default Rprop setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "etas": (0.5, 1.2),
        "step_sizes": (1e-06, 50),
    }

    # Test new Rprop setting
    config = {
        "learner_config": {
            "optimizer_config": {

github HazyResearch / fonduer / tests / e2e / test_e2e.py View on Github
    tp_len = len(TP)
    fp_len = len(FP)
    fn_len = len(FN)
    prec = tp_len / (tp_len + fp_len) if tp_len + fp_len > 0 else float("nan")
    rec = tp_len / (tp_len + fn_len) if tp_len + fn_len > 0 else float("nan")
    f1 = 2 * (prec * rec) / (prec + rec) if prec + rec > 0 else float("nan")

    logger.info(f"prec: {prec}")
    logger.info(f"rec: {rec}")
    logger.info(f"f1: {f1}")

    assert f1 > 0.7

    # Testing LSTM
    emmental.Meta.reset()
    emmental.init(fonduer.Meta.log_path)
    emmental.Meta.update_config(config=config)

    tasks = create_task(ATTRIBUTE, 2, F_train[0].shape[1], 2, emb_layer, model="LSTM")

    model = EmmentalModel(name=f"{ATTRIBUTE}_task")

    for task in tasks:
        model.add_task(task)

    emmental_learner = EmmentalLearner()
    emmental_learner.learn(model, [train_dataloader])

    test_preds = model.predict(test_dataloader, return_preds=True)
    positive = np.where(np.array(test_preds["probs"][ATTRIBUTE])[:, TRUE] > 0.7)
    true_pred = [test_cands[0][_] for _ in positive[0]]

github SenWu / emmental / tests / lr_schedulers / test_exponential_scheduler.py View on Github
def test_exponential_scheduler(caplog):
    """Unit test of exponential scheduler."""
    caplog.set_level(logging.INFO)

    lr_scheduler = "exponential"
    dirpath = "temp_test_scheduler"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test step per batch
    config = {
        "learner_config": {
            "n_epochs": 4,
            "optimizer_config": {"optimizer": "sgd", "lr": 10},
            "lr_scheduler_config": {
                "lr_scheduler": lr_scheduler,
                "exponential_config": {"gamma": 0.1},
            },
        }
    }
    emmental.Meta.update_config(config)
    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)