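These snippets are unit tests from the Emmental test suite and share a common set of dependencies. The imports below are a sketch of what the original test modules pull in, inferred from the names used in the code; the exact module paths (emmental.learner, emmental.logging.tensorboard_writer, emmental.utils.parse_args) are assumptions and may differ from the real files.

import logging
import os
import shutil

import yaml
from torch import nn

import emmental
from emmental import Meta
from emmental.learner import EmmentalLearner
from emmental.logging.tensorboard_writer import TensorBoardWriter
from emmental.utils.parse_args import parse_args, parse_args_to_config
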
def test_sparse_adam_optimizer(caplog):
    """Unit test of SparseAdam optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "sparse_adam"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default SparseAdam setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
    }

    # Test new SparseAdam setting
def test_adamw_optimizer(caplog):
    """Unit test of AdamW optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "adamw"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default AdamW setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "amsgrad": False,
        "weight_decay": 0,
    }

    # Test new AdamW setting

def test_adamax_optimizer(caplog):
    """Unit test of Adamax optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "adamax"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default Adamax setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "weight_decay": 0,
    }

    # Test new Adamax setting

def test_tensorboard_writer(caplog):
    """Unit test of log_writer."""
    caplog.set_level(logging.INFO)

    emmental.Meta.reset()
    emmental.init()

    log_writer = TensorBoardWriter()
    log_writer.add_config(emmental.Meta.config)

    log_writer.add_scalar(name="step 1", value=0.1, step=1)
    log_writer.add_scalar(name="step 2", value=0.2, step=2)

    config_filename = "config.yaml"
    log_writer.write_config(config_filename)

    # Test config
    with open(os.path.join(emmental.Meta.log_path, config_filename), "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
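
The snippet ends right after the dumped config is loaded back. A minimal follow-up check, assuming write_config dumps the config that was registered via add_config (this assertion is a sketch, not the original test's):

    # Hypothetical check: the YAML round-trip should preserve the top-level
    # config sections.
    assert set(config.keys()) == set(emmental.Meta.config.keys())
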
def test_multi_step_scheduler(caplog):
    """Unit test of multi step scheduler."""
    caplog.set_level(logging.INFO)

    lr_scheduler = "multi_step"
    dirpath = "temp_test_scheduler"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    config = {
        "learner_config": {
            "n_epochs": 4,
            "optimizer_config": {"optimizer": "sgd", "lr": 10},
            "lr_scheduler_config": {
                "lr_scheduler": lr_scheduler,
                "multi_step_config": {
                    "milestones": [1, 3],
                    "gamma": 0.1,
                    "last_epoch": -1,
                },
            },
        }
    }
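
The snippet stops after the config is built. Mirroring the cosine annealing test further down this page, the setup half that follows would look roughly like this (a sketch of the shared pattern, not the original function body):

    emmental.Meta.update_config(config)
    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)

    # Before any scheduler step, the configured base learning rate is in effect;
    # the milestones at epochs 1 and 3 then scale it by gamma = 0.1 each time.
    assert emmental_learner.optimizer.param_groups[0]["lr"] == 10
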
def test_rms_prop_optimizer(caplog):
    """Unit test of RMSprop optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "rms_prop"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default RMSprop setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "alpha": 0.99,
        "eps": 1e-08,
        "momentum": 0,
        "centered": False,
        "weight_decay": 0,
    }

"checkpointer_config": {
"checkpoint_path": None,
"checkpoint_freq": 1,
"checkpoint_metric": {"model/train/all/loss": "min"},
"checkpoint_task_metrics": None,
"checkpoint_runway": 0,
"checkpoint_all": True,
"clear_intermediate_checkpoints": True,
"clear_all_checkpoints": False,
},
},
}
# Test default and default args are the same
dirpath = "temp_parse_args"
Meta.reset()
emmental.init(dirpath)
parser = parse_args()
args = parser.parse_args([])
config1 = parse_args_to_config(args)
config2 = emmental.Meta.config
del config2["learner_config"]["global_evaluation_metric_dict"]
del config2["learner_config"]["optimizer_config"]["parameters"]
assert config1 == config2
shutil.rmtree(dirpath)
def test_cosine_annealing_scheduler(caplog):
    """Unit test of cosine annealing scheduler."""
    caplog.set_level(logging.INFO)

    lr_scheduler = "cosine_annealing"
    dirpath = "temp_test_scheduler"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    config = {
        "learner_config": {
            "n_epochs": 4,
            "optimizer_config": {"optimizer": "sgd", "lr": 10},
            "lr_scheduler_config": {"lr_scheduler": lr_scheduler},
        }
    }
    emmental.Meta.update_config(config)

    emmental_learner.n_batches_per_epoch = 1
    emmental_learner._set_optimizer(model)
    emmental_learner._set_lr_scheduler(model)

    assert emmental_learner.optimizer.param_groups[0]["lr"] == 10
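
As a follow-on sketch (the lr_scheduler attribute on the learner and the exact T_max wiring are assumptions, not taken from the original test), stepping the scheduler once should pull the learning rate below its starting value:

    # optimizer.step() first so the scheduler step follows PyTorch's expected
    # call order; with no gradients this is a no-op parameter update.
    emmental_learner.optimizer.step()
    # One cosine annealing step lowers the rate from 10
    # (with T_max = 4 it becomes 10 * (1 + cos(pi / 4)) / 2, roughly 8.54).
    emmental_learner.lr_scheduler.step()
    assert emmental_learner.optimizer.param_groups[0]["lr"] < 10
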
def test_adam_optimizer(caplog):
    """Unit test of Adam optimizer."""
    caplog.set_level(logging.INFO)

    optimizer = "adam"
    dirpath = "temp_test_optimizer"
    model = nn.Linear(1, 1)
    emmental_learner = EmmentalLearner()

    Meta.reset()
    emmental.init(dirpath)

    # Test default Adam setting
    config = {"learner_config": {"optimizer_config": {"optimizer": optimizer}}}
    emmental.Meta.update_config(config)
    emmental_learner._set_optimizer(model)

    assert emmental_learner.optimizer.defaults == {
        "lr": 0.001,
        "betas": (0.9, 0.999),
        "eps": 1e-08,
        "amsgrad": False,
        "weight_decay": 0,
    }

    # Test new Adam setting