How to use the lagom.experiment.run_experiment function in lagom

To help you get started, we've selected a few lagom examples based on popular ways the function is used in public projects.
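Before the project examples, here is a minimal sketch of the call, based on the signature exercised in the test below. It assumes Config and Grid are importable from lagom.experiment alongside run_experiment, and uses illustrative hyperparameter names with default sampling options:

from lagom.experiment import Config, Grid, run_experiment

def run(config, seed, device, logdir):
    # called once per (configuration, seed) pair
    print(config['ID'], seed, device, logdir)

# Grid values are swept; plain values are shared by every configuration
config = Config({'network.lr': Grid([1e-3, 5e-3]),
                 'network.size': [32, 16]})
run_experiment(run, config, seeds=[1, 2], log_dir='logs/demo',
               max_workers=2, chunksize=1, use_gpu=False, gpu_ids=None)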


github zuoxingdong / lagom / test / test_experiment.py
from pathlib import Path
from shutil import rmtree

# Config, Grid and run_experiment are assumed importable from lagom.experiment
from lagom.experiment import Config, Grid, run_experiment


# num_sample, max_workers and chunksize are supplied by pytest parametrization
def test_run_experiment(num_sample, max_workers, chunksize):
    def run(config, seed, device, logdir):
        return config['ID'], seed, device, logdir
    
    config = Config({'network.lr': Grid([1e-3, 5e-3]), 
                     'network.size': [32, 16],
                     'env.id': Grid(['CartPole-v1', 'Ant-v2'])}, 
                    num_sample=num_sample, 
                    keep_dict_order=True)
    seeds = [1, 2, 3]
    log_dir = './some_path'
    run_experiment(run, config, seeds, log_dir, max_workers, chunksize, use_gpu=False, gpu_ids=None)
 
    p = Path('./some_path')
    assert p.exists()
    assert (p / 'configs.pkl').exists()
    assert (p / 'source_files').exists() and (p / 'source_files').is_dir()
    # Check all configuration folders with their IDs and subfolders for all random seeds
    for i in range(4):
        config_p = p / str(i)
        assert config_p.exists()
        assert (config_p / 'config.yml').exists()
        for seed in seeds:
            assert (config_p / str(seed)).exists()
    # Clean up the logging directory and verify it was removed
    rmtree(p)
    assert not p.exists()
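The two Grid axes above take two values each, so Config expands to 2 × 2 = 4 configurations (the plain 'network.size' entry is shared by all of them). That is why the test checks configuration folders 0 through 3, each containing one subfolder per seed.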
github zuoxingdong / lagom / examples / vae / main.py
from lagom.experiment import run_experiment

from experiment import ExperimentWorker
from experiment import ExperimentMaster


run_experiment(worker_class=ExperimentWorker, 
               master_class=ExperimentMaster, 
               num_worker=4)
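This main.py uses lagom's older master/worker API: instead of a run function, run_experiment receives an ExperimentMaster that generates configurations and seeds and an ExperimentWorker that executes them, with num_worker parallel workers.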
github zuoxingdong / lagom / baselines / ppo / experiment.py
import os
from itertools import count

from lagom.utils import pickle_dump  # assumed import path for pickle_dump


def run(config, seed, device, logdir):
    # ... agent/engine construction, train_logs = [] and checkpoint_count = 0
    # are omitted from this snippet ...
    for i in count():
        if agent.total_timestep >= config['train.timestep']:
            break
        train_logger = engine.train(i)
        train_logs.append(train_logger.logs)
        if i == 0 or (i+1) % config['log.freq'] == 0:
            train_logger.dump(keys=None, index=0, indent=0, border='-'*50)
        # write config['checkpoint.num'] evenly spaced checkpoints over training
        if agent.total_timestep >= int(config['train.timestep']*(checkpoint_count/(config['checkpoint.num'] - 1))):
            agent.checkpoint(logdir, i + 1)
            checkpoint_count += 1
    pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
    return None
    

if __name__ == '__main__':
    run_experiment(run=run, 
                   config=config, 
                   seeds=[1770966829, 1500925526, 2054191100], 
                   log_dir='logs/default',
                   max_workers=os.cpu_count(), 
                   chunksize=1, 
                   use_gpu=False,  # CPU a bit faster
                   gpu_ids=None)
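The checkpoint condition spaces config['checkpoint.num'] checkpoints evenly over training: for example, with train.timestep = 1e6 and checkpoint.num = 5, checkpoints are written as total_timestep crosses 0, 250k, 500k, 750k and 1M steps.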
github zuoxingdong / lagom / examples / reinforcement_learning / ppo / logs / default / source_files / experiment.py
import os
from itertools import count

from lagom.utils import pickle_dump  # assumed import path, as above


def run(config, seed, device, logdir):
    # ... agent/engine construction and train_logs = [] are omitted from this snippet ...
    for i in count():
        if agent.total_timestep >= config['train.timestep']:
            break
        train_logger = engine.train(i)
        train_logs.append(train_logger.logs)
        if i == 0 or (i+1) % config['log.freq'] == 0:
            train_logger.dump(keys=None, index=0, indent=0, border='-'*50)
        if i == 0 or (i+1) % config['checkpoint.freq'] == 0:
            agent.checkpoint(logdir, i + 1)
    agent.checkpoint(logdir, i + 1)  # final checkpoint once training ends
    pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
    return None
    

if __name__ == '__main__':
    run_experiment(run=run, 
                   config=config, 
                   seeds=[1770966829, 1500925526, 2054191100], 
                   num_worker=os.cpu_count())
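Unlike the baselines/ppo example above, this snippet (taken from the source_files copy that run_experiment snapshots into the logging directory) checkpoints at a fixed config['checkpoint.freq'] interval and passes num_worker=os.cpu_count() instead of max_workers/chunksize, reflecting a different version of the run_experiment signature.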
github zuoxingdong / lagom / examples / policy_gradient / reinforce / main.py
from lagom.experiment import run_experiment

from experiment import ExperimentWorker
from experiment import ExperimentMaster


run_experiment(worker_class=ExperimentWorker, 
               master_class=ExperimentMaster, 
               num_worker=100)
github zuoxingdong / lagom / examples / vae / experiment.py
import os

from lagom.utils import pickle_dump  # assumed import path, as above


def run(config, seed, device, logdir):
    # ... model and data-loader setup omitted; the snippet begins at the tail
    # of the Engine(...) construction ...
    engine = Engine(...,  # earlier arguments truncated in the original snippet
                    test_loader=test_loader)

    train_logs = []
    eval_logs = []
    for epoch in range(config['train.num_epoch']):
        train_logger = engine.train(epoch, logdir=logdir)
        train_logs.append(train_logger.logs)
        eval_logger = engine.eval(epoch, logdir=logdir)
        eval_logs.append(eval_logger.logs)
    pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
    pickle_dump(obj=eval_logs, f=logdir/'eval_logs', ext='.pkl')
    return None


if __name__ == '__main__':
    run_experiment(run=run, 
                   config=config, 
                   seeds=[1770966829], 
                   log_dir='logs/default',
                   max_workers=os.cpu_count(),
                   chunksize=1, 
                   use_gpu=True,  # GPU much faster
                   gpu_ids=None)
github zuoxingdong / lagom / legacy / a2c / experiment.py
class ExperimentMaster(BaseExperimentMaster):  # base class name assumed
    def make_configs(self):
        # ... configurator setup omitted; the snippet begins at the end of this method ...
        list_config = configurator.make_configs()
        return list_config

    def make_seeds(self):
        list_seed = [1770966829, 1500925526, 2054191100]
        return list_seed

    def process_results(self, results):
        assert all(result is None for result in results)

        
if __name__ == '__main__':
    run_experiment(worker_class=ExperimentWorker, 
                   master_class=ExperimentMaster, 
                   num_worker=100)
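In this legacy API the master, not run_experiment, owns the experiment setup: make_configs() and make_seeds() supply the configuration list and seeds, and process_results() receives whatever each worker returned (here, None for every run).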
github zuoxingdong / lagom / baselines / ddpg_td3 / experiment.py
import os

from lagom.utils import pickle_dump  # assumed import path, as above


def run(config, seed, device, logdir):
    # ... env, eval_env and random_agent construction omitted from this snippet ...
    if config['agent.use_td3']:  # one run function serves both TD3 and DDPG
        agent = TD3Agent(config, env, device)
    else:
        agent = DDPGAgent(config, env, device)
    runner = EpisodeRunner()
    replay = ReplayBuffer(env, config['replay.capacity'], device)
    engine = Engine(config, agent=agent, random_agent=random_agent, env=env,
                    eval_env=eval_env, runner=runner, replay=replay, logdir=logdir)

    train_logs, eval_logs = engine.train()
    pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
    pickle_dump(obj=eval_logs, f=logdir/'eval_logs', ext='.pkl')
    return None
    

if __name__ == '__main__':
    run_experiment(run=run, 
                   config=config, 
                   seeds=[4153361530, 3503522377, 2876994566, 172236777, 3949341511], 
                   log_dir='logs/default',
                   max_workers=os.cpu_count(), 
                   chunksize=1, 
                   use_gpu=True,  # GPU much faster, note that performance differs between CPU/GPU
                   gpu_ids=None)
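Since run receives the full configuration, the config['agent.use_td3'] switch lets DDPG and TD3 share one run function; one could also place the flag in a Grid to compare both algorithms over the same five seeds.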
github zuoxingdong / lagom / examples / policy_gradient / vpg / main.py
from lagom.experiment import run_experiment

from experiment import ExperimentWorker
from experiment import ExperimentMaster


run_experiment(worker_class=ExperimentWorker, 
               master_class=ExperimentMaster, 
               num_worker=100)
github zuoxingdong / lagom / examples / reinforcement_learning / dqn / experiment.py
import os
from collections import deque

from lagom.utils import pickle_dump  # assumed import path, as above


def run(config, seed, device, logdir):
    # ... env construction (and the initialize_replay helper) are defined
    # earlier in the file and omitted from this snippet ...
    agent = Agent(config, env, device)
    replay = ReplayBuffer(config['replay.capacity'], device)
    initialize_replay(config, env, replay)
    engine = Engine(config, agent=agent, env=env, replay=replay)
    running_rewards = deque(maxlen=100)  # moving window over the most recent rewards

    train_logs = []
    for n in range(config['train.iter']):
        train_logger = engine.train(n, running_rewards=running_rewards)
        train_logs.append(train_logger.logs)
    pickle_dump(obj=train_logs, f=logdir/'train_logs', ext='.pkl')
    return None
    

if __name__ == '__main__':
    run_experiment(run=run, 
                   config=config, 
                   seeds=[4153361530, 3503522377, 2876994566, 172236777, 3949341511, 849059707], 
                   num_worker=os.cpu_count())