How to use chainer.optimizers.Adam in chainer

To help you get started, we've selected a few chainer examples based on popular ways chainer.optimizers.Adam is used in public projects.

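Before diving into the examples, here is a minimal, self-contained sketch of the common pattern: construct a model, construct the optimizer, bind the two with setup(), then update. The MLP chain and the random data below are illustrative stand-ins; the optimizer calls themselves are Chainer's standard API.

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    # A hypothetical two-layer network; any chainer.Link works the same way.
    def __init__(self):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, 100)  # input size inferred on first call
            self.l2 = L.Linear(100, 10)

    def __call__(self, x):
        return self.l2(F.relu(self.l1(x)))

model = L.Classifier(MLP())            # adds a softmax cross-entropy loss head
optimizer = chainer.optimizers.Adam()  # defaults: alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-8
optimizer.setup(model)                 # must be called before any update

# One manual training step on random data:
x = np.random.rand(8, 50).astype(np.float32)
t = np.random.randint(0, 10, size=8).astype(np.int32)
model.cleargrads()
loss = model(x, t)
loss.backward()
optimizer.update()  # applies the Adam rule to the accumulated gradients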

github Xiao-Ming / UNet-VocalSeparation-Chainer / network.py
def TrainUNet(Xlist, Ylist, epoch=40, savefile="unet.model"):
    assert len(Xlist) == len(Ylist)
    unet = UNet()
    model = UNetTrainmodel(unet)
    model.to_gpu(0)
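    # Adam with Chainer's defaults (alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-8)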
    opt = optimizers.Adam()
    opt.setup(model)
    config.train = True
    config.enable_backprop = True
    itemcnt = len(Xlist)
    itemlength = [x.shape[1] for x in Xlist]
    subepoch = sum(itemlength) // const.PATCH_LENGTH // const.BATCH_SIZE * 4
    for ep in range(epoch):
        sum_loss = 0.0
        for subep in range(subepoch):
            X = np.zeros((const.BATCH_SIZE, 1, 512, const.PATCH_LENGTH),
                         dtype="float32")
            Y = np.zeros((const.BATCH_SIZE, 1, 512, const.PATCH_LENGTH),
                         dtype="float32")
            idx_item = np.random.randint(0, itemcnt, const.BATCH_SIZE)
            for i in range(const.BATCH_SIZE):
                randidx = np.random.randint(
github chainer / chainer / tests / chainer_tests / training_tests / extensions_tests / test_computational_graph.py
def _run_test(self, tempdir, initial_flag):
    n_data = 4
    n_epochs = 3
    outdir = os.path.join(tempdir, 'testresult')

    # Prepare
    model = Model()
    classifier = links.Classifier(model)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(classifier)

    dataset = Dataset([i for i in range(n_data)])
    iterator = chainer.iterators.SerialIterator(dataset, 1, shuffle=False)
    updater = training.updaters.StandardUpdater(iterator, optimizer)
    trainer = training.Trainer(updater, (n_epochs, 'epoch'), out=outdir)

    extension = c.DumpGraph('main/loss', out_name='test.dot')
    trainer.extend(extension)

    # Run
    with chainer.using_config('keep_graph_on_report', initial_flag):
        trainer.run()

    # Check flag history
    self.assertEqual(model.flag_history,
github crowdAI / marLo / examples / chainer_test_PPO.py
print('next observation:', obs)
print('reward:', r)
print('done:', done)
print('info:', info)

print('actions:', str(env.action_space))

timestep_limit = env.spec.tags.get(
    'wrapper_config.TimeLimit.max_episode_steps')
obs_space = env.observation_space
action_space = env.action_space

model = A3CFFSoftmax(obs_space.low.size, action_space.n)

opt = chainer.optimizers.Adam(alpha=lr, eps=1e-5)
opt.setup(model)

# Initialize the agent
agent = PPO(
    model, opt,
    gpu=gpu,
    phi=phi,
    update_interval=update_interval,
    minibatch_size=64, epochs=10,
    clip_eps_vf=None, entropy_coef=0.0,
)

# Linearly decay the learning rate to zero
def lr_setter(env, agent, value):
    agent.optimizer.alpha = value
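Chainer's Adam exposes its hyperparameters as plain attributes, so the learning rate can be decayed mid-training simply by assigning to agent.optimizer.alpha, as lr_setter does above.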
github takerum / vat_chainer / train_semisup.py
def train(args):
    np.random.seed(args.seed)
    train_l, train_ul, test = load_dataset(args.data_dir, valid=args.validation, dataset_seed=args.dataset_seed)
    print("N_train_labeled:{}, N_train_unlabeled:{}".format(train_l.N, train_ul.N))
    enc = CNN(n_outputs=args.n_categories, dropout_rate=args.dropout_rate, top_bn=args.top_bn)
    if args.gpu > -1:
        chainer.cuda.get_device(args.gpu).use()
        enc.to_gpu()

    optimizer = optimizers.Adam(alpha=args.lr, beta1=args.mom1)
    optimizer.setup(enc)
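    # use_cleargrads() is deprecated since Chainer v2, where cleargrads is the default behavior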
    optimizer.use_cleargrads()
    alpha_plan = [args.lr] * args.num_epochs
    beta1_plan = [args.mom1] * args.num_epochs
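    # after epoch_decay_start: decay alpha linearly toward zero and switch beta1 from mom1 to mom2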
    for i in range(args.epoch_decay_start, args.num_epochs):
        alpha_plan[i] = float(args.num_epochs - i) / (args.num_epochs - args.epoch_decay_start) * args.lr
        beta1_plan[i] = args.mom2

    accs_test = np.zeros(args.num_epochs)
    cl_losses = np.zeros(args.num_epochs)
    ul_losses = np.zeros(args.num_epochs)
    mkdir_p(args.log_dir)
    for epoch in range(args.num_epochs):
        optimizer.alpha = alpha_plan[epoch]
        optimizer.beta1 = beta1_plan[epoch]
        sum_loss_l = 0
github wkentaro / fcn / examples / apc2016 / train_fcn16s.py
    n_class = len(dataset_train.label_names)

    fcn32s = fcn.models.FCN32s(n_class=n_class)
    chainer.serializers.load_npz(fcn32s_path, fcn32s)

    model = fcn.models.FCN16s(n_class=n_class)
    model.train = True
    fcn.utils.copy_chainermodel(fcn32s, model)

    if gpu >= 0:
        cuda.get_device(gpu).use()
        model.to_gpu()

    # 3. optimizer

    optimizer = chainer.optimizers.Adam(alpha=1e-5)
    optimizer.setup(model)

    # training loop

    trainer = fcn.Trainer(
        device=gpu,
        model=model,
        optimizer=optimizer,
        iter_train=iter_train,
        iter_val=iter_val,
        out=out,
    )
    trainer.train(
        max_iter=150000,
        interval_eval=5000,
    )
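Note the learning rate: because FCN16s is initialized from pretrained FCN32s weights, this example fine-tunes with alpha=1e-5 rather than Adam's default of 1e-3.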
github nknytk / face-classifier-cnn / train_classifier.py
def main(config_file):
    with open(config_file) as fp:
        conf = json.load(fp)
    fe_conf = conf['feature_extractor']
    cl_conf = conf['classifier']

    fe_class = getattr(cnn_feature_extractors, fe_conf['model'])
    feature_extractor = fe_class(n_classes=fe_conf['n_classes'], n_base_units=fe_conf['n_base_units'])
    chainer.serializers.load_npz(fe_conf['out_file'], feature_extractor)

    model = classifiers.MLPClassifier(cl_conf['n_classes'], feature_extractor)
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

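    # device -1 runs training on the CPU; a non-negative id selects that GPU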
    device = cl_conf.get('device', -1)
    train_dataset = feature_dataset(os.path.join(cl_conf['dataset_path'], 'train'), model)
    train_iter = chainer.iterators.SerialIterator(train_dataset, conf.get('batch_size', 1))
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=device)
    trainer = chainer.training.Trainer(updater, (cl_conf['epoch'], 'epoch'), out='out_re')

    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.ProgressBar(update_interval=10))

    test_dataset_path = os.path.join(cl_conf['dataset_path'], 'test')
    if os.path.exists(test_dataset_path):
        test_dataset = feature_dataset(test_dataset_path, model)
        test_iter = chainer.iterators.SerialIterator(test_dataset, 10, repeat=False, shuffle=False)
github fukuta0614 / chainer-image-generation / CycleGAN / train_cycle_gan.py
    # genA converts B -> A, genB converts A -> B
    genA = Generator(block_num=args.block_num, bn=args.g_bn)
    genB = Generator(block_num=args.block_num, bn=args.g_bn)
    # disA discriminates realA from fakeA, disB discriminates realB from fakeB
    disA = Discriminator(bn=args.d_bn)
    disB = Discriminator(bn=args.d_bn)

    if args.gpu >= 0:
        cuda.get_device_from_id(args.gpu).use()
        genA.to_gpu()
        genB.to_gpu()
        disA.to_gpu()
        disB.to_gpu()

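    # one Adam instance per network; beta1=0.5 is a common choice for GAN training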
    optimizer_genA = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.9)
    optimizer_genB = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.9)
    optimizer_disA = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.9)
    optimizer_disB = chainer.optimizers.Adam(alpha=0.0002, beta1=0.5, beta2=0.9)

    optimizer_genA.setup(genA)
    optimizer_genB.setup(genB)
    optimizer_disA.setup(disA)
    optimizer_disB.setup(disB)

    # start training
    start = time.time()
    fake_poolA = np.zeros((args.memory_size, 3, args.size, args.size)).astype('float32')
    fake_poolB = np.zeros((args.memory_size, 3, args.size, args.size)).astype('float32')
    lambda_ = args.lambda_
    const_realA = np.asarray([testA.get_example(i) for i in range(10)])
    const_realB = np.asarray([testB.get_example(i) for i in range(10)])
github funalab / QCANet / src / lib / trainer.py
def NSNTraining(self, trainIdx, testIdx, xlist, ylist, kc):
    if self.opt_method == 'Adam':
        opt_nsn = optimizers.Adam(alpha=0.05767827010227712, beta1=0.9687170166672859,
                                  beta2=0.9918705323205452, eps=0.03260658847351856)
        opt_nsn.setup(self.model)
        opt_nsn.add_hook(chainer.optimizer.WeightDecay(0.00000416029939))

    elif self.opt_method == 'SGD':
        opt_nsn = optimizers.SGD(lr=1.0)
        opt_nsn.setup(self.model)
        opt_nsn.add_hook(chainer.optimizer.WeightDecay(0.00009))

    train_eval, test_eval = {}, {}
    train_eval['loss'], test_eval['loss'] = [], []
    for cri in self.criteria:
        train_eval[cri] = []
        test_eval[cri] = []
    N_train = len(trainIdx)
    N_test = len(testIdx)
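This example also shows how to combine Adam with weight decay through an optimizer hook (chainer.optimizer.WeightDecay); note that the hook is added only after opt_nsn.setup(), which is the order Chainer expects.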