How to use the chainer.serializers.load_npz function in chainer

To help you get started, we’ve selected a few chainer.serializers.load_npz examples, based on popular ways the function is used in public projects.

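Across all of these examples the core pattern is the same: load_npz restores parameters that were previously written with save_npz into an object whose link hierarchy matches the keys in the file. As a minimal, self-contained sketch (the two-layer MLP is illustrative and not taken from any of the projects below):

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import serializers

class MLP(chainer.Chain):
    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(784, n_units)
            self.l2 = L.Linear(n_units, n_out)

    def forward(self, x):
        return self.l2(F.relu(self.l1(x)))

model = MLP(1000, 10)
serializers.save_npz('model.npz', model)     # write parameters as an NPZ archive

restored = MLP(1000, 10)                     # must have the same link structure
serializers.load_npz('model.npz', restored)  # fill its parameters in place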

github aws / sagemaker-chainer-container / test / resources / mnist / distributed_customer_script.py
def model_fn(model_dir):
    model = L.Classifier(MLP(1000, 10))
    serializers.load_npz(os.path.join(model_dir, 'model.npz'), model)
    return model.predictor
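Here the checkpoint is loaded into the full L.Classifier wrapper and only then is model.predictor returned: the keys in the NPZ file mirror the link hierarchy at save time, so the loading target has to match it. In recent Chainer versions the path argument of load_npz can instead select a sub-tree of the file; a hedged variation, reusing the illustrative MLP from the opening sketch:

import os

# Hypothetical variant of model_fn: fill a bare MLP directly from the
# Classifier checkpoint by prefixing key lookups with 'predictor/'.
def predictor_fn(model_dir):
    predictor = MLP(1000, 10)
    serializers.load_npz(
        os.path.join(model_dir, 'model.npz'), predictor, path='predictor/')
    return predictor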
github mitmul / chainer-faster-rcnn / tests / test_train_rcnn.py
if __name__ == '__main__':
    batchsize = 1

    train_dataset = VOC('train')
    valid_dataset = VOC('val')

    train_iter = iterators.SerialIterator(train_dataset, batchsize)
    model = FasterRCNN()
    model.to_gpu(0)

    warmup(model, train_iter)
    model.rcnn_train = True

    serializers.load_npz('tests/train_test/snapshot_10000', model)

    # optimizer = optimizers.Adam()
    # optimizer.setup(model)
    optimizer = optimizers.MomentumSGD(lr=0.001)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    updater = training.StandardUpdater(train_iter, optimizer, device=0)
    trainer = training.Trainer(updater, (100, 'epoch'),
                               out='tests/train_test_rcnn')
    trainer.extend(extensions.LogReport(trigger=(100, 'iteration')))
    trainer.extend(extensions.PrintReport([
        'epoch', 'iteration',
        'main/loss_cls',
        'main/loss_bbox',
        'main/loss_rcnn',
    ]))
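A file like snapshot_10000 that loads directly into the model (rather than into the trainer) is typically produced by the snapshot_object extension. A hedged sketch of the writing side, with an illustrative trigger and filename pattern rather than the repository's actual ones:

# Writes model-only NPZ snapshots such as snapshot_10000 during training;
# these are the files that serializers.load_npz(path, model) can consume.
trainer.extend(
    extensions.snapshot_object(model, 'snapshot_{.updater.iteration}'),
    trigger=(10000, 'iteration'))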
github osmr / imgclsmob / chainer_ / chainercv2 / models / ror_cifar.py
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = CIFARRoR(
        channels=channels,
        init_block_channels=init_block_channels,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        load_npz(
            file=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            obj=net)

    return net
github oyam / Semantic-Segmentation-using-Adversarial-Networks / 02-train.py
def load_pretrained_model(initmodel_path, initmodel, model, n_class, device):
    print('Initializing the model')
    chainer.serializers.load_npz(initmodel_path, initmodel)
    utils.copy_chainermodel(initmodel, model)
    return model
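copy_chainermodel is a helper from this repository; for the common case where source and destination models share an identical structure, plain Chainer's Link.copyparams performs the same transfer. A minimal sketch, reusing the imports and the illustrative MLP from the opening example ('pretrained.npz' is a hypothetical checkpoint):

src = MLP(1000, 10)
dst = MLP(1000, 10)
serializers.load_npz('pretrained.npz', src)  # hypothetical checkpoint file
dst.copyparams(src)  # copy all parameters into the identically-structured dst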
github wuhuikai / chainer-pix2pix / train.py
    # Plot
    plot_interval = (args.plot_interval, 'iteration')

    trainer.extend(
        extensions.PlotReport(['main/loss_D'], 'iteration', file_name='G_GAN_loss.png', trigger=plot_interval), trigger=plot_interval)
    trainer.extend(
        extensions.PlotReport(['D/loss'], 'iteration', file_name='D_GAN_loss.png', trigger=plot_interval), trigger=plot_interval)
    trainer.extend(
        extensions.PlotReport(['main/loss_l1'], 'iteration', file_name='G_L1_loss.png', trigger=plot_interval), trigger=plot_interval)
    trainer.extend(
        extensions.PlotReport(['main/loss'], 'iteration', file_name='G_loss.png', trigger=plot_interval), trigger=plot_interval)
    trainer.extend(display_image(G, valset, args.out, args.gpu), trigger=plot_interval)

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
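The file passed as args.resume is a trainer-level snapshot, so one load_npz call restores the whole training state: model, optimizer, updater position, and extension state. The producing side is the snapshot extension, sketched here with an illustrative trigger:

# Writes full trainer snapshots that load_npz(args.resume, trainer) restores.
trainer.extend(extensions.snapshot(), trigger=(1, 'epoch'))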
github pstuvwx / Deep_VoiceChanger / convertor.py
def numpy_mode():
    with chainer.using_config('train', False):
        with chainer.no_backprop_mode():
            netA_path = input('enter netA path...')
            netB_path = input('enter netB path...')
            wave_path = input('enter wave path...')

            ds = dataset.WaveDataset(wave_path, -1, True)

            netA = Generator()
            netB = Generator()
            chainer.serializers.load_npz(netA_path, netA)
            chainer.serializers.load_npz(netB_path, netB)

            que_a = collections.deque()   # deque lives in collections, not queue
            que_ab = collections.deque()
            que_aba = collections.deque()

            gla = GLA()

            print('converting...')
            for i in tqdm.tqdm(range(ds.max//dataset.dif)):
                x_a = ds.get_example(i)
                x_a = chainer.dataset.convert.concat_examples([x_a], -1)
                x_a = chainer.Variable(x_a)

                x_ab = netA(x_a)
                x_aba = netB(x_ab)
github Aixile / chainer-gan-experiments / lsgan / train.py
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()

    if args.gen_class != '':
        gen = eval(args.gen_class)
    else:
        gen = DCGANGenerator(latent=args.latent_len, out_ch=args.image_channels)

    if args.dis_class != '':
        dis = eval(args.dis_class)
    else:
        dis = DCGANDiscriminator(base_size=64, down_layers=5, in_ch=args.image_channels, noise_all_layers=True, conv_as_last=True)

    if args.load_gen_model != '':
        serializers.load_npz(args.load_gen_model, gen)
        print("Generator model loaded")

    if args.load_dis_model != '':
        serializers.load_npz(args.load_dis_model, dis)
        print("Discriminator model loaded")

    if args.gpu >= 0:
        gen.to_gpu()
        dis.to_gpu()
        print("use gpu {}".format(args.gpu))

    opt_g = make_adam(gen, lr=args.learning_rate_g, beta1=0.5)
    opt_d = make_adam(dis, lr=args.learning_rate_d, beta1=0.5)

    train_dataset = getattr(datasets, args.load_dataset)(path=args.dataset_path)
    train_iter = chainer.iterators.MultiprocessIterator(
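Because both checkpoints are optional here, each load is guarded by an argument check. When a checkpoint only partially matches its target (for example after adding layers to a model), recent Chainer versions can also skip missing entries instead of raising; a hedged variation on the generator line:

# Partial restore: entries absent from the NPZ file are skipped rather
# than raising an error (strict defaults to True).
serializers.load_npz(args.load_gen_model, gen, strict=False)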
github mitaki28 / pixcaler / pixcaler / train-cycle.py
    trainer.extend(extensions.LogReport(trigger=preview_interval))
    trainer.extend(extensions.PlotReport(
        logging_keys,
        trigger=preview_interval,
    ))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'iteration'] + logging_keys,
    ), trigger=display_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    trainer.extend(out_image_cycle(upscaler.gen, downscaler.gen, 8, args.out), trigger=preview_interval)
    trainer.extend(CommandsExtension())

    if args.resume:
        # Resume from a snapshot
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    trainer.run()
github TadaoYamaoka / python-dlshogi / train_policy.py
parser.add_argument('--lr', type=float, default=0.01, help='learning rate')
parser.add_argument('--eval_interval', '-i', type=int, default=1000, help='eval interval')
args = parser.parse_args()

logging.basicConfig(format='%(asctime)s\t%(levelname)s\t%(message)s', datefmt='%Y/%m/%d %H:%M:%S', filename=args.log, level=logging.DEBUG)

model = PolicyNetwork()
model.to_gpu()

optimizer = optimizers.SGD(lr=args.lr)
optimizer.setup(model)

# Init/Resume
if args.initmodel:
    logging.info('Load model from {}'.format(args.initmodel))
    serializers.load_npz(args.initmodel, model)
if args.resume:
    logging.info('Load optimizer state from {}'.format(args.resume))
    serializers.load_npz(args.resume, optimizer)

logging.info('read kifu start')
# If a saved pickle file exists, load the training positions from it
# train data
train_pickle_filename = re.sub(r'\..*?$', '', args.kifulist_train) + '.pickle'
if os.path.exists(train_pickle_filename):
    with open(train_pickle_filename, 'rb') as f:
        positions_train = pickle.load(f)
    logging.info('load train pickle')
else:
    positions_train = read_kifu(args.kifulist_train)

# test data
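The Init/Resume block above is the manual-checkpointing counterpart to trainer snapshots: model parameters and optimizer state live in separate NPZ files, each restored with its own load_npz call. The saving side that pairs with it, with illustrative filenames:

# After training, persist model and optimizer state separately so the
# Init/Resume block above can restore either one on the next run.
serializers.save_npz('model_final.npz', model)
serializers.save_npz('optimizer_final.npz', optimizer)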