How to use the nnabla.functions.mean function in nnabla

To help you get started, we've selected a few nnabla examples based on popular ways nnabla.functions.mean is used in public projects.

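Before the project snippets, here is a minimal, self-contained sketch of the basic API; the toy array and the values in the comments are illustrative assumptions, not taken from any of the projects below. F.mean averages a Variable's elements, either over all of them or along given axes, and in the snippets that follow it is almost always used to reduce a per-sample loss to a scalar, as in F.mean(F.softmax_cross_entropy(pred, label)).

import numpy as np
import nnabla as nn
import nnabla.functions as F

# Build a tiny 2x3 input Variable from a NumPy array.
x = nn.Variable.from_numpy_array(
    np.arange(6, dtype=np.float32).reshape(2, 3))

m_all = F.mean(x)                         # mean over all elements -> scalar
m_row = F.mean(x, axis=1, keepdims=True)  # per-row mean -> shape (2, 1)

m_all.forward()
m_row.forward()
print(m_all.d)  # 2.5
print(m_row.d)  # [[1.] [4.]]

The axis argument also accepts a tuple of axes, and keepdims=True preserves the reduced dimensions with size 1, which is convenient when the mean is broadcast back against the original array.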

github sony / nnabla-examples / penn-treebank / train.py
    e_list = [PF.embed(x_elm, num_words, state_size, name=embed_name)
              for x_elm in F.split(x, axis=1)]
    t_list = F.split(t, axis=1)
    loss = 0
    for i, (e_t, t_t) in enumerate(zip(e_list, t_list)):
        if dropout:
            h1 = l1(F.dropout(e_t, dropout_rate), w_init, b_init)
            h2 = l2(F.dropout(h1, dropout_rate), w_init, b_init)
            y = PF.affine(F.dropout(h2, dropout_rate),
                          num_words, name=pred_name)
        else:
            h1 = l1(e_t, w_init, b_init)
            h2 = l2(h1, w_init, b_init)
            y = PF.affine(h2, num_words, name=pred_name)
        t_t = F.reshape(t_t, [batch_size, 1])
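        # Average the per-sample cross-entropy into a scalar loss for this timestep.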
        loss += F.mean(F.softmax_cross_entropy(y, t_t))
    loss /= float(i+1)

    return loss
github sony / nnabla / examples / cpp / forward_check / mnist / vat.py
    # Create DataIterators for the labeled, unlabeled, and validation datasets
    di_l = DataIterator(args.batchsize_l, [x_l, t_l])
    di_u = DataIterator(args.batchsize_u, [x_u])
    di_v = DataIterator(args.batchsize_v, [x_v, t_v])

    # Create networks
    # feed-forward-net building function
    def forward(x, test=False):
        return mlp_net(x, n_h, n_y, test)

    # Net for learning labeled data
    xl = nn.Variable((args.batchsize_l,) + shape_x, need_grad=False)
    hl = forward(xl, test=False)
    tl = nn.Variable((args.batchsize_l, 1), need_grad=False)
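    # Supervised loss: batch mean of the softmax cross-entropy on labeled data.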
    loss_l = F.mean(F.softmax_cross_entropy(hl, tl))

    # Net for learning unlabeled data
    xu = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
    r = nn.Variable((args.batchsize_u,) + shape_x, need_grad=True)
    eps = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
    loss_u, yu = vat(xu, r, eps, forward, distance)

    # Net for evaluating validation data
    xv = nn.Variable((args.batchsize_v,) + shape_x, need_grad=False)
    hv = forward(xv, test=True)
    tv = nn.Variable((args.batchsize_v, 1), need_grad=False)

    # Create solver
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())
github sony / nnabla-examples / imagenet-classification / multi_device_multi_process_classification.py
    Args:

        tiny: Tiny ImageNet mode if True.
    """
    data_size = 320
    nn_in_size = 224
    if tiny:
        data_size = 64
        nn_in_size = 56
    image = nn.Variable([args.batch_size, 3, data_size, data_size])
    label = nn.Variable([args.batch_size, 1])
    pimage = image_preprocess(image, nn_in_size, data_size, test)
    pred, hidden = model_resnet.resnet_imagenet(
        pimage, num_classes, args.num_layers, args.shortcut_type, test=test, tiny=tiny)
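    # Classification loss: batch mean of the softmax cross-entropy.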
    loss = F.mean(F.softmax_cross_entropy(pred, label))
    Model = namedtuple('Model', ['image', 'label', 'pred', 'loss', 'hidden'])
    return Model(image, label, pred, loss, hidden)
github sony / nnabla / examples / cpp / forward_check / imagenet / classification.py
        tiny: Tiny ImageNet mode if True.
    """
    data_size = 320
    nn_in_size = 224
    if tiny:
        data_size = 64
        nn_in_size = 56
    image = nn.Variable([args.batch_size, 3, data_size, data_size])
    label = nn.Variable([args.batch_size, 1])
    #pimage = image_preprocess(image, nn_in_size)
    # pred, hidden = model_resnet.resnet_imagenet(
    #    pimage, num_classes, args.num_layers, args.shortcut_type, test=test, tiny=tiny)
    pred, hidden = model_resnet.resnet_imagenet(
        image, num_classes, args.num_layers, args.shortcut_type, test=test, tiny=tiny)
    loss = F.mean(F.softmax_cross_entropy(pred, label))
    Model = namedtuple('Model', ['image', 'label', 'pred', 'loss', 'hidden'])
    return Model(image, label, pred, loss, hidden)
github sony / nnabla-examples / GANs / pggan / trainer.py
            while self.di.epoch == current_epoch:
                img, _ = self.di.next()
                x = nn.Variable.from_numpy_array(img)

                z = F.randn(shape=(batch_size, self.n_latent, 1, 1))
                z = pixel_wise_feature_vector_normalization(
                    z) if self.hyper_sphere else z
                y = self.gen.transition(z, alpha, test=True)

                y.need_grad = False
                x_r = F.average_pooling(x, kernel=kernel)

                p_real = self.dis.transition(x_r, alpha)
                p_fake = self.dis.transition(y, alpha)

                # Least-squares discriminator loss: batch mean of the squared
                # distances of real/fake scores from their targets.
                loss_dis = F.mean(F.pow_scalar((p_real - 1), 2.)
                                  + F.pow_scalar(p_fake, 2.) * self.l2_fake_weight)

                if itr % self.n_critic + 1 == self.n_critic:
                    with nn.parameter_scope("discriminator"):
                        self.solver_dis.set_parameters(nn.get_parameters(),
                                                       reset=False, retain_state=True)
                        self.solver_dis.zero_grad()
                        loss_dis.backward(clear_buffer=True)
                        self.solver_dis.update()

                z = F.randn(shape=(batch_size, self.n_latent, 1, 1))
                z = pixel_wise_feature_vector_normalization(
                    z) if self.hyper_sphere else z
                y = self.gen.transition(z, alpha, test=False)
                p_fake = self.dis.transition(y, alpha)
github sony / nnabla-examples / mnist-collection / siamese.py
    image0 = nn.Variable([args.batch_size, 1, 28, 28])
    image1 = nn.Variable([args.batch_size, 1, 28, 28])
    label = nn.Variable([args.batch_size])
    # Create prediction graph.
    pred = mnist_lenet_siamese(image0, image1, test=False)
    # Create loss function.
    loss = F.mean(contrastive_loss(pred, label, margin))

    # TEST
    # Create input variables.
    vimage0 = nn.Variable([args.batch_size, 1, 28, 28])
    vimage1 = nn.Variable([args.batch_size, 1, 28, 28])
    vlabel = nn.Variable([args.batch_size])
    # Create prediction graph.
    vpred = mnist_lenet_siamese(vimage0, vimage1, test=True)
    vloss = F.mean(contrastive_loss(vpred, vlabel, margin))

    # Create Solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_vloss = M.MonitorSeries("Test loss", monitor, interval=10)

    # Initialize DataIterator for MNIST.
    rng = np.random.RandomState(313)
    data = siamese_data_iterator(args.batch_size, True, rng)
    vdata = siamese_data_iterator(args.batch_size, False, rng)
github sony / nnabla-examples / imagenet-classification / classification.py
    t_model.pred.persistent = True  # Not clearing buffer of pred in backward

    # TODO: need_grad should be passed to get_unlinked_variable after v1.0.3 fix.
    t_pred2 = t_model.pred.get_unlinked_variable()
    t_pred2.need_grad = False

    # Training error: batch mean of the top-N error.
    t_e = F.mean(F.top_n_error(t_pred2, t_model.label))
    v_model = get_model(
        args, num_classes, test=True, tiny=args.tiny_mode)
    v_model.pred.persistent = True  # Not clearing buffer of pred in forward

    # TODO: need_grad should be passed to get_unlinked_variable after v1.0.3 fix.
    v_pred2 = v_model.pred.get_unlinked_variable()
    v_pred2.need_grad = False

    v_e = F.mean(F.top_n_error(v_pred2, v_model.label))

    # Save_nnp_Epoch0
    contents = save_nnp({'x': v_model.image}, {
                        'y': v_model.pred}, args.batch_size)
    save.save(os.path.join(args.model_save_path,
                           'Imagenet_result_epoch0.nnp'), contents)

    # Create Solver.
    solver = S.Momentum(args.learning_rate, 0.9)
    solver.set_parameters(nn.get_parameters())

    start_point = 0
    if args.checkpoint is not None:
        # load weights and solver state info from specified checkpoint file.
        start_point = load_checkpoint(args.checkpoint, solver)
github sony / nnabla-examples / GANs / pggan / trainer.py
                if itr % self.n_critic + 1 == self.n_critic:
                    with nn.parameter_scope("discriminator"):
                        self.solver_dis.set_parameters(nn.get_parameters(),
                                                       reset=False, retain_state=True)
                        self.solver_dis.zero_grad()
                        loss_dis.backward(clear_buffer=True)
                        self.solver_dis.update()

                z = F.randn(shape=(batch_size, self.n_latent, 1, 1))
                z = pixel_wise_feature_vector_normalization(
                    z) if self.hyper_sphere else z
                y = self.gen.transition(z, alpha, test=False)
                p_fake = self.dis.transition(y, alpha)

                # Least-squares generator loss: batch mean of the squared
                # distance of fake scores from 1.
                loss_gen = F.mean(F.pow_scalar((p_fake - 1), 2))
                with nn.parameter_scope("generator"):
                    self.solver_gen.set_parameters(
                        nn.get_parameters(), reset=False, retain_state=True)
                    self.solver_gen.zero_grad()
                    loss_gen.backward(clear_buffer=True)
                    self.solver_gen.update()

                itr += 1
                global_itr += 1.
                alpha = global_itr / total_itr

            if epoch % self.save_image_interval + 1 == self.save_image_interval:
                z = nn.Variable.from_numpy_array(self.z_test)
                z = pixel_wise_feature_vector_normalization(
                    z) if self.hyper_sphere else z
                y = self.gen.transition(z, alpha)
github sony / nnabla / tutorial / finetuning.py
def loss_function(pred, label):
    """
        Compute loss.
    """
    loss = F.mean(F.softmax_cross_entropy(pred, label))
    return loss