How to use the nnabla.functions module in nnabla

To help you get started, we’ve selected a few nnabla.functions examples, based on popular ways the module is used in public projects.
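
All of the snippets below use the conventional nnabla aliases: import nnabla as nn, import nnabla.functions as F, import nnabla.parametric_functions as PF, plus import nnabla.solvers as S and import nnabla.monitor as M where solvers and monitors appear. As a minimal, self-contained sketch (the layer sizes and variable names here are illustrative, not taken from the projects below), a typical call chain through nnabla.functions looks like this:

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

# A batch of 8 four-dimensional inputs and their integer class labels.
x = nn.Variable((8, 4))
t = nn.Variable((8, 1))

# One affine layer followed by a softmax cross-entropy loss from nnabla.functions.
h = PF.affine(x, 10, name="fc")
loss = F.mean(F.softmax_cross_entropy(h, t))

# Feed data and execute the graph once.
x.d = np.random.randn(8, 4)
t.d = np.random.randint(0, 10, size=(8, 1))
loss.forward()
print(loss.d)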

github sony / nnabla / python / benchmark / function / test_loss.py View on Github
def test_categorical_classification_loss(inspecs, loss, nnabla_opts):
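    # Look up the loss function by name on nnabla.functions (e.g. 'softmax_cross_entropy').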
    func = getattr(F, loss)
    fb = FunctionBenchmark(
        func, inspecs, [], dict(axis=1),
        nnabla_opts.ext, nnabla_opts.ext_kwargs)
    fb.benchmark()
    fb.write(writer=nnabla_opts.function_benchmark_writer)
github sony / nnabla-examples / reduction / cifar10 / quantization / models.py View on Github
    def res_unit(x, scope_name, dn=False):
        # 'test' is captured from the enclosing function in the original source.
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> BinaryTanh
            with nn.parameter_scope("conv1"):
                h = PF.binary_connect_convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                                  with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.binary_tanh(h)
            # Conv -> BN -> BinaryTanh
            with nn.parameter_scope("conv2"):
                h = PF.binary_connect_convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                                  with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.binary_tanh(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.binary_connect_convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                                  with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> BinaryTanh
            h = F.binary_tanh(h + x)

            # Maxpooling
            if dn:
github sony / nnabla-examples / word-embedding / word_embedding.py View on Github
    # - input variables
    xl = nn.Variable((size,))  # variable for word
    yl = nn.Variable((size,))  # variable for context

    # Embed layers for word embedding function
    # - f_embed : word index x to get y, the n_dim vector
    # --  for each sample in a minibatch
    hx = PF.embed(xl, n_word, n_dim, name="e1")  # feature vector for word
    hy = PF.embed(yl, n_word, n_dim, name="e1")  # feature vector for context
    hl = F.sum(hx * hy, axis=1)

    # -- Approximated likelihood of context prediction
    # pos: positive (true) context pairs, neg: negative samples
    tl = nn.Variable([size, ], need_grad=False)
    loss = F.sigmoid_cross_entropy(hl, tl)
    loss = F.mean(loss)

    # Model for test of searching similar words
    xr = nn.Variable((1,), need_grad=False)
    hr = PF.embed(xr, n_word, n_dim, name="e1")  # feature vector for test

    # Create solver
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    monitor = M.Monitor(args.work_dir)
    monitor_loss = M.MonitorSeries(
        "Training loss", monitor, interval=args.monitor_interval)
    monitor_time = M.MonitorTimeElapsed(
        "Training time", monitor, interval=args.monitor_interval)
github sony / nnabla / python / src / nnabla / backward_function / batch_normalization.py View on Github
        v_eps_rsqrt1 = (rv + eps) ** (-1.0 / 2.0)  # 1 / sqrt(running_var + eps)

        # w.r.t. x
        if prop_down[0]:
            g_x0_ = g_dg0 * dy * v_eps_rsqrt1
            if accum[0]:
                g_x0 += g_x0_
            else:
                g_x0.copy_from(g_x0_)

        # w.r.t. beta
        # zero, do nothing

        # w.r.t. gamma
        if prop_down[2]:
            g_g0_ = F.sum(g_dx0 * dy * v_eps_rsqrt1, axes, True)
            if accum[2]:
                g_g0 += g_g0_
            else:
                g_g0.copy_from(g_g0_)

        # no backward w.r.t. rm and rv

        # w.r.t. dy
        if prop_down[5]:
            g_dy_ = g_dx0 * g0 * v_eps_rsqrt1 + \
                g_dg0 * (x0 - rm) * v_eps_rsqrt1 + g_db0
            if accum[5]:
                g_dy += g_dy_
            else:
                g_dy.copy_from(g_dy_)
github sony / nnabla-examples / GANs / pix2pixHD / models.py View on Github
    def instance_norm_lrelu(self, x, alpha=0.2):
        norm = PF.instance_normalization(x, no_scale=True, no_bias=True)
        return F.leaky_relu(norm, alpha=alpha, inplace=True)
github sony / nnabla / tutorial / finetuning.py View on Github
def construct_networks(args, images, model, num_class, test):
    try:
        pooled = model(images, force_global_pooling=1,
                       use_up_to="pool", training=not test)
    except Exception:
        # Some pretrained models do not accept force_global_pooling; retry without it.
        pooled = model(images, use_up_to="pool", training=not test)

    with nn.parameter_scope("finetuning"):
        if args.model == "VGG":
            pooled = F.relu(pooled)

            with nn.parameter_scope("additional_fc_1"):
                pooled = PF.affine(pooled, 4096)
            pooled = F.relu(pooled)
            if not test:
                pooled = F.dropout(pooled, 0.5)

            with nn.parameter_scope("additional_fc_2"):
                pooled = PF.affine(pooled, 4096)
            pooled = F.relu(pooled)
            if not test:
                pooled = F.dropout(pooled, 0.5)

        with nn.parameter_scope("last_fc"):
            pred = PF.affine(pooled, num_class)
github sony / nnabla-examples / meta-learning / net.py View on Github
def forward_conv(inp, bn_batch_stat, args, init_params, activation=F.relu):
    hidden1 = conv_block(inp, 'layer1', bn_batch_stat,
                         activation, args, init_params)
    hidden2 = conv_block(hidden1, 'layer2', bn_batch_stat,
                         activation, args, init_params)
    hidden3 = conv_block(hidden2, 'layer3', bn_batch_stat,
                         activation, args, init_params)
    hidden4 = conv_block(hidden3, 'layer4', bn_batch_stat,
                         activation, args, init_params)

    if args.datasource != 'omniglot' or args.method != 'maml':
        # hidden4 = F.reshape(hidden4, (hidden4.d.shape[0], -1), inplace=False)
        pass
    else:
        hidden4 = F.mean(hidden4, (2, 3))

    if init_params is None or 'layer5/affine/W' not in init_params:
        output = PF.affine(hidden4, args.num_classes, name='layer5')
    else:
        output = F.affine(
            hidden4, init_params['layer5/affine/W'], init_params['layer5/affine/b'])
    return output
github sony / nnabla / examples / cpp / forward_check / mnist / dcgan.py View on Github
    # Fake path
    z = nn.Variable([args.batch_size, 100, 1, 1])
    fake = generator(z)
    fake.persistent = True  # Not to clear at backward
    pred_fake = discriminator(fake)
    loss_gen = F.mean(F.sigmoid_cross_entropy(
        pred_fake, F.constant(1, pred_fake.shape)))
    fake_dis = fake.unlinked()
    pred_fake_dis = discriminator(fake_dis)
    loss_dis = F.mean(F.sigmoid_cross_entropy(
        pred_fake_dis, F.constant(0, pred_fake_dis.shape)))

    # Real path
    x = nn.Variable([args.batch_size, 1, 28, 28])
    pred_real = discriminator(x)
    loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real,
                                               F.constant(1, pred_real.shape)))

    # Create Solver.
    solver_gen = S.Adam(args.learning_rate, beta1=0.5)
    solver_dis = S.Adam(args.learning_rate, beta1=0.5)
    with nn.parameter_scope("gen"):
        solver_gen.set_parameters(nn.get_parameters())
    with nn.parameter_scope("dis"):
        solver_dis.set_parameters(nn.get_parameters())

    # Create monitor.
    import nnabla.monitor as M
    monitor = M.Monitor(args.monitor_path)
    monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10)
    monitor_loss_dis = M.MonitorSeries(
        "Discriminator loss", monitor, interval=10)
    monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100)
github sony / nnabla-examples / reduction / cifar10 / resnet2rnn / classification.py View on Github
    # TRAIN
    maps = 64
    data_iterator = data_iterator_cifar10
    c = 3
    h = w = 32
    n_train = 50000
    n_valid = 10000
    # Create input variables.
    image = nn.Variable([args.batch_size, c, h, w])
    label = nn.Variable([args.batch_size, 1])
    # Create `teacher` model_prediction graph.
    pred = model_prediction(image, maps=maps, test=False)
    pred.persistent = True
    # Create loss function.
    loss = F.mean(F.softmax_cross_entropy(pred, label))

    # TEST
    # Create input variables.
    vimage = nn.Variable([args.batch_size, c, h, w])
    vlabel = nn.Variable([args.batch_size, 1])
    # Create teacher prediction graph.
    vpred = model_prediction(vimage, maps=maps, test=True)

    # Create Solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitor.
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
github sony / nnabla / examples / cpp / forward_check / mnist / dcgan.py View on Github
    # 'bn' and 'maxh' are defined in the enclosing discriminator function in the original source.
    def downsample2(xx, c):
        return PF.convolution(xx, c, (3, 3), pad=(1, 1), stride=(2, 2), with_bias=False)

    assert maxh // 8 > 0
    with nn.parameter_scope("dis"):
        # (1, 28, 28) --> (32, 16, 16)
        with nn.parameter_scope("conv1"):
            c1 = F.elu(bn(PF.convolution(x, maxh // 8,
                                         (3, 3), pad=(3, 3), stride=(2, 2), with_bias=False)))
        # (32, 16, 16) --> (64, 8, 8)
        with nn.parameter_scope("conv2"):
            c2 = F.elu(bn(downsample2(c1, maxh // 4)))
        # (64, 8, 8) --> (128, 4, 4)
        with nn.parameter_scope("conv3"):
            c3 = F.elu(bn(downsample2(c2, maxh // 2)))
        # (128, 4, 4) --> (256, 4, 4)
        with nn.parameter_scope("conv4"):
            c4 = bn(PF.convolution(c3, maxh, (3, 3),
                                   pad=(1, 1), with_bias=False))
        # (256, 4, 4) --> (1,)
        with nn.parameter_scope("fc1"):
            f = PF.affine(c4, 1)
    if output_hidden:
        return f, [c1, c2, c3, c4]
    return f