How to use the nnabla.parametric_functions.affine function in nnabla

To help you get started, we've selected a few nnabla examples based on popular ways nnabla.parametric_functions.affine (imported as PF.affine below) is used in public projects.

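All of the snippets below call PF.affine(h, n_outmaps, ...), which appends a fully connected layer to the computation graph and registers its weight and bias in the current parameter scope. As a quick orientation, here is a minimal, self-contained sketch (the shapes and the scope name 'classifier' are illustrative, not taken from the examples below):

import numpy as np
import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 3, 32, 32))          # a batch of 8 CIFAR-sized images
# affine flattens every dimension after base_axis (default 1), so the
# 3x32x32 input is treated as a 3072-dimensional vector per sample.
y = PF.affine(x, 10, name='classifier')  # registers 'classifier/affine/W' and 'classifier/affine/b'

x.d = np.random.randn(*x.shape).astype(np.float32)
y.forward()
print(y.shape)                           # (8, 10)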

github sony / nnabla-examples / reduction / cifar10 / factorized-layers / models.py View on Github external
    # Conv -> BN -> ReLU on the input. The excerpt began mid-call; the
    # train-time augmentation is reconstructed around its visible
    # flip_lr=True argument.
    with nn.parameter_scope("conv1"):
        if not test:
            image = F.image_augmentation(image, flip_lr=True)
            image.need_grad = False
        h = PF.convolution(image, maps, kernel=(3, 3), pad=(1, 1),
                           with_bias=False)
        h = PF.batch_normalization(h, batch_stat=not test)
        h = F.relu(h)

    h = res_unit(h, "conv2", False)    # -> 32x32
    h = res_unit(h, "conv3", True)     # -> 16x16
    h = res_unit(h, "conv4", False)    # -> 16x16
    h = res_unit(h, "conv5", True)     # -> 8x8
    h = res_unit(h, "conv6", False)    # -> 8x8
    h = res_unit(h, "conv7", True)     # -> 4x4
    h = res_unit(h, "conv8", False)    # -> 4x4
    h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
    pred = PF.affine(h, ncls)

    return pred
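For reference, a sketch of wiring this prediction function into a training graph; the function name cifar10_resnet23_prediction and ncls = 10 (the CIFAR-10 class count) are assumptions about the surrounding file, not shown in the excerpt:

import nnabla as nn
import nnabla.functions as F

image = nn.Variable((64, 3, 32, 32))
label = nn.Variable((64, 1))
pred = cifar10_resnet23_prediction(image, test=False)  # assumed function name
loss = F.mean(F.softmax_cross_entropy(pred, label))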
github sony / nnabla-examples / penn-treebank / train.py View on Github external
def get_loss(l1, l2, x, t, w_init, b_init, num_words, batch_size,
             state_size, dropout=False, dropout_rate=0.5,
             embed_name='embed', pred_name='pred'):
    e_list = [PF.embed(x_elm, num_words, state_size, name=embed_name)
              for x_elm in F.split(x, axis=1)]
    t_list = F.split(t, axis=1)
    loss = 0
    for i, (e_t, t_t) in enumerate(zip(e_list, t_list)):
        if dropout:
            h1 = l1(F.dropout(e_t, dropout_rate), w_init, b_init)
            h2 = l2(F.dropout(h1, dropout_rate), w_init, b_init)
            y = PF.affine(F.dropout(h2, dropout_rate),
                          num_words, name=pred_name)
        else:
            h1 = l1(e_t, w_init, b_init)
            h2 = l2(h1, w_init, b_init)
            y = PF.affine(h2, num_words, name=pred_name)
        t_t = F.reshape(t_t, [batch_size, 1])
        loss += F.mean(F.softmax_cross_entropy(y, t_t))
    loss /= float(i+1)

    return loss
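Note that every timestep calls PF.affine with the same name=pred_name, so all steps share a single output weight and bias. A minimal sketch of this sharing behavior (shapes illustrative):

import nnabla as nn
import nnabla.parametric_functions as PF

x1 = nn.Variable((4, 16))
x2 = nn.Variable((4, 16))
y1 = PF.affine(x1, 8, name='pred')  # creates 'pred/affine/W' and 'pred/affine/b'
y2 = PF.affine(x2, 8, name='pred')  # reuses the same two parameters
print(len(nn.get_parameters()))     # 2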
github sony / nnabla-examples / mnist-collection / vat.py View on Github external
# function name and signature inferred from the docstring (the excerpt
# began mid-docstring)
def mlp_net(x, n_h, n_y, test=False):
    """
    Args:
        x(`~nnabla.Variable`): N-D array
        n_h(int): number of units in an intermediate layer
        n_y(int): number of classes
        test: operation type train=True, test=False

    Returns:
        ~nnabla.Variable: h
    """

    h = x
    with nn.parameter_scope("fc1"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc2"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc3"):
        h = PF.affine(h, n_y)
    return h
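Wrapping PF.affine in nn.parameter_scope("fc1") registers its parameters as 'fc1/affine/W' and 'fc1/affine/b'; passing name='fc1' directly is shorthand for the same scoping. A minimal sketch:

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((16, 784))
with nn.parameter_scope("fc1"):
    h1 = PF.affine(x, 100)           # registers 'fc1/affine/W', 'fc1/affine/b'
h2 = PF.affine(x, 100, name="fc1")   # same scope, so the parameters are reused
print(sorted(nn.get_parameters().keys()))  # ['fc1/affine/W', 'fc1/affine/b']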
github peisuke / DeepLearningSpeedComparison / nnabla / mnist / mnist_mlp.py View on Github external
def mlp(image, test=False):
    image /= 255.0
    h = F.relu(PF.affine(image, 1000, name='l1'), inplace=True)
    h = F.relu(PF.affine(h, 1000, name='l2'), inplace=True)
    h = PF.affine(h, 10, name='l3')
    return F.softmax(h)
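Returning F.softmax is convenient for inference, but for training it is usually better to keep the logits and use F.softmax_cross_entropy, which fuses the two steps numerically stably. A sketch of such a training variant (same layer sizes assumed):

import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

def mlp_logits(image):
    h = F.relu(PF.affine(image / 255.0, 1000, name='l1'), inplace=True)
    h = F.relu(PF.affine(h, 1000, name='l2'), inplace=True)
    return PF.affine(h, 10, name='l3')   # logits; no softmax here

x = nn.Variable((32, 1, 28, 28))
t = nn.Variable((32, 1))
loss = F.mean(F.softmax_cross_entropy(mlp_logits(x), t))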
github sony / nnabla / examples / cpp / forward_check / mnist / vat.py View on Github external
# function name and signature inferred from the docstring (the excerpt
# began mid-docstring)
def mlp_net(x, n_h, n_y, test=False):
    """
    Args:
        x(`~nnabla.Variable`): N-D array
        n_h(int): number of units in an intermediate layer
        n_y(int): number of classes
        test: operation type train=True, test=False

    Returns:
        ~nnabla.Variable: log(p(y|x))
    """

    h = x
    with nn.parameter_scope("fc1"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc2"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc3"):
        h = PF.affine(h, n_y)
    return h
github sony / nnabla-examples / mnist-collection / dcgan.py View on Github external
with nn.parameter_scope("conv1"):
            c1 = F.elu(bn(PF.convolution(x, maxh / 8,
                                         (3, 3), pad=(3, 3), stride=(2, 2), with_bias=False)))
        # (32, 16, 16) --> (64, 8, 8)
        with nn.parameter_scope("conv2"):
            c2 = F.elu(bn(downsample2(c1, maxh / 4)))
        # (64, 8, 8) --> (128, 4, 4)
        with nn.parameter_scope("conv3"):
            c3 = F.elu(bn(downsample2(c2, maxh / 2)))
        # (128, 4, 4) --> (256, 4, 4)
        with nn.parameter_scope("conv4"):
            c4 = bn(PF.convolution(c3, maxh, (3, 3),
                                   pad=(1, 1), with_bias=False))
        # (256, 4, 4) --> (1,)
        with nn.parameter_scope("fc1"):
            f = PF.affine(c4, 1)
    if output_hidden:
        return f, [c1, c2, c3, c4]
    return f
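The final PF.affine(c4, 1) works directly on the (B, 256, 4, 4) activation because affine flattens every dimension after base_axis (1 by default), giving each sample a 256*4*4 = 4096-dimensional input. A quick shape check:

import nnabla as nn
import nnabla.parametric_functions as PF

c4 = nn.Variable((8, 256, 4, 4))
f = PF.affine(c4, 1, name="fc1")  # weight shape (4096, 1)
print(f.shape)                    # (8, 1)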
github sony / nnabla / examples / vision / mnist / vat.py View on Github external
# function name and signature inferred from the docstring (the excerpt
# began mid-docstring)
def mlp_net(x, n_h, n_y, test=False):
    """
    Args:
        x(`~nnabla.Variable`): N-D array
        n_h(int): number of units in an intermediate layer
        n_y(int): number of classes
        test: operation type train=True, test=False

    Returns:
        ~nnabla.Variable: log(p(y|x))
    """

    h = x
    with nn.parameter_scope("fc1"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc2"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc3"):
        h = PF.affine(h, n_y)
    return h
github sony / nnabla-examples / reinforcement_learning / dqn / learner.py View on Github external
def q_mlp(s, num_actions, test=False):
    h = s
    for i, n in enumerate([64]):
        with nn.parameter_scope('fc%d' % (i + 1)):
            h = PF.affine(h, n, fix_parameters=test)
            h = F.relu(h)
    return PF.affine(h, num_actions, name='fc_fin', fix_parameters=test)
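Here fix_parameters=test builds the evaluation graph with the affine parameters marked need_grad=False, keeping a frozen network out of backpropagation. A minimal sketch:

import nnabla as nn
import nnabla.parametric_functions as PF

s = nn.Variable((32, 4))
q = PF.affine(s, 2, name='fc_fin', fix_parameters=True)
w = nn.get_parameters(grad_only=False)['fc_fin/affine/W']
print(w.need_grad)  # False: excluded from gradient computation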
github peisuke / DeepLearningSpeedComparison / nnabla / vgg16 / predict.py View on Github external
def vgg16(image):  # assumed function name; the excerpt begins at the second block
    # first block reconstructed to match the VGG-16 pattern of the layers below
    h = F.relu(PF.convolution(image, 64, (3, 3), pad=(1, 1), stride=(1, 1), name='conv1'))
    h = F.relu(PF.convolution(h, 64, (3, 3), pad=(1, 1), stride=(1, 1), name='conv2'))
    h = F.max_pooling(h, (2, 2))
    h = F.relu(PF.convolution(h, 128, (3, 3), pad=(1, 1), stride=(1, 1), name='conv3'))
    h = F.relu(PF.convolution(h, 128, (3, 3), pad=(1, 1), stride=(1, 1), name='conv4'))
    h = F.max_pooling(h, (2, 2))
    h = F.relu(PF.convolution(h, 256, (3, 3), pad=(1, 1), stride=(1, 1), name='conv5'))
    h = F.relu(PF.convolution(h, 256, (3, 3), pad=(1, 1), stride=(1, 1), name='conv6'))
    h = F.relu(PF.convolution(h, 256, (3, 3), pad=(1, 1), stride=(1, 1), name='conv7'))
    h = F.max_pooling(h, (2, 2))
    h = F.relu(PF.convolution(h, 512, (3, 3), pad=(1, 1), stride=(1, 1), name='conv8'))
    h = F.relu(PF.convolution(h, 512, (3, 3), pad=(1, 1), stride=(1, 1), name='conv9'))
    h = F.relu(PF.convolution(h, 512, (3, 3), pad=(1, 1), stride=(1, 1), name='conv10'))
    h = F.max_pooling(h, (2, 2))
    h = F.relu(PF.convolution(h, 512, (3, 3), pad=(1, 1), stride=(1, 1), name='conv11'))
    h = F.relu(PF.convolution(h, 512, (3, 3), pad=(1, 1), stride=(1, 1), name='conv12'))
    h = F.relu(PF.convolution(h, 512, (3, 3), pad=(1, 1), stride=(1, 1), name='conv13'))
    h = F.max_pooling(h, (2, 2))
    h = PF.affine(h, 4096, name='fc1')
    h = F.relu(h)
    h = PF.affine(h, 4096, name='fc2')
    h = F.relu(h)
    h = PF.affine(h, 1000, name='fc3')
    return F.softmax(h)
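Finally, a sketch of running this predictor on a dummy batch (the function name vgg16 follows the reconstruction at the top of the snippet and is an assumption):

import numpy as np
import nnabla as nn

x = nn.Variable((1, 3, 224, 224))
y = vgg16(x)  # assumed name of the excerpted function
x.d = np.random.rand(*x.shape).astype(np.float32)
y.forward()
print(np.argmax(y.d))  # index of the most probable class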