How to use the nnabla.parametric_functions module in nnabla

To help you get started, we’ve selected a few nnabla examples that show how nnabla.parametric_functions is commonly used in public projects.
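
All of the snippets below assume the conventional nnabla import aliases (plus NumPy, which some snippets use). Repository-specific helpers such as act, update_variable_dict, and reduce_maps are defined elsewhere in each project:

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF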

github sony / nnabla-examples / cifar10-100-collection / models.py View on Github
def res_unit(x, scope_name, dn=False):
    # `test` (bool) and `act` (activation function) are captured from the
    # enclosing scope in the original source.
    C = x.shape[1]
    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Nonlinear
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = act(h)
        # Conv -> BN -> Nonlinear
        with nn.parameter_scope("conv2"):
            h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = act(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"):
            h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
        # Residual -> Nonlinear
        h = act(F.add2(h, x, inplace=True))
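
The nested nn.parameter_scope blocks above determine the keys under which PF.convolution and PF.batch_normalization register their parameters. A minimal sketch of that mechanism (scope names and shapes here are illustrative, not from the excerpt):

x = nn.Variable((8, 64, 32, 32))
with nn.parameter_scope("res1/conv1"):
    h = PF.convolution(x, 32, kernel=(1, 1), with_bias=False)
print(list(nn.get_parameters().keys()))  # ['res1/conv1/conv/W']
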
github sony / nnabla-examples / reinforcement_learning / dqn / learner.py View on Github
def _q_cnn(inp, convs, hiddens, num_actions, test=False, variable_dict=None):
    h = inp
    # Build convs
    for i, (o, k, s) in enumerate(convs):
        name = 'conv{}'.format(i + 1)
        h = F.relu(PF.convolution(h, o, (k, k), stride=(
            s, s), fix_parameters=test, name=name))
        update_variable_dict(variable_dict, h, name)
    # Build affines
    for i, o in enumerate(hiddens):
        name = 'fc{}'.format(i + 1)
        h = F.relu(PF.affine(h, o, fix_parameters=test, name=name))
        update_variable_dict(variable_dict, h, name)

    return PF.affine(h, num_actions, fix_parameters=test, name='fc_fin')
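
A usage sketch for the excerpt above: the (outmaps, kernel, stride) triples and hidden widths follow the common Atari DQN configuration and are assumptions, not part of the excerpt; update_variable_dict is assumed to be a no-op when variable_dict is None, as in the repository:

inp = nn.Variable((32, 4, 84, 84))  # batch of 4 stacked 84x84 frames (assumed)
q = _q_cnn(inp, convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
           hiddens=[512], num_actions=4)
print(q.shape)  # (32, 4): one Q-value per action
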
github sony / nnabla-examples / reduction / mnist / svd / models.py View on Github
def mnist_lenet_prediction(image, scope="reference", test=False):
    """
    Construct LeNet for MNIST.
    """
    with nn.parameter_scope(scope):
        image /= 255.0
        c1 = PF.convolution(image, 16, (5, 5), name='conv1')
        c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
        c2 = PF.convolution(c1, 16, (5, 5), name='conv2')
        c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)
        c3 = F.relu(PF.affine(c2, 50, name='fc3'), inplace=True)
        c4 = PF.affine(c3, 10, name='fc4')
    return c4
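
Building the graph and checking the output shape (the batch size is an assumption):

image = nn.Variable((64, 1, 28, 28))
logits = mnist_lenet_prediction(image, scope="reference", test=True)
print(logits.shape)  # (64, 10)
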
github sony / nnabla / examples / vision / mnist / dcgan.py View on Github
def upsample2(x, c):
    # Twice (2x) upsampling with deconvolution.
    return PF.deconvolution(x, c, kernel=(4, 4), pad=(1, 1), stride=(2, 2), with_bias=False)
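
With kernel (4, 4), stride (2, 2), and pad (1, 1), the output side length is (i - 1) * 2 - 2 + 4 = 2 * i, i.e. exactly doubled. A quick shape check (values illustrative):

x = nn.Variable((1, 8, 16, 16))
y = upsample2(x, 8)
print(y.shape)  # (1, 8, 32, 32)
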
github sony / nnabla / examples / vision / cifar10 / multi_device_multi_process_classification.py View on Github
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C, C // 2, kernel=(1, 1)),
                    rng=rng)
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C // 2, C // 2, kernel=(3, 3)),
                    rng=rng)
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C // 2, C, kernel=(1, 1)),
                    rng=rng)
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
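
The excerpt is cut off at its downsampling branch, but the recurring pattern is clear: each convolution pairs a Glorot-uniform weight initializer, computed from its own fan-in, fan-out, and kernel size, with PF.convolution. A standalone sketch of that pattern (shapes and seed are illustrative):

from nnabla.initializer import UniformInitializer, calc_uniform_lim_glorot

rng = np.random.RandomState(313)
x = nn.Variable((8, 64, 16, 16))
lim = calc_uniform_lim_glorot(64, 32, kernel=(3, 3))  # returns a (-limit, +limit) pair
w_init = UniformInitializer(lim, rng=rng)
h = PF.convolution(x, 32, kernel=(3, 3), pad=(1, 1),
                   w_init=w_init, with_bias=False)
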
github sony / nnabla-examples / distributed / cifar10-100 / models.py View on Github
def batch_normalization(h, test=False, comm=None, group="world"):
    if comm is None:
        h = PF.batch_normalization(h, batch_stat=not test)
    else:
        h = PF.sync_batch_normalization(
            h, comm=comm, group=group, batch_stat=not test)
    return h
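
The wrapper above picks synchronized batch normalization only when a communicator is passed in. A hedged sketch of the multi-process setup that would produce one (the extension name and per-rank device handling vary by installation; the distributed examples cover the full launch procedure):

from nnabla.ext_utils import get_extension_context
import nnabla.communicators as C

ctx = get_extension_context("cudnn")  # assumes the CUDA/cuDNN extension is installed
comm = C.MultiProcessDataParallelCommunicator(ctx)
comm.init()
h = batch_normalization(h, test=False, comm=comm, group="world")  # h: an existing graph Variable
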
github sony / nnabla-examples / reduction / mnist / svd / models.py View on Github
def mnist_lenet_prediction_slim(image, scope="slim", rrate=0.75, test=False):
    """
    Construct a slimmed LeNet for MNIST: the first fully connected layer is
    factored into two smaller affines (SVD-based reduction).
    """
    with nn.parameter_scope(scope):
        image /= 255.0
        c1 = PF.convolution(image, 16, (5, 5), name='conv1')
        c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
        c2 = PF.convolution(c1, 16, (5, 5), name='conv2')
        c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)

        # SVD applied
        inmaps = np.prod(c2.shape[1:])  # c * h * w
        outmaps0 = 50  # original outmaps
        outmaps1 = reduce_maps(inmaps, outmaps0, rrate)
        d0 = F.relu(PF.affine(c2, outmaps1, name='fc-d0'), inplace=True)
        d1 = F.relu(PF.affine(d0, outmaps0, name='fc-d1'), inplace=True)

        c4 = PF.affine(d1, 10, name='fc4')
    return c4
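
reduce_maps is a repository helper not shown in the excerpt. The point of the factorization: a single inmaps x outmaps0 affine is replaced by two affines through a bottleneck of width outmaps1, cutting the weight count from inmaps * outmaps0 to outmaps1 * (inmaps + outmaps0). A hypothetical sketch consistent with that goal (the exact formula, and whether rrate is the kept fraction, are assumptions, not the repository's code):

def reduce_maps(inmaps, outmaps, rrate):
    # Hypothetical: choose the bottleneck width so the two factored
    # affines keep roughly `rrate` of the original parameter count.
    return int(rrate * inmaps * outmaps / (inmaps + outmaps))
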
github sony / nnabla-examples / GANs / cycle-gan / models.py View on Github
def deconvolution(x, n, kernel, stride, pad, init_method=None):
    if init_method == "paper":
        init = nn.initializer.NormalInitializer(0.02)
    else:
        s = nn.initializer.calc_normal_std_glorot(x.shape[1], n, kernel=kernel)
        init = nn.initializer.NormalInitializer(s)
    x = PF.deconvolution(x, n, kernel=kernel, stride=stride,
                         pad=pad, with_bias=True, w_init=init)
    return x
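
A usage sketch for a generator upsampling block (shapes and the init_method choice are illustrative; the "paper" branch corresponds to the N(0, 0.02) initialization used in the CycleGAN paper):

x = nn.Variable((1, 256, 64, 64))
y = deconvolution(x, 128, kernel=(4, 4), stride=(2, 2), pad=(1, 1),
                  init_method="paper")
print(y.shape)  # (1, 128, 128, 128)
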
github sony / nnabla-examples / imagenet-classification / model_resnet.py View on Github
def shortcut(x, ochannels, stride, shortcut_type, test):
    ichannels = x.shape[1]
    use_conv = shortcut_type.lower() == 'c'
    if ichannels != ochannels:
        assert (ichannels * 2 == ochannels) or (ichannels * 4 == ochannels)
        if shortcut_type.lower() == 'b':
            use_conv = True
    if use_conv:
        # Convolution does everything.
        # Matching channels, striding.
        with nn.parameter_scope("shortcut_conv"):
            x = PF.convolution(x, ochannels, (1, 1),
                               stride=stride, with_bias=False)
            x = PF.batch_normalization(x, batch_stat=not test)
    else:
        if stride != (1, 1):
            # Subsample spatially with strided (1, 1) average pooling
            x = F.average_pooling(x, (1, 1), stride)
        if ichannels != ochannels:
            # Zero-padding to channel axis
            ishape = x.shape
            zeros = F.constant(
                0, (ishape[0], ochannels - ichannels) + ishape[-2:])
            x = F.concatenate(x, zeros, axis=1)
    return x
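
A usage sketch exercising the convolutional branch (with shortcut_type "b", a mismatch in channel counts upgrades the shortcut to a strided 1x1 projection; shapes are illustrative):

x = nn.Variable((4, 64, 56, 56))
y = shortcut(x, ochannels=128, stride=(2, 2), shortcut_type="b", test=True)
print(y.shape)  # (4, 128, 28, 28)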