How to use the nnabla.parameter_scope function in nnabla

To help you get started, we've selected a few nnabla examples that show popular ways nnabla.parameter_scope is used in public projects.

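nnabla.parameter_scope (usually reached as nn.parameter_scope) is a context manager that prefixes the names of parameters created by parametric functions (PF.*). Re-entering a scope with the same name reuses the parameters registered there, and nn.get_parameters() called inside a scope returns only that subtree, with keys relative to the scope. A minimal sketch (the scope names and shapes are illustrative, not taken from the projects below):

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 32))

# Parameters created inside a scope are registered under its name.
with nn.parameter_scope("block1"):
    h = PF.affine(x, 16)   # creates "block1/affine/W" and "block1/affine/b"

# A different scope name gives an independent parameter set.
with nn.parameter_scope("block2"):
    h = PF.affine(h, 16)   # creates "block2/affine/W" and "block2/affine/b"

# Inside a scope, nn.get_parameters() sees only that subtree,
# with keys relative to the scope.
with nn.parameter_scope("block1"):
    print(list(nn.get_parameters().keys()))  # ['affine/W', 'affine/b']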

sony/nnabla-examples: reduction/cifar10/structured-sparsity/models.py (view on GitHub)
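In this structured-sparsity model, each residual unit opens its own scope (scope_name) with per-layer sub-scopes ("conv1" to "conv3"), so every layer's parameters get a unique hierarchical name. The snippet is a helper nested inside a model-building function; test and masked_convolution (a project-specific sparse variant of PF.convolution) come from the enclosing code.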
def res_unit(x, scope_name, dn=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                h = masked_convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                       with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                h = masked_convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                       with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = masked_convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                       with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

        return h
sony/nnabla-examples: reduction/cifar10/quantization/models.py (view on GitHub)
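The quantization example builds the same residual unit from standard PF.convolution layers; the scope-per-layer pattern is identical.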
def res_unit(x, scope_name, dn=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

        return h
sony/nnabla-examples: cifar10-100-collection/models.py (view on GitHub)
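A further variant of the unit where the nonlinearity is passed in as act, so one scoped builder can serve several activation functions.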
def res_unit(x, scope_name, dn=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):
            # Conv -> BN -> Nonlinear
            with nn.parameter_scope("conv1"):
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = act(h)
            # Conv -> BN -> Nonlinear
            with nn.parameter_scope("conv2"):
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = act(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Nonlinear
            h = act(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

        return h
sony/nnabla-examples: reduction/cifar10/distillation/models.py (view on GitHub)
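In the distillation models, parameter_scope is used at two levels: per-layer scopes inside the residual unit, and a top-level nn.parameter_scope(net) that puts the whole network under one named subtree, which the training script below addresses by name.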
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

        return h

    ncls = 10
    with nn.parameter_scope(net):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            # Preprocess
            image /= 255.0
            if not test:
                image = F.image_augmentation(image, contrast=1.0,
                                             angle=0.25,
                                             flip_lr=True)
                image.need_grad = False
            h = PF.convolution(image, maps, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)

        h = res_unit(h, "conv2", False)    # -> 32x32
        h = res_unit(h, "conv3", True)     # -> 16x16
sony/nnabla-examples: reduction/cifar10/distillation/distillation.py (view on GitHub)
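The matching training script shows the payoff of the top-level scope: opening nn.parameter_scope(student) before nn.get_parameters() hands the solver exactly the student network's parameters and nothing else.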
    pred.persistent = True  # do not clear the intermediate buffer
    loss_ce = F.mean(F.softmax_cross_entropy(pred, label))
    loss_ce_soft = ce_soft(pred, pred_label)
    loss = args.weight_ce * loss_ce + args.weight_ce_soft * loss_ce_soft

    # TEST
    # Create input variables.
    vimage = nn.Variable([args.batch_size, c, h, w])
    vlabel = nn.Variable([args.batch_size, 1])
    # Create student prediction graph for validation.
    vpred = model_prediction(vimage, net=student, maps=int(
        maps * (1. - rrate)), test=True)

    # Create Solver.
    solver = S.Adam(args.learning_rate)
    with nn.parameter_scope(student):
        solver.set_parameters(nn.get_parameters())

    # Create monitor.
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Test error", monitor, interval=1)

    # Initialize DataIterators for CIFAR-10.
    data = data_iterator(args.batch_size, True)
    vdata = data_iterator(args.batch_size, False)
    best_ve = 1.0
    # Training loop.
    for i in range(args.max_iter):
sony/nnabla-examples: semantic-segmentation/deeplabv3plus/xception_65.py (view on GitHub)
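The nested scopes mirror the Xception-65 architecture, so parameters end up under names like xception_65/entry_flow/..., which makes loading pretrained weights by name straightforward.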
def xception_65(x, test=False, fix_params=False):

    entry_flow_depth_list = [[128, 128, 128], [256, 256, 256], [728, 728, 728]]
    middle_flow_depth_list = [[728, 728, 728]]
    exit_flow_depth_list = [[728, 1024, 1024], [1536, 1536, 2048]]

    with nn.parameter_scope("xception_65"):
        with nn.parameter_scope("entry_flow"):
            x = entry_flow(x, 3, entry_flow_depth_list,
                           test=test, fix_params=fix_params)

        with nn.parameter_scope("middle_flow"):
            x = middle_flow(x, 1, middle_flow_depth_list,
                            test=test, fix_params=fix_params)

        with nn.parameter_scope("exit_flow"):
            x = exit_flow(x, 2, exit_flow_depth_list,
                          test=test, fix_params=fix_params)
            x = F.relu(x)
        global endpoints
        endpoints['Decoder End Point 2'] = x

    return endpoints
sony/nnabla: tutorial/finetuning.py (view on GitHub)
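In the fine-tuning tutorial, the layers added on top of a pretrained backbone are created under a dedicated "finetuning" scope, keeping them cleanly separated from the backbone's parameters.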
def construct_networks(args, images, model, num_class, test):
    try:
        pooled = model(images, force_global_pooling=1,
                       use_up_to="pool", training=not test)
    except Exception:  # some backbones do not accept force_global_pooling
        pooled = model(images, use_up_to="pool", training=not test)

    with nn.parameter_scope("finetuning"):
        if args.model == "VGG":
            pooled = F.relu(pooled)

            with nn.parameter_scope("additional_fc_1"):
                pooled = PF.affine(pooled, 4096)
            pooled = F.relu(pooled)
            if not test:
                pooled = F.dropout(pooled, 0.5)

            with nn.parameter_scope("additional_fc_2"):
                pooled = PF.affine(pooled, 4096)
            pooled = F.relu(pooled)
            if not test:
                pooled = F.dropout(pooled, 0.5)

        with nn.parameter_scope("last_fc"):
            pred = PF.affine(pooled, num_class)

    return pred
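A common follow-up, not shown in the excerpt above, is to train only the newly added layers. A hedged sketch, reusing the "finetuning" scope name from construct_networks and an Adam solver as in the other examples (the learning rate is illustrative):

import nnabla as nn
import nnabla.solvers as S

# Collect only the parameters registered under the "finetuning" scope,
# i.e. the layers added on top of the pretrained backbone.
with nn.parameter_scope("finetuning"):
    ft_params = nn.get_parameters()

solver = S.Adam(1e-4)
solver.set_parameters(ft_params)  # the backbone's weights stay untouched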
sony/nnabla-examples: GANs/munit/train_mgpu.py (view on GitHub)
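In the MUNIT training script, all generator-side models live under a "generator" scope and all discriminators under a "discriminators" scope, so each Adam solver can be handed exactly the subtree it should update.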
    loss_gen_b = reduce(f, [lsgan_loss(p_f)
                            for p_f in p_x_fake_b_list]).apply(persistent=True)
    loss_dis_b = reduce(f, [lsgan_loss(p_f, p_r) for p_f, p_r in
                            zip(p_x_fake_b_list, p_x_real_b_list)]).apply(persistent=True)
    # loss for generator-related models
    loss_gen = loss_gen_a + loss_gen_b \
        + args.lambda_x * (loss_recon_x_a + loss_recon_x_b) \
        + args.lambda_c * (loss_recon_x_content_a + loss_recon_x_content_b) \
        + args.lambda_s * (loss_recon_x_style_a + loss_recon_x_style_b)
    # loss for discriminators
    loss_dis = loss_dis_a + loss_dis_b

    # Solver
    lr_g, lr_d, beta1, beta2 = args.lr_g, args.lr_d, args.beta1, args.beta2
    # solver for generator-related models
    solver_gen = S.Adam(lr_g, beta1, beta2)
    with nn.parameter_scope("generator"):
        params_gen = nn.get_parameters()
    solver_gen.set_parameters(params_gen)
    # solver for discriminators
    solver_dis = S.Adam(lr_d, beta1, beta2)
    with nn.parameter_scope("discriminators"):
        params_dis = nn.get_parameters()
    solver_dis.set_parameters(params_dis)

    # Monitor
    monitor = Monitor(args.monitor_path)
    # time
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=10)
    # reconstruction
    monitor_loss_recon_x_a = MonitorSeries(
        "Recon Loss Image A", monitor, interval=10)
    monitor_loss_recon_x_content_b = MonitorSeries(
sony/nnabla-examples: speech-synthesis/WaveNet/model.py (view on GitHub)
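In the WaveNet model, parameter_scope separates the filter and gate branches of the gated activation, while the name= argument to PF.convolution opens a further sub-scope, so the audio and speaker convolutions inside one branch do not collide.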
        # x.shape = (Batch, channels, Time)

        # padding
        pad = causal_padding(x, self.kernel_size, dilation)

        # gated convolution
        with nn.parameter_scope("filter"):
            f_audio = PF.convolution(pad, self.hidden_dims, kernel=(
                self.kernel_size, ), dilation=(dilation,), name="audio")

            f_speaker = PF.convolution(speaker_emb, self.hidden_dims, kernel=(
                1, ), name="speaker") if speaker_emb is not None else 0

            f = F.tanh(f_audio + f_speaker)

        with nn.parameter_scope("gate"):
            g_audio = PF.convolution(pad, self.hidden_dims, kernel=(self.kernel_size, ), dilation=(dilation,),
                                     name="audio")

            g_speaker = PF.convolution(speaker_emb, self.hidden_dims, kernel=(
                1, ), name="speaker") if speaker_emb is not None else 0

            g = F.sigmoid(g_audio + g_speaker)

        h = f * g

        # 1 x 1 convolution
        with nn.parameter_scope("skip"):
            skip = PF.convolution(h, self.skip_dims, kernel=(1, ))

        out = x + skip
sony/nnabla-examples: reinforcement_learning/dqn/learner.py (view on GitHub)
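In the DQN learner, nn.get_parameters() returns keys relative to the current scope, so the online network (name_q) and the target network (name_qnext) yield dictionaries with matching keys that can be copied one-to-one to sync the target network; grad_only=False also includes parameters that do not require gradients (for example, batch-normalization running statistics).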
def sync_models(self):
        with nn.parameter_scope(self.name_q):
            q_params = nn.get_parameters(grad_only=False)
        with nn.parameter_scope(self.name_qnext):
            qnext_params = nn.get_parameters(grad_only=False)
        for k, v in q_params.items():
            qnext_params[k].data.copy_from(v.data)
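For context, a hedged sketch of how two such scopes might be built so that their keys line up; the q_network builder, shapes, and scope names here are illustrative assumptions, not code from the repository:

import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

def q_network(obs, n_actions):
    # Illustrative stand-in for the learner's Q-network builder.
    h = F.relu(PF.affine(obs, 64, name="fc1"))
    return PF.affine(h, n_actions, name="fc2")

obs = nn.Variable((32, 4))
with nn.parameter_scope("q"):
    q = q_network(obs, 2)       # online network
with nn.parameter_scope("q_next"):
    q_next = q_network(obs, 2)  # same builder, independent parameters

# The two scopes now contain identically named parameters
# ("fc1/affine/W", ...), which is what makes the one-to-one
# copy in sync_models work.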