How to use the nnabla.logger.info function in nnabla

To help you get started, we’ve selected a few nnabla examples based on popular ways nnabla.logger.info is used in public projects.

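Before diving into the project excerpts, note that nnabla.logger wraps the standard Python logging module, so logger.info() accepts either a preformatted string or a %-style format string with arguments. The minimal sketch below is an illustration rather than code taken from any of the listed projects; it assumes the import from nnabla.logger import logger, which exposes nnabla's preconfigured logging.Logger.

from nnabla.logger import logger

def report_context(extension_module='cpu'):
    # logger.info accepts a preformatted string ...
    logger.info("Running in %s" % extension_module)
    # ... or a format string with lazy %-substitution, like logging.Logger.info.
    logger.info("Batch size: %d", 64)

report_context('cudnn')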

github sony / nnabla-examples / reduction / mnist / svd / models.py
def reduce_maps(inmaps, outmaps, rrate):
    maps = int(rrate * inmaps * outmaps / (inmaps + outmaps))
    logger.info("###################")
    logger.info(
        "Num.Parameters is reduced {}x{} -> {}x{} + {}x{} by {}".format(
            inmaps, outmaps, inmaps, maps, maps, outmaps, rrate))
    logger.info("###################")
    time.sleep(1)
    return maps
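As a quick illustration (the numbers are hypothetical, not taken from the repository), calling the helper above with inmaps=64, outmaps=128 and rrate=0.5 keeps int(0.5 * 64 * 128 / (64 + 128)) = 21 intermediate maps, and the two logger.info calls bracket a message describing the factorization:

maps = reduce_maps(64, 128, 0.5)
# logs: "Num.Parameters is reduced 64x128 -> 64x21 + 21x128 by 0.5"
assert maps == 21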
github sony / nnabla-examples / reduction / cifar10 / factorized-layers / classification.py
def train():
    args = get_args()

    # Get context.
    from nnabla.ext_utils import get_extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = get_extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # TRAIN
    maps = 64
    data_iterator = data_iterator_cifar10
    c = 3
    h = w = 32
    n_train = 50000
    n_valid = 10000

    # Create input variables.
    image = nn.Variable([args.batch_size, c, h, w])
    label = nn.Variable([args.batch_size, 1])

    # Create CNN network for both training and testing.
github sony / nnabla-examples / GANs / pggan / validate.py
        logger.info("Sliced Wasserstein Distance")
        monitor_time = MonitorTimeElapsed(
            "SWD-ValidationTime", monitor, interval=1)
        monitor_metric = MonitorSeries("SWD", monitor, interval=1)
        nhoods_per_image = 128
        nhood_size = 7
        level_list = [128, 64, 32, 16]  # TODO: use argument
        dir_repeats = 4
        dirs_per_repeat = 128
        from sliced_wasserstein import compute_metric
        score = compute_metric(di, gen, args.latent, num_batches, nhoods_per_image, nhood_size,
                               level_list, dir_repeats, dirs_per_repeat, args.hyper_sphere)
        monitor_time.add(0)
        monitor_metric.add(0, score)  # averaged in the log
    else:
        logger.info("Set `validation-metric` as either `ms-ssim` or `swd`.")
    logger.info(score)
    logger.info("End validation")
github sony / nnabla-examples / mnist-collection / classification_bnn.py
    * Initialize a solver and set parameter variables to it.
    * Create monitor instances for saving and displaying training stats.
    * Training loop
      * Compute error rate for validation data (periodically)
      * Get the next minibatch.
      * Set parameter gradients to zero.
      * Execute forwardprop on the training graph.
      * Execute backprop.
      * Solver updates parameters by using gradients computed by backprop.
      * Compute training error
    """
    args = get_args(monitor_path='tmp.monitor.bnn')

    # Get context.
    from nnabla.ext_utils import get_extension_context
    logger.info("Running in %s" % args.context)
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)

    # Initialize DataIterator for MNIST.
    data = data_iterator_mnist(args.batch_size, True)
    vdata = data_iterator_mnist(args.batch_size, False)

    # Create CNN network for both training and testing.
    mnist_cnn_prediction = mnist_binary_connect_lenet_prediction
    if args.net == 'bincon':
        mnist_cnn_prediction = mnist_binary_connect_lenet_prediction
    elif args.net == 'binnet':
        mnist_cnn_prediction = mnist_binary_net_lenet_prediction
    elif args.net == 'bwn':
        mnist_cnn_prediction = mnist_binary_weight_lenet_prediction
github sony / nnabla / examples / cpp / nbla_train / create_initialized_model.py
    parser.add_argument("--cache_dir", "-cd", type=str, default='cache')
    parser.add_argument("--batch-size", "-b", type=int, default=128)
    parser.add_argument("--learning-rate", "-l", type=float, default=1e-3)
    parser.add_argument("--weight-decay", "-w", type=float, default=0)
    parser.add_argument("--device-id", "-d", type=str, default='0')
    parser.add_argument("--type-config", "-t", type=str, default='float')
    parser.add_argument("--net", "-n", type=str, default='lenet')
    parser.add_argument('--context', '-c', type=str,
                        default='cpu', help="Extension modules. ex) 'cpu', 'cudnn'.")
    args = parser.parse_args()

    args_added = parser.parse_args()

    # Get context.
    from nnabla.ext_utils import get_extension_context
    logger.info("Running in %s" % args.context)
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)

    mnist_cnn_prediction = mnist_lenet_prediction
    if args.net == 'resnet':
        mnist_cnn_prediction = mnist_resnet_prediction

    # Create a computation graph to be saved.
    x = nn.Variable([args.batch_size, 1, 28, 28])
    t = nn.Variable([args.batch_size, 1])
    h_t = mnist_cnn_prediction(x, test=False, aug=False)
    loss_t = F.mean(F.softmax_cross_entropy(h_t, t))
    h_v = mnist_cnn_prediction(x, test=True, aug=False)
    loss_v = F.mean(F.softmax_cross_entropy(h_v, t))
github sony / nnabla-examples / reduction / cifar10 / factorized-layers / models.py
        UV = W.d
        b = get_parameter('conv/b')
        # compute rank (size of intermediate activations)
        # to obtain the desired reduction
        inmaps = x.shape[1]
        outmaps = n_outputs
        Ksize = np.prod(kernel)
        rank = int(np.floor((1-cr)*inmaps*outmaps *
                            Ksize/(inmaps*Ksize+inmaps*outmaps)))

        # Initialize bias from the existing b, if it exists
        if b is not None:
            b_new = get_parameter_or_create(
                'svd_conv/b', b.d.shape, need_grad=b.need_grad)
            b_new.d = b.d.copy()
        logger.info("SVD convolution created: inmaps = {}; outmaps = {}; compression = {}; rank = {};".format(
            inmaps, outmaps, cr, rank))

        # create svd_convolution initialized from W in current context if it exists
        return PF.svd_convolution(x, n_outputs, kernel=kernel, r=rank, pad=pad, with_bias=with_bias, uv_init=UV)
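To make the rank computation in the snippet above concrete, the short sketch below re-evaluates the same formula with illustrative numbers (inmaps=64, outmaps=128, a 3x3 kernel, compression rate cr=0.5); these values are assumptions for the sake of the example, not defaults from the repository.

import numpy as np

inmaps, outmaps, cr = 64, 128, 0.5
Ksize = np.prod((3, 3))  # 3x3 kernel -> 9
rank = int(np.floor((1 - cr) * inmaps * outmaps * Ksize
                    / (inmaps * Ksize + inmaps * outmaps)))
print(rank)  # -> 4 for these illustrative values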
github sony / nnabla-examples / reduction / cifar10 / distillation / classification.py
def train():
    args = get_args()

    # Get context.
    from nnabla.ext_utils import get_extension_context
    logger.info("Running in %s" % args.context)
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)

    # Create CNN network for both training and testing.
    if args.net == "cifar10_resnet23_prediction":
        model_prediction = cifar10_resnet23_prediction
        data_iterator = data_iterator_cifar10
        c = 3
        h = w = 32
        n_train = 50000
        n_valid = 10000

    # TRAIN
    teacher = "teacher"
    maps = args.maps
github sony / nnabla / examples / vision / mnist / classification_bnn.py
      * Compute error rate for validation data (periodically)
      * Get the next minibatch.
      * Set parameter gradients to zero.
      * Execute forwardprop on the training graph.
      * Execute backprop.
      * Solver updates parameters by using gradients computed by backprop.
      * Compute training error
    """
    args = get_args(monitor_path='tmp.monitor.bnn')

    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Initialize DataIterator for MNIST.
    data = data_iterator_mnist(args.batch_size, True)
    vdata = data_iterator_mnist(args.batch_size, False)

    # Create CNN network for both training and testing.
    mnist_cnn_prediction = mnist_binary_connect_lenet_prediction
    if args.net == 'bincon':
        mnist_cnn_prediction = mnist_binary_connect_lenet_prediction
    elif args.net == 'binnet':
        mnist_cnn_prediction = mnist_binary_net_lenet_prediction
    elif args.net == 'bwn':
        mnist_cnn_prediction = mnist_binary_weight_lenet_prediction
    elif args.net == 'bincon_resnet':
github sony / nnabla / python / src / nnabla / utils / converter / onnx / importer.py
    def Upsample_7(self, func_list, n):
        func = self.generate_default_function("Unpooling", n)
        input_shape = self.get_func_input_shape(n.input[0])
        upp = func.unpooling_param
        scales = []
        for attr in n.attribute:
            if attr.name == "scales":
                if attr.type != AttributeProto.FLOATS:
                    raise ValueError(
                        "Only FLOATS is supported for scales in {} op_type".format(n.op_type))
                scales.extend([int(np.floor(f)) for f in attr.floats])
            elif attr.name == "mode":
                pass
            else:
                logger.info('Unsupported attribute {} was specified at {}'
                            .format(attr.name, n.op_type))

        if len(scales) == 0:
            raise ValueError("Missing 'scales' attribute")

        output_shape = []
        for i in range(len(input_shape)):
            output_shape.append(input_shape[i] * scales[i])
        self._shape_output[n.output[0]] = output_shape
        upp.kernel.dim.extend(scales)
        func_list.append(func)