How to use the mxnet.cpu function in mxnet

To help you get started, we've selected a few mxnet.cpu examples based on popular ways it is used in public projects.
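
At its core, mx.cpu() simply returns an mxnet.context.Context object that tells MXNet where to allocate arrays and run computation; every example below passes such a context around. As a minimal sketch before the real-world snippets (the shape and values here are illustrative only):

import mxnet as mx

# mx.cpu() returns a Context; the optional integer argument is a device id,
# which CPU contexts largely ignore since all CPU ids share the same memory.
ctx = mx.cpu()
print(ctx)  # cpu(0)

# NDArrays (and, via ctx= arguments, network parameters) are tied to a context.
a = mx.nd.ones((2, 3), ctx=ctx)
assert a.context == ctx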


github hpi-xnor / BMXNet-v2 / tests / python / unittest / test_gluon_estimator.py View on Github
def test_default_handlers():
    net = _get_test_network()
    train_data, _ = _get_test_data()

    num_epochs = 1
    ctx = mx.cpu()

    net.initialize(ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001})

    train_acc = mx.metric.RMSE()
    loss = gluon.loss.L2Loss()

    est = Estimator(net=net,
                    loss=loss,
                    metrics=train_acc,
                    trainer=trainer,
                    context=ctx)
    # no handler
    with warnings.catch_warnings(record=True) as w:
        est.fit(train_data=train_data, epochs=num_epochs)
        assert 'You are training with the' in str(w[-1].message)

github hpi-xnor / BMXNet-v2 / tests / python / unittest / test_multi_device_exec.py View on Github
def test_ctx_group_sparse():
    with mx.AttrScope(ctx_group='stage1'):
        lhs = mx.symbol.Variable('lhs', stype='csr')
        rhs = mx.symbol.Variable('rhs', stype='row_sparse')
        dot = mx.symbol.dot(lhs, rhs, name='dot')

    set_stage1 = set(dot.list_arguments())
    with mx.AttrScope(ctx_group='stage2'):
        softmax = mx.symbol.SoftmaxOutput(data=dot, name='softmax')

    set_stage2 = set(softmax.list_arguments()) - set_stage1

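    # Note: mx.cpu(1) and mx.cpu(2) below share the same physical CPU memory;
    # the distinct device ids mainly keep the two groups' contexts distinct.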
    group2ctx = {
        'stage1' : mx.cpu(1),
        'stage2' : mx.cpu(2)
    }
    texec = softmax.simple_bind(mx.cpu(0), group2ctx=group2ctx,
                                lhs=(32, 200), rhs=(200, 5))

    for arr, name in zip(texec.arg_arrays, softmax.list_arguments()):
        if name in set_stage1:
            assert arr.context == group2ctx['stage1']
        else:
            assert arr.context == group2ctx['stage2']

github awslabs / sagemaker-debugger / tests / mxnet / test_spot_training.py View on Github
        mnist_valid.transform_first(transformer), batch_size=batch_size, num_workers=4
    )

    # Create Model in Gluon
    net = nn.HybridSequential()
    net.add(
        nn.Conv2D(channels=6, kernel_size=5, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Conv2D(channels=16, kernel_size=3, activation="relu"),
        nn.MaxPool2D(pool_size=2, strides=2),
        nn.Flatten(),
        nn.Dense(120, activation="relu"),
        nn.Dense(84, activation="relu"),
        nn.Dense(10),
    )
    net.initialize(init=init.Xavier(), ctx=mx.cpu())

    if hook is not None:
        # Register the forward Hook
        hook.register_hook(net)

    softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})
    hook.register_hook(softmax_cross_entropy)

    # Start the training.
    for epoch in range(epochs):
        train_loss, train_acc, valid_acc = 0.0, 0.0, 0.0
        tic = time.time()
        if set_modes:
            hook.set_mode(modes.TRAIN)

github hpi-xnor / BMXNet-v2 / tests / python / unittest / test_operator.py View on Github
    ins = []
    if dim == 2:
        shape = (2, 2)
    else:
        shape = (2, 2, 2, 3)
    ins = [np.ones(shape) * i for i in range(num)]
    e = np.hstack(ins)

    e_nd = mx.nd.empty(e.shape)
    e_nd[:] = e
    data = mx.sym.Variable('data')
    op = mx.sym.SliceChannel(data=data, num_outputs=num)
    arg_shape, output_shape, aux_shape = op.infer_shape(data=e_nd.shape)
    grad_nd = [mx.nd.empty(shape) for shape in arg_shape]

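    # Binding on mx.cpu() gives an executor whose outputs are allocated in CPU memory.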
    exe = op.bind(mx.cpu(), args=[e_nd], args_grad=grad_nd)
    assert len(exe.outputs) == num
    o_nd = [exe.outputs[i] for i in range(num)]
    # test forward
    exe.forward()
    for i in range(num):
        assert reldiff(o_nd[i].asnumpy(), ins[i]) < 1e-5
    # test backward
    for i in range(num):
        o_nd[i] += i
    exe.backward(o_nd)
    assert reldiff(grad_nd[0].asnumpy(), np.hstack([ins[i] + i for i in range(num)])) < 1e-5

github Angzz / fcos-gluon-cv / gluoncv / model_zoo / fcos / fcos_target.py View on Github
        pl, pt, pr, pb = F.split(box_preds, num_outputs=4, axis=-1)
        x1 = cx - pl
        y1 = cy - pt
        x2 = cx + pr
        y2 = cy + pb
        boxes = F.concat(x1, y1, x2, y2, dim=2)
        return boxes


if __name__ == '__main__':
    img = nd.zeros(shape=(1000, 1000, 3))

    boxes = nd.array([[291, 114, 901, 778, 60],
                      [504, 263, 780, 490, 15],
                      [461, 222, 829, 579, 20],
                      [24, 205, 389, 800, 15]], ctx=mx.cpu())

    target_generator = FCOSTargetGenerator()
    cls_targets, ctr_targets, box_targets, cor_targets = \
            target_generator.generate_targets(img, boxes)
    from IPython import embed; embed()

github opencv / open_model_zoo / tools / accuracy_checker / accuracy_checker / launcher / mxnet_launcher.py View on Github
        # Get model name, prefix, epoch
        self.model = self.config['model']
        model_path, model_file = self.model.parent, self.model.name
        model_name = model_file.rsplit('.', 1)[0]
        model_prefix, model_epoch = model_name.rsplit('-', 1)

        # Get device and set device context
        match = re.match(DEVICE_REGEX, self.config['device'].lower())
        if match.group('device') == 'gpu':
            identifier = match.group('identifier')
            if identifier is None:
                identifier = 0
            device_context = mxnet.gpu(int(identifier))
        else:
            device_context = mxnet.cpu()

        # Get batch size from the config, defaulting to 1
        self._batch = self.config.get('batch', 1)

        # Get input shapes
        input_shapes = []

        for input_config in self.config['inputs']:
            input_shape = input_config['shape']
            input_shape = string_to_tuple(input_shape, casting_type=int)
            input_shapes.append((input_config['name'], (self._batch, *input_shape)))

        # Load checkpoints
        sym, arg_params, aux_params = mxnet.model.load_checkpoint(
            model_path / model_prefix, int(model_epoch)
        )

github zhanghang1989 / MXNet-Gluon-Style-Transfer / main.py View on Github
def evaluate(args):
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # images
    content_image = utils.tensor_load_rgbimage(args.content_image, ctx, size=args.content_size, keep_asp=True)
    style_image = utils.tensor_load_rgbimage(args.style_image, ctx, size=args.style_size)
    style_image = utils.preprocess_batch(style_image)
    # model
    style_model = net.Net(ngf=args.ngf)
    style_model.load_params(args.model, ctx=ctx)
    # forward
    style_model.set_target(style_image)
    output = style_model(content_image)
    utils.tensor_save_bgrimage(output[0], args.output_image, args.cuda)

github jennyzhang0215 / DKVMN / code / python3 / main.py View on Github
        parser.add_argument('--n_question', type=int, default=1223, help='the number of unique questions in the dataset')
        parser.add_argument('--seqlen', type=int, default=200, help='the allowed maximum length of a sequence')
        parser.add_argument('--data_dir', type=str, default='../../data/STATICS', help='data directory')
        parser.add_argument('--data_name', type=str, default='STATICS', help='data set name')
        parser.add_argument('--load', type=str, default='STATICS', help='model file to load')
        parser.add_argument('--save', type=str, default='STATICS', help='path to save model')

    params = parser.parse_args()
    params.lr = params.init_lr
    params.memory_key_state_dim = params.q_embed_dim
    params.memory_value_state_dim = params.qa_embed_dim

    params.dataset = dataset
    if params.gpus is None:
        ctx = mx.cpu()
        print("Training with cpu ...")
    else:
        ctx = mx.gpu(int(params.gpus))
        print("Training with gpu(" + params.gpus + ") ...")
    params.ctx = ctx

    # Read data
    dat = DATA(n_question=params.n_question, seqlen=params.seqlen, separate_char=',')
    seedNum = params.seedNum
    np.random.seed(seedNum)
    if not params.test:
        params.memory_key_state_dim = params.q_embed_dim
        params.memory_value_state_dim = params.qa_embed_dim
        d = vars(params)
        for key in d:
            print('\t', key, '\t', d[key])

github pavelgonchar / neural-art-mini / run.py View on Github
    img[1, :] += 116.779
    img[2, :] += 103.939
    img = np.swapaxes(img, 1, 2)
    img = np.swapaxes(img, 0, 2)
    img = np.clip(img, 0, 255)
    return img.astype('uint8')

def SaveImage(img, filename):
    logging.info('save output to %s', filename)
    out = PostprocessImage(img)
    if args.remove_noise != 0.0:
        out = denoise_tv_chambolle(out, weight=args.remove_noise, multichannel=True)
    io.imsave(filename, out)

# input
dev = mx.gpu(args.gpu) if args.gpu >= 0 else mx.cpu()
content_np = PreprocessContentImage(args.content_image, args.max_long_edge)
style_np = PreprocessStyleImage(args.style_image, shape=content_np.shape)
size = content_np.shape[2:]

# model
Executor = namedtuple('Executor', ['executor', 'data', 'data_grad'])

def style_gram_symbol(input_size, style):
    _, output_shapes, _ = style.infer_shape(data=(1, 3, input_size[0], input_size[1]))
    gram_list = []
    grad_scale = []
    for i in range(len(style.list_outputs())):
        shape = output_shapes[i]
        x = mx.sym.Reshape(style[i], target_shape=(int(shape[1]), int(np.prod(shape[2:]))))
        # use fully connected to quickly do dot(x, x^T)
        gram = mx.sym.FullyConnected(x, x, no_bias=True, num_hidden=shape[1])

github dmlc / dgl / examples / mxnet / tree_lstm / train.py View on Github
def main(args):
    np.random.seed(args.seed)
    mx.random.seed(args.seed)

    best_epoch = -1
    best_dev_acc = 0

    cuda = args.gpu >= 0
    if cuda:
        if args.gpu in mx.test_utils.list_gpus():
            ctx = mx.gpu(args.gpu)
        else:
            print('Requested GPU id {} was not found. Defaulting to CPU implementation'.format(args.gpu))
            ctx = mx.cpu()
    else:
        ctx = mx.cpu()

    if args.use_glove:
        prepare_glove()

    trainset = data.SST()
    train_loader = gluon.data.DataLoader(dataset=trainset,
                                         batch_size=args.batch_size,
                                         batchify_fn=batcher(ctx),
                                         shuffle=True,
                                         num_workers=0)
    devset = data.SST(mode='dev')
    dev_loader = gluon.data.DataLoader(dataset=devset,
                                       batch_size=100,
                                       batchify_fn=batcher(ctx),
                                       shuffle=True,
                                       num_workers=0)