How to use the mxnet.nd.array function in mxnet

To help you get started, we’ve selected a few mxnet.nd.array examples based on popular ways mxnet is used in public projects.

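Before diving into the project excerpts, here is a minimal sketch of mx.nd.array itself; the values, shapes, and dtypes below are purely illustrative.

import numpy as np
import mxnet as mx

# From a Python list: float32 by default, allocated on the default (CPU) context.
a = mx.nd.array([[1, 2, 3], [4, 5, 6]])

# From a NumPy array, with an explicit dtype and context.
b = mx.nd.array(np.arange(6).reshape(2, 3), dtype='int32', ctx=mx.cpu())

# Convert back to NumPy when host-side values are needed.
print(a.shape, a.dtype)   # (2, 3) <class 'numpy.float32'>
print(b.asnumpy())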

github dmlc / dgl / examples / mxnet / rgcn / entity_classify.py
    data = load_data(args.dataset, bfs_level=args.bfs_level, relabel=args.relabel)
    num_nodes = data.num_nodes
    num_rels = data.num_rels
    num_classes = data.num_classes
    labels = data.labels
    train_idx = data.train_idx
    test_idx = data.test_idx

    # split dataset into train, validate, test
    if args.validation:
        val_idx = train_idx[:len(train_idx) // 5]
        train_idx = train_idx[len(train_idx) // 5:]
    else:
        val_idx = train_idx

    train_idx = mx.nd.array(train_idx)
    # the nodes are featureless, so the node id serves as the input feature
    feats = mx.nd.arange(num_nodes, dtype='int32')
    # edge type and normalization factor
    edge_type = mx.nd.array(data.edge_type, dtype='int32')
    edge_norm = mx.nd.array(data.edge_norm).expand_dims(1)
    labels = mx.nd.array(labels).reshape((-1))

    # check cuda
    use_cuda = args.gpu >= 0
    if use_cuda:
        ctx = mx.gpu(args.gpu)
        feats = feats.as_in_context(ctx)
        edge_type = edge_type.as_in_context(ctx)
        edge_norm = edge_norm.as_in_context(ctx)
        labels = labels.as_in_context(ctx)
        train_idx = train_idx.as_in_context(ctx)
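
The excerpt above builds every array on the default CPU context and then moves it to the GPU with as_in_context; mx.nd.array also accepts a ctx argument to place the array on the target device directly. A stripped-down sketch of that pattern (the values and the GPU id are placeholders):

import mxnet as mx

# num_gpus() is available in recent MXNet 1.x releases; fall back to CPU otherwise.
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()

# Create on the default context, then copy to the chosen device ...
train_idx = mx.nd.array([0, 1, 2, 3]).as_in_context(ctx)
# ... or create directly on it.
edge_norm = mx.nd.array([0.5, 0.25], ctx=ctx).expand_dims(1)   # shape (2, 1)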

github hpi-xnor / BMXNet-v2 / tests / python / unittest / test_ndarray.py
def test_ndarray_concatenate():
    axis = 1
    shapes = [(2, 3, 4, 2), (2, 2, 4, 2), (2, 1, 4, 2)]
    arrays_np = [np.random.uniform(-10, 10, s).astype(np.float32) for s in shapes]
    arrays_nd = [mx.nd.array(x) for x in arrays_np]

    array_nd = mx.nd.concatenate(arrays_nd, axis=axis)
    array_np = np.concatenate(arrays_np, axis=axis)

    assert same(array_np, array_nd.asnumpy())
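
The test follows the standard pattern for checking an NDArray operator against NumPy: wrap the NumPy inputs with mx.nd.array, run both implementations, and compare via asnumpy(). A minimal sketch of that pattern using the concat operator, with arbitrary shapes:

import numpy as np
import mxnet as mx

parts_np = [np.random.uniform(-1, 1, (2, n, 4)).astype(np.float32) for n in (3, 2, 1)]
parts_nd = [mx.nd.array(p) for p in parts_np]        # NumPy -> NDArray
out_nd = mx.nd.concat(*parts_nd, dim=1)
out_np = np.concatenate(parts_np, axis=1)
assert np.allclose(out_np, out_nd.asnumpy())         # NDArray -> NumPy for the comparison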

github happywu / A3C / async_dqn_test_new.py
            action_batch = ([data[1] for data in minibatch])
            R_batch = ([data[4] for data in minibatch])

            # TODO: can only forward one batch at a time because MXNet needs to rebind
            # for variable input lengths
            batch_size = len(minibatch)
            thread_net.bind(data_shapes=[('data', (batch_size, args.agent_history_length,
                                                args.resized_width, args.resized_height)),
                                        ('rewardInput', (batch_size, 1)),
                                        ('actionInput', (batch_size, act_dim))],
                            label_shapes=None, grad_req='write', force_rebind=True)

            batch = mx.io.DataBatch(data=[mx.nd.array(state_batch),
                                          mx.nd.array(np.reshape(
                                              R_batch, (-1, 1))),
                                          mx.nd.array(action_batch)], label=None)

            thread_net.clear_gradients()
            thread_net.forward(batch, is_train=True)
            loss = np.mean(thread_net.get_outputs()[0].asnumpy())
            thread_net.backward()

            s = summary.scalar('loss', loss)
            summary_writer.add_summary(s, T)
            summary_writer.flush()

            with lock:
                Module.clear_gradients()
                Module.add_gradients_from_module(thread_net)
                Module.update()
                Module.clear_gradients()
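
Stripped of the surrounding A3C plumbing, the core of the snippet is packing NumPy batches into NDArrays and handing them to the module as a DataBatch. A hedged, minimal version of that step (shapes and names are illustrative):

import numpy as np
import mxnet as mx

state_batch = np.random.rand(8, 4, 84, 84).astype(np.float32)                 # (batch, history, H, W)
reward_batch = np.random.rand(8).astype(np.float32)
action_batch = np.eye(4, dtype=np.float32)[np.random.randint(0, 4, size=8)]   # one-hot actions

batch = mx.io.DataBatch(data=[mx.nd.array(state_batch),
                              mx.nd.array(reward_batch.reshape(-1, 1)),
                              mx.nd.array(action_batch)],
                        label=None)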

github hpi-xnor / BMXNet-v2 / tests / python / mkl / test_mkldnn.py
    def check_batchnorm_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = shape[1],
            gamma = np.ones(s)
            beta = np.ones(s)
            gamma[1] = 3
            beta[0] = 3

            rolling_mean = np.random.uniform(size=s)
            rolling_std = np.random.uniform(size=s)

            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                           mx.nd.array(beta).tostype(stype)]
            mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]

            test = mx.symbol.BatchNorm(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
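
The MKL-DNN test also shows that an NDArray created with mx.nd.array can be converted to another storage type with tostype before being handed to the numeric-gradient check. A small sketch of just that conversion (the shape is arbitrary):

import numpy as np
import mxnet as mx

dense_np = np.random.normal(size=(2, 3)).astype(np.float32)
dense_nd = mx.nd.array(dense_np)              # 'default' (dense) storage
sparse_nd = dense_nd.tostype('row_sparse')    # returns a new NDArray with row_sparse storage
print(dense_nd.stype, sparse_nd.stype)        # default row_sparse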

github hpi-xnor / BMXNet-v2 / tests / python / unittest / test_operator.py
    def test_zero_depth():
        shape = (2, 4, 9, 3)
        indices = np.ones(shape)
        depth = 0
        mx_one_hot_array = mx.nd.one_hot(
            mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
            depth=depth, dtype=np.int32).asnumpy()
        expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
        assert same(expected_array, mx_one_hot_array)
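
For contrast with the depth=0 edge case above, the same call with a nonzero depth produces the usual one-hot rows; the indices below are only illustrative.

import numpy as np
import mxnet as mx

indices = mx.nd.array([0, 2, 1], dtype=np.int32)   # class ids as an int32 NDArray
one_hot = mx.nd.one_hot(indices, depth=3)
print(one_hot.asnumpy())
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]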

github hpi-xnor / BMXNet-v2 / tools / caffe_converter / mean_image.py
    if caffe_flag:
        mean_blob = caffe.proto.caffe_pb2.BlobProto()
    else:
        mean_blob = caffe_parse.caffe_pb2.BlobProto()

    mean_blob.ParseFromString(data)
    img_mean_np = np.array(mean_blob.data)
    img_mean_np = img_mean_np.reshape(
        mean_blob.channels, mean_blob.height, mean_blob.width
    )
    # swap channels from Caffe BGR to RGB
    img_mean_np2 = img_mean_np.copy()  # copy, not an alias, so the original channel 0 survives for the second assignment
    img_mean_np[0] = img_mean_np2[2]
    img_mean_np[2] = img_mean_np2[0]
    return mx.nd.array(img_mean_np)

github IBM / adversarial-robustness-toolbox / art / classifiers / mxnet.py
        :rtype: `np.ndarray`
        """
        import mxnet as mx

        # Check value of label for computing gradients
        if not (label is None or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes()))
                or (isinstance(label, np.ndarray) and len(label.shape) == 1 and (label < self.nb_classes()).all()
                    and label.shape[0] == x.shape[0])):
            raise ValueError('Label %s is out of range.' % str(label))

        train_mode = self._learning_phase if hasattr(self, '_learning_phase') else False

        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

        x_preprocessed = mx.nd.array(x_preprocessed.astype(NUMPY_DTYPE), ctx=self._ctx)
        x_preprocessed.attach_grad()

        if label is None:
            with mx.autograd.record(train_mode=False):
                preds = self._model(x_preprocessed)
                class_slices = [preds[:, i] for i in range(self.nb_classes())]

            grads = []
            for slice_ in class_slices:
                slice_.backward(retain_graph=True)
                grad = x_preprocessed.grad.asnumpy()
                grads.append(grad)
            grads = np.swapaxes(np.array(grads), 0, 1)
        elif isinstance(label, (int, np.integer)):
            with mx.autograd.record(train_mode=train_mode):
                preds = self._model(x_preprocessed)
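
The (truncated) gradient code above reduces to a small, reusable sequence: create the input with mx.nd.array on the right context, call attach_grad(), record the forward pass, then call backward(). A self-contained sketch of that sequence, with a toy function standing in for the classifier:

import numpy as np
import mxnet as mx

x = mx.nd.array(np.random.rand(4, 3).astype(np.float32), ctx=mx.cpu())
x.attach_grad()                  # allocate gradient storage for this NDArray

with mx.autograd.record():
    y = (x * x).sum()            # any differentiable computation
y.backward()

print(x.grad.asnumpy())          # equals 2 * x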

github WalterMa / gluon-faster-rcnn / dataset / dataloader.py
    def batchify(self, data):
        data_shape = len(data[0].shape)
        if not isinstance(data[0], nd.NDArray):
            tmp = []
            for i in data:
                tmp.append(nd.array(i, ctx=self._ctx))
            data = tmp
        if data_shape == 1:
            # 2. Stack im_info
            return nd.stack(*data)
        elif data_shape == 2:
            # 2. Padding label
            buf = nd.full((len(data), self._label_max_size, data[0].shape[-1]), val=-1, ctx=self._ctx)
            for i, l in enumerate(data):
                buf[i][:l.shape[0], :] = l
            return buf
        elif data_shape == 3:
            # 2. Padding image
            buf = nd.zeros((len(data), data[0].shape[0], self._image_max_size, self._image_max_size), ctx=self._ctx)
            for i, img in enumerate(data):
                buf[i][:, :img.shape[1], :img.shape[2]] = img
            return buf
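
The padding branches above illustrate a handy NDArray pattern: allocate a fixed-size buffer with nd.full (or nd.zeros) on the target context, then copy each variable-length array into its slice. A hedged sketch of the label-padding case, with made-up sizes:

import mxnet as mx
from mxnet import nd

labels = [nd.array([[0, 10, 10, 50, 50]]),                                     # one box
          nd.array([[1, 20, 20, 60, 60], [2, 5, 5, 30, 30]])]                  # two boxes
max_size, width = 4, labels[0].shape[-1]

buf = nd.full((len(labels), max_size, width), val=-1, ctx=mx.cpu())
for i, l in enumerate(labels):
    buf[i][:l.shape[0], :] = l   # rows beyond l.shape[0] keep the -1 padding value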

github GT-RAIL / rail_object_detection / rail_object_detector / libs / drfcn / operator_py / box_annotator_ohem.py
        per_roi_loss_cls = per_roi_loss_cls.asnumpy()
        per_roi_loss_cls = per_roi_loss_cls[np.arange(per_roi_loss_cls.shape[0], dtype='int'), labels.astype('int')]
        per_roi_loss_cls = -1 * np.log(per_roi_loss_cls)
        per_roi_loss_cls = np.reshape(per_roi_loss_cls, newshape=(-1,))

        per_roi_loss_bbox = bbox_weights * mx.nd.smooth_l1((bbox_pred - bbox_targets), scalar=1.0)
        per_roi_loss_bbox = mx.nd.sum(per_roi_loss_bbox, axis=1).asnumpy()

        top_k_per_roi_loss = np.argsort(per_roi_loss_cls + per_roi_loss_bbox)
        labels_ohem = labels
        labels_ohem[top_k_per_roi_loss[::-1][self._roi_per_img:]] = -1
        bbox_weights_ohem = bbox_weights.asnumpy()
        bbox_weights_ohem[top_k_per_roi_loss[::-1][self._roi_per_img:]] = 0

        labels_ohem = mx.nd.array(labels_ohem)
        bbox_weights_ohem = mx.nd.array(bbox_weights_ohem)

        for ind, val in enumerate([labels_ohem, bbox_weights_ohem]):
            self.assign(out_data[ind], req[ind], val)

github mahyarnajibi / SNIPER / lib / operator_py / box_annotator_ohem.py
            per_roi_loss_bbox = bbox_weights[i] * mx.nd.smooth_l1((bbox_pred[i] - bbox_targets[i]), scalar=1.0)
            per_roi_loss_bbox = mx.nd.sum(per_roi_loss_bbox, axis=1).asnumpy()
            per_roi_loss_bbox[nvids] = 0

            top_k_per_roi_loss = np.argsort(per_roi_loss_cls + per_roi_loss_bbox)
            labels[i][nvids] = -1
            labels_ohem[i] = labels[i]
            labels_ohem[i][top_k_per_roi_loss[::-1][self._roi_per_img:]] = -1
            bbox_weights_ohem[i] = bbox_weights[i].asnumpy()
            bbox_weights_ohem[i][top_k_per_roi_loss[::-1][self._roi_per_img:]] = 0

        if not self.get_fg_labels:
            labels_ohem = mx.nd.array(labels_ohem)
            bbox_weights_ohem = mx.nd.array(bbox_weights_ohem)
            for ind, val in enumerate([labels_ohem, bbox_weights_ohem]):
                self.assign(out_data[ind], req[ind], val)
        else:
            fg_labels = labels_ohem.copy()
            for i in range(batch_size):
                bg_inds = np.where(labels_ohem[i]==0)[0]
                fg_labels[i][bg_inds] = -1
            labels_ohem = mx.nd.array(labels_ohem)
            bbox_weights_ohem = mx.nd.array(bbox_weights_ohem)
            fg_labels = mx.nd.array(fg_labels)
            for ind, val in enumerate([labels_ohem, bbox_weights_ohem,fg_labels]):
                self.assign(out_data[ind], req[ind], val)