How to use the chainer.cuda.to_cpu function in chainer

To help you get started, we've selected a few chainer examples based on popular ways chainer.cuda.to_cpu is used in public projects.
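
A minimal sketch of the basic round trip between host and device memory is shown first, to make the pattern in the project snippets easier to follow. It is not taken from any of the projects below; it assumes Chainer with CuPy and a CUDA-capable GPU are installed.

import numpy
from chainer import cuda

x_cpu = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
x_gpu = cuda.to_gpu(x_cpu)   # copy the NumPy array to the current GPU device
y_cpu = cuda.to_cpu(x_gpu)   # copy the CuPy array back into a numpy.ndarray

assert isinstance(y_cpu, numpy.ndarray)
numpy.testing.assert_array_equal(y_cpu, x_cpu)

# NumPy arrays pass through to_cpu unchanged, so it is safe to call it
# unconditionally on data that may live on either device.
assert isinstance(cuda.to_cpu(x_cpu), numpy.ndarray)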

github chainer / chainer-chemistry / tests / links_tests / update_tests / test_rsgcn_update.py
def test_forward_cpu_graph_invariant(update, data):
    atom_data, adj_data = data[:2]
    y_actual = cuda.to_cpu(update(atom_data, adj_data).data)

    permutation_index = numpy.random.permutation(atom_size)
    permute_atom_data = permute_node(atom_data, permutation_index, axis=1)
    permute_adj_data = permute_adj(adj_data, permutation_index)
    permute_y_actual = cuda.to_cpu(
        update(permute_atom_data, permute_adj_data).data)
    numpy.testing.assert_allclose(
        permute_node(y_actual, permutation_index, axis=1), permute_y_actual,
        rtol=1e-5, atol=1e-5)

github chainer / chainer / onnx_chainer / export_testcase.py
    for i, (name, var) in enumerate(outputs.items()):
        pb_name = os.path.join(test_data_dir, 'output_{}.pb'.format(i))
        array = chainer.cuda.to_cpu(var.array)
        write_tensor_pb(pb_name, name, array)

    if output_grad:
        # Perform backward computation
        if len(outputs) > 1:
            outputs = chainer.functions.identity(*outputs)
        for out in outputs.values():
            out.grad = model.xp.ones_like(out.array)
        list(outputs.values())[0].backward()

        for i, (name, param) in enumerate(model.namedparams()):
            pb_name = os.path.join(test_data_dir, 'gradient_{}.pb'.format(i))
            grad = chainer.cuda.to_cpu(param.grad)
            onnx_name = cleanse_param_name(name)
            if grad is None:
                warnings.warn(
                    'Parameter `{}` does not have gradient value'.format(name))
            else:
                write_tensor_pb(pb_name, onnx_name, grad)

github chainer / chainercv / tests / links_tests / model_tests / faster_rcnn_tests / utils_tests / test_delta_encode_decode.py
    def check_delta_encode_decode_consistency(
            self, raw_bbox_src, raw_bbox_dst):
        bbox = delta_encode(raw_bbox_src, raw_bbox_dst)
        out_raw_bbox = delta_decode(raw_bbox_src, bbox)

        np.testing.assert_almost_equal(
            cuda.to_cpu(out_raw_bbox), cuda.to_cpu(raw_bbox_dst), decimal=5)

github miyamotok0105 / ai_chatbot / team2 / seq2seq_one_layer_chainer1.5 / mt_s2s_attention.py
  attmt.embed(x)
  x = XP.iarray([src_stoi('') for _ in range(batch_size)])
  attmt.embed(x)

  attmt.encode()
  
  t = XP.iarray([trg_stoi('<s>') for _ in range(batch_size)])
  hyp_batch = [[] for _ in range(batch_size)]

  if is_training:
    loss = XP.fzeros(())
    for l in range(trg_len):
      y = attmt.decode(t)
      t = XP.iarray([trg_stoi(trg_batch[k][l]) for k in range(batch_size)])
      loss += functions.softmax_cross_entropy(y, t)
      output = cuda.to_cpu(y.data.argmax(1))
      for k in range(batch_size):
        hyp_batch[k].append(trg_itos(output[k]))
    return hyp_batch, loss
  
  else:
    while len(hyp_batch[0]) < generation_limit:
      y = attmt.decode(t)
      output = cuda.to_cpu(y.data.argmax(1))
      t = XP.iarray(output)
      for k in range(batch_size):
        hyp_batch[k].append(trg_itos(output[k]))
      if all(hyp_batch[k][-1] == '</s>' for k in range(batch_size)):
        break

    return hyp_batch

github corochann / chainer-pointnet / chainer_pointnet / utils / sampling.py
    with timer('3rd'):  # time measuring twice.
        farthest_indices, distances = farthest_point_sampling(
            pts, k, skip_initial=True)

    # with timer('gpu'):
    #     farthest_indices = farthest_point_sampling_gpu(pts, k)
    print('farthest_indices', farthest_indices.shape, type(farthest_indices))

    if do_plot:
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import os
        pts = cuda.to_cpu(pts)
        farthest_indices = cuda.to_cpu(farthest_indices)
        if not os.path.exists('results'):
            os.mkdir('results')
        for index in range(batch_size):
            fig, ax = plt.subplots()
            plt.grid(False)
            plt.scatter(pts[index, :, 0], pts[index, :, 1], c='k', s=4)
            plt.scatter(pts[index, farthest_indices[index], 0], pts[index, farthest_indices[index], 1], c='r', s=4)
            # plt.show()
            plt.savefig('results/farthest_point_sampling_{}.png'.format(index))

        # --- To extract farthest_points, you can use this kind of advanced indexing ---
        farthest_points = pts[numpy.arange(batch_size)[:, None],
                          farthest_indices, :]
        print('farthest_points', farthest_points.shape)
        for index in range(batch_size):
            farthest_pts_index = pts[index, farthest_indices[index], :]

github sekiguchi92 / SpeechEnhancement / Jointly_Diagonalizable_FullRank_Model / FastFCA.py
    def convert_to_NumpyArray(self, data):
        if self.xp == np:
            return data
        else:
            return cuda.to_cpu(data)
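
The helper above branches on the stored array module (self.xp). The same dispatch can also be written by asking Chainer which module owns a given array; the sketch below only illustrates that pattern (the name to_numpy_array is made up here) and is not part of the FastFCA code.

import numpy
from chainer import cuda

def to_numpy_array(data):
    # get_array_module returns cupy for GPU-resident arrays and numpy for
    # host arrays, so the copy only happens when data actually lives on a GPU.
    if cuda.get_array_module(data) is not numpy:
        data = cuda.to_cpu(data)
    return data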

github musyoku / wasserstein-gan / gan.py
def to_numpy(self, x):
		if isinstance(x, Variable) == True:
			x = x.data
		if isinstance(x, cuda.ndarray) == True:
			x = cuda.to_cpu(x)
		return x

github mitmul / chainer-faster-rcnn / lib / faster_rcnn / proposal_layer.py
        # sort all (proposal, score) pairs by score from highest to lowest
        # take top pre_nms_topN proposals before NMS
        # apply NMS with threshold 0.7 to remaining proposals
        # take after_nms_topN proposals after NMS
        # return the top proposals (-> RoIs top, scores top)

        pre_nms_topN = self.TRAIN_RPN_PRE_NMS_TOP_N \
            if train else self.TEST_RPN_PRE_NMS_TOP_N
        post_nms_topN = self.TRAIN_RPN_POST_NMS_TOP_N \
            if train else self.TEST_RPN_POST_NMS_TOP_N
        nms_thresh = self.RPN_NMS_THRESH
        min_size = self.RPN_MIN_SIZE

        # the first set of _num_anchors channels are bg probs
        # the second set are the fg probs, which we want
        scores = to_cpu(rpn_cls_prob.data[:, self._num_anchors:, :, :])
        bbox_deltas = to_cpu(rpn_bbox_pred.data)
        im_info = im_info[0, :]

        # 1. Generate proposals from bbox deltas and shifted anchors
        height, width = scores.shape[-2:]

        # Enumerate all shifts
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.asarray(np.meshgrid(shift_x, shift_y))
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                            shift_x.ravel(), shift_y.ravel())).transpose()

        # Enumerate all shifted anchors:
        #
        # add A anchors (1, A, 4) to

github ronekko / deep_metric_learning / train.py
    try:
        for epoch in range(p.num_epochs):
            time_begin = time.time()
            epoch_losses = []

            for i in tqdm(range(p.num_batches_per_epoch),
                          desc='# {}'.format(epoch)):
                with chainer.using_config('train', True):
                    loss = func_train_one_batch(model, p, next(iter_train))
                    loss.backward()
                optimizer.update()
                model.cleargrads()
                epoch_losses.append(loss.data)
                del loss

            loss_average = cuda.to_cpu(xp.array(
                xp.hstack(epoch_losses).mean()))

            # average accuracy and distance matrix for training data
            D, soft, hard, retrieval = common.evaluate(
                model, stream_train_eval.get_epoch_iterator(), p.distance_type,
                return_distance_matrix=save_distance_matrix)

            # average accuracy and distance matrix for testing data
            D_test, soft_test, hard_test, retrieval_test = common.evaluate(
                model, stream_test.get_epoch_iterator(), p.distance_type,
                return_distance_matrix=save_distance_matrix)

            time_end = time.time()
            epoch_time = time_end - time_begin
            total_time = time_end - time_origin

github mitmul / tfchain / tfchain / function.py
    def __call__(self, x):
        if isinstance(x, chainer.Variable):
            x = x.data
        if isinstance(x, cupy.ndarray):
            with cuda.Device(x.device):
                x = cuda.to_cpu(x)
        if hasattr(x, 'ndim') and x.ndim == 4:
            x = x.transpose(0, 2, 3, 1)  # to NHWC
        if isinstance(x, np.ndarray):
            x = tf.Variable(x)
        return self.forward(x)