How to use the chainer.cuda.to_gpu function in chainer

To help you get started, we’ve selected a few chainer examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github chainer / chainer-chemistry / tests / models_tests / gwm_tests / test_gwm.py View on Github external
def test_forward_gpu(gwm, data):
    """Check the GWM forward pass with all inputs and the model on GPU."""
    # First three entries of the fixture are the arrays the forward pass needs.
    gpu_inputs = [cuda.to_gpu(array) for array in data[:3]]
    gwm.to_gpu()
    check_forward(gwm, *gpu_inputs)
github chainer / chainer-chemistry / tests / models_tests / prediction_tests / test_regressor.py View on Github external
def to_gpu(self):
        """Transfer the x, t, and y attributes to the current GPU device."""
        for attr in ("x", "t", "y"):
            setattr(self, attr, cuda.to_gpu(getattr(self, attr)))
github chainer / chainer / tests / chainer_tests / functions_tests / array_tests / test_spatial_transformer_grid.py View on Github external
def test_forward_gpu(self):
        """Run the forward check with theta moved onto the GPU."""
        theta_gpu = cuda.to_gpu(self.theta)
        self.check_forward(theta_gpu, self.output_shape)
github chainer / chainer / tests / chainermn_tests / communicator_tests / test_communicator.py View on Github external
def test_allreduce_gpu(self):
        """Exercise allreduce over GPU arrays for every supported dtype."""
        self.setup(True)
        for dtype in self.dtypes:
            # 18-element ramp offset by this process's rank, placed on its GPU.
            data = (np.arange(18) + self.communicator.rank).astype(dtype)
            data = chainer.cuda.to_gpu(data, device=self.device)
            self.check_allreduce(data, dtype, 18)

            # Scalar sanity check: reducing 1 across workers yields the size.
            scalar = np.array(1).astype(dtype)
            reduced = self.communicator.allreduce(scalar)
            expected = scalar * self.communicator.size
            chainer.testing.assert_allclose(expected, reduced)
        self.teardown()
github chainer / chainer / tests / chainer_tests / graph_optimization_tests / test_static_graph_models.py View on Github external
def test_forward_gpu(self):
        """Move the chain to GPU first, then forward-check the GPU input."""
        self.chain.to_gpu()
        x_gpu = cuda.to_gpu(self.x)
        self.check_forward(x_gpu)
github TadaoYamaoka / DeepLearningShogi / dlshogi / train_val_network_bootstrap.py View on Github external
def mini_batch(hcpevec):
    """Decode a batch of hcpe records into GPU-resident chainer Variables.

    Returns a 4-tuple of Variables: (features1, features2, result, value),
    with result/value reshaped to column vectors of shape (batch, 1).
    NOTE(review): ``move`` is decoded by cppshogi but never returned —
    confirm that is intentional for this bootstrap training script.
    """
    batch_size = len(hcpevec)
    features1 = np.empty((batch_size, FEATURES1_NUM, 9, 9), dtype=np.float32)
    features2 = np.empty((batch_size, FEATURES2_NUM, 9, 9), dtype=np.float32)
    move = np.empty((batch_size), dtype=np.int32)
    result = np.empty((batch_size), dtype=np.int32)
    value = np.empty((batch_size), dtype=np.float32)

    # C++ extension fills every buffer in place.
    cppshogi.hcpe_decode_with_value(hcpevec, features1, features2, move, result, value)

    result_col = result.reshape((batch_size, 1))
    value_col = value.reshape((batch_size, 1))
    return (
        Variable(cuda.to_gpu(features1)),
        Variable(cuda.to_gpu(features2)),
        Variable(cuda.to_gpu(result_col)),
        Variable(cuda.to_gpu(value_col)),
    )
github musyoku / chainer-speech-recognition / dataset.py View on Github external
unigram_ids = unigram_ids[:possibole_t_length]
			bigram_ids = bigram_ids[:possibole_t_length]
			t_length = len(unigram_ids)

		# t
		t_batch[batch_idx, :t_length] = unigram_ids
		bigram_batch[batch_idx, :t_length] = bigram_ids
		t_length_batch.append(t_length)

	x_batch = (x_batch - x_mean) / x_std

	# GPU
	if gpu:
		x_batch = cuda.to_gpu(x_batch.astype(np.float32))
		t_batch = cuda.to_gpu(t_batch.astype(np.int32))
		bigram_batch = cuda.to_gpu(bigram_batch.astype(np.int32))
		x_length_batch = cuda.to_gpu(np.asarray(x_length_batch).astype(np.int32))
		t_length_batch = cuda.to_gpu(np.asarray(t_length_batch).astype(np.int32))

	return x_batch, x_length_batch, t_batch, t_length_batch, bigram_batch
github sekiguchi92 / SpeechEnhancement / Jointly_Diagonalizable_FullRank_Model / FastMNMF_DP.py View on Github external
def load_parameter(self, fileName):
        """Restore model parameters pickled by a previous run.

        Parameters
        ----------
        fileName : str
            Path to a pickle file holding the ordered parameter list.

        Side effects: sets the eight model-parameter attributes; arrays are
        moved to GPU when the backend array module is not plain NumPy.

        Security NOTE: ``pickle.load`` can execute arbitrary code — only load
        parameter files from trusted sources.
        """
        # Context manager closes the handle deterministically; the original
        # pic.load(open(...)) leaked the file object.
        with open(fileName, "rb") as f:
            param_list = pic.load(f)
        if self.xp != np:
            param_list = [cuda.to_gpu(param) for param in param_list]
        self.lambda_NFT, self.G_NFM, self.Q_FMM, self.U_F, self.V_T, self.Z_speech_DT, self.W_noise_NnFK, self.H_noise_NnKT = param_list
github chantera / blstm-cws / app / model.py View on Github external
def _sequential_var(self, xs):
        """Wrap each array in ``xs`` as a Variable on the configured device."""
        # Pick the transfer function once, based on the CPU/GPU flag.
        mover = cuda.to_cpu if self._cpu else cuda.to_gpu
        # volatile='auto' lets the surrounding context decide volatility
        # (legacy chainer v1 API, preserved as-is).
        return [Variable(mover(x), volatile='auto') for x in xs]