How to use the `chainer.backends.cuda.to_gpu` function in Chainer

To help you get started, we’ve selected a few Chainer examples, based on popular ways the function is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github chainer / chainer / tests / chainer_tests / test_function.py View on Github external
def setup_gpu(self, device=0):
    """Move the cached test arrays onto GPU *device* and mock the kernels.

    Every array fixture except ``gx2`` is transferred; ``gx2`` is left as
    ``None`` to simulate a gradient that the function does not produce.
    The fake forward/backward GPU kernels simply return the fixtures.
    """
    for attr in ('x1', 'x2', 'y1', 'y2', 'gx1', 'gy1', 'gy2'):
        setattr(self, attr, cuda.to_gpu(getattr(self, attr), device))
    self.gx2 = None
    self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
    self.f.backward_gpu = mock.MagicMock(return_value=(self.gx1, self.gx2))
github chainer / chainer / tests / chainer_tests / functions_tests / pooling_tests / test_unpooling_nd.py View on Github external
def test_double_backward_gpu(self):
    """Run the second-order gradient check on GPU copies of the fixtures."""
    x, gy, ggx = (cuda.to_gpu(a) for a in (self.x, self.gy, self.ggx))
    self.check_double_backward(x, gy, ggx)
github chainer / chainer / tests / chainer_tests / links_tests / connection_tests / test_n_step_lstm.py View on Github external
def test_forward_gpu_train(self):
    """Forward pass on GPU with cuDNN forced on and train mode enabled."""
    self.rnn.to_gpu()
    with chainer.using_config('use_cudnn', 'always'):
        with chainer.using_config('train', True):
            hidden = cuda.to_gpu(self.h)
            cell = cuda.to_gpu(self.c)
            inputs = [cuda.to_gpu(x) for x in self.xs]
            self.check_forward(hidden, cell, inputs)
github chainer / chainer / tests / chainer_tests / test_variable.py View on Github external
def check_get_item(self, gpu):
    """Verify ``Variable.__getitem__`` agrees with raw-array indexing.

    Tried with both a bare slice and the equivalent one-element tuple
    of slices; results are compared on the CPU regardless of where the
    data lives.  Skipped for 0-d fixtures, which cannot be sliced.
    """
    x_data = cuda.to_gpu(self.x) if gpu else self.x
    x = chainer.Variable(x_data)
    if len(self.x_shape) > 0:
        # slice(2, 5) and (slice(2, 5),) must index identically.
        for slices in (slice(2, 5), (slice(2, 5),)):
            np.testing.assert_equal(cuda.to_cpu(x[slices].data),
                                    cuda.to_cpu(x_data[slices]))
github chainer / chainer / tests / chainer_tests / functions_tests / math_tests / test_logsumexp.py View on Github external
def test_double_backward_negative_multi_axis_invert_gpu(self):
    """Double backward on GPU, reducing over the mixed axes ``(-2, 0)``."""
    # Broadcast the upstream gradient to the reduced output's shape.
    upstream = numpy.ones_like(self.x.sum(axis=(-2, 0))) * self.gy
    self.check_double_backward(
        cuda.to_gpu(self.x),
        cuda.to_gpu(upstream),
        cuda.to_gpu(self.ggx),
        axis=(-2, 0))
github chainer / chainer / tests / chainer_tests / functions_tests / math_tests / test_erf.py View on Github external
def _erf_gpu(x, dtype):
    """Evaluate erf via the CPU reference, then push the result to GPU."""
    host = _erf_cpu(cuda.to_cpu(x), dtype)
    return cuda.to_gpu(host)
github chainer / chainer / tests / chainer_tests / test_function.py View on Github external
def setup_gpu(self, device=0):
    """Upload the array fixtures to GPU *device* and stub the GPU kernels.

    ``gx2`` stays ``None`` on purpose (a gradient the mocked backward
    pass does not supply); everything else is copied to the device.
    """
    def to_dev(arr):
        return cuda.to_gpu(arr, device)

    self.x1, self.x2 = to_dev(self.x1), to_dev(self.x2)
    self.y1, self.y2 = to_dev(self.y1), to_dev(self.y2)
    self.gx1 = to_dev(self.gx1)
    self.gx2 = None  # deliberately missing gradient
    self.gy1, self.gy2 = to_dev(self.gy1), to_dev(self.gy2)
    self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
    self.f.backward_gpu = mock.MagicMock(return_value=(self.gx1, self.gx2))
github chainer / chainer / tests / chainer_tests / functions_tests / loss_tests / test_cross_covariance.py View on Github external
def test_double_backward_gpu(self):
    """Second-order gradient check with every fixture moved to the GPU."""
    args = [cuda.to_gpu(a)
            for a in (self.y, self.z, self.gloss, self.ggy, self.ggz)]
    self.check_double_backward(*args)
github chainer / chainer / tests / chainer_tests / serializers_tests / test_npz.py View on Github external
def test_deserialize_gpu_strip_slashes(self):
    """Deserialization under key ``'/y'`` must tolerate the leading slash."""
    target = cuda.to_gpu(numpy.empty((2, 3), dtype=numpy.float32))
    self.check_deserialize(target, '/y')