How to use the chainer.Variable class in chainer

To help you get started, we’ve selected a few chainer.Variable examples, based on popular ways it is used in public projects.
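Before the project examples, here is a minimal, self-contained sketch of what chainer.Variable does: it wraps a NumPy (or CuPy) array and records the operations applied to it, so gradients can later be computed with backward().

import numpy as np
import chainer

# Wrapping an array; operations on the Variable build a computational graph.
x = chainer.Variable(np.array([1.0, 2.0, 3.0], dtype=np.float32))
y = chainer.functions.sum(x ** 2)

# Backpropagate from the scalar result; the gradient lands in x.grad.
y.backward()
print(x.grad)  # [2. 4. 6.]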


github chainer / chainer / tests / chainer_tests / functions_tests / pooling_tests / test_average_pooling_nd.py
def forward(self):
    x = chainer.Variable(self.x)
    return functions.average_pooling_nd(
        x, self.ksize, self.stride, self.pad)
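The test wraps a raw array in chainer.Variable before calling functions.average_pooling_nd. A standalone sketch of the same call, with shapes and pooling parameters assumed for illustration:

import numpy as np
import chainer
from chainer import functions

# Assumed: batch of 2, 3 channels, 8x8 spatial size; 2x2 pooling, stride 2.
x = chainer.Variable(np.random.rand(2, 3, 8, 8).astype(np.float32))
y = functions.average_pooling_nd(x, ksize=2, stride=2, pad=0)
print(y.shape)  # (2, 3, 4, 4)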
github chainer / chainercv / tests / utils_tests / extension_tests / test_forward.py
def setUp(self):
    self.xp = np

    self.mocked_model = mock.MagicMock()
    self.mocked_model.xp = self.xp

    self.inputs = tuple(np.empty(shape) for shape in self.in_shapes)
    if len(self.inputs) == 1:
        self.inputs = self.inputs[0]

    self.outputs = tuple(
        self.xp.array(np.empty(shape)) for shape in self.out_shapes)
    if self.variable:
        self.outputs = tuple(
            chainer.Variable(output) for output in self.outputs)
    if len(self.outputs) == 1:
        self.outputs = self.outputs[0]
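The pattern to note is the optional wrapping: raw arrays become Variables only when the test is parameterised with variable=True, and single-element tuples are unwrapped. A minimal sketch of the wrapping step, with the flag and shapes assumed:

import numpy as np
import chainer

variable = True  # assumed flag, mirroring self.variable above
outputs = tuple(np.empty((2, 3), dtype=np.float32) for _ in range(2))
if variable:
    # chainer.Variable accepts NumPy arrays directly (and CuPy arrays on GPU)
    outputs = tuple(chainer.Variable(o) for o in outputs)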
github chainer / chainer / tests / chainer_tests / test_variable.py
def test_backward_no_grad_required(self):
    class DummyId(chainer.functions.math.identity.Identity):

        def backward(self, a, b):
            raise Exception('backward should not be called on inputs that '
                            'do not require grads')

    x = chainer.Variable(self.x)
    y1, y2 = DummyId().apply((x, x))
    x.node._requires_grad = False
    y1.backward()
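This test pokes the private x.node._requires_grad flag; in application code the public route is the requires_grad argument of the Variable constructor. A minimal sketch:

import numpy as np
import chainer

# A Variable that does not require gradients never receives backprop.
x = chainer.Variable(np.ones((2, 2), dtype=np.float32), requires_grad=False)
y = chainer.functions.sum(x * 2)
y.backward()
print(x.grad)  # None: no gradient was propagated to x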
github chainer / chainer / tests / chainer_tests / functions_tests / math_tests / test_basic_math.py
def check_forward(self, op, array_conv, positive):
    value = self.value
    if positive:
        value = numpy.abs(value)
    v = array_conv(value)
    x = chainer.Variable(array_conv(self.x))
    y = op(x, v)
    if self.dtype == numpy.float16:
        tol = 1e-3
    else:
        tol = 1e-6

    testing.assert_allclose(
        op(self.x, value), y.data, atol=tol, rtol=tol)
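Variables overload Python's arithmetic operators, which is what op exercises here, and the looser tolerance for float16 (1e-3 vs 1e-6) reflects its limited precision. A standalone sketch of the same kind of check:

import numpy as np
import chainer
from chainer import testing

x = chainer.Variable(np.array([1.0, 2.0], dtype=np.float32))
y = x + 3.0  # __add__ is overloaded on Variable
testing.assert_allclose(y.data, np.array([4.0, 5.0], dtype=np.float32))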
github takerum / vat_chainer / train_semisup.py
    for i in range(args.epoch_decay_start, args.num_epochs):
        alpha_plan[i] = float(args.num_epochs - i) / (args.num_epochs - args.epoch_decay_start) * args.lr
        beta1_plan[i] = args.mom2

    accs_test = np.zeros(args.num_epochs)
    cl_losses = np.zeros(args.num_epochs)
    ul_losses = np.zeros(args.num_epochs)
    mkdir_p(args.log_dir)
    for epoch in range(args.num_epochs):
        optimizer.alpha = alpha_plan[epoch]
        optimizer.beta1 = beta1_plan[epoch]
        sum_loss_l = 0
        sum_loss_ul = 0
        start = time.time()
        for it in range(args.num_iter_per_epoch):
            x, t = train_l.get(args.batchsize, gpu=args.gpu, aug_trans=args.aug_trans, aug_flip=args.aug_flip)
            loss_l = loss_labeled(enc, Variable(x), Variable(t))
            x_u, _ = train_ul.get(args.batchsize_ul, gpu=args.gpu, aug_trans=args.aug_trans, aug_flip=args.aug_flip)
            loss_ul = loss_unlabeled(enc, Variable(x_u), args)
            loss_total = loss_l + loss_ul
            enc.cleargrads()
            loss_total.backward()
            optimizer.update()
            sum_loss_l += loss_l.data
            sum_loss_ul += loss_ul.data
        end = time.time()
        cl_losses[epoch] = sum_loss_l / args.num_iter_per_epoch
        ul_losses[epoch] = sum_loss_ul / args.num_iter_per_epoch
        if (epoch + 1) % args.eval_freq == 0:
            acc_test_sum = 0
            test_x, test_t = test.get()
            N_test = test_x.shape[0]
            for i in range(0, N_test, args.batchsize_eval):
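The Variable usage here is the standard Chainer training cycle: wrap each batch in Variables, compute a loss, then cleargrads, backward, and update (optimizer.alpha and optimizer.beta1 are Adam hyperparameters being rescheduled each epoch). A self-contained sketch of one such step, with a placeholder model and random data:

import numpy as np
import chainer
from chainer import functions as F, links as L, optimizers

model = L.Linear(3, 1)            # placeholder model
optimizer = optimizers.Adam(alpha=1e-3)
optimizer.setup(model)

x = chainer.Variable(np.random.rand(4, 3).astype(np.float32))
t = np.random.rand(4, 1).astype(np.float32)

loss = F.mean_squared_error(model(x), t)
model.cleargrads()   # zero previously accumulated gradients
loss.backward()      # backprop from the scalar loss
optimizer.update()   # apply one Adam step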
github masashi-y / depccg / src / py / lstm_tagger.py
def _init_state(self, xp, batchsize):
    res = [Variable(xp.zeros((  # forward cx, hx, backward cx, hx
            self.nlayers, batchsize, self.hidden_dim), 'f')) for _ in range(4)]
    return res
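Because xp is either numpy or cupy, the same line initialises LSTM states on CPU or GPU; 'f' is the NumPy shorthand for float32. A standalone sketch with assumed dimensions:

import numpy as np
from chainer import Variable

nlayers, batchsize, hidden_dim = 2, 8, 128  # assumed sizes
# Four zero states: forward cell/hidden and backward cell/hidden.
state = [Variable(np.zeros((nlayers, batchsize, hidden_dim), dtype=np.float32))
         for _ in range(4)]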
github TadaoYamaoka / DeepLearningShogi / dlshogi / train_rl_policy_with_value_using_hcpe_bootstrap.py
def mini_batch(hcpevec):
    features1 = np.empty((len(hcpevec), FEATURES1_NUM, 9, 9), dtype=np.float32)
    features2 = np.empty((len(hcpevec), FEATURES2_NUM, 9, 9), dtype=np.float32)
    move = np.empty((len(hcpevec)), dtype=np.int32)
    result = np.empty((len(hcpevec)), dtype=np.int32)
    value = np.empty((len(hcpevec)), dtype=np.float32)

    cppshogi.hcpe_decode_with_value(hcpevec, features1, features2, move, result, value)

    z = result.astype(np.float32) - value + 0.5

    return (Variable(cuda.to_gpu(features1)),
            Variable(cuda.to_gpu(features2)),
            Variable(cuda.to_gpu(move)),
            Variable(cuda.to_gpu(result.reshape((len(hcpevec), 1)))),
            Variable(cuda.to_gpu(z)),
            Variable(cuda.to_gpu(value.reshape((len(value), 1))))
            )
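Each array is moved to the GPU with cuda.to_gpu before being wrapped; chainer.Variable is agnostic about the backing array, so a CPU variant simply drops the transfer. A minimal sketch (CPU, with an assumed feature-plane count):

import numpy as np
from chainer import Variable

features = np.empty((16, 104, 9, 9), dtype=np.float32)  # 104 planes is assumed
v = Variable(features)
# on GPU: from chainer import cuda; v = Variable(cuda.to_gpu(features))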
github fukuta0614 / chainer-image-generation / vae-gan / train_vaegan.py
    x_batch = np.zeros((batch_size, img_channel, image_size, image_size), dtype=np.float32)
    attr_batch = np.zeros((batch_size, attribute_size), dtype=np.float32)
    for i in range(batch_size):
        data_index = np.random.randint(N)
        img = d.img_orig(data_index)
        attr = d.attributes[data_index]
        offset_x = np.random.randint(8) + 13
        offset_y = np.random.randint(8) + 33
        w = 144
        h = 144
        pixels = np.asarray(img.convert('RGB').crop((offset_x, offset_y, offset_x + w, offset_y + h)).resize(
            (image_size, image_size)))
        pixels = pixels.astype(np.float32).transpose((2, 0, 1)).reshape((3, image_size, image_size))
        x_batch[i] = pixels / 127.5 - 1
        attr_batch[i] = attr
    x_batch = Variable(xp.asarray(x_batch), volatile=volatile)
    attr_batch = Variable(xp.asarray(attr_batch), volatile=volatile)

    return x_batch, attr_batch
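Note that the volatile argument is Chainer v1 API and was removed in Chainer v2. The rough modern equivalent, sketched below, is to create inference-only Variables inside chainer.no_backprop_mode() so that no computational graph is recorded:

import numpy as np
import chainer

x = np.zeros((8, 3, 64, 64), dtype=np.float32)  # assumed batch shape
with chainer.no_backprop_mode():
    v = chainer.Variable(x)
    # operations on v inside this block build no backward graph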
github ronekko / deep_metric_learning / lib / functions / clustering_loss.py
        gamma (~float):
            Hyperparameter gamma.
        T (int):
            Maximum number of iterations in Algorithm 2.

    Returns:
        ~chainer.Variable: Loss value.

    See: `Learnable Structured Clustering Framework for Deep Metric Learning`_

    """
    if not isinstance(x, chainer.Variable):
        x = chainer.Variable(x)
    if not isinstance(t, chainer.Variable):
        t = chainer.Variable(t)
    t_cpu = chainer.cuda.to_cpu(t.data).ravel()

    batch_size = len(t.data)
    num_classes = len(np.unique(t_cpu))

    v = list(range(batch_size))
    s = []

    # First, search the sub-optimal solution y_PAM of the clustering.
    # Note that this computation is done outside the computational graph.
    # Find an initial medoids of S_PAM by Algorithm 1 in the paper.
    D = distance_matrix(x.data)
    D = cuda.to_cpu(D)
    for _ in range(num_classes):
        # find an element in v which maximises a_function
        a_best = -np.inf
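The isinstance checks at the top implement a common defensive pattern: accept either raw arrays or Variables and normalise to Variables. A minimal sketch of the same idea as a hypothetical helper:

import numpy as np
import chainer

def to_variable(x):
    # hypothetical helper mirroring the isinstance pattern above
    return x if isinstance(x, chainer.Variable) else chainer.Variable(x)

x = to_variable(np.ones((2, 3), dtype=np.float32))
# chainer.as_variable(...) provides similar transparent wrapping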
github MorinoseiMorizo / sentence_similarity / enc_dec_model.py
def _test_decode(self, batch_size, bos_id, eos_id, limit):
    z_list = []
    y = [bos_id for _ in range(batch_size)]
    y = chainer.Variable(self.xp.array(y, dtype=self.xp.int32))
    for i in range(limit):
        d_h0 = self.dec_embed(y)
        d_h1 = self.dec_lstm_1(d_h0)
        d_h2 = self.dec_lstm_2(d_h1)
        z = self.dec_output(d_h2)
        z = [int(w) for w in z.data.argmax(1)]
        if all(w == eos_id for w in z):
            break
        z_list.append(z)
        y = chainer.Variable(self.xp.array(z, dtype=self.xp.int32))
    return z_list
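Variables are not limited to float data: here token ids are wrapped as int32 Variables and fed back into the decoder's embedding layer at each step. A minimal sketch with assumed vocabulary and embedding sizes:

import numpy as np
import chainer
import chainer.links as L

embed = L.EmbedID(1000, 64)  # assumed: vocab size 1000, embedding dim 64
ids = chainer.Variable(np.array([1, 1, 1], dtype=np.int32))  # e.g. three BOS ids
h = embed(ids)
print(h.shape)  # (3, 64)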