How to use the torch.autograd.Variable class in torch

To help you get started, we’ve selected a few examples based on popular ways torch.autograd.Variable is used in public projects.

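All of the project snippets below share the same basic pattern: wrap a plain tensor in Variable so autograd can record operations on it, optionally move it to the GPU, run a forward and backward pass, and read the loss or gradients back out. As a quick orientation, here is a minimal, self-contained sketch of that pattern. The model, data, and names are invented purely for illustration; note also that since PyTorch 0.4 Variable and Tensor have been merged, so the Variable() wrapper still works but is no longer required.

import torch
import torch.nn as nn
from torch.autograd import Variable

# Toy data and model, purely for illustration.
x = Variable(torch.randn(8, 3))   # wrap the input tensor (returns a plain Tensor on PyTorch >= 0.4)
y = Variable(torch.randn(8, 1))   # wrap the target tensor
model = nn.Linear(3, 1)
criterion = nn.MSELoss()

use_cuda = torch.cuda.is_available()
if use_cuda:                      # same Variable-then-.cuda() pattern used in the snippets below
    x, y, model = x.cuda(), y.cuda(), model.cuda()

output = model(x)                 # forward pass
loss = criterion(output, y)

model.zero_grad()
loss.backward()                   # gradients are now populated on the model's parameters
print(loss.item())                # older code reads loss.data[0] instead

If you need gradients with respect to the input itself, pass requires_grad=True when wrapping it.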

github raeidsaqur / CapsGAN / capsule_dcgan_mnist / train.py
            optimizer_d.zero_grad()  # zero the gradients
            x = Variable(x)  # wrap the input tensor in a Variable
            if use_cuda:  # move to GPU if available
                x = x.cuda()
            output = discriminator(x)
            real_v = Variable(torch.Tensor(batch_size).fill_(real_label).float())
            if use_cuda:
                real_v = real_v.cuda()
            loss_d = discriminator.loss(x, output, real_v, False)
            loss_d.backward()
            Dx = output.data.mean(dim=0)[0]
            Dx = Dx.sum(dim=1)
            # training D on fake data
            z = sample_noise(batch_size, 1)
            z = Variable(z)
            if use_cuda:
                z = z.cuda()

            fake = generator(z)
            output = discriminator(fake.detach())
            fake_v = Variable(torch.Tensor(batch_size).fill_(fake_label).float())
            if use_cuda:
                fake_v = fake_v.cuda()
            loss_g = discriminator.loss(x, output, fake_v, False)
            loss_g.backward()
            optimizer_d.step()
            #should we include the loss function of the data
            err_D = loss_d.data[0] + loss_g.data[0]


            # TODO: D training much faster than G
github HLTCHKUST / Mem2Seq / utils / until_temp.py
            end = lengths[i]
            padded_seqs[i, :end] = seq[:end]
        return padded_seqs, lengths

    # sort a list by sequence length (descending order) to use pack_padded_sequence
    data.sort(key=lambda x: len(x[0]), reverse=True)
    # separate source and target sequences
    src_seqs, trg_seqs, ind_seqs, target_plain, max_len, src_plain = zip(*data)
    # merge sequences (from a tuple of 1D tensors to a 2D tensor)
    src_seqs, src_lengths = merge(src_seqs, max_len)
    ind_seqs, ind_length = merge(ind_seqs, None)
    # gete_s, _ = merge(gete_s, None)

    src_seqs = Variable(src_seqs).transpose(0, 1)
    trg_seqs = Variable(torch.Tensor(trg_seqs))
    ind_seqs = Variable(ind_seqs).transpose(0, 1)
    # gete_s = Variable(gete_s).transpose(0,1)
    if USE_CUDA:
        src_seqs = src_seqs.cuda()
        trg_seqs = trg_seqs.cuda()
        ind_seqs = ind_seqs.cuda()
        # gete_s = gete_s.cuda()
    return src_seqs, src_lengths, trg_seqs, ind_length, ind_seqs, target_plain, src_plain
github shahsohil / sunets / train_seg.py
def valmodel(model, valloader, epoch, data):
    global l_avg_test, totalclasswise_pixel_acc_test, totalclasswise_gtpixels_test, totalclasswise_predpixels_test
    global steps_test

    model.eval()

    for i, (imgs_test, lbls_test) in enumerate(valloader):
        if torch.cuda.is_available():
            # volatile=True is the pre-0.4 way to disable gradient tracking (replaced by torch.no_grad())
            imgs_testV = Variable(imgs_test.cuda(), volatile=True)
            lbls_testV = Variable(lbls_test.cuda(), volatile=True)
        else:
            imgs_testV = Variable(imgs_test, volatile=True)
            lbls_testV = Variable(lbls_test, volatile=True)

        outputs, losses, classwise_pixel_acc, classwise_gtpixels, classwise_predpixels, total_valid_pixel = \
            model(imgs_testV, lbls_testV)

        total_valid_pixel = float(total_valid_pixel.sum(0).data.cpu().numpy())

        l_avg_test += (losses.sum().data.cpu().numpy())
        steps_test += total_valid_pixel
        totalclasswise_pixel_acc_test += classwise_pixel_acc.sum(0).data.cpu().numpy()
        totalclasswise_gtpixels_test += classwise_gtpixels.sum(0).data.cpu().numpy()
        totalclasswise_predpixels_test += classwise_predpixels.sum(0).data.cpu().numpy()

        if (i + 1) % 50 == 0:
            pickle.dump(imgs_test[0].numpy(),
                        open("results/saved_val_images/" + str(epoch) + "_" + str(i) + "_input.p", "wb"))
github minerva-ml / open-solution-googleai-object-detection / src / retinanet.py
        Args:
          x: (tensor) sized [N,D].
          y: (tensor) sized [N,].

        Return:
          (tensor) focal loss.
        """
        alpha = 0.25

        t = one_hot_embedding(y.data.cpu(), 1 + self.num_classes)
        t = t[:, 1:]
        if torch.cuda.is_available():
            t = Variable(t).cuda()  # [N,20]
        else:
            t = Variable(t)  # [N,20]

        xt = x * (2 * t - 1)  # xt = x if t > 0 else -x
        pt = (2 * xt + 1).sigmoid()

        w = alpha * t + (1 - alpha) * (1 - t)
        loss = -w * pt.log() / 2
        return loss.sum()
github melissa135 / mlp_stock / train_net.py
    mlp = MLP()
    print(mlp)

    criterion = nn.L1Loss()
    optimizer = optim.Adam(mlp.parameters(), lr=0.001)

    mlp_list = []
    crt_list = []

    for epoch in range(0, max_epochs):

        current_loss = 0
        for i, data in enumerate(trainloader, 0):

            input, target = data
            input, target = Variable(input), Variable(target)

            mlp.zero_grad()
            output = mlp(input.float())
            loss = criterion(output, target.float())
      
            loss.backward()
            optimizer.step()

            loss = loss.data[0]
            current_loss += loss

        #print ('[ %d ] loss : %.3f' % (epoch+1,current_loss))

        train_c = incorrectness(mlp, trainloader)
        test_c = incorrectness(mlp, testloader)
        print('[ %d ] incorrectness: %.4f %.4f' % (epoch + 1, train_c, test_c))
github ikostrikov / pytorch-trpo / main.py
    def get_kl():
        mean1, log_std1, std1 = policy_net(Variable(states))

        mean0 = Variable(mean1.data)
        log_std0 = Variable(log_std1.data)
        std0 = Variable(std1.data)
        kl = log_std1 - log_std0 + (std0.pow(2) + (mean0 - mean1).pow(2)) / (2.0 * std1.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)
github fastnlp / fastNLP / fastNLP / modules / encoder / masked_rnn.py
    def forward(self, input, mask=None, hx=None):
        batch_size = input.size(0) if self.batch_first else input.size(1)
        lstm = self.Cell is nn.LSTMCell
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = torch.autograd.Variable(
                input.data.new(self.num_layers * num_directions, batch_size, self.hidden_size).zero_())
            if lstm:
                hx = (hx, hx)

        func = AutogradMaskedRNN(num_layers=self.num_layers,
                                 batch_first=self.batch_first,
                                 step_dropout=self.step_dropout,
                                 layer_dropout=self.layer_dropout,
                                 train=self.training,
                                 bidirectional=self.bidirectional,
                                 lstm=lstm)  # all_cells is passed in; the call keeps descending into the lower-level implementation

        output, hidden = func(input, self.all_cells, hx,
                              None if mask is None else mask.view(mask.size() + (1,)))  # what is this `+ (1,)` doing? (it appends a trailing dimension of size 1 to the mask)
        return output, hidden
github jasonwu0731 / trade-dst / utils / masked_cross_entropy.py
def masked_cross_entropy_(logits, target, length, take_log=False):
    if USE_CUDA:
        length = Variable(torch.LongTensor(length)).cuda()
    else:
        length = Variable(torch.LongTensor(length))    

    # logits_flat: (batch * max_len, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))  # -1 means inferred from the other dimensions
    if take_log:
        logits_flat = torch.log(logits_flat)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(logits_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1)) 
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()
    return loss
github andrewliao11 / dni.pytorch / train.py
    def test_model(self, epoch):
        # Test the Model
        self.net.eval()
        correct = 0
        total = 0
        for images, labels in self.test_loader:
            images = Variable(images).cuda()
            outputs = self.net(images)
            outputs = outputs[-1]
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted.cpu() == labels).sum()
        perf = 100 * correct / total
        print('Epoch %d: Accuracy of the network on the 10000 test images: %d %%' % (epoch, perf))
        return perf
github ucbdrive / spc / prediction_module / spn_gta / segment.py
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        if type(criterion) in [torch.nn.modules.loss.L1Loss,
                               torch.nn.modules.loss.MSELoss]:
            target = target.float()

        seg[seg < 2] = 0
        seg[seg > 3] = 0
        seg[seg == 3] = 1

        input = input.cuda()
        target = target.cuda(non_blocking=True)  # was async=True; 'async' is a reserved word in Python 3.7+ and the kwarg was renamed
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(input_var)[0]
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        # prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        if eval_score is not None:
            score.update(eval_score(output, target_var), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0: