How to use the torch.utils.data.DataLoader class in torch

To help you get started, we’ve selected a few torch examples that illustrate popular ways DataLoader is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github natanielruiz / deep-head-pose / code / test_AFW.py — View on GitHub (external link)
# ResNet18
    # model = hopenet.Hopenet(torchvision.models.resnet.BasicBlock, [2, 2, 2, 2], 66)

    # NOTE(review): Python 2 syntax below (print statements, xrange) -- this
    # snippet will not parse under Python 3 without conversion.
    print 'Loading snapshot.'
    # Load snapshot
    saved_state_dict = torch.load(snapshot_path)
    model.load_state_dict(saved_state_dict)

    print 'Loading data.'

    # Preprocessing: resize, centre-crop to 224x224, convert to tensor.
    # transforms.Scale is the deprecated pre-0.2 torchvision name for
    # transforms.Resize.
    transformations = transforms.Compose([transforms.Scale(224),
    transforms.CenterCrop(224), transforms.ToTensor()])

    # AFW head-pose dataset; root directory and file list come from CLI args.
    pose_dataset = datasets.AFW(args.data_dir, args.filename_list,
                                transformations)
    # No shuffle argument: evaluation order follows the file list.
    test_loader = torch.utils.data.DataLoader(dataset=pose_dataset,
                                               batch_size=args.batch_size,
                                               num_workers=2)

    # `gpu` is defined outside this snippet -- presumably the target device id.
    model.cuda(gpu)

    print 'Ready to test network.'

    # Test the Model
    model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
    total = 0
    n_margins = 20
    # Accumulators indexed by margin -- presumably hit counts per error
    # threshold for each Euler angle; verify against the evaluation loop.
    yaw_correct = np.zeros(n_margins)
    pitch_correct = np.zeros(n_margins)
    roll_correct = np.zeros(n_margins)

    # Bin indices 0..65 (xrange is Python 2 only).
    idx_tensor = [idx for idx in xrange(66)]
github JunxuanZhang / TURN-TAP-pytorch / main.py — View on GitHub (external link)
model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
    # Let cuDNN benchmark convolution algorithms -- beneficial when input
    # shapes are fixed across iterations.
    cudnn.benchmark = True

    # Optionally resume training from a checkpoint given via --resume.
    if args.resume:
        if os.path.isfile(args.resume):
            print(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print(("=> loaded checkpoint (epoch {})"
                   .format(checkpoint['epoch'])))
        else:
            # Missing checkpoint is reported but not fatal -- training
            # continues from scratch.
            print(("=> no checkpoint found at '{}'".format(args.resume)))


    # Training loader: shuffled each epoch, incomplete final batch dropped.
    # NOTE(review): the batch size is also passed to the dataset itself
    # (batch_size=args.tr_batch_size) -- confirm the dataset really needs it.
    train_loader = torch.utils.data.DataLoader(
        turnTrainDataset(ctx_num=args.ctx_number, unit_feature_dim=args.unit_feature_dim,
                         unit_size=args.unit_size, batch_size=args.tr_batch_size,
                         video_length_info=train_video_length_info,
                         feat_dir=args.train_featmap_dir, clip_gt_path=args.train_clip_path,
                         background_path=args.background_path,
                         data_preparation=data_preparation),
        batch_size=args.tr_batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True,
        drop_last=True)

    # Validation loader: fixed order so metrics are comparable across runs.
    # (Call is truncated in this snippet.)
    val_loader = torch.utils.data.DataLoader(
        turnTestDataset(ctx_num=args.ctx_number, feat_dir=args.test_featmap_dir,
                        test_clip_path=args.test_clip_path, batch_size=args.ts_batch_size,
                        unit_feature_dim=args.unit_feature_dim, unit_size=args.unit_size,
                        data_preparation=data_preparation),
        batch_size=args.ts_batch_size, shuffle=False,
github MrtnMndt / OCDVAE_ContinualLearning / lib / Datasets / datasets.py — View on GitHub (external link)
def get_dataset_loader(self, batch_size, workers, is_gpu):
    """Build the train/validation loaders for the wrapped dataset.

    Parameters:
        batch_size (int): Mini-batch size used by both loaders.
        workers (int): Number of data-loading worker processes.
        is_gpu (bool): True if CUDA is enabled, so pin_memory is set to True.

    Returns:
        torch.utils.data.DataLoader: train_loader, val_loader
    """
    # Settings shared by both loaders.
    shared = dict(batch_size=batch_size, num_workers=workers,
                  pin_memory=is_gpu)
    # Training data is reshuffled every epoch; sampler=None keeps the
    # default sampling behaviour.
    train_loader = torch.utils.data.DataLoader(
        self.trainset, shuffle=True, sampler=None, **shared)
    # Validation keeps a fixed order so metrics are comparable across runs.
    val_loader = torch.utils.data.DataLoader(
        self.valset, shuffle=False, **shared)
    return train_loader, val_loader
github fregu856 / 3DOD_thesis / Extended-Frustum-PointNet / eval_frustum_pointnet_img_val.py — View on GitHub (external link)
batch_size = 8

# NOTE(review): model and data paths are hard-coded to a container layout
# (/root/3DOD_thesis) -- this script only runs inside that environment.
network = FrustumPointNetImg("Extended-Frustum-PointNet_eval_val", project_dir="/root/3DOD_thesis")
network.load_state_dict(torch.load("/root/3DOD_thesis/pretrained_models/model_38_2_epoch_400.pth"))
network = network.cuda()

# Number of heading bins used by the bounding-box sub-network.
NH = network.BboxNet_network.NH

val_dataset = EvalDatasetFrustumPointNetImg(kitti_data_path="/root/3DOD_thesis/data/kitti",
                                            kitti_meta_path="/root/3DOD_thesis/data/kitti/meta",
                                            type="val", NH=NH)

# Integer division: any incomplete final batch is not counted here.
num_val_batches = int(len(val_dataset)/batch_size)

# Fixed order (shuffle=False) so evaluation results are reproducible.
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=batch_size, shuffle=False,
                                         num_workers=4)

regression_loss_func = nn.SmoothL1Loss()

network.eval() # (set in evaluation mode, this affects BatchNorm, dropout etc.)
# Per-batch loss accumulators, one list per loss component.
batch_losses = []
batch_losses_InstanceSeg = []
batch_losses_TNet = []
batch_losses_BboxNet = []
batch_losses_BboxNet_center = []
batch_losses_BboxNet_size = []
batch_losses_BboxNet_heading_regr = []
batch_losses_BboxNet_heading_class = []
batch_losses_BboxNet_heading_class_weighted = []
batch_losses_corner = []
github thunil / Deep-Flow-Prediction / train / runTrain.py — View on GitHub (external link)
##########################

# Seed every RNG source (Python, NumPy, PyTorch CPU + all GPUs) from one
# random seed, and print it so a run can be reproduced.
seed = random.randint(0, 2**32 - 1)
print("Random seed: {}".format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
#torch.backends.cudnn.deterministic=True # warning, slower

# create pytorch data object with dfp dataset
data = dataset.TurbDataset(prop, shuffle=1)
# drop_last=True keeps every batch the same size (incomplete tail dropped).
trainLoader = DataLoader(data, batch_size=batch_size, shuffle=True, drop_last=True)
print("Training batches: {}".format(len(trainLoader)))
# Validation set is derived from the training data object.
dataValidation = dataset.ValiDataset(data)
valiLoader = DataLoader(dataValidation, batch_size=batch_size, shuffle=False, drop_last=True) 
print("Validation batches: {}".format(len(valiLoader)))

# setup training
# Convert an iteration budget into whole epochs, rounding to nearest.
epochs = int(iterations/len(trainLoader) + 0.5)
netG = TurbNetG(channelExponent=expo, dropout=dropout)
print(netG) # print full net
# Count trainable parameters for the log line below.
model_parameters = filter(lambda p: p.requires_grad, netG.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("Initialized TurbNet with {} trainable params ".format(params))

netG.apply(weights_init)
# Optionally warm-start from a saved model; doLoad is defined outside
# this snippet (empty string means train from scratch).
if len(doLoad)>0:
    netG.load_state_dict(torch.load(doLoad))
    print("Loaded model "+doLoad)
netG.cuda()
github WynMew / AnimeFaceRanker / FaceClassifier.py — View on GitHub (external link)
img,_ = resize(img, boxes=None, size=(img_size,img_size), random_interpolation=True)
    # Augmentation: random horizontal flip, then tensor conversion and
    # ImageNet mean/std normalisation.
    img = random_flip(img)
    img = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))
    ])(img)
    return img, labels

# Training set built from a crop list; paths are machine-specific.
trainset = ListDatasetLite(root='/home/wynmew/data/tachie',
                    list_file='/home/wynmew/data/tachie/faceCropList', # dataset with neg
                    classes = My_CLASSES,
                    barcode_Dict = codeDict,
                    transform=transform_train)

trainBatchSize = 12
# num_workers=0: data loading happens in the main process.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=trainBatchSize, shuffle=True, drop_last=True, num_workers=0)

def addlog(x):
    # Append one line to the run's log file (FilePre defined elsewhere).
    with open(FilePre+"_Log","a+") as outfile:
        outfile.write(x + "\n")

k=1 #OHEM

def train(epoch, model, loss_fn, optimizer, dataloader,log_interval=50):
    # One training epoch over `dataloader`; body is truncated in this snippet.
    model.train()
    train_loss = 0
    for batch_idx, (inputs, cls_targets) in enumerate(dataloader):
        # Variable() is a legacy (pre-0.4) PyTorch wrapper; tensors can be
        # used directly in modern PyTorch.
        inputs = Variable(inputs.cuda())
        optimizer.zero_grad()
        labelsPre = model(inputs)

        lossCLS = Variable(torch.FloatTensor(1)).zero_()
github espectre / Kaggle-Dogs_vs_Cats_PyTorch / train.py — View on GitHub (external link)
transforms.Resize((256,256)),
	# Augmentation: random 224x224 crop + horizontal flip, then ImageNet
	# mean/std normalisation.
	transforms.RandomCrop((224,224)),
	transforms.RandomHorizontalFlip(),
	transforms.ToTensor(),
	transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))
])

# Validation pipeline: deterministic resize only, no augmentation.
transform_val=transforms.Compose([ 
	transforms.Resize((224,224)),
	transforms.ToTensor(),
	transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225)),
])

trainset=DogCat('./data/train',transform=transform_train)
# NOTE(review): valset reads the same './data/train' directory as trainset --
# confirm DogCat splits internally, otherwise validation overlaps training.
valset  =DogCat('./data/train',transform=transform_val)
trainloader=torch.utils.data.DataLoader(trainset,batch_size=opt.batchSize,shuffle=True,num_workers=opt.num_workers)
valloader=torch.utils.data.DataLoader(valset,batch_size=opt.batchSize,shuffle=False,num_workers=opt.num_workers)

# Fine-tune a pretrained ResNet-101: replace the head with a 2-class layer.
model=resnet101(pretrained=True)
model.fc=nn.Linear(2048,2)
model.cuda()
optimizer=torch.optim.SGD(model.parameters(),lr=opt.lr,momentum=0.9,weight_decay=5e-4)
# Decay the learning rate every 3 epochs.
scheduler=StepLR(optimizer,step_size=3)
criterion=nn.CrossEntropyLoss()
criterion.cuda()

def train(epoch):
	# One training epoch; body is truncated in this snippet.
	print('\nEpoch: %d' % epoch)
	# NOTE(review): in PyTorch >= 1.1 scheduler.step() should be called after
	# the optimizer steps (end of epoch), not before -- verify torch version.
	scheduler.step()
	model.train()
	for batch_idx,(img,label) in enumerate(trainloader):
		image=Variable(img.cuda())
github guoyongcs / AEGAN / train.py — View on GitHub (external link)
# Warn (but do not abort) when CUDA exists yet --cuda was not requested.
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")

if opt.dataset in ['flowers', 'birds', 'volcano', 'ant', 'monastery', 'fire_engine', 'harvester', 'broccoli', 'studio_couch', 'lfw', 'imagenet']:
    # folder dataset
    # transforms.Scale is the deprecated pre-0.2 torchvision name for
    # transforms.Resize; normalisation maps pixels to [-1, 1] for the GAN.
    dataset = dset.ImageFolder(root=opt.dataroot,
                               transform=transforms.Compose([
                                   transforms.Scale(opt.imageSize),
                                   transforms.CenterCrop(opt.imageSize),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))

# NOTE(review): if opt.dataset is not in the list above, `dataset` is never
# assigned and this line raises NameError before the assert can fire.
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=int(opt.workers))

# Hyperparameters from CLI options: latent size and feature-map widths.
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = int(opt.nc)


# design model
netG1 = _netG1(nz, ngf, opt.batchSize)
# netG1 = AEGAN_ResnetDecoder(nz, 64, n_blocks=1, n_downsampling=2)
# netG1.apply(weights_init)
# Optionally warm-start generator 1 from a saved state dict.
if opt.netG1 != '':
    netG1.load_state_dict(torch.load(opt.netG1))
netG1 = vutils.init_net(netG1, opt.gpu_ids) 
print(netG1)
github danielhavir / capsule-network / main.py — View on GitHub (external link)
else:
    # Fallback branch of a dataset-selection `if` that is truncated above.
    raise ValueError('Dataset must be either MNIST or CIFAR')

# `size`, `mean`, `std` are set by the (truncated) dataset branch above.
transform = transforms.Compose([
    # shift by 2 pixels in either direction with zero padding.
    transforms.RandomCrop(size, padding=2),
    transforms.ToTensor(),
    transforms.Normalize( mean, std )
])

# Train/test loaders keyed by split name; datasets download on first use.
loaders = {}
trainset = datasets[args.dataset.upper()](root=args.data_path, train=True, download=True, transform=transform)
loaders['train'] = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)

testset = datasets[args.dataset.upper()](root=args.data_path, train=False, download=True, transform=transform)
loaders['test'] = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=2)
print(8*'#', f'Using {args.dataset.upper()} dataset', 8*'#')

# Run
caps_net = CapsNetTrainer(loaders, args.batch_size, args.learning_rate, args.num_routing, args.lr_decay, device=device, multi_gpu=args.multi_gpu)
caps_net.run(args.epochs, classes=classes)
github ZiangYan / deepdefense.pytorch / deepdefense.py — View on GitHub (external link)
log.info('Final model saved to %s' % fname)


if __name__ == '__main__':
    args = parse_args()

    log.info('Called with args:')
    log.info(args)

    # Fix all RNG sources for reproducible runs; deterministic cuDNN trades
    # some speed for bit-exact results.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True

    # Build train/test loaders for the selected dataset.
    # NOTE(review): shuffle=False on the training loader looks deliberate
    # (fixed sample order alongside the deterministic seeding) -- confirm.
    if args.dataset == 'mnist':
        train_loader = torch.utils.data.DataLoader(MNISTDataset(phase='trainval'),
                                                   batch_size=args.batch, shuffle=False, num_workers=4,
                                                   pin_memory=False, drop_last=False)
        test_loader = torch.utils.data.DataLoader(MNISTDataset(phase='test'),
                                                  batch_size=args.test_batch, shuffle=False, num_workers=4,
                                                  pin_memory=False, drop_last=False)
    elif args.dataset == 'cifar10':
        train_loader = torch.utils.data.DataLoader(CIFAR10Dataset(phase='trainval'),
                                                   batch_size=args.batch, shuffle=False, num_workers=4,
                                                   pin_memory=False, drop_last=False)
        test_loader = torch.utils.data.DataLoader(CIFAR10Dataset(phase='test'),
                                                  batch_size=args.test_batch, shuffle=False, num_workers=4,
                                                  pin_memory=False, drop_last=False)
    else:
        # Only MNIST and CIFAR-10 are supported.
        raise NotImplementedError

    # print this script to log