How to use the torchvision.datasets.CIFAR10 class in torchvision

To help you get started, we’ve selected a few torchvision.datasets.CIFAR10 examples, based on popular ways the class is used in public projects.
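
At its simplest, CIFAR10 downloads the data on first use and yields (image, label) pairs that a DataLoader can batch. A minimal sketch, assuming only that torch and torchvision are installed (the './data' root is an arbitrary choice):

import torch
import torchvision
import torchvision.transforms as transforms

transform = transforms.ToTensor()  # PIL image -> float tensor in [0, 1]

# train=True selects the 50,000 training images; train=False the 10,000 test images
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

images, labels = next(iter(trainloader))  # images: [64, 3, 32, 32], labels: [64]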


github dnddnjs / pytorch-cifar10 / enas / train.py (View on Github)
# Training-time augmentation; the valid/test pipelines below only normalize
transforms_train = transforms.Compose([
	transforms.RandomHorizontalFlip(),
	transforms.ToTensor(),
	transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transforms_valid = transforms.Compose([
	transforms.ToTensor(),
	transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transforms_test = transforms.Compose([
	transforms.ToTensor(),
	transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

dataset_train = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transforms_train)
dataset_valid = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transforms_valid)
dataset_test = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transforms_test)

# Split the train dataset into train and valid subsets, then build a sampler for each
# code from https://gist.github.com/kevinzakka/d33bf8d6c7f06a9d8c76d97a7879f5cb
num_train = len(dataset_train)
indices = list(range(num_train))
split = int(np.floor(args.valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, 
	                      num_workers=args.num_worker, sampler=train_sampler)
valid_loader = torch.utils.data.DataLoader(dataset_valid, batch_size=args.batch_size, 
	                      num_workers=args.num_worker, sampler=valid_sampler)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=100, 
	                     shuffle=False, num_workers=args.num_worker)
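
Note that the split above is taken in order: the validation set is always the first valid_size fraction of the dataset. The gist this code credits shuffles the indices first; a sketch of that variant, reusing the names defined above:

# Shuffle so the validation split is a random sample rather than
# simply the first images in the dataset
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]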
github dragen1860 / DARTS-PyTorch / train.py (View on Github)
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_ch, 10, args.layers, args.auxiliary, genotype).cuda()

    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))

    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(
        model.parameters(),
        args.lr,
        momentum=args.momentum,
        weight_decay=args.wd
    )

    train_transform, valid_transform = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batchsz, shuffle=True, pin_memory=True, num_workers=2)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batchsz, shuffle=False, pin_memory=True, num_workers=2)

    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))

    for epoch in range(args.epochs):
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs

        valid_acc, valid_obj = infer(valid_queue, model, criterion)
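
One caveat: this loop calls scheduler.step() at the top of each epoch, which matches the pre-1.1 PyTorch API. In PyTorch 1.1 and later the scheduler should step after the optimizer, i.e. at the end of the epoch. A sketch of the updated ordering, where train() stands in for the repo's training function:

for epoch in range(args.epochs):
    logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])  # get_last_lr() in PyTorch >= 1.4
    model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
    train_acc, train_obj = train(train_queue, model, criterion, optimizer)  # optimizer.step() happens here
    scheduler.step()  # advance the LR schedule once per epoch, after training
    valid_acc, valid_obj = infer(valid_queue, model, criterion)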
github hwang595 / Draco / src / datasets / data_prepare.py (View on Github)
"""
import torch
from torchvision import datasets, transforms

if __name__ == "__main__":
    training_set_mnist = datasets.MNIST('./mnist_data', train=True, download=True,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.1307,), (0.3081,))]))
    train_loader_mnist = torch.utils.data.DataLoader(training_set_mnist, batch_size=128, shuffle=True)
    test_loader_mnist = torch.utils.data.DataLoader(
        datasets.MNIST('./mnist_data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])), batch_size=100, shuffle=True)
    trainset_cifar10 = datasets.CIFAR10(root='./cifar10_data', train=True, download=True,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                                        ]))
    train_loader_cifar10 = torch.utils.data.DataLoader(trainset_cifar10, batch_size=128, shuffle=True)
    test_loader_cifar10 = torch.utils.data.DataLoader(
        datasets.CIFAR10('./cifar10_data', train=False, transform=transforms.Compose([
            transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])), batch_size=100, shuffle=True)
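
The (0.5, 0.5, 0.5) mean and std pair rescales ToTensor's [0, 1] output to [-1, 1], since (x - 0.5) / 0.5 = 2x - 1. A quick check:

import torch

x = torch.tensor([0.0, 0.5, 1.0])  # pixel values after ToTensor()
print((x - 0.5) / 0.5)             # tensor([-1., 0., 1.])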
github HelenMao / MSGAN / DCGAN-Mode-Seeking / train.py (View on Github)
def main():
    # parse options
    parser = TrainOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')
    os.makedirs(opts.dataroot, exist_ok=True)

    dataset = torchvision.datasets.CIFAR10(opts.dataroot, train=True, download=True, transform=transforms.Compose([
        transforms.Resize(opts.img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
    train_loader = torch.utils.data.DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.nThreads)

    # model
    print('\n--- load model ---')
    model = CDCGAN(opts)
    model.setgpu(opts.gpu)
    if opts.resume is None:
        model.initialize()
        ep0 = -1
        total_it = 0
    else:
        ep0, total_it = model.resume(opts.resume)
    ep0 += 1
github ritheshkumar95 / pytorch-vqvae / vqvae.py (View on Github)
        if args.dataset == 'mnist':
            # Define the train & test datasets
            train_dataset = datasets.MNIST(args.data_folder, train=True,
                download=True, transform=transform)
            test_dataset = datasets.MNIST(args.data_folder, train=False,
                transform=transform)
            num_channels = 1
        elif args.dataset == 'fashion-mnist':
            # Define the train & test datasets
            train_dataset = datasets.FashionMNIST(args.data_folder,
                train=True, download=True, transform=transform)
            test_dataset = datasets.FashionMNIST(args.data_folder,
                train=False, transform=transform)
            num_channels = 1
        elif args.dataset == 'cifar10':
            # Define the train & test datasets
            train_dataset = datasets.CIFAR10(args.data_folder,
                train=True, download=True, transform=transform)
            test_dataset = datasets.CIFAR10(args.data_folder,
                train=False, transform=transform)
            num_channels = 3
        valid_dataset = test_dataset
    elif args.dataset == 'miniimagenet':
        transform = transforms.Compose([
            transforms.RandomResizedCrop(128),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        # Define the train, valid & test datasets
        train_dataset = MiniImagenet(args.data_folder, train=True,
            download=True, transform=transform)
        valid_dataset = MiniImagenet(args.data_folder, valid=True,
            download=True, transform=transform)
        test_dataset = MiniImagenet(args.data_folder, test=True,
            download=True, transform=transform)
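
Two things worth noting: MiniImagenet is not a torchvision built-in but a custom Dataset class in this repo that mirrors the CIFAR10 constructor (root, split flag, download, transform), and in the MNIST/Fashion-MNIST/CIFAR-10 branches the test set doubles as the validation set (valid_dataset = test_dataset).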
github flyingpot / pytorch_deephash / no_data_normalization / mAP_bn.py (View on Github)
def load_data():
    transform_train = transforms.Compose(
        [transforms.Scale(227),
         transforms.ToTensor()])
         # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose(
        [transforms.Scale(227),
         transforms.ToTensor()])
         # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                                transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
                                              shuffle=False, num_workers=2)

    testset = datasets.CIFAR10(root='./data', train=False, download=True,
                               transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=False, num_workers=2)
    return trainloader, testloader
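
transforms.Scale here is the old name for transforms.Resize; it was deprecated in favor of Resize and has been removed from newer torchvision releases. A sketch of the same pipeline for current versions:

transform_train = transforms.Compose([
    transforms.Resize(227),  # Resize replaces the deprecated transforms.Scale
    transforms.ToTensor()])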
github NoelShin / Deep-Learning-Bootcamp-with-PyTorch / classification / SEResidualNet / CIFAR10_pipeline.py (View on Github)
def __init__(self, train=True):
        super(CustomCIFAR10, self).__init__()
        self.cifar10_train = CIFAR10(root='./datasets', train=train, download=True)

        # Get only images (i.e. without labels)
        tensors = list()
        for i in range(len(self.cifar10_train)):
            tensors.append(np.array(self.cifar10_train[i][0]))  # Need to convert PIL.Image.Image to numpy.ndarray
        self.per_pixel_mean_grid = np.mean(tensors, axis=0).astype(np.float32)
        # Calculate per-pixel mean along the batch dimension

        if not train:
            self.cifar10_test = CIFAR10(root='./datasets', train=train, download=True)

        self.train = train
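
Converting every sample through PIL one at a time is slow. Recent torchvision releases expose the raw training images as a NumPy array on the dataset's .data attribute (older releases called it train_data); assuming such a version, the per-pixel mean reduces to one vectorized call:

import numpy as np
from torchvision.datasets import CIFAR10

cifar10_train = CIFAR10(root='./datasets', train=True, download=True)
# .data is a (50000, 32, 32, 3) uint8 array in recent torchvision
per_pixel_mean_grid = cifar10_train.data.mean(axis=0).astype(np.float32)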
github lancopku / label-embedding-network / ComputerVision / resnet8.py (View on Github)
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

trainset = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Model
if args.resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/ckpt.t7')
    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
else:
    print('==> Building model..')
    net = ResNet8()
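
A side note on the checkpoint format: storing the whole net object ties the file to the exact class definition and module layout at save time. The pattern PyTorch's serialization docs recommend is to store a state_dict; a minimal sketch using the names above:

# Save
torch.save({'net': net.state_dict(), 'acc': best_acc, 'epoch': epoch},
           './checkpoint/ckpt.t7')

# Resume
checkpoint = torch.load('./checkpoint/ckpt.t7')
net = ResNet8()
net.load_state_dict(checkpoint['net'])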
github LiyuanLucasLiu / RAdam / cifar_imagenet / cifar.py (View on Github)
# Data
    print('==> Preparing dataset %s' % args.dataset)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    if args.dataset == 'cifar10':
        dataloader = datasets.CIFAR10
        num_classes = 10
    else:
        dataloader = datasets.CIFAR100
        num_classes = 100


    trainset = dataloader(root='./data', train=True, download=True, transform=transform_train)
    trainloader = data.DataLoader(trainset, batch_size=args.train_batch, shuffle=True, num_workers=args.workers)

    testset = dataloader(root='./data', train=False, download=False, transform=transform_test)
    testloader = data.DataLoader(testset, batch_size=args.test_batch, shuffle=False, num_workers=args.workers)

    # Model
    print("==> creating model '{}'".format(args.arch))
    if args.arch.startswith('resnext'):
        model = models.__dict__[args.arch](
github AlexiaJM / relativistic-f-divergences / GAN.py (View on Github)
# This makes it into [-1,1]
		transf.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])
	])
else:
	trans = transf.Compose([
		transf.Resize((param.image_size, param.image_size)),
		# This makes it into [0,1]
		transf.ToTensor(),
		# This makes it into [-1,1]
		transf.Normalize(mean = [0.5, 0.5, 0.5], std = [0.5, 0.5, 0.5])
	])

## Importing dataset
data = dset.ImageFolder(root=param.input_folder, transform=trans)
if param.CIFAR10:
	data = dset.CIFAR10(root=param.CIFAR10_input_folder, train=True, download=True, transform=trans)
if param.LSUN:
	print(param.LSUN_classes)
	data = dset.LSUN(root=param.LSUN_input_folder, classes=[param.LSUN_classes], transform=trans)

# Loading data randomly
def generate_random_sample():
	while True:
		random_indexes = numpy.random.choice(data.__len__(), size=param.batch_size, replace=False)
		batch = [data[i][0] for i in random_indexes]
		yield torch.stack(batch, 0)
random_sample = generate_random_sample()
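# Usage sketch (not part of the original file): each call to next() yields
# one batch of real images for the training loop, e.g.
#   real_batch = next(random_sample)  # tensor of shape [batch_size, C, H, W]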

## Models

if param.arch == 1:
	title = title + '_CNN_'