# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
import os, sys, torch
import torchvision.transforms as transforms
from TieredImageNet import TieredImageNet
from MetaBatchSampler import MetaBatchSampler

# Smoke-test script: load one sample and one meta-batch from tiered-ImageNet.
# Requires $TORCH_HOME to point at a directory containing 'tiered-imagenet'.
root_dir = os.environ['TORCH_HOME'] + '/tiered-imagenet'
print('root : {:}'.format(root_dir))

# ImageNet per-channel mean/std used for normalization.
means, stds = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(84, padding=8), transforms.ToTensor(), transforms.Normalize(means, stds)]
transform = transforms.Compose(lists)

dataset = TieredImageNet(root_dir, 'val-test', transform)
image, label = dataset[111]
print('image shape = {:}, label = {:}'.format(image.size(), label))
print('image : min = {:}, max = {:} ||| label : {:}'.format(image.min(), image.max(), label))

# 250 classes per episode, 100 samples per class, 10 episodes — TODO confirm
# the argument order against MetaBatchSampler's signature.
sampler = MetaBatchSampler(dataset.labels, 250, 100, 10)
dataloader = torch.utils.data.DataLoader(dataset, batch_sampler=sampler)
print('the length of dataset : {:}'.format(len(dataset)))
print('the length of loader : {:}'.format(len(dataloader)))

for images, labels in dataloader:
    # Fix: the original loop had no body (SyntaxError). Inspect the first
    # meta-batch and stop — this is a smoke test, not a training loop.
    print('batch : images = {:}, labels = {:}'.format(images.size(), labels.size()))
    break
# NOTE(review): this function is a truncated/spliced fragment — the body jumps
# from transform construction into an orphaned `.format(...)` line that belongs
# to a different checkpoint-resume snippet. Do not assume it runs as-is.
def build_imagenet(model_state_dict, optimizer_state_dict, **kwargs):
# 'ratio' is popped so it is not forwarded with the remaining kwargs.
ratio = kwargs.pop('ratio')
# Standard ImageNet channel statistics.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Training pipeline: random crop/flip plus color jitter, then normalize.
train_transform = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2),
transforms.ToTensor(),
normalize,
])
# Validation pipeline: deterministic resize + center crop.
valid_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
# `args` and `logging` are not defined in this fragment — presumably module
# globals in the original file; verify before use.
if args.zip_file:
logging.info('Loading data from zip file')
# NOTE(review): orphaned continuation of a print/format call from an unrelated
# snippet — the rest of this function was lost in extraction.
.format(args.resume, checkpoint['epoch']))
# NOTE(review): fragment cut at both ends — this `else:` pairs with a
# checkpoint-resume `if` that is not visible here.
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
# Standard ImageNet channel statistics.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# Training set: random crop + flip augmentation.
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle= True,
num_workers=args.workers, pin_memory=True)
# Validation set: deterministic resize + center crop, no shuffling.
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
# NOTE(review): this DataLoader call is truncated mid-argument-list.
batch_size=args.batch_size, shuffle=False,
# NOTE(review): tail of a docstring + return statement whose `def` line is not
# visible in this fragment; the closing `])` of the Compose is also missing.
transform (transform): the transformation to be applied to the
image
"""
# SAMPLE - Random sample the image
# sample = random_sample()
# RESIZE to fixed size
# resize = transforms.RandomSizedCrop(224)
# apply photo-metric distortions https://arxiv.org/pdf/1312.5402.pdf
# photmetric = None
# Only the horizontal flip is active; the other stages are stubbed out above.
return transforms.Compose([
# sample,
# resize,
transforms.RandomHorizontalFlip()
# photmetric
def load_cifar():
    """Build CIFAR-10 train/test DataLoaders with standard augmentation.

    Downloads CIFAR-10 into ./data on first use.

    Returns:
        (trainloader, testloader): shuffled training loader and deterministic
        test loader, both with batch_size=32.
    """
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # Per-channel CIFAR-10 mean/std.
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
                                              shuffle=True,
                                              num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True,
                                           transform=transform_test)
    # Fix: the original built `testset` but never created a test loader and
    # returned nothing; complete the pair and return both loaders.
    testloader = torch.utils.data.DataLoader(testset, batch_size=32,
                                             shuffle=False,
                                             num_workers=2)
    return trainloader, testloader
# NOTE(review): spliced fragment — the `elif` below has no visible leading
# `if`, and the final DataLoader call is truncated. Target sizes per network
# architecture (3D volumes for ResNet3D, 2D slices otherwise).
trg_size = (224, 224)
elif options.network_type == 'ResNet3D':
trg_size = (110, 110, 110)
elif options.network_type == 'ResNet2D':
trg_size = (224, 224)
# 3D path uses project-local transforms and the volumetric dataset class.
if options.network_type == "AlexNet3D":
transformations = transforms.Compose([CustomResize(options.network_type, trg_size),
CustomToTensor(options.network_type)
])
dset_train = AD_Dataset(IMG_PATH, TRAINING_PATH, transformations)
dset_test = AD_Dataset(IMG_PATH, TESTING_PATH, transformations)
# 2D path uses standard torchvision transforms over per-slice images.
elif options.network_type == 'AlexNet2D':
transformations = transforms.Compose([transforms.Resize(trg_size, Image.BICUBIC),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
dset_train = AD_2DSlicesData(IMG_PATH, TRAINING_PATH, transformations)
dset_test = AD_2DSlicesData(IMG_PATH, TESTING_PATH, transformations)
# Use argument load to distinguish training and testing
if options.load is None:
train_loader = DataLoader(dset_train,
batch_size = options.batch_size,
shuffle = True,
num_workers = 4,
drop_last = True
)
else:
# Only shuffle the data when doing training
# NOTE(review): this call is cut off mid-argument-list in the fragment.
train_loader = DataLoader(dset_train,
# NOTE(review): function body whose `def` line lies outside this fragment;
# the docstring below claims a (DataLoader, ImageFolder) tuple is returned,
# but the visible code returns only the DataLoader — confirm against callers.
dataset {string} -- name of the dataset: [market1501, duke, cuhk03]
batch_size {int} -- the batch size to load
part {string} -- which part of the dataset: [train, query, gallery]
Returns:
(torch.utils.data.DataLoader, torchvision.datasets.ImageFolder) -- the data loader and the image set
"""
# Person re-ID convention: tall 384x128 crops; interpolation=3 is BICUBIC.
transform_list = [
transforms.Resize(size=(384, 128), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]
# Insert the flip after Resize (index 1) so it runs before ToTensor.
if augment:
transform_list.insert(1, transforms.RandomHorizontalFlip())
data_transform = transforms.Compose(transform_list)
image_dataset = datasets.ImageFolder(os.path.join(DATASET_PATH[dataset], part),
data_transform)
dataloader = torch.utils.data.DataLoader(image_dataset, batch_size=batch_size,
shuffle=shuffle, num_workers=4)
return dataloader
# NOTE(review): fragment cut at both ends — the first line is the tail of an
# earlier (amp/optimizer) call.
loss_scale=args.loss_scale)
#model = torch.nn.DataParallel(model).cuda()
#model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
# Apex-style DDP wrapper — presumably apex.parallel.DistributedDataParallel;
# verify against the file's imports.
model = DDP(model, delay_allreduce=True)
# Data loading code
traindir = os.path.join(args.data, 'train')
# NOTE(review): 'valf' looks like a typo for 'val' — confirm the actual
# directory layout before changing it.
valdir = os.path.join(args.data, 'valf')
#normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data_aug_scale = (0.08, 1.0) if args.modelsize == 'large' else (0.2, 1.0)
# ToTensor/normalize are deliberately commented out: fast_collate below
# presumably performs tensor conversion on raw PIL images — confirm.
train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomResizedCrop(224, scale = data_aug_scale),
transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
# transforms.ToTensor(),
# normalize,
]))
# Distributed samplers shard the data across ranks; hence shuffle=False below
# (the sampler handles shuffling).
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.train_batch, shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
# NOTE(review): orphan `elif` — the matching `if` branch is outside this
# fragment.
elif dataset_name == 'STL10':
# NOTE(review): data_root_list is empty here, so the loop below never runs
# and `data_root` would be unbound — the candidate roots were presumably
# lost in extraction; restore them before using this code.
data_root_list = []
for data_root in data_root_list:
if os.path.exists(data_root):
print('Found STL10 in %s' % data_root)
break
# Training split: pad-and-crop + flip augmentation, [-1, 1] normalization.
if split == 'train':
loader = torch.utils.data.DataLoader(
datasets.STL10(
root=data_root, split='train', download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(96),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True)
# Both 'test' and 'val' map to the STL10 test split, unaugmented.
if split in ['test', 'val']:
loader = torch.utils.data.DataLoader(
datasets.STL10(
root=data_root, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False)
# NOTE(review): dataset-selection fragment, truncated after the EMNIST
# transform — the EMNIST dataset construction is missing.
if cfg.dataset.name == 'CIFAR':
if num_classes == 10:
# https://github.com/Armour/pytorch-nn-practice/blob/master/utils/meanstd.py
norm = transforms.Normalize(mean=[0.491, 0.482, 0.447], std=[0.247, 0.243, 0.261])
# Standard CIFAR augmentation: 4-pixel pad + random 32x32 crop + flip.
train_transforms = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), norm
])
test_transforms = transforms.Compose([transforms.ToTensor(), norm])
train_dataset = cifar.CIFAR10(root='./datasets/', train=True, download=True,
transform=train_transforms)
test_dataset = cifar.CIFAR10(root='./datasets/', train=False, transform=test_transforms)
elif num_classes == 100:
# CIFAR-100 channel statistics.
norm = transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
train_transforms = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), norm
])
test_transforms = transforms.Compose([transforms.ToTensor(), norm])
train_dataset = cifar.CIFAR100(root='./datasets/', train=True, download=True,
transform=train_transforms)
test_dataset = cifar.CIFAR100(root='./datasets/', train=False, transform=test_transforms)
# MNIST-family datasets all reuse the MNIST mean/std (0.1307, 0.3081);
# whether that is appropriate for Fashion/EMNIST is a modeling choice here.
elif cfg.dataset.name == 'MNIST':
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='./datasets/MNIST', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./datasets/MNIST', train=False, download=True, transform=transform)
elif cfg.dataset.name == 'FashionMNIST':
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.FashionMNIST(root='./datasets/FashionMNIST', train=True, download=True, transform=transform)
test_dataset = datasets.FashionMNIST(root='./datasets/FashionMNIST', train=False, download=True, transform=transform)
elif cfg.dataset.name == 'EMNIST':
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])