    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    print("=> loaded checkpoint '{}' (epoch {})"
          .format(args.resume, checkpoint['epoch']))
else:
    print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val2')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
    traindir,
    transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
if args.distributed:
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
    train_sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    num_workers=args.workers, pin_memory=True, sampler=train_sampler)
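# A minimal epoch-loop sketch (not part of the original snippet) showing how this
# loader is typically consumed; `model`, `criterion`, `optimizer`, and `args.epochs`
# are assumed to exist as in the standard ImageNet training script.
for epoch in range(args.epochs):
    if args.distributed:
        # The DistributedSampler needs the epoch number to reshuffle differently each epoch.
        train_sampler.set_epoch(epoch)
    for images, target in train_loader:
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(images)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()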
""" data """
load_size = 286
crop_size = 256
transform = transforms.Compose(
    [transforms.RandomHorizontalFlip(),
     transforms.Resize(load_size),  # transforms.Scale is deprecated; Resize is the current API
     transforms.RandomCrop(crop_size),
     transforms.ToTensor(),
     transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])
dataset_dirs = utils.reorganize(dataset_dir)
a_data = dsets.ImageFolder(dataset_dirs['trainA'], transform=transform)
b_data = dsets.ImageFolder(dataset_dirs['trainB'], transform=transform)
a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
a_loader = torch.utils.data.DataLoader(a_data, batch_size=batch_size, shuffle=True, num_workers=4)
b_loader = torch.utils.data.DataLoader(b_data, batch_size=batch_size, shuffle=True, num_workers=4)
a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=3, shuffle=True, num_workers=4)
b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=3, shuffle=True, num_workers=4)
a_fake_pool = utils.ItemPool()
b_fake_pool = utils.ItemPool()
""" model """
Da = models.Discriminator()
Db = models.Discriminator()
Ga = models.Generator()
Gb = models.Generator()
MSE = nn.MSELoss()
L1 = nn.L1Loss()
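# Hedged sketch of a single generator update with the pieces defined above
# (LSGAN-style adversarial loss via MSE, cycle-consistency via L1). The names
# `g_optimizer` and `lambda_cyc`, and the mapping directions of Ga/Gb, are
# assumptions for illustration, not taken from the original script.
a_real = next(iter(a_loader))[0]
b_real = next(iter(b_loader))[0]
a_fake = Ga(b_real)                      # translate B -> A
b_fake = Gb(a_real)                      # translate A -> B
a_rec, b_rec = Ga(b_fake), Gb(a_fake)    # cycle back to the source domains
d_a_out, d_b_out = Da(a_fake), Db(b_fake)
gan_loss = MSE(d_a_out, torch.ones_like(d_a_out)) + MSE(d_b_out, torch.ones_like(d_b_out))
cyc_loss = L1(a_rec, a_real) + L1(b_rec, b_real)
g_loss = gan_loss + lambda_cyc * cyc_loss
g_optimizer.zero_grad()
g_loss.backward()
g_optimizer.step()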
def collect_image_data(dir, batch_size, resolution, num_workers, max_samplesize=150):
    # Images are expected under <dir>/<resolution>/<class>/*, as laid out for ImageFolder.
    path = os.path.join(dir, str(resolution))
    dset = vdatasets.ImageFolder(
        root=path, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]))
    dataloader = udata.DataLoader(dset, batch_size=batch_size,
                                  shuffle=True, num_workers=num_workers, drop_last=True)
    # Materialize at most max_samplesize batches so the caller can reuse them in memory.
    output = []
    for i, batch in enumerate(dataloader):
        if i == max_samplesize:
            break
        output.append(batch)
    return output
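# Hypothetical usage of collect_image_data (the directory and sizes are illustrative,
# not from the original script): it caches up to max_samplesize batches in memory so
# later passes avoid dataloader overhead.
batches = collect_image_data('/data/faces', batch_size=16, resolution=128, num_workers=4)
images, labels = batches[0]   # each cached element is one (images, labels) batch
print(images.shape)           # e.g. torch.Size([16, 3, 128, 128])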
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
if args.distributed:
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
    train_sampler = None
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
    num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)
if args.evaluate:
    validate(val_loader, model, criterion, args)
    return
if args.quantize:
    quantization_epochs = len(args.iterative_steps)
    quantization_scheduler = inq.INQScheduler(optimizer, args.iterative_steps, strategy="pruning")
else:
def torch_loader(data_path, size, bs, min_scale=0.08):
    # Data loading code
    traindir = os.path.join(data_path, 'train')
    valdir = os.path.join(data_path, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_tfms = transforms.Compose([
        transforms.RandomResizedCrop(size, (min_scale, 1.)),
        transforms.RandomHorizontalFlip(),
        # transforms.ColorJitter(.3, .3, .3),
        transforms.ToTensor(),
        # Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
        normalize,
    ])
    train_dataset = datasets.ImageFolder(traindir, train_tfms)
    train_sampler = (torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=bs, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_tfms = transforms.Compose([
        transforms.Resize(int(size * 1.14)),
        transforms.CenterCrop(size),
        transforms.ToTensor(),
        normalize,
    ])
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, val_tfms), batch_size=bs * 2, shuffle=False, num_workers=args.workers, pin_memory=True)
    aug_loader = torch.utils.data.DataLoader(
import numpy as np
import torch
import torchvision
from torchvision import transforms
_DEFAULT_MU = [.5, .5, .5]
_DEFAULT_SIGMA = [.5, .5, .5]
DEFAULT_TRANSFORM = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(360, pad_if_needed=True),
    transforms.ToTensor(),
    transforms.Normalize(_DEFAULT_MU, _DEFAULT_SIGMA),
])
class ImageFolder(torchvision.datasets.ImageFolder):
    def __init__(self, path, transform, limit=np.inf):
        super().__init__(path, transform=transform)
        self.limit = limit

    def __len__(self):
        length = super().__len__()
        return min(length, self.limit)
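# Hypothetical usage of the limited ImageFolder above (the path is illustrative,
# not from the original module): capping __len__ means samplers only ever draw
# from the first `limit` entries.
ds = ImageFolder('/data/train', DEFAULT_TRANSFORM, limit=1000)
print(len(ds))   # at most 1000, even if the folder holds more images
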
class DataLoader(torch.utils.data.DataLoader):
    def __init__(self, path, transform=None, limit=np.inf, shuffle=True,
                 num_workers=8, batch_size=4, *args, **kwargs):
        if transform is None:
            transform = DEFAULT_TRANSFORM
print("Loading training data")
st = time.time()
dataset = torchvision.datasets.ImageFolder(
    traindir,
    transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1, hue=0.02),
        transforms.ToTensor(),
        normalize,
        # transforms.RandomErasing(p=0.9, value='random')
    ]))
print("Took", time.time() - st)
print("Loading validation data")
dataset_test = torchvision.datasets.ImageFolder(
    valdir,
    transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ]))
print("Creating data loaders")
train_sampler = torch.utils.data.RandomSampler(dataset)
test_sampler = torch.utils.data.SequentialSampler(dataset_test)
return dataset, dataset_test, train_sampler, test_sampler
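# Hedged follow-up showing how the returned datasets and samplers are typically
# wired into loaders (as in the torchvision reference scripts); `args.batch_size`
# and `args.workers` are assumed to come from the surrounding argument parser.
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=args.batch_size,
    sampler=train_sampler, num_workers=args.workers, pin_memory=True)
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=args.batch_size,
    sampler=test_sampler, num_workers=args.workers, pin_memory=True)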
    state_dict = checkpoint['state_dict']
    state_dict = remove_module_dict(state_dict)
    model.load_state_dict(state_dict)
    print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
    print_log("=> no checkpoint found at '{}'".format(args.resume), log)
cudnn.benchmark = True
# Data loading code
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        # transforms.Scale(256),
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)
criterion = nn.CrossEntropyLoss().cuda()
if args.get_small:
    big_path = os.path.join(args.save_dir, "big_model.pt")
    torch.save(model, big_path)
    small_model = get_small_model(model.cpu())
    # small_model = torch.load('small_model.pt')
cudnn.benchmark = True
model = models.resnet18(pretrained=True)
model = torch.nn.DataParallel(model).cuda()
criterion = nn.CrossEntropyLoss().cuda()
print_freq = 5
batch_size = 256
data_path = '/home/hongyang/dataset/imagenet_cls/cls'
valdir = os.path.join(data_path, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
val_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=batch_size, shuffle=True,
    num_workers=4, pin_memory=True)
validate(val_loader, model, criterion)
if opt.manualSeed is None:
    opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
# Enable cudnn benchmark mode, which autotunes convolution kernels and speeds things up when input sizes are fixed
cudnn.benchmark = True
save_pic = False
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# Load animation and cosplay dataset
dataset_ani = dset.ImageFolder(root=opt.dataroot_ani,
                               transform=transforms.Compose([
                                   transforms.Resize(opt.imageSize),
                                   transforms.RandomCrop(opt.imageSize),
                                   transforms.RandomHorizontalFlip(),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
dataset_cos = dset.ImageFolder(root=opt.dataroot_cos,
                               transform=transforms.Compose([
                                   transforms.Resize(opt.imageSize),
                                   transforms.RandomCrop(opt.imageSize),
                                   transforms.RandomHorizontalFlip(),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
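# Hedged follow-up (not from the original script): wrap both ImageFolder datasets
# in loaders; `opt.batchSize` and `opt.workers` are assumed DCGAN-style options
# from the argument parser above.
dataloader_ani = torch.utils.data.DataLoader(dataset_ani, batch_size=opt.batchSize,
                                             shuffle=True, num_workers=int(opt.workers))
dataloader_cos = torch.utils.data.DataLoader(dataset_cos, batch_size=opt.batchSize,
                                             shuffle=True, num_workers=int(opt.workers))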