# Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.
def dataloaders(batch_size: int, num_workers: int = 32) -> Tuple[DataLoader, DataLoader]:
    """Build ImageNet train/val DataLoaders.

    Args:
        batch_size: per-loader batch size.
        num_workers: worker processes per loader; halved for very large
            batches (> 4096) to limit memory pressure.

    Returns:
        (train_loader, val_loader) tuple.

    NOTE(review): the original chunk was truncated mid-way through the val
    transform list; the standard ImageNet eval pipeline (Resize 256 →
    CenterCrop 224) and the loader construction are reconstructed below —
    confirm against the original file.
    """
    num_workers = num_workers if batch_size <= 4096 else num_workers // 2
    # Shared tail of both pipelines: tensor conversion + ImageNet mean/std.
    post_transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ])
    train_dataset = torchvision.datasets.ImageNet(
        root='imagenet',
        split='train',
        transform=torchvision.transforms.Compose([
            torchvision.transforms.RandomResizedCrop(224, scale=(0.08, 1.0)),
            torchvision.transforms.RandomHorizontalFlip(),
            post_transforms,
        ])
    )
    test_dataset = torchvision.datasets.ImageNet(
        root='imagenet',
        split='val',
        transform=torchvision.transforms.Compose([
            torchvision.transforms.Resize(256),
            torchvision.transforms.CenterCrop(224),
            post_transforms,
        ])
    )
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers)
    return train_loader, test_loader
def __init__(self, dataset_dir, crop_size, upscale_factor):
    """Index the image files in *dataset_dir* and build HR/LR pipelines.

    HR: 384 center crop then a random crop of the valid size.
    LR: the HR crop downscaled by *upscale_factor* with bicubic interpolation.
    """
    super(TrainDataset, self).__init__()
    candidates = listdir(dataset_dir)
    self.image_filenames = [join(dataset_dir, name) for name in candidates if is_image_file(name)]
    # Snap the requested crop size down to a multiple of the upscale factor.
    valid_crop = calculate_valid_crop_size(crop_size, upscale_factor)
    self.hr_preprocess = Compose([CenterCrop(384), RandomCrop(valid_crop), ToTensor()])
    self.lr_preprocess = Compose([
        ToPILImage(),
        Resize(valid_crop // upscale_factor, interpolation=Image.BICUBIC),
        ToTensor(),
    ])
# NOTE(review): this fragment arrived with its lines rotated — the tail of the
# thresholding branch (originally lines 27-30) appeared before the loop header.
# It is reconstructed in logical order below. The dangling outer `else:` that
# preceded the loop belongs to an if-statement outside this view and has been
# dropped — confirm against the original file.
#
# Test-time-augmentation inference: run each test image through the model
# (optionally flipped, controlled by `testn`), collect sigmoid scores, and
# derive per-image boolean labels from a threshold.
for i, (input, filepath) in enumerate(tqdm(test_loader)):
    # 3.2 change everything to cuda and get only basename
    filepath = [os.path.basename(x) for x in filepath]
    inputn = np.zeros(shape=(config.img_weight, config.img_height, 4), dtype=np.uint8)
    inputn[:, :, :] = input.squeeze(0).numpy()[:, :, :]
    with torch.no_grad():
        input = T.Compose([
            T.ToPILImage(),
            T.RandomHorizontalFlip(p=testn % 2),   # when not tta ----> p=0
            T.RandomVerticalFlip(p=testn // 2),    # when not tta ----> p=0
            T.ToTensor(),
            T.Normalize([0.0789, 0.0529, 0.0546, 0.0814], [0.147, 0.113, 0.157, 0.148])
        ])(inputn).float().unsqueeze(0)
        image_var = input.cuda(non_blocking=True)
        y_pred = model(image_var)
        label = y_pred.sigmoid().cpu().data.numpy()
        pred_checkpoint.append(label)
        if config.opt_thres:
            # Use a precomputed (optimized) threshold.
            threshold = threshold_get_copy
            labels.append(label > threshold)
        else:
            # Sort scores in descending order, then clamp the configured
            # threshold into [ll[3], ll[0]] so at least the top-4 scores pass.
            ll = label.copy().reshape((-1))
            ll = -ll
            ll.sort()
            ll = -ll
            threshold = config.threshold
            if threshold < ll[3]:
                threshold = ll[3]
                if threshold > ll[0]:
                    threshold = ll[0]
            labels.append(label >= threshold)
import os
def save_fig(fname):
    """Save the current matplotlib figure under *figdir*; no-op if figdir is unset."""
    if not figdir:
        return
    plt.savefig(os.path.join(figdir, fname))
############
# Get data: MNIST train/test datasets and their DataLoaders.
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets

batch_size = 32

# ToTensor is stateless, so one instance serves both splits.
_to_tensor = transforms.ToTensor()
train_dataset = datasets.MNIST('./data', train=True, download=True, transform=_to_tensor)
test_dataset = datasets.MNIST('./data', train=False, transform=_to_tensor)

train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Print the batch tensor shape/type for every training batch.
for (X_train, y_train) in train_loader:
    print('X_train:', X_train.size(), 'type:', X_train.type())
def __init__(self, dataset, images_folder, save_img_file, annotations_folder, save_label_file, mode, load=False, masks_path=None):
    """Dataset constructor (the enclosing class header is outside this chunk).

    Either loads precomputed image/label tensors from disk (load=True) or
    rebuilds them by walking the annotations folder. NOTE(review): the
    annotation-reading loop is truncated at the end of this chunk.
    """
    # Pick the label ordering for the dataset variant. FULL, R1, order_full
    # and order_r1 are module-level names defined outside this view.
    if dataset == FULL:
        order = order_full
    elif dataset == R1:
        order = order_r1
    else:
        raise Exception('DATASET MUST BE EITHER FULL OR R1')
    # Standard ImageNet-style preprocessing: 224x224 center crop + ImageNet mean/std.
    self.transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    if load:
        # Fast path: tensors were previously saved with torch.save.
        print("LOADING PRECOMPUTED IMAGES")
        self.images = torch.load(save_img_file)
        self.labels = torch.load(save_label_file)
    else:
        print("LOADING IMAGES")
        # Pre-size the annotation slots: 24 for FULL, 14 otherwise (R1).
        if dataset == FULL:
            self.annotations = [None]*24
        else:
            self.annotations = [None]*14
        for annotation_file in os.listdir(annotations_folder):
            # The FULL variant skips the *_r1 annotation files.
            if dataset == FULL and '_r1' in annotation_file:
                continue
            # NOTE(review): loop body truncated here — remainder is outside this chunk.
def stylize(args):
    """Apply a saved style-transfer model to args.content_image.

    Visible portion only: loads and preprocesses the content image, then
    either dispatches to an ONNX/Caffe2 path or restores a TransformerNet
    checkpoint. NOTE(review): truncated — the forward pass and output
    handling continue beyond this chunk.
    """
    device = torch.device("cuda" if args.cuda else "cpu")
    # utils.load_image presumably returns a PIL image, optionally rescaled — confirm in utils.
    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    # ToTensor yields values in [0, 1]; the Lambda rescales them to [0, 255].
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    # Add the batch dimension and move to the chosen device.
    content_image = content_image.unsqueeze(0).to(device)
    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            # NOTE(review): torch.load unpickles the file — only load
            # checkpoints from trusted sources.
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
print("Discretizing")
# get discretization bins for latent variables
# NOTE(review): `type` here shadows the builtin — presumably a dtype/precision
# flag defined earlier in the file; confirm. discretize/nz/quantbits/model are
# all defined outside this view.
zendpoints, zcentres = discretize(nz, quantbits, type, device, model, "cifar")
# get discretization bins for discretized logistic
xbins = ImageBins(type, device, xdim)
xendpoints = xbins.endpoints()
xcentres = xbins.centres()
print("Load data..")
# <=== DATA ===>
class ToInt:
    """Callable transform that rescales pixel values by a factor of 255
    (e.g. maps a [0, 1]-valued tensor onto the [0, 255] range)."""

    def __call__(self, pic):
        scaled = pic * 255
        return scaled
# CIFAR-10 test set with integer pixel values (ToTensor -> [0,1], ToInt -> *255).
transform_ops = transforms.Compose([transforms.ToTensor(), ToInt()])
test_set = datasets.CIFAR10(root="model/data/cifar", train=False, transform=transform_ops, download=True)

# Sample an (experiments, ndatapoints) index array from the test set WITHOUT
# replacement (replace=False), caching it on disk so reruns reuse the sample.
# FIX(review): np.save appends ".npy" to the target path, so the original
# exists()/load() calls on the extension-less path never found the cached file
# (np.load would raise FileNotFoundError). Use the explicit .npy path throughout.
indices_path = "bitstreams/cifar/indices.npy"
if not os.path.exists(indices_path):
    randindices = np.random.choice(len(test_set.data), size=(experiments, ndatapoints), replace=False)
    np.save(indices_path, randindices)
else:
    randindices = np.load(indices_path)

print("Setting up metrics..")
# Per-(experiment, datapoint) metric accumulators.
# FIX(review): np.float was removed in NumPy 1.20+; the builtin float is the
# drop-in replacement (both mean float64 here).
nets = np.zeros((experiments, ndatapoints), dtype=float)
elbos = np.zeros((experiments, ndatapoints), dtype=float)
cma = np.zeros((experiments, ndatapoints), dtype=float)
total = np.zeros((experiments, ndatapoints), dtype=float)


def flatten_bernoulli(img):
    """Flatten *img* to a vector in [0, 1] and sample each entry as a Bernoulli."""
    return transforms.ToTensor()(img).view(-1).bernoulli()
import torchvision.transforms as transforms
import random
import torch.nn.init

# Fix the global RNG seed so weight init / shuffling are repeatable.
torch.manual_seed(777)  # reproducibility

# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# presumably the dropout keep probability (drop rate = 1 - keep_prob),
# consumed by layers outside this view — confirm.
keep_prob = 0.7

# MNIST dataset
# NOTE(review): `dsets` is not imported in this chunk — presumably
# `torchvision.datasets as dsets` earlier in the file; confirm.
mnist_train = dsets.MNIST(root='MNIST_data/',
                          train=True,
                          transform=transforms.ToTensor(),
                          download=True)
mnist_test = dsets.MNIST(root='MNIST_data/',
                         train=False,
                         transform=transforms.ToTensor(),
                         download=True)

# dataset loader
data_loader = torch.utils.data.DataLoader(dataset=mnist_train,
                                          batch_size=batch_size,
                                          shuffle=True)

# nn layers: 784 (28x28 flattened MNIST) -> 512 -> 512 -> 512 -> 512
# NOTE(review): truncated — the output layer and the rest of the model
# are outside this view.
linear1 = torch.nn.Linear(784, 512, bias=True)
linear2 = torch.nn.Linear(512, 512, bias=True)
linear3 = torch.nn.Linear(512, 512, bias=True)
linear4 = torch.nn.Linear(512, 512, bias=True)
def load_data_fashion_mnist(root, batch_size, resize=None, download=False):
    """Build train/test DataLoaders for Fashion-MNIST.

    Args:
        root: dataset root directory.
        batch_size: batch size for both loaders.
        resize: optional edge length; if set, images are resized before ToTensor.
        download: pass-through to FashionMNIST (fetch the data if missing).

    Returns:
        (train_iter, test_iter) DataLoader pair; the train loader shuffles.
    """
    ops = []
    if resize:
        ops.append(transforms.Resize(resize))
    ops.append(transforms.ToTensor())
    pipeline = transforms.Compose(ops)

    train_set = datasets.FashionMNIST(root=root, train=True, transform=pipeline, download=download)
    test_set = datasets.FashionMNIST(root=root, train=False, transform=pipeline, download=download)

    # Worker processes hang on Windows in this setup, so disable them there.
    workers = 0 if sys.platform.startswith('win32') else 4
    train_iter = DataLoader(train_set, batch_size, shuffle=True, num_workers=workers)
    test_iter = DataLoader(test_set, batch_size, shuffle=False, num_workers=workers)
    return train_iter, test_iter