from typing import Tuple

import torchvision
from torch.utils.data import DataLoader


def dataloaders(batch_size: int, num_workers: int = 32) -> Tuple[DataLoader, DataLoader]:
    # Halve the worker count for very large batches to limit memory pressure.
    num_workers = num_workers if batch_size <= 4096 else num_workers // 2
    post_transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ])
    train_dataset = torchvision.datasets.ImageNet(
        root='imagenet',
        split='train',
        transform=torchvision.transforms.Compose([
            torchvision.transforms.RandomResizedCrop(224, scale=(0.08, 1.0)),
            torchvision.transforms.RandomHorizontalFlip(),
            post_transforms,
        ])
    )
    test_dataset = torchvision.datasets.ImageNet(
        root='imagenet',
        split='val',
        transform=torchvision.transforms.Compose([
            # eval transforms assumed (standard ImageNet resize + center crop)
            torchvision.transforms.Resize(256),
            torchvision.transforms.CenterCrop(224),
            post_transforms,
        ])
    )
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_loader, test_loader
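# Usage sketch (the batch size here is illustrative, not from the snippet above):
# build both loaders and peek at a single training batch.
train_loader, test_loader = dataloaders(batch_size=256)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([256, 3, 224, 224]) torch.Size([256])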
class TrainDataset(Dataset):
    def __init__(self, dataset_dir, crop_size, upscale_factor):
        super(TrainDataset, self).__init__()
        # every image file in the directory becomes one HR training sample
        self.image_filenames = [join(dataset_dir, x) for x in listdir(dataset_dir) if is_image_file(x)]
        crop_size = calculate_valid_crop_size(crop_size, upscale_factor)
        self.hr_preprocess = Compose([CenterCrop(384), RandomCrop(crop_size), ToTensor()])
        self.lr_preprocess = Compose([ToPILImage(), Resize(crop_size // upscale_factor, interpolation=Image.BICUBIC), ToTensor()])
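
    # Sketch of the matching item accessors (assumed, not part of the snippet
    # above): each item is an (lr, hr) tensor pair derived from one HR image.
    def __getitem__(self, index):
        hr_image = self.hr_preprocess(Image.open(self.image_filenames[index]))
        lr_image = self.lr_preprocess(hr_image)
        return lr_image, hr_image

    def __len__(self):
        return len(self.image_filenames)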
args.init_model_cd = os.path.expanduser(args.init_model_cd)
if not torch.cuda.is_available():
    raise Exception('At least one GPU must be available.')
gpu = torch.device('cuda:0')
# create result directories (if necessary)
os.makedirs(args.result_dir, exist_ok=True)
for s in ['phase_1', 'phase_2', 'phase_3']:
    os.makedirs(os.path.join(args.result_dir, s), exist_ok=True)
# dataset
trnsfm = transforms.Compose([
    transforms.Resize(args.cn_input_size),
    transforms.RandomCrop((args.cn_input_size, args.cn_input_size)),
    transforms.ToTensor(),
])
print('loading dataset... (it may take a few minutes)')
train_dset = ImageDataset(os.path.join(args.data_dir, 'train'), trnsfm, recursive_search=args.recursive_search)
test_dset = ImageDataset(os.path.join(args.data_dir, 'test'), trnsfm, recursive_search=args.recursive_search)
train_loader = DataLoader(train_dset, batch_size=(args.bsize // args.bdivs), shuffle=True)
# compute mean pixel value of training dataset
mpv = np.zeros(shape=(3,))
if args.mpv is None:
    pbar = tqdm(total=len(train_dset.imgpaths), desc='computing mean pixel value for training dataset...')
    for imgpath in train_dset.imgpaths:
        img = Image.open(imgpath)
        x = np.array(img, dtype=np.float32) / 255.
        mpv += x.mean(axis=(0, 1))
        pbar.update()
    # average the running per-channel sum over all training images
    mpv /= len(train_dset.imgpaths)
    pbar.close()
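# Sketch of an assumed downstream use (not from the snippet above): move the
# per-channel mean onto the GPU as a 1x3x1x1 tensor, e.g. as a fill value for
# masked image regions.
mpv_tensor = torch.tensor(mpv.reshape(1, 3, 1, 1), dtype=torch.float32).to(gpu)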
# Hypothetical setup for the logging loop below; none of these names are
# defined in the original snippet (tensorboardX is assumed as the writer
# implementation, and the sample rate, tone frequencies, and model are
# illustrative).
from tensorboardX import SummaryWriter
from torchvision import models

writer = SummaryWriter()
sample_rate = 44100
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
resnet18 = models.resnet18(False)

recall = [1.0, 0.8533334, 0.28, 0.0666667, 0.0]
for n_iter in range(100):
    s1 = torch.rand(1)  # value to keep
    s2 = torch.rand(1)
    # data grouping by `slash`
    writer.add_scalar(os.path.join("data", "scalar_systemtime"), s1[0], n_iter)
    # data grouping by `slash`
    writer.add_scalar(os.path.join("data", "scalar_customtime"), s1[0], n_iter, walltime=n_iter)
    writer.add_scalars(os.path.join("data", "scalar_group"), {"xsinx": n_iter * np.sin(n_iter),
                                                              "xcosx": n_iter * np.cos(n_iter),
                                                              "arctanx": np.arctan(n_iter)}, n_iter)
    x = torch.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = vutils.make_grid(x, normalize=True, scale_each=True)
        writer.add_image('Image', x, n_iter)  # Tensor
        # writer.add_image('astronaut', skimage.data.astronaut(), n_iter)  # numpy
        # writer.add_image('imread',
        #                  skimage.io.imread('screenshots/audio.png'), n_iter)  # numpy
        x = torch.zeros(sample_rate * 2)
        for i in range(x.size(0)):
            # sound amplitude should be in [-1, 1]
            x[i] = np.cos(freqs[n_iter // 10] * np.pi *
                          float(i) / float(sample_rate))
        writer.add_audio('myAudio', x, n_iter)
        writer.add_text('Text', 'text logged at step:' + str(n_iter), n_iter)
        writer.add_text('markdown Text', '''a|b\n-|-\nc|d''', n_iter)
        for name, param in resnet18.named_parameters():
            if 'bn' not in name:
                writer.add_histogram(name, param, n_iter)
        writer.add_pr_curve('xoxo', np.random.randint(2, size=100), np.random.rand(100), n_iter)
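# Flush and close the event files so the logged data is visible to TensorBoard.
writer.close()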
cache['ssim'] += ssim
cache['psnr'] += psnr
# Avoid out-of-memory crashes on an 8 GB GPU
if len(dev_images) < 60:
    dev_images.extend([to_image()(val_hr_restore.squeeze(0)),
                       to_image()(hr.data.cpu().squeeze(0)),
                       to_image()(sr.data.cpu().squeeze(0))])
dev_images = torch.stack(dev_images)
dev_images = torch.chunk(dev_images, dev_images.size(0) // 3)
dev_save_bar = tqdm(dev_images, desc='[saving training results]')
index = 1
for image in dev_save_bar:
    image = utils.make_grid(image, nrow=3, padding=5)
    utils.save_image(image, out_path + 'epoch_%d_index_%d.png' % (epoch, index), padding=5)
    index += 1
if use_tensorboard:
    log_value('ssim', cache['ssim'] / len(dev_loader), epoch)
    log_value('psnr', cache['psnr'] / len(dev_loader), epoch)
np.random.seed(1)
torch.cuda.manual_seed(1)
# /////////////// Dataset Loading ///////////////
if args.c100:
    # per-channel mean and std (CIFAR-10 statistics, reused here for CIFAR-100)
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    train_transform = trn.Compose([trn.RandomHorizontalFlip(), trn.RandomCrop(32, padding=4),
                                   trn.ToTensor(), trn.Normalize(mean, std)])
    test_transform = trn.Compose([trn.ToTensor(), trn.Normalize(mean, std)])
    train_data = dset.CIFAR100('/share/data/vision-greg/cifarpy', train=True, transform=train_transform, download=False)
    test_data = dset.CIFAR100('/share/data/vision-greg/cifarpy', train=False, transform=test_transform, download=False)
    num_classes = 100
else:
    train_data = dset.ImageFolder('/share/data/vision-greg/DistortedImageNet/Icons-50',
                                  transform=trn.Compose([trn.Resize((32, 32)), trn.RandomHorizontalFlip(),
                                                         trn.RandomCrop(32, padding=4), trn.ToTensor(),
                                                         # RandomErasing()
                                                         ]))
    test_data = dset.ImageFolder('/share/data/vision-greg/DistortedImageNet/Icons-50',
                                 transform=trn.Compose([trn.Resize((32, 32)), trn.ToTensor()]))
    num_classes = 50
if args.traditional:
    filtered_imgs = []
    for img in train_data.samples:
def verify_img_data(img_data, mode):
    if mode is None:
        img = transforms.ToPILImage()(img_data)
        assert img.mode == 'RGB'  # default should assume RGB
    else:
        img = transforms.ToPILImage(mode=mode)(img_data)
        assert img.mode == mode
    split = img.split()
    for i in range(3):
        assert np.allclose(img_data[:, :, i], split[i])
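
# Usage sketch (inputs assumed, not from the snippet above): a random HxWx3
# uint8 array round-trips through ToPILImage via both the default-mode and the
# explicit-mode code paths.
img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
verify_img_data(img_data, mode=None)
verify_img_data(img_data, mode='RGB')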
    for _ in range(n - 1):
        v = linear_map(v)
        cols.append(v)
    return torch.stack(cols, dim=-1)
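
# Self-contained sketch of the column-building pattern above (the function name
# and signature are assumptions, not from the snippet): start from a vector v,
# apply a linear map n - 1 times, and stack the iterates as columns.
def iterated_columns(linear_map, v, n):
    cols = [v]
    for _ in range(n - 1):
        v = linear_map(v)
        cols.append(v)
    return torch.stack(cols, dim=-1)  # shape: (*v.shape, n)

# Example: the columns v, Av, A^2 v for a fixed matrix A.
A = torch.randn(3, 3)
K = iterated_columns(lambda u: A @ u, torch.randn(3), n=3)  # 3 x 3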
batch_size = 256
if use_cuda:
    loader_args = {'num_workers': 8, 'pin_memory': True}
else:
    loader_args = {'num_workers': 1, 'pin_memory': False}


def loader_from_dataset(dataset):
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=True, **loader_args)


mnist_normalize = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
mnist_train = datasets.MNIST(
    '../data', train=True, download=True, transform=mnist_normalize)
# Just for faster training on CPU
# mnist_train.train_data = mnist_train.train_data[:5000]
mnist_test = datasets.MNIST(
    '../data', train=False, download=True, transform=mnist_normalize)  # same root as the training set
mnist_train, mnist_valid = get_train_valid_datasets(mnist_train)
train_loader = loader_from_dataset(mnist_train)
valid_loader = loader_from_dataset(mnist_valid)
test_loader = loader_from_dataset(mnist_test)
n_features = 28 * 28
n_classes = 10
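# Usage sketch (the checks below are illustrative, not from the snippet above):
# each loader yields (image, label) batches, and the flattened image size
# matches n_features while labels stay below n_classes.
images, labels = next(iter(train_loader))
assert images.view(images.size(0), -1).size(1) == n_features
assert labels.max().item() < n_classes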
def start_websocket_server_worker(
    id, host, port, hook, verbose, keep_labels=None, training=True
):  # pragma: no cover
    """Helper function for spinning up a websocket server and setting up the local datasets."""
    server = WebsocketServerWorker(id=id, host=host, port=port, hook=hook, verbose=verbose)
    # Set up toy data (MNIST example)
    mnist_dataset = datasets.MNIST(
        root="./data",
        train=training,
        download=True,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        ),
    )
    if training:
        # keep only the samples whose labels are in keep_labels
        indices = np.isin(mnist_dataset.targets, keep_labels).astype("uint8")
        logger.info("number of true indices: %s", indices.sum())
        selected_data = (
            torch.native_masked_select(mnist_dataset.data.transpose(0, 2), torch.tensor(indices))
            .view(28, 28, -1)
            .transpose(2, 0)
        )
        logger.info("after selection: %s", selected_data.shape)
        selected_targets = torch.native_masked_select(mnist_dataset.targets, torch.tensor(indices))
        dataset = sy.BaseDataset(
            # argument list below is assumed; the original snippet ends mid-call
            data=selected_data, targets=selected_targets, transform=mnist_dataset.transform
        )
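    # Sketch of the remaining steps, assuming PySyft's worker API (add_dataset,
    # start, and the "mnist" key are assumptions, not from the snippet above):
    # register the dataset with the worker and start serving requests.
    server.add_dataset(dataset, key="mnist")
    server.start()
    return server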