How to use the torchvision.transforms.ToPILImage function in torchvision

To help you get started, we’ve selected a few examples of torchvision.transforms.ToPILImage, based on popular ways it is used in public projects.
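
Before diving into the project examples below, here is a minimal sketch of the basic round trip: ToPILImage turns a (C, H, W) tensor (or an (H, W, C) ndarray) into a PIL image, and ToTensor goes the other way. The tensor here is random data, made up purely for illustration.

import torch
from torchvision import transforms

# A float tensor in [0, 1] with shape (C, H, W) is the canonical input.
tensor_img = torch.rand(3, 64, 64)

to_pil = transforms.ToPILImage()
pil_img = to_pil(tensor_img)           # PIL.Image.Image with mode 'RGB'

# ToTensor is the inverse direction: PIL image -> (C, H, W) float tensor.
back = transforms.ToTensor()(pil_img)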

github fmassa / vision / test / test_transforms.py
def verify_img_data(img_data, mode):
    if mode is None:
        img = transforms.ToPILImage()(img_data)
        assert img.mode == 'RGB'  # default should assume RGB
    else:
        img = transforms.ToPILImage(mode=mode)(img_data)
        assert img.mode == mode
    split = img.split()
    for i in range(3):
        assert np.allclose(img_data[:, :, i], split[i])
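
This test exercises mode inference: with mode=None, ToPILImage picks a mode from the input's channel count, defaulting to 'RGB' for three channels. A small sketch of that behavior with made-up numpy data, assuming a reasonably recent torchvision:

import numpy as np
from torchvision import transforms

# An (H, W, 3) uint8 ndarray maps to 'RGB' when no mode is given.
img_data = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
assert transforms.ToPILImage()(img_data).mode == 'RGB'

# A single-channel array can be converted with an explicit mode such as 'L'.
gray = np.random.randint(0, 256, (4, 4, 1), dtype=np.uint8)
assert transforms.ToPILImage(mode='L')(gray).mode == 'L'
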
github felipecode / coiltraine / testing / unit_tests / input_test / test_augmenter.py
def test_coarse_dropout(self):
    if not os.path.exists(self.test_images_write_path + 'test_coarse_dropout'):
        os.mkdir(self.test_images_write_path + 'test_coarse_dropout')

    data_loader = self.get_data_loader()
    count = 0
    for data in data_loader:
        image, labels = data
        image_to_save = transforms.ToPILImage()(image['rgb'][count][0].cpu())
        image_to_save.save(os.path.join(self.test_images_write_path + 'test_coarse_dropout',
                                        str(count) + 'b.png'))
        result = coarse_dropout_random(count, image['rgb'])
        image_to_save = transforms.ToPILImage()(result[count].cpu())
        image_to_save.save(os.path.join(self.test_images_write_path + 'test_coarse_dropout',
                                        str(count) + '.png'))
        count += 1
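
The pattern in this test is worth noting: pull a single image out of a batched tensor, move it to the CPU, convert with ToPILImage, and save through PIL. A simplified sketch with random data and a hypothetical output directory:

import os
import torch
from torchvision import transforms

batch = torch.rand(8, 3, 88, 200)        # made-up batch of RGB tensors
out_dir = 'augmenter_output'             # hypothetical directory
os.makedirs(out_dir, exist_ok=True)

# ToPILImage expects a single (C, H, W) image, not a batch.
pil_img = transforms.ToPILImage()(batch[0].cpu())
pil_img.save(os.path.join(out_dir, '0.png'))
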
github zijundeng / pytorch-semantic-segmentation / test_cityscapes.py
batch_size = 8

net = PSPNet(pretrained=False, num_classes=num_classes, input_size=(512, 1024)).cuda()
snapshot = 'epoch_48_validation_loss_5.1326_mean_iu_0.3172_lr_0.00001000.pth'
net.load_state_dict(torch.load(os.path.join(ckpt_path, snapshot)))
net.eval()

mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transform = transforms.Compose([
    expanded_transform.FreeScale((512, 1024)),
    transforms.ToTensor(),
    transforms.Normalize(*mean_std)
])
restore = transforms.Compose([
    expanded_transform.DeNormalize(*mean_std),
    transforms.ToPILImage()
])

lsun_path = '/home/b3-542/LSUN'

dataset = LSUN(lsun_path, ['tower_val', 'church_outdoor_val', 'bridge_val'], transform=transform)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=16, shuffle=True)

if not os.path.exists(predict_path):
    os.mkdir(predict_path)

for vi, data in enumerate(dataloader, 0):
    inputs, labels = data
    inputs = Variable(inputs, volatile=True).cuda()
    outputs = net(inputs)

    prediction = outputs.cpu().data.max(1)[1].squeeze_(1).numpy()
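
The restore pipeline undoes Normalize before ToPILImage so saved images keep their original colors. expanded_transform.DeNormalize is project-specific; a hand-rolled equivalent can be built from the same statistics, since (x - m)/s is inverted by a second Normalize with mean -m/s and std 1/s:

from torchvision import transforms

mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

# Invert Normalize(mean, std): x * std + mean, expressed as another Normalize.
inv_mean = [-m / s for m, s in zip(*mean_std)]
inv_std = [1 / s for s in mean_std[1]]
restore = transforms.Compose([
    transforms.Normalize(mean=inv_mean, std=inv_std),
    transforms.ToPILImage(),
])
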
github chrhenning / hypercl / data / cifar10_data.py
import torchvision.transforms as transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

train_transform = transforms.Compose([
    transforms.ToPILImage('RGB'),
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(size=[32, 32], padding=4),
    transforms.ToTensor(),
    normalize,
])

test_transform = transforms.Compose([
    transforms.ToPILImage('RGB'),
    transforms.ToTensor(),
    normalize,
])

return train_transform, test_transform
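
Here ToPILImage('RGB') sits at the front of the pipeline so that the PIL-based flip and crop transforms can run on data stored as raw tensors or arrays, as CIFAR-10 batches often are. A self-contained usage sketch with a made-up CIFAR-sized tensor:

import torch
from torchvision import transforms

train_transform = transforms.Compose([
    transforms.ToPILImage('RGB'),        # tensor/ndarray -> PIL image first
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(size=[32, 32], padding=4),
    transforms.ToTensor(),               # back to a (3, 32, 32) float tensor
])

sample = torch.rand(3, 32, 32)           # stand-in for one stored image
augmented = train_transform(sample)
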
github Lornatang / PyTorch / research / GAN / dcgan / train.py
# Create model
if not os.path.exists(MODEL_PATH):
    os.makedirs(MODEL_PATH)

if not os.path.exists(WORK_DIR + '/' + 'gen'):
    os.makedirs(WORK_DIR + '/' + 'gen')

transform = transforms.Compose([
    transforms.Grayscale(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    # Grayscale() yields a single channel, so Normalize needs one-element stats.
    transforms.Normalize([0.5], [0.5])
])

to_pil_image = transforms.ToPILImage()

# mnist train_dataset
train_dataset = torchvision.datasets.ImageFolder(root=WORK_DIR + '/' + 'train',
                                                 transform=transform)

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True)

# first train run this line
# D = Discriminator().to(device)
# G = Generator().to(device)
# load model
if torch.cuda.is_available():
    D = torch.load(MODEL_PATH + 'D.pth').to(device)
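
to_pil_image is created once so that individual generated tensors can be converted and written to disk during training. A hedged sketch of that use; the tensor below is a stand-in for a generator output, and since DCGAN outputs are typically tanh-scaled to [-1, 1], they need rescaling to [0, 1] before conversion:

import torch
from torchvision import transforms

to_pil_image = transforms.ToPILImage()

fake = torch.rand(1, 32, 32) * 2 - 1       # stand-in for a tanh output in [-1, 1]
img = to_pil_image((fake.cpu() + 1) / 2)   # rescale to [0, 1], then convert
img.save('sample_0.png')
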
github n01z3 / kaggle-pneumothorax-segmentation / legacy / predict_fold.py
                    pred = result.data.cpu().numpy()
                    if inx == 1:
                        pred = aug_HFlip(image=pred)['image']
                    # elif inx == 2:
                    #     pred = aug_Rot90(image=pred)['image']

                    pred = pred.T

                    tta_preds.append(pred)
                    inx += 1
                # print (pred.shape)
                pred = np.mean(tta_preds, axis=0)

                out_cut = np.copy(pred)
                res = transforms.ToPILImage()(out_cut)
                res = np.asarray(res.resize((1024, 1024), resample=Image.BILINEAR))

                total_preds[index, :, :] += res

if not os.path.exists(model_name + '_out_all_folds/'):
    os.makedirs(model_name + '_out_all_folds/')

threshold_list = [1.4]
# threshold_list = [1.2, 1.4, 1.5, 2]
# threshold = 1.5

for threshold in threshold_list:

    sublist = []
    for index, row in tqdm.tqdm(sample_df.iterrows(), total=len(sample_df)):
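
The ToPILImage call above exists purely to borrow PIL's resize: the float prediction map becomes a PIL image (mode 'F' for 2-D float32 input in recent torchvision), gets upsampled, and goes straight back to numpy. A minimal sketch with a made-up mask:

import numpy as np
from PIL import Image
from torchvision import transforms

mask = np.random.rand(512, 512).astype(np.float32)    # stand-in prediction map

pil_mask = transforms.ToPILImage()(mask)              # 2-D float32 -> mode 'F'
resized = np.asarray(pil_mask.resize((1024, 1024), resample=Image.BILINEAR))
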
github AlexiaJM / RelativisticGAN / code / GAN_losses_iter_PAC.py
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transf
import torchvision.models as models
import torchvision.utils as vutils
import torch.nn.utils.spectral_norm as spectral_norm

if param.cuda:
	import torch.backends.cudnn as cudnn
	cudnn.deterministic = True
	cudnn.benchmark = True

# To see images
from IPython.display import Image
to_img = transf.ToPILImage()

import pytorch_visualize as pv

import math

torch.utils.backcompat.broadcast_warning.enabled=True

from fid import calculate_fid_given_paths as calc_fid
#from inception import get_inception_score
#from inception import load_images

## Setting seed
import random
if param.seed is None:
	param.seed = random.randint(1, 10000)
print(f"Random Seed: {param.seed}")
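
to_img is defined once near the imports so that sample tensors can be inspected visually during training. A sketch of how it might be used later in a notebook session; the tensor is made up:

import torch
import torchvision.transforms as transf

to_img = transf.ToPILImage()

sample = torch.rand(3, 64, 64)     # stand-in for a generated image in [0, 1]
pil = to_img(sample)               # displays inline in IPython/Jupyter,
                                   # e.g. via IPython.display.display(pil)
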
github eghouti / BinaryConnect / train.py
def main():
    global args, best_prec1
    args = parser.parse_args()
    if args.tensorboard: configure("runs/%s"%(args.name))

    # Data loading code
    normalize = transforms.Normalize(mean=[x/255.0 for x in [125.3, 123.0, 113.9]],
                                     std=[x/255.0 for x in [63.0, 62.1, 66.7]])

    if args.augment:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Lambda(lambda x: F.pad(
                Variable(x.unsqueeze(0), requires_grad=False, volatile=True),
                (4, 4, 4, 4), mode='reflect').data.squeeze()),
            transforms.ToPILImage(),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    kwargs = {'num_workers': 1, 'pin_memory': True}
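
The augmented branch above converts to a tensor only because, at the time, RandomCrop could not do reflection padding itself: ToTensor feeds F.pad, and ToPILImage hands the padded image back to the PIL-based crop and flip. On a torchvision recent enough to support padding_mode, the same augmentation collapses to a sketch like this:

from torchvision import transforms

transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4, padding_mode='reflect'),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    # normalize would follow here, as in the original pipeline
])
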
github avinashpaliwal / Super-SloMo / video_to_slomo.py
mean = [0.429, 0.431, 0.397]
std = [1, 1, 1]
normalize = transforms.Normalize(mean=mean,
                                 std=std)

negmean = [x * -1 for x in mean]
revNormalize = transforms.Normalize(mean=negmean, std=std)

# Temporary fix for issue #7 https://github.com/avinashpaliwal/Super-SloMo/issues/7 -
# - Removed per channel mean subtraction for CPU.
if (device == "cpu"):
    transform = transforms.Compose([transforms.ToTensor()])
    TP = transforms.Compose([transforms.ToPILImage()])
else:
    transform = transforms.Compose([transforms.ToTensor(), normalize])
    TP = transforms.Compose([revNormalize, transforms.ToPILImage()])

# Load data
videoFrames = dataloader.Video(root=extractionPath, transform=transform)
videoFramesloader = torch.utils.data.DataLoader(videoFrames, batch_size=args.batch_size, shuffle=False)

# Initialize model
flowComp = model.UNet(6, 4)
flowComp.to(device)
for param in flowComp.parameters():
    param.requires_grad = False
ArbTimeFlowIntrp = model.UNet(20, 5)
ArbTimeFlowIntrp.to(device)
for param in ArbTimeFlowIntrp.parameters():
    param.requires_grad = False

flowBackWarp = model.backWarp(videoFrames.dim[0], videoFrames.dim[1], device)
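
revNormalize works here only because std is [1, 1, 1]: a Normalize with the negated mean simply adds the original mean back, so composing it with ToPILImage recovers a viewable frame. A round-trip sketch on a made-up frame:

import torch
from torchvision import transforms

mean = [0.429, 0.431, 0.397]
std = [1, 1, 1]
normalize = transforms.Normalize(mean=mean, std=std)
revNormalize = transforms.Normalize(mean=[-m for m in mean], std=std)

frame = torch.rand(3, 128, 128)            # stand-in for a video frame in [0, 1]
restored = transforms.ToPILImage()(revNormalize(normalize(frame)))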