How to use the torchvision.transforms.Normalize function

To help you get started, we’ve selected a few torchvision.transforms.Normalize examples based on popular ways it is used in public projects.

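transforms.Normalize operates channel-wise on a float tensor: for each channel c it computes output[c] = (input[c] - mean[c]) / std[c]. As a minimal sketch of the basic pattern (the mean/std values here are the well-known ImageNet statistics, used purely for illustration):

import torch
from torchvision import transforms

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
img = torch.rand(3, 224, 224)  # float tensor in [0, 1], e.g. the output of ToTensor()
out = normalize(img)           # per channel: (img[c] - mean[c]) / std[c]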

github YvanYin / VNL_Monocular_Depth_Prediction / tools / test_any_images.py
def scale_torch(img, scale):
    """
    Scale the image and return it as a torch.Tensor.
    :param img: input image. [H, W, C]
    :param scale: the scale factor. float
    :return: img. [C, H, W]
    """
    img = np.transpose(img, (2, 0, 1))  # [H, W, C] -> [C, H, W]
    img = img[::-1, :, :]  # reverse channel order (e.g. BGR -> RGB)
    img = img.astype(np.float32)
    img /= scale
    img = torch.from_numpy(img.copy())  # copy(): from_numpy cannot handle negative strides
    img = transforms.Normalize(cfg.DATASET.RGB_PIXEL_MEANS, cfg.DATASET.RGB_PIXEL_VARS)(img)
    return img
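
To display a normalized tensor you must undo the transform yourself, since Normalize has no built-in inverse. A minimal sketch (de_normalize is an illustrative helper of ours, not from the repo above or from torchvision):

import torch

def de_normalize(img, mean, std):
    # channel-wise inverse of Normalize: x * std + mean
    mean = torch.as_tensor(mean, dtype=img.dtype).view(-1, 1, 1)
    std = torch.as_tensor(std, dtype=img.dtype).view(-1, 1, 1)
    return img * std + mean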
github SmartDataAnalytics / horus-ner / src / algorithms / computer_vision / places365.py
            ## if you encounter a UnicodeDecodeError when using python3 to load the model, adding the following lines will fix it. Thanks to @soravux
            #from functools import partial
            #import pickle
            #pickle.load = partial(pickle.load, encoding="latin1")
            #pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
            #model = torch.load(model_file, map_location=lambda storage, loc: storage, pickle_module=pickle)
            #torch.save(model, 'whole_%s_places365_python36.pth.tar'%arch)

            self.model.eval()

            # load the image transformer
            self.centre_crop = trn.Compose([
                    trn.Resize((256,256)),
                    trn.CenterCrop(224),
                    trn.ToTensor(),
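                    # ImageNet channel means / stds, the standard stats for ImageNet-pretrained models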
                    trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])

            # load the class label
            file_name = 'categories_places365.txt'
            #if not os.access(file_name, os.W_OK):
            #    synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
            #    os.system('wget ' + synset_url)
            self.classes = list()
            with open(config.dir_models + "/places365/" + file_name) as class_file:
                for line in class_file:
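                    # lines look like '/a/abbey 0'; [3:] drops the leading '/a/' prefix, keeping just the name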
                    self.classes.append(line.strip().split(' ')[0][3:])
            self.classes = tuple(self.classes)
        except Exception as e:
            raise e
github Tramac / Fast-SCNN-pytorch / demo.py
def demo():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # output folder
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # image transform
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    image = Image.open(args.input_pic).convert('RGB')
    image = transform(image).unsqueeze(0).to(device)

    model = get_fast_scnn(args.dataset, pretrained=True, root=args.weights_folder).to(device)
    print('Finished loading model!')

    model.eval()
    with torch.no_grad():
        outputs = model(image)
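    # assuming outputs[0] has shape [1, num_classes, H, W], argmax over dim 1 gives per-pixel class ids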
    pred = torch.argmax(outputs[0], 1).squeeze(0).cpu().data.numpy()
    mask = get_color_pallete(pred, args.dataset)
    outname = os.path.splitext(os.path.split(args.input_pic)[-1])[0] + '.png'
    mask.save(os.path.join(args.outdir, outname))
github LynnHo / Conditional-GANs-Pytorch / train_InfoGAN2.py
# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
c_dim = 10


# ==============================================================================
# =                                   setting                                  =
# ==============================================================================

# data
transform = tforms.Compose(
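    # note: tforms.Scale was deprecated in favor of tforms.Resize in newer torchvision releases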
    [tforms.Scale(size=(32, 32), interpolation=Image.BICUBIC),
     tforms.ToTensor(),
     tforms.Lambda(lambda x: torch.cat((x, x, x), dim=0)),
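     # mean=std=0.5 maps inputs from [0, 1] to [-1, 1], the usual range for a generator with tanh output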
     tforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)]
)
train_loader = torch.utils.data.DataLoader(
    dataset=dsets.FashionMNIST('data/FashionMNIST', train=True, download=True, transform=transform),
    batch_size=batch_size,
    shuffle=True,
    num_workers=4,
    pin_memory=use_gpu,
    drop_last=True
)

# model
D = model.DiscriminatorInfoGAN2(x_dim=3, norm=norm, weight_norm=weight_norm).to(device)
Q = model.QInfoGAN2(x_dim=3, c_dim=c_dim, norm='batch_norm', weight_norm='none').to(device)
G = model.GeneratorInfoGAN2(z_dim=z_dim, c_dim=c_dim).to(device)

# gan loss function
github PanJinquan / pytorch-learning-notes / tutorials / lesson27-MLP网络层 / main.py
batch_size=200
learning_rate=0.01
epochs=10

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
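                       # (0.1307,) and (0.3081,) are MNIST's global mean and std (single channel)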
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=True)



class MLP(nn.Module):

    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, 10),
github jlevy44 / PathFlowAI / build / lib / pathflowai / datasets.py
			transforms.ToTensor(),
			transforms.Normalize(mean if mean else [0.7, 0.6, 0.7], std if std is not None else [0.15, 0.15, 0.15]) #mean and standard deviations for lung adenocarcinoma resection slides
		]),
		'val': transforms.Compose([
			transforms.ToPILImage(),
			transforms.Resize((patch_size,patch_size)),
			transforms.CenterCrop(patch_size),
			transforms.ToTensor(),
			transforms.Normalize(mean if mean else [0.7, 0.6, 0.7], std if std is not None else [0.15, 0.15, 0.15])
		]),
		'test': transforms.Compose([
			transforms.ToPILImage(),
			transforms.Resize((patch_size,patch_size)),
			transforms.CenterCrop(patch_size),
			transforms.ToTensor(),
			transforms.Normalize(mean if mean else [0.7, 0.6, 0.7], std if std is not None else [0.15, 0.15, 0.15])
		]),
		'pass': transforms.Compose([
			transforms.ToPILImage(),
			transforms.CenterCrop(patch_size),
			transforms.ToTensor(),
		])
	},
	'albumentations':{
	'train':alb.core.composition.Compose([
		alb.augmentations.transforms.Resize(patch_size, patch_size),
		alb.augmentations.transforms.CenterCrop(patch_size, patch_size),
		alb.augmentations.transforms.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=0.5)
		]+([alb.augmentations.transforms.Flip(p=0.5),
		alb.augmentations.transforms.Transpose(p=0.5),
		alb.augmentations.transforms.ShiftScaleRotate(p=0.5)] if not elastic else [alb.augmentations.transforms.RandomRotate90(p=0.5),
		alb.augmentations.transforms.ElasticTransform(p=0.5)])
github YBIGTA / pytorch-hair-segmentation / utils / trainer_verbose.py
    # build optimizer and scheduler
    model_optimizer = get_optimizer(optimizer, model, lr, momentum)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model_optimizer)

    # transforms on both image and mask
    train_joint_transforms = jnt_trnsf.Compose([
        jnt_trnsf.RandomCrop(img_size),
        jnt_trnsf.RandomRotate(5),
        jnt_trnsf.RandomHorizontallyFlip()
    ])

    # transforms only on images
    train_image_transforms = std_trnsf.Compose([
        std_trnsf.ColorJitter(0.05, 0.05, 0.05, 0.05),
        std_trnsf.ToTensor(),
        std_trnsf.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    
    test_joint_transforms = jnt_trnsf.Compose([
        jnt_trnsf.Safe32Padding()
    ])

    test_image_transforms = std_trnsf.Compose([
        std_trnsf.ToTensor(),
        std_trnsf.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    # transforms only on mask
    mask_transforms = std_trnsf.Compose([
        std_trnsf.ToTensor()
        ])
github yunjey / pytorch-tutorial / tutorials / 09 - Image Captioning / configuration.py
        self.num_threads = 2
        
        # Image preprocessing in training phase
        self.train_transform = T.Compose([
            T.Scale(self.image_size),    
            T.RandomCrop(self.crop_size),
            T.RandomHorizontalFlip(), 
            T.ToTensor(), 
            T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        
        # Image preprocessing in test phase
        self.test_transform = T.Compose([
            T.Scale(self.crop_size),
            T.CenterCrop(self.crop_size),
            T.ToTensor(),
            T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        
        # Training 
        self.num_epochs = 5
        self.batch_size = 64
        self.learning_rate = 0.001
        self.log_step = 10
        self.save_step = 1000
        
        # Model
        self.embed_size = 256
        self.hidden_size = 512
        self.num_layers = 2
        
        # Path 
        self.image_path = './data/'
        self.caption_path = './data/annotations/'
github alexandonian / pretorched-x / pretorched / metrics / train_inception.py
        mean = [0.5, 0.5, 0.5]
        std = [0.5, 0.5, 0.5]
    else:
        resize_size = 256
        input_size = 224
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    dataloaders = core.get_dataloaders(args.dataset, args.data_root,
                                       batch_size=args.batch_size,
                                       num_workers=args.num_workers,
                                       distributed=args.distributed,
                                       size=input_size, resolution=resize_size)
    train_loader, val_loader = dataloaders['train'], dataloaders['val']
    train_sampler = train_loader.sampler
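    # swap in the model-appropriate normalization; this assumes Normalize is the last transform in each pipeline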
    try:
        train_loader.dataset.transforms.transforms[-1] = transforms.Normalize(mean=mean, std=std)
        val_loader.dataset.transforms.transforms[-1] = transforms.Normalize(mean=mean, std=std)
    except Exception:
        for dataset in train_loader.dataset.datasets:
            dataset.transform.transforms[-1] = transforms.Normalize(mean=mean, std=std)
        for dataset in val_loader.dataset.datasets:
            dataset.transform.transforms[-1] = transforms.Normalize(mean=mean, std=std)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        # train for one epoch
github corenel / GAN-Zoo / DCGAN / data_loader.py
"""Dataset setting and data loader for DCGAN."""

import torchvision.datasets as dset
from torch.utils import data
from torchvision import transforms

from params import batch_size, data_root, dataset_mean, dataset_std, image_size

# image pre-processing
pre_process = transforms.Compose([transforms.Scale(image_size),
                                  transforms.ToTensor(),
                                  transforms.Normalize(mean=dataset_mean,
                                                       std=dataset_std)])

# dataset and data loader
dataset = dset.CIFAR10(root=data_root,
                       transform=pre_process,
                       download=True
                       )

data_loader = data.DataLoader(dataset=dataset,
                              batch_size=batch_size,
                              shuffle=True)
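
dataset_mean and dataset_std are imported from params above; when no published statistics exist for a dataset, they can be estimated from the data itself. A minimal sketch (variable names are ours, shown on CIFAR10 for concreteness):

import torch
from torch.utils import data
from torchvision import datasets, transforms

dataset = datasets.CIFAR10(root='data', transform=transforms.ToTensor(), download=True)
loader = data.DataLoader(dataset, batch_size=512)

n_pixels = 0
channel_sum = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
for images, _ in loader:
    n_pixels += images.numel() // images.size(1)  # pixels per channel in this batch
    channel_sum += images.sum(dim=(0, 2, 3))
    channel_sq_sum += (images ** 2).sum(dim=(0, 2, 3))

mean = channel_sum / n_pixels
std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()  # std = sqrt(E[x^2] - E[x]^2)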