How to use the torchvision.transforms.RandomResizedCrop function

To help you get started, we've selected a few torchvision examples based on popular ways RandomResizedCrop is used in public projects.

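RandomResizedCrop crops a random region of the input image, sampling the crop's area and aspect ratio, and then resizes the crop to the requested output size. It is the standard training-time augmentation in ImageNet-style pipelines, as the examples below show. Here is a minimal sketch of typical usage, with the transform's default scale and ratio ranges written out explicitly (img stands in for any PIL image):

from torchvision import transforms

# Sample a crop covering 8%-100% of the image area with an aspect
# ratio between 3/4 and 4/3, then resize it to 224x224.
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3/4, 4/3)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

# tensor = train_transform(img)  # img: PIL.Image -> 3x224x224 float tensor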

github tczhangzhi / pytorch-distributed / multiprocessing_distributed.py (View on GitHub)
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(gpu)

    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
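    # DistributedSampler shuffles per epoch itself; the DataLoader below
    # therefore enables shuffle only when no sampler is supplied.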

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=2,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
github jiangtaoxie / fast-MPN-COV / imagepreprocess.py (View on GitHub)
            normalize,
        ])
        val_transforms = transforms.Compose([
            transforms.Resize((448,448)),
            transforms.ToTensor(),
            normalize,
        ])
        evaluate_transforms = transforms.Compose([
            transforms.Resize((448,448)),
            CenterCropWithFlip(448),
            transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])),
        ])
    elif dataset.startswith('ImageNet'):
        train_transforms = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        val_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])
        evaluate_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])),
        ])
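TenCrop in the evaluation pipeline above returns a tuple of ten crops rather than a single image, which is why the two Lambda steps convert, normalize, and stack the per-crop tensors into a single ncrops x C x H x W tensor. At test time the batch and crop dimensions are typically fused before the forward pass and the predictions averaged afterwards, along these lines (a sketch; model and input are placeholders):

# input: a batch of stacked crops with shape (bs, ncrops, c, h, w)
bs, ncrops, c, h, w = input.size()
result = model(input.view(-1, c, h, w))            # fuse batch size and ncrops
result_avg = result.view(bs, ncrops, -1).mean(1)   # average over the crops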
github google / unrestricted-adversarial-examples / examples / undefended_pytorch_resnet / main.py (View on GitHub)
# Data loading code
  traindirs = [os.path.join(args.data, partition)
               for partition in ['train', 'extras']]
  valdir = os.path.join(args.data, 'test')

  # This normalization is NOT used, as the attack API requires the
  # images to be in the [0, 1] range. Instead, a BatchNorm layer is
  # prepended to the model rather than normalizing the images in the
  # data iterator (one way to do this is sketched after this snippet).
  normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])

  train_dataset = [datasets.ImageFolder(
    traindir,
    transforms.Compose([
      transforms.RandomResizedCrop(224),
      transforms.RandomHorizontalFlip(),
      transforms.ToTensor(),
      # normalize,
    ]))
    for traindir in traindirs]
  if len(train_dataset) == 1:
    train_dataset = train_dataset[0]
  else:
    train_dataset = torch.utils.data.ConcatDataset(train_dataset)

  # Optionally duplicate the dataset to enlarge it, e.g.:
  # train_dataset.samples = train_dataset.samples * 100

  train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.batch_size, shuffle=True,
    num_workers=args.workers, pin_memory=True)
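The comment in the snippet above folds normalization into the model rather than the data pipeline. A minimal sketch of that idea, using a fixed normalization module (NormalizeLayer is an illustrative name, not the repo's actual code):

import torch
import torch.nn as nn

# A fixed, non-learnable normalization module that can sit in front of
# any classifier, so inputs can stay in [0, 1] for the attack API.
class NormalizeLayer(nn.Module):
    def __init__(self, mean, std):
        super().__init__()
        self.register_buffer('mean', torch.tensor(mean).view(1, -1, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(1, -1, 1, 1))

    def forward(self, x):
        return (x - self.mean) / self.std

# e.g. model = nn.Sequential(NormalizeLayer([0.485, 0.456, 0.406],
#                                           [0.229, 0.224, 0.225]), backbone)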
github BG2CRW / top_k_optimization / sample_data / sample_data.py (View on GitHub)
def make_transform(sz_resize = 256, sz_crop = 227, mean = [128, 117, 104], #224/227
		std = [1, 1, 1], rgb_to_bgr = True, is_train = True, 
		intensity_scale = [[0, 1], [0, 255]]):
	return transforms.Compose([
		transforms.Compose([ # train: horizontal flip and random resized crop
			transforms.RandomResizedCrop(sz_crop),
			transforms.RandomHorizontalFlip(),
		]) if is_train else transforms.Compose([ # test: else center crop
			transforms.Resize(sz_resize),
			transforms.CenterCrop(sz_crop),
		]),
		transforms.ToTensor(),
		ScaleIntensities(
			*intensity_scale) if intensity_scale is not None else Identity(),
		transforms.Normalize(
			mean=mean,
			std=std,
		),
		transforms.Lambda(
			lambda x: x[[2, 1, 0], ...]
		) if rgb_to_bgr else Identity()
	])
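With the defaults above, the returned pipeline crops to 227, rescales tensors from [0, 1] to [0, 255], subtracts fixed channel means, and swaps RGB to BGR. A brief usage sketch (ScaleIntensities and Identity are the module's own helpers):

train_tf = make_transform(is_train=True)    # RandomResizedCrop(227) + horizontal flip
eval_tf = make_transform(is_train=False)    # Resize(256) + CenterCrop(227)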
github renqianluo / NAO_pytorch / train_search_3.py (View on GitHub)
def build_imagenet(model_state_dict, optimizer_state_dict, **kwargs):
    ratio = kwargs.pop('ratio')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
            hue=0.2),
        transforms.ToTensor(),
        normalize,
    ])
    valid_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    if args.zip_file:
github DistributedML / FoolsGold / deep-fg / fg / train.py (View on GitHub)
def get_loader(option):
    if option.dataset == "vggface2":
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
        datadir = os.path.join(option.data_path, "data")
        train_transform = transforms.Compose([
                transforms.Resize(256),
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ])
        test_transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                normalize,
            ])
        df_path = os.path.join(option.data_path, "top10_files.csv")
        df = pd.read_csv(df_path)
        train_df = df[df['train_flag'] == 0].reset_index()
        val_df = df[df['train_flag'] == 1].reset_index()

        trainset = VGG_Face2(train_df, datadir, train_transform)
github leeesangwon / PyTorch-Image-Retrieval / data_loader.py (View on GitHub)
def train_data_loader(data_path, img_size, use_augment=False):
    if use_augment:
        data_transforms = transforms.Compose([
            transforms.RandomOrder([
                transforms.RandomApply([transforms.ColorJitter(contrast=0.5)], .5),
                transforms.Compose([
                    transforms.RandomApply([transforms.ColorJitter(saturation=0.5)], .5),
                    transforms.RandomApply([transforms.ColorJitter(hue=0.1)], .5),
                ])
            ]),
            transforms.RandomApply([transforms.ColorJitter(brightness=0.125)], .5),
            transforms.RandomApply([transforms.RandomRotation(15)], .5),
            transforms.RandomResizedCrop(img_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    else:
        data_transforms = transforms.Compose([
            transforms.RandomResizedCrop(img_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    image_dataset = datasets.ImageFolder(data_path, data_transforms)

    return image_dataset
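Despite its name, train_data_loader above returns the ImageFolder dataset itself, not a DataLoader. A sketch of wrapping it for training (the path, batch size, and worker count are placeholders):

from torch.utils.data import DataLoader

dataset = train_data_loader('/path/to/images', img_size=224, use_augment=True)
loader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)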
github williamFalcon / pytorch-lightning / pl_examples / full_examples / imagenet / imagenet_example.py (View on GitHub)
def train_dataloader(self):
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        )

        train_dir = os.path.join(self.hparams.data, 'train')
        train_dataset = datasets.ImageFolder(
            train_dir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                normalize,
            ]))

        if self.use_ddp:
            train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        else:
            train_sampler = None

        train_loader = torch.utils.data.DataLoader(
            dataset=train_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=(train_sampler is None),
            num_workers=0,
            sampler=train_sampler
github mahyarnajibi / FreeAdversarialTraining / main_free.py (View on GitHub)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(configs.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(configs.resume))

            
    # Initiate data loaders
    traindir = os.path.join(configs.data, 'train')
    valdir = os.path.join(configs.data, 'val')
    
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(configs.DATA.crop_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=configs.DATA.batch_size, shuffle=True,
        num_workers=configs.DATA.workers, pin_memory=True, sampler=None)
    
    normalize = transforms.Normalize(mean=configs.TRAIN.mean,
                                    std=configs.TRAIN.std)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(configs.DATA.img_size),
            transforms.CenterCrop(configs.DATA.crop_size),
            transforms.ToTensor(),
        ])),
github Eric-mingjie / rethinking-network-pruning / imagenet / thinet / main_E.py (View on GitHub)
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([