How to use the torchvision.transforms.Resize function in torchvision

To help you get started, we’ve selected a few examples based on popular ways torchvision.transforms.Resize is used in public projects.

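Before the project examples below, here is a minimal sketch of the two ways Resize is typically called: a single int resizes the shorter image side and preserves the aspect ratio, while a (height, width) pair resizes to that exact shape. The image path is a placeholder; any RGB image works.

from PIL import Image
from torchvision import transforms

img = Image.open('example.jpg').convert('RGB')  # placeholder path

# An int resizes the shorter side to 256 and keeps the aspect ratio,
# so it is usually followed by a crop to get a fixed square input.
resize_short = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
])

# A (height, width) pair resizes to exactly that shape,
# possibly changing the aspect ratio.
resize_exact = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

print(resize_short(img).shape)  # torch.Size([3, 224, 224])
print(resize_exact(img).shape)  # torch.Size([3, 224, 224])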

github otenim / GLCIC-PyTorch / train.py View on Github external
    args.init_model_cd = os.path.expanduser(args.init_model_cd)
    if not torch.cuda.is_available():
        raise Exception('At least one gpu must be available.')
    else:
        gpu = torch.device('cuda:0')

    # create result directory (if necessary)
    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)
    for s in ['phase_1', 'phase_2', 'phase_3']:
        if not os.path.exists(os.path.join(args.result_dir, s)):
            os.makedirs(os.path.join(args.result_dir, s))

    # dataset
    trnsfm = transforms.Compose([
        transforms.Resize(args.cn_input_size),
        transforms.RandomCrop((args.cn_input_size, args.cn_input_size)),
        transforms.ToTensor(),
    ])
    print('loading dataset... (it may take a few minutes)')
    train_dset = ImageDataset(os.path.join(args.data_dir, 'train'), trnsfm, recursive_search=args.recursive_search)
    test_dset = ImageDataset(os.path.join(args.data_dir, 'test'), trnsfm, recursive_search=args.recursive_search)
    train_loader = DataLoader(train_dset, batch_size=(args.bsize // args.bdivs), shuffle=True)

    # compute mean pixel value of training dataset
    mpv = np.zeros(shape=(3,))
    if args.mpv is None:
        pbar = tqdm(total=len(train_dset.imgpaths), desc='computing mean pixel value for training dataset...')
        for imgpath in train_dset.imgpaths:
            img = Image.open(imgpath)
            x = np.array(img, dtype=np.float32) / 255.
            mpv += x.mean(axis=(0,1))
github dkumazaw / mobilenetv3-pytorch / train_cifar10.py View on Github external
def main():

    model = MobileNetV3Large(n_classes=10)
    criterion = nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.Resize((224, 224)),  # Upsample
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    transform_valid = transforms.Compose([
        transforms.Resize((224, 224)),  # Upsample
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    train_dataset = torchvision.datasets.CIFAR10(
        './data/cifar/train', train=True, transform=transform_train, download=True
    )
github Vermeille / Torchelie / torchelie / recipes / imageclassifier.py View on Github external
    def validation_step(self, batch):
        x, y = batch
        pred = self.forward(x)
        loss = torch.nn.functional.cross_entropy(pred, y)
        return {'loss': loss, 'pred': pred}

    def make_optimizer(self):
        return optim.Adam(self.model.parameters(), lr=self.lr)


if __name__ == '__main__':
    from torchvision.datasets import FashionMNIST
    from torch.utils.data import DataLoader
    import torchvision.transforms as TF
    tfm = TF.Compose([
        TF.Resize(128),
        TF.Grayscale(3),
        TF.ToTensor(),
    ])
    trainset = FashionMNIST('../tests/', transform=tfm)
    testset = FashionMNIST('../tests/', train=False, transform=tfm)

    trainloader = DataLoader(trainset,
                             32,
                             num_workers=4,
                             pin_memory=True,
                             shuffle=True)
    testloader = DataLoader(testset,
                            32,
                            num_workers=4,
                            pin_memory=True,
                            shuffle=True)
github dillondavis / RecurrentAttentionConvolutionalNeuralNetwork / src / dataset.py View on Github external
    def __init__(self, root, split='train', transform=False,
                 coords=False, flipcrop=False):
        self.root = root
        self.split = split
        self.coords = coords
        self._transform = transform
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        self.bw_image_ids = ['1401', '3617', '3780', '5393', '448', '3619', '5029', '6321']
        self.image_ids = self.get_image_ids()
        self.id_to_file = self.get_id_to_file()
        if flipcrop:
            crop = transforms.RandomCrop(IMSIZE) if self.split == 'train' else transforms.CenterCrop(IMSIZE)
            flip = transforms.RandomHorizontalFlip() if self.split == 'train' else lambda x: x
            self.im_transform = transforms.Compose([
                transforms.Resize(256),
                crop,
                flip, 
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=self.mean, std=self.std
                )
            ])
        else:
            self.im_transform = transforms.Compose([
                transforms.Resize((IMSIZE, IMSIZE)),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=self.mean, std=self.std
                )
            ])
        if coords:
github Lornatang / PyTorch / research / MNIST / mnist / prediction.py View on Github external
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

WORK_DIR = '../../../../../data/MNIST/mnist'
BATCH_SIZE = 128

MODEL_PATH = '../../../../models/pytorch/MNIST'
MODEL_NAME = 'mnist.pth'

# Create model
if not os.path.exists(MODEL_PATH):
    os.makedirs(MODEL_PATH)

transform = transforms.Compose([
    transforms.Resize(28),  # resize the shorter image side to 28 pixels
    transforms.ToTensor(),  # convert the PIL image to a Tensor
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])  # normalize each channel to [-1, 1]
])


# Load data
val_dataset = torchvision.datasets.ImageFolder(root=WORK_DIR + '/' + 'val',
                                               transform=transform)

val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
                                         batch_size=BATCH_SIZE,
                                         shuffle=True)


def main():
    print(f"Val numbers:{len(val_dataset)}")
github Lornatang / PyTorch / research / MNIST / mnist / classifier_img.py View on Github external
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Tensor image transforms to PIL image
to_pil_image = transforms.ToPILImage()

label = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

# create output directories if they do not exist
for dir_index in range(0, 10):
    if not os.path.exists(WORK_DIR + '/' + 'gen' + '/' + label[dir_index]):
        os.makedirs(WORK_DIR + '/' + 'gen' + '/' + label[dir_index])

transform = transforms.Compose([
    transforms.Resize(28),
    transforms.ToTensor()
])

# Load data
test_dataset = torchvision.datasets.ImageFolder(root=WORK_DIR + '/' + 'gen',
                                                transform=transform)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=BATCH_SIZE,
                                          shuffle=True)


def main():
    print(f"Image numbers:{len(test_dataset)}")

    # Load model
github anirudh-chakravarthy / CASENet / visualize_multilabel.py View on Github external
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Define normalization for data
    input_size = 472
    normalize = transforms.Normalize(mean=[104.008, 116.669, 122.675], std=[1, 1, 1])
    
    img_transform = transforms.Compose([
                    transforms.Resize([input_size, input_size]),
                    RGB2BGR(roll=True),
                    ToTorchFormatTensor(div=False),
                    normalize,
                    ])
    label_transform = transforms.Compose([
                    transforms.ToPILImage(),
                    transforms.Resize([input_size, input_size], interpolation=PIL.Image.NEAREST),
                    transforms.ToTensor(),
                    ])

    h5_f = h5py.File("./utils/val_label_binary_np.h5", 'r')
    
    for idx_img in range(len(test_list)):
        img = Image.open(test_list[idx_img]).convert('RGB')
        processed_img = img_transform(img).unsqueeze(0)
        processed_img = utils.check_gpu(None, processed_img)    
        score_feats1, score_feats2, score_feats3, score_feats5, score_fuse_feats = model(processed_img, for_vis=True)

        # Load numpy from hdf5 for gt.
        np_data = h5_f['data/'+ori_test_list[idx_img].replace('leftImg8bit', 'gtFine').replace('/', '_').replace('.png', '_edge.npy')]
        label_data = []
        num_cls = np_data.shape[2]
        for k in range(num_cls):
github open-mmlab / mmdetection / tools / train_imagenet / train_imagenet.py View on Github external
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
github SaoYan / GenerativeSkinLesion / train_gan_archive.py View on Github external
    def update_trainer(self, stage, inter_epoch):
        if stage == 1:
            assert inter_epoch < opt.unit_epoch, 'Invalid epoch number!'
            G_alpha = 0
            D_alpha = 0
        else:
            total_stages = int(math.log2(opt.size/4)) + 1
            assert stage <= total_stages, 'Invalid stage number!'
            assert inter_epoch < opt.unit_epoch*3, 'Invalid epoch number!'
            # adjust dataloader (new current_size)
            if inter_epoch == 0:
                self.current_size *= 2
                self.transform = transforms.Compose([
                    transforms.Resize((300,300)),
                    transforms.RandomCrop((opt.size,opt.size)),
                    transforms.RandomVerticalFlip(),
                    transforms.RandomHorizontalFlip(),
                    transforms.Resize((self.current_size,self.current_size), Image.ANTIALIAS),
                    transforms.ToTensor()
                ])
                self.dataset = ISIC_GAN('train_gan.csv', shuffle=True, rotate=True, transform=self.transform)
                self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=opt.batch_size,
                    shuffle=True, num_workers=8, worker_init_fn=__worker_init_fn__)
            # grow networks
            delta = 1. / (opt.unit_epoch-1)
            if inter_epoch == 0:
                self.G.module.grow_network()
                self.D.module.grow_network()
            # fade in G (# epochs: unit_epoch)
            if inter_epoch < opt.unit_epoch: