How to use the torchvision.transforms.Scale function in torchvision

To help you get started, we’ve selected a few torchvision.transforms.Scale examples, based on popular ways it is used in public projects.
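transforms.Scale(size) resizes a PIL image so that its shorter side matches size while preserving the aspect ratio, which is why it is usually paired with CenterCrop in the snippets below. Note that Scale has been deprecated since torchvision 0.2 in favor of the identically behaved transforms.Resize, and recent releases drop the old name entirely. A minimal sketch of both spellings (the file path is a placeholder):

from PIL import Image
from torchvision import transforms

img = Image.open("example.jpg")  # placeholder path

# Scale(n) resizes so the shorter side becomes n, keeping the aspect ratio.
scaled = transforms.Scale(256)(img)

# On torchvision >= 0.2 the same transform is named Resize; prefer it in new code.
resized = transforms.Resize(256)(img)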

github dxyang / StyleTransfer / style.py
        testImage_maine = utils.load_image("content_imgs/maine.jpg")
        testImage_maine = img_transform_512(testImage_maine)
        testImage_maine = Variable(testImage_maine.repeat(1, 1, 1, 1), requires_grad=False).type(dtype)

    # define network
    image_transformer = ImageTransformNet().type(dtype)
    optimizer = Adam(image_transformer.parameters(), LEARNING_RATE) 

    loss_mse = torch.nn.MSELoss()

    # load vgg network
    vgg = Vgg16().type(dtype)

    # get training dataset
    dataset_transform = transforms.Compose([
        transforms.Scale(IMAGE_SIZE),           # scale shortest side to image_size
        transforms.CenterCrop(IMAGE_SIZE),      # crop center image_size out
        transforms.ToTensor(),                  # turn image from [0-255] to [0-1]
        utils.normalize_tensor_transform()      # normalize with ImageNet values
    ])
    train_dataset = datasets.ImageFolder(args.dataset, dataset_transform)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE)

    # style image
    style_transform = transforms.Compose([
        transforms.ToTensor(),                  # turn image from [0-255] to [0-1]
        utils.normalize_tensor_transform()      # normalize with ImageNet values
    ])
    style = utils.load_image(args.style_image)
    style = style_transform(style)
    style = Variable(style.repeat(BATCH_SIZE, 1, 1, 1)).type(dtype)
    style_name = os.path.split(args.style_image)[-1].split('.')[0]
github GunhoChoi / Kind-PyTorch-Tutorial / 11_StyleTransfer_ResNet / StyleTransfer_LBFGS_gpu.py
def image_preprocess(img_dir):
	img = Image.open(img_dir)
	transform = transforms.Compose([
					transforms.Scale(image_size),
					transforms.CenterCrop(image_size),
					transforms.ToTensor(),
					transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961], 
                                         std=[1,1,1]),
				])
	img = transform(img).view((-1,3,image_size,image_size))
	return img
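The trailing view((-1, 3, image_size, image_size)) turns the (3, H, W) tensor into a batch of one, which is safe here because CenterCrop guarantees a square image_size output. The more idiomatic way to add the batch dimension is unsqueeze, as in this sketch:

img = transform(img).unsqueeze(0)  # (3, H, W) -> (1, 3, H, W)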
github naviocean / pseudo-3d-pytorch / transforms.py
def __init__(self, size, interpolation=Image.BILINEAR):
    self.worker = torchvision.transforms.Scale(size, interpolation)
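The excerpt shows only the constructor of this wrapper class; a typical __call__ that delegates to the wrapped transform would look like the following sketch (an assumption, not the repository's verbatim code):

def __call__(self, img):
    # delegate to the wrapped torchvision transform
    return self.worker(img)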
github CoderEugene / DL-Gesture-Recognition / utils / VisualizeTensor.py
def visualize_skig_tensor():
    """对用于训练的Skig数据集进行可视化

    """
    transform = transforms.Compose(
        [transforms.Scale(112, interpolation=Image.CUBIC),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[1., 1., 1.])])

    train_data = GestureDataSet(root='/home/zdh/zz/workspace/refactorSkig', train=False, output_frames_cnt=16,
                                transform=transform)

    train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=1, shuffle=True, num_workers=0)

    for batch_idx, (data, target) in enumerate(train_loader):
        images = torch.transpose(data[0, ...], 1, 0)  # (L, C, H, W)
        _imshow(torchvision.utils.make_grid(images), title='skig-' + str(target[0]))
github ucbdrive / skipnet / imagenet / train_sp.py
            model.load_state_dict(checkpoint['state_dict'])
            logging.info('=> loaded checkpoint `{}` (epoch: {})'.format(
                args.resume, checkpoint['epoch']
            ))
        else:
            logging.info('=> no checkpoint found at `{}`'.format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    t = transforms.Compose([
        transforms.Scale(args.scale_size),
        transforms.CenterCrop(args.crop_size),
        transforms.ToTensor(),
        normalize])
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, t),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    criterion = nn.CrossEntropyLoss().cuda()
    validate(args, val_loader, model, criterion, args.start_epoch)
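For reference, on torchvision releases where Scale has been removed, the same validation pipeline is spelled with Resize; a sketch, where args.scale_size and args.crop_size are the script's own parameters (conventionally 256 and 224 for ImageNet):

t = transforms.Compose([
    transforms.Resize(args.scale_size),
    transforms.CenterCrop(args.crop_size),
    transforms.ToTensor(),
    normalize])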
github ozansener / GAN / dcgan / run.py
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--clamp', type=float, default=1e-2)
parser.add_argument('--wasserstein', type=bool, default=True)

opt = parser.parse_args()
if opt.clean_ckpt:
  shutil.rmtree(opt.ckpt_path)
os.makedirs(opt.ckpt_path, exist_ok=True)
logger = logging.Logger(opt.ckpt_path)
opt.seed = 1
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
cudnn.benchmark = True
EPS = 1e-12

transform = transforms.Compose([transforms.Scale(opt.image_size),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
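# Note: Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) maps the [0, 1] tensor
# to [-1, 1], matching the tanh output range of a typical DCGAN generator.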
dataset = dset.CIFAR10(root=opt.dataset_path, train=True, download=False, transform=transform)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.num_workers)

D = torch.nn.DataParallel(model.Discriminator(opt).cuda(), device_ids=range(opt.num_gpus))
G = torch.nn.DataParallel(model.Generator(opt).cuda(), device_ids=range(opt.num_gpus))

if opt.load_ckpt:
  D.load_state_dict(torch.load(os.path.join(opt.ckpt_path, 'D.pth')))
  G.load_state_dict(torch.load(os.path.join(opt.ckpt_path, 'G.pth')))

criterion = nn.BCELoss().cuda()
if opt.wasserstein:
  optimizer_d = optim.RMSprop(D.parameters(), lr=opt.lr_rmsprop)
  optimizer_g = optim.RMSprop(G.parameters(), lr=opt.lr_rmsprop)
github Marcovaldong / ISGAN / src / train.py
    encoder.cuda()
    decoder.cuda()
    discriminator.cuda()
    discriminator2.cuda()
encoder.apply(weights_init)
decoder.apply(weights_init)

# loss function
BCE_loss = nn.BCELoss().cuda()
MSE_loss = nn.MSELoss().cuda()
SSIM_loss = SSIM(window_size=11).cuda()
MSSIM_loss = MSSSIM().cuda()


# Data pre-processing
transform = transforms.Compose([transforms.Scale(params.input_size),
                                transforms.ToTensor(),])
                                # transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
                                # transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
# Train data
# train_data = DatasetFromFolder(data_dir, subfolder=subfolder, transform=transform, resize_scale=params.input_size,
#                                name=False)
train_data = DatasetFromFolder2(data_dir, subfolder=subfolder, transform=transform, resize_scale=params.resize_scale,
                                crop_size=params.crop_size, fliplr=params.fliplr)
train_data_loader = torch.utils.data.DataLoader(dataset=train_data,
                                                batch_size=params.batch_size,
                                                shuffle=True)

# Test data
# test_data = DatasetFromFolder(data_dir, subfolder='img_test', transform=transform, resize_scale=params.input_size,
#                               crop_size=params.crop_size, fliplr=params.fliplr, yuv=True)
test_data = DatasetFromFolder2(data_dir, subfolder='lfw_test.txt', transform=transform,
github halide / Halide / apps / resnet_50_blockwise / run_torch_resnet.py
def image_loader(image_name, size):
    loader = transforms.Compose([transforms.Scale(size), transforms.ToTensor()])
    image = Image.open(image_name)
    image = loader(image).float()
    return image