import torch
from torchvision import datasets
from DeepRobust.image.attack.fgsm import FGM  # attack class; module path may vary by DeepRobust version
from DeepRobust.image.config import attack_params
model = Net()
print("Load network")
model.load_state_dict(torch.load("DeepRobust/image/save_models/mnist_pgdtraining.pt"))
model.eval()
xx = datasets.MNIST('DeepRobust/image/data', download=False).data[999:1000].to('cuda')
xx = xx.unsqueeze(1).float() / 255
print(xx.size())
## Set target
yy = datasets.MNIST('DeepRobust/image/data', download=False).targets[999:1000].to('cuda')
F1 = FGM(model, device="cuda")  ### or "cpu"
AdvExArray = F1.generate(xx, yy, **attack_params['FGSM_MNIST'])
predict0 = model(xx)
predict0 = predict0.argmax(dim=1, keepdim=True)
predict1 = model(AdvExArray)
predict1 = predict1.argmax(dim=1, keepdim=True)
print(predict0)
print(predict1)
AdvExArray = AdvExArray.cpu().detach().numpy()
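# Hedged sanity check (not part of the original example): measure the L-inf
# size of the perturbation and whether the attack flipped the prediction.
perturbation = abs(AdvExArray - xx.cpu().detach().numpy()).max()
print('L-inf perturbation:', perturbation)
print('prediction changed:', bool((predict0 != predict1).any()))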
def get_mnist_train_loader():
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    mnist_dataset = datasets.MNIST(DATA_FOLDER, train=True, download=True, transform=mnist_transform)
    return DataLoader(mnist_dataset, batch_size=BATCH_SIZE, shuffle=True)
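# Hedged usage sketch: DATA_FOLDER and BATCH_SIZE are module-level constants
# assumed to be defined in the original file.
train_loader = get_mnist_train_loader()
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # expected: [BATCH_SIZE, 1, 28, 28] and [BATCH_SIZE]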
dropout_p = 0.5
batch_size = 2048
best_loss = 100000
# When you load the model back via load_state_dict(), remember to call
# net.eval(); otherwise results will differ (dropout/batch-norm stay in train mode).
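# Runnable illustration of the comment above:
_demo = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Dropout(dropout_p))
torch.save(_demo.state_dict(), '/tmp/demo_state.pt')
_restored = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Dropout(dropout_p))
_restored.load_state_dict(torch.load('/tmp/demo_state.pt'))
_restored.eval()  # without this, Dropout stays active and outputs differ run to run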
use_gpu = torch.cuda.is_available()
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
dataiter = iter(trainloader)
images, labels = next(dataiter)  # .next() was removed in newer PyTorch; use the iterator protocol
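# Hedged illustration (assumes matplotlib is available): show a grid from the
# batch above. The un-normalization assumes mean=std=0.5; adjust to the actual transform.
import matplotlib.pyplot as plt
import numpy as np

def imshow(img):
    img = img / 2 + 0.5  # undo Normalize((0.5, ...), (0.5, ...)) if that was used
    plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
    plt.show()

imshow(torchvision.utils.make_grid(images[:8]))
print(' '.join(str(labels[j].item()) for j in range(8)))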
class RCNN(nn.Module):
    def __init__(self):
        super(RCNN, self).__init__()
        self.max_pool = nn.MaxPool2d(3, 2)
        self.lrn = nn.LocalResponseNorm(13)
        self.dropout = nn.Dropout(dropout_p)
        self.relu = nn.ReLU()
if args.dataset == 'cifar10':
    Dataset = datasets.CIFAR10
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]
    normalize = transforms.Normalize(mean, std)
    transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
elif args.dataset == 'MNIST':
    Dataset = datasets.MNIST
    # MNIST stats as used elsewhere in these examples; without these,
    # `normalize` and `transform` below would be undefined on this branch
    normalize = transforms.Normalize((0.1307,), (0.3081,))
    transform = transforms.Compose([transforms.ToTensor(), normalize])
else:
    raise NotImplementedError(f'Unknown dataset: {args.dataset}')
self.train = t.utils.data.DataLoader(  # `t` is assumed to be `torch` imported under an alias
    Dataset(root='./data', train=True, transform=transform, download=True),
    batch_size=args.batch_size, shuffle=True,
    num_workers=args.num_workers, pin_memory=True)
self.valid = t.utils.data.DataLoader(
    Dataset(root='./data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.num_workers, pin_memory=True)
import os
import torch
import torchvision
from torch.autograd import Variable
import torch.utils.data.dataloader as Data
# Check the CUDA version:
# cat /usr/local/cuda/version.txt
# For CUDA 8:
# pip install http://download.pytorch.org/whl/cu80/torch-0.4.1-cp27-cp27mu-linux_x86_64.whl
# pip install torchvision
train_data = torchvision.datasets.MNIST(
'./mnist-pytorch/data', train=True, transform=torchvision.transforms.ToTensor(), download=True
)
test_data = torchvision.datasets.MNIST(
'./mnist-pytorch/data', train=False, transform=torchvision.transforms.ToTensor()
)
print("train_data:", train_data.train_data.size())
print("train_labels:", train_data.train_labels.size())
print("test_data:", test_data.test_data.size())
# Batch size
batch_size = 128
# Number of training epochs
epochs = 10
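# Hedged sketch: wrap the datasets above in loaders using the `Data` alias
# imported earlier (torch.utils.data.dataloader exposes DataLoader).
train_loader = Data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_loader = Data.DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)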
class Net(torch.nn.Module):
    ...  # model definition elided in the original snippet

model = DDP(model, delay_allreduce=True)  # DDP here is apex.parallel.DistributedDataParallel; delay_allreduce is Apex-specific
# Data loading code
traindir = os.path.join(args.data, 'img_train')
valdir = os.path.join(args.data, 'img_val')
# normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
data_aug_scale = (0.08, 1.0)
# ToTensor()/Normalize() are deliberately left commented out: with a fast_collate
# collate_fn (Apex-style), images stay uint8 here and conversion/normalization
# presumably happen later on the GPU.
train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
    transforms.RandomResizedCrop(224, scale=data_aug_scale),
    transforms.RandomHorizontalFlip(),
    # transforms.ToTensor(),
    # normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    # transforms.ToTensor(),
    # normalize,
]))
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=args.train_batch, shuffle=False,
    num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=args.test_batch, shuffle=False,
    num_workers=args.workers, pin_memory=True, sampler=val_sampler, collate_fn=fast_collate)
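# Hedged sketch of a collate function in the spirit of NVIDIA Apex's ImageNet
# fast_collate (the real one may differ): it batches PIL images as uint8
# tensors so float conversion and normalization can run on the GPU instead of
# in the DataLoader workers.
import numpy as np

def fast_collate_sketch(batch):
    imgs = [img for img, _ in batch]
    targets = torch.tensor([target for _, target in batch], dtype=torch.int64)
    w, h = imgs[0].size  # PIL size is (width, height)
    tensor = torch.zeros((len(imgs), 3, h, w), dtype=torch.uint8)
    for i, img in enumerate(imgs):
        arr = np.asarray(img, dtype=np.uint8)
        if arr.ndim < 3:
            arr = np.expand_dims(arr, axis=-1)  # grayscale -> single channel
        tensor[i] += torch.from_numpy(np.ascontiguousarray(arr.transpose(2, 0, 1)))
    return tensor, targets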
def get_dataloader(config):
    data_dir = config.data_dir
    batch_size = config.batch_size
    trans = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.0,), (1.0,))])
    if config.data_name == 'mnist':
        train_dataset = dset.MNIST(root=data_dir, train=True, transform=trans, download=True)
        test_dataset = dset.MNIST(root=data_dir, train=False, transform=trans, download=True)
    elif config.data_name == 'fashion_mnist':
        train_dataset = dset.FashionMNIST(root=data_dir, train=True, transform=trans, download=True)
        test_dataset = dset.FashionMNIST(root=data_dir, train=False, transform=trans, download=True)
    else:
        raise ValueError(f'Unknown data_name: {config.data_name}')  # avoids a NameError below
    train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,
                              num_workers=config.num_work, shuffle=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size,
                             num_workers=config.num_work, shuffle=False)
    print('==>>> total training batch number: {}'.format(len(train_loader)))
    print('==>>> total testing batch number: {}'.format(len(test_loader)))
    data_loader = {'train': train_loader, 'test': test_loader}
    return data_loader
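# Hedged usage sketch: the config object is built inline here for illustration,
# with fields inferred from the function body above.
from types import SimpleNamespace
_cfg = SimpleNamespace(data_dir='./data', batch_size=64, data_name='mnist', num_work=2)
loaders = get_dataloader(_cfg)
for images, labels in loaders['train']:
    print(images.shape, labels.shape)
    break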
if opt.dataset == 'cifar10':
    dataset1 = datasets.CIFAR10(root=opt.dataroot, download=True,
                                transform=transforms.Compose(transform_list))
    dataset2 = datasets.CIFAR10(root=opt.dataroot, train=False,
                                transform=transforms.Compose(transform_list))

    def get_data(k):
        # index into the train set first, then continue into the test set
        if k < len(dataset1):
            return dataset1[k][0]
        else:
            return dataset2[k - len(dataset1)][0]
else:
    if opt.dataset in ['imagenet', 'folder', 'lfw']:
        dataset = datasets.ImageFolder(root=opt.dataroot,
                                       transform=transforms.Compose(transform_list))
    elif opt.dataset == 'lsun':
        # db_path is the older torchvision keyword; newer versions use root=
        dataset = datasets.LSUN(db_path=opt.dataroot, classes=[opt.lsun_class + '_train'],
                                transform=transforms.Compose(transform_list))

    def get_data(k):
        return dataset[k][0]
data_index = torch.load(os.path.join(opt.dataroot, 'data_index.pt'))
train_index = data_index['train']
if opt.final_test:
test_index = data_index['final_test']
else:
test_index = data_index['running_test']
gen = GeneratorLearnedInputSpace(opt.width, opt.height, opt.nfeature, opt.nlayer, opt.code_size, opt.norm, n_lis_layers=opt.r_iterations, upscaling=opt.g_upscaling)
print(gen)
gen.cuda()
testfunc = nn.MSELoss()
def _prepare_validation_loader(self):
    return torch.utils.data.DataLoader(
        datasets.MNIST('./data',
                       train=False,
                       download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=self._args.validate_batch_size,
        shuffle=True)  # shuffle so each validation pass sees batches in random order
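# Hedged usage sketch for a loader like the one above: a plain accuracy pass.
def validate_sketch(model, loader, device='cpu'):
    model.eval()  # see the earlier note about eval() after loading weights
    correct = 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            pred = model(data).argmax(dim=1)
            correct += (pred == target).sum().item()
    return correct / len(loader.dataset)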
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root=os.path.join('NN_models', 'CIFAR'), train=True, download=True,
                                        transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=os.path.join('NN_models', 'CIFAR'), train=False, download=True,
                                       transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
print('==> Building model..')
net = VGG('VGG16')
# In the common pytorch-cifar VGG16-with-BatchNorm layout, indices 2, 5, 9, 12,
# and 16 of the feature stack are ReLU activations; SELF_DEFINE swaps them for
# a custom module.
net.features[2] = SELF_DEFINE()
net.features[5] = SELF_DEFINE()
net.features[9] = SELF_DEFINE()
net.features[12] = SELF_DEFINE()
net.features[16] = SELF_DEFINE()
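# SELF_DEFINE is not shown in the snippet. A hedged, minimal stand-in is any
# nn.Module with the same (N, C, H, W) -> (N, C, H, W) contract, e.g. a custom
# activation:
class SelfDefineSketch(nn.Module):
    def forward(self, x):
        # replace with the activation/operation under study
        return torch.nn.functional.relu(x)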