def start_websocket_server_worker(
    id, host, port, hook, verbose, keep_labels=None, training=True
):  # pragma: no cover
    """Helper function for spinning up a websocket server and setting up the local datasets."""
    server = WebsocketServerWorker(id=id, host=host, port=port, hook=hook, verbose=verbose)

    # Set up toy data (MNIST example)
    mnist_dataset = datasets.MNIST(
        root="./data",
        train=training,
        download=True,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        ),
    )

    if training:
        # Keep only the samples whose label appears in keep_labels
        indices = np.isin(mnist_dataset.targets, keep_labels).astype("uint8")
        logger.info("number of true indices: %s", indices.sum())
        selected_data = (
            torch.native_masked_select(mnist_dataset.data.transpose(0, 2), torch.tensor(indices))
            .view(28, 28, -1)
            .transpose(2, 0)
        )
        logger.info("after selection: %s", selected_data.shape)
        selected_targets = torch.native_masked_select(mnist_dataset.targets, torch.tensor(indices))
        # Wrap the filtered tensors in a Syft BaseDataset and register it with the worker
        # (the dataset key below is illustrative)
        dataset = sy.BaseDataset(
            data=selected_data, targets=selected_targets, transform=mnist_dataset.transform
        )
        server.add_dataset(dataset, key="mnist")

    return server
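A minimal sketch of how such a helper is typically launched from the command line; the argument names, the keep_labels choice, and the final server.start() call follow the common PySyft websocket-server examples and are assumptions here, not part of the snippet above.

import argparse

import torch
import syft as sy

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run a websocket server worker.")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--port", type=int, default=8777)
    parser.add_argument("--id", type=str, default="alice", help="name of the worker")
    args = parser.parse_args()

    hook = sy.TorchHook(torch)  # hook torch so tensors can be exchanged between workers
    server = start_websocket_server_worker(
        id=args.id,
        host=args.host,
        port=args.port,
        hook=hook,
        verbose=True,
        keep_labels=[0, 1, 2, 3],
        training=True,
    )
    server.start()  # blocks and serves requests from remote clients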
# Choose the requested GPU when one is available, otherwise fall back to the CPU
if args.gpu is not None and torch.cuda.is_available():
    device = torch.device('cuda:{0}'.format(args.gpu))
else:
    device = torch.device('cpu')

# Load pretrained VQA weights; abort with a clear error if the checkpoint is missing
try:
    weights = torch.load(args.checkpoint)
except Exception:
    print("ERROR: Default weights missing. Please specify weights for the VQA model")
    exit(1)

model.load_state_dict(weights["model"])
vision_model.eval().to(device)
model.eval().to(device)
img_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
print("Processing image")
im = Image.open(args.image).convert('RGB')  # force 3 channels so Normalize gets the expected shape
img = img_transforms(im)
img = img.unsqueeze(0) # add batch dimension
img = img.to(device)
img_features = vision_model(img)
print("Processing question")
    transforms_validate = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = torchvision.datasets.CIFAR10(root='/disk/scratch/datasets/cifar',
                                            train=True, download=False, transform=transforms_train)
    valset = torchvision.datasets.CIFAR10(root='/disk/scratch/datasets/cifar',
                                          train=False, download=False, transform=transforms_validate)
elif args.dataset == 'cifar100':
    num_classes = 100
    if args.network == 'DARTS':
        raise NotImplementedError("DARTS transforms exist for CIFAR-10 but have not been ported to CIFAR-100 yet.")
    transforms_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
    ])
    transforms_validate = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
    ])
    trainset = torchvision.datasets.CIFAR100(root='/disk/scratch/datasets/cifar100',
                                             train=True, download=True, transform=transforms_train)
    validateset = torchvision.datasets.CIFAR100(root='/disk/scratch/datasets/cifar100',
                                                train=False, download=True, transform=transforms_validate)
elif args.dataset == 'imagenet':
    num_classes = 1000
    traindir = os.path.join(args.imagenet_loc, 'train')
    valdir = os.path.join(args.imagenet_loc, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # Standard ImageNet training augmentation
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
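The ImageNet branch is cut off after the training transform. A conventional continuation (the names transform_validate and valset are assumptions) would add the matching validation transform and wrap the two directories with ImageFolder:

    transform_validate = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    trainset = torchvision.datasets.ImageFolder(traindir, transform_train)
    valset = torchvision.datasets.ImageFolder(valdir, transform_validate)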
def preprocess(image_name, image_size):
    image = Image.open(image_name).convert('RGB')
    # If a single number is given, scale the longer side to it while keeping the aspect ratio
    if not isinstance(image_size, tuple):
        image_size = tuple([int((float(image_size) / max(image.size)) * x) for x in (image.height, image.width)])
    Loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()])
    rgb2bgr = transforms.Compose([transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])])])
    Normalize = transforms.Compose([transforms.Normalize(mean=[103.939, 116.779, 123.68], std=[1, 1, 1])])
    # Scale to the 0-255 range, reorder RGB -> BGR and subtract the Caffe/VGG channel means
    tensor = Normalize(rgb2bgr(Loader(image) * 256)).unsqueeze(0)
    return tensor
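As a companion sketch to preprocess(), an assumed inverse (the name deprocess and the final clamp are not from the snippet): it re-adds the channel means, swaps BGR back to RGB, and rescales to [0, 1] so the tensor can be viewed as a PIL image again.

import torch
import torchvision.transforms as transforms

def deprocess(output_tensor):
    # Re-add the channel means that preprocess() subtracted
    Normalize = transforms.Compose([transforms.Normalize(mean=[-103.939, -116.779, -123.68], std=[1, 1, 1])])
    bgr2rgb = transforms.Compose([transforms.Lambda(lambda x: x[torch.LongTensor([2, 1, 0])])])
    output_tensor = bgr2rgb(Normalize(output_tensor.squeeze(0).cpu())) / 256
    output_tensor.clamp_(0, 1)
    return transforms.ToPILImage()(output_tensor)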
def load_data():
    transform_train = transforms.Compose(
        [transforms.Resize(227),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose(
        [transforms.Resize(227),
         transforms.ToTensor(),
         transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                                transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
                                              shuffle=False, num_workers=2)
    testset = datasets.CIFAR10(root='./data', train=False, download=True,
                               transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=100,
                                             shuffle=False, num_workers=2)
    return trainloader, testloader
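A quick usage check for load_data() above; the expected shapes assume the 227x227 resize and the batch size of 100 set in the function.

trainloader, testloader = load_data()
images, labels = next(iter(trainloader))
print(images.shape)  # expected: torch.Size([100, 3, 227, 227])
print(labels.shape)  # expected: torch.Size([100])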
    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomCrop(size=227),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.CenterCrop(227),
        transforms.ToTensor(),
        normalize,
    ])
else:
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomCrop(size=224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    transform_test = transforms.Compose([
        transforms.ToPILImage(),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
trainset = datasets.MLDataInstance(src_dir=src_dir, dataset_name=args.dataset,
                                   train=True, transform=transform_train)
def __init__(self, data_dir, qafile, img_dir, phase, img_scale=(256, 256), img_crop=224, raw_images=False):
    self.data_dir = data_dir
    # Load the pickled list of QA examples for this split
    with open(os.path.join(data_dir, qafile), 'rb') as f:
        self.examples = pickle.load(f)
    if phase == 'train':
        self.load_vocab(data_dir)
    self.transforms = transforms.Compose([
        transforms.Resize(img_scale),
        transforms.CenterCrop(img_crop),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])])
    self.img_dir = img_dir
    self.phase = phase
    self.raw_images = raw_images  # if True, load raw images rather than precomputed embeddings
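To round out the dataset class above, a hedged sketch of the remaining Dataset methods; the per-example field names ('image_name', 'question', 'answer') and the embedding file layout are placeholders, since the structure of the pickled records is not shown in the snippet.

def __len__(self):
    return len(self.examples)

def __getitem__(self, index):
    example = self.examples[index]  # record layout is assumed; adapt to the real pickle format
    if self.raw_images:
        # Load and transform the raw image
        image = Image.open(os.path.join(self.img_dir, example['image_name'])).convert('RGB')
        image = self.transforms(image)
    else:
        # Assumed fallback: a precomputed image embedding stored alongside the images
        image = torch.load(os.path.join(self.img_dir, example['image_name'] + '.pt'))
    return image, example['question'], example['answer']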
# Total L-infinity budget for the attack, split evenly across the iterative steps
eps = 2.0 * args.max_epsilon / 255.0
num_steps = 10
step_eps = eps / num_steps

if args.targeted:
    dataset = Dataset(args.input_dir)
else:
    dataset = Dataset(args.input_dir, target_file=None)
loader = data.DataLoader(
    dataset,
    batch_size=args.batch_size,
    shuffle=False)

tf = transforms.Compose([
    transforms.Resize(args.img_size),
    transforms.CenterCrop(args.img_size),
    transforms.ToTensor(),
    LeNormalize(),
])
dataset.set_transform(tf)

model = torchvision.models.inception_v3(pretrained=False, transform_input=False)
loss_fn = torch.nn.CrossEntropyLoss()
if not args.no_gpu:
    model = model.cuda()
    loss_fn = loss_fn.cuda()
model.eval()

if args.checkpoint_path is not None and os.path.isfile(args.checkpoint_path):
    checkpoint = torch.load(args.checkpoint_path)
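The eps, num_steps, and step_eps values above are the usual ingredients of an iterative gradient-sign attack. Below is a minimal sketch of that loop, written against the model and loss_fn defined above; the function name and the untargeted (loss-ascending) formulation are assumptions, not taken from the source.

import torch

def iterative_fgsm(model, loss_fn, x, target, eps, step_eps, num_steps):
    """Minimal untargeted iterative FGSM: take num_steps signed-gradient steps of size
    step_eps and keep the total perturbation inside an L-infinity ball of radius eps."""
    x_orig = x.detach()
    x_adv = x_orig.clone()
    for _ in range(num_steps):
        x_adv.requires_grad_(True)
        loss = loss_fn(model(x_adv), target)
        grad = torch.autograd.grad(loss, x_adv)[0]
        with torch.no_grad():
            x_adv = x_adv + step_eps * grad.sign()                # ascend the loss
            x_adv = x_orig + (x_adv - x_orig).clamp(-eps, eps)    # project back into the eps-ball
    return x_adv.detach()

Inside the loop over loader, each (input, target) batch would then be replaced by iterative_fgsm(model, loss_fn, input, target, eps, step_eps, num_steps) before being written out or evaluated.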
print("Downloading MNIST data...")
trainset = MNIST('./data', train=True, transform=trans_img, download=True)
testset = MNIST('./data', train=False, transform=trans_img, download=True)
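The trans_img transform used above is not shown in this snippet; a typical definition (an assumption, and it would sit before the two MNIST(...) calls), plus a matching pair of loaders, could look like this. Batch sizes are placeholders.

from torchvision import transforms
from torch.utils.data import DataLoader

# Assumed transform for the MNIST images (values are the usual MNIST mean/std)
trans_img = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
testloader = DataLoader(testset, batch_size=64, shuffle=False)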
# CIFAR-10 dataset
if args.dataset == 'cifar10':
    # Data
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = CIFAR10(root='./data', train=True, transform=transform_train, download=True)
    testset = CIFAR10(root='./data', train=False, transform=transform_test, download=True)
elif args.dataset == 'cifar100':
    # Data
    print('==> Preparing data..')
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    trainset = CIFAR100(root='./data', train=True, transform=transform_train, download=True)
    testset = CIFAR100(root='./data', train=False, transform=transform_test, download=True)
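Both dataset branches above end by building trainset and testset; a typical pair of loaders to go with them (batch sizes and worker counts are assumptions) would be:

from torch.utils.data import DataLoader

trainloader = DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testloader = DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)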
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from sotabenchapi.core import BenchmarkResult, check_inputs
from torchbench.utils import send_model_to_device, default_data_to_device
from torchbench.image_classification.utils import evaluate_classification
class CIFAR10:
    """CIFAR 10 Dataset."""

    dataset = datasets.CIFAR10
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    input_transform = transforms.Compose([transforms.ToTensor(), normalize])
    send_data_to_device = default_data_to_device
    task = "Image Classification"

    @classmethod
    @check_inputs
    def benchmark(
        cls,
        model,
        model_description=None,
        input_transform=None,
        target_transform=None,
        model_output_transform=None,
        send_data_to_device=None,
        device: str = "cuda",
        data_root: str = "./.data/vision/cifar10",
        num_workers: int = 4,