def __init__(self, reservoir_dim, sfa_dim, ica_dim, pretrained=False, feature_selector='resnet18'):
    """
    Constructor
    """
    # Upper class
    super(NilsNet, self).__init__()
    # Select the feature-extraction backbone
    if feature_selector == 'resnet18':
        self.feature_selector = torchvision.models.resnet18(pretrained=pretrained)
    elif feature_selector == 'resnet34':
        self.feature_selector = torchvision.models.resnet34(pretrained=pretrained)
    elif feature_selector == 'resnet50':
        self.feature_selector = torchvision.models.resnet50(pretrained=pretrained)
    elif feature_selector == 'alexnet':
        self.feature_selector = torchvision.models.alexnet(pretrained=pretrained)
    # end if
    # Skip the last layer (only valid for the ResNet variants; AlexNet exposes
    # `classifier` rather than `fc`)
    self.reservoir_input_dim = self.feature_selector.fc.in_features
    self.feature_selector.fc = ecnn.Identity()
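# Usage sketch (hypothetical, not part of the snippet above): replacing the
# final fully-connected layer with an identity turns a ResNet into a plain
# feature extractor that emits pooled vectors.
import torch
import torch.nn as nn
import torchvision

backbone = torchvision.models.resnet18(pretrained=True)
feature_dim = backbone.fc.in_features        # 512 for resnet18
backbone.fc = nn.Identity()                  # same trick as ecnn.Identity() above
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))  # -> shape (1, 512)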
"conv3_1": 10, "conv3_2": 12, "conv3_3": 14, "conv3_4": 16,
"conv4_1": 19, "conv4_2": 21, "conv4_3": 23, "conv4_4": 25,
"conv5_1": 28, "conv5_2": 30, "conv5_3": 32, "conv5_4": 34}
# convert the mean and stdv to torch.tensor
mean = torch.tensor(mean, dtype=torch.float32, device=device)
stdv = torch.tensor(stdv, dtype=torch.float32, device=device)
self.transforms = T.Normalize(mean, stdv)  # transform to normalize the image
# create an integer mapping of the layer names
self.con_layers = [mapping_dict[layer] for layer in con_layers]
self.sty_layers = [mapping_dict[layer] for layer in sty_layers]
self.all_layers = self.con_layers + self.sty_layers
# initialize a pre-trained model in eval mode (no intent to update the weights)
self.vgg19 = models.vgg19(pretrained=True, progress=True).features
self.vgg19 = self.vgg19.to(device).eval()
# replace the max pooling layers by average pooling
for name, layer in self.vgg19.named_children():
    if isinstance(layer, nn.MaxPool2d):
        self.vgg19[int(name)] = nn.AvgPool2d(kernel_size=2, stride=2)
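# Sketch (assumed; the snippet does not show it): collect activations at the
# mapped content/style indices by walking the VGG19 feature stack layer by layer.
def extract_features(self, x):
    feats = {}
    x = self.transforms(x)                   # normalize with the stats above
    for idx, layer in enumerate(self.vgg19):
        x = layer(x)
        if idx in self.all_layers:           # a requested content/style layer
            feats[idx] = x
    return feats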
import argparse
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from apex import amp
from apex.parallel import DistributedDataParallel
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__") and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', metavar='DIR', default='/home/zhangzhi/Data/ImageNet2012', help='path to dataset')
parser.add_argument('-a',
                    '--arch',
                    metavar='ARCH',
                    default='resnet18',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)')
parser.add_argument('-j',
                    '--workers',
                    default=4,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
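# Example invocation (hypothetical flag values): parse the CLI arguments and
# build the requested architecture from torchvision by name.
args = parser.parse_args(['--arch', 'resnet50', '--workers', '8'])
model = models.__dict__[args.arch](pretrained=False)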
def __init__(self, out_size):
    super(DenseNet121, self).__init__()
    self.densenet121 = torchvision.models.densenet121(pretrained=True)
    num_ftrs = self.densenet121.classifier.in_features
    # replace the 1000-way ImageNet head with a sigmoid classifier
    self.densenet121.classifier = nn.Sequential(
        nn.Linear(num_ftrs, out_size),
        nn.Sigmoid()
    )
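# The snippet stops after __init__; a minimal forward (assumed) just delegates
# to the wrapped backbone, yielding one sigmoid probability per class.
def forward(self, x):
    return self.densenet121(x)   # shape (N, out_size), values in (0, 1)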
def __init__(self,
             feature_layer=34,
             use_bn=False,
             use_input_norm=True):
    super(VGGFeatureExtractor, self).__init__()
    if use_bn:
        model = torchvision.models.vgg19_bn(pretrained=True)
    else:
        model = torchvision.models.vgg19(pretrained=True)
    self.use_input_norm = use_input_norm
    if self.use_input_norm:
        # ImageNet channel statistics, shaped for broadcasting over NCHW input
        self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).cuda()
        self.std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).cuda()
    self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
    # freeze the extractor so no gradients flow into the VGG weights
    for k, v in self.features.named_parameters():
        v.requires_grad = False
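# Matching forward pass (assumed; not included in the snippet): normalize with
# the ImageNet statistics, then run the truncated, frozen VGG feature stack.
def forward(self, x):
    if self.use_input_norm:
        x = (x - self.mean) / self.std
    return self.features(x)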
def my_resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = MyResNet(torchvision.models.resnet.Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(torch.utils.model_zoo.load_url(torchvision.models.resnet.model_urls['resnet101']))
    return model
def __init__(self, num_classes, pretrained=True, **kwargs):
    super(ResnetGCN, self).__init__()
    resnet = models.resnet101(pretrained=pretrained)
    self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
    self.layer1 = resnet.layer1
    self.layer2 = resnet.layer2
    self.layer3 = resnet.layer3
    self.layer4 = resnet.layer4
    # Assuming an input of size 240x320
    ks = 7
    self.gcn256 = GlobalConvolutionBlock(256, num_classes, (59, 79))
    self.br256 = BoundaryRefine(num_classes)
    self.gcn512 = GlobalConvolutionBlock(512, num_classes, (29, 39))
    self.br512 = BoundaryRefine(num_classes)
    self.gcn1024 = GlobalConvolutionBlock(1024, num_classes, (13, 19))
    self.br1024 = BoundaryRefine(num_classes)
    self.gcn2048 = GlobalConvolutionBlock(2048, num_classes, (7, 9))
    self.br2048 = BoundaryRefine(num_classes)
def squeezenet1_0(pretrained=False, **kwargs):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = SqueezeNet(version=1.0, **kwargs)
    if pretrained:
        model.load_state_dict(torch.utils.model_zoo.load_url(torchvision.models.squeezenet.model_urls['squeezenet1_0']))
    return model
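# Example (hypothetical): load the pretrained SqueezeNet 1.0 and run a dummy
# forward pass to confirm the 1000-way ImageNet output.
import torch

net = squeezenet1_0(pretrained=True).eval()
with torch.no_grad():
    logits = net(torch.randn(1, 3, 224, 224))   # -> shape (1, 1000)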
def _get_iou_types(model):
    # unwrap DistributedDataParallel to inspect the underlying model class
    model_without_ddp = model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_without_ddp = model.module
    iou_types = ["bbox"]
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append("segm")
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append("keypoints")
    return iou_types
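# Example (hypothetical): a Mask R-CNN instance is evaluated with both box and
# segmentation IoU types.
detector = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
assert _get_iou_types(detector) == ["bbox", "segm"]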