def __init__(self, in_channels, out_channels,
             stride=1, splits_left=2, initialW=None):
    super(ShuffleNetV2Block, self).__init__()
    with self.init_scope():
        if stride == 2:
            # Main branch: 1x1 conv -> 3x3 depthwise conv -> 1x1 conv,
            # each convolution followed by batch normalization.
            self.conv1 = L.Convolution2D(
                in_channels, in_channels, 1, 1, 0, initialW=initialW,
                nobias=True)
            self.bn1 = L.BatchNormalization(in_channels)
            self.conv2 = L.DepthwiseConvolution2D(
                in_channels, 1, 3, 1, 1,
                initialW=initialW, nobias=True)
            self.bn2 = L.BatchNormalization(in_channels)
            self.conv3 = L.Convolution2D(
                in_channels, out_channels // 2, 1, 1, 0, initialW=initialW,
                nobias=True)
            self.bn3 = L.BatchNormalization(out_channels // 2)
            # Shortcut branch: 3x3 depthwise conv -> 1x1 conv.
            self.conv4 = L.DepthwiseConvolution2D(
                in_channels, 1, 3, 1, 1,
                initialW=initialW, nobias=True)
            self.bn4 = L.BatchNormalization(in_channels)
            self.conv5 = L.Convolution2D(
                in_channels, out_channels // 2, 1, 1, 0, initialW=initialW,
                nobias=True)
            self.bn5 = L.BatchNormalization(out_channels // 2)
        elif stride == 1:
            # Only the channels left after the channel split are transformed.
            self.in_channels = in_channels - in_channels // splits_left
            self.conv1 = L.Convolution2D(
                self.in_channels, self.in_channels, 1, 1, 0,
                initialW=initialW, nobias=True)
            # ... (the rest of the stride == 1 branch is truncated in the
            # source snippet)
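
A minimal sketch of how the stride == 2 links defined above are typically wired together at run time; the helper name _downsample_forward, the use of F.relu and F.concat, and the omission of the final channel shuffle are illustrative assumptions, not part of the original code.

def _downsample_forward(self, x):
    # Hypothetical helper, not taken from the source repository.
    # Main branch: 1x1 conv -> BN -> ReLU, 3x3 depthwise -> BN,
    # 1x1 conv -> BN -> ReLU.
    h1 = F.relu(self.bn1(self.conv1(x)))
    h1 = self.bn2(self.conv2(h1))
    h1 = F.relu(self.bn3(self.conv3(h1)))
    # Shortcut branch: 3x3 depthwise -> BN, 1x1 conv -> BN -> ReLU.
    h2 = self.bn4(self.conv4(x))
    h2 = F.relu(self.bn5(self.conv5(h2)))
    # Concatenate the two out_channels // 2 halves along the channel axis.
    return F.concat((h1, h2), axis=1)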
        # ... (earlier Mixed blocks are truncated in the source snippet)
        self.mixed_3 = Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(288, 384, 3, stride=2, pad=0)),
                ('bn_conv', L.BatchNormalization(384)),
                ('_relu', F.relu)
            ])),
            ('tower', Tower([
                ('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(64)),
                ('_relu', F.relu),
                ('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
                ('bn_conv_1', L.BatchNormalization(96)),
                ('_relu_1', F.relu),
                ('conv_2', L.Convolution2D(96, 96, 3, stride=2, pad=0)),
                ('bn_conv_2', L.BatchNormalization(96)),
                ('_relu_2', F.relu)
            ])),
            ('pool', Tower([
                ('_pooling', MaxPooling2D(3, 2, 0))
            ]))
        ])
        self.mixed_4 = Mixed([
            ('conv', Tower([
                ('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(192)),
                ('_relu', F.relu)
            ])),
            ('tower', Tower([
                ('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
                ('bn_conv', L.BatchNormalization(128)),
                ('_relu', F.relu),
                # ... (the remaining layers of mixed_4 are truncated in the
                # source snippet)
            ]))
        ])
def get_norm_layer(norm='instance'):
    # Note: the initial weights of the normalization layers are not set here.
    if norm == 'batch':
        norm_layer = functools.partial(L.BatchNormalization, use_gamma=True,
                                       use_beta=True)
    elif norm == 'instance':
        norm_layer = functools.partial(InstanceNormalization, use_gamma=False,
                                       use_beta=False)
    else:
        raise NotImplementedError(
            'normalization layer [%s] is not implemented' % norm)
    return norm_layer
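
A minimal usage sketch for the factory above, assuming functools has been imported, L is chainer.links, and InstanceNormalization comes from the surrounding project; the 64-channel width is an arbitrary example.

# Hypothetical usage: build the factory once and apply it wherever a
# normalization layer of a given width is needed.
norm_layer = get_norm_layer(norm='batch')
bn = norm_layer(64)  # same as L.BatchNormalization(64, use_gamma=True, use_beta=True)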
def __init__(self):
    super(AlexBN, self).__init__(
        conv1=L.Convolution2D(3, 96, 11, stride=4),
        bn1=L.BatchNormalization(96),
        conv2=L.Convolution2D(96, 256, 5, pad=2),
        bn2=L.BatchNormalization(256),
        conv3=L.Convolution2D(256, 384, 3, pad=1),
        conv4=L.Convolution2D(384, 384, 3, pad=1),
        conv5=L.Convolution2D(384, 256, 3, pad=1),
        fc6=L.Linear(9216, 4096),
        fc7=L.Linear(4096, 4096),
        fc8=L.Linear(4096, 1000),
    )
    self.train = True
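
A plausible forward pass for the AlexBN definition above, written as a sketch under the assumption that F is chainer.functions and the layer order follows the usual AlexNet-with-BN pattern; the repository's own method may differ. It shows where bn1 and bn2 sit: right after conv1 and conv2, before the ReLU and pooling stages.

def forward(self, x):
    # Hypothetical forward sketch; not taken from the source repository.
    h = F.max_pooling_2d(F.relu(self.bn1(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(self.bn2(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)))
    h = F.dropout(F.relu(self.fc7(h)))
    return self.fc8(h)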
def to_link(self):
    args = self.to_chainer_args()
    if args["dtype"] == "float32":
        args["dtype"] = numpy.float32
    elif args["dtype"] == "float64":
        args["dtype"] = numpy.float64
    elif args["dtype"] == "float16":
        args["dtype"] = numpy.float16
    return chainer.links.BatchNormalization(**args)
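
For reference, a sketch of the kind of link to_link returns once the dtype string has been mapped to a NumPy type; the channel count of 32 is an arbitrary example, and the remaining keyword arguments would come from to_chainer_args.

import numpy
import chainer

# Hypothetical example: a BatchNormalization link over 32 channels whose
# parameters are stored as float16.
bn = chainer.links.BatchNormalization(32, dtype=numpy.float16)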
        dc5=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 3)),
                            int(init_channel * (ap_factor ** 3)), kernel_size, 1,
                            int(kernel_size / 2), initialW=initializer, initial_bias=None),
        dc6=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 3)),
                              int(init_channel * (ap_factor ** 3)), self.pool_size,
                              self.pool_size, 0, initialW=initializer, initial_bias=None),
        dc7=L.ConvolutionND(ndim,
                            int(init_channel * (ap_factor ** 2) + init_channel * (ap_factor ** 3)),
                            int(init_channel * (ap_factor ** 2)), kernel_size, 1,
                            int(kernel_size / 2), initialW=initializer, initial_bias=None),
        dc8=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 2)),
                            int(init_channel * (ap_factor ** 2)), kernel_size, 1,
                            int(kernel_size / 2), initialW=initializer, initial_bias=None),
        dc9=L.DeconvolutionND(ndim, int(init_channel * (ap_factor ** 2)),
                              int(init_channel * (ap_factor ** 2)), self.pool_size,
                              self.pool_size, 0, initialW=initializer, initial_bias=None),
        dc10=L.ConvolutionND(ndim,
                             int(init_channel * (ap_factor ** 1) + init_channel * (ap_factor ** 2)),
                             int(init_channel * (ap_factor ** 1)), kernel_size, 1,
                             int(kernel_size / 2), initialW=initializer, initial_bias=None),
        dc11=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)),
                             int(init_channel * (ap_factor ** 1)), kernel_size, 1,
                             int(kernel_size / 2), initialW=initializer, initial_bias=None),
        dc12=L.ConvolutionND(ndim, int(init_channel * (ap_factor ** 1)), n_class, 1, 1,
                             initialW=initializer, initial_bias=None),
        bnc0=L.BatchNormalization(init_channel),
        bnc1=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
        bnc2=L.BatchNormalization(int(init_channel * (ap_factor ** 1))),
        bnc3=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
        bnc4=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
        bnc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
        bnc6=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
        bnc7=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
        bnc8=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
        bnc9=L.BatchNormalization(int(init_channel * (ap_factor ** 5))),
        bndc1=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
        bndc2=L.BatchNormalization(int(init_channel * (ap_factor ** 4))),
        bndc4=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
        bndc5=L.BatchNormalization(int(init_channel * (ap_factor ** 3))),
        bndc7=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
        bndc8=L.BatchNormalization(int(init_channel * (ap_factor ** 2))),
def __init__(self, in_channels, out_channels, ksize, stride, pad, dilate,
             groups, use_bias, use_bn, bn_eps, activation):
    # Parameter list inferred from the assignments below; default values in
    # the original signature are not shown in the source snippet.
    super(MixConvBlock, self).__init__()
    self.activate = (activation is not None)
    self.use_bn = use_bn
    with self.init_scope():
        self.conv = MixConv(
            in_channels=in_channels,
            out_channels=out_channels,
            ksize=ksize,
            stride=stride,
            pad=pad,
            dilate=dilate,
            groups=groups,
            use_bias=use_bias)
        if self.use_bn:
            self.bn = L.BatchNormalization(
                size=out_channels,
                eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)
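
A plausible forward pass for this block, sketched from the flags set in __init__ rather than taken from the source: the mixed convolution runs first, then batch normalization and the activation are applied only when enabled.

def forward(self, x):
    # Hypothetical forward sketch matching the flags set in __init__.
    x = self.conv(x)
    if self.use_bn:
        x = self.bn(x)
    if self.activate:
        x = self.activ(x)
    return x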
            # ... (handling of the preceding layer type is truncated in the
            # source snippet)
            bn_end_point = end_point + "_batchnorm"
            setattr(self, bn_end_point,
                    L.BatchNormalization(size=self.depth(conv_def.depth)))
            self.layer_names.append(bn_end_point)
            if end_point == final_endpoint:
                return
        elif isinstance(conv_def, DepthSepConv):
            # Depthwise-separable block: depthwise conv + BN, then
            # pointwise (1x1) conv + BN.
            end_point = end_point_base + '_depthwise'
            if use_explicit_padding:
                self.layer_names.append(
                    ("use_explicit_padding", conv_def.kernel, layer_rate))
            setattr(self, end_point, L.DepthwiseConvolution2D(
                in_channels=self.depth(conv_def.inchannel),
                channel_multiplier=1, ksize=conv_def.kernel,
                stride=layer_stride, pad=pad, nobias=False,
                initialW=initialW))
            self.layer_names.append(end_point)
            bn_end_point = end_point + "_batchnorm"
            setattr(self, bn_end_point,
                    L.BatchNormalization(size=self.depth(conv_def.inchannel)))
            self.layer_names.append(bn_end_point)
            if end_point == final_endpoint:
                return
            end_point = end_point_base + '_pointwise'
            setattr(self, end_point, L.Convolution2D(
                in_channels=self.depth(conv_def.inchannel),
                out_channels=self.depth(conv_def.depth),
                ksize=[1, 1], stride=1, pad=0, nobias=False,
                initialW=initialW))
            self.layer_names.append(end_point)
            bn_end_point = end_point + "_batchnorm"
            setattr(self, bn_end_point,
                    L.BatchNormalization(size=self.depth(conv_def.depth)))
            self.layer_names.append(bn_end_point)
            if end_point == final_endpoint:
                return
        else:
            raise ValueError('Unknown convolution type %s for layer %d'
                             % (conv_def.ltype, i))