import mxnet as mx
import numpy as np
from mxnet import gluon
from mxnet.test_utils import assert_almost_equal


def test_mkldnn_ndarray_slice():
    ctx = mx.cpu()
    net = gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(gluon.nn.Conv2D(channels=32, kernel_size=3, activation=None))
        net.collect_params().initialize(ctx=ctx)
        x = mx.nd.array(np.ones([32, 3, 224, 224]), ctx)
        y = net(x)
        # trigger computation on ndarray slice; the expected value assumes
        # the test runs with a fixed random seed for weight initialization
        assert_almost_equal(y[0].asnumpy()[0, 0, 0], 0.3376348)
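A minimal sketch of the same slice-then-compute pattern outside the test harness (shapes here are illustrative assumptions):

import mxnet as mx

x = mx.nd.ones((4, 3, 8, 8))   # plain CPU NDArray
view = x[0]                    # slicing returns a view; no computation yet
out = view.asnumpy()           # asnumpy() forces any pending (MKL-DNN) work
print(out.shape)               # (3, 8, 8)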
# fragment from a network __init__: stack the per-stage SE-ResNeXt units
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
    stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
    with stage.name_scope():
        for j, out_channels in enumerate(channels_per_stage):
            # downsample at the first unit of every stage except the first
            strides = 2 if (j == 0) and (i != 0) else 1
            stage.add(SEResNeXtUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = out_channels
    self.features.add(stage)
self.features.add(nn.AvgPool2D(
    pool_size=7,
    strides=1))

self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
    units=classes,
    in_units=in_channels))
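A self-contained sketch of the same stage-building loop, with plain 3x3 convolutions standing in for SEResNeXtUnit (the channel spec below is an assumption for illustration):

import mxnet as mx
from mxnet.gluon import nn

def build_stages(channels, in_channels):
    features = nn.HybridSequential(prefix="")
    for i, channels_per_stage in enumerate(channels):
        stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
        with stage.name_scope():
            for j, out_channels in enumerate(channels_per_stage):
                # same stride rule as above: downsample at stage boundaries
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(nn.Conv2D(out_channels, 3, strides, 1,
                                    in_channels=in_channels))
                in_channels = out_channels
        features.add(stage)
    return features

features = build_stages([[64, 64], [128, 128]], in_channels=64)
features.initialize()
print(features(mx.nd.ones((1, 64, 56, 56))).shape)  # (1, 128, 28, 28)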
# fragment from a DARTS network __init__, inside the per-stage unit loop
            stage.add(DartsUnit(
                in_channels=in_channels,
                prev_in_channels=prev_in_channels,
                out_channels=out_channels,
                genotype=genotype,
                reduction=reduction,
                prev_reduction=prev_reduction))
            # each cell consumes the outputs of its two predecessors
            prev_in_channels = in_channels
            in_channels = out_channels
    self.features.add(stage)
self.features.add(nn.AvgPool2D(
    pool_size=7,
    strides=1))

self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
    units=classes,
    in_units=in_channels))
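The prev_in_channels/in_channels shuffle above reflects that each DARTS cell takes both predecessor cells' outputs as inputs. A stripped-down sketch of just that bookkeeping (unit construction elided; the widths are assumed values):

prev_in_channels = in_channels = 48     # assumed stem width
for out_channels in (64, 128, 256):     # assumed per-stage widths
    # a DARTS cell would be built here from (in_channels, prev_in_channels)
    prev_in_channels, in_channels = in_channels, out_channels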
def embedding_from_numpy(_we, trainable=True):
    """Wrap a pretrained embedding matrix (numpy array) in an nn.Embedding.

    `apply_weight_drop` and `dropout_dim` come from the surrounding module.
    """
    word_embs = nn.Embedding(_we.shape[0], _we.shape[1],
                             weight_initializer=mx.init.Constant(_we))
    apply_weight_drop(word_embs, 'weight', dropout_dim, axes=(1,))
    if not trainable:
        # freeze the embedding weights
        word_embs.collect_params().setattr('grad_req', 'null')
    return word_embs
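Hypothetical usage, assuming `apply_weight_drop` and `dropout_dim` are defined by the surrounding module as noted above (the matrix here is random stand-in data):

import numpy as np
import mxnet as mx

pretrained = np.random.uniform(-0.1, 0.1, (10000, 300)).astype('float32')
embs = embedding_from_numpy(pretrained, trainable=False)
embs.initialize()
vectors = embs(mx.nd.array([1, 2, 3]))  # shape (3, 300)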
    LinearBottleneck(in_channels=128, channels=128, t=4, alpha=alpha, stride=1)
)
## mobilenet-v2, t=2, c=16, n=1, s=1
self.lmks_net.add(
    LinearBottleneck(in_channels=128, channels=16, t=2, alpha=alpha, stride=1),
)
## landmarks regression: base line
self.s2_conv = nn.Conv2D(channels=32, kernel_size=(3, 3), strides=(2, 2),
                         padding=(1, 1), activation=None, use_bias=True)
self.s2_bn = nn.BatchNorm(scale=True)
self.s2_act = nn.Activation('relu')
self.s3_conv = nn.Conv2D(channels=128, kernel_size=(3, 3), strides=(2, 2),
                         padding=(1, 1), activation=None, use_bias=True)
self.s3_bn = nn.BatchNorm(scale=True)
self.s3_act = nn.Activation('relu')
self.lmks_out = nn.HybridSequential()
self.lmks_out.add(
    nn.Conv2D(channels=num_of_pts * 2, kernel_size=(3, 3), strides=(1, 1),
              padding=(0, 0)),
)
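A quick shape check for the regression head: with a 3x3 feature map and no padding, the 3x3 convolution collapses the spatial dimensions to 1x1, leaving one (x, y) pair per landmark. The input size and num_of_pts=5 are assumptions for illustration:

import mxnet as mx
from mxnet.gluon import nn

head = nn.Conv2D(channels=5 * 2, kernel_size=(3, 3), strides=(1, 1), padding=(0, 0))
head.initialize()
out = head(mx.nd.ones((1, 128, 3, 3)))
print(out.shape)  # (1, 10, 1, 1)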
def __init__(self, layers, **kwargs):
    super(ResNetV2a, self).__init__(**kwargs)
    with self.name_scope():
        # pre-activation stem: BN -> 7x7/2 conv -> BN -> ReLU -> 3x3/2 max-pool
        self.layer0 = nn.HybridSequential(prefix='')
        self.layer0.add(nn.BatchNorm(scale=False, epsilon=2e-5, use_global_stats=True))
        self.layer0.add(nn.Conv2D(64, 7, 2, 3, use_bias=False))
        self.layer0.add(nn.BatchNorm(epsilon=2e-5, use_global_stats=True))
        self.layer0.add(nn.Activation('relu'))
        self.layer0.add(nn.MaxPool2D(3, 2, 1))

        self.layer1 = self._make_layer(stage_index=1, layers=layers[0], in_channels=64, channels=256, stride=1)
        self.layer2 = self._make_layer(stage_index=2, layers=layers[1], in_channels=256, channels=512, stride=2)
        self.layer3 = self._make_layer(stage_index=3, layers=layers[2], in_channels=512, channels=1024, stride=2)
        self.layer4 = self._make_layer(stage_index=4, layers=layers[3], in_channels=1024, channels=2048, stride=2)
        self.layer4.add(nn.BatchNorm(epsilon=2e-5, use_global_stats=True))
        self.layer4.add(nn.Activation('relu'))
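The stem halves the resolution twice (strided conv, then strided pool). A standalone sketch of just layer0 verifying the output shape, assuming a 224x224 input:

import mxnet as mx
from mxnet.gluon import nn

stem = nn.HybridSequential(prefix='')
stem.add(nn.BatchNorm(scale=False, epsilon=2e-5, use_global_stats=True))
stem.add(nn.Conv2D(64, 7, 2, 3, use_bias=False))
stem.add(nn.BatchNorm(epsilon=2e-5, use_global_stats=True))
stem.add(nn.Activation('relu'))
stem.add(nn.MaxPool2D(3, 2, 1))
stem.initialize()
print(stem(mx.nd.ones((1, 3, 224, 224))).shape)  # (1, 64, 56, 56)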
# fragment from a residual unit __init__: pick the body block, then add a
# 1x1 projection when the identity branch needs resizing
        bn_use_global_stats=bn_use_global_stats,
        conv1_stride=conv1_stride)
else:
    self.body = ResBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        bn_use_global_stats=bn_use_global_stats)
if self.resize_identity:
    self.identity_conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        bn_use_global_stats=bn_use_global_stats,
        activation=None)
self.activ = nn.Activation("relu")
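A sketch of the forward pass that typically pairs with this constructor, inferred from the attributes built above (the snippet itself does not show it):

def hybrid_forward(self, F, x):
    # project the identity branch only when shape or stride changes
    identity = self.identity_conv(x) if self.resize_identity else x
    x = self.body(x)
    x = x + identity
    return self.activ(x)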
def __call__(self):
    """
    Builds an mxnet block from the configured batchnorm, activation and
    dropout settings.
    :return: nn.HybridSequential containing the selected layers
    """
    block = nn.HybridSequential()
    if self.batchnorm:
        block.add(nn.BatchNorm())
    if self.activation_function:
        block.add(nn.Activation(activation=utils.get_mxnet_activation_name(self.activation_function)))
    if self.dropout_rate:
        block.add(nn.Dropout(self.dropout_rate))
    return block
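For one assumed configuration (batchnorm on, ReLU activation, dropout rate 0.5), the builder above produces the equivalent of:

from mxnet.gluon import nn

block = nn.HybridSequential()
block.add(nn.BatchNorm())
block.add(nn.Activation('relu'))
block.add(nn.Dropout(0.5))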
def __init__(self,
             channels,
             init_block_channels,
             in_channels=3,
             in_size=(224, 224),
             classes=1000,
             **kwargs):
    super(DiracNetV2, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes

    with self.name_scope():
        self.features = nn.HybridSequential(prefix="")
        self.features.add(DiracInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
            with stage.name_scope():
                for j, out_channels in enumerate(channels_per_stage):
                    stage.add(dirac_conv3x3(
                        in_channels=in_channels,
                        out_channels=out_channels))
                    in_channels = out_channels
                # downsample between stages, but not after the last one
                if i != len(channels) - 1:
                    stage.add(nn.MaxPool2D(
                        pool_size=2,
                        strides=2))