How to use the tensorpack.models.BatchNorm function in tensorpack

To help you get started, we’ve selected a few tensorpack examples based on popular ways tensorpack.models.BatchNorm is used in public projects.

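Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (the helper name conv_bn_relu is illustrative, not from tensorpack). BatchNorm is a registered tensorpack layer, so it takes a scope name as its first argument and the input tensor as the second:

import tensorflow as tf
from tensorpack.models import Conv2D, BatchNorm

def conv_bn_relu(name, x, channels):
    # The ordering used throughout the examples below: a linear Conv2D,
    # an explicit BatchNorm, then the nonlinearity.
    with tf.variable_scope(name):
        x = Conv2D('conv', x, channels, 3, activation=tf.identity)
        x = BatchNorm('bn', x)
        return tf.nn.relu(x)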

osmr/imgclsmob - tensorflow_/tensorflowcv/models/others/resnet_.py (view on GitHub)
def get_logits(self, x):
        with argscope([Conv2D, MaxPooling, AvgPooling, GlobalAvgPooling, BatchNorm], data_format='channels_first'):

            x = res_init_block(
                x=x,
                in_channels=self.in_channels,
                out_channels=self.init_block_channels,
                name="features/init_block")
            in_channels = self.init_block_channels
            for i, channels_per_stage in enumerate(self.channels):
                for j, out_channels in enumerate(channels_per_stage):
                    strides = 2 if (j == 0) and (i != 0) else 1
                    x = res_unit(
                        "features/stage{}/unit{}".format(i + 1, j + 1),
                        x,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
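The BatchNorm-specific takeaway from this snippet is the argscope line: it sets data_format='channels_first' once for BatchNorm and the other registered layers, so no individual call has to repeat it. Below is a condensed sketch of the same idea, with illustrative layer names rather than the repository's res_init_block/res_unit helpers:

import tensorflow as tf
from tensorpack import argscope
from tensorpack.models import Conv2D, BatchNorm, GlobalAvgPooling, FullyConnected

def get_logits(x, num_classes=1000):
    # Every Conv2D/BatchNorm/GlobalAvgPooling call inside this scope
    # inherits data_format='channels_first' (NCHW) without repeating it.
    with argscope([Conv2D, BatchNorm, GlobalAvgPooling], data_format='channels_first'):
        x = Conv2D('conv0', x, 64, 7, strides=2, activation=tf.identity)
        x = BatchNorm('bn0', x)
        x = tf.nn.relu(x)
        x = GlobalAvgPooling('gap', x)
        return FullyConnected('linear', x, num_classes)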
microsoft/petridishnn - petridish/model/layer.py (view on GitHub)
def projection_layer(name, layer, out_filters, ch_dim, id_mask_slice=None):
    with tf.variable_scope(name):
        n_dim = len(layer.get_shape().as_list())
        if n_dim == 4:
            layer = tf.nn.relu(layer)
            layer = Conv2D('conv1x1_proj', layer, out_filters, 1, strides=1, activation=tf.identity)
            layer = BatchNorm('bn_proj', layer)
        elif n_dim == 2:
            layer = tf.nn.relu(layer)
            layer = FullyConnected('fc_proj', layer, out_filters, activation=tf.identity)
        else:
            raise ValueError("Projection cannot handle tensor of dim {}".format(n_dim))
        return layer
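projection_layer illustrates a recurring tensorpack idiom: a Conv2D with activation=tf.identity followed by an explicit BatchNorm call. The alternative is to fuse the normalization into the conv by passing activation=BNReLU, which additionally applies a ReLU. A small sketch of both styles (the helper names are illustrative):

import tensorflow as tf
from tensorpack.models import Conv2D, BatchNorm, BNReLU

def project_explicit(x, out_filters):
    # Explicit style: the conv produces raw pre-activations,
    # BatchNorm is applied as a separate layer.
    x = Conv2D('conv1x1', x, out_filters, 1, activation=tf.identity)
    return BatchNorm('bn', x)

def project_fused(x, out_filters):
    # Fused style: BNReLU runs BatchNorm followed by ReLU as the conv's
    # activation, so it also adds a nonlinearity.
    return Conv2D('conv1x1', x, out_filters, 1, activation=BNReLU)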
vqdang/hover_net - src/model/micronet.py (view on GitHub)
def aux_branch(name, main_in, up_kernel, up_strides):
            ch = main_in.get_shape().as_list()[1] # NCHW
            with tf.variable_scope(name): # preserve the depth
                a = Conv2DTranspose('up', main_in, ch, up_kernel, strides=up_strides, padding='same', use_bias=True, activation=tf.identity)
                a = Conv2D('conv', a, self.nr_classes, 3, padding='valid', activation=tf.nn.relu)
                a = tf.layers.dropout(a, rate=0.5, seed=5, training=is_training)
            return a

        #### Xavier initializer
        with argscope(Conv2D, activation=tf.identity, 
                    kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=True),
                    bias_initializer=tf.constant_initializer(0.1)), \
             argscope(Conv2DTranspose, activation=tf.identity, 
                    kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=True),
                    bias_initializer=tf.constant_initializer(0.1)), \
                argscope([Conv2D, Conv2DTranspose, MaxPooling, BatchNorm], data_format=self.data_format):

            i = tf.transpose(images / 255.0, [0, 3, 1, 2]) # our way
            resize_func = lambda x, y: resize_op(x, size=y, interp='bicubic', data_format='channels_first')

            ####
            b1 = down_branch('b1',  i, resize_func(i, (128, 128)),  64)
            b2 = down_branch('b2', b1, resize_func(i, ( 64,  64)), 128)
            b3 = down_branch('b3', b2, resize_func(i, ( 32,  32)), 256)
            b4 = down_branch('b4', b3, resize_func(i, ( 16,  16)), 512)

            with tf.variable_scope('b5'):
                b5 = Conv2D('conv1', b4, 2048, 3, padding='valid', use_bias=True, activation=tf.nn.relu)
                b5 = Conv2D('conv2', b5, 2048, 3, padding='valid', use_bias=True, activation=tf.nn.relu)
            b6 = up_branch('b6', b5, b4, 1024)
            b7 = up_branch('b7', b6, b3, 512)
            b8 = up_branch('b8', b7, b2, 256)
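As in the previous examples, the argscope stack does the heavy lifting here: one scope sets activation and initializer defaults for the conv layers, while a second scope hands the same data_format to Conv2D, Conv2DTranspose, MaxPooling and BatchNorm together. A stripped-down sketch of that layering (build_stem is a hypothetical name, not from the repository):

import tensorflow as tf
from tensorpack import argscope
from tensorpack.models import Conv2D, MaxPooling, BatchNorm

def build_stem(images, data_format='channels_first'):
    # Two stacked argscopes: one sets conv-only defaults, the other sets
    # the data format shared by Conv2D, MaxPooling and BatchNorm alike.
    with argscope(Conv2D, activation=tf.identity), \
         argscope([Conv2D, MaxPooling, BatchNorm], data_format=data_format):
        x = Conv2D('conv1', images, 64, 3)
        x = BatchNorm('bn1', x)
        x = tf.nn.relu(x)
        return MaxPooling('pool1', x, 2)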
wanggrun/Learning-Feature-Pyramids - pyramid/VOC/resnet_model_voc_aspp.py (view on GitHub)
        return lambda x, name=None: BatchNorm('bn', x, gamma_initializer=tf.zeros_initializer())
    else:
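The fragment above (truncated on this page) returns a BatchNorm whose gamma is initialized to zero, the standard trick for the last BN of a residual branch: with gamma at zero the branch initially outputs zeros, so each block starts out close to an identity mapping. A minimal sketch of the pattern, assuming a get_bn-style helper like the one in tensorpack's ResNet example:

import tensorflow as tf
from tensorpack.models import BatchNorm

def get_bn(zero_init=False):
    # Zero-init gamma for the last BN of a residual branch
    # (the trick described in https://arxiv.org/abs/1706.02677).
    if zero_init:
        return lambda x, name=None: BatchNorm('bn', x, gamma_initializer=tf.zeros_initializer())
    return lambda x, name=None: BatchNorm('bn', x)

The returned callable can then be passed as a conv activation, e.g. Conv2D('conv3', l, ch_out, 3, activation=get_bn(zero_init=True)).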
microsoft/petridishnn - petridish/model/layer.py (view on GitHub)
def residual_bottleneck_layer(name, l, out_filters, strides, data_format):
    data_format = get_data_format(data_format, keras_mode=False)
    ch_dim = 3 if data_format == 'NHWC' else 1
    ch_in = _get_dim(l, ch_dim)

    ch_base = out_filters
    ch_last = ch_base * 4
    l_in = l
    with tf.variable_scope('{}.0'.format(name)):
        l = BatchNorm('bn0', l)
        l = tf.nn.relu(l)
        l = (LinearWrap(l)
             .Conv2D('conv1x1_0', ch_base, 1, activation=BNReLU)
             .Conv2D('conv3x3_1', ch_base, 3, strides=strides, activation=BNReLU)
             .Conv2D('conv1x1_2', ch_last, 1)())
        l = BatchNorm('bn_3', l)

        shortcut = l_in
        if ch_in != ch_last:
            shortcut = Conv2D('conv_short', shortcut, ch_last, 1, strides=strides)
            shortcut = BatchNorm('bn_short', shortcut)
        l = l + shortcut
    return l
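residual_bottleneck_layer keeps BatchNorm in three places: an explicit BatchNorm('bn0', ...) plus ReLU before the first conv (pre-activation), BNReLU fused into the two inner convs, and a final BatchNorm with no ReLU so the addition with the shortcut happens on normalized values (the projected shortcut gets its own 'bn_short'). A stripped-down sketch of just the branch, with illustrative names:

import tensorflow as tf
from tensorpack.models import Conv2D, BatchNorm, BNReLU

def preact_bottleneck_branch(x, ch_base, strides=1):
    # Pre-activation ordering: BN + ReLU first, BNReLU fused into the inner
    # convs, and a final linear BN before the residual addition.
    # The caller adds the (possibly projected) shortcut.
    x = tf.nn.relu(BatchNorm('bn0', x))
    x = Conv2D('conv1x1_a', x, ch_base, 1, activation=BNReLU)
    x = Conv2D('conv3x3_b', x, ch_base, 3, strides=strides, activation=BNReLU)
    x = Conv2D('conv1x1_c', x, ch_base * 4, 1, activation=tf.identity)
    return BatchNorm('bn_out', x)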
microsoft/petridishnn - petridish/model/layer.py (view on GitHub)
def residual_layer(name, l, out_filters, strides, data_format):
    ch_out = out_filters
    data_format = get_data_format(data_format, keras_mode=False)
    ch_dim = 3 if data_format == 'NHWC' else 1
    ch_in = _get_dim(l, ch_dim)

    l_in = l
    with tf.variable_scope('{}.0'.format(name)):
        l = BNReLU(l)
        l = SeparableConv2D('conv1', l, ch_out, 3, strides=strides, activation=BNReLU)
        l = SeparableConv2D('conv2', l, ch_out, 3)
        # The second conv needs a BN before the addition.
        l = BatchNorm('bn2', l)

        shortcut = l_in
        if strides > 1:
            shortcut = AvgPooling('pool', shortcut, 2)
        if ch_in < ch_out:
            pad_paddings = [[0, 0], [0, 0], [0, 0], [0, 0]]
            pad_width = (ch_out - ch_in)
            pad_paddings[ch_dim] = [0, pad_width]
            shortcut = tf.pad(shortcut, pad_paddings)
        elif ch_in > ch_out:
            if data_format == 'NHWC':
                shortcut1 = shortcut[:, :, :, :ch_out]
                shortcut2 = shortcut[:, :, :, ch_out:]
            else:
                shortcut1 = shortcut[:, :ch_out, :, :]
                shortcut2 = shortcut[:, ch_out:, :, :]
microsoft/petridishnn - petridish/model/layer.py (view on GitHub)
'softmax_linear', layer, 1, activation=tf.identity)
                            logit = tf.reshape(logit, [-1]) # batch
                            logits.append(logit)
                    logits = tf.stack(logits, axis=1) # batch x len(new_layer)
                    probs = tf.nn.softmax(logits, axis=1) # batch
                    for li, layer in enumerate(new_layer):
                        new_layer[li] = probs[:, li] * layer
                    layer = tf.add_n(new_layer, name='sum_feats')

                else:
                    raise ValueError("Unknown merge operation in info {}".format(
                        layer_info))

                # batch normalization for all non-concat-based merges.
                if bn_after_merge:
                    layer = BatchNorm('bn_after_merge', layer)
            # end else for concat vs non-cat merges.
        else:
            raise ValueError("Layer {} has empty input edges. The info: {}".format(
                name, layer_info))
        return layer
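The BatchNorm usage in this snippet is the bn_after_merge step: when candidate layers are merged by a weighted sum rather than a concat, a single BatchNorm is applied to the merged tensor. A minimal sketch of that idea (the helper name and flag are illustrative):

import tensorflow as tf
from tensorpack.models import BatchNorm

def merge_by_sum(name, layers, bn_after_merge=True):
    # Sum the candidate feature maps, then normalize the result once.
    with tf.variable_scope(name):
        merged = tf.add_n(layers, name='sum_feats')
        if bn_after_merge:
            merged = BatchNorm('bn_after_merge', merged)
        return merged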
microsoft/petridishnn - petridish/model/recurrrent.py (view on GitHub)
with argscope(
                    [
                        Conv2D, Deconv2D, GroupedConv2D, AvgPooling,
                        MaxPooling, BatchNorm, GlobalAvgPooling,
                        ResizeImages, SeparableConv2D
                    ],
                    data_format=self.data_format
                ), \
                argscope(
                    [Conv2D, Deconv2D, GroupedConv2D, SeparableConv2D],
                    activation=tf.identity,
                    use_bias=self.options.use_bias
                ), \
                argscope(
                    [BatchNorm],
                    center=False,
                    scale=False,
                    decay=self.options.batch_norm_decay,
                    epsilon=self.options.batch_norm_epsilon
                ), \
                argscope(
                    [candidate_gated_layer],
                    eps=self.options.candidate_gate_eps
                ):

            initializer = tf.random_uniform_initializer(-self.init_range, self.init_range)
            hid_to_fs_params = _init_feature_select(
                self.layer_info_list, 'master', self.options.feat_sel_lambda)
            seq, embedding_w = self._embed_input_if_int(seq, initializer=initializer)
            basic_cells = [
                self._basic_cell(
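Here argscope is used not only for data_format but also to give every BatchNorm the same affine and moving-average settings (center, scale, decay, epsilon). A minimal sketch of that configuration (build_cell is a hypothetical helper; the decay argument matches the older tensorpack API this repository pins, while newer releases name the moving-average argument momentum):

import tensorflow as tf
from tensorpack import argscope
from tensorpack.models import Conv2D, BatchNorm

def build_cell(x, channels, bn_decay=0.9, bn_epsilon=1e-5):
    # Set BatchNorm defaults once for the whole scope instead of repeating
    # center/scale/decay/epsilon at every call site.
    with argscope([BatchNorm], center=False, scale=False,
                  decay=bn_decay, epsilon=bn_epsilon), \
         argscope([Conv2D], activation=tf.identity):
        x = Conv2D('conv', x, channels, 3)
        x = BatchNorm('bn', x)
        return tf.nn.relu(x)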
microsoft/petridishnn - petridish/model/layer.py (view on GitHub)
elif operation == LayerTypes.IDENTITY:
        if strides == 1:
            return tf.identity(layer, name='id')
        else:
            return _factorized_reduction('id_reduction', layer, out_filters, data_format)

    elif operation == LayerTypes.RESIDUAL_LAYER:
        return residual_layer('res', layer, out_filters, strides, data_format)

    elif operation == LayerTypes.RESIDUAL_BOTTLENECK_LAYER:
        return residual_bottleneck_layer('res_btl', layer, out_filters, strides, data_format)

    elif operation == LayerTypes.CONV_1:
        layer = tf.nn.relu(layer)
        layer = Conv2D('conv1x1', layer, out_filters, 1, strides=strides)
        layer = BatchNorm('bn', layer)
        return layer

    elif operation == LayerTypes.CONV_3:
        layer = tf.nn.relu(layer)
        layer = Conv2D('conv3x3', layer, out_filters, 3, strides=strides)
        layer = BatchNorm('bn', layer)
        return layer

    elif operation == LayerTypes.SEPARABLE_CONV_3:
        layer = tf.nn.relu(layer)
        layer = Conv2D('conv1x1', layer, out_filters, 1, strides=1, activation=BNReLU)
        layer = SeparableConv2D('sep_conv3x3_1', layer, out_filters, 3, strides=strides)
        layer = BatchNorm('bn', layer)
        return layer

    elif operation == LayerTypes.SEPARABLE_CONV_5:
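The CONV_1 and CONV_3 branches above repeat one small motif: ReLU, then a linear Conv2D, then BatchNorm. If you need the same motif elsewhere, a minimal sketch (relu_conv_bn is a hypothetical helper, not part of petridishnn):

import tensorflow as tf
from tensorpack.models import Conv2D, BatchNorm

def relu_conv_bn(name, layer, out_filters, kernel, strides=1):
    # relu -> linear conv -> BN, the ordering used by the CONV_* operations.
    with tf.variable_scope(name):
        layer = tf.nn.relu(layer)
        layer = Conv2D('conv', layer, out_filters, kernel, strides=strides,
                       activation=tf.identity)
        return BatchNorm('bn', layer)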