How to use the ops.conditional_normalization function

To help you get started, we’ve selected a few ops examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github linjieyangsc / video_seg / osmn_vs.py View on Github external
with slim.arg_scope([slim.conv2d],
                                padding='SAME',
                                outputs_collections=end_points_collection):
              with slim.arg_scope([slim.max_pool2d], padding='SAME'):
                net = slim.repeat(inputs[2], 2, slim.conv2d, 64, [3, 3], scope='conv1')
                net = slim.max_pool2d(net, [2, 2], scope='pool1')
                net_2 = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
                net = slim.max_pool2d(net_2, [2, 2], scope='pool2')
                net_3 = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
                net_4 = slim.max_pool2d(net_3, [2, 2], scope='pool3')
                prev_mod_id = 0
                prev_sp_id = 0
                for i in range(3):
                    net_4 = slim.conv2d(net_4, 512, [3,3], scope='conv4/conv4_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param4')
                    net_4 = conditional_normalization(net_4, m_params, scope='conv4/conv4_{}'.format(i+1))
                    prev_mod_id += 512
                    if not sp_late_fusion:
                        sp_params = tf.slice(conv4_att, [0, 0, 0, prev_sp_id], [batch_size, -1, -1 , 512], name = 'm_sp_param4')
                        net_4 = tf.add(net_4, sp_params)
                        prev_sp_id += 512
                net_5 = slim.max_pool2d(net_4, [2, 2], scope='pool4')
                prev_sp_id = 0
                for i in range(3):
                    net_5 = slim.conv2d(net_5, 512, [3, 3], scope='conv5/conv5_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param5')
                    net_5 = conditional_normalization(net_5, m_params, scope='conv5/conv5_{}'.format(i+1))
                    prev_mod_id += 512
                    if not sp_late_fusion:
                        sp_params = tf.slice(conv5_att, [0, 0, 0, prev_sp_id], [batch_size, -1, -1, 512], name='m_sp_param5')
                        net_5 = tf.add(net_5, sp_params)
                        prev_sp_id += 512
github linjieyangsc / video_seg / osmn_vs.py View on Github external
activation_fn=None, biases_initializer=None, padding='VALID',
                                        outputs_collections=end_points_collection, trainable=False):
                        
                        # Main output
                        side_2_f = slim.convolution2d_transpose(side_2, 16, 4, 2, scope='score-multi2-up')
                        side_2_f = crop_features(side_2_f, im_size)
                        side_3_f = slim.convolution2d_transpose(side_3, 16, 8, 4, scope='score-multi3-up')
                        side_3_f = crop_features(side_3_f, im_size)
                        side_4_f = slim.convolution2d_transpose(side_4, 16, 16, 8, scope='score-multi4-up')
                        side_4_f = crop_features(side_4_f, im_size)
                        side_5_f = slim.convolution2d_transpose(side_5, 16, 32, 16, scope='score-multi5-up')
                        side_5_f = crop_features(side_5_f, im_size)
                    concat_side = tf.concat([side_2_f, side_3_f, side_4_f, side_5_f], axis=3)
                    if mod_last_conv:
                        m_params = tf.slice(modulator_params, [0, prev_mod_id], [batch_size, 64], name='m_param_fuse')
                        concat_side = conditional_normalization(concat_side, m_params, scope='conat')

                    with slim.arg_scope([slim.conv2d],
                                        trainable=True, normalizer_fn=None):
                        net = slim.conv2d(concat_side, 1, [1, 1], scope='upscore-fuse')

        end_points = slim.utils.convert_collection_to_dict(end_points_collection)
        return net, end_points
github linjieyangsc / video_seg / osmn_pretrain.py View on Github external
net = slim.max_pool2d(net, [2, 2], scope='pool1')
                net_2 = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
                #m_params = tf.slice(modulator_params, [0,64], [1,128], name = 'm_param2')
                #net_2 = conditional_normalization(net_2, m_params, scope='conv2')
                net = slim.max_pool2d(net_2, [2, 2], scope='pool2')
                net_3 = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
                #m_params = tf.slice(modulator_params, [0,0], [1,256], name = 'm_param3')
                #net_3 = conditional_normalization(net_3, m_params, scope='conv3')
                net = slim.max_pool2d(net_3, [2, 2], scope='pool3')
                net_4 = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
                m_params = tf.slice(modulator_params, [0,0], [batch_size,512], name = 'm_param4')
                net_4 = conditional_normalization(net_4, m_params, scope='conv4')
                net = slim.max_pool2d(net_4, [2, 2], scope='pool4')
                net_5 = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
                m_params = tf.slice(modulator_params, [0,512], [batch_size,512], name = 'm_param5')
                net_5 = conditional_normalization(net_5, m_params, scope='conv5')
                # Get side outputs of the network
                with slim.arg_scope([slim.conv2d],
                                    activation_fn=None):
                    side_2 = slim.conv2d(net_2, 16, [3, 3], scope='conv2_2_16')
                    side_3 = slim.conv2d(net_3, 16, [3, 3], scope='conv3_3_16')
                    side_4 = slim.conv2d(net_4, 16, [3, 3], scope='conv4_3_16')
                    side_5 = slim.conv2d(net_5, 16, [3, 3], scope='conv5_3_16')

                    with slim.arg_scope([slim.convolution2d_transpose],
                                        activation_fn=None, biases_initializer=None, padding='VALID',
                                        outputs_collections=end_points_collection, trainable=False):
                        
                        # Main output
                        side_2_f = slim.convolution2d_transpose(side_2, 16, 4, 2, scope='score-multi2-up')
                        side_2_f = crop_features(side_2_f, im_size)
                        side_3_f = slim.convolution2d_transpose(side_3, 16, 8, 4, scope='score-multi3-up')
github linjieyangsc / video_seg / osmn_vs.py View on Github external
def modulated_conv_block(net, repeat, channels, scope_id=0, visual_mod_id = 0,
        visual_modulation_params = None,
        spatial_modulation_params = None,
        visual_modulation = False,
        spatial_modulation = False):
    """Stack `repeat` 3x3 conv layers, optionally modulating each one.

    After every convolution, a per-channel visual modulation (applied through
    conditional_normalization) and/or an additive spatial modulation may be
    applied, each consuming a `channels`-wide slice of the corresponding
    parameter tensor.

    Returns the output tensor together with the advanced `visual_mod_id`
    offset, so callers can chain several blocks over one flat visual
    modulation parameter tensor.
    """
    spatial_mod_id = 0
    for layer_idx in range(1, repeat + 1):
        layer_scope = 'conv{}/conv{}_{}'.format(scope_id, scope_id, layer_idx)
        net = slim.conv2d(net, channels, [3, 3], scope=layer_scope)
        if visual_modulation:
            # Take the next `channels` modulation coefficients for this layer.
            vis_params = tf.slice(visual_modulation_params,
                    [0, visual_mod_id], [-1, channels],
                    name='m_param{}'.format(scope_id))
            net = conditional_normalization(net, vis_params, scope=layer_scope)
            visual_mod_id += channels
        if spatial_modulation:
            # Spatial parameters are sliced along the channel axis (NHWC).
            sp_params = tf.slice(spatial_modulation_params,
                    [0, 0, 0, spatial_mod_id], [-1, -1, -1, channels],
                    name='m_sp_param{}'.format(scope_id))
            net = tf.add(net, sp_params)
            spatial_mod_id += channels
    return net, visual_mod_id
github linjieyangsc / video_seg / osmn_pretrain3.py View on Github external
#net = conditional_normalization(net, m_params, scope='conv1')
                net = slim.max_pool2d(net, [2, 2], scope='pool1')
                net_2 = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
                #m_params = tf.slice(modulator_params, [0,64], [1,128], name = 'm_param2')
                #net_2 = conditional_normalization(net_2, m_params, scope='conv2')
                net = slim.max_pool2d(net_2, [2, 2], scope='pool2')
                net_3 = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
                #m_params = tf.slice(modulator_params, [0,0], [1,256], name = 'm_param3')
                #net_3 = conditional_normalization(net_3, m_params, scope='conv3')
                net_4 = slim.max_pool2d(net_3, [2, 2], scope='pool3')
                #net_4 = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
                prev_mod_id = 0
                for i in range(3):
                    net_4 = slim.conv2d(net_4, 512, [3,3], scope='conv4/conv4_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param4')
                    net_4 = conditional_normalization(net_4, m_params, scope='conv4/conv4_{}'.format(i+1))
                    prev_mod_id += 512
                net_5 = slim.max_pool2d(net_4, [2, 2], scope='pool4')
                for i in range(3):
                    net_5 = slim.conv2d(net_5, 512, [3, 3], scope='conv5/conv5_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param5')
                    net_5 = conditional_normalization(net_5, m_params, scope='conv5/conv5_{}'.format(i+1))
                    prev_mod_id += 512
                # Get side outputs of the network
                with slim.arg_scope([slim.conv2d],
                                    activation_fn=None):
                    side_2 = slim.conv2d(net_2, 16, [3, 3], scope='conv2_2_16')
                    side_3 = slim.conv2d(net_3, 16, [3, 3], scope='conv3_3_16')
                    side_4 = slim.conv2d(net_4, 16, [3, 3], scope='conv4_3_16')
                    side_5 = slim.conv2d(net_5, 16, [3, 3], scope='conv5_3_16')

                    with slim.arg_scope([slim.convolution2d_transpose],
github linjieyangsc / video_seg / osmn_pretrain3_init.py View on Github external
with slim.arg_scope([slim.convolution2d_transpose],
                                        activation_fn=None, biases_initializer=None, padding='VALID',
                                        outputs_collections=end_points_collection, trainable=False):
                        
                        # Main output
                        side_2_f = slim.convolution2d_transpose(side_2, 16, 4, 2, scope='score-multi2-up')
                        side_2_f = crop_features(side_2_f, im_size)
                        side_3_f = slim.convolution2d_transpose(side_3, 16, 8, 4, scope='score-multi3-up')
                        side_3_f = crop_features(side_3_f, im_size)
                        side_4_f = slim.convolution2d_transpose(side_4, 16, 16, 8, scope='score-multi4-up')
                        side_4_f = crop_features(side_4_f, im_size)
                        side_5_f = slim.convolution2d_transpose(side_5, 16, 32, 16, scope='score-multi5-up')
                        side_5_f = crop_features(side_5_f, im_size)
                    concat_side = tf.concat([side_2_f, side_3_f, side_4_f, side_5_f], axis=3)
                    m_params = tf.slice(modulator_params, [0, prev_mod_id], [batch_size, 64], name='m_param6')
                    concat_side = conditional_normalization(concat_side, m_params, scope='concat')
                    prev_mod_id += 64
                    with slim.arg_scope([slim.conv2d],
                                        trainable=True, normalizer_fn=None):
                        net = slim.conv2d(concat_side, 1, [1, 1], scope='upscore-fuse')

        end_points = slim.utils.convert_collection_to_dict(end_points_collection)
        return net, end_points
github linjieyangsc / video_seg / osmn_pretrain3.py View on Github external
with slim.arg_scope([slim.convolution2d_transpose],
                                        activation_fn=None, biases_initializer=None, padding='VALID',
                                        outputs_collections=end_points_collection, trainable=False):
                        
                        # Main output
                        side_2_f = slim.convolution2d_transpose(side_2, 16, 4, 2, scope='score-multi2-up')
                        side_2_f = crop_features(side_2_f, im_size)
                        side_3_f = slim.convolution2d_transpose(side_3, 16, 8, 4, scope='score-multi3-up')
                        side_3_f = crop_features(side_3_f, im_size)
                        side_4_f = slim.convolution2d_transpose(side_4, 16, 16, 8, scope='score-multi4-up')
                        side_4_f = crop_features(side_4_f, im_size)
                        side_5_f = slim.convolution2d_transpose(side_5, 16, 32, 16, scope='score-multi5-up')
                        side_5_f = crop_features(side_5_f, im_size)
                    concat_side = tf.concat([side_2_f, side_3_f, side_4_f, side_5_f], axis=3)
                    m_params = tf.slice(modulator_params, [0, prev_mod_id], [batch_size, 64], name='m_param6')
                    concat_side = conditional_normalization(concat_side, m_params, scope='concat')
                    prev_mod_id += 64
                    with slim.arg_scope([slim.conv2d],
                                        trainable=True, normalizer_fn=None):
                        net = slim.conv2d(concat_side, 1, [1, 1], scope='upscore-fuse')

        end_points = slim.utils.convert_collection_to_dict(end_points_collection)
        return net, end_points
github linjieyangsc / video_seg / osmn_pretrain3_init.py View on Github external
net_3 = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
                #m_params = tf.slice(modulator_params, [0,0], [1,256], name = 'm_param3')
                #net_3 = conditional_normalization(net_3, m_params, scope='conv3')
                net_4 = slim.max_pool2d(net_3, [2, 2], scope='pool3')
                #net_4 = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
                prev_mod_id = 0
                for i in range(3):
                    net_4 = slim.conv2d(net_4, 512, [3,3], scope='conv4/conv4_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param4')
                    net_4 = conditional_normalization(net_4, m_params, scope='conv4/conv4_{}'.format(i+1))
                    prev_mod_id += 512
                net_5 = slim.max_pool2d(net_4, [2, 2], scope='pool4')
                for i in range(3):
                    net_5 = slim.conv2d(net_5, 512, [3, 3], scope='conv5/conv5_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param5')
                    net_5 = conditional_normalization(net_5, m_params, scope='conv5/conv5_{}'.format(i+1))
                    prev_mod_id += 512
                # Get side outputs of the network
                with slim.arg_scope([slim.conv2d],
                                    activation_fn=None):
                    side_2 = slim.conv2d(net_2, 16, [3, 3], scope='conv2_2_16')
                    side_3 = slim.conv2d(net_3, 16, [3, 3], scope='conv3_3_16')
                    side_4 = slim.conv2d(net_4, 16, [3, 3], scope='conv4_3_16')
                    side_5 = slim.conv2d(net_5, 16, [3, 3], scope='conv5_3_16')

                    with slim.arg_scope([slim.convolution2d_transpose],
                                        activation_fn=None, biases_initializer=None, padding='VALID',
                                        outputs_collections=end_points_collection, trainable=False):
                        
                        # Main output
                        side_2_f = slim.convolution2d_transpose(side_2, 16, 4, 2, scope='score-multi2-up')
                        side_2_f = crop_features(side_2_f, im_size)
github linjieyangsc / video_seg / osmn_pretrain2.py View on Github external
#net = conditional_normalization(net, m_params, scope='conv1')
                net = slim.max_pool2d(net, [2, 2], scope='pool1')
                net_2 = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
                #m_params = tf.slice(modulator_params, [0,64], [1,128], name = 'm_param2')
                #net_2 = conditional_normalization(net_2, m_params, scope='conv2')
                net = slim.max_pool2d(net_2, [2, 2], scope='pool2')
                net_3 = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
                #m_params = tf.slice(modulator_params, [0,0], [1,256], name = 'm_param3')
                #net_3 = conditional_normalization(net_3, m_params, scope='conv3')
                net_4 = slim.max_pool2d(net_3, [2, 2], scope='pool3')
                #net_4 = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
                prev_mod_id = 0
                for i in range(3):
                    net_4 = slim.conv2d(net_4, 512, [3,3], scope='conv4/conv4_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param4')
                    net_4 = conditional_normalization(net_4, m_params, scope='conv4/conv4_{}'.format(i+1))
                    prev_mod_id += 512
                net_5 = slim.max_pool2d(net_4, [2, 2], scope='pool4')
                for i in range(3):
                    net_5 = slim.conv2d(net_5, 512, [3, 3], scope='conv5/conv5_{}'.format(i+1))
                    m_params = tf.slice(modulator_params, [0,prev_mod_id], [batch_size,512], name = 'm_param5')
                    net_5 = conditional_normalization(net_5, m_params, scope='conv5/conv5_{}'.format(i+1))
                    prev_mod_id += 512
                # Get side outputs of the network
                with slim.arg_scope([slim.conv2d],
                                    activation_fn=None):
                    side_2 = slim.conv2d(net_2, 16, [3, 3], scope='conv2_2_16')
                    side_3 = slim.conv2d(net_3, 16, [3, 3], scope='conv3_3_16')
                    side_4 = slim.conv2d(net_4, 16, [3, 3], scope='conv4_3_16')
                    side_5 = slim.conv2d(net_5, 16, [3, 3], scope='conv5_3_16')

                    with slim.arg_scope([slim.convolution2d_transpose],