How to use the keras.layers.BatchNormalization function in Keras

To help you get started, we’ve selected a few Keras examples that show how keras.layers.BatchNormalization is used in popular open-source projects.

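Before the project snippets, here is a minimal, self-contained sketch of the usual Dense / BatchNormalization / Activation ordering with the functional API. The layer sizes, data, and loss below are illustrative and not taken from any of the projects.

import numpy as np
from keras.models import Model
from keras.layers import Input, Dense, BatchNormalization, Activation

inputs = Input(shape=(32,))
x = Dense(64, use_bias=False)(inputs)   # bias is redundant when BN follows
x = BatchNormalization()(x)             # normalize the pre-activations per batch
x = Activation('relu')(x)
outputs = Dense(1, activation='sigmoid')(x)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(np.random.rand(128, 32), np.random.randint(0, 2, size=(128, 1)),
          epochs=1, batch_size=16)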

github theislab / trVAE / tests / mmd_resnet.py (view on GitHub)
    block1_w2 = Dense(gex_size, activation='linear', kernel_regularizer=l2(l2_penalty),
                      kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block1_a2)
    block1_output = add([block1_w2, calibInput])
    block2_bn1 = BatchNormalization()(block1_output)
    block2_a1 = Activation('relu')(block2_bn1)
    block2_w1 = Dense(mmdNetLayerSizes[1], activation='linear', kernel_regularizer=l2(l2_penalty),
                      kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a1)
    block2_bn2 = BatchNormalization()(block2_w1)
    block2_a2 = Activation('relu')(block2_bn2)
    block2_w2 = Dense(gex_size, activation='linear', kernel_regularizer=l2(l2_penalty),
                      kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block2_a2)
    block2_output = add([block2_w2, block1_output])
    block3_bn1 = BatchNormalization()(block2_output)
    block3_a1 = Activation('relu')(block3_bn1)
    block3_w1 = Dense(mmdNetLayerSizes[1], activation='linear', kernel_regularizer=l2(l2_penalty),
                      kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block3_a1)
    block3_bn2 = BatchNormalization()(block3_w1)
    block3_a2 = Activation('relu')(block3_bn2)
    block3_w2 = Dense(gex_size, activation='linear', kernel_regularizer=l2(l2_penalty),
                      kernel_initializer=initializers.RandomNormal(stddev=1e-4))(block3_a2)
    block3_output = add([block3_w2, block2_output])

    calibMMDNet = Model(inputs=calibInput, outputs=block3_output)


    # learning rate schedule
    def step_decay(epoch):
        initial_lrate = 0.001
        drop = 0.1
        epochs_drop = 150.0
        lrate = initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
        return lrate
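step_decay is a standard step-wise learning-rate schedule; it is typically passed to model.fit through Keras' LearningRateScheduler callback. Below is a hedged sketch of that wiring with a toy model standing in for calibMMDNet, since the project's actual loss, optimizer, and training data are not shown in this excerpt.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Activation
from keras.callbacks import LearningRateScheduler

# toy stand-in for calibMMDNet; reuses the step_decay function defined above
model = Sequential([Dense(8, input_shape=(4,), use_bias=False),
                    BatchNormalization(), Activation('relu'), Dense(1)])
model.compile(optimizer='sgd', loss='mse')
model.fit(np.random.rand(64, 4), np.random.rand(64, 1), epochs=3,
          callbacks=[LearningRateScheduler(step_decay)])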
github andrewekhalel / edafa / examples / keras / deepLab / model.py (view on GitHub)
    if block_id:
        # Expand

        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand_BN')(x)
        x = Activation(relu6, name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv_'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise_BN')(x)

    x = Activation(relu6, name=prefix + 'depthwise_relu')(x)

    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project_BN')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])
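This inverted-residual block repeats one pattern: a convolution with no bias, BatchNormalization with epsilon=1e-3 and momentum=0.999, then a relu6 activation. A minimal, self-contained sketch of that pattern follows; the helper name, sizes, and the relu6 definition (K.relu with max_value=6) are assumptions for illustration, not code from the project.

from keras import backend as K
from keras.models import Model
from keras.layers import Input, Conv2D, BatchNormalization, Activation

def relu6(x):
    return K.relu(x, max_value=6)

def conv_bn_relu6(x, filters, prefix):
    # 1x1 convolution without bias, normalized, then clipped ReLU
    x = Conv2D(filters, kernel_size=1, padding='same', use_bias=False,
               activation=None, name=prefix + 'conv')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'BN')(x)
    return Activation(relu6, name=prefix + 'relu')(x)

inputs = Input(shape=(64, 64, 3))
outputs = conv_bn_relu6(inputs, 16, prefix='demo_')
model = Model(inputs, outputs)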
github micah5 / sneaker-generator / python / train.py (view on GitHub)
    generator.add(Reshape(target_shape=(16, 16, 512)))
    generator.add(BatchNormalization(momentum=0.5))
    generator.add(Activation('relu'))

    generator.add(Conv2DTranspose(filters=256, kernel_size=(5, 5),
                                  strides=(2, 2), padding='same',
                                  data_format='channels_last',
                                  kernel_initializer='glorot_uniform'))
    generator.add(BatchNormalization(momentum=0.5))
    generator.add(Activation('relu'))

    generator.add(Conv2DTranspose(filters=128, kernel_size=(5, 5),
                                  strides=(2, 2), padding='same',
                                  data_format='channels_last',
                                  kernel_initializer='glorot_uniform'))
    generator.add(BatchNormalization(momentum=0.5))
    generator.add(Activation('relu'))

    generator.add(Conv2DTranspose(filters=64, kernel_size=(5, 5),
                                  strides=(2, 2), padding='same',
                                  data_format='channels_last',
                                  kernel_initializer='glorot_uniform'))
    generator.add(BatchNormalization(momentum=0.5))
    generator.add(Activation('relu'))

    generator.add(Conv2DTranspose(filters=3, kernel_size=(5, 5),
                                  strides=(2, 2), padding='same',
                                  data_format='channels_last',
                                  kernel_initializer='glorot_uniform'))
    generator.add(Activation('tanh'))

    optimizer = Adam(lr=0.00015, beta_1=0.5)
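Every BatchNormalization layer in this generator lowers momentum to 0.5 from the Keras 2 default of 0.99, so the running statistics adapt faster during GAN training. The excerpt stops right after the optimizer is created; a typical next step would be a compile call like the one below, which is illustrative only and not the project's verified code.

# illustrative only: the actual loss and GAN wiring are not shown in this excerpt
generator.compile(loss='binary_crossentropy', optimizer=optimizer)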
github deepgram / kur / kur / containers / layers / batchnorm.py (view on GitHub)
def _build(self, model):
		""" Instantiates the layer with the given backend.
		"""
		backend = model.get_backend()
		if backend.get_name() == 'keras':

			if backend.keras_version() == 1:
				import keras.layers as L		# pylint: disable=import-error
				yield L.BatchNormalization(
					mode=2,
					axis=-1 if self.axis is None else self.axis,
					name=self.name,
					trainable=not self.frozen
				)
			else:
				import keras.layers.normalization as L # pylint: disable=import-error

				###############################################################
				# pylint: disable=too-few-public-methods,unused-argument
				class CustomBatchNormalization(L.BatchNormalization):
					""" Custom batch-normalization implementation
					"""
					def call(self, inputs, training=None):
						""" Forces Keras to respect the way we want batch
							normalization to be calculated.
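The excerpt is cut off before the body of CustomBatchNormalization.call. As a purely hypothetical illustration of the technique (not kur's actual implementation), overriding call lets you pin the training flag so the layer always uses its moving statistics:

from keras.layers import BatchNormalization

class FrozenBatchNormalization(BatchNormalization):
    """Hypothetical example: always use the moving statistics, even in training."""
    def call(self, inputs, training=None):
        return super(FrozenBatchNormalization, self).call(inputs, training=False)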
github GalDude33 / Fetal-MRI-Segmentation / fetal_net / model / fetal_net_skip3.py (view on GitHub)
    def fc_block(input_layer: Tensor, output_channels, batch_norm=batch_norm,
                 activation='tanh'):
        output = Conv2D_(output_channels,
                         kernel_size=input_layer.shape.as_list()[-3:-1],
                         padding='valid',
                         activation=activation)(input_layer)
        if batch_norm:
            output = BatchNormalization()(output)
        return output
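Here the convolution kernel spans the input's full height and width (input_layer.shape.as_list()[-3:-1]), so the block acts like a fully connected layer with an optional BatchNormalization on top. Conv2D_ and the default batch_norm flag come from the enclosing module; the sketch below substitutes plain Conv2D and hard-codes the flag so it runs standalone.

from keras.models import Model
from keras.layers import Input, Conv2D, BatchNormalization

def fc_block_demo(input_layer, output_channels, batch_norm=True, activation='tanh'):
    # the kernel covers the whole spatial extent, so this behaves like a dense layer
    output = Conv2D(output_channels,
                    kernel_size=input_layer.shape.as_list()[-3:-1],
                    padding='valid', activation=activation)(input_layer)
    if batch_norm:
        output = BatchNormalization()(output)
    return output

inputs = Input(shape=(8, 8, 16))
outputs = fc_block_demo(inputs, output_channels=2)
model = Model(inputs, outputs)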
github ybabakhin / kaggle_salt_bes_phalanx / models / resnets.py (view on GitHub)
    def f(x):
        y = keras.layers.ZeroPadding2D(padding=1, name="padding{}{}_branch2a".format(stage_char, block_char))(x)
        y = keras.layers.Conv2D(filters, kernel_size, strides=stride, use_bias=False, name="res{}{}_branch2a".format(stage_char, block_char), **parameters)(y)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2a".format(stage_char, block_char))(y)
        y = keras.layers.Activation("relu", name="res{}{}_branch2a_relu".format(stage_char, block_char))(y)

        y = keras.layers.ZeroPadding2D(padding=1, name="padding{}{}_branch2b".format(stage_char, block_char))(y)
        y = keras.layers.Conv2D(filters, kernel_size, use_bias=False, name="res{}{}_branch2b".format(stage_char, block_char), **parameters)(y)
        y = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch2b".format(stage_char, block_char))(y)

        if block == 0:
            shortcut = keras.layers.Conv2D(filters, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch1".format(stage_char, block_char), **parameters)(x)
            shortcut = keras.layers.BatchNormalization(axis=axis, epsilon=1e-5, name="bn{}{}_branch1".format(stage_char, block_char))(shortcut)
        else:
            shortcut = x

        y = keras.layers.Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])
        y = keras.layers.Activation("relu", name="res{}{}_relu".format(stage_char, block_char))(y)

        return y
github TianzhongSong / Network-Slimming-Keras / models / resnet.py (view on GitHub)
def residual_block(x, nb_filters, strides=(1, 1), weight_decay=1E-4, sparse_factor=1e-4):
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filters, (3,3),
                kernel_initializer='he_normal',
                padding="same",
                strides=strides,
                use_bias=False,
                kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization()(x)
    x = SparsityRegularization(l1=sparse_factor)(x)
    x = Activation('relu')(x)
    x = Conv2D(nb_filters, (3, 3),
                kernel_initializer='he_normal',
                padding="same",
                use_bias=False,
                kernel_regularizer=l2(weight_decay))(x)

    return x
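This is a pre-activation residual branch (BatchNormalization, ReLU, Conv2D, repeated twice), and SparsityRegularization is a custom layer from the Network-Slimming repo that adds the L1 sparsity term used for slimming, so it is not part of stock Keras. A hedged usage sketch, assuming that layer is importable and that the input already has nb_filters channels so the shortcut adds cleanly:

from keras.models import Model
from keras.layers import Input, add

inputs = Input(shape=(32, 32, 16))
branch = residual_block(inputs, nb_filters=16)   # residual_block as defined above
outputs = add([inputs, branch])                  # shortcut connection
model = Model(inputs, outputs)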
github sigmaai / semantic-segmentation / models / icnet_fusion.py (view on GitHub)
        d_conv2_2 = Add(name='d_conv2_2')([y_d, y_d_])
        conv2_2 = Add(name='conv2_2')([y, y_])

        y = Add(name='conv2_2_merge_color')([conv2_2, d_conv2_2])
        y_d_ = Add(name='conv2_2_merge_depth')([d_conv2_2, conv2_2])

        y_d_ = Activation('relu', name='d_conv2_2/relu')(y_d_)
        y_ = Activation('relu', name='conv2_2/relu')(y)

        # -----------------------------------------------------------------------
        # part 4 depth
        y_d = Conv2D(32, 1, activation='relu', name='d_conv2_3_1x1_reduce')(y_d_)
        y_d = BatchNormalization(name='d_conv2_3_1x1_reduce_bn')(y_d)
        y_d = ZeroPadding2D(name='d_padding3')(y_d)
        y_d = Conv2D(32, 3, activation='relu', name='d_conv2_3_3x3')(y_d)
        y_d = BatchNormalization(name='d_conv2_3_3x3_bn')(y_d)
        y_d = Conv2D(128, 1, name='d_conv2_3_1x1_increase')(y_d)
        y_d = BatchNormalization(name='d_conv2_3_1x1_increase_bn')(y_d)

        # part 4 color
        y = Conv2D(32, 1, activation='relu', name='conv2_3_1x1_reduce')(y_)
        y = BatchNormalization(name='conv2_3_1x1_reduce_bn')(y)
        y = ZeroPadding2D(name='padding3')(y)
        y = Conv2D(32, 3, activation='relu', name='conv2_3_3x3')(y)
        y = BatchNormalization(name='conv2_3_3x3_bn')(y)
        y = Conv2D(128, 1, name='conv2_3_1x1_increase')(y)
        y = BatchNormalization(name='conv2_3_1x1_increase_bn')(y)

        d_conv2_3 = Add(name='d_conv2_3')([y_d, y_d_])
        conv2_3 = Add(name='conv2_3')([y, y_])

        y = Add(name='conv2_3_merge_color')([conv2_3, d_conv2_3])