How to use the keras.layers.Conv2D function in keras

To help you get started, we’ve selected a few keras.layers.Conv2D examples, based on popular ways it is used in public projects.

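Before working through the project excerpts below, here is a minimal, self-contained sketch of the Conv2D API; the input shape, filter counts, and model are illustrative and not taken from any of the projects that follow. Conv2D takes a filter count and a kernel size, plus optional strides, padding, activation, and kernel_initializer arguments, and is called on a 4D tensor shaped (batch, height, width, channels) under the default channels_last data format.

from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D
from tensorflow.keras.models import Model

# Toy model: two 3x3 convolutions (one strided), then 2x2 max pooling.
inputs = Input(shape=(128, 128, 3))  # (height, width, channels)
x = Conv2D(32, (3, 3), padding='same', activation='relu',
           kernel_initializer='he_normal')(inputs)
x = Conv2D(64, (3, 3), strides=(2, 2), padding='same', activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
model = Model(inputs, x)
model.summary()  # spatial size: 128 -> 64 (strided conv) -> 32 (pooling)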

github YangXiaoo / Lookoop / MachineLearning / Unet / unet.py (view on GitHub)
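
This excerpt builds the contracting path of a U-Net: at each scale, two 3x3 Conv2D layers with 'same' padding and He-normal initialization are followed by 2x2 max pooling, and the filter count doubles (64 → 128 → 256 → 512 → 1024) as the resolution halves; the expanding path then upsamples and concatenates with the matching encoder feature map.
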
print ("conv1 shape:",conv1.shape)
		conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
		print ("conv1 shape:",conv1.shape)
		pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
		print ("pool1 shape:",pool1.shape)

		conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
		print ("conv2 shape:",conv2.shape)
		conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
		print ("conv2 shape:",conv2.shape)
		pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
		print ("pool2 shape:",pool2.shape)

		conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
		print ("conv3 shape:",conv3.shape)
		conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
		print ("conv3 shape:",conv3.shape)
		pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
		print ("pool3 shape:",pool3.shape)

		conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
		conv4 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
		drop4 = Dropout(0.5)(conv4)
		pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

		conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
		conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
		drop5 = Dropout(0.5)(conv5)

		up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
		# merge6 = merge([drop4,up6], mode = 'concat', concat_axis = 3)
		merge6 = concatenate([drop4, up6], axis=3)

github zhuyiche / sfcn-opi / src / encoder_decoder_object_det.py (view on GitHub)
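
Here 1x1 Conv2D layers project the feature maps of backbone stages 2 through 6 to compatible channel depths so that the deeper stages can be summed with Add layers and upsampled with Conv2DTranspose, a pattern typical of feature-pyramid decoders.
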
        x_stage6_B = self.dilated_with_projection(x_stage5, stage=6)
        x_stage6_A1 = self.dilated_bottleneck(x_stage6_B, stage=6, block=1)
        x_stage6 = self.dilated_bottleneck(x_stage6_A1, stage=6, block=2)
        x_stage2_1x1 = Conv2D(filters=64, kernel_size=(1, 1), padding='same',
                              name='stage2_1x1_conv')(x_stage2)
        #x_stage2_1x1 = BatchNormalization(name='x_stage2_1x1_BN')(x_stage2_1x1)
        x_stage3_1x1 = Conv2D(filters=128, kernel_size=(1, 1), padding='same',
                              name='stage3_1x1_conv')(x_stage3)
        #x_stage3_1x1 = BatchNormalization(name='x_stage3_1x1_BN')(x_stage3_1x1)
        x_stage4_1x1 = Conv2D(filters=256, kernel_size=(1, 1), padding='same',
                              name='stage4_1x1_conv')(x_stage4)
        #x_stage4_1x1 = BatchNormalization(name='x_stage4_1x1_BN')(x_stage4_1x1)
        x_stage5_1x1 = Conv2D(filters=256, kernel_size=(1, 1), padding='same',
                              name='stage5_1x1_conv')(x_stage5)
        #x_stage5_1x1 = BatchNormalization(name='x_stage5_1x1_BN')(x_stage5_1x1)
        x_stage6_1x1 = Conv2D(filters=256, kernel_size=(1, 1), padding='same',
                              name='stage6_1x1_conv')(x_stage6)
        #x_stage6_1x1 = BatchNormalization(name='x_stage6_1x1_BN')(x_stage6_1x1)


        ## We add a second branch to predict a feature map at each feature pyramid level
        second_branch_concat = self.feature_prediction_deconv_branch(C6=x_stage6_1x1, C5=x_stage5_1x1,
                                                                     C4=x_stage4_1x1, C3=x_stage3_1x1,
                                                                     C2=x_stage2_1x1)

        # Encoder part
        stage_56 = Add(name='stage5_add_6')([x_stage6_1x1, x_stage5_1x1])
        stage_456 = Add(name='stage4_add_56')([stage_56, x_stage4_1x1])
        stage_456_upsample = Conv2DTranspose(filters=128, kernel_size=(1, 1), strides=(2, 2),
                                             name='stage456_upsample')(stage_456)
        #stage_456_upsample = BatchNormalization(name='stage_456_upsample_BN')(stage_456_upsample)

github sigmaai / semantic-segmentation / models / icnet_fusion_old.py (view on GitHub)
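
This excerpt chains bottleneck residual blocks: a 1x1 Conv2D reduces the channel count, a zero-padded 3x3 convolution processes the result, a second 1x1 convolution restores the depth, and an Add layer closes the skip connection before the ReLU.
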
        y_ = Conv2D(128, 1, name='d_conv2_1_1x1_increase')(y_)
        y_ = BatchNormalization(name='d_conv2_1_1x1_increase_bn')(y_)
        y = Add(name='d_conv2_1')([y, y_])
        y_ = Activation('relu', name='d_conv2_1/relu')(y)

        y = Conv2D(32, 1, activation='relu', name='d_conv2_2_1x1_reduce')(y_)
        y = BatchNormalization(name='d_conv2_2_1x1_reduce_bn')(y)
        y = ZeroPadding2D(name='d_padding2')(y)
        y = Conv2D(32, 3, activation='relu', name='d_conv2_2_3x3')(y)
        y = BatchNormalization(name='d_conv2_2_3x3_bn')(y)
        y = Conv2D(128, 1, name='d_conv2_2_1x1_increase')(y)
        y = BatchNormalization(name='d_conv2_2_1x1_increase_bn')(y)
        y = Add(name='d_conv2_2')([y, y_])
        y_ = Activation('relu', name='d_conv2_2/relu')(y)

        y = Conv2D(32, 1, activation='relu', name='d_conv2_3_1x1_reduce')(y_)
        y = BatchNormalization(name='d_conv2_3_1x1_reduce_bn')(y)
        y = ZeroPadding2D(name='d_padding3')(y)
        y = Conv2D(32, 3, activation='relu', name='d_conv2_3_3x3')(y)
        y = BatchNormalization(name='d_conv2_3_3x3_bn')(y)
        y = Conv2D(128, 1, name='d_conv2_3_1x1_increase')(y)
        y = BatchNormalization(name='d_conv2_3_1x1_increase_bn')(y)
        y = Add(name='d_conv2_3')([y, y_])
        y_ = Activation('relu', name='d_conv2_3/relu')(y)

        y = Conv2D(256, 1, strides=2, name='d_conv3_1_1x1_proj')(y_)
        y = BatchNormalization(name='d_conv3_1_1x1_proj_bn')(y)
        y_ = Conv2D(64, 1, strides=2, activation='relu', name='d_conv3_1_1x1_reduce')(y_)
        y_ = BatchNormalization(name='d_conv3_1_1x1_reduce_bn')(y_)
        y_ = ZeroPadding2D(name='d_padding4')(y_)
        y_ = Conv2D(64, 3, activation='relu', name='d_conv3_1_3x3')(y_)
        y_ = BatchNormalization(name='d_conv3_1_3x3_bn')(y_)

github mtianyan / NeuralNetworksGetStarted / 7-caffe_and_keras / 7-3 keras-master / examples / variational_autoencoder_deconv.py (view on GitHub)
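
The convolutional encoder of a variational autoencoder: four Conv2D layers (the second strided 2x2 for downsampling) are flattened and fed through a Dense layer ahead of the latent variables.
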
# number of convolutional filters to use
filters = 64
# convolution kernel size
num_conv = 3

batch_size = 100
if K.image_data_format() == 'channels_first':
    original_img_size = (img_chns, img_rows, img_cols)
else:
    original_img_size = (img_rows, img_cols, img_chns)
latent_dim = 2
intermediate_dim = 128
epsilon_std = 1.0
epochs = 5

x = Input(shape=original_img_size)
conv_1 = Conv2D(img_chns,
                kernel_size=(2, 2),
                padding='same', activation='relu')(x)
conv_2 = Conv2D(filters,
                kernel_size=(2, 2),
                padding='same', activation='relu',
                strides=(2, 2))(conv_1)
conv_3 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same', activation='relu',
                strides=1)(conv_2)
conv_4 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same', activation='relu',
                strides=1)(conv_3)
flat = Flatten()(conv_4)
hidden = Dense(intermediate_dim, activation='relu')(flat)

github waspinator / deep-learning-explorer / deeplab / libraries / wdeeplab / model.py (view on GitHub)
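
The entry flow of a DeepLab-style model: the configured output stride selects the block strides and atrous rates, then an initial strided 3x3 Conv2D (with batch normalization and ReLU) feeds a sequence of Xception blocks.
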
'the TensorFlow backend.')

        if config.INPUT_SHAPE_OUTPUT_FEATURE_RATIO == 8:
            entry_block3_stride = 1
            middle_block_rate = 2  # ! Not mentioned in paper, but required
            exit_block_rates = (2, 4)
            atrous_rates = (12, 24, 36)
        else:
            entry_block3_stride = 2
            middle_block_rate = 1
            exit_block_rates = (1, 2)
            atrous_rates = (6, 12, 18)

        input_image = KL.Input(shape=config.INPUT_SHAPE, name="input_image")

        x = KL.Conv2D(32, (3, 3), strides=(2, 2),
                name='entry_flow_conv1_1', use_bias=False, padding='same')(input_image)
        x = KL.BatchNormalization(name='entry_flow_conv1_1_BN')(x)
        x = KL.Activation('relu')(x)

        x = self.conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
        x = KL.BatchNormalization(name='entry_flow_conv1_2_BN')(x)
        x = KL.Activation('relu')(x)

        x = self.xception_block(x, [128, 128, 128], 'entry_flow_block1',
                        skip_connection_type='conv', stride=2,
                        depth_activation=False)
        x, skip1 = self.xception_block(x, [256, 256, 256], 'entry_flow_block2',
                                skip_connection_type='conv', stride=2,
                                depth_activation=False, return_skip=True)

        x = self.xception_block(x, [728, 728, 728], 'entry_flow_block3',

github petrosgk / Kaggle-Carvana-Image-Masking-Challenge / model / u_net.py (view on GitHub)
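
The bottleneck ("center") and first decoder stages of a U-Net: every Conv2D is followed by BatchNormalization and a ReLU Activation, and each decoder stage upsamples, concatenates with the matching encoder output, and applies three convolutions.
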
    # 8

    center = Conv2D(1024, (3, 3), padding='same')(down4_pool)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(1024, (3, 3), padding='same')(center)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    # center

    up4 = UpSampling2D((2, 2))(center)
    up4 = concatenate([down4, up4], axis=3)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    # 16

    up3 = UpSampling2D((2, 2))(up4)
    up3 = concatenate([down3, up3], axis=3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)

github sigmaai / semantic-segmentation / models / icnet_fusion_old.py (view on GitHub)
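
A deeper stage of the same ICNet fusion model, with parallel color and depth branches: dilated 3x3 convolutions (dilation_rate=4, matched by ZeroPadding2D) enlarge the receptive field, and Add layers first close each residual connection and then fuse the two modalities.
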
        y_d = Add(name='dconv4_6_depth')([y_d, y_d_])
        y_d = Activation('relu', name='dconv4_6/relu_depth')(y_d)

        # -------------------------------------------------------------------
        # part 9 color
        y_ = Conv2D(1024, 1, name='conv5_1_1x1_proj')(y)
        y_ = BatchNormalization(name='conv5_1_1x1_proj_bn')(y_)
        y = Conv2D(256, 1, activation='relu', name='conv5_1_1x1_reduce')(y)
        y = BatchNormalization(name='conv5_1_1x1_reduce_bn')(y)
        y = ZeroPadding2D(padding=4, name='padding14')(y)
        y = Conv2D(256, 3, dilation_rate=4, activation='relu', name='conv5_1_3x3')(y)
        y = BatchNormalization(name='conv5_1_3x3_bn')(y)
        y = Conv2D(1024, 1, name='conv5_1_1x1_increase')(y)
        y = BatchNormalization(name='conv5_1_1x1_increase_bn')(y)

        y_d_ = Conv2D(1024, 1, name='dconv5_1_1x1_proj')(y_d)
        y_d_ = BatchNormalization(name='dconv5_1_1x1_proj_bn')(y_d_)
        y_d = Conv2D(256, 1, activation='relu', name='dconv5_1_1x1_reduce')(y_d)
        y_d = BatchNormalization(name='dconv5_1_1x1_reduce_bn')(y_d)
        y_d = ZeroPadding2D(padding=4, name='dpadding14')(y_d)
        y_d = Conv2D(256, 3, dilation_rate=4, activation='relu', name='dconv5_1_3x3')(y_d)
        y_d = BatchNormalization(name='dconv5_1_3x3_bn')(y_d)
        y_d = Conv2D(1024, 1, name='dconv5_1_1x1_increase')(y_d)
        y_d = BatchNormalization(name='dconv5_1_1x1_increase_bn')(y_d)

        conv5_1_color = Add(name='conv5_1_color')([y, y_])
        conv5_1_depth = Add(name='conv5_1_depth')([y_d, y_d_])

        y = Add(name="conv5_1_merge_color")([conv5_1_color, conv5_1_depth])
        y_d = Add(name="conv5_1_merge_depth")([conv5_1_depth, conv5_1_color])

        y_ = Activation('relu', name='conv5_1/relu_color')(y)

github jackkwok / neural-road-inspector / unet / unet.py (view on GitHub)
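
A "dilated U-Net" encoder, as the docstring explains: at each scale the first 3x3 convolution uses dilation_rate=(1, 1) and the second dilation_rate=(2, 2), with 2x2 max pooling between scales.
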
"""
			Generate `dilated U-Net' model where the convolutions in the encoding and
			bottleneck are replaced by dilated convolutions. The second convolution in
			pair at a given scale in the encoder is dilated by 2.
		"""
		inputs = Input((self.img_rows, self.img_cols, self.num_channels))

		conv0 = Conv2D(32, (3, 3), padding="same", activation="relu", dilation_rate=(1, 1))(inputs)
		conv0 = Conv2D(32, (3, 3), padding="same", activation="relu", dilation_rate=(2, 2))(conv0)
		pool0 = MaxPooling2D(pool_size=(2, 2))(conv0)

		conv1 = Conv2D(32, (3, 3), padding="same", activation="relu", dilation_rate=(1, 1))(pool0)
		conv1 = Conv2D(32, (3, 3), padding="same", activation="relu", dilation_rate=(2, 2))(conv1)
		pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

		conv2 = Conv2D(64, (3, 3), padding="same", activation="relu", dilation_rate=(1, 1))(pool1)
		conv2 = Conv2D(64, (3, 3), padding="same", activation="relu", dilation_rate=(2, 2))(conv2)
		pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

		conv3 = Conv2D(128, (3, 3), padding="same", activation="relu", dilation_rate=(1, 1))(pool2)
		conv3 = Conv2D(128, (3, 3), padding="same", activation="relu", dilation_rate=(2, 2))(conv3)
		pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

		conv4 = Conv2D(256, (3, 3), padding="same", activation="relu", dilation_rate=(1, 1))(pool3)
		conv4 = Conv2D(256, (3, 3), padding="same", activation="relu", dilation_rate=(2, 2))(conv4)
		pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

		conv5 = Conv2D(512, (3, 3), padding="same", activation="relu", dilation_rate=(1, 1))(pool4)
		conv5 = Conv2D(512, (3, 3), padding="same", activation="relu", dilation_rate=(2, 2))(conv5)

		up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)  # axis=3 for TensorFlow (channels_last) vs. 1 for Theano (channels_first)
		conv6 = Conv2D(256, (3, 3), padding="same", activation="relu")(up6)

github pedro-abreu / deep-action-detection / arch / code / rgb_model_aug.py (view on GitHub)
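
A ResNet identity block: a 1x1 → kxk → 1x1 Conv2D bottleneck with BatchNormalization after each convolution; add() sums the result with the input tensor before the final ReLU.
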
    filters1, filters2, filters3 = filters
    bn_axis = 3

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size,
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = add([x, input_tensor])
    x = Activation('relu')(x)
    return x

github ybabakhin / kaggle_salt_bes_phalanx / bes / segmentation_models / fpn / blocks.py (view on GitHub)
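
A reusable Conv2D → BatchNormalization → Activation building block; note that the convolution's bias is disabled when batch normalization follows it, since BN's learned offset makes the bias redundant.
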
    def layer(input_tensor):
        x = Conv2D(n_filters, kernel_size, use_bias=not use_batchnorm, **kwargs)(input_tensor)
        if use_batchnorm:
            x = BatchNormalization()(x)
        x = Activation(activation)(x)

        return x
    return layer