How to use the keras.layers.convolutional.Conv2D function in keras

To help you get started, we’ve selected a few keras.layers.convolutional.Conv2D examples based on popular ways the layer is used in public projects.

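Before the project excerpts, here is a minimal, self-contained sketch of the layer in the Keras functional API; the shapes, filter counts and optimizer are illustrative choices, not taken from any of the projects listed below:

from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from keras.models import Model

# toy classifier over 32x32 RGB images with 10 classes
inputs = Input(shape=(32, 32, 3))
x = Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)  # 32 filters, 3x3 kernels
x = MaxPooling2D()(x)
x = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
x = Flatten()(x)
outputs = Dense(10, activation='softmax')(x)

model = Model(inputs, outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])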

github deepfakes / faceswap / lib / model / nn_blocks.py
def self_attn_block(inp, n_c, squeeze_factor=8):
    """ GAN Self Attention Block
    Code borrows from https://github.com/taki0112/Self-Attention-GAN-Tensorflow
    """
    msg = "Input channels must be >= {}, recieved nc={}".format(squeeze_factor, n_c)
    assert n_c // squeeze_factor > 0, msg
    var_x = inp
    shape_x = var_x.get_shape().as_list()

    var_f = Conv2D(n_c // squeeze_factor, 1,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_g = Conv2D(n_c // squeeze_factor, 1,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)
    var_h = Conv2D(n_c, 1, kernel_regularizer=regularizers.l2(GAN22_REGULARIZER))(var_x)

    shape_f = var_f.get_shape().as_list()
    shape_g = var_g.get_shape().as_list()
    shape_h = var_h.get_shape().as_list()
    flat_f = Reshape((-1, shape_f[-1]))(var_f)
    flat_g = Reshape((-1, shape_g[-1]))(var_g)
    flat_h = Reshape((-1, shape_h[-1]))(var_h)

    # attention map: similarity between every pair of spatial positions
    var_s = Lambda(lambda var_x: K.batch_dot(var_x[0],
                                             Permute((2, 1))(var_x[1])))([flat_g, flat_f])

    beta = Softmax(axis=-1)(var_s)
    # apply the attention weights to the value branch and restore the spatial shape
    var_o = Lambda(lambda var_x: K.batch_dot(var_x[0], var_x[1]))([beta, flat_h])
    var_o = Reshape(shape_x[1:])(var_o)
    var_o = Scale()(var_o)

    # residual connection back to the block input
    out = add([var_o, inp])
    return out
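
A minimal sketch of dropping this block into a functional model. It assumes the rest of faceswap's nn_blocks.py is in scope (the Scale layer, GAN22_REGULARIZER and the Keras imports), and the feature-map shape is illustrative:

feat_in = Input(shape=(64, 64, 128))          # feature map with n_c = 128 channels
attended = self_attn_block(feat_in, n_c=128)  # n_c must be >= squeeze_factor (default 8)
attn_model = Model(feat_in, attended)
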
github RanBezen / cycleGan_handwriting / cycleGan2.py
            def d_layer(layer_input, filters, f_size=3, normalization=True):
                """Discriminator layer"""
                d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
                d = LeakyReLU(alpha=0.2)(d)
                if normalization:
                    d = InstanceNormalization()(d)
                return d

            img = Input(shape=self.img_shape)

            d1 = d_layer(img, self.df, normalization=False)
            d2 = d_layer(d1, self.df * 2)
            d3 = d_layer(d2, self.df * 4)
            d4 = d_layer(d3, self.df * 8)

            validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)

            return Model(img, validity)
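
Assuming the returned Model is bound to a name such as discriminator, a hedged sketch of the usual compile-and-train step follows; the loss, the optimizer settings and the batch variables (batch_size, imgs_real, imgs_fake) are placeholders, not taken from the repository:

from keras.optimizers import Adam
import numpy as np

discriminator.compile(loss='mse', optimizer=Adam(0.0002, 0.5), metrics=['accuracy'])

patch_shape = discriminator.output_shape[1:]                 # per-patch validity map
valid = np.ones((batch_size,) + patch_shape)                 # "real" targets
fake = np.zeros((batch_size,) + patch_shape)                 # "fake" targets
d_loss_real = discriminator.train_on_batch(imgs_real, valid)
d_loss_fake = discriminator.train_on_batch(imgs_fake, fake)
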
github batikim09 / keras_sgan_ser / sgan_mer.py
    # generator
    #first layer: fully connected layer
    next_G = Dense(n_kernels_G[0] * initial_n_row * initial_n_col, activation="relu", input_dim=noise_source_len)(noise)
    next_G = Reshape((initial_n_row, initial_n_col, n_kernels_G[0]))(next_G)
    next_G = BatchNormalization(momentum=0.8)(next_G)
        
    #middle layers: CNN
    for idx in range(1, depth_G - 1):
        next_G = UpSampling2D()(next_G)
        next_G = Conv2D(n_kernels_G[idx], kernel_size=kernel_resolution_G[idx], padding="same")(next_G)
        next_G = Activation("relu")(next_G)
        next_G = BatchNormalization(momentum=0.8)(next_G)
        
    # last layer: CNN without batch normalisation; the number of output kernels is always 1 (one channel), regardless of the setting
    next_G = Conv2D(1, kernel_size=kernel_resolution_G[-1], padding="same")(next_G)
    next_G = Activation("tanh")(next_G)   
    
    G = Model(inputs=noise, outputs=next_G)
    
    n_layers = len(G.layers)
    print("total layers: ", n_layers )
    
    for layer in G.layers:  
        layer.name =  prefix_name + "_" + layer.name
        
    return G
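
Once returned, the generator G is sampled from random noise; a hedged sketch in which the latent size and batch size are illustrative:

import numpy as np

noise_source_len = 100                                       # assumed latent dimension
noise_batch = np.random.normal(0, 1, (16, noise_source_len))
fake_samples = G.predict(noise_batch)                        # shape (16, rows, cols, 1), tanh-scaled to [-1, 1]
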
github guillaume-chevalier / Hyperopt-Keras-CNN-CIFAR-100 / neural_net.py
def convolution_pooling(prev_layer, n_filters, hype_space):
    """
    Pooling with a convolution of stride 2.

    See: https://arxiv.org/pdf/1412.6806.pdf
    """
    current_layer = keras.layers.convolutional.Conv2D(
        filters=n_filters, kernel_size=(3, 3), strides=(2, 2),
        padding='same', activation='linear',
        kernel_regularizer=keras.regularizers.l2(
            STARTING_L2_REG * hype_space['l2_weight_reg_mult'])
    )(prev_layer)

    if hype_space['use_BN']:
        current_layer = bn(current_layer)

    return current_layer
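
A hedged usage sketch: hype_space is the Hyperopt-sampled dict, so calling the helper only needs the two keys referenced above (the values and the incoming tensor are illustrative; STARTING_L2_REG and bn come from the same neural_net.py module):

hype_space = {'l2_weight_reg_mult': 1.0, 'use_BN': True}   # illustrative hyperparameter sample
prev_layer = keras.layers.Input(shape=(32, 32, 3))
current_layer = convolution_pooling(prev_layer, n_filters=64, hype_space=hype_space)
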
github eriklindernoren / Keras-GAN / discogan / discogan.py
        def conv2d(layer_input, filters, f_size=4, normalize=True):
            """Layers used during downsampling"""
            d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
            d = LeakyReLU(alpha=0.2)(d)
            if normalize:
                d = InstanceNormalization()(d)
            return d
github ChessWarrior / ChessWarrior / chesswarrior / model.py
def build(self):
        model_config = Config.model
        input_data = Input(shape=(18, 8, 8))

        block1 = Conv2D(filters=model_config.cnn_filter_num, kernel_size=model_config.cnn_first_filter_num,
                        kernel_initializer="random_normal", bias_initializer="zeros", padding="same",
                        data_format="channels_first")(input_data)
        block1 = BatchNormalization(axis=1)(block1)
        block1 = Activation("relu")(block1)

        for _ in range(model_config.res_layer_num):
            block1 = self.add_rsnet(block1)

        block2_value = Conv2D(filters=4, kernel_size=1, data_format="channels_first",
                              kernel_initializer="random_normal", bias_initializer="zeros",
                              kernel_regularizer=l2(model_config.l2_regularizer))(block1)
        block2_value = BatchNormalization(axis=1)(block2_value)
        block2_value = Activation("relu")(block2_value)
        block2_value = Flatten()(block2_value)

        fc_value = Dense(units=model_config.value_fc_size, kernel_regularizer=l2(model_config.l2_regularizer),
                         activation="relu", kernel_initializer="random_normal", bias_initializer="zeros")(block2_value)
        value_out = Dense(units=1, kernel_regularizer=l2(model_config.l2_regularizer),
                          kernel_initializer="random_normal", bias_initializer="zeros", activation="tanh", name="value_out")(fc_value)

        self.model = Model(inputs=input_data, outputs=value_out)
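
A hedged sketch of exercising the built network; the wrapper class name, the Config.model fields and the compile settings are assumptions for illustration, not shown in the excerpt:

import numpy as np

builder = ChessWarriorModel()                       # hypothetical class exposing the build() above
builder.build()
builder.model.compile(optimizer='adam', loss='mse')
board_planes = np.zeros((1, 18, 8, 8))              # one position encoded as 18 channel planes of 8x8
value = builder.model.predict(board_planes)         # scalar in [-1, 1] from the tanh value head
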
github titu1994 / keras-squeeze-excite-network / se_resnext.py
    grouped_channels = int(filters / cardinality)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if K.image_data_format() == 'channels_first':
        if init._keras_shape[1] != 2 * filters:
            init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)
    else:
        if init._keras_shape[-1] != 2 * filters:
            init = Conv2D(filters * 2, (1, 1), padding='same', strides=(strides, strides),
                          use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)

    x = Conv2D(filters, (1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = __grouped_convolution_block(x, grouped_channels, cardinality, strides, weight_decay)

    x = Conv2D(filters * 2, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    # squeeze and excite block
    x = squeeze_excite_block(x)

    x = add([init, x])
    x = LeakyReLU()(x)
github cvjena / semantic-embeddings / models / DenseNet / densenet.py
    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
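
A hedged sketch of calling this block on a feature map; the signature __conv_block(ip, nb_filter, bottleneck, dropout_rate, weight_decay) is inferred from the variable names in the excerpt, and the shape is illustrative:

from keras.layers import Input

ip = Input(shape=(32, 32, 64))
x = __conv_block(ip, nb_filter=32, bottleneck=True, dropout_rate=0.2, weight_decay=1e-4)
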
github baccuslab / deep-retina / deepretina / layers.py
def conv(x, nfilters=8, kernel_size=13, strides=(2, 2), l2reg=0.1, sigma=0.05):
    """Conv-BN-GaussianNoise-Relu"""
    y = Conv2D(nfilters, kernel_size, strides=strides,
               kernel_regularizer=l2(l2reg),
               data_format="channels_first")(x)
    y = BatchNormalization()(y)
    y = GaussianNoise(sigma)(y)
    return Activation('relu')(y)
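
A hedged usage sketch; with data_format="channels_first" the input tensor is laid out as (channels, rows, cols), and the shape below is illustrative:

from keras.layers import Input
from keras.models import Model

stim = Input(shape=(40, 50, 50))        # (channels, rows, cols)
features = conv(stim, nfilters=8, kernel_size=13, strides=(2, 2), l2reg=0.1, sigma=0.05)
feature_model = Model(stim, features)
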
github liuph0119 / Semantic_Segmentation_Keras / core / nets / UNets.py
    conv6 = Conv2D(init_filters * 8, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv6)

    up2 = Concatenate()([Conv2DTranspose(init_filters * 4, (3, 3), padding="same", strides=(2, 2),
                                         kernel_regularizer=l2(weight_decay),
                                         kernel_initializer=kernel_initializer)(conv6), conv3])
    conv7 = Conv2D(init_filters * 4, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up2)
    conv7 = Dropout(dropout)(conv7)
    conv7 = Conv2D(init_filters * 4, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv7)

    up3 = Concatenate()([Conv2DTranspose(init_filters * 2, (3, 3), padding="same", strides=(2, 2),
                                         kernel_regularizer=l2(weight_decay),
                                         kernel_initializer=kernel_initializer)(conv7), conv2])
    conv8 = Conv2D(init_filters * 2, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up3)
    conv8 = Dropout(dropout)(conv8)
    conv8 = Conv2D(init_filters * 2, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv8)

    up4 = Concatenate()([Conv2DTranspose(init_filters, (3, 3), padding="same", strides=(2, 2),
                                         kernel_regularizer=l2(weight_decay),
                                         kernel_initializer=kernel_initializer)(conv8), conv1])
    conv9 = Conv2D(init_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up4)
    conv9 = Conv2D(init_filters, (3, 3), activation='relu', padding='same',
                   kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv9)

    output = Conv2D(n_class, (1, 1), activation=None,
                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv9)
    output = Activation("softmax")(output)
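
The excerpt stops at the per-pixel softmax; a hedged sketch of the wrap-up step that usually follows, where inputs stands in for whatever input tensor the full builder function received:

model = Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])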