How to use the keras.layers.convolutional.Convolution2D function in keras

To help you get started, we’ve selected a few keras examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes — no build needed — and fix issues immediately.

github thomaskuestner / CNNArt / GUI / motion_abd.py View on Github external
subsample=(1, 1),
                            W_regularizer=l2(1e-6),
                            input_shape=(1, patchSize[0,0], patchSize[0,1])))
    cnn.add(Activation('relu'))                    
    cnn.add(Convolution2D(64 ,                    #learning rate: 0.1 -> 76%
                            7, 
                            7, 
                            init='normal',
                           # activation='sigmoid',
                            weights=None,
                            border_mode='valid',
                            subsample=(1, 1),
                            W_regularizer=l2(1e-6)))
    cnn.add(Activation('relu'))
                       
    cnn.add(Convolution2D(128 ,                    #learning rate: 0.1 -> 76%
                            3, 
                            3, 
                            init='normal',
                           # activation='sigmoid',
                            weights=None,
                            border_mode='valid',
                            subsample=(1, 1),
                            W_regularizer=l2(1e-6)))
    cnn.add(Activation('relu'))
#    cnn.add(Convolution2D(256 ,                    #learning rate: 0.1 -> 76%
#                            3, 
#                            3, 
#                            init='normal',
#                           # activation='sigmoid',
#                            weights=None,
#                            border_mode='valid',
github flyyufelix / cnn_finetune / densenet169.py View on Github external
stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4  
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
github metaflow-ai / neural_style / vgg19 / model_headless.py View on Github external
zp32 = ZeroPadding2D((1, 1), dim_ordering=do)(c31)
    c32 = Convolution2D(256, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_3_2")(zp32)
    zp33 = ZeroPadding2D((1, 1), dim_ordering=do)(c32)
    c33 = Convolution2D(256, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_3_3")(zp33)
    zp34 = ZeroPadding2D((1, 1), dim_ordering=do)(c33)
    c34 = Convolution2D(256, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_3_4")(zp34)
    if pooling_type == 'avg':
        p3 = AveragePooling2D((2, 2), dim_ordering=do, strides=(2, 2))(c34)
    else:
        p3 = MaxPooling2D((2, 2), dim_ordering=do, strides=(2, 2))(c34)
        

    zp41 = ZeroPadding2D((1, 1), dim_ordering=do)(p3)
    c41 = Convolution2D(512, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_4_1")(zp41)
    zp42 = ZeroPadding2D((1, 1), dim_ordering=do)(c41)
    c42 = Convolution2D(512, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_4_2")(zp42)
    zp43 = ZeroPadding2D((1, 1), dim_ordering=do)(c42)
    c43 = Convolution2D(512, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_4_3")(zp43)
    zp44 = ZeroPadding2D((1, 1), dim_ordering=do)(c43)
    c44 = Convolution2D(512, 3, 3, dim_ordering=do, activation='relu', trainable=trainable, name="conv_4_4")(zp44)
 
    model = Model(input=[input], output=[
        c11, c12, 
        c21, c22, 
        c31, c32, c33, c34,
        c41, c42, c43, c44]
    )

    if weights_path:
        model.load_weights(weights_path)

    return model
github david-vazquez / keras_zoo / fcn8.py View on Github external
#                       name='flat')(inputs)
    padded = ZeroPadding2D(padding=(100, 100), dim_ordering=do,
                           name='pad100')(inputs)

    # Block 1
    conv1_1 = Convolution2D(
           64, 3, 3, activation='relu', border_mode='valid', dim_ordering=do,
           name='conv1_1', W_regularizer=l2(l2_reg), trainable=True)(padded)
    conv1_2 = Convolution2D(
           64, 3, 3, activation='relu', border_mode='same', dim_ordering=do,
           name='conv1_2', W_regularizer=l2(l2_reg), trainable=True)(conv1_1)
    pool1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool1')(conv1_2)

    # Block 2
    conv2_1 = Convolution2D(
           128, 3, 3, activation='relu', border_mode='same', dim_ordering=do,
           name='conv2_1', W_regularizer=l2(l2_reg), trainable=True)(pool1)
    conv2_2 = Convolution2D(
           128, 3, 3, activation='relu', border_mode='same', dim_ordering=do,
           name='conv2_2', W_regularizer=l2(l2_reg), trainable=True)(conv2_1)
    pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), dim_ordering=do,
                         name='pool2')(conv2_2)

    # Block 3
    conv3_1 = Convolution2D(
           256, 3, 3, activation='relu', border_mode='same', dim_ordering=do,
           name='conv3_1', W_regularizer=l2(l2_reg), trainable=True
           )(pool2)
    conv3_2 = Convolution2D(
           256, 3, 3, activation='relu', border_mode='same', dim_ordering=do,
           name='conv3_2', W_regularizer=l2(l2_reg), trainable=True)(conv3_1)
github Innixma / dex / scripts / old / qlearnFlappySpyder.py View on Github external
def buildmodel():
    """Build and compile the Q-network CNN (Keras 1.x API).

    Returns a ``Sequential`` model that maps stacked game frames of shape
    ``(img_channels, img_rows, img_cols)`` to 2 action values, compiled
    with MSE loss and ``Adam(lr=1e-6)``.

    NOTE(review): ``img_channels``/``img_rows``/``img_cols`` and ``normal``
    are module-level names defined elsewhere in this file.
    """
    print("Now we build the model")

    def small_normal(shape, name):
        # Same initializer the original repeated as inline lambdas:
        # zero-mean gaussian with scale 0.01.
        return normal(shape, scale=0.01, name=name)

    net = Sequential()
    net.add(Convolution2D(32, 8, 8, subsample=(4, 4), init=small_normal,
                          border_mode='same',
                          input_shape=(img_channels, img_rows, img_cols)))
    net.add(Activation('relu'))
    net.add(Convolution2D(64, 4, 4, subsample=(2, 2), init=small_normal,
                          border_mode='same'))
    net.add(Activation('relu'))
    net.add(Convolution2D(64, 3, 3, subsample=(1, 1), init=small_normal,
                          border_mode='same'))
    net.add(Activation('relu'))
    net.add(Flatten())
    net.add(Dense(512, init=small_normal))
    net.add(Activation('relu'))
    net.add(Dense(2, init=small_normal))

    net.compile(loss='mse', optimizer=Adam(lr=1e-6))
    print("We finish building the model")
    return net
github yu4u / dnn-watermark / wide_residual_network.py View on Github external
def conv3_block(input, k=1, dropout=0.0, regularizer=None):
    """Residual block with two 3x3 convs of 64*k filters (Keras 1.x API).

    The shortcut path is projected through a 1x1 conv whenever its channel
    count differs from ``64 * k``; the two paths are summed at the end.
    ``regularizer`` is applied only to the second conv's weights, and
    ``dropout`` (if > 0) is inserted between the two convs.
    """
    # 'th' ordering keeps channels at axis 1; 'tf' keeps them last.
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    # Project the identity path when its filter count does not match 64*k.
    # (Indexing _keras_shape by channel_axis covers both orderings, which
    # the original spelled out as two identical branches.)
    shortcut = input
    if shortcut._keras_shape[channel_axis] != 64 * k:
        shortcut = Convolution2D(64 * k, 1, 1, activation='linear',
                                 border_mode='same')(shortcut)

    x = Convolution2D(64 * k, 3, 3, border_mode='same')(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Convolution2D(64 * k, 3, 3, border_mode='same',
                      W_regularizer=regularizer)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    return merge([shortcut, x], mode='sum')
github yardstick17 / ConnectingDots / neural_networks / convolutional_neural_network / convnets-keras / convnetskeras / convnets.py View on Github external
model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_4'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    if heatmap:
        model.add(Convolution2D(4096, 7, 7, activation='relu', name='dense_1'))
        model.add(Convolution2D(4096, 1, 1, activation='relu', name='dense_2'))
        model.add(Convolution2D(1000, 1, 1, name='dense_3'))
        model.add(Softmax4D(axis=1, name='softmax'))
    else:
        model.add(Flatten())
        model.add(Dense(4096, activation='relu', name='dense_1'))
        model.add(Dropout(0.5))
        model.add(Dense(4096, activation='relu', name='dense_2'))
        model.add(Dropout(0.5))
        model.add(Dense(1000, name='dense_3'))
        model.add(Activation('softmax'))

    if weights_path:
        model.load_weights(weights_path)

    return model
github longubu / datumio / examples / keras / cifar10_cnn_batchgen.py View on Github external
# One-hot encode integer class labels into (num_samples, nb_classes)
# matrices for the softmax / categorical-crossentropy output below.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()

# Conv block 1: two 3x3 convs with 32 filters, then 2x2 max-pool + dropout.
# border_mode='same' preserves spatial size on the first conv; the second
# conv uses the default 'valid' mode and shrinks each spatial dim by 2.
# NOTE(review): input_shape is channels-first — assumes 'th' image ordering;
# confirm against the Keras config used by this script.
model.add(Convolution2D(32, 3, 3, border_mode='same',
                        input_shape=(img_channels, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Conv block 2: same structure with 64 filters.
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Classifier head: flatten -> 512-unit hidden layer -> nb_classes softmax.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

# Cast pixel data to float32 before scaling/feeding the network.
X_train = X_train.astype('float32')
github david-vazquez / keras_zoo / models / fcn8.py View on Github external
# DECONTRACTING PATH
    # Unpool 1
    score_pool4 = Convolution2D(nclasses, 1, 1, init, 'relu', border_mode='same',
                                name='score_pool4',
                                W_regularizer=l2(l2_reg))(pool4)
    score2 = Deconvolution2D(nclasses, 4, 4, score_fr._keras_shape, init,
                             'linear', border_mode='valid', subsample=(2, 2),
                             name='score2', W_regularizer=l2(l2_reg))(score_fr)

    score_pool4_crop = CropLayer2D(score2,
                                   name='score_pool4_crop')(score_pool4)
    score_fused = merge([score_pool4_crop, score2], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_fused')

    # Unpool 2
    score_pool3 = Convolution2D(nclasses, 1, 1, init, 'relu', border_mode='valid',
                                name='score_pool3',
                                W_regularizer=l2(l2_reg))(pool3)

    score4 = Deconvolution2D(nclasses, 4, 4, score_fused._keras_shape, init,
                             'linear', border_mode='valid', subsample=(2, 2),
                             bias=True,     # TODO: No bias??
                             name='score4', W_regularizer=l2(l2_reg))(score_fused)

    score_pool3_crop = CropLayer2D(score4, name='score_pool3_crop')(score_pool3)
    score_final = merge([score_pool3_crop, score4], mode=custom_sum,
                        output_shape=custom_sum_shape, name='score_final')

    upsample = Deconvolution2D(nclasses, 16, 16, score_final._keras_shape, init,
                               'linear', border_mode='valid', subsample=(8, 8),
                               bias=False,     # TODO: No bias??
                               name='upsample', W_regularizer=l2(l2_reg))(score_final)
github titu1994 / Fast-Neural-Style / models.py View on Github external
'''

        if train_mode and style_image_path is None:
            raise Exception('Style reference path must be supplied if training mode is enabled')

        self.mode = 2

        if K.image_dim_ordering() == "th":
            ip = Input(shape=(3, self.img_width, self.img_height), name="X_input")
        else:
            ip = Input(shape=(self.img_width, self.img_height, 3), name="X_input")

        c1 = ReflectionPadding2D((4, 4))(ip)

        c1 = Convolution2D(32, 9, 9, activation='linear', border_mode='valid', name='conv1')(c1)
        c1_b = BatchNormalization(axis=1, mode=self.mode, name="batchnorm1")(c1)
        c1_b = Activation('relu')(c1_b)

        c2 = Convolution2D(self.features, self.k, self.k, activation='linear', border_mode='same', subsample=(2, 2),
                           name='conv2')(c1_b)
        c2_b = BatchNormalization(axis=1, mode=self.mode, name="batchnorm2")(c2)
        c2_b = Activation('relu')(c2_b)

        c3 = Convolution2D(self.features, self.k, self.k, activation='linear', border_mode='same', subsample=(2, 2),
                           name='conv3')(c2_b)
        x = BatchNormalization(axis=1, mode=self.mode, name="batchnorm3")(c3)
        x = Activation('relu')(x)

        if self.deep_model:
            c4 = Convolution2D(self.features, self.k, self.k, activation='linear', border_mode='same', subsample=(2, 2),
                               name='conv4')(x)