How to use the keras.layers.Convolution2D function in keras

To help you get started, we’ve selected a few keras examples based on popular ways Convolution2D is used in public projects.

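Note that these snippets span two generations of the Keras API: the Keras 1.x signature, Convolution2D(nb_filter, nb_row, nb_col, border_mode=..., subsample=..., init=...), and the Keras 2.x form, in which Convolution2D is kept as an alias for Conv2D and takes Convolution2D(filters, kernel_size, padding=..., strides=...). For orientation, here is a minimal sketch (not taken from any of the projects below) written against the Keras 2.x API:

from keras.models import Sequential
from keras.layers import Convolution2D, Flatten, Dense

model = Sequential()
# 16 filters, 3x3 kernel; the Keras 1.x spelling of this layer would be
# Convolution2D(16, 3, 3, border_mode='same', subsample=(1, 1), activation='relu')
model.add(Convolution2D(16, (3, 3), padding='same', strides=(1, 1),
                        activation='relu', input_shape=(32, 32, 3)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy')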

github germain-hug / GANs-Keras / utils / utils.py
def ups_conv_bn(x, dim, act):
    # 2x upsample, then a 3x3 conv (Keras 1.x signature: kernel rows and cols
    # as separate positional args), then batch-norm (the mode argument is
    # Keras 1.x only; it was removed in Keras 2).
    x = UpSampling2D()(x)
    x = Convolution2D(dim, 3, 3, border_mode='same', activation=act)(x)
    return BatchNormalization(mode=2)(x)
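
A hypothetical call site (the feature-map variable and filter count here are illustrative, not from the repo):

x = ups_conv_bn(x, 128, 'relu')  # hypothetical: 2x upsample, then project to 128 channels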

github medgift / iMIMIC-RCVs / scripts / models / resnet101.py
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of the middle conv layer at the main path
        filters: list of integers, the nb_filters of the 3 conv layers at the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3, the first conv layer at the main path uses subsample=(2,2),
    and the shortcut should use subsample=(2,2) as well.
    '''
    eps = 1.1e-5
    nb_filter1, nb_filter2, nb_filter3 = filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    scale_name_base = 'scale' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1, 1, 1, subsample=strides,
                      name=conv_name_base + '2a', bias=False)(input_tensor)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2a')(x)
    x = Activation('relu', name=conv_name_base + '2a_relu')(x)

    x = ZeroPadding2D((1, 1), name=conv_name_base + '2b_zeropadding')(x)
    x = Convolution2D(nb_filter2, kernel_size, kernel_size,
                      name=conv_name_base + '2b', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2b')(x)
    x = Activation('relu', name=conv_name_base + '2b_relu')(x)

    x = Convolution2D(nb_filter3, 1, 1, name=conv_name_base + '2c', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name=bn_name_base + '2c')(x)
    x = Scale(axis=bn_axis, name=scale_name_base + '2c')(x)
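
This excerpt is the body of the repo's conv_block helper; the function signature itself is cut off above. Assuming the conventional conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)) signature used in Keras ResNet-101 ports, a call might look like:

# Hypothetical usage; both the signature and the input tensor are assumed,
# as neither is shown in the excerpt.
x = conv_block(img_input, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))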

github yconst / burro / src / training / train_generator.py
def train(train_folder):
    igen = image_generator(train_folder, indefinite=True)
    bgen = batch_image_generator(igen)

    model = Sequential()
    model.add( Convolution2D(24, (5, 5), strides=(2, 2), activation='relu', input_shape=(120,120,3)) )
    model.add( Convolution2D(32, (5, 5), strides=(2, 2), activation='relu') )
    model.add( Convolution2D(64, (5, 5), strides=(2, 2), activation='relu') )
    model.add( Convolution2D(64, (3, 3), strides=(2, 2), activation='relu') )
    model.add( Convolution2D(24, (3, 3), strides=(1, 1), activation='relu') )
    model.add( Flatten() )
    model.add( Dense(100, activation='relu') )
    model.add( Dropout(.1) )
    model.add( Dense(50, activation='relu') )
    model.add( Dropout(.1) )
    model.add( Dense(15, activation='softmax', name='angle_out') )
    model.compile(optimizer='rmsprop', loss={'angle_out': 'categorical_crossentropy'})
    model.summary()  # summary() prints directly and returns None, so print() is unnecessary

    model.fit_generator(bgen, epochs=10, steps_per_epoch=100)

github mmalekzadeh / replacement-autoencoder / opportunity / om5_server_cnn.py
num_train, height, width = train_data.shape
num_test = test_data.shape[0] 

#### Building CNN
## Input Layer
inp = Input(shape=(height, width,1)) 
## Convnet -- Layer 1 and 2 
conv_1 = Convolution2D(conv_depth_1, (1 , kernel_size_1),
                       padding='valid', activation='relu')(inp)
conv_2 = Convolution2D(conv_depth_1, (1 , kernel_size_2),
                       padding='same', activation='relu')(conv_1)
dense_2 = Dense(conv_depth_1, activation='relu')(conv_2)
pool_2 = MaxPooling2D(pool_size=(1, pool_size_1))(dense_2)
drop_2 = Dropout(drop_prob_1)(pool_2)
## Convnet -- Layer 3 
conv_3 = Convolution2D(conv_depth_2, (1 , kernel_size_1),
                       padding='valid', activation='relu')(drop_2)
dense_3 = Dense(conv_depth_2, activation='relu')(conv_3)
pool_3 = MaxPooling2D(pool_size=(1, pool_size_2))(dense_3)
drop_3 = Dropout(drop_prob_1)(pool_3)
## Convnet -- Layer 4 
conv_4 = Convolution2D(conv_depth_3, (1 , kernel_size_2),
                       padding='valid', activation='relu')(drop_3)
drop_4 = Dropout(drop_prob_1)(conv_4)
## Flatten Layer
flat = Flatten()(drop_4)
hidden = Dense(hidden_size, activation='relu')(flat)
drop_5 = Dropout(drop_prob_2)(hidden)
out = Dense(num_classes, activation='softmax')(drop_5)
# To define a model, we specify its input and output layers
model = Model(inputs=inp, outputs=out) 
model.compile(loss='categorical_crossentropy',  # using the cross-entropy loss function
              optimizer='adam',                 # assumption: the excerpt is truncated mid-call
              metrics=['accuracy'])
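
With the model compiled, training might proceed along these lines (a sketch only; train_labels and the hyperparameters are assumptions, since the excerpt ends here):

# Sketch: add the channel axis expected by the Input layer, then fit.
model.fit(train_data.reshape(num_train, height, width, 1), train_labels,
          batch_size=32, epochs=10, validation_split=0.1)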

github gaborvecsei / Emotion-Recognition / train_emotion_recognizer.py
batch_size = 64
samples_per_epoch = 20480
nb_epoch = 10

model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', activation='elu', input_shape=train_image_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(32, 3, 3, border_mode='same', activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3, border_mode='same', activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(128, 3, 3, border_mode='same', activation='elu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(1024, activation='elu'))
model.add(Dropout(0.5))
model.add(Dense(512, activation='elu'))
model.add(Dropout(0.5))
model.add(Dense(y_train.shape[1], activation='softmax'))

# Uncomment this if you would like to continue a training from a checkpoint:

# existing_model_weights_path = os.path.join(SAVE_MODEL_FOLDER_PATH, "model_weights_10_epochs.h5")
# if os.path.exists(existing_model_weights_path):
#     print("Loading weights...")
#     model.load_weights(existing_model_weights_path)

github leriomaggio / deep-learning-keras-tensorflow / deep_learning_models / vgg16.py
    else:
        if include_top:
            input_shape = (224, 224, 3)
        else:
            input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor)
        else:
            img_input = input_tensor
    # Block 1
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv1')(img_input)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv1')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv1')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv2')(x)
    x = Convolution2D(256, 3, 3, activation='relu', border_mode='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv1')(x)
    x = Convolution2D(512, 3, 3, activation='relu', border_mode='same', name='block4_conv2')(x)

github itaicaspi / keras-dqn-doom / main.py
                tower_3 = Convolution2D(16, 1, 1, border_mode='same', activation='relu')(tower_3)
                output1 = merge([tower_1, tower_2, tower_3], mode='concat', concat_axis=1)
                avgpool = AveragePooling2D((7, 7), strides=(8, 8))(output1)
                flatten = Flatten()(avgpool)
                output = Dense(len(self.environment.actions))(flatten)
                model = Model(input=input_img, output=output)
                model.compile(rmsprop(lr=self.learning_rate), "mse")
                #model.summary()
            elif network_type == "sequential":
                print("Built a sequential DQN")
                model = Sequential()
                model.add(Convolution2D(16, 3, 3, subsample=(2,2), activation='relu', input_shape=(self.history_length, self.state_height, self.state_width), init='uniform', trainable=True))
                model.add(Convolution2D(32, 3, 3, subsample=(2,2), activation='relu', init='uniform', trainable=True))
                model.add(Convolution2D(64, 3, 3, subsample=(2,2), activation='relu', init='uniform', trainable=True))
                model.add(Convolution2D(128, 3, 3, subsample=(1,1), activation='relu', init='uniform'))
                model.add(Convolution2D(256, 3, 3, subsample=(1,1), activation='relu', init='uniform'))
                model.add(Flatten())
                model.add(Dense(512, activation='relu', init='uniform'))
                model.add(Dense(len(self.environment.actions),init='uniform'))
                model.compile(rmsprop(lr=self.learning_rate), "mse")
            elif network_type == "recurrent":
                print("Built a recurrent DQN")
                model = Sequential()
                model.add(TimeDistributed(Convolution2D(16, 3, 3, subsample=(2,2), activation='relu', init='uniform', trainable=True),input_shape=(self.history_length, 1, self.state_height, self.state_width)))
                model.add(TimeDistributed(Convolution2D(32, 3, 3, subsample=(2,2), activation='relu', init='uniform', trainable=True)))
                model.add(TimeDistributed(Convolution2D(64, 3, 3, subsample=(2,2), activation='relu', init='uniform', trainable=True)))
                model.add(TimeDistributed(Convolution2D(128, 3, 3, subsample=(1,1), activation='relu', init='uniform')))
                model.add(TimeDistributed(Convolution2D(256, 3, 3, subsample=(1,1), activation='relu', init='uniform')))
                model.add(TimeDistributed(Flatten()))
                model.add(LSTM(512, activation='relu', init='uniform', unroll=True))
                model.add(Dense(len(self.environment.actions),init='uniform'))
                model.compile(rmsprop(lr=self.learning_rate), "mse")

github ternaus / kaggle_dstl_submission / src / unet_trees.py
    conv5 = BatchNormalization(mode=0, axis=1)(conv5)
    conv5 = keras.layers.advanced_activations.ELU()(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, border_mode='same', init='he_uniform')(up6)
    conv6 = BatchNormalization(mode=0, axis=1)(conv6)
    conv6 = keras.layers.advanced_activations.ELU()(conv6)
    conv6 = Convolution2D(256, 3, 3, border_mode='same', init='he_uniform')(conv6)
    conv6 = BatchNormalization(mode=0, axis=1)(conv6)
    conv6 = keras.layers.advanced_activations.ELU()(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, border_mode='same', init='he_uniform')(up7)
    conv7 = BatchNormalization(mode=0, axis=1)(conv7)
    conv7 = keras.layers.advanced_activations.ELU()(conv7)
    conv7 = Convolution2D(128, 3, 3, border_mode='same', init='he_uniform')(conv7)
    conv7 = BatchNormalization(mode=0, axis=1)(conv7)
    conv7 = keras.layers.advanced_activations.ELU()(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, border_mode='same', init='he_uniform')(up8)
    conv8 = BatchNormalization(mode=0, axis=1)(conv8)
    conv8 = keras.layers.advanced_activations.ELU()(conv8)
    conv8 = Convolution2D(64, 3, 3, border_mode='same', init='he_uniform')(conv8)
    conv8 = BatchNormalization(mode=0, axis=1)(conv8)
    conv8 = keras.layers.advanced_activations.ELU()(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, border_mode='same', init='he_uniform')(up9)
    conv9 = BatchNormalization(mode=0, axis=1)(conv9)
    conv9 = keras.layers.advanced_activations.ELU()(conv9)
    conv9 = Convolution2D(32, 3, 3, border_mode='same', init='he_uniform')(conv9)