How to use the keras.models.Model class in keras

To help you get started, we've selected a few keras examples based on popular ways keras.models.Model is used in public projects.

github farizrahman4u/recurrentshop/tests/test_recurrent_model.py (View on GitHub)
def test_model():
    x = Input((5,))
    h_tm1 = Input((10,))
    h = add([Dense(10)(x), Dense(10, use_bias=False)(h_tm1)])
    h = Activation('tanh')(h)
    a = Input((7, 5))

    rnn = RecurrentModel(input=x, output=h, initial_states=h_tm1, final_states=h)
    b = rnn(a)
    model = Model(a, b)

    model.compile(loss='mse', optimizer='sgd')
    model.fit(np.random.random((32, 7, 5)), np.random.random((32, 10)))
    model.predict(np.zeros((32, 7, 5)))

github bdqnghi/bi-tbcnn/baselines/ma_lstm_baseline.py (View on GitHub)
# Embedded version of the inputs
encoded_left = cpp_embedding_layer(left_input)
encoded_right = java_embedding_layer(right_input)

# Since this is a siamese network, both sides share the same LSTM
shared_lstm = LSTM(n_hidden)

left_output = shared_lstm(encoded_left)
right_output = shared_lstm(encoded_right)

# Calculates the distance as defined by the MaLSTM model
malstm_distance = Merge(mode=lambda x: exponent_neg_manhattan_distance(x[0], x[1]), output_shape=lambda x: (x[0][0], 1))([left_output, right_output])

# Pack it all up into a model
malstm = Model([left_input, right_input], [malstm_distance])

# Adadelta optimizer, with gradient clipping by norm
optimizer = Adadelta(clipnorm=gradient_clipping_norm)

malstm.compile(loss='mean_squared_error', optimizer=optimizer, metrics=['accuracy'])

# Start training
training_start_time = time()

malstm_trained = malstm.fit([X_train['left'], X_train['right']], Y_train, batch_size=batch_size, nb_epoch=n_epoch,
                            validation_data=([X_validation['left'], X_validation['right']], Y_validation))

print("Training time finished.\n{} epochs in {}".format(n_epoch, datetime.timedelta(seconds=time()-training_start_time)))

# Plot accuracy
plt.plot(malstm_trained.history['acc'])

github titu1994/LSTM-FCN/adiac_model.py (View on GitHub)
    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model

github Hsankesara/DeepResearch/Hierarchical_Attention_Network/HAN.py (View on GitHub)
        if self.hyperparameters['l2_regulizer'] is None:
            kernel_regularizer = None
        else:
            kernel_regularizer = regularizers.l2(self.hyperparameters['l2_regulizer'])
        if self.hyperparameters['dropout_regulizer'] is None:
            dropout_regularizer = 1
        else:
            dropout_regularizer = self.hyperparameters['dropout_regulizer']
        word_input = Input(shape=(self.max_senten_len,), dtype='float32')
        word_sequences = self.get_embedding_layer()(word_input)
        word_lstm = Bidirectional(
            self.hyperparameters['rnn'](self.hyperparameters['rnn_units'], return_sequences=True, kernel_regularizer=kernel_regularizer))(word_sequences)
        word_dense = TimeDistributed(
            Dense(self.hyperparameters['dense_units'], kernel_regularizer=kernel_regularizer))(word_lstm)
        word_att = AttentionWithContext()(word_dense)
        wordEncoder = Model(word_input, word_att)

        sent_input = Input(shape=(self.max_senten_num, self.max_senten_len), dtype='float32')
        sent_encoder = TimeDistributed(wordEncoder)(sent_input)
        sent_lstm = Bidirectional(self.hyperparameters['rnn'](
            self.hyperparameters['rnn_units'], return_sequences=True, kernel_regularizer=kernel_regularizer))(sent_encoder)
        sent_dense = TimeDistributed(
            Dense(self.hyperparameters['dense_units'], kernel_regularizer=kernel_regularizer))(sent_lstm)
        sent_att = Dropout(dropout_regularizer)(
            AttentionWithContext()(sent_dense))
        preds = Dense(len(self.classes))(sent_att)
        self.model = Model(sent_input, preds)
        self.model.compile(
            loss=self.hyperparameters['loss'], optimizer=self.hyperparameters['optimizer'], metrics=self.hyperparameters['metrics'])

github saumyasinha/Logo_generation_GAN/ACgan.py (View on GitHub)
        # The generator takes noise and the target label as input
        # and generates the corresponding logo-image of that label
        noise = Input(shape=(100,))
        label = Input(shape=(1,))
        img = self.generator([noise, label])

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # The discriminator takes generated image as input and determines validity
        # and the label of that image
        valid, target_label = self.discriminator(img)

        # The combined model  (stacked generator and discriminator) takes
        # noise as input => generates images => determines validity
        self.combined = Model([noise, label], [valid, target_label])
        self.combined.compile(loss=losses,
                              optimizer=optimizer)

github deepdiy/deepdiy/deepdiy/model_zoo/mrcnn/mrcnn/model.py (View on GitHub)
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name="mrcnn_class_loss")(
                [target_class_ids, mrcnn_class_logits, active_class_ids])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name="mrcnn_bbox_loss")(
                [target_bbox, target_class_ids, mrcnn_bbox])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name="mrcnn_mask_loss")(
                [target_mask, target_class_ids, mrcnn_mask])

            # Model
            inputs = [input_image, input_image_meta,
                      input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [rpn_class_logits, rpn_class, rpn_bbox,
                       mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,
                       rpn_rois, output_rois,
                       rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\
                fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                     config.POOL_SIZE, config.NUM_CLASSES,
                                     train_bn=config.TRAIN_BN,
                                     fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])

            # Create masks for detections

github qubvel/segmentation_models/segmentation_models/pspnet/builder.py (View on GitHub)
    if dropout is not None:
        x = SpatialDropout2D(dropout)(x)

    x = Conv2D(classes, (3,3), padding='same', name='final_conv')(x)

    if final_interpolation == 'bilinear':
        x = ResizeImage(to_tuple(last_upsampling_factor))(x)
    elif final_interpolation == 'duc':
        x = DUC(to_tuple(last_upsampling_factor))(x)
    else:
        raise ValueError('Unsupported interpolation type {}. '.format(final_interpolation) +
                         'Use `duc` or `bilinear`.')

    x = Activation(activation, name=activation)(x)

    model = Model(input, x)

    return model

github wyu-du/MatchPyramid-for-semantic-matching/match_pyramid.py (View on GitHub)
layer1_conv=Conv2D(filters=8, kernel_size=5, padding='same')(layer1_dot)
layer1_activation=Activation('relu')(layer1_conv)
z=MaxPooling2D(pool_size=(2,2))(layer1_activation)
    
for i in range(num_conv2d_layers):
    z=Conv2D(filters=filters_2d[i], kernel_size=kernel_size_2d[i], padding='same')(z)
    z=Activation('relu')(z)
    z=MaxPooling2D(pool_size=(mpool_size_2d[i][0], mpool_size_2d[i][1]))(z)
        
pool1_flat=Flatten()(z)
pool1_flat_drop=Dropout(rate=dropout_rate)(pool1_flat)
mlp1=Dense(32)(pool1_flat_drop)
mlp1=Activation('relu')(mlp1)
out=Dense(2, activation='softmax')(mlp1)
    
model=Model(inputs=[query, doc], outputs=out)
model.compile(optimizer='Adagrad', loss='categorical_crossentropy', metrics=['acc'])
model.summary()

# build dataset generator
def generator(texts1, texts2, labels, batch_size, min_index, max_index):
    i=min_index
    
    while True:
        if i+batch_size>=max_index:
            i=min_index
        rows=np.arange(i, min(i+batch_size, max_index))
        i+=batch_size
        
        samples1=texts1[rows]
        samples2=texts2[rows]
        targets=labels[rows]

github hugochan/KATE/autoencoder/core/ae.py (View on GitHub)
        encoded_layer = Dense(self.dim, activation=act, kernel_initializer="glorot_normal", name="Encoded_Layer")
        encoded = encoded_layer(input_layer)

        if self.comp_topk:
            print('add k-competitive layer')
            encoded = KCompetitive(self.comp_topk, self.ctype)(encoded)

        # "decoded" is the lossy reconstruction of the input
        # add non-negativity contraint to ensure probabilistic interpretations
        decoded = Dense_tied(self.input_size, activation='sigmoid', tied_to=encoded_layer, name='Decoded_Layer')(encoded)

        # this model maps an input to its reconstruction
        self.autoencoder = Model(outputs=decoded, inputs=input_layer)

        # this model maps an input to its encoded representation
        self.encoder = Model(outputs=encoded, inputs=input_layer)

        # create a placeholder for an encoded input
        encoded_input = Input(shape=(self.dim,))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.autoencoder.layers[-1]
        # create the decoder model
        self.decoder = Model(outputs=decoder_layer(encoded_input), inputs=encoded_input)

github Abhishekmamidi123/NLP/Functional_API_Keras/8_Multiple_Output_Model.py (View on GitHub)
from keras.models import Model
from keras.layers import Input, Dense, Flatten
from keras.utils import plot_model
from keras.layers.recurrent import LSTM
from keras.layers.merge import concatenate
from keras.layers.wrappers import TimeDistributed

visible = Input(shape = (100, 1))

hidden = LSTM(10, return_sequences = True)(visible)

class11 = LSTM(10)(hidden)
class12 = Dense(10, activation = 'relu')(class11)
output1 = Dense(1, activation = 'sigmoid')(class12)
output2 = TimeDistributed(Dense(1, activation = 'linear'))(hidden)

model = Model(inputs = visible, outputs = [output1, output2])
model.summary()
plot_model(model, '8_Multiple_Output_Model.png', show_shapes=True, show_layer_names=True)