How to use the `keras.layers.Lambda` function in Keras

To help you get started, we’ve selected a few Keras examples, based on popular ways `keras.layers.Lambda` is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github SunQinghui / T-CONV / T-CONV / src / Models / conn-local.py View on Github external
# NOTE(review): snippet starts mid-function -- input_1..input_5, flatten, the
# other e_* embeddings, config, batch_size and the data arrays are defined
# above this excerpt (the indentation of this first line was lost on extraction).
e_taxi_id = Embedding(448, 10, embeddings_initializer='glorot_uniform')(input_5)

        # Fuse the convolutional features with the flattened categorical
        # embeddings (week of year, day of week, quarter-hour, taxi id).
        mlp_input0 = concatenate([flatten, Flatten()(e_week_of_year)])
        mlp_input1 = concatenate([mlp_input0, Flatten()(e_day_of_week)])
        mlp_input2 = concatenate([mlp_input1, Flatten()(e_qhour_of_day)])
        mlp_input = concatenate([mlp_input2, Flatten()(e_taxi_id)])

        # mlp_input = Dropout(0.2)(mlp_input)
        hidden_layer = Dense(500, activation='relu', kernel_initializer='glorot_uniform')(mlp_input)

        #hidden_layer = Dropout(0.1)(hidden_layer)

        # Softmax over the candidate destinations: one unit per row of
        # config.tgtcls.
        output_layer = Dense(config.tgtcls.shape[0], activation='softmax', kernel_initializer='glorot_uniform')(
            hidden_layer)

        # Lambda wraps the plain function `dot` as a Keras layer; presumably it
        # reduces the destination probabilities to a concrete location estimate
        # (e.g. a weighted sum over config.tgtcls) -- TODO confirm against the
        # definition of `dot`.
        output_1 = Lambda(dot, name='output_1')(output_layer)
        # model=Model(inputs=[inputs,inputs_e_week_of_year,inputs_e_day_of_week,inputs_e_qhour_of_day,inputs_e_taxi_id], outputs=output)
        model = Model(inputs=[input_1, input_2, input_3, input_4, input_5], outputs=output_1)
        model.compile(loss=my_loss_train, optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
        # Baseline: evaluate on the training data before fitting.
        result1 = model.predict([snapshot_train, week_of_year, day_of_week, qhour_of_day, taxi_id])
        train_loss = my_loss(result1, train_dest)
        print("train reault is %s" % train_loss)  # NOTE(review): typo 'reault' -> 'result' in the message
        model.fit_generator(
            train_data_generator(taxi_id, week_of_year, day_of_week, qhour_of_day, snapshot_train, train_dest,
                                 batch_size), steps_per_epoch=(train_dest.shape[0] // batch_size), epochs=3,
            validation_data=(
            [snapshot_valid, week_of_year_valid, day_of_week_valid, qhour_of_day_valid, taxi_id__valid], [valid_dest]))
        # Validation loss after training.
        result = model.predict(
            [snapshot_valid, week_of_year_valid, day_of_week_valid, qhour_of_day_valid, taxi_id__valid])
        loss = my_loss(result, valid_dest)
        print("result is %s" % loss)
        if (math.isnan(loss)):  # NOTE(review): snippet truncated here; the NaN branch body is not shown
github cvjena / semantic-embeddings / learn_image_embeddings.py View on Github external
# NOTE(review): snippet starts mid-elif-chain; the indentation of this first
# line was lost on extraction (it belongs under an `if args.loss == 'inv_corr':`
# branch like the one visible further down).
model = keras.models.Model(model.inputs, keras.layers.Lambda(utils.l2norm, name = 'l2norm')(model.output))
            elif args.loss == 'softmax_corr':
                model = keras.models.Model(model.inputs, keras.layers.Activation('softmax', name = 'softmax')(model.output))
            if args.cls_weight > 0:
                # Optionally attach an auxiliary classification head.
                model = cls_model(model, data_generator.num_classes, args.cls_base)
        par_model = model if args.gpus <= 1 else keras.utils.multi_gpu_model(model, gpus = args.gpus, cpu_merge = False)
    else:
        # Multi-GPU path: build (or resume) the template model on the CPU so
        # that multi_gpu_model can replicate it onto the GPUs.
        with K.tf.device('/cpu:0'):
            if args.snapshot and os.path.exists(args.snapshot):
                print('Resuming from snapshot {}'.format(args.snapshot))
                # compile=False: presumably the model is (re)compiled later
                # with the current training configuration -- TODO confirm
                # against the rest of this script.
                model = keras.models.load_model(args.snapshot, custom_objects = utils.get_custom_objects(args.architecture), compile = False)
            else:
                embed_model = utils.build_network(embedding.shape[1], args.architecture, input_channels=data_generator.num_channels)
                model = embed_model
                if args.loss == 'inv_corr':
                    # Lambda turns the plain function utils.l2norm into a layer
                    # so the embedding output is L2-normalized; the layer name
                    # 'l2norm' is used below to locate the embedding layer.
                    model = keras.models.Model(model.inputs, keras.layers.Lambda(utils.l2norm, name = 'l2norm')(model.output))
                elif args.loss == 'softmax_corr':
                    model = keras.models.Model(model.inputs, keras.layers.Activation('softmax', name = 'softmax')(model.output))
                if args.cls_weight > 0:
                    model = cls_model(model, data_generator.num_classes, args.cls_base)
        par_model = keras.utils.multi_gpu_model(model, gpus = args.gpus)
    
    # Name of the layer whose output is the embedding, keyed by loss type
    # (matches the layer names assigned above).
    if args.loss == 'inv_corr':
        embedding_layer_name = 'l2norm'
    elif args.loss == 'softmax_corr':
        embedding_layer_name = 'softmax'
    else:
        embedding_layer_name = 'embedding'
    
    if not args.no_progress:
        model.summary()
github adalca / neuron / neuron / models.py View on Github external
# NOTE(review): snippet starts mid-function -- prefix, like_tensor,
# prior_tensor, use_logp, final_pred_activation, model_name, input_model and
# prior_tensor_input are defined above this excerpt; the indentation of this
# first line was lost on extraction.
name = '%s_likelihood_sigmoid' % prefix
        like_tensor = KL.Activation('sigmoid', name=name)(like_tensor)
        merge_op = KL.multiply

    # merge the likelihood and prior layers into posterior layer
    name = '%s_posterior' % prefix
    post_tensor = merge_op([prior_tensor, like_tensor], name=name)

    # output prediction layer
    # we use a softmax to compute P(L_x|I) where x is each location
    pred_name = '%s_prediction' % prefix
    if final_pred_activation == 'softmax':
        # Per the assert message: softmax is only valid when the prior was
        # added in log space, not merged as probabilities P().
        assert use_logp, 'cannot do softmax when adding prior via P()'
        print("using final_pred_activation %s for %s" % (final_pred_activation, model_name))
        # Lambda is used (rather than an Activation layer) so the softmax can
        # be pinned explicitly to the last axis.
        softmax_lambda_fcn = lambda x: keras.activations.softmax(x, axis=-1)
        pred_tensor = KL.Lambda(softmax_lambda_fcn, name=pred_name)(post_tensor)

    else:
        # 'linear' is the identity: expose the merged posterior directly.
        pred_tensor = KL.Activation('linear', name=pred_name)(post_tensor)

    # create the model
    model_inputs = [*input_model.inputs, prior_tensor_input]
    model = Model(inputs=model_inputs, outputs=[pred_tensor], name=model_name)

    # compile
    return model
github d909b / perfect_match / perfect_match / models / model_builder.py View on Github external
# NOTE(review): snippet starts mid-call -- these are the trailing keyword
# arguments of a build_mlp(...) invocation begun above this excerpt.
l2_weight=l2_weight,
                                            propensity_dropout=propensity_dropout,
                                            normalize=normalize,
                                            last_activity_regulariser=regulariser)

        last_layer_h = last_layer

        # Build one regression head per treatment: each head only sees the
        # rows of the batch whose treatment indicator equals that head's index.
        all_indices, outputs = [], []
        for i in range(num_treatments):

            def get_indices_equal_to(x):
                # Row indices where the flattened treatment input equals i.
                # NOTE(review): the closure captures the loop variable `i`;
                # this works only because the Lambda below is applied within
                # the same loop iteration (the graph is built immediately).
                return tf.reshape(tf.to_int32(tf.where(tf.equal(tf.reshape(x, (-1,)), i))), (-1,))

            indices = Lambda(get_indices_equal_to)(treatment_input)

            # Select the shared-representation rows belonging to treatment i.
            current_last_layer_h = Lambda(lambda x: tf.gather(x, indices))(last_layer_h)

            if with_propensity_dropout:
                # NOTE(review): the lambda ignores its argument `x` and gathers
                # from the outer `propensity_dropout` tensor directly.
                current_propensity_dropout = Lambda(lambda x: tf.gather(propensity_dropout, indices))(propensity_dropout)
            else:
                current_propensity_dropout = None

            # Per-treatment MLP on top of the shared representation.
            last_layer = ModelBuilder.build_mlp(current_last_layer_h,
                                                dim=num_units,
                                                p_dropout=dropout,
                                                num_layers=num_layers,
                                                with_bn=with_bn,
                                                propensity_dropout=current_propensity_dropout,
                                                l2_weight=l2_weight)

            # Linear output head for treatment i.
            output = Dense(output_dim, activation="linear", name="head_" + str(i))(last_layer)
github NTMC-Community / MatchZoo / matchzoo / contrib / models / esim.py View on Github external
def _avg(self, texts: tf.Tensor, mask: tf.Tensor) -> tf.Tensor:
        """
        Average each text over its valid (non-PAD) timesteps.

        :param texts: tensor of shape [B, T, H]
        :param mask: tensor of shape [B, T], where 1 marks a valid
            position and 0 marks padding
        :return: tensor of shape [B, H] holding the masked mean of each text
        """
        # Broadcast the mask across the hidden dimension, then zero out the
        # PAD timesteps so they contribute nothing to the sum.
        expanded_mask = self._expand_dim(mask, axis=2)
        masked_texts = keras.layers.Multiply()([texts, expanded_mask])

        # Sum over time and divide by the count of valid steps, so padding
        # does not dilute the average.
        def _masked_mean(pair):
            return K.sum(pair[0], axis=1) / K.sum(pair[1], axis=1)

        return keras.layers.Lambda(_masked_mean)([masked_texts, expanded_mask])
github GlassyWing / transformer-word-segmenter / transformer / core.py View on Github external
def __call__(self, x):
        """
        Build the position encoding for a batch of sequences.

        :param x: a tensor with shape of [N, max_seq_len]
        :return: position encoding
        """
        # Derive the position-index sequence as a layer, then look up the
        # corresponding encodings.
        return self.position_encoding(Lambda(self.get_pos_seq)(x))
github gstenger98 / SRGAN-TreeHacks / GAN / generators.py View on Github external
def create_3colorsto1color_2layer_cnn(input_shape, output_size, resize_factor):
    """Build a generator that upsamples an image and refines it with small CNNs.

    Three parallel two-layer CNN branches each reduce the upsampled image to a
    single channel; the branch outputs are concatenated back into a 3-channel
    image and bicubically resized to `output_size`.

    NOTE(review): the snippet is truncated after the resize layer -- the model
    construction / return statement is not shown in this excerpt.
    """
    def cnn_transform(inval, n_filters=8, kernel_size=(2,2)):
        # Two-layer refinement: n_filters softplus convolutions, then a 1x1
        # convolution that collapses the result to a single channel.
        transformed = Conv2D(n_filters, kernel_size=kernel_size, padding="SAME", activation='softplus')(inval)
        summed = Conv2D(1, kernel_size=(1,1), padding="SAME", activation="softplus")(transformed)
        return summed
    
    img = Input(input_shape)

    # Float Cast Layer
    # Lambda wraps the plain function `to_float` so the cast happens inside
    # the graph.
    float_img = Lambda(to_float, input_shape=input_shape)(img)
    upsample = UpSampling2D(resize_factor)(float_img)

    # Three parallel branches; each receives the same upsampled input.
    GBR = []
    for i in range(3):  
        out = cnn_transform(upsample, n_filters=8)
        GBR.append(out)

    out_img = Concatenate()(GBR)
        
    # Resize Layer
    # tf.image.resize_images is not a Keras layer, so it is wrapped in Lambda.
    resized_img = Lambda(lambda image: tf.image.resize_images(
        image, output_size,
        method=tf.image.ResizeMethod.BICUBIC,
        align_corners=True
        ))(out_img)
github mzweilin / EvadeML-Zoo / models / keras_models / resnet50_model.py View on Github external
# NOTE(review): snippet starts mid-call -- these are the trailing keyword
# arguments of an input-shape helper invocation begun above this excerpt.
data_format=K.image_data_format(),
                                      include_top=include_top)

    # Reuse an existing Keras tensor when one is supplied; otherwise create a
    # fresh Input (optionally wrapping a raw backend tensor).
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # BatchNormalization must normalize over the channel axis, whose position
    # depends on the backend's image data format.
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    # Lambda applies scaling_tf to the raw input pixels; the exact scaling is
    # selected by input_range_type -- TODO confirm against scaling_tf's
    # definition.
    x = Lambda(lambda x: scaling_tf(x, input_range_type))(img_input)
    x = ZeroPadding2D((3, 3))(x)

    # x = ZeroPadding2D((3, 3))(img_input)
    # Stem: 7x7/2 convolution, batch norm, ReLU, 3x3/2 max-pooling.
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Residual stages 2-3 (the snippet is truncated before the later stages).
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
github kensho-technologies / bubs / bubs / embedding_layer.py View on Github external
# NOTE(review): snippet starts mid-call -- these are the trailing arguments of
# the backward character-LSTM constructor begun above this excerpt.
self._char_lstm_dim,
            use_bias=True,
            activation="tanh",
            recurrent_activation="sigmoid",
            trainable=False,  # frozen; initialized from self._backward_lstm_weights
            input_shape=(None, self._char_embedding_dim),
            return_sequences=True,
            name="backward_lstm",
            weights=self._backward_lstm_weights,
        )

        # Select LSTM outputs at token breaks and make sure the rest is set to zeros
        # Lambda wraps the plain functions `batch_indexing` / `multiply` as
        # layers; output_shape is supplied explicitly so Keras does not have
        # to infer it.
        self.indexing_layer = Lambda(
            batch_indexing, output_shape=tuple((self.max_token_sequence_len, self._char_lstm_dim))
        )
        self.mask_multiply_layer = Lambda(
            multiply, output_shape=tuple((self.max_token_sequence_len, self._char_lstm_dim))
        )
        super().build(input_shape)
github EliasCai / speech_recognition_ctc / 2-2-ctc_speech_thchs30.py View on Github external
# NOTE(review): snippet starts mid-function -- `skip` (a list of branch
# outputs), input_tensor, output_size, max_pred_len, ctc_lambda_func and
# path_base are defined above this excerpt; the indentation of this first
# line was lost on extraction.
skip_tensor=Add()([s for s in skip]) 
    # Sum the skip-connection outputs, then a small 1x1-conv head producing a
    # per-frame softmax over the output alphabet.
    logit=Conv1D(kernel_size=1,filters=192,padding="same")(skip_tensor)
    logit=BatchNormalization(axis=-1)(logit)
    logit=Activation("tanh")(logit)
    y_pred=Conv1D(kernel_size=1,filters=output_size,padding="same",activation="softmax")(logit)

    # Model(inputs=input_tensor, outputs=y_pred).summary()
    
    # Extra inputs required by the CTC loss computation.
    labels = Input(name='the_labels', shape=[max_pred_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer: the Lambda's output IS the
    # per-sample CTC loss value.
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])

    # clipnorm seems to speed up convergence
    opt = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
    # opt = Adam(lr=0.001)
    model = Model(inputs=[input_tensor, labels, input_length, label_length], outputs=loss_out)

    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    # (y_pred here is already the CTC loss emitted by the 'ctc' Lambda above)
    # model = multi_gpu_model(model, gpus=2)
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=opt)
    # Backend function exposing the raw softmax outputs for decoding/testing;
    # the extra input is the Keras learning-phase flag.
    test_func = K.function([input_tensor,K.learning_phase()], [y_pred])
    # Best-effort warm start from a previous checkpoint, if present.
    if os.path.exists(join(path_base,"best_weights_680x26.h5")) :
        
        model.load_weights(join(path_base,"best_weights_680x26.h5"))
        print('load weights from', join(path_base,"best_weights_680x26.h5"))
    
    return model, test_func