How to use the keras.layers.Input function in Keras

To help you get started, we've selected a few keras.layers.Input examples, drawn from popular ways the function is used in public projects.

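As a quick orientation before the project snippets: Input creates a symbolic placeholder tensor, layers are called on that tensor, and Model ties the endpoints into a trainable network. A minimal sketch (all layer sizes here are arbitrary, chosen only for illustration):

from keras.layers import Input, Dense
from keras.models import Model

# shape omits the batch dimension; (784,) means one 784-wide vector per sample
inputs = Input(shape=(784,))
hidden = Dense(64, activation='relu')(inputs)
outputs = Dense(10, activation='softmax')(hidden)
model = Model(inputs=inputs, outputs=outputs)
model.summary()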

github OlafenwaMoses/ImageAI: imageai/Detection/__init__.py
self.__yolo_model_image_size = (416, 416)
            elif (detection_speed == "fastest"):
                self.__yolo_model_image_size = (320, 320)
            elif (detection_speed == "flash"):
                self.__yolo_model_image_size = (272, 272)

        if (self.__modelLoaded == False):
            if (self.__modelType == ""):
                raise ValueError("You must set a valid model type before loading the model.")
            elif (self.__modelType == "retinanet"):
                model = resnet50_retinanet(num_classes=80)
                model.load_weights(self.modelPath)
                self.__model_collection.append(model)
                self.__modelLoaded = True
            elif (self.__modelType == "yolov3"):
                model = yolo_main(Input(shape=(None, None, 3)), len(self.__yolo_anchors) // 3,
                                  len(self.numbers_to_names))
                model.load_weights(self.modelPath)

                hsv_tuples = [(x / len(self.numbers_to_names), 1., 1.)
                              for x in range(len(self.numbers_to_names))]
                self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
                self.colors = list(
                    map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                        self.colors))
                np.random.seed(10101)
                np.random.shuffle(self.colors)
                np.random.seed(None)

                self.__yolo_input_image_shape = K.placeholder(shape=(2,))
                self.__yolo_boxes, self.__yolo_scores, self.__yolo_classes = yolo_eval(model.output,
                                                                                       self.__yolo_anchors,
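
The detail worth noticing above is Input(shape=(None, None, 3)): YOLOv3 is fully convolutional, so the two spatial dimensions can stay undefined and one graph serves every detection_speed image size. A minimal sketch of the same pattern, not the real yolo_main (the conv layers are stand-ins):

from keras.layers import Input, Conv2D
from keras.models import Model

# None in the spatial dimensions lets one fully convolutional model accept
# images of any height and width; only the channel count is fixed.
image_input = Input(shape=(None, None, 3))
x = Conv2D(32, 3, padding='same', activation='relu')(image_input)
outputs = Conv2D(16, 1, activation='relu')(x)
model = Model(inputs=image_input, outputs=outputs)
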
github osmr/imgclsmob: keras_/kerascv/models/preresnet.py
init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)

    x = preres_init_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        for j, out_channels in enumerate(channels_per_stage):
            strides = 2 if (j == 0) and (i != 0) else 1
            x = preres_unit(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bottleneck=bottleneck,
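
Here the shape handed to Input is computed first, so the same code builds a valid graph whether the backend is channels-first or channels-last. A condensed runnable sketch of that idea, with a stand-in body replacing the PreResNet blocks and the standard backend query replacing is_channels_first():

from keras import backend as K
from keras.layers import Input, Conv2D, GlobalAveragePooling2D, Dense
from keras.models import Model

in_channels, in_size, classes = 3, (224, 224), 1000  # the docstring defaults

# Choose the layout to match the backend's image data format.
if K.image_data_format() == 'channels_first':
    input_shape = (in_channels,) + in_size
else:
    input_shape = in_size + (in_channels,)

inputs = Input(shape=input_shape)
x = Conv2D(64, 7, strides=2, padding='same', activation='relu')(inputs)
x = GlobalAveragePooling2D()(x)
outputs = Dense(classes, activation='softmax')(x)
model = Model(inputs=inputs, outputs=outputs)
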
github Healthcare-Robotics/mr-gan: mr_nn.py
# Scale data to zero mean and unit variance
    scaler = preprocessing.StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # Select labeled data
    X_train, y_train = shuffle(X_train, y_train)
    x_labeled = np.concatenate([X_train[y_train==j][:num_labeled_examples] for j in xrange(len(materials))], axis=0)
    y_labeled = np.concatenate([[j]*num_labeled_examples for j in xrange(len(materials))], axis=0)
    if verbose:
        print 'x_labeled:', np.shape(x_labeled), 'y_labeled:', np.shape(y_labeled)

    y_labeled = keras.utils.to_categorical(y_labeled, len(materials))
    y_test = keras.utils.to_categorical(y_test, len(materials))

    inputs = Input(shape=(X_train.shape[1],))
    x = GaussianNoise(0.3)(inputs)
    x = Dense(1000, activation='relu')(x)
    x = GaussianNoise(0.5)(x)
    x = Dense(500, activation='relu')(x)
    x = GaussianNoise(0.5)(x)
    x = Dense(250, activation='relu')(x)
    x = GaussianNoise(0.5)(x)
    x = Dense(250, activation='relu')(x)
    x = GaussianNoise(0.5)(x)
    x = Dense(250, activation='relu')(x)
    outputs = Dense(len(materials))(x)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

    # Train on x_labeled, y_labeled. Test on X_test, y_test
    model.fit(x_labeled, y_labeled, batch_size=20, epochs=100, validation_split=0.0, verbose=0)
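
A common pattern here: the Input shape is read straight off the training matrix with X_train.shape[1], so the model adapts to however many features the preprocessing produced. A self-contained toy version with random stand-in data and smaller layers than the project's:

import numpy as np
from keras.layers import Input, Dense, GaussianNoise
from keras.models import Model
from keras.utils import to_categorical

X_train = np.random.rand(100, 32).astype('float32')        # stand-in features
y_train = to_categorical(np.random.randint(0, 4, 100), 4)  # 4 fake classes

inputs = Input(shape=(X_train.shape[1],))  # feature count taken from the data
x = GaussianNoise(0.3)(inputs)             # noise regularizes; active only in training
x = Dense(64, activation='relu')(x)
outputs = Dense(4)(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=20, epochs=1, verbose=0)
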
github codekansas/gandlf: examples/mnist_rnn_gan.py
def build_discriminator(mode):
    """Builds the discriminator model."""

    image = keras.layers.Input((28, 28, 1), name='real_data')
    rnn_input = keras.layers.Reshape((28, 28))(image)

    rnn_1 = keras.layers.LSTM(128, return_sequences=False)
    class_pred = keras.layers.Dense(1, activation='sigmoid')

    if mode == '1d':  # Pay attention to class labels.
        input_class = keras.layers.Input((1,), dtype='int32',
                                         name='image_class_dis')
        embed = keras.layers.Embedding(10, 64, init='glorot_normal')
        embedded = keras.layers.Flatten()(embed(input_class))
        rnn_1 = gandlf.layers.RecurrentAttention1D(rnn_1, embedded)
        inputs = [image, input_class]

    elif mode == '2d':  # Pay attention to whole image.
        ref_image = keras.layers.Input((28, 28, 1), name='ref_image_dis')
        attn_reshaped = keras.layers.Reshape((28, 28))(ref_image)
        rnn_1 = gandlf.layers.RecurrentAttention2D(rnn_1, attn_reshaped)
        inputs = [image, ref_image]

    else:
        inputs = [image]

    pred_fake = class_pred(rnn_1(rnn_input))
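
Two things stand out: Input can take the shape positionally plus a name for debugging, and a second Input can carry integer ids (dtype='int32') destined for an Embedding. The init='glorot_normal' keyword is Keras 1 syntax. A simplified modern sketch that merges the two inputs by plain concatenation instead of gandlf's attention wrappers:

import keras

image = keras.layers.Input((28, 28, 1), name='real_data')
label = keras.layers.Input((1,), dtype='int32', name='image_class_dis')

flat = keras.layers.Flatten()(image)
embedded = keras.layers.Flatten()(keras.layers.Embedding(10, 64)(label))
merged = keras.layers.Concatenate()([flat, embedded])
pred = keras.layers.Dense(1, activation='sigmoid')(merged)
model = keras.models.Model(inputs=[image, label], outputs=pred)
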
github cvdlab/nn-segmentation-for-lar: two_way_brain_tumor_segmentation_cnn/BrainSegmentation/BrainSegDCNN_2.py
def compile_model(self):
        """
        Model and compile the first CNN and the whole two blocks DCNN.
        Also initialize the field cnn1
        :return: Model, Two blocks DeepCNN compiled
        """
        if self.cascade_model:
            # input layers
            input65 = Input(shape=(4, 65, 65))
            input33 = Input(shape=(4, 33, 33))
            # first CNN modeling
            output_cnn1 = self.one_block_model(input65)
            # first cnn compiling
            cnn1 = Model(inputs=input65, outputs=output_cnn1)
            sgd = SGD(lr=self.learning_rate, momentum=self.momentum_rate, decay=self.decay_rate, nesterov=False)
            cnn1.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
            # initialize the field cnn1
            self.cnn1 = cnn1
            print 'First CNN compiled!'
            # concatenation of the output of the first CNN and the input of shape 33x33
            conc_input = Concatenate(axis=1)([input33, output_cnn1])
            # second cnn modeling
            output_dcnn = self.one_block_model(conc_input)
            output_dcnn = Reshape((5,))(output_dcnn)
            # whole dcnn compiling
            dcnn = Model(inputs=[input65, input33], outputs=output_dcnn)
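
The cascade wires the first network's output back together with the second Input via Concatenate(axis=1), the channel axis, since the shapes here are channels-first. A compressed runnable sketch in which a single valid convolution stands in for one_block_model:

from keras.layers import Input, Conv2D, Concatenate, Reshape, Activation
from keras.models import Model

# Two channels-first patch inputs at different scales, as in the snippet.
big = Input(shape=(4, 65, 65))
small = Input(shape=(4, 33, 33))

# Stand-in for one_block_model: a valid 33x33 conv maps 65x65 -> 33x33.
first_out = Conv2D(5, 33, data_format='channels_first')(big)
merged = Concatenate(axis=1)([small, first_out])   # stack along channels
second_out = Conv2D(5, 33, data_format='channels_first')(merged)  # -> (5, 1, 1)
outputs = Activation('softmax')(Reshape((5,))(second_out))
dcnn = Model(inputs=[big, small], outputs=outputs)
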
github pinkeshbadjatiya/neuralTextSegmentation: SYSTEM/lstm_2dSent_15Len_TimDisConv.py
def lstm_model(sequences_length_for_training, embedding_dim, embedding_matrix, vocab_size):

    which_model = 2
    # Look back is equal to -INF
    # This model creates a Stateful LSTM with lookback of the whole document
    # Input should be of the format (TOTAL_DOCUMENTS, TOTAL_SEQUENCES, SEQUENCE_DIM)
    # Also train using the custom trainer

    # Convolution layers
    print "Building Convolution layers"
    ngram_filters = [1, 2, 3, 4, 5]
    conv_hidden_units = [300, 300, 300, 300, 300]
    
    print 'Build MAIN model...'
    #pdb.set_trace()
    main_input = Input(shape=(SEQUENCES_LENGTH_FOR_TRAINING, embedding_dim), dtype='float32', name='main_input')
    embedded_input = TimeDistributed(Embedding(vocab_size + 1, GLOVE_EMBEDDING_DIM, weights=[embedding_matrix], input_length=embedding_dim, init='uniform'))(main_input)
    convs = []
    for n_gram, hidden_units in zip(ngram_filters, conv_hidden_units):
        conv = TimeDistributed(Convolution1D(nb_filter=hidden_units,
                             filter_length=n_gram,
                             border_mode='same',
                             activation='relu'))(embedded_input)
        flattened = TimeDistributed(Flatten())(conv)
        #pool = GlobalMaxPooling1D()(conv)
        convs.append(flattened)
    convoluted_input = Merge(mode='concat')(convs)
    CONV_DIM = sum(conv_hidden_units)

    # Dropouts for LSTMs can be merged
    #ForLSTM_stateful = LSTM(300, batch_input_shape=(1, embedding_dim, CONV_DIM), return_sequences=False, stateful=True)(convoluted_input)
    #RevLSTM_stateful = LSTM(300, batch_input_shape=(1, embedding_dim, CONV_DIM), return_sequences=False, stateful=True, go_backwards=True)(convoluted_input)
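
Merge(mode='concat'), Convolution1D, and the nb_filter/filter_length keywords above are Keras 1 APIs. A rough modern equivalent of the Input-plus-parallel-n-gram-convolutions idea, dropping the TimeDistributed document dimension for brevity (all sizes invented):

from keras.layers import (Input, Embedding, Conv1D, GlobalMaxPooling1D,
                          Concatenate, Dense)
from keras.models import Model

vocab_size, seq_len, embed_dim = 10000, 50, 100   # invented sizes

words = Input(shape=(seq_len,), dtype='int32', name='main_input')
embedded = Embedding(vocab_size + 1, embed_dim)(words)

# One convolution branch per n-gram width, max-pooled and concatenated.
convs = []
for n_gram in [1, 2, 3, 4, 5]:
    conv = Conv1D(64, n_gram, padding='same', activation='relu')(embedded)
    convs.append(GlobalMaxPooling1D()(conv))
merged = Concatenate()(convs)
outputs = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=words, outputs=outputs)
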
github richliao/textClassifier: textClassifierConv.py
embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
        
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)

# applying a more complex convolutional approach
convs = []
filter_sizes = [3,4,5]

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)

for fsz in filter_sizes:
    l_conv = Conv1D(nb_filter=128,filter_length=fsz,activation='relu')(embedded_sequences)
    l_pool = MaxPooling1D(5)(l_conv)
    convs.append(l_pool)
    
l_merge = Merge(mode='concat', concat_axis=1)(convs)
l_cov1= Conv1D(128, 5, activation='relu')(l_merge)
l_pool1 = MaxPooling1D(5)(l_cov1)
l_cov2 = Conv1D(128, 5, activation='relu')(l_pool1)
l_pool2 = MaxPooling1D(30)(l_cov2)
l_flat = Flatten()(l_pool2)
l_dense = Dense(128, activation='relu')(l_flat)
preds = Dense(2, activation='softmax')(l_dense)
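
The dtype='int32' on Input matters here: the tensor carries token indices for the Embedding lookup rather than floats. Merge(mode='concat', concat_axis=1) is again Keras 1; with the current Concatenate the multi-filter-size block can be sketched as below. A zero matrix stands in for the real pretrained embedding_matrix, and input_length is dropped since Input already fixes the sequence length:

import numpy as np
from keras.layers import (Input, Embedding, Conv1D, MaxPooling1D,
                          Concatenate, Flatten, Dense)
from keras.models import Model

MAX_SEQUENCE_LENGTH, EMBEDDING_DIM, vocab = 1000, 100, 20000  # invented sizes
embedding_matrix = np.zeros((vocab + 1, EMBEDDING_DIM))       # stand-in weights

embedding_layer = Embedding(vocab + 1, EMBEDDING_DIM,
                            weights=[embedding_matrix], trainable=True)

sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)

branches = []
for fsz in [3, 4, 5]:
    conv = Conv1D(128, fsz, activation='relu')(embedded_sequences)
    branches.append(MaxPooling1D(5)(conv))
l_merge = Concatenate(axis=1)(branches)

x = MaxPooling1D(5)(Conv1D(128, 5, activation='relu')(l_merge))
preds = Dense(2, activation='softmax')(Flatten()(x))
model = Model(sequence_input, preds)
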
github Pendulibrium/ai-visual-storytelling-seq2seq: seq2seqbuilder.py
config['recurrent_dropout'] = 0.0
            encoder = layers.deserialize({'class_name': encoder.__class__.__name__, 'config': config})

            if i == 0:
                encoder_outputs = encoder(mask_output)
                encoder.set_weights(weights)
                latent_dim = encoder.get_config()['units']
            else:
                encoder_outputs = encoder(encoder_outputs[0])
                encoder.set_weights(weights)

        encoder_states = encoder_outputs[1:]

        if include_sentence_encoder:

            encoder_sentence_inputs = Input(shape=(22,))
            initial_input = [encoder_inputs, encoder_sentence_inputs]

            sentence_encoder_embedding_layer = model.get_layer('sentence_embedding_layer')
            sentence_embedding_outputs = sentence_encoder_embedding_layer(encoder_sentence_inputs)

            if attention:
                sentence_encoder_outputs, initial_encoder_states, new_latent_dim = sentence_encoder.get_last_layer_inference(
                    model,
                    encoder_states,
                    sentence_embedding_outputs,
                    attention=attention)
                #this is just for now, because the our model only accepts only the hidden state from the image encoder
                initial_encoder_states = encoder_states[0]
            else:
                initial_encoder_states, new_latent_dim = sentence_encoder.get_last_layer_inference(model,
                                                                                                   encoder_states,
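
The Input(shape=(22,)) here is a fixed-length sentence of token ids that gets its own embedding before joining the image encoder's states. A stripped-down sketch of a sentence encoder built that way (the vocabulary and dimensions are invented):

from keras.layers import Input, Embedding, LSTM
from keras.models import Model

sentence_inputs = Input(shape=(22,))              # 22 token ids per sentence
embedded = Embedding(5000, 128)(sentence_inputs)  # invented vocab/width
_, state_h, state_c = LSTM(256, return_state=True)(embedded)
sentence_encoder = Model(sentence_inputs, [state_h, state_c])
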
github czhu12/light_bulb: light_bulb/models/rnn_model.py
def get_classifier_decoder(self, num_classes):
        vec_input = Input(shape=(None, self.hidden_size,))
        x = Lambda(lambda x: x[:, -1, :], output_shape=(self.hidden_size,))(vec_input)
        x = BatchNormalization()(x)
        x = Dropout(0.9)(x)
        decode = Dense(num_classes, activation='softmax')(x)
        model = Model(vec_input, decode)
        return model
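
Input(shape=(None, self.hidden_size)) accepts a variable-length sequence of RNN feature vectors; the Lambda keeps only the last timestep. A self-contained version of the same decoder (hidden_size and num_classes invented):

from keras.layers import Input, Lambda, BatchNormalization, Dropout, Dense
from keras.models import Model

hidden_size, num_classes = 64, 5   # invented sizes

vec_input = Input(shape=(None, hidden_size))   # any sequence length
last = Lambda(lambda t: t[:, -1, :],
              output_shape=(hidden_size,))(vec_input)  # final timestep only
x = BatchNormalization()(last)
x = Dropout(0.9)(x)
decode = Dense(num_classes, activation='softmax')(x)
model = Model(vec_input, decode)
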
github LuEE-C/PPO-Keras: Main.py
def build_actor_continuous(self):
        state_input = Input(shape=(NUM_STATE,))
        advantage = Input(shape=(1,))
        old_prediction = Input(shape=(NUM_ACTIONS,))

        x = Dense(HIDDEN_SIZE, activation='tanh')(state_input)
        for _ in range(NUM_LAYERS - 1):
            x = Dense(HIDDEN_SIZE, activation='tanh')(x)

        out_actions = Dense(NUM_ACTIONS, name='output', activation='tanh')(x)

        model = Model(inputs=[state_input, advantage, old_prediction], outputs=[out_actions])
        model.compile(optimizer=Adam(lr=LR),
                      loss=[proximal_policy_optimization_loss_continuous(
                          advantage=advantage,
                          old_prediction=old_prediction)])
        model.summary()
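
Note the trick above: advantage and old_prediction are Inputs that never touch the forward pass; they exist so the custom loss can close over them at compile time, which works in the graph-mode Keras this project targets. A toy sketch of that wiring with an invented surrogate loss, not the project's actual PPO loss:

from keras import backend as K
from keras.layers import Input, Dense
from keras.models import Model

NUM_STATE, NUM_ACTIONS, HIDDEN_SIZE = 8, 2, 64   # invented sizes

state_input = Input(shape=(NUM_STATE,))
advantage = Input(shape=(1,))           # used only inside the loss
old_prediction = Input(shape=(NUM_ACTIONS,))  # used only inside the loss

x = Dense(HIDDEN_SIZE, activation='tanh')(state_input)
out_actions = Dense(NUM_ACTIONS, activation='tanh')(x)

def make_loss(advantage, old_prediction):
    # Toy advantage-weighted loss closed over the extra input tensors.
    def loss(y_true, y_pred):
        drift = K.mean(K.square(y_pred - old_prediction), axis=-1)
        return -K.mean(advantage * K.sum(y_true * y_pred, axis=-1) - drift)
    return loss

model = Model(inputs=[state_input, advantage, old_prediction],
              outputs=[out_actions])
model.compile(optimizer='adam', loss=make_loss(advantage, old_prediction))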