How to use keras.layers.Dense in Keras

To help you get started, we’ve selected a few keras.layers.Dense examples, based on popular ways it is used in public projects.

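Before the project snippets below, here is a minimal standalone sketch (not taken from any of the listed repositories) of the basic Dense call: Dense(units, activation=...) computes output = activation(dot(input, kernel) + bias), and the first layer of a Sequential model needs an input_dim or input_shape. All layer sizes here are illustrative.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Two stacked Dense layers; 16 input features and 32 hidden units are made-up sizes.
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=16))   # (batch, 16) -> (batch, 32)
model.add(Dense(1, activation='sigmoid'))               # single probability output
model.compile(optimizer='adam', loss='binary_crossentropy')

x = np.random.rand(8, 16).astype('float32')             # dummy batch of 8 samples
print(model.predict(x).shape)                           # (8, 1)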

github tatsy / keras-generative / models / cvae.py View on Github external
def build_decoder(self):
        z_inputs = Input(shape=(self.z_dims,))
        c_inputs = Input(shape=(self.num_attrs,))
        z = Concatenate()([z_inputs, c_inputs])

        w = self.input_shape[0] // (2 ** 3)
        x = Dense(w * w * 256)(z)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        x = Reshape((w, w, 256))(x)

        x = BasicDeconvLayer(filters=256, strides=(2, 2))(x)
        x = BasicDeconvLayer(filters=128, strides=(2, 2))(x)
        x = BasicDeconvLayer(filters=64, strides=(2, 2))(x)
        x = BasicDeconvLayer(filters=3, strides=(1, 1), bnorm=False, activation='tanh')(x)

        return Model([z_inputs, c_inputs], x)
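In this decoder, Dense expands the concatenated latent/attribute vector into w * w * 256 values that Reshape then turns into a spatial feature map, so the Dense unit count must equal the product of the target shape. A stripped-down, hedged sketch of just that pattern, with made-up sizes and without the repository's BasicDeconvLayer:

from keras.layers import Input, Dense, Reshape
from keras.models import Model

# Illustrative sizes: a 64-dim latent vector expanded to a 4x4x256 feature map.
z = Input(shape=(64,))
x = Dense(4 * 4 * 256, activation='relu')(z)   # units = product of the target shape
x = Reshape((4, 4, 256))(x)                    # ready for transposed convolutions
decoder_stub = Model(z, x)
decoder_stub.summary()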
github shivam5992 / language-modelling / model.py View on Github external
def create_model(predictors, label, max_sequence_len, total_words):
	
	model = Sequential()
	model.add(Embedding(total_words, 10, input_length=max_sequence_len-1))
	model.add(LSTM(150, return_sequences = True))
	# model.add(Dropout(0.2))
	model.add(LSTM(100))
	model.add(Dense(total_words, activation='softmax'))

	model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
	earlystop = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')
	model.fit(predictors, label, epochs=100, verbose=1, callbacks=[earlystop])
	print(model.summary())
	return model
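Here the final Dense(total_words, activation='softmax') maps the LSTM's last hidden state to a probability distribution over the vocabulary, which is why the loss is categorical_crossentropy. A minimal hedged sketch of that output pattern; the vocabulary size and sequence length are invented values, not the project's:

from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

# Illustrative values: 5000-word vocabulary, sequences of length 19.
model = Sequential()
model.add(Embedding(5000, 10, input_length=19))   # (batch, 19) -> (batch, 19, 10)
model.add(LSTM(100))                              # (batch, 100)
model.add(Dense(5000, activation='softmax'))      # probability over the vocabulary
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()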
github polyaxon / polyaxon / examples / in_cluster / keras / mnist / model.py View on Github external
def train(conv1_size, conv2_size, dropout, hidden1_size, optimizer, log_learning_rate, epochs):
    model = Sequential()
    model.add(Conv2D(filters=conv1_size,
                     kernel_size=(3, 3),
                     activation='relu',
                     input_shape=x_train.shape[1:]))
    model.add(Conv2D(filters=conv2_size,
                     kernel_size=(3, 3),
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(Dense(hidden1_size, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(
        optimizer=OPTIMIZERS[optimizer](lr=10 ** log_learning_rate),
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )

    model.fit(x_train, y_train, epochs=epochs, batch_size=100)

    return model.evaluate(x_test, y_test)[1]
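In this example the convolutional features are flattened before the two Dense layers: the hidden Dense infers its input size from whatever Flatten produces, and the final Dense(10, activation='softmax') has one unit per MNIST class. A cut-down hedged sketch of just that classifier tail; the (12, 12, 32) feature-map shape is illustrative, not taken from the project:

from keras.models import Sequential
from keras.layers import Flatten, Dense

model = Sequential()
model.add(Flatten(input_shape=(12, 12, 32)))   # 12*12*32 = 4608 features
model.add(Dense(128, activation='relu'))       # hidden layer
model.add(Dense(10, activation='softmax'))     # one unit per class
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()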
github FitMachineLearning / FitML / SelectiveMemory / QasFeature / LunarLanderContinuous_SMQ_v1.py View on Github external
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(64*2, activation='relu'))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(256, activation='relu'))
#Qmodel.add(Dropout(0.2))
Qmodel.add(Dense(dataY.shape[1]))
opt = optimizers.adam(lr=learning_rate)
Qmodel.compile(loss='mse', optimizer=opt, metrics=['accuracy'])


#initialize the action predictor model
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(2048, activation='tanh', input_dim=apdataX.shape[1]))
#action_predictor_model.add(Dropout(0.2))
action_predictor_model.add(Dense(64*2, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(256, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
action_predictor_model.add(Dense(apdataY.shape[1]))

opt2 = optimizers.adam(lr=apLearning_rate)
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])



#load previous model weights if they exist
if load_previous_weights:
    dir_path = os.path.realpath(".")
    fn = dir_path + "/"+weigths_filename
    print("filepath ", fn)
    if  os.path.isfile(fn):
github piojanu / World-Models / vision.py View on Github external
mu, logvar = args
        # NOTE: K.exp(logvar / 2) = var^(1/2) = std. deviation
        return mu + K.exp(logvar / 2) * K.random_normal(
            shape=(batch_size, vae_params['latent_space_dim']))

    z = Lambda(sample, output_shape=(vae_params['latent_space_dim'],))([mu, logvar])

    encoder = Model(encoder_input, [mu, logvar, z], name='Encoder')
    encoder.summary(print_fn=lambda x: log.debug('%s', x))

    # Decoder z -> img #

    decoder_input = Input(shape=(vae_params['latent_space_dim'],))

    h = Reshape(h_shape[1:])(
        Dense(h_shape[1] * h_shape[2] * h_shape[3], activation='relu')(decoder_input)
    )

    h = Conv2DTranspose(128, activation='relu', kernel_size=4, strides=2)(h)     # -> 6x6x128
    h = Conv2DTranspose(64, activation='relu', kernel_size=4, strides=2)(h)      # -> 14x14x64
    h = Conv2DTranspose(32, activation='relu', kernel_size=4, strides=2)(h)      # -> 30x30x32
    out = Conv2DTranspose(3, activation='sigmoid', kernel_size=6, strides=2)(h)  # -> 64x64x3

    decoder = Model(decoder_input, out, name='Decoder')
    decoder.summary(print_fn=lambda x: log.debug('%s', x))

    # VAE loss #

    def elbo_loss(target, pred):
        # NOTE: You use K.reshape to preserve batch dim. K.flatten doesn't work like flatten layer
        #       and flatten batch dim. too!
        # NOTE 2: K.binary_crossentropy does element-wise crossentropy as you need (it calls
github neka-nat / async-rl-noreward / model.py View on Github external
subsample=(2, 2), dim_ordering='th')(state)
    h = ELU(alpha=1.0)(h)
    h = Convolution2D(32, 3, 3, border_mode='same',
                      subsample=(2, 2), dim_ordering='th')(h)
    h = ELU(alpha=1.0)(h)
    h = Convolution2D(32, 3, 3, border_mode='same',
                      subsample=(2, 2), dim_ordering='th')(h)
    h = ELU(alpha=1.0)(h)
    h = Convolution2D(32, 3, 3, border_mode='same',
                      subsample=(2, 2), dim_ordering='th')(h)
    h = ELU(alpha=1.0)(h)
    h = Flatten()(h)

    value = Dense(256, activation='relu')(h)
    value = Dense(1, activation='linear', name='value')(value)
    policy = Dense(output_shape, activation='sigmoid', name='policy')(h)

    value_network = Model(input=state, output=value)
    policy_network = Model(input=state, output=policy)

    adventage = Input(shape=(1,))
    train_network = Model(input=[state, adventage], output=[value, policy])

    return value_network, policy_network, train_network, adventage
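The snippet above feeds the same flattened features into two separate Dense heads: a two-layer value head ending in a single linear unit and a policy head with one unit per action. A minimal hedged sketch of that branching pattern with the functional API; the 256-dim feature vector and 4 actions are made-up sizes:

from keras.layers import Input, Dense
from keras.models import Model

# Two Dense heads sharing one feature vector (illustrative sizes).
features = Input(shape=(256,))
value = Dense(256, activation='relu')(features)
value = Dense(1, activation='linear', name='value')(value)         # scalar state value
policy = Dense(4, activation='sigmoid', name='policy')(features)   # per-action output

value_network = Model(features, value)
policy_network = Model(features, policy)
value_network.summary()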
github tatsy / keras-generative / models / vae.py View on Github external
def build_encoder(self):
        inputs = Input(shape=self.input_shape)

        x = BasicConvLayer(filters=64, strides=(2, 2))(inputs)
        x = BasicConvLayer(filters=128, strides=(2, 2))(x)
        x = BasicConvLayer(filters=256, strides=(2, 2))(x)
        x = BasicConvLayer(filters=256, strides=(2, 2))(x)

        x = Flatten()(x)
        x = Dense(1024)(x)
        x = Activation('relu')(x)

        z_avg = Dense(self.z_dims)(x)
        z_log_var = Dense(self.z_dims)(x)

        z_avg = Activation('linear')(z_avg)
        z_log_var = Activation('linear')(z_log_var)

        return Model(inputs, [z_avg, z_log_var])
github LongxingTan / Text-classification / models_keras_archives / CNN_keras.py View on Github external
conv1=Conv2D(filters=filter_num,kernel_size=(filter_sizes[0],embedding_dim),padding='valid',activation='relu')(reshape)
    drop1=Dropout(0.5)(conv1)
    pool1=MaxPool2D(pool_size=(2,1))(drop1)

    conv2=Conv2D(filters=filter_num,kernel_size=(filter_sizes[1],embedding_dim),padding='valid',activation='relu')(reshape)
    drop2=Dropout(0.5)(conv2)
    pool2=MaxPool2D(pool_size=(2,1))(drop2)

    conv3=Conv2D(filters=filter_num,kernel_size=(filter_sizes[2],embedding_dim),padding='valid',activation='relu')(reshape)
    drop3=Dropout(0.5)(conv3)
    pool3=MaxPool2D(pool_size=(2,1))(drop3)

    merged=Concatenate(axis=1)([pool1,pool2,pool3])
    flatten=Flatten()(merged)
    #dropout
    outputs=Dense(units=n_classes,activation='softmax')(flatten)
    #outputs=Dense(200,activation='sigmoid')(dense1)

    model=Model(inputs=inputs,outputs=outputs)
    model._make_predict_function()
    model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
    print(model.summary())
    #plot_model(model,show_shapes=True,to_file='multichannel.png')
    #model.save('CAC_keras_cnn.h5')
    cnn_keras = model.to_json()
    with open("cnn_keras.json", "w") as json_file:
        json_file.write(cnn_keras)
    return model
github jsyoon0823 / INVASE / INVASE.py View on Github external
def build_generator(self):

        model = Sequential()
        
        model.add(Dense(100, activation=self.activation, name = 's/dense1', kernel_regularizer=regularizers.l2(1e-3), input_dim = self.input_shape))
        model.add(Dense(100, activation=self.activation, name = 's/dense2', kernel_regularizer=regularizers.l2(1e-3)))
        model.add(Dense(self.input_shape, activation = 'sigmoid', name = 's/dense3', kernel_regularizer=regularizers.l2(1e-3)))
        
        model.summary()

        feature = Input(shape=(self.input_shape,), dtype='float32')
        select_prob = model(feature)

        return Model(feature, select_prob)
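INVASE's selector network passes kernel_regularizer=regularizers.l2(1e-3) and an explicit name to each Dense layer, adding an L2 weight penalty to the loss and making layers easy to look up later. A minimal hedged sketch of a regularized Dense stack; the 20-feature input dimension is illustrative, not taken from INVASE:

from keras.models import Sequential
from keras.layers import Dense
from keras import regularizers

# Illustrative: 20 input features, per-feature selection probabilities as output.
model = Sequential()
model.add(Dense(100, activation='relu', name='s/dense1',
                kernel_regularizer=regularizers.l2(1e-3), input_dim=20))
model.add(Dense(20, activation='sigmoid', name='s/dense3',
                kernel_regularizer=regularizers.l2(1e-3)))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()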
github driving-behavior / DBNet / models / inception_v4_pn.py View on Github external
img_input = Input(shape=(img_rows, img_cols, 3), name='data')

    # Make inception base
    net = inception_v4_base(img_input)

    # Final pooling and prediction

    # 8 x 8 x 1536
    net_old = AveragePooling2D((8,8), padding='valid')(net)

    # 1 x 1 x 1536
    net_old = Dropout(dropout_keep_prob)(net_old)
    net_old = Flatten()(net_old)

    # 1536
    predictions = Dense(units=1001, activation='softmax')(net_old)

    model = Model(img_input, predictions, name='inception_v4')

    weights_path = 'utils/weights/inception-v4_weights_tf.h5'
    assert (os.path.exists(weights_path))
    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    net_ft = AveragePooling2D((8,8), padding='valid')(net)
    net_ft = Dropout(dropout_keep_prob)(net_ft)
    net_ft = Flatten()(net_ft)
    net = Dense(256, name='fc_mid')(net_ft)

    model = Model(img_input, net, name='inception_v4')
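After loading the pretrained weights, the example builds a second Model whose head is a fresh Dense layer on the same base, rather than popping the old softmax layer. A minimal hedged sketch of that replace-the-head idea; a tiny stand-in base is used here instead of the Inception-v4 trunk, and all sizes are illustrative:

from keras.layers import Input, Dense, Flatten
from keras.models import Model

img_input = Input(shape=(8, 8, 16))
base = Flatten()(img_input)                          # stand-in for the shared feature extractor

old_head = Dense(1001, activation='softmax')(base)   # original classification head
pretrained = Model(img_input, old_head)

new_head = Dense(256, name='fc_mid')(base)           # replacement head for fine-tuning
finetune_model = Model(img_input, new_head)
finetune_model.summary()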