How to use the keras.layers.core.Dense function in Keras

To help you get started, we’ve selected a few Keras examples based on popular ways Dense is used in public projects.
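Before the project snippets, here is a minimal sketch of the layer itself, written against the Keras 2 API; the layer sizes are placeholders, not taken from any project below. Dense implements output = activation(dot(input, kernel) + bias), i.e. a fully connected layer.

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(64, activation='relu', input_dim=100))  # only the first layer needs the input size
model.add(Dense(10, activation='softmax'))              # e.g. a 10-way classification head
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])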

github thomaskuestner / CNNArt / networks / multiclass / CNN2D / SENets / multiclass_ResNet-56.py View on Github external
weights=None,
                          border_mode='valid',
                          subsample=(1, 1),
                          W_regularizer=l2(1e-6)))
    cnn.add(Activation('relu'))

    # cnn.add(pool2(pool_size=(2, 2), strides=None, border_mode='valid', dim_ordering='th'))

    cnn.add(Flatten())
    # cnn.add(Dense(input_dim= 100,
    #              output_dim= 100,
    #              init = 'normal',
    #              #activation = 'sigmoid',
    #              W_regularizer='l2'))
    # cnn.add(Activation('sigmoid'))
    cnn.add(Dense(input_dim=100,
                  output_dim=2,
                  init='normal',
                  # activation = 'sigmoid',
                  W_regularizer='l2'))
    cnn.add(Activation('softmax'))

    #opti = SGD(lr={{choice([0.1, 0.01, 0.05, 0.005, 0.001])}}, momentum=1e-8, decay=0.1, nesterov=True)
    #cnn.compile(loss='categorical_crossentropy', optimizer=opti)

    epochs = 300

    result = cnn.fit(X_train, Y_train,
                     batch_size=128,  # {{choice([64, 128])}}
                     nb_epoch=epochs,
                     show_accuracy=True,
                     verbose=2,
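
This snippet targets the Keras 1 API (output_dim, init, W_regularizer, nb_epoch, show_accuracy). A hedged Keras 2 translation of the Dense head and the fit call, reusing the snippet's own names:

from keras.layers import Dense, Activation
from keras.regularizers import l2

cnn.add(Dense(units=2,                        # Keras 2: output_dim -> units
              kernel_initializer='normal',    # init -> kernel_initializer
              kernel_regularizer=l2(1e-6)))   # W_regularizer -> kernel_regularizer; strength is an example value
cnn.add(Activation('softmax'))

result = cnn.fit(X_train, Y_train,
                 batch_size=128,
                 epochs=300,   # nb_epoch -> epochs
                 verbose=2)    # show_accuracy was removed; pass metrics=['accuracy'] to compile instead
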
github niderhoff / reddit-cnn / benchmarks / __init__.py View on Github external
print("Test fraction correct (LR-Accuracy) = {:.6f}".format(
                  lr.score(X_test, y_test)))
        return pred_y
    elif (type == 'k1'):
        # 2-class logistic regression in Keras
        model = Sequential()
        model.add(Dense(1, activation='sigmoid', input_dim=X_train.shape[1]))
        model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                      metrics=['accuracy'])
        model.fit(X_train, y_train, nb_epoch=nb_epoch,
                  validation_data=validation_data, verbose=verb)
        return model
    elif (type == 'k2'):
        # logistic regression with L1 and L2 regularization
        model = Sequential()
        model.add(Dense(1, activation='sigmoid', W_regularizer=reg,
                  input_dim=X_train.shape[1]))
        model.compile(optimizer='rmsprop', loss='binary_crossentropy',
                      metrics=['accuracy'])
        model.fit(X_train, y_train, nb_epoch=nb_epoch,
                  validation_data=validation_data, verbose=verb)
        return model
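
The reg object above is passed in from outside the snippet. One plausible way to construct it, shown in Keras 2 spelling, where W_regularizer became kernel_regularizer and the Keras 1 factory l1l2 became l1_l2; the penalty strengths and input size are placeholders:

from keras import regularizers
from keras.layers import Dense

reg = regularizers.l1_l2(l1=0.01, l2=0.01)  # hypothetical L1/L2 penalty strengths
layer = Dense(1, activation='sigmoid', kernel_regularizer=reg, input_dim=20)  # placeholder input_dim
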
github thomaskuestner / CNNArt / GUI / PyQt / networks / motion / CNN2D / MNetArt.py View on Github external
stage2_res1_t = fCreateMNet_Block(stage2_inp_t, 32, l2_reg=l2_reg)
    stage2_res2_t = fCreateMNet_Block(stage2_res1_t, 48, forwarding=False)

    stage3_inp_t = fCreateMaxPooling2D(stage2_res2_t, stride=(2, 2))
    stage3_inp_t = concatenate([stage3_inp_t, l_w3_t], axis=1)
    stage3_res1_t = fCreateMNet_Block(stage3_inp_t, 48, l2_reg=l2_reg)
    stage3_res2_t = fCreateMNet_Block(stage3_res1_t, 64, forwarding=False, l2_reg=l2_reg)

    stage4_inp_t = fCreateMaxPooling2D(stage3_res2_t, stride=(2, 2))
    stage4_inp_t = concatenate([stage4_inp_t, l_w4_t], axis=1)
    stage4_res1_t = fCreateMNet_Block(stage4_inp_t, 64, l2_reg=l2_reg)
    stage4_res2_t = fCreateMNet_Block(stage4_res1_t, 128, forwarding=False, l2_reg=l2_reg)

    after_flat_t = Flatten()(stage4_res2_t)

    after_dense_t = Dense(units=2,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(l2_reg))(after_flat_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '3stages_l2{}'.format(l2_reg)
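
Unlike the earlier snippets, this one already uses Keras 2 names (units, kernel_initializer, kernel_regularizer) and the functional API, where Dense is called on a tensor rather than added to a Sequential model. A self-contained sketch of the same head pattern, with placeholder shapes and regularization strength:

from keras.layers import Input, Flatten, Dense, Activation
from keras.models import Model
from keras.regularizers import l2

input_t = Input(shape=(8, 8, 64))                      # placeholder feature-map shape
flat_t = Flatten()(input_t)
dense_t = Dense(units=2,
                kernel_initializer='he_normal',
                kernel_regularizer=l2(1e-4))(flat_t)   # example l2_reg value
output_t = Activation('softmax')(dense_t)
model = Model(inputs=[input_t], outputs=[output_t])
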
github bashhike / video-action-recognition / combined / combined_average_predict.py View on Github external
def prepareFeaturesModel(nb_classes,requiredLines):
	print "Preparing architecture of feature model..."
	f_model = Sequential()
	f_model.add(Dense(512, input_shape=(167,requiredLines)))
	f_model.add(Flatten())
	f_model.add(Dense(nb_classes, W_regularizer=l2(0.1)))
	f_model.add(Activation('linear'))
	f_model.add(Activation('softmax'))
	return f_model
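
Two details worth noting here. When Dense receives a rank-3 input, as with input_shape=(167, requiredLines), Keras 2 applies it independently along the last axis, which is why the Flatten afterwards still sees a 3D tensor. And Activation('linear') is the identity, so only the final softmax has any effect. A quick shape check with a made-up requiredLines:

from keras.models import Sequential
from keras.layers import Dense

m = Sequential()
m.add(Dense(512, input_shape=(167, 30)))  # hypothetical requiredLines=30
print(m.output_shape)                     # (None, 167, 512): Dense maps only the last axis
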
github jjakimoto / DQN / model / ddpg.py View on Github external
receive converted tensors: raw_data, smoothed_data, and downsampled_data
        """
        # lower layer
        lower_model = [self.build_network(self.model_config['critic_lower'], input_shape=(self.history_length, self.n_stock, 1)) 
                       for _ in range(1  + self.n_smooth + self.n_down)]
        merged = Merge(lower_model, mode='concat')
        # upper layer
        upper_model = self.build_network(self.model_config['critic_upper'],  model=merged)
        # action layer
        action = self.build_network(self.model_config['critic_action'], input_shape=(self.n_stock,), is_conv=False)
        # output layer
        merged = Merge([upper_model, action], mode='mul')
        model = Sequential()
        model.add(merged)
        model.add(Dense(1))
        return model
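
The Merge layer used here was removed in Keras 2; its modes map onto dedicated functional-API layers (mode='concat' became concatenate, mode='mul' became multiply). A rough modern equivalent of the multiply-then-Dense output stage, with placeholder widths:

from keras.layers import Input, Dense, multiply
from keras.models import Model

a = Input(shape=(16,))            # placeholder widths for the two branches
b = Input(shape=(16,))
merged = multiply([a, b])         # Keras 2 counterpart of Merge(..., mode='mul')
out = Dense(1)(merged)            # scalar critic output, as in the snippet
critic = Model(inputs=[a, b], outputs=out)
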
github BenBBear / LipRead / lipreadtrain.py View on Github external
# TODO
        # Reshape -> conv -> reshape
        # model.add(TimeDistributed(Convolution1D(nb_filter, filter_length)))


    print("Adding Masking Layer...")
    model.add(Masking(mask_value=0.0))
    
    print("Adding First LSTM Layer...")
    model.add(LSTM(fc_size, return_sequences=True))

    print("Adding Second LSTM Layer...")
    model.add(LSTM(fc_size, return_sequences=False))

    print("Adding Final Dense Layer...")
    model.add(Dense(dict_size))

    print("Adding Softmax Layer...")
    model.add(Activation('softmax'))

    print("Compiling the model to runnable code, which will take a long time...")
    if optimizer == 'sgd':
        optimizer = SGD(lr=lr, momentum=momentum, decay=decay, nesterov=nesterov)
    elif optimizer == 'rmsprop':
        optimizer = RMSprop(lr=lr, rho=rho, epsilon=epsilon)
    elif optimizer == 'adagrad':
        optimizer = Adagrad(lr=lr, epsilon=epsilon)
    
    ## Takes my MacBook Pro 1-2 min to finish.
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)

    end_time = time.time()
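
Because the second LSTM is built with return_sequences=False, its output is a 2D tensor of shape (batch, fc_size), which is exactly what Dense expects. The final Dense plus Activation pair could equivalently be folded into one layer:

model.add(Dense(dict_size, activation='softmax'))  # same as Dense(dict_size) followed by Activation('softmax')
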
github CRLab / shape_completion_experiments / experiments / old / reconstruction_3d_variable_shrec_30.py View on Github external
# output: 64 cubes of side length 5-3+1 = 3
    model.add(Convolution3D(nb_filter=nb_filter_out, stack_size=nb_filter_in,
                            nb_row=filter_size, nb_col=filter_size,
                            nb_depth=filter_size, border_mode='valid', activation='relu', init='he_normal'))
    # During training: drop (set to zero) each of the current outputs with a 0.5
    # probability.
    # During testing: multiply each of the current outputs by that probability.
    model.add(Dropout(.5))

    dim = 3

    # output: a vector of size 64*3*3*3 = 1728
    model.add(Flatten())
    # output: a vector of size 3000
    model.add(Dense(nb_filter_out * dim * dim * dim, 3000, activation='relu', init='he_normal'))
    # output: a vector of size 4000
    model.add(Dense(3000, 4000, activation='relu', init='he_normal'))
    # output: a vector of size PATCH_SIZE*PATCH_SIZE*PATCH_SIZE
    model.add(Dense(4000, PATCH_SIZE * PATCH_SIZE * PATCH_SIZE, init='glorot_normal',
                    activation='sigmoid'))

    optimizer = RMSprop()
    model.compile(loss='binary_crossentropy', optimizer=optimizer)  # 'cross_entropy_error' is not a built-in Keras loss; binary crossentropy matches the sigmoid output

    return model
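
These Dense calls use the pre-1.0 Keras signature Dense(input_dim, output_dim, ...). From Keras 1 onward only the output size is passed and the input size is inferred from the previous layer; a hedged Keras 2 rendering of the first call:

model.add(Dense(3000, activation='relu', kernel_initializer='he_normal'))  # input size (64*3*3*3) inferred from Flatten()
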
github forcecore / Keras-GAN-Animeface-Character / nets.py View on Github external
x = conv2d( x, 256 )
    # 8x8

    x = conv2d( x, 512 )
    # 4x4

    if build_disc:
        x = Flatten()(x)
        # add 16 features. Run 1D conv of size 3.
        #x = MinibatchDiscrimination(16, 3)( x )

        #x = Dense(1024, kernel_initializer=Args.kernel_initializer)( x )
        #x = LeakyReLU(alpha=Args.alpha_D)( x )

        # 1 when "real", 0 when "fake".
        x = Dense(1, activation='sigmoid',
            kernel_initializer=Args.kernel_initializer)( x )
        return models.Model( inputs=face, outputs=x )
    else:
        # build encoder.
        x = Conv2D(Args.noise_shape[2], (4, 4), activation='tanh')(x)
        return models.Model( inputs=face, outputs=x )
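
A Dense(1, activation='sigmoid') head like the one above pairs with a binary cross-entropy loss for real/fake discrimination. A self-contained sketch of that pairing, with a placeholder feature-map shape standing in for the 4x4 output of the convolutions:

from keras.layers import Input, Flatten, Dense
from keras.models import Model

feat = Input(shape=(4, 4, 512))                          # placeholder discriminator features
score = Dense(1, activation='sigmoid')(Flatten()(feat))  # 1 = "real", 0 = "fake"
disc = Model(inputs=feat, outputs=score)
disc.compile(optimizer='adam', loss='binary_crossentropy')  # standard GAN discriminator loss
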
github root-project / root / tutorials / tmva / keras / GenerateModel.py View on Github external
from keras.models import Sequential  # needed for the model below
from keras.layers.core import Dense, Activation
from keras.regularizers import l2
from keras.optimizers import SGD

# Setup the model here
num_input_nodes = 4
num_output_nodes = 2
num_hidden_layers = 1
nodes_hidden_layer = 64
l2_val = 1e-5

model = Sequential()

# Hidden layer 1
# NOTE: Number of input nodes need to be defined in this layer
model.add(Dense(nodes_hidden_layer, activation='relu', W_regularizer=l2(l2_val), input_dim=num_input_nodes))

# Hidden layer 2 to num_hidden_layers
# NOTE: Here, you can do what you want
for k in range(num_hidden_layers-1):
    model.add(Dense(nodes_hidden_layer, activation='relu', W_regularizer=l2(l2_val)))

# Output layer
# NOTE: Use following output types for the different tasks
# Binary classification: 2 output nodes with 'softmax' activation
# Regression: 1 output with any activation ('linear' recommended)
# Multiclass classification: (number of classes) output nodes with 'softmax' activation
model.add(Dense(num_output_nodes, activation='softmax'))

# Compile model
# NOTE: Use following settings for the different tasks
# Any classification: 'categorical_crossentropy' is recommended loss function
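
The extract ends before the compile call, but given the SGD import and the comments above, a plausible completion would be:

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01),  # example learning rate, not from the project
              metrics=['accuracy'])
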
github lukaszbinden / pediatric-bone-age-prediction / src / models / arch_benchmark / ResnetXtrsna.py View on Github external
N = N[1:]  # remove the first block from block definition list
    filters_list = filters_list[1:]  # remove the first filter from the filter list

    # block 2 to N
    for block_idx, n_i in enumerate(N):
        for i in range(n_i):
            if i == 0:
                x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=2,
                                       weight_decay=weight_decay)
            else:
                x = __bottleneck_block(x, filters_list[block_idx], cardinality, strides=1,
                                       weight_decay=weight_decay)

    if include_top:
        x = GlobalAveragePooling2D()(x)
        x = Dense(nb_classes, use_bias=False, kernel_regularizer=l2(weight_decay),
                  kernel_initializer='he_normal', activation='softmax')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    return x
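
As in MNetArt above, Dense serves as the classification head, this time directly after global pooling. A self-contained sketch of the same pattern with placeholder shapes, a hypothetical nb_classes of 10, and an example weight_decay:

from keras.layers import Input, GlobalAveragePooling2D, Dense
from keras.models import Model
from keras.regularizers import l2

feat = Input(shape=(7, 7, 1024))           # placeholder feature-map shape
pooled = GlobalAveragePooling2D()(feat)    # (None, 1024)
probs = Dense(10, use_bias=False,          # hypothetical nb_classes=10
              kernel_regularizer=l2(5e-4), # example weight_decay
              kernel_initializer='he_normal',
              activation='softmax')(pooled)
head = Model(inputs=feat, outputs=probs)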