How to use the keras.layers.core.Activation layer in Keras

To help you get started, we’ve selected a few Keras examples based on popular ways the Activation layer is used in public projects.

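Activation applies an element-wise activation function, given by name or as a callable, to its input tensor. Before the community examples, here is a minimal sketch of the two common usage patterns; the surrounding model is illustrative only and not taken from any of the projects below:

from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation

# Sequential API: Activation as a standalone layer after Dense
model = Sequential()
model.add(Dense(64, input_shape=(20,)))
model.add(Activation('relu'))       # element-wise ReLU
model.add(Dense(10))
model.add(Activation('softmax'))    # turns the 10 outputs into probabilities
model.compile(loss='categorical_crossentropy', optimizer='adam')

# Functional API: Activation called on a tensor
inputs = Input(shape=(20,))
x = Dense(64)(inputs)
x = Activation('relu')(x)
outputs = Activation('softmax')(Dense(10)(x))
functional_model = Model(inputs=inputs, outputs=outputs)

Note that several of the snippets below target the legacy Keras 1.x API: nb_epoch, border_mode, init and show_accuracy correspond to epochs, padding and kernel_initializer in Keras 2, which removed show_accuracy in favor of metrics=['accuracy'].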

github keras-team / keras / tests / keras / test_sequential_model.py (View on GitHub)
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
github Riashat / Active-Learning-Bayesian-Convolutional-Neural-Networks / ConvNets / Cluster_Experiments / Dropout_Bayes_Segnet / Cluster_GPU_Bayes_Segnet.py (View on GitHub)
	model.add(Dropout(0.25))

	model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
	model.add(Activation('relu'))
	model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Dropout(0.25))


	model.add(Flatten())
	model.add(Dense(128))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))
	model.add(Dense(nb_classes))
	model.add(Activation('softmax'))

	model.compile(loss='categorical_crossentropy', optimizer='adam')
	hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
	Train_Result_Optimizer = hist.history
	Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
	Train_Loss = np.array([Train_Loss]).T
	Valid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))
	Valid_Loss = np.asarray([Valid_Loss]).T

	Pool_Train_Loss = Train_Loss
	Pool_Valid_Loss = Valid_Loss

	print('Evaluating Test Accuracy Without Acquisition')
	score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)

	all_accuracy = acc
github mrgloom / keras-semantic-segmentation-example / multilabel_segmentation / categorical_crossentropy_example.py (View on GitHub)
def get_model():
    
    inputs = Input((IMAGE_H, IMAGE_W, INPUT_CHANNELS))
    
    base = models.get_fcn_vgg16_32s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_fcn_vgg16_16s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_fcn_vgg16_8s(inputs, NUMBER_OF_CLASSES)
    #base = models.get_unet(inputs, NUMBER_OF_CLASSES)
    #base = models.get_segnet_vgg16(inputs, NUMBER_OF_CLASSES)
    
    # softmax
    reshape = Reshape((-1, NUMBER_OF_CLASSES))(base)
    act = Activation('softmax')(reshape)
    
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adadelta(), loss='categorical_crossentropy')
    
    #print(model.summary())
    #sys.exit()
    
    return model
github patrick-dd / landsat-landstats / cnn_copy.py (View on GitHub)
model.add(Convolution2D(256, 3, 3,
                        border_mode='same',
                        init='he_uniform'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
#model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(4096, init='he_uniform'))
model.add(Activation('relu'))
model.add(Dense(4096, init='he_uniform'))
model.add(Activation('relu'))

#model.add(Dropout(0.5))
model.add(Dense(1, init='glorot_uniform'))
model.add(Activation('linear'))

# setting optimizer parameters
adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=adam)  # pass the configured Adam instance; the string 'adam' would ignore the lr above

earlystop = callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                    verbose=1, mode='min')
checkpoint = callbacks.ModelCheckpoint('/tmp/weights.hdf5',
                                       monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
history = callbacks.History()

print("Starting training")
model.fit(X_train, y_train, batch_size=32, validation_split=0.15, nb_epoch=25,
        show_accuracy=False, callbacks = [earlystop, checkpoint, history])
github liuph0119 / Semantic_Segmentation_Keras / core / encoder / resnet_v2_separable.py (View on GitHub)
    :param block_name: string, name of the bottleneck block, default "block1".
    :param unit_name: string, name of the unit(residual block), default "unit1".
    :param weight_decay: float, default 1e-4.
    :param kernel_initializer: string, default "he_normal".
    :param bn_epsilon: float, default 1e-3.
    :param bn_momentum: float, default 0.99.

    :return: 4-D tensor, shape of (batch_size, height, width, channel).
    """
    depth_in = int(inputs.shape[-1])
    conv_name_base = block_name+"_"+unit_name+"_conv"
    bn_name_base = block_name+"_"+unit_name+"_bn"

    # pre-activation and batch normalization
    preact = BatchNormalization(name=bn_name_base+"0", epsilon=bn_epsilon, momentum=bn_momentum)(inputs)
    preact = Activation("relu")(preact)
    # determine convolutional or identity connection
    if depth_in == depth:
        x_shortcut = MaxPooling2D(pool_size=(1, 1), strides=stride)(inputs) if stride > 1 else inputs
    else:
        x_shortcut = Conv2D(depth, (1, 1), strides=(stride, stride), name=conv_name_base + "short",
                            use_bias=False, activation=None, kernel_initializer=kernel_initializer,
                            kernel_regularizer=l2(weight_decay))(preact)
        x_shortcut = BatchNormalization(name=bn_name_base + "short", epsilon=bn_epsilon,
                                        momentum=bn_momentum)(x_shortcut)

    x = SeparableConv2D(base_depth, (1, 1), strides=(1, 1), padding="same", name=conv_name_base + "2a",
                        use_bias=False, activation=None, kernel_initializer=kernel_initializer,
                        kernel_regularizer=l2(weight_decay))(preact)
    x = BatchNormalization(name=bn_name_base + "2a", epsilon=bn_epsilon, momentum=bn_momentum)(x)
    x = Activation("relu")(x)
github Riashat / Active-Learning-Bayesian-Convolutional-Neural-Networks / ConvNets / active_learning / Acquisition_Functions / BCNN_Maximal_Uncertainty / GPU / Variation_Ratio / Average_GPU_Variation_Ratio.py (View on GitHub)
		model.add(Dropout(0.25))

		model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
		model.add(Activation('relu'))
		model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
		model.add(Activation('relu'))
		model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
		model.add(Dropout(0.25))


		model.add(Flatten())
		model.add(Dense(128))
		model.add(Activation('relu'))
		model.add(Dropout(0.5))
		model.add(Dense(nb_classes))
		model.add(Activation('softmax'))

		model.compile(loss='categorical_crossentropy', optimizer='adam')
		hist = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_valid, Y_valid))
		Train_Result_Optimizer = hist.history
		Train_Loss = np.asarray(Train_Result_Optimizer.get('loss'))
		Train_Loss = np.array([Train_Loss]).T
		Valid_Loss = np.asarray(Train_Result_Optimizer.get('val_loss'))
		Valid_Loss = np.asarray([Valid_Loss]).T

		#Accumulate the training and validation/test loss after every pooling iteration - for plotting
		Pool_Valid_Loss = np.append(Pool_Valid_Loss, Valid_Loss, axis=1)
		Pool_Train_Loss = np.append(Pool_Train_Loss, Train_Loss, axis=1)	


		print('Evaluate Model Test Accuracy with pooled points')
github rakshithShetty / dnn-speech / mlpmodel / mlpClassifier.py (View on GitHub)
def build_model(self, params):
    hidden_layers = params['hidden_layers']
    input_dim = params['feat_size']
    output_dim = params['phone_vocab_size']
    drop_prob = params['drop_prob_encoder']
    self.nLayers = len(hidden_layers)
    # first layer takes input data
    self.model.add(Dense(hidden_layers[0], input_dim=input_dim, init='uniform'))
    self.model.add(Activation('sigmoid'))
    self.model.add(Dropout(drop_prob))
    # hidden layers
    for i in xrange(1, len(hidden_layers)):  # xrange: this project targets Python 2; use range on Python 3
        self.model.add(Dense(hidden_layers[i], input_dim=hidden_layers[i-1],
            init='uniform'))
        self.model.add(Activation('sigmoid'))
        self.model.add(Dropout(drop_prob))

    #output layer
    self.model.add(Dense(output_dim, input_dim=hidden_layers[-1], init='uniform'))
    self.model.add(Activation('softmax'))
  
    if params['solver'] == 'sgd':
        self.solver = SGD(lr=params['lr'], decay=1-params['decay_rate'], momentum=0.9, nesterov=True)
    else:
        raise ValueError('ERROR in MLP: %s --> This solver type is not yet supported' % (params['solver']))
      
    self.model.compile(loss='categorical_crossentropy', optimizer=self.solver)
    #score = model.evaluate(test_x)
    self.f_train = self.model.train_on_batch

    return self.f_train
github ljuvela / ResGAN / train_pls_noisegan.py (View on GitHub)
                      padding='same',
                      strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2) # concat as different channels
                    
    x = Convolution1D(filters=1,
                      kernel_size=15,
                      padding='same',
                      strides=1)(x)

    # tanh activation (GAN hacks)
    #x = Activation('tanh')(x)

    vuv = Activation('sigmoid')(vuv)
    y = multiply([vuv, pls]) # voicing gate for deterministic component
    x = add([x, y])

    #x = add([pls, x]) # force additivity   
             
    # remove singleton outer dimension 
    x = Reshape((output_dim,))(x)
     
    model = Model(inputs=[pls_input, noise_input, vuv_input], outputs=[x],
                  name="generator")

    return model
github kobiso / CBAM-keras / models / densenet.py (View on GitHub)
    compression = 1.0 - reduction

    # Initial convolution
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter, initial_kernel, kernel_initializer='he_normal', padding='same',
               strides=initial_strides, use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate, bottleneck=bottleneck,
                                     dropout_rate=dropout_rate, weight_decay=weight_decay, attention_module=attention_module)
        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression,
                               weight_decay=weight_decay, attention_module=attention_module)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate, bottleneck=bottleneck,
                                 dropout_rate=dropout_rate, weight_decay=weight_decay, attention_module=attention_module)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
github Riashat / Active-Learning-Bayesian-Convolutional-Neural-Networks / ConvNets / FINAL_Averaged_Experiments / Uncertainty_Calibration / Softmax_BALD_Q10_N3000.py (View on GitHub)
	Pool_Valid_Loss = np.zeros(shape=(nb_epoch, 1))
	Pool_Train_Loss = np.zeros(shape=(nb_epoch, 1))
	Pool_Valid_Acc = np.zeros(shape=(nb_epoch, 1))
	Pool_Train_Acc = np.zeros(shape=(nb_epoch, 1))
	x_pool_All = np.zeros(shape=(1))

	Y_train = np_utils.to_categorical(y_train, nb_classes)

	print('Training Model Without Acquisitions in Experiment', e)


	model = Sequential()
	model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
	model.add(Activation('relu'))
	model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Dropout(0.25))
	
	model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv, border_mode='valid', input_shape=(1, img_rows, img_cols)))
	model.add(Activation('relu'))
	model.add(Convolution2D(nb_filters*2, nb_conv, nb_conv))
	model.add(Activation('relu'))
	model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
	model.add(Dropout(0.25))

	c = 10
	Weight_Decay = c / float(X_train.shape[0])
	model.add(Flatten())
	model.add(Dense(128, W_regularizer=l2(Weight_Decay)))
	model.add(Activation('relu'))
	model.add(Dropout(0.5))