How to use the keras.models.Sequential class in keras

To help you get started, we’ve selected a few keras examples based on popular ways keras.models.Sequential is used in public projects.

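Before the project examples, here is a minimal, self-contained sketch of the typical Sequential workflow (stack layers, compile, fit, predict). The layer sizes and the synthetic data are illustrative assumptions, not taken from any of the projects below.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Illustrative synthetic data: 100 samples, 8 features, binary labels.
x = np.random.random((100, 8))
y = np.random.randint(0, 2, size=(100, 1))

# Layers are stacked in order; the first layer declares the input size.
model = Sequential()
model.add(Dense(16, activation='relu', input_dim=8))
model.add(Dense(1, activation='sigmoid'))

# Configure loss, optimizer, and metrics, then train and predict.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x, y, batch_size=16, epochs=2, verbose=0)
preds = model.predict(x[:5])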

github keras-team / keras-contrib / tests / keras_contrib / layers / test_capsule.py
def test_capsule_correctness():
    X = np.random.random((1, 1, 1))

    model = Sequential()
    model.add(capsule.Capsule(1, 1, 1, True, activation='sigmoid'))

    model.compile(loss='mse', optimizer='rmsprop')
    init_out = model.predict(X)  # mock predict call to initialize weights
    model.set_weights([np.zeros((1, 1, 1))])
    out = model.predict(X)
    assert_allclose(out, np.zeros((1, 1, 1), dtype=K.floatx()) + 0.5, atol=1e-5)
github yuxiaowww / NLP-Basic-Learning / task_other / task_o_2.py
maxlen = 80
batch_size = 32

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Building model...')
model = Sequential()
# Embedding layer: map word indices to dense vectors
model.add(Embedding(max_features, 128))
# LSTM layer
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
# Fully connected output layer
model.add(Dense(1, activation='sigmoid'))
# Print a summary of the model
model.summary()

# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('Training...')
model.fit(x_train, y_train, batch_size=batch_size, epochs=3, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print('Test score:', score)
github tpbarron / rlflow / markov / abstractions / dqn.py
def build_model(self):
        model = Sequential()
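        # Keras 1-style API: 'subsample' is the convolution stride and 'border_mode' the padding mode (strides/padding in Keras 2)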
        model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same', input_shape=env.observation_space.shape, activation='relu'))
        model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same', activation='relu'))
        model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same', activation='relu'))
        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dense(env.action_space.n, activation='sigmoid'))
        return model
github FitMachineLearning / FitML / ParameterNoising / LunarLander_v1.py
action_predictor_model.add(Dense(32, activation='relu', input_dim=apdataX.shape[1]))
#action_predictor_model.add(Dropout(0.5))
action_predictor_model.add(Dense(32, activation='relu'))
#action_predictor_model.add(Dropout(0.5))
action_predictor_model.add(Dense(32, activation='relu'))
#action_predictor_model.add(Dropout(0.5))

action_predictor_model.add(Dense(apdataY.shape[1]))
#opt2 = optimizers.adam(lr=apLearning_rate)
opt2 = optimizers.Adadelta()

action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])


# initialize the noisy model (same architecture as the action predictor)
noisy_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
noisy_model.add(Dense(32, activation='relu', input_dim=apdataX.shape[1]))
#noisy_model.add(Dropout(0.5))
noisy_model.add(Dense(32, activation='relu'))
#noisy_model.add(Dropout(0.5))
noisy_model.add(Dense(32, activation='relu'))
#noisy_model.add(Dropout(0.5))
noisy_model.add(Dense(apdataY.shape[1]))
opt3 = optimizers.Adadelta()

noisy_model.compile(loss='mse', optimizer=opt3, metrics=['accuracy'])

#load previous model weights if they exist
if load_previous_weights:
    dir_path = os.path.realpath(".")
    fn = dir_path + "/"+weigths_filename
github ChrisCummins / clgen / deeplearning / clgen / models / builders.py
) -> "keras.models.Sequential":
  """Build a Keras model from a Model proto.

  Args:
    config: A Model proto instance.
    vocabulary_size: The number of tokens in the vocabulary.

  Returns:
    A Sequential model instance.
  """
  # Deferred importing of Keras so that we don't have to activate the
  # TensorFlow backend every time we import this module.
  import keras

  dropout = (config.architecture.post_layer_dropout_micros or 0) / 1e6
  model = keras.models.Sequential()
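  # Map the architecture's neuron_type enum to the matching Keras recurrent layer class.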
  layer = {
    model_pb2.NetworkArchitecture.LSTM: keras.layers.LSTM,
    model_pb2.NetworkArchitecture.RNN: keras.layers.RNN,
    model_pb2.NetworkArchitecture.GRU: keras.layers.GRU,
  }[config.architecture.neuron_type]

  # The input layer.
  model.add(
    keras.layers.Embedding(
      vocabulary_size,
      config.architecture.embedding_size,
      batch_input_shape=(
        config.training.batch_size,
        config.training.sequence_length,
      ),
    )
github duoergun0729 / 3book / code / keras-dcgan.py
def gan_generator_model():
        model = Sequential()
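        # Generator: map a 200-dimensional noise vector to a 28x28x1 image with a sigmoid output.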
        model.add(Dense(input_dim=200, units=256))
        model.add(Activation('relu'))
        model.add(Dense(28*28*1))
        model.add(Activation('sigmoid'))

        model.add(Reshape((28, 28, 1), input_shape=(28*28*1,)))

        plot_model(model, show_shapes=True, to_file='keras-gan-generator_model.png')
        return model
github PlatformStories / train-cnn-chip-classifier / bin / train-cnn-chip-classifier.py
def compile_architecture(self):
        '''
        Implementation of VGG 16-layer net.
        '''
        print 'Compiling VGG Net...'

        model = Sequential()
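        # VGG-style blocks: zero padding plus stacked convolutions, each block ending in 2x2 max pooling (Keras 1-style positional kernel arguments).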
        model.add(ZeroPadding2D((1,1), input_shape=self.input_shape))
        model.add(Convolution2D(64, self.kernel_size, self.kernel_size, activation='relu',
                                input_shape=self.input_shape))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(64, self.kernel_size, self.kernel_size,
                                activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))

        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
                                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128, self.kernel_size, self.kernel_size,
                                activation='relu'))
        model.add(MaxPooling2D((2,2), strides=(2,2)))
github AlenK123 / V.A.Silly / NN / cifar10_cnn_01.py
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
steps_per_epoch = 25
model_name = '00001.h5'

# The data, split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
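# Convolutional blocks: Conv2D + LeakyReLU pairs, each followed by 2x2 max pooling and dropout.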
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(32, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.0))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(64, (3, 3)))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.0))

model.add(Flatten())
github eriklindernoren / Keras-GAN / cgan / cgan.py
def build_generator(self):

        model = Sequential()
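        # Generator: map a latent_dim-dimensional noise vector to an image in [-1, 1] via a tanh output.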

        model.add(Dense(256, input_dim=self.latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))

        model.summary()

        noise = Input(shape=(self.latent_dim,))
github rlcode / reinforcement-learning-kr / 2-cartpole / 2-actor-critic / cartpole_a2c.py
def build_actor(self):
        actor = Sequential()
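        # Actor network: state -> softmax distribution over actions.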
        actor.add(Dense(24, input_dim=self.state_size, activation='relu',
                        kernel_initializer='he_uniform'))
        actor.add(Dense(self.action_size, activation='softmax',
                        kernel_initializer='he_uniform'))
        actor.summary()
        return actor