if train_epochs:
  train_t_begin = time.time()
  model.fit(xs, ys, batch_size=batch_size, epochs=train_epochs)
  train_t_end = time.time()

# Perform predict() burn-in.
for _ in range(_PREDICT_BURNINS):
  model.predict(xs)
# Time predict() by averaging.
predict_t_begin = time.time()
for _ in range(_PREDICT_RUNS):
  model.predict(xs)
predict_t_end = time.time()

# Save the model and weights.
tfjs.converters.save_keras_model(model, artifacts_dir)

# Save data about the model and benchmark results.
if train_epochs:
  train_time = (train_t_end - train_t_begin) / train_epochs
else:
  train_time = None
predict_time = (predict_t_end - predict_t_begin) / _PREDICT_RUNS
data = {
    'name': model_name,
    'description': description,
    'optimizer': optimizer.__class__.__name__,
    'loss': loss,
    'input_shape': input_shape,
    'target_shape': target_shape,
    'batch_size': batch_size,
    'train_epochs': train_epochs,
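# (The dictionary above is clipped in the snippet.) A minimal, hedged sketch of
# how such benchmark results might be persisted as JSON next to the converted
# artifacts; the extra fields and the file name 'benchmark_data.json' are
# assumptions, not taken from the snippet:
import json
import os

data['train_time'] = train_time      # average seconds per training epoch, or None
data['predict_time'] = predict_time  # average seconds per predict() run
with open(os.path.join(artifacts_dir, 'benchmark_data.json'), 'wt') as f:
  json.dump(data, f)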
def testSaveKerasModel(self):
  with self.test_session():
    # First create a toy keras model.
    model = _createKerasModel('MergedDense')

    tfjs.converters.save_keras_model(model, self._tmp_dir)

    # Briefly check the model topology.
    with open(os.path.join(self._tmp_dir, 'model.json')) as f:
      json_content = json.load(f)
    model_json = json_content['modelTopology']
    self.assertIsInstance(model_json['model_config'], dict)
    self.assertIsInstance(model_json['model_config']['config'], dict)
    self.assertIn('layers', model_json['model_config']['config'])

    weights_manifest = json_content['weightsManifest']
    self.assertIsInstance(weights_manifest, list)

    # Briefly check the weights manifest.
    weight_shapes = dict()
    weight_dtypes = dict()
    for manifest_item in weights_manifest:
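# The loop above is clipped in the snippet. For orientation, each entry in the
# 'weightsManifest' list written by save_keras_model() pairs binary weight
# shard files with per-tensor metadata, roughly of the following shape; the
# tensor names, shapes, and shard file name here are illustrative, not taken
# from the test:
example_manifest_item = {
    'paths': ['group1-shard1of1.bin'],
    'weights': [
        {'name': 'Dense1/kernel', 'shape': [4, 2], 'dtype': 'float32'},
        {'name': 'Dense1/bias', 'shape': [2], 'dtype': 'float32'},
    ],
}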
def testLoadKerasModel(self):
  # Use separate tf.Graph and tf.compat.v1.Session contexts to prevent name collision.
  with tf.Graph().as_default(), tf.compat.v1.Session():
    # First create a toy keras model.
    model1 = _createKerasModel('MergedDense')
    tfjs.converters.save_keras_model(model1, self._tmp_dir)
    model1_weight_values = model1.get_weights()

  with tf.Graph().as_default(), tf.compat.v1.Session():
    # Load the model from saved artifacts.
    model2 = tfjs.converters.load_keras_model(
        os.path.join(self._tmp_dir, 'model.json'))

    # Compare the loaded model with the original one.
    model2_weight_values = model2.get_weights()
    self.assertEqual(len(model1_weight_values), len(model2_weight_values))
    for model1_weight_value, model2_weight_value in zip(
        model1_weight_values, model2_weight_values):
      self.assertAllClose(model1_weight_value, model2_weight_value)

    # Check the content of the output directory.
    self.assertTrue(glob.glob(os.path.join(self._tmp_dir, 'group*-*')))
dense2 = keras.layers.Dense(
    3, use_bias=True, name='Dense2', activation='softmax')(dense1)
# pylint:disable=redefined-variable-type
model = keras.models.Model(inputs=[iris_x], outputs=[dense2])
# pylint:enable=redefined-variable-type
model.compile(loss='categorical_crossentropy', optimizer='adam')

model.fit(data_x, data_y, batch_size=8, epochs=epochs)

# Run prediction on the training set.
pred_ys = np.argmax(model.predict(data_x), axis=1)
true_ys = np.argmax(data_y, axis=1)
final_train_accuracy = np.mean((pred_ys == true_ys).astype(np.float32))
print('Accuracy on the training set: %g' % final_train_accuracy)

tfjs.converters.save_keras_model(model, artifacts_dir)

return final_train_accuracy
model.add(Dense(3, activation='softmax'))

adam = keras.optimizers.Adam(lr=0.001)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

model.fit(x_train, y_train,
          epochs=10,
          batch_size=128)

score = model.evaluate(x_test, y_test, batch_size=128)
print(score)

model.save("Keras-64x2-10epoch")
tfjs.converters.save_keras_model(model, "trainedModel")
 num_encoder_tokens, num_decoder_tokens,
 __, target_token_index,
 encoder_input_data, decoder_input_data, decoder_target_data) = read_data()

(encoder_inputs, encoder_states, decoder_inputs, decoder_lstm,
 decoder_dense, model) = seq2seq_model(
     num_encoder_tokens, num_decoder_tokens, FLAGS.latent_dim)

# Run training.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=FLAGS.batch_size,
          epochs=FLAGS.epochs,
          validation_split=0.2)

tfjs.converters.save_keras_model(model, FLAGS.artifacts_dir)

# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
#    and a "start of sequence" token as target.
#    Output will be the next target token
# 3) Repeat with the current target token and current states

# Define sampling models
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(FLAGS.latent_dim,))
decoder_state_input_c = Input(shape=(FLAGS.latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
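# The three-step sampling drill above can be turned into a decode loop along
# the following lines. This is a hedged sketch based on the standard Keras
# seq2seq recipe: the decoder_model definition, reverse_target_char_index,
# max_decoder_seq_length, and the '\t'/'\n' start/stop tokens are assumptions
# not shown in the snippet above.
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs] + decoder_states)

def decode_sequence(input_seq):
  # 1) Encode the input and retrieve the initial decoder state.
  states_value = encoder_model.predict(input_seq)

  # 2) Start with a one-hot "start of sequence" token.
  target_seq = np.zeros((1, 1, num_decoder_tokens))
  target_seq[0, 0, target_token_index['\t']] = 1.

  decoded_sentence = ''
  while True:
    output_tokens, h, c = decoder_model.predict([target_seq] + states_value)

    # Greedily pick the most likely next token.
    sampled_token_index = np.argmax(output_tokens[0, -1, :])
    sampled_char = reverse_target_char_index[sampled_token_index]
    decoded_sentence += sampled_char

    # Stop on the end-of-sequence token or when the output gets too long.
    if sampled_char == '\n' or len(decoded_sentence) > max_decoder_seq_length:
      break

    # 3) Repeat with the current target token and the current states.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, sampled_token_index] = 1.
    states_value = [h, c]

  return decoded_sentence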
X_test = X_test.reshape(-1, 784)

def create_model():
    model = Sequential([
        Dense(512, activation=tf.nn.relu, input_shape=(784,)),
        Dropout(0.2),
        Dense(10, activation=tf.nn.softmax)
    ])
    model.summary()  # summary() already prints; no need to wrap it in print()
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

model = create_model()
model.fit(x=X_train, y=Y_train, batch_size=100, validation_data=(X_test, Y_test))
model.save('my_mnist_model.h5')
tfjs.converters.save_keras_model(model, 'tfjs_target_dir')
    ])
    model.summary()  # summary() already prints; no need to wrap it in print()
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model

model = create_model()

# checkpoint_path = "checkpoints/cp.ckpt"
# checkpoint_dir = os.path.dirname(checkpoint_path)
# cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
#                                                  save_weights_only=True,
#                                                  verbose=1)
# model.fit(x=X_train, y=Y_train, batch_size=100, validation_data=(X_test, Y_test), callbacks=[cp_callback])
# model.save('my_mnist_model.h5')

model.fit(x=X_train, y=Y_train, batch_size=100, validation_data=(X_test, Y_test))
model.save('my_mnist_model.h5')
tfjs.converters.save_keras_model(model, 'tfjs_target_dir')
def optimize_graph(graph, signature_def, output_graph,
                   tf_version, quantization_dtype=None, skip_op_check=False,
                   strip_debug_ops=False):
  """Takes a Python Graph object and optimizes the graph.

  Args:
    graph: The frozen graph to optimize.
    signature_def: the SignatureDef of the inference graph.
    output_graph: The location of the output graph.
    tf_version: Tensorflow version of the input graph.
    quantization_dtype: An optional numpy dtype to quantize weights to for
      compression. Only np.uint8 and np.uint16 are supported.
    skip_op_check: Bool whether to skip the op check.
    strip_debug_ops: Bool whether to strip debug ops.
  """
  fuse_prelu.register_prelu_func(graph)

  # Add a collection 'train_op' so that Grappler knows the outputs.
  for _, output in signature_def.outputs.items():
    name = output.name.split(':')[0]
    graph.add_to_collection('train_op', graph.get_operation_by_name(name))

  graph_def = graph.as_graph_def()

  unsupported = validate(graph_def.node, skip_op_check,
                         strip_debug_ops)
  if unsupported:
    raise ValueError('Unsupported Ops in the model before optimization\n' +
                     ', '.join(unsupported))

  # First pass of Grappler optimization; this is needed for batch norm folding.
  config = config_pb2.ConfigProto()
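# The fragment stops right after the ConfigProto is created. As a rough,
# hedged sketch of how such a Grappler pass could be driven (the specific
# rewriter settings, the exported MetaGraphDef, and the 'serving_default'
# signature key are assumptions, not taken from the snippet):
import tensorflow as tf
from tensorflow.python.grappler import tf_optimizer

rewriter_config = config.graph_options.rewrite_options
rewriter_config.optimizers.append('constfold')   # constant folding
rewriter_config.optimizers.append('arithmetic')  # arithmetic simplifications
rewriter_config.optimizers.append('dependency')  # prune control dependencies

# Export a MetaGraphDef carrying the graph and its signature, then optimize.
meta_graph = tf.compat.v1.train.export_meta_graph(
    graph_def=graph_def, graph=graph)
meta_graph.signature_def['serving_default'].CopyFrom(signature_def)
optimized_graph_def = tf_optimizer.OptimizeGraph(
    config, meta_graph, graph_id=b'tf_graph')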