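The code fragments below appear to be taken from the einops test suite and exercise einops.layers.keras. The first fragment checks that an einops layer survives pickling on non-keras backends and that, on the keras backend, a model containing the layer survives a save_model/load_model round-trip when keras_custom_objects is supplied; it ends with a reduction check introduced as a back-propagation test. The second fragment repeats the serialization checks for a small Sequential classifier.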
# Fragment of a backend-parametrized test: `backend`, `layer`, `symbol`, `result_symbol1`,
# `result1`, `eval_inputs`, and `x` are defined earlier in the test and are not shown here.
import pickle
import tempfile

import numpy

# (the `if` opening this branch is not part of the snippet; it presumably excludes the
#  keras backend, which is handled by the `else` branch below)
if 'keras' not in backend.framework_name:
    # keras bug - fails for pickling
    layer2 = pickle.loads(pickle.dumps(layer))
    result_symbol2 = layer2(symbol)
    result2 = backend.eval_symbol(result_symbol2, eval_inputs)
    assert numpy.allclose(result1, result2)
else:
    import keras
    import einops.layers.keras

    model = keras.models.Model(symbol, result_symbol1)
    result2 = model.predict_on_batch(x)
    # create a temporary file using a context manager
    with tempfile.NamedTemporaryFile(mode='r+b') as fp:
        keras.models.save_model(model, fp.name)
        model2 = keras.models.load_model(fp.name,
                                         custom_objects=einops.layers.keras.keras_custom_objects)
    result3 = model2.predict_on_batch(x)
    assert numpy.allclose(result1, result2)
    assert numpy.allclose(result1, result3)

# now testing back-propagation
just_sum = backend.layers().Reduce('...->', reduction='sum')
result_sum1 = backend.eval_symbol(just_sum(result_symbol1), eval_inputs)
result_sum2 = numpy.sum(x)
assert numpy.allclose(result_sum1, result_sum2)
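The second fragment builds two copies of a small Sequential classifier (only the dense head survives in the snippet) and verifies that the model round-trips through save_model/load_model, to_json/model_from_json, and save_weights/load_weights, passing keras_custom_objects wherever the architecture is deserialized so that the einops layers can be reconstructed.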
import tempfile

import numpy
import keras
# `Linear` and `ReLU` are assumed aliases for standard Keras layers (the snippet uses
# these names without showing its imports).
from keras.layers import Dense as Linear, ReLU
from einops.layers.keras import keras_custom_objects


def create_model():
    # only the dense classifier head is visible in this snippet; the layers before it
    # (including the einops layers) are cut off
    return keras.models.Sequential([
        # ...
        Linear(120),
        ReLU(),
        Linear(84),
        ReLU(),
        Linear(10),
    ])


model1 = create_model()
model2 = create_model()
input = numpy.random.normal(size=[10, 32, 32, 3]).astype('float32')
# two independently initialized models should not agree
assert not numpy.allclose(model1.predict_on_batch(input), model2.predict_on_batch(input))

# save arch + weights
with tempfile.NamedTemporaryFile(mode='r+b') as fp:
    keras.models.save_model(model1, fp.name)
    model3 = keras.models.load_model(fp.name, custom_objects=keras_custom_objects)
assert numpy.allclose(model1.predict_on_batch(input), model3.predict_on_batch(input))

# save arch as json, transfer weights via save_weights / load_weights
model4 = keras.models.model_from_json(model1.to_json(), custom_objects=keras_custom_objects)
with tempfile.NamedTemporaryFile(mode='r+b') as fp:
    model1.save_weights(fp.name)
    model4.load_weights(fp.name)
    model2.load_weights(fp.name)
assert numpy.allclose(model1.predict_on_batch(input), model4.predict_on_batch(input))
assert numpy.allclose(model1.predict_on_batch(input), model2.predict_on_batch(input))
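For reference, a minimal self-contained sketch of the same round-trip, assuming a Keras installation compatible with einops.layers.keras that exposes Rearrange and keras_custom_objects as used above; the toy architecture, the .h5 suffix, and the variable names are illustrative choices, not part of the original tests.

import tempfile

import numpy
import keras
from einops.layers.keras import Rearrange, keras_custom_objects

# Toy functional model: flatten images with an einops Rearrange layer, then classify.
inputs = keras.layers.Input(shape=(32, 32, 3))
flat = Rearrange('b h w c -> b (h w c)')(inputs)
model = keras.models.Model(inputs, keras.layers.Dense(10)(flat))

x = numpy.random.normal(size=(4, 32, 32, 3)).astype('float32')

# Save to HDF5 (the suffix forces the format; needs h5py) and reload.
# `custom_objects` tells Keras how to rebuild the einops layer from its config.
with tempfile.NamedTemporaryFile(suffix='.h5') as fp:
    keras.models.save_model(model, fp.name)
    restored = keras.models.load_model(fp.name, custom_objects=keras_custom_objects)

assert numpy.allclose(model.predict_on_batch(x), restored.predict_on_batch(x))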