"""
Convert tensorflow model to bigdl model
:param input_ops: operation list used for input, should be placeholders
:param output_ops: operations list used for output
:return: bigdl model
"""
input_names = map(lambda x: x.name.split(":")[0], input_ops)
output_names = map(lambda x: x.name.split(":")[0], output_ops)
temp = tempfile.mkdtemp()
dump_model(path=temp)
model_path = temp + '/model.pb'
bin_path = temp + '/model.bin'
model = Model.load_tensorflow(model_path, input_names, output_names,
byte_order, bin_path, bigdl_type)
try:
shutil.rmtree(temp)
except OSError as e:
if e.errno != errno.ENOENT:
raise
return model
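
# A minimal usage sketch of convert() above; the demo function and network are
# illustrative assumptions, and dump_model is assumed to read variables from
# the current default TensorFlow session:
def _convert_demo():
    import tensorflow as tf
    inputs = tf.placeholder(tf.float32, [None, 5])
    outputs = tf.nn.tanh(tf.matmul(inputs, tf.Variable(tf.random_uniform([5, 10]))))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return convert([inputs], [outputs], "little_endian", "float")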
    def _construct_bigdl_model(self):
        for clayer in self.kconfig["layers"]:
            if clayer["name"] not in self.node_id_to_instance:
                self._do_create_node(self.node_id_to_layer[clayer["name"]],
                                     clayer)
        ins = []
        for input_layer in self.kconfig["input_layers"]:
            name = input_layer[0]
            ins.append(self.node_id_to_instance[name])
        outs = []
        for output_layer in self.kconfig["output_layers"]:
            name = output_layer[0]
            outs.append(self.node_id_to_instance[name])
        return BLayer.Model(inputs=ins, outputs=outs)
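
# For orientation: a Keras 1.2.2 functional-model config (as returned by
# keras.models.Model.get_config()) has roughly this shape, which is why
# input_layer[0] / output_layer[0] above pick out the layer name:
#     {"layers": [{"name": "dense_1", ...}, ...],
#      "input_layers": [["input_1", 0, 0]],
#      "output_layers": [["dense_1", 0, 0]]}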
    def _load(self, path):
        return Model.loadModel(path, bigdl_type=self.bigdl_type)

import numpy as np
import tensorflow as tf
# Model is assumed to be imported from bigdl.nn.layer in this snippet's module.


def main():
    tf.set_random_seed(1234)
    input = tf.placeholder(tf.float32, [None, 5])
    weight = tf.Variable(tf.random_uniform([5, 10]))
    bias = tf.Variable(tf.random_uniform([10]))
    middle = tf.nn.bias_add(tf.matmul(input, weight), bias)
    output = tf.nn.tanh(middle)

    tensor = np.random.rand(5, 5)
    # construct the BigDL model and get the result from it
    bigdl_model = Model(input, output, model_type="tensorflow")
    bigdl_result = bigdl_model.forward(tensor)

    # get the result from TensorFlow and compare
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        tensorflow_result = sess.run(output, {input: tensor})

    print("TensorFlow forward result is " + str(tensorflow_result))
    print("BigDL forward result is " + str(bigdl_result))
    np.testing.assert_almost_equal(tensorflow_result, bigdl_result, 6)
    print("The results are almost equal to 6 decimal places")

import sys

from bigdl.util.common import to_list, callBigDlFunc, \
    JavaValue, get_node_and_core_number
from zoo.common import Sample, JTensor
from zoo.common.nncontext import getOrCreateSparkContext
from zoo.feature.common import FeatureSet
from zoo.feature.image import ImageSet, ImagePreprocessing
from zoo.pipeline.api.keras.engine.topology import ZooKerasLayer, KerasNet, to_bigdl_metric
from bigdl.optim.optimizer import EveryEpoch, MaxEpoch, Optimizer
from zoo.util import nest

if sys.version >= '3':
    long = int
    unicode = str


class GraphNet(BModel):

    def __init__(self, input, output, jvalue=None, bigdl_type="float", **kwargs):
        # Note: super(BModel, self) intentionally skips BModel's own __init__
        # and dispatches straight to its base class.
        super(BModel, self).__init__(jvalue,
                                     to_list(input),
                                     to_list(output),
                                     bigdl_type,
                                     **kwargs)

    def flattened_layers(self, include_container=False):
        jlayers = callBigDlFunc(self.bigdl_type, "getFlattenSubModules", self, include_container)
        layers = [Layer.of(jlayer) for jlayer in jlayers]
        return layers

    @property
    def layers(self):
        jlayers = callBigDlFunc(self.bigdl_type, "getSubModules", self)
        layers = [Layer.of(jlayer) for jlayer in jlayers]
        return layers

    @staticmethod
    def load_keras(json_path=None, hdf5_path=None, by_name=False):
        """
        Load a pre-trained Keras model.
        :param json_path: The path to the json file containing the Keras model
                          definition. Default is None.
        :param hdf5_path: The path to the HDF5 file containing the pre-trained
                          Keras model weights, with or without the model
                          architecture. Default is None.
        :param by_name: Whether to load the weights by layer name. Default is
                        False, which requires the architecture to be unchanged;
                        if True, only layers with matching names are loaded.
        :return: A BigDL model.
        """
        return BModel.load_keras(json_path, hdf5_path, by_name)
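
# A hedged usage sketch of load_keras; the file paths and demo function name
# are placeholders, and the files are expected to come from Keras 1.2.2:
def _load_keras_demo():
    return GraphNet.load_keras(json_path="/tmp/model.json",
                               hdf5_path="/tmp/weights.h5")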

from pyspark.sql.functions import col, udf
from pyspark.sql.types import StringType
# NNImageReader, NNClassifierModel, ChainedPreprocessing and the Image*
# preprocessors are assumed to be imported from Analytics Zoo
# (zoo.pipeline.nnframes, zoo.feature.common and zoo.feature.image).


def inference(image_path, model_path, sc):
    imageDF = NNImageReader.readImages(image_path, sc, resizeH=300, resizeW=300, image_codec=1)
    # Extract the image name from the first field of the image struct column.
    getName = udf(lambda row: row[0], StringType())
    transformer = ChainedPreprocessing(
        [RowToImageFeature(), ImageResize(256, 256), ImageCenterCrop(224, 224),
         ImageChannelNormalize(123.0, 117.0, 104.0), ImageMatToTensor(), ImageFeatureToTensor()])
    model = Model.loadModel(model_path)
    classifier_model = NNClassifierModel(model, transformer)\
        .setFeaturesCol("image").setBatchSize(4)
    predictionDF = classifier_model.transform(imageDF).withColumn("name", getName(col("image")))
    return predictionDF
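
# A hedged usage sketch; the paths and demo function name are placeholders:
def _inference_demo():
    from zoo.common.nncontext import getOrCreateSparkContext
    sc = getOrCreateSparkContext()
    predictions = inference("/tmp/images", "/tmp/resnet-50.model", sc)
    predictions.select("name", "prediction").show(5)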