# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): fragment of a test-method body — the enclosing `def`, the first
# five entries of `nodes`, and the test class are outside this view.
# Two LeakyRelu nodes consume the same input 'leak0'; their outputs feed two
# parallel Cast chains that are merged by Add, then transposed and convolved.
nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
# Cast `to` values are ONNX TensorProto dtype enum codes (1 == FLOAT, matching
# the FLOAT value_infos below; 6/7 are presumably INT32/INT64 — TODO confirm
# against onnx.TensorProto).
nodes[7:] = [helper.make_node('Cast', ['leak1'], ['cast0'], to=6, name="7")]
nodes[8:] = [helper.make_node('Cast', ['cast0'], ['cast1'], to=1, name="8")]
nodes[9:] = [helper.make_node('Cast', ['leak2'], ['cast2'], to=6, name="9")]
nodes[10:] = [helper.make_node('Cast', ['cast2'], ['cast3'], to=7, name="10")]
nodes[11:] = [helper.make_node('Cast', ['cast3'], ['cast4'], to=1, name="11")]
nodes[12:] = [helper.make_node('Add', ['cast1', 'cast4'], ['add0'], name="12")]
# NOTE(review): 'tranpose2' (sic) is a misspelled but consistently-used tensor
# name; renaming it would change graph wiring, so it is left as-is.
nodes[13:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="13")]
nodes[14:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="14")]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
# Persist the un-optimized model for manual inspection.
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
# optimize_onnx can return tuples alongside plain NodeProtos; keep only nodes.
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
# The optimizer is expected to shrink the graph to exactly 11 nodes —
# presumably by collapsing redundant Cast pairs; TODO confirm which rewrite
# rule fires here.
self.assertEqual(len(new_nodes), 11)
self.assertIsNotNone(model)
# NOTE(review): fragment — these lines are the tail of a helper.make_node(...)
# call building `FCLayer_node`; the call's opening, and the definitions of
# node_inp_list, mw/mh/simd/pe, rdt, idt/odt/wdt/tdt, W_reshaped, T_reshaped,
# T, inp and outp, are outside this view.
node_inp_list,
["outp"],
domain="finn",
backend="fpgadataflow",
resType="ap_resource_lut()",
MW=mw,
MH=mh,
SIMD=simd,
PE=pe,
resDataType=rdt,
)
# Wrap the single custom node in a graph/model and annotate FINN datatypes.
graph = helper.make_graph(
nodes=[FCLayer_node], name="fclayer_graph", inputs=[inp], outputs=[outp],
)
model = helper.make_model(graph, producer_name="fclayer-model")
model = ModelWrapper(model)
model.set_tensor_datatype("inp", idt)
model.set_tensor_datatype("outp", odt)
model.set_tensor_datatype("weights", wdt)
model.set_initializer("weights", W_reshaped)
# Thresholds are optional: only attach the "thresh" tensor when T is given.
if T is not None:
model.set_tensor_datatype("thresh", tdt)
model.set_initializer("thresh", T_reshaped)
return model
# NOTE(review): fragment — interior of a helper.make_graph(...) call; the
# `inputs=[` opening and the enclosing function signature are outside this
# view.  Each `input_` appears to be a (name, shape) pair with dtype fixed
# to FLOAT.
helper.make_tensor_value_info(
input_[0],
TensorProto.FLOAT,
input_[1]
) for input_ in inputs
],
outputs=[
# Each `output_` carries its own dtype: (name, shape, dtype) — note the
# dtype is output_[2] while the shape is output_[1], unlike the inputs
# above which are always FLOAT.
helper.make_tensor_value_info(
output_[0],
output_[2],
output_[1]
) for output_ in outputs
],
initializer=initializer
)
onnx_model = helper.make_model(graph)
return onnx_model
# NOTE(review): fragment of a test-method body — the enclosing `def` and the
# first five `nodes` entries (presumably Transpose nodes producing
# 'tranpose0'/'tranpose1', sic) are outside this view.
nodes[5:] = [helper.make_node('LeakyRelu', ['tranpose0'], ['tranpose1'])]
# NOTE(review): a `perm` attribute on a Relu node is unusual (perm belongs to
# Transpose); presumably this exercises the optimizer's tolerance of stray
# attributes — TODO confirm intent.
nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
# Persist the un-optimized model for manual inspection.
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
# optimize_onnx can return tuples alongside plain NodeProtos; keep only nodes.
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
# The optimizer is expected to reduce the graph to 5 nodes.
self.assertEqual(len(new_nodes), 5)
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertIsNotNone(model)
# NOTE(review): fragment — `inp`, the enclosing test `def`, and the use of
# `output_dict` continue outside this view.
outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [2, 4, 4])
# Single-node FINN FIFO graph; the fourth positional argument "outp" is
# helper.make_node's `name` parameter, so the node is named after its output.
FIFO_node = helper.make_node(
"FIFO",
["inp"],
["outp"],
"outp",
domain="finn",
backend="fpgadataflow",
depth=1024,
)
graph = helper.make_graph(
nodes=[FIFO_node], name="FIFO_graph", inputs=[inp], outputs=[outp],
)
model = helper.make_model(graph, producer_name="finn-hls-onnx-model")
model = ModelWrapper(model)
# set the tensor datatypes (in this case: all to bipolar)
for tensor in graph.input:
model.set_tensor_datatype(tensor.name, DataType["BIPOLAR"])
for tensor in graph.output:
model.set_tensor_datatype(tensor.name, DataType["BIPOLAR"])
#onnx.save(model.model, "FIFO-model.onnx")
# generate input data: 32 random bits reshaped to the declared (2, 4, 4)
# input shape, as float32.
input_tensor = np.random.randint(2, size=32)
input_tensor = (np.asarray(input_tensor, dtype=np.float32)).reshape(2, 4, 4)
input_dict = {"inp": input_tensor}
output_dict = oxe.execute_onnx(model, input_dict)
# NOTE(review): fragment of a method body — the enclosing `def`, `layer_type`,
# `weight_loader`, `conv_params`, `initializer`, `inputs`, `outputs`,
# `self._nodes` and `verbose` are all defined outside this view.
assert layer_type == 'convolutional'
# Load the conv layer's weights and extend the graph-wide initializer and
# input lists with them.
initializer_layer, inputs_layer = weight_loader.load_conv_weights(
conv_params)
initializer.extend(initializer_layer)
inputs.extend(inputs_layer)
# Release the loader explicitly (presumably it holds the weight file open —
# TODO confirm).
del weight_loader
self.graph_def = helper.make_graph(
nodes=self._nodes,
name='YOLO',
inputs=inputs,
outputs=outputs,
initializer=initializer
)
if verbose:
print(helper.printable_graph(self.graph_def))
model_def = helper.make_model(self.graph_def,
producer_name='NVIDIA TensorRT sample')
return model_def
# NOTE(review): fragment of the Chainer ONNX exporter — the enclosing `def`,
# `o`, `var`, `graph_name`, `input_tensors`, `initializers`,
# `external_converters` and `input_shapes` are defined outside this view.
output_tensors.append(helper.make_tensor_value_info(
name, NP_TYPE_TO_TENSOR_TYPE[var.dtype], var.shape))
# When parameters are not exported, the graph carries no initializers.
if not export_params:
initializers = []
onnx_graph = helper.make_graph(
o.graph, graph_name, input_tensors, output_tensors,
initializer=initializers)
# Default ('') opset plus any user-supplied external domains.
opset_imports = [helper.make_operatorsetid('', opset_version)]
if external_opset_imports:
chainer.utils.experimental('external_opset_imports')
for domain, version in external_opset_imports.items():
opset_imports.append(helper.make_operatorsetid(domain, version))
model = helper.make_model(
onnx_graph,
producer_name='Chainer',
producer_version=chainer.__version__,
opset_imports=opset_imports
)
model.ir_version = onnx.IR_VERSION
check_onnx_model(model, external_converters, external_opset_imports)
if input_shapes is not None:
# Clear the declared output dims so shape inference replaces them with
# shapes consistent with the (overridden) input shapes, then re-check.
for output in model.graph.output:
for d in output.type.tensor_type.shape.dim:
d.Clear()
model = shape_inference.infer_shapes(model)
check_onnx_model(model, external_converters, external_opset_imports)
def save_model(model, output_nodes, filepath):
    """
    Serialize *model* to disk at *filepath*.

    A fresh graph is assembled from the model's existing nodes, inputs and
    initializers, but with *output_nodes* as its outputs; the graph is
    validated before the serialized model is written.
    """
    rebuilt_graph = helper.make_graph(
        nodes=model.graph.node,
        name='new_graph',
        inputs=model.graph.input,
        outputs=output_nodes,
        initializer=model.graph.initializer,
    )
    # Fail fast on an inconsistent graph before touching the filesystem.
    checker.check_graph(rebuilt_graph)
    rebuilt_model = helper.make_model(rebuilt_graph)
    with open(filepath, "wb") as file_handle:
        file_handle.write(rebuilt_model.SerializeToString())
def make_model_ex(graph, imported_opset_pairs, target_default_opset, metadata_props=None, **kwargs):
# NOTE(review): this definition is truncated in this view — the body of the
# final `else:` branch, and any use of `target_default_opset` and
# `metadata_props`, continue beyond the visible lines.
onnx_model = helper.make_model(graph, **kwargs)
# Merge operator sets for the same domain, the largest version number would be kept
purified_operator_set = dict()
for op_domain, op_version in imported_opset_pairs:
if op_domain not in purified_operator_set:
purified_operator_set[op_domain] = op_version
else:
purified_operator_set[op_domain] = max(purified_operator_set[op_domain], op_version)
# Fill operator sets
i = 0
for op_domain, op_version in purified_operator_set.items():
if i == 0 and len(onnx_model.opset_import) == 1:
# Overwrite the default operator set created by helper.make_model(...)
op_set = onnx_model.opset_import[0]
else:
# for ch in model.namedparams():
# print(ch)
# NOTE(review): fragment — `env`, `output_tensors`, `input_tensors` and
# `dprint` are defined outside this view.
outputs_vi = [o.to_value_info(env) for o in output_tensors]
graph = helper.make_graph(env.nodes,
'name_is_unknown_now', input_tensors,
outputs_vi,
)
# Attach an initializer to those inputs that are weights.
# Leave variable things such as batch_size and input_size as-is where possible.
dprint(graph)
# checker.check_graph(graph)
# The check fails once oniku's custom nodes are used...
mo = helper.make_model(graph)
# print(mo)
return mo