How to use the onnx.helper.make_graph function in onnx

To help you get started, we’ve selected a few onnx.helper.make_graph examples, based on popular ways the function is used in public projects.

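At its core, helper.make_graph takes a list of NodeProto objects, a graph name, and the graph's input and output value infos (plus an optional initializer list), and returns a GraphProto. Here is a minimal, self-contained sketch of that pattern; the single Relu node, the tensor names, and the shapes are illustrative rather than taken from any project below.

import onnx
from onnx import helper, TensorProto

# A single node: Relu applied to graph input "X", producing output "Y".
relu = helper.make_node("Relu", inputs=["X"], outputs=["Y"], name="relu")

graph = helper.make_graph(
    nodes=[relu],
    name="relu_example",
    inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 4])],
    outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 4])],
)

# Wrap the graph in a ModelProto and validate it.
model = helper.make_model(graph, producer_name="make_graph_example")
onnx.checker.check_model(model)  # raises ValidationError if the graph is malformed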

github onnx / tensorflow-onnx / tests / test_internals.py
Builds a small chain of Abs/Add/Identity nodes and assembles them into a GraphProto; note that the Identity node "n6" does not feed the declared output "n5:0".
def sample_net():
    n1 = helper.make_node("Abs", ["input"], ["n1:0"], name="n1")
    n2 = helper.make_node("Abs", ["n1:0"], ["n2:0"], name="n2")
    n3 = helper.make_node("Abs", ["n1:0"], ["n3:0"], name="n3")
    n4 = helper.make_node("Add", ["n2:0", "n3:0"], ["n4:0"], name="n4")
    n5 = helper.make_node("Abs", ["n4:0"], ["n5:0"], name="n5")
    n6 = helper.make_node("Identity", ["n5:0"], ["n6:0"], name="n6")

    graph_proto = helper.make_graph(
        nodes=[n1, n2, n3, n4, n5, n6],
        name="test",
        inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, [2, 2])],
        outputs=[helper.make_tensor_value_info("n5:0", TensorProto.FLOAT, [2, 2])],
        initializer=[]
    )
    return graph_proto
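The function above returns only a GraphProto. A short sketch of how it could be wrapped into a checkable model, assuming onnx and its helper module are imported as in the snippets on this page (the producer name is arbitrary):

model = helper.make_model(sample_net(), producer_name="sample-test")
onnx.checker.check_model(model)  # verifies node wiring and value infos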
github apache / incubator-tvm / tests / python / frontend / onnx / test_forward.py
Builds a one-node Or graph and checks TVM's ONNX frontend output against NumPy's logical_or.
def verify_or(indata, dtype):
    x = indata[0].astype(dtype)
    y = indata[1].astype(dtype)
    outdata = np.logical_or(x, y)

    node = helper.make_node('Or', inputs=['in1', 'in2'], outputs=['out'])

    graph = helper.make_graph([node],
                              'or_test',
                              inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
                                      helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])

    model = helper.make_model(graph, producer_name='or_test')

    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, tvm_out)
github onnx / tensorflow-onnx / tests / test_optimizers.py
Uses make_graph inside a Loop body and then transposes the scan output; the helper _define_loop_graph that builds the subgraph is truncated at the top of this excerpt.
            # ... (the body of _define_loop_graph is truncated in this excerpt)
            return graph

        def _make_loop(external_inputs, outputs):
            trip_cnt = self._make_onnx_const(np.array(10, dtype=np.int64), "trip_cnt")
            cond = self._make_onnx_const(np.array(True, dtype=np.bool_), "cond")  # np.bool is removed in modern NumPy
            sub_graph = _define_loop_graph(external_inputs)
            loop_node = helper.make_node("Loop", ["trip_cnt", "cond", "cond"], outputs,
                                         name="loop", body=sub_graph)
            return trip_cnt, cond, loop_node

        nodes = _make_loop(["array"], ["loop_carried", "scan_out"])
        res = helper.make_node("Transpose", ["scan_out"], ["Y"], perm=[0, 3, 1, 2], name="trans")

        graph = helper.make_graph(
            [*nodes, res],
            "transpose_with_loop",
            [helper.make_tensor_value_info("array", TensorProto.FLOAT, ["unknow"] * 4)],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, ["unknow"] * 4)],
        )

        model_proto = self.make_model(graph, producer_name="onnx-tests")
        self.run_transpose_compare(["Y"], {"array": np.random.randn(10, 3, 4, 5).astype(np.float32)},
                                   model_proto, remaining_transpose_num=0)
github onnx / tensorflow-onnx / tests / test_optimizers.py
Feeds three Constant nodes and a transposed input into a Max node, then transposes back, to exercise the transpose optimizer; the definition of const_1_val is truncated at the top of the excerpt.
        # const_1_val is defined just above in the original test (truncated here)
        const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
        const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")

        const_2_val = np.random.randn(2, 4, 5, 3).astype(np.float32)
        const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, (2, 4, 5, 3), const_2_val.flatten())
        const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")

        const_3_val = np.random.randn(2, 4, 5, 3).astype(np.float32)
        const_3 = helper.make_tensor("const_3", TensorProto.FLOAT, (2, 4, 5, 3), const_3_val.flatten())
        const_3_node = helper.make_node("Constant", [], ["const_3"], value=const_3, name="const_3")

        node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
        node2 = helper.make_node("Max", ["Y", "const_3", "const_2", "const_1"], ["Z"], name="max")
        node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=[0, 3, 1, 2], name="trans_2")

        graph = helper.make_graph(
            [const_1_node, const_2_node, const_3_node, node1, node2, node3],
            "Max-test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
            [helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (2, 3, 4, 5))],
        )

        model_proto = self.make_model(graph, producer_name="onnx-tests")
        self.run_transpose_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
                                   model_proto, remaining_transpose_num=0)
github apache / incubator-tvm / tests / python / frontend / onnx / test_forward.py
Builds a one-node Squeeze graph and checks that the TVM output has the expected squeezed shape.
def test_squeeze():
    in_shape = (1, 3, 1, 3, 1, 1)
    out_shape = (3, 3)
    y = helper.make_node("Squeeze", ['in'], ['out'], axes=[0, 2, 4, 5])

    graph = helper.make_graph([y],
                              'squeeze_test',
                              inputs=[helper.make_tensor_value_info("in",
                                                                    TensorProto.FLOAT, list(in_shape))],
                              outputs=[helper.make_tensor_value_info("out",
                                                                     TensorProto.FLOAT, list(out_shape))])

    model = helper.make_model(graph, producer_name='squeeze_test')

    for target, ctx in ctx_list():
        x = np.random.uniform(size=in_shape).astype('float32')
        tvm_out = get_tvm_output(model, x, target, ctx, out_shape, 'float32')
        tvm.testing.assert_allclose(out_shape, tvm_out.shape)
github microsoft / onnxruntime / onnxruntime / python / tools / bert / bert_model_optimization.py
Rebuilds an existing BERT graph with INT32 value infos for the embedding node's inputs, reusing the original nodes, outputs, initializers, and value_info.
def change_input_to_int32(self):
    graph = self.graph()
    inputs = []
    input_map = {}
    for input in self.embed_node.input:
        input_map[input] = onnx.helper.make_tensor_value_info(
            input, TensorProto.INT32,
            [self.batch_size if self.batch_size > 0 else 1, self.sequence_length])

    new_graph_inputs = []
    for input in graph.input:
        if input.name in self.embed_node.input:
            print("input", input.name)
            new_graph_inputs.append(input_map[input.name])

    graph_def = onnx.helper.make_graph(graph.node,
                                       'int32 inputs',
                                       new_graph_inputs,
                                       graph.output,
                                       initializer=graph.initializer,
                                       value_info=graph.value_info)

    # replace model
    self.model = onnx.helper.make_model(graph_def, producer_name='bert model optimizer')
github chainer / onnx-chainer / onnx_chainer / export.py
Converts Chainer parameters to initializers and value infos, assembles the graph, and builds a model with explicit opset imports; the start of the function is truncated.
    for param in context.parameters:
        tensor = convert_parameter(param, context)
        initializers.append(tensor)
        input_tensors.append(helper.make_tensor_value_info(
            context.get_name(param), tensor.data_type, tensor.dims))

    # Convert output tensors
    output_tensors = []
    for name, var in network_outputs.items():
        output_tensors.append(helper.make_tensor_value_info(
            name, NP_TYPE_TO_TENSOR_TYPE[var.dtype], var.shape))

    if not export_params:
        initializers = []

    onnx_graph = helper.make_graph(
        o.graph, graph_name, input_tensors, output_tensors,
        initializer=initializers)

    opset_imports = [helper.make_operatorsetid('', opset_version)]
    if external_opset_imports:
        chainer.utils.experimental('external_opset_imports')
        for domain, version in external_opset_imports.items():
            opset_imports.append(helper.make_operatorsetid(domain, version))
    model = helper.make_model(
        onnx_graph,
        producer_name='Chainer',
        producer_version=chainer.__version__,
        opset_imports=opset_imports
    )

    model.ir_version = onnx.IR_VERSION
github pfnet-research / chainer-compiler / ch2o / ch2o / chainer2onnx.py
Builds a Loop body subgraph whose inputs are the iteration counter, condition, and loop-carried values; this excerpt is truncated at both ends.
    final_outputs = []
    final_setattrs = []
    for key, (iv, ov, setattr_info) in in_out.items():
        if ov is None:
            continue
        if iv is None:
            iv = Value(False)
        out = ov.copy(env, name=key)
        final_outputs.append((key, out.value))
        if setattr_info is not None:
            final_setattrs.append(tuple(list(setattr_info) + [out]))
        input_values.append(iv.to_value_info(env))
        output_values.append(ov.to_value_info(env))

    cond = new_tensor(name='loop_cond')
    localgraph = helper.make_graph(
        localenv.nodes,
        "Loop_subgraph",
        [cnt, cond, gtx] + input_values,
        [cond, gtx] + output_values
    )

    mtc = env.calc(
        "OnikuxGenericLen",
        inputs=[ite.to_value_info(env).name],
    )

    env.addnode(
        'Loop',
        inputs=([mtc.name, "", ite.to_value_info(env).name] +
                [i.name for i in input_values]),
        outputs=([new_tensor('out_generator').name] +
github pfnet-research / chainer-compiler / scripts / onnx_script.py
A make_graph wrapper that derives value infos with _extract_value_info and converts parameters to initializers via numpy_helper.from_array.
def make_graph(self):
    inputs_vi = [_extract_value_info(a, n)
                 for n, a in self.inputs + self.params]
    outputs_vi = [_extract_value_info(a, n) for n, a in self.outputs]
    initializer = []
    for name, value in self.params:
        typ = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype]
        tensor = numpy_helper.from_array(value, name=name)
        initializer.append(tensor)
    graph = onnx.helper.make_graph(self.nodes, self.graph_name,
                                   inputs=inputs_vi, outputs=outputs_vi,
                                   initializer=initializer)
    return graph
github NVIDIA / tensorrt-inference-server / qa / common / gen_qa_dyna_sequence_models.py
Chains Add/Mul/Cast nodes over the sequence-control tensors (START, END, READY, CORRID), assembles the model, and saves it to disk; the surrounding setup is truncated.
    add0 = onnx.helper.make_node("Add", ["_INPUT", "START"], ["add0"])
    mul0 = onnx.helper.make_node("Mul", ["END", "onnx_corrid_cast0"], ["mul0"])
    sum0 = onnx.helper.make_node("Add", ["add0", "mul0"], ["sum0"])
    res0 = onnx.helper.make_node("Mul", ["READY", "sum0"], ["CAST"])
    cast = onnx.helper.make_node("Cast", ["CAST"], ["OUTPUT"], to=onnx_dtype)

    # Avoid a cast from float16 to float16: a bug in ONNX Runtime turns it
    # into a cast from float16 to float32, so use Identity instead.
    if onnx_dtype == onnx.TensorProto.FLOAT16:
        cast = onnx.helper.make_node("Identity", ["CAST"], ["OUTPUT"])

    onnx_nodes = [internal_input, onnx_corrid_cast0, add0, mul0, sum0, res0, cast]
    onnx_inputs = [onnx_input, onnx_start, onnx_end, onnx_ready, onnx_corrid]
    onnx_outputs = [onnx_output]

    graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs)
    model_def = onnx.helper.make_model(graph_proto, producer_name="TRTIS")

    try:
        os.makedirs(model_version_dir)
    except OSError:
        pass  # ignore existing dir

    onnx.save(model_def, model_version_dir + "/model.onnx")
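As a closing note (not part of the original script), the saved file can be reloaded and validated with the standard onnx API:

loaded = onnx.load(model_version_dir + "/model.onnx")
onnx.checker.check_model(loaded)  # confirms the serialized model is well formed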