How to use the onnx.helper.make_node function in onnx

To help you get started, we’ve selected a few onnx.helper.make_node examples based on popular ways it is used in public projects.

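At its core, helper.make_node(op_type, inputs, outputs, name=None, **attrs) builds a single NodeProto: you pass the operator type, the names of its input and output tensors, and any ONNX attributes as keyword arguments. A minimal end-to-end sketch (the names here are illustrative, not taken from the projects below):

import onnx
from onnx import TensorProto, helper

# One Relu node: operator type, input tensor names, output tensor names.
node = helper.make_node("Relu", ["x"], ["y"])

graph = helper.make_graph(
    [node],
    "relu-example",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [2, 3])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [2, 3])],
)
model = helper.make_model(graph, producer_name="make-node-example")
onnx.checker.check_model(model)  # verifies node wiring and types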

github onnx / tensorflow-onnx / tests / test_optimizers.py
def _define_loop_graph(external_inputs):
    # external_inputs: external nodes that will be consumed by this subgraph
    # no real loop-carried computation; each iteration computes
    #   a = external_inputs[i]; b = trans(a); c = squeeze(b)  -- c is the scan output
    node1 = helper.make_node("Gather", [external_inputs[0], "loop_iter_num"], ["Y0"])
    node2 = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=[0, 2, 3, 1])
    # graph outputs
    node3 = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
    node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
    node5 = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])

    graph = helper.make_graph(
        [node1, node2, node3, node4, node5],
        "loop_subgraph",
        [helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)),  # iteration_num
         helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()),  # condition
         helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ()),  # loop-carried value
         ],
        [helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
         helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
         helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * 3),
         ],
    )
    return graph
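A body graph like this is typically attached to a Loop node through its body attribute. A sketch of the hookup (the outer-graph tensor names here are illustrative):

# Loop inputs: max trip count, initial condition, initial loop-carried values;
# Loop outputs: final loop-carried values followed by the stacked scan outputs.
loop_node = helper.make_node(
    "Loop",
    ["max_trip_count", "initial_condition", "initial_carried"],
    ["final_carried", "scan_outputs"],
    body=_define_loop_graph(["some_external_tensor"]),
)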

github onnx / onnx-tensorflow / test / backend / test_node.py
def test_tile(self):
    if legacy_onnx_pre_ver(1, 2):
        raise unittest.SkipTest(
            "The current version of ONNX does not correctly record the opset of Tile."
        )
    node_def = helper.make_node("Tile", ["X1", "X2"], ["Z"])
    x = self._get_rnd_float32(shape=[3, 5, 5, 3])
    repeats = [1, 1, 2, 1]
    output = run_node(node_def, [x, repeats])
    np.testing.assert_allclose(output["Z"], np.tile(x, repeats), rtol=1e-3)

github Xilinx / finn / tests / test_mixed_onnx_exec.py
def test_execute_mixed_model():
    out0 = helper.make_tensor_value_info("out0", TensorProto.FLOAT, [6, 3, 2, 2])

    graph_def = helper.make_graph(
        nodes=[
            helper.make_node(
                "MultiThreshold", ["v", "thresholds"], ["out0"], domain="finn"
            ),
            helper.make_node("Relu", ["out0"], ["out1"]),
        ],
        name="test-model",
        inputs=[
            helper.make_tensor_value_info("v", TensorProto.FLOAT, [6, 3, 2, 2]),
            helper.make_tensor_value_info("thresholds", TensorProto.FLOAT, [3, 7]),
        ],
        outputs=[
            helper.make_tensor_value_info("out1", TensorProto.FLOAT, [6, 3, 2, 2])
        ],
        value_info=[out0],
    )
    model_def = helper.make_model(graph_def, producer_name="onnx-example")
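A cheap sanity check at this point is onnx.checker.check_model. One caveat: models using a custom domain such as "finn" should declare it in their opset imports; the version numbers below are an assumption for illustration:

model_def = helper.make_model(
    graph_def,
    producer_name="onnx-example",
    opset_imports=[helper.make_opsetid("", 11), helper.make_opsetid("finn", 1)],
)
onnx.checker.check_model(model_def)  # skips schema checks for domains it does not know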

github apache / singa / test / python / test_onnx_backend.py
def test_Asinh(self):  # type: () -> None
    node = onnx.helper.make_node(
        'Asinh',
        inputs=['x'],
        outputs=['y'],
    )

    x = np.array([-1, 0, 1]).astype(np.float32)
    y = np.arcsinh(x)  # expected output [-0.88137358,  0.,  0.88137358]
    expect(node, inputs=[x], outputs=[y],
           name='test_asinh_example')

    x = np.random.randn(3, 4, 5).astype(np.float32)
    y = np.arcsinh(x)
    expect(node, inputs=[x], outputs=[y],
           name='test_asinh')
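The expect helper above comes from ONNX's backend-test machinery. To execute a single node without it, one option is onnx.reference.ReferenceEvaluator (available in onnx >= 1.13), wrapping the node in a one-node model:

import numpy as np
from onnx import TensorProto, helper
from onnx.reference import ReferenceEvaluator

node = helper.make_node('Asinh', inputs=['x'], outputs=['y'])
graph = helper.make_graph(
    [node], 'asinh_check',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])],
)
x = np.array([-1, 0, 1], dtype=np.float32)
(y,) = ReferenceEvaluator(helper.make_model(graph)).run(None, {'x': x})
np.testing.assert_allclose(y, np.arcsinh(x), rtol=1e-6)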

github onnx / tensorflow-onnx / tests / test_optimizers.py
def test_identity_non_graph_output(self):
    node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
    node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
    node3 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")

    graph = helper.make_graph(
        [node1, node2, node3],
        "identity-test",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
        [helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
    )

    model_proto = self.make_model(graph, producer_name="onnx-tests")
    self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
                              model_proto, remaining_identity_num=0)

github onnx / tensorflow-onnx / tests / test_optimizers.py
def test_transpose_with_squeeze2(self):
    # squeeze the second dim
    node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans")
    node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1])

    graph = helper.make_graph(
        [node1, node2],
        "transpose_with_squeeze",
        [helper.make_tensor_value_info("X", TensorProto.FLOAT, (3, 4, 1, 5))],
        [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5, 4))],
    )

    model_proto = self.make_model(graph, producer_name="onnx-tests")
    model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(3, 4, 1, 5).astype(np.float32)},
                                                 model_proto, remaining_transpose_num=1)
    self.check_transpose_perm(model_after_opt, [0, 2, 1])

github microsoft / onnxruntime / onnxruntime / python / tools / quantization / quantize.py
            parameter input_name: Input name.
            parameter weight_scale_name: Weight scale.
            parameter bias_name: Bias to quantize.
            parameter quantized_bias_name: Output name to use for the quantized bias.
        '''
        qType = onnx_proto.TensorProto.INT32

        input_scale_name = input_name + "_scale"
        bias_scale_node = onnx.helper.make_node("Mul", [input_scale_name, weight_scale_name],
                                                [bias_name + "_scale"], bias_name + "_scale_node")
        new_node_list.append(bias_scale_node)

        quantize_bias_node = onnx.helper.make_node("Div", [bias_name, bias_scale_node.output[0]],
                                                   [bias_name + "_tmp_quant:0"], bias_name + "_tmp_quant")
        new_node_list.append(quantize_bias_node)

        bias_rounded_node = onnx.helper.make_node("Floor", quantize_bias_node.output,
                                                  [bias_name + "_quant_rounded:0"], bias_name + "_quant_rounded")
        new_node_list.append(bias_rounded_node)

        bias_cast_node = onnx.helper.make_node("Cast", bias_rounded_node.output,
                                               [quantized_bias_name], quantized_bias_name + "_node", to=qType)
        new_node_list.append(bias_cast_node)

        return
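Numerically, those four nodes reduce to the following numpy sketch, where input_scale, weight_scale, and bias stand for the arrays behind the corresponding tensor names (illustrative, not part of the original code):

bias_scale = input_scale * weight_scale                        # the "Mul" node
quantized_bias = np.floor(bias / bias_scale).astype(np.int32)  # "Div" + "Floor" + "Cast" to INT32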

github microsoft / onnxruntime / onnxruntime / python / tools / quantization / quantize.py
        assert (node.op_type == "MatMul")

        (quantized_input_names, zero_point_names, scale_names, nodes) = \
            self._quantize_inputs(node, [0, 1], new_nodes_list)

        matmul_integer_output = node.output[0] + "_quantized"
        matmul_integer_name = ""
        if node.name != "":
            matmul_integer_name = node.name + "_quant"
        matmul_integer_node = onnx.helper.make_node("MatMulInteger", quantized_input_names + zero_point_names,
                                                    [matmul_integer_output], matmul_integer_name)
        nodes.append(matmul_integer_node)

        # Add a cast operation to cast the MatMulInteger output to float.
        cast_op_output = matmul_integer_output + "_cast_output"
        cast_node = onnx.helper.make_node("Cast", [matmul_integer_output], [cast_op_output],
                                          matmul_integer_output + "_cast", to=onnx_proto.TensorProto.FLOAT)
        nodes.append(cast_node)

        # Add a mul operation to multiply the scales of the two inputs.
        assert (len(scale_names) == 2)
        if matmul_integer_name != "":
            scales_mul_op = matmul_integer_name + "_scales_mul"
        else:
            scales_mul_op = scale_names[0] + "_" + scale_names[1] + "_mul"

        scales_mul_node = _find_node_by_name(scales_mul_op, self.model.graph, new_nodes_list)
        if scales_mul_node is None:
            scales_mul_node = _get_mul_node(scale_names, scales_mul_op + ":0", scales_mul_op)
            nodes.append(scales_mul_node)

        scales_mul_op_output = scales_mul_node.output[0]
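The excerpt is cut off here, but the chain it builds (MatMulInteger, then Cast, then a Mul by the product of the two input scales) is the usual integer-matmul dequantization. As a numpy sketch, ignoring zero points for brevity:

def dequantized_matmul(q_a, q_b, scale_a, scale_b):
    # int32 accumulation of the quantized operands, then rescale back to float
    acc = q_a.astype(np.int32) @ q_b.astype(np.int32)
    return acc.astype(np.float32) * (scale_a * scale_b)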

github chainer / onnx-chainer / onnx_chainer / onnx_helper.py
def make_node(*args, **kwargs):
    """A thin wrapper of `onnx.helper.make_node`.

    Node name will be assigned automatically.

    Args:
        *args (tuple): ONNX node parameters of the node.
        **kwargs (dict): ONNX attributes of the node.
    Returns:
        An `onnx.NodeProto` object.
    """
    return onnx.helper.make_node(*args, name=get_func_name(), **kwargs)
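A call site then looks like plain onnx.helper.make_node minus the name argument (a sketch; get_func_name is this module's own name generator):

relu = make_node('Relu', ['x'], ['y'])  # node name assigned automatically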

github pfnet-research / chainer-compiler / lstm.py
def export_initial_bias():  # type: () -> None
    input = np.array(
        [[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]]).astype(np.float32)

    input_size = 3
    hidden_size = 4
    weight_scale = 0.1
    custom_bias = 0.1
    number_of_gates = 4

    node = onnx.helper.make_node(
        'LSTM',
        inputs=['X', 'W', 'R', 'B'],
        outputs=['', 'Y'],
        hidden_size=hidden_size
    )

    W = weight_scale * \
        np.ones((1, number_of_gates * hidden_size,
                 input_size)).astype(np.float32)
    R = weight_scale * \
        np.ones((1, number_of_gates * hidden_size,
                 hidden_size)).astype(np.float32)

    # Adding custom bias
    W_B = custom_bias * \
        np.ones((1, number_of_gates * hidden_size)).astype(np.float32)