How to use the onnx.helper module in onnx

To help you get started, we’ve selected a few onnx examples, based on popular ways it is used in public projects.
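
Before the project examples, here is a minimal, self-contained sketch of the typical onnx.helper workflow: declare value infos, build a node, wrap everything in a graph, and produce a checked model. The names are illustrative:

import onnx
from onnx import helper, TensorProto

# Declare the graph's input and output (name, element type, shape).
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 4])

# A single Relu node wiring input 'X' to output 'Y'.
relu = helper.make_node('Relu', inputs=['X'], outputs=['Y'])

# Assemble the graph and model, then validate.
graph = helper.make_graph([relu], 'minimal-graph', [X], [Y])
model = helper.make_model(graph, producer_name='onnx-helper-demo')
onnx.checker.check_model(model)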


github microsoft / onnxruntime / onnxruntime / python / tools / automl / create_test_model.py
import onnx
from onnx import helper, TensorProto


def create_model():
    """
    Create a test feed model consisting of one Identity node per
    input tensor type.
    """
    args = parse_arguments()  # parse_arguments() is defined elsewhere in this file

    # bool identity
    bool_input = helper.make_tensor_value_info('BoolInput', TensorProto.BOOL, [1, 1])
    # Create output for Identity
    bool_output = helper.make_tensor_value_info('BoolOutput', TensorProto.BOOL, [1, 1])
    # Create node def
    bool_identity_def = helper.make_node('Identity', inputs=['BoolInput'], outputs=['BoolOutput'], name='BoolIdentity')

    # string identity
    string_input = helper.make_tensor_value_info('StringInput', TensorProto.STRING, [1, 1])
    string_output = helper.make_tensor_value_info('StringOutput', TensorProto.STRING, [1, 1])
    string_identity_def = helper.make_node('Identity', inputs=['StringInput'], outputs=['StringOutput'], name='StringIdentity')

    # double identity
    double_input = helper.make_tensor_value_info('DoubleInput', TensorProto.DOUBLE, [1, 1])
    double_output = helper.make_tensor_value_info('DoubleOutput', TensorProto.DOUBLE, [1, 1])
    double_identity_def = helper.make_node('Identity', inputs=['DoubleInput'], outputs=['DoubleOutput'], name='DoubleIdentity')

    # int8 ... (remaining dtypes truncated on the source page)
github onnx / sklearn-onnx / tests / test_algebra_onnx_operators_scan.py
        # The page cuts in mid-test. The definitions below are reconstructed
        # from the standard ONNX Scan example that this test follows (onnx,
        # helper, and numpy as np are imported at the top of the test file).
        sum_in = onnx.helper.make_tensor_value_info(
            'sum_in', onnx.TensorProto.FLOAT, [2])
        next = onnx.helper.make_tensor_value_info(
            'next', onnx.TensorProto.FLOAT, [2])
        sum_out = onnx.helper.make_tensor_value_info(
            'sum_out', onnx.TensorProto.FLOAT, [2])
        scan_out = onnx.helper.make_tensor_value_info(
            'scan_out', onnx.TensorProto.FLOAT, [2])
        add_node = onnx.helper.make_node(
            'Add', inputs=['sum_in', 'next'], outputs=['sum_out'])
        id_node = onnx.helper.make_node(
            'Identity', inputs=['sum_out'], outputs=['scan_out'])
        scan_body = onnx.helper.make_graph(
            [add_node, id_node],
            'scan_body',
            [sum_in, next],
            [sum_out, scan_out]
        )
        node = onnx.helper.make_node(
            'Scan',
            inputs=['initial', 'x'],
            outputs=['y', 'z'],
            num_scan_inputs=1,
            body=scan_body
        )

        initial = helper.make_tensor_value_info(
            'initial', TensorProto.FLOAT, [2, ])
        X = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 2])
        Y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, ])
        Z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [3, 2])

        graph_def = helper.make_graph(
            [node],
            'test-model',
            [initial, X],
            [Y, Z],
        )

        model_def = helper.make_model(graph_def, producer_name='onnx-example')

        initial = np.array([0, 0]).astype(np.float32).reshape((2,))
        x = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32).reshape((3, 2))
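The test is cut off here. The following sketch (using onnxruntime as a stand-in for the test's own harness) shows what the Scan computes, namely running row-wise sums:

import onnxruntime as ort

sess = ort.InferenceSession(model_def.SerializeToString(),
                            providers=['CPUExecutionProvider'])
y, z = sess.run(None, {'initial': initial, 'x': x})
# y is the final state, z the per-step states:
#   y == [9., 12.]
#   z == [[1., 2.], [4., 6.], [9., 12.]]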
github hpi-xnor / BMXNet-v2 / tests / python-pytest / onnx / test_node.py
from onnx import helper, TensorProto


def get_onnx_graph(testname, input_names, inputs, output_name, output_shape, attr):
    # Note: despite its name, `output_name` is the ONNX op type of the node
    # under test; the node's single output tensor is always named "output".
    outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, shape=output_shape)]

    nodes = [helper.make_node(output_name, input_names, ["output"], **attr)]

    graph = helper.make_graph(nodes, testname, inputs, outputs)

    model = helper.make_model(graph)
    return model
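A hypothetical call (the names below are illustrative, not from the test suite) builds a model around a single Add node; note that output_name is passed to make_node as the op type:

input_infos = [
    helper.make_tensor_value_info("a", TensorProto.FLOAT, shape=(2, 3)),
    helper.make_tensor_value_info("b", TensorProto.FLOAT, shape=(2, 3)),
]
model = get_onnx_graph("test_add", ["a", "b"], input_infos, "Add", (2, 3), {})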
github onnx / tensorflow-onnx / tests / test_graph.py
def onnx_pretty(g, args=None):
    """Pretty print graph."""
    model_proto = g.make_model("converted from {}".format(args.input))
    return helper.printable_graph(model_proto.graph)
github NVIDIA / tensorrt-inference-server / qa / common / gen_qa_dyna_sequence_models.py
    # (np_to_onnx_dtype and tu.shape_to_onnx_shape are test utilities defined
    # elsewhere in this repository)
    # Create the model. For now don't implement a proper accumulator,
    # just return 0 if not-ready and 'INPUT'+'START'*('END'*'CORRID')
    # otherwise... the tests know to expect this.
    onnx_dtype = np_to_onnx_dtype(dtype)
    onnx_input_shape, idx = tu.shape_to_onnx_shape(shape, 0)
    onnx_output_shape, idx = tu.shape_to_onnx_shape(shape, idx)

    # If the input is a string then use int32 for operation and just
    # cast to/from string for input and output.
    onnx_control_dtype = onnx_dtype
    if onnx_dtype == onnx.TensorProto.STRING:
        onnx_control_dtype = onnx.TensorProto.INT32

    batch_dim = [] if max_batch == 0 else [None]

    onnx_input = onnx.helper.make_tensor_value_info("INPUT", onnx_dtype, batch_dim + onnx_input_shape)
    onnx_start = onnx.helper.make_tensor_value_info("START", onnx_control_dtype, batch_dim + [1])
    onnx_end = onnx.helper.make_tensor_value_info("END", onnx_control_dtype, batch_dim + [1])
    onnx_ready = onnx.helper.make_tensor_value_info("READY", onnx_control_dtype, batch_dim + [1])
    onnx_corrid = onnx.helper.make_tensor_value_info("CORRID", onnx.TensorProto.UINT64, batch_dim + [1])
    onnx_output = onnx.helper.make_tensor_value_info("OUTPUT", onnx_dtype, batch_dim + onnx_output_shape)

    internal_input = onnx.helper.make_node("Identity", ["INPUT"], ["_INPUT"])

    # Cast int8/int16 inputs to a higher-precision int, since the ONNX
    # Add/Sub operators don't support those types; String is likewise
    # cast to int32.
    if ((onnx_dtype == onnx.TensorProto.INT8) or (onnx_dtype == onnx.TensorProto.INT16) or
        (onnx_dtype == onnx.TensorProto.STRING)):
        internal_input = onnx.helper.make_node("Cast", ["INPUT"], ["_INPUT"], to=onnx.TensorProto.INT32)

    onnx_corrid_cast0 = onnx.helper.make_node("Cast", ["CORRID"], ["onnx_corrid_cast0"],
                                              to=onnx_control_dtype)
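The generator is truncated here. One plausible continuation, a sketch rather than the repository's exact code, wires up elementwise ops for the formula in the opening comment (the node names mul0, mul1, sum0 are illustrative):

    # Sketch: OUTPUT = READY * (INPUT + START * (END * CORRID)), so the
    # model returns 0 whenever the READY control signal is 0.
    mul0 = onnx.helper.make_node("Mul", ["END", "onnx_corrid_cast0"], ["mul0"])
    mul1 = onnx.helper.make_node("Mul", ["START", "mul0"], ["mul1"])
    sum0 = onnx.helper.make_node("Add", ["_INPUT", "mul1"], ["sum0"])
    result = onnx.helper.make_node("Mul", ["READY", "sum0"], ["OUTPUT"])
    # (the real generator would also cast the result back for int8/int16/string)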
github pfnet-research / chainer-compiler / lstm.py
import numpy as np
import onnx


def export_peepholes():  # type: () -> None
    input = np.array(
        [[[1., 2., 3., 4.], [5., 6., 7., 8.]]]).astype(np.float32)

    input_size = 4
    hidden_size = 3
    weight_scale = 0.1
    number_of_gates = 4
    number_of_peepholes = 3

    node = onnx.helper.make_node(
        'LSTM',
        inputs=['X', 'W', 'R', 'B', 'sequence_lens',
                'initial_h', 'initial_c', 'P'],
        outputs=['', 'Y'],
        hidden_size=hidden_size
    )

    # Initializing Inputs
    W = weight_scale * \
        np.ones((1, number_of_gates * hidden_size,
                 input_size)).astype(np.float32)
    R = weight_scale * \
        np.ones((1, number_of_gates * hidden_size,
                 hidden_size)).astype(np.float32)
    B = np.zeros((1, 2 * number_of_gates * hidden_size)).astype(np.float32)
    seq_lens = np.repeat(input.shape[0], input.shape[1]).astype(np.int32)
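The example stops mid-setup. In the upstream ONNX backend test this mirrors, the remaining inputs and the check look roughly like this (LSTM_Helper and expect are ONNX test-infrastructure helpers, assumed here):

    # Continuation sketch, following the standard ONNX LSTM backend test.
    P = weight_scale * \
        np.ones((1, number_of_peepholes * hidden_size)).astype(np.float32)
    init_h = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)
    init_c = np.zeros((1, input.shape[1], hidden_size)).astype(np.float32)

    lstm = LSTM_Helper(X=input, W=W, R=R, B=B, P=P,
                       initial_c=init_c, initial_h=init_h)
    _, Y_h = lstm.step()
    expect(node, inputs=[input, W, R, B, seq_lens, init_h, init_c, P],
           outputs=[Y_h.astype(np.float32)], name='test_lstm_with_peepholes')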
github microsoft / onnxconverter-common / onnxconverter_common / float16.py
            continue  # (the enclosing loop and condition are truncated on the source page)
        # if the output's name is in value_info_list, the output is tensor(float16):
        # insert a float-to-float16 Cast node after this node, rename the node's
        # output, and create a new value_info for the new name
        for i in range(len(node.output)):
            output = node.output[i]
            for value_info in value_info_list:
                if output == value_info.name:
                    # create new value_info for current node's new output
                    new_value_info = model.graph.value_info.add()
                    new_value_info.CopyFrom(value_info)
                    input_name = node.name + '_output_cast_' + str(i)
                    new_value_info.name = input_name
                    new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
                    # add Cast node (from tensor(float) to tensor(float16)) after current node
                    node_name = node.name + '_output_cast' + str(i)
                    new_node = [helper.make_node('Cast', [input_name], [output], to=10, name=node_name)]  # to=10 is FLOAT16
                    model.graph.node.extend(new_node)
                    # change current node's output name
                    node.output[i] = input_name
                    break  # each output name matches at most one value_info
    return model
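One readability note: the to=10 attribute above is the numeric value of TensorProto.FLOAT16, so an equivalent and clearer spelling is:

helper.make_node('Cast', [input_name], [output],
                 to=onnx_proto.TensorProto.FLOAT16, name=node_name)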
github Rapternmn / PyTorch-Onnx-Tensorrt / create_onnx.py
			elif layer_type == "upsample":
				initializer_layer, inputs_layer = weight_loader.load_upsample_scales(
					params)
				initializer.extend(initializer_layer)
				inputs.extend(inputs_layer)
		del weight_loader
		self.graph_def = helper.make_graph(
			nodes=self._nodes,
			name='YOLOv3-608',
			inputs=inputs,
			outputs=outputs,
			initializer=initializer
		)
		if verbose:
			print(helper.printable_graph(self.graph_def))
		model_def = helper.make_model(self.graph_def,
									  producer_name='NVIDIA TensorRT sample')
		return model_def
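Not shown in this file, but a common follow-up before deployment is to validate and serialize the result (the output filename below is hypothetical):

import onnx

onnx.checker.check_model(model_def)
onnx.save(model_def, 'yolov3-608.onnx')  # hypothetical filename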
github hpi-xnor / BMXNet-v2 / python / mxnet / contrib / onnx / mx2onnx / export_onnx.py
                    # ... saved to json file,
                    # refer "output_label" initialization above for more details.
                    # if extra node was added then prev_index to the last node is adjusted.
                    if idx == (len(mx_graph) - 1) and \
                            mx_graph[len(mx_graph)-2]["name"] == output_label:
                        prev_index = index_lookup[idx - 2]
                    else:
                        prev_index = index_lookup[idx - 1]

                    index_lookup.append(prev_index+len(converted))
                else:
                    index_lookup.append(len(converted) - 1)
            else:
                logging.info("Operator converter function should always return a list")

        graph = helper.make_graph(
            onnx_processed_nodes,
            "mxnet_converted_model",
            onnx_processed_inputs,
            onnx_processed_outputs
        )

        graph.initializer.extend(initializer)

        checker.check_graph(graph)
        return graph
github Xilinx / finn / src / finn / transformation / fpgadataflow / convert_to_hls_layers.py
                    mt_output = consumer.output[0]
                    mt_thres = consumer.input[1]
                    T = model.get_initializer(mt_thres)
                    assert T.shape[0] == 1 or T.shape[0] == mh
                    odt = model.get_tensor_datatype(mt_output)
                    if odt.bitwidth() == 1:
                        # covers both bipolar and binary
                        actval = 0
                    else:
                        actval = odt.min()
                    in_shape = [1, mw]
                    out_shape = [1, mh]
                    model.set_tensor_shape(mm_input, in_shape)
                    model.set_tensor_shape(mt_output, out_shape)
                    # create and insert new StreamingFCLayer node
                    new_node = helper.make_node(
                        "StreamingFCLayer_Batch",
                        [mm_input, mm_weight, mt_thres],
                        [mt_output],
                        domain="finn",
                        backend="fpgadataflow",
                        resType="ap_resource_lut()",
                        MW=mw,
                        MH=mh,
                        SIMD=simd,
                        PE=pe,
                        inputDataType=idt.name,
                        weightDataType=wdt.name,
                        outputDataType=odt.name,
                        ActVal=actval,
                        binaryXnorMode=1,
                        noActivation=0,
                        # (any further arguments are truncated on the source page)
                    )