How to use the onnx.helper.make_tensor_value_info function in onnx

To help you get started, we've selected a few onnx examples based on popular ways onnx.helper.make_tensor_value_info is used in public projects.

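At its core, onnx.helper.make_tensor_value_info builds a ValueInfoProto that records a tensor's name, element type, and (optionally symbolic) shape; these value infos are what onnx.helper.make_graph expects for a graph's inputs and outputs. The snippet below is a minimal sketch of that pattern; the tensor names, shapes, and the Identity node are purely illustrative:

import onnx
from onnx import TensorProto, helper

# Each call returns a ValueInfoProto. A string dimension ("batch") becomes a symbolic
# dim_param, while an int becomes a fixed dim_value.
x_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, ["batch", 8])
y_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, ["batch", 8])

# The value infos declare the graph's input and output around a single Identity node.
node = helper.make_node("Identity", ["x"], ["y"])
graph = helper.make_graph([node], "minimal_graph", [x_info], [y_info])
model = helper.make_model(graph, producer_name="value-info-demo")
onnx.checker.check_model(model)

The project examples below apply the same call when assembling much larger graphs.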

github pytorch / pytorch / caffe2 / python / trt / test_trt.py (View on GitHub)
def _test_relu_graph(self, X, batch_size, trt_max_batch_size):
        node_def = make_node("Relu", ["X"], ["Y"])
        Y_c2 = c2.run_node(node_def, {"X": X})
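        # Declare float32 value infos for X and Y (shape [batch_size, 1, 3, 2]); make_graph
        # uses them as the graph's inputs and outputs before make_model wraps the graph.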
        graph_def = make_graph(
            [node_def],
            name="test",
            inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT, [batch_size, 1, 3, 2])],
            outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [batch_size, 1, 3, 2])])
        model_def = make_model(graph_def, producer_name='relu-test')
        op_outputs = [x.name for x in model_def.graph.output]
        op = convert_onnx_model_to_trt_op(model_def, max_batch_size=trt_max_batch_size)
        device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
        op.device_option.CopyFrom(device_option)
        Y_trt = None
        ws = Workspace()
        with core.DeviceScope(device_option):
            ws.FeedBlob("X", X)
            ws.RunOperatorsOnce([op])
            output_values = [ws.FetchBlob(name) for name in op_outputs]
            Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
        np.testing.assert_almost_equal(Y_c2, Y_trt)
github apache / incubator-tvm / tests / python / frontend / onnx / test_forward.py (View on GitHub)
beta = np.random.randn(shape[1]).astype(np.float32)
    epsilon = 1e-5
    y = _get_python_instance_norm(x, gamma, beta, epsilon).astype(np.float32)

    node = onnx.helper.make_node(
        'InstanceNormalization',
        inputs=['x', 'gamma', 'beta'],
        outputs=['y'],
        epsilon=epsilon,
    )
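    # Declare typed, shaped value infos for x, gamma, beta (inputs) and y (output).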
    graph = helper.make_graph([node],
                              "instance_norm_test",
                              inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)),
                                      helper.make_tensor_value_info(
                                          "gamma", TensorProto.FLOAT, (shape[1],)),
                                      helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],))],
                              outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))])
    model = helper.make_model(graph, producer_name='instance_norm_test')
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(
            model, [x, gamma, beta], target, ctx, shape, 'float32')
        tvm.testing.assert_allclose(y, tvm_out, rtol=1e-5, atol=1e-5)
github onnx / onnx-tensorflow / test / backend / test_node.py (View on GitHub)
concat_node = helper.make_node("Concat", ["concat1_in", "concat2_in"],
                                   ["concat_out"], axis=0)
    add_node = helper.make_node("Add", ["concat_out", "const_1"], ["add_out"])
    split_node = helper.make_node("Split", ["add_out"],
                                  ["split1_out", "split2_out", "split3_out",
                                   "split4_out"])

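    # Typed value infos for the scan body's state, data inputs, and split outputs.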
    state_in = helper.make_tensor_value_info('state_in',
                                             TensorProto.FLOAT, [1])
    concat1_in = helper.make_tensor_value_info('concat1_in',
                                               TensorProto.FLOAT, input_shape)
    concat2_in = helper.make_tensor_value_info('concat2_in',
                                               TensorProto.FLOAT, input_shape)
    state_out  = helper.make_tensor_value_info('state_out',
                                               TensorProto.FLOAT, [1])
    split1_out = helper.make_tensor_value_info('split1_out',
                                               TensorProto.FLOAT, output_shape)
    split2_out = helper.make_tensor_value_info('split2_out',
                                               TensorProto.FLOAT, output_shape)
    split3_out = helper.make_tensor_value_info('split3_out',
                                               TensorProto.FLOAT, output_shape)
    split4_out = helper.make_tensor_value_info('split4_out',
                                               TensorProto.FLOAT, output_shape)

    scan_body = helper.make_graph(
        [constant_node, state_add_node, concat_node, add_node, split_node],
        "scan_body",
        [state_in, concat1_in, concat2_in],
        [state_out, split1_out, split2_out, split3_out, split4_out],
    )

    node_kwargs = {"op_type": "Scan",
github onnx / tensorflow-onnx / tests / test_optimizers.py (View on GitHub)
const_2_val = np.random.randn(2, 4, 5, 3).astype(np.float32)
        const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, (2, 4, 5, 3), const_2_val.flatten())
        const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")

        const_3_val = np.random.randn(2, 4, 5, 3).astype(np.float32)
        const_3 = helper.make_tensor("const_3", TensorProto.FLOAT, (2, 4, 5, 3), const_3_val.flatten())
        const_3_node = helper.make_node("Constant", [], ["const_3"], value=const_3, name="const_3")

        node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
        node2 = helper.make_node("Max", ["Y", "const_3", "const_2", "const_1"], ["Z"], name="max")
        node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=[0, 3, 1, 2], name="trans_2")

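        # Graph I/O: value infos give the input X and output Z1 their float32 type and
        # (2, 3, 4, 5) shape.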
        graph = helper.make_graph(
            [const_1_node, const_2_node, const_3_node, node1, node2, node3],
            "Max-test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
            [helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (2, 3, 4, 5))],
        )

        model_proto = self.make_model(graph, producer_name="onnx-tests")
        self.run_transpose_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
                                   model_proto, remaining_transpose_num=0)
github onnx / tensorflow-onnx / tests / test_optimizers.py (View on GitHub)
def _define_loop_graph(external_inputs):
            # external_inputs: external nodes whose outputs this subgraph reads
            # loop body with no loop-carried computation; per iteration:
            #   a = external_inputs[i]; b = transpose(a); c = squeeze(b), where c is the scan output
            node1 = helper.make_node("Gather", [external_inputs[0], "loop_iter_num"], ["Y0"])
            node2 = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=[0, 2, 3, 1])
            # graph output
            node3 = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
            node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
            node5 = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])

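            # Loop-body signature: value infos declare (iteration_num, condition, loop_carried)
            # as inputs and (cond_output, carried_output, scan_output) as outputs.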
            graph = helper.make_graph(
                [node1, node2, node3, node4, node5],
                "loop_subgraph",
                [helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)),  # iteration_num
                 helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()),  # condition
                 helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ())  # loop_carried
                 ],
                [helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
                 helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
                 helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * 3)
                 ],
            )
            return graph
github LLNL / lbann / tools / onnx / lbann_onnx / l2o / functions / __init__.py (View on GitHub)
"dummy"])):
        return {}

    lbannInputs = list(map(lambda x: "{}_0".format(x),
                           l.parents.split(" ") if l.parents != "" else []))
    lbannOutputs = l.children.split(" ") if len(l.children) > 0 else []

    for f in FUNCTIONS.keys():
        if l.HasField("split"):
            if l.name not in tensorShapes.keys():
                lbann_onnx.util.printError("The shape of \"{}\" cannot be inferred.".format(l.name) \
                                           + " This error may happen when you set an incorrect input tensor name.")
                lbann_onnx.util.printParsingState(l, tensorShapes)
                exit()

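            # Build a value info for the layer's "<name>_0" tensor from LBANN's element type
            # and the inferred shape; it is returned as a graph input.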
            ipt = onnx.helper.make_tensor_value_info(name="{}_0".format(l.name),
                                                     elem_type=lbann_onnx.ELEM_TYPE,
                                                     shape=tensorShapes[l.name])

            return {"inputs": [ipt]}

        if l.HasField(f):
            for i in lbannInputs:
                if i not in tensorShapes.keys():
                    lbann_onnx.util.printError("The shape of \"{}\" cannot be inferred.".format(i))
                    lbann_onnx.util.printParsingState(l, tensorShapes)
                    exit()

            arg = getattr(l, f)
            if f == "unpooling":
                arg = list(filter(lambda x: x.name == l.unpooling.pooling_layer, knownNodes))[0]
github NVIDIA / tensorrt-inference-server / qa / common / gen_qa_models.py (View on GitHub)
onnx_input_dtype = np_to_onnx_dtype(input_dtype)
    onnx_output0_dtype = np_to_onnx_dtype(output0_dtype)
    onnx_output1_dtype = np_to_onnx_dtype(output1_dtype)

    onnx_input_shape, idx = tu.shape_to_onnx_shape(input_shape, 0)
    onnx_output0_shape, idx = tu.shape_to_onnx_shape(input_shape, idx)
    onnx_output1_shape, idx = tu.shape_to_onnx_shape(input_shape, idx)

    # Create the model
    model_name = tu.get_model_name("onnx_nobatch" if max_batch == 0 else "onnx",
                                   input_dtype, output0_dtype, output1_dtype)
    model_version_dir = models_dir + "/" + model_name + "/" + str(model_version)

    batch_dim = [] if max_batch == 0 else [None]

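    # A None dimension is left unspecified in the value info, modeling a variable batch
    # size when max_batch > 0.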
    in0 = onnx.helper.make_tensor_value_info("INPUT0", onnx_input_dtype, batch_dim + onnx_input_shape)
    in1 = onnx.helper.make_tensor_value_info("INPUT1", onnx_input_dtype, batch_dim + onnx_input_shape)

    out0 = onnx.helper.make_tensor_value_info("OUTPUT0", onnx_output0_dtype, batch_dim + onnx_output0_shape)
    out1 = onnx.helper.make_tensor_value_info("OUTPUT1", onnx_output1_dtype, batch_dim + onnx_output1_shape)

    internal_in0 = onnx.helper.make_node("Identity", ["INPUT0"], ["_INPUT0"])
    internal_in1 = onnx.helper.make_node("Identity", ["INPUT1"], ["_INPUT1"])

    # cast int8 and int16 inputs to a higher-precision int, as the ONNX Add/Sub operators don't support those types;
    # also cast the String data type to int32
    if ((onnx_input_dtype == onnx.TensorProto.INT8) or (onnx_input_dtype == onnx.TensorProto.INT16) or
        (onnx_input_dtype == onnx.TensorProto.STRING)):
        internal_in0 = onnx.helper.make_node("Cast", ["INPUT0"], ["_INPUT0"], to=onnx.TensorProto.INT32)
        internal_in1 = onnx.helper.make_node("Cast", ["INPUT1"], ["_INPUT1"], to=onnx.TensorProto.INT32)

    add = onnx.helper.make_node("Add", ["_INPUT0", "_INPUT1"], ["CAST0" if not swap else "CAST1"])
github LLNL / lbann / python / lbann / onnx / l2o / __init__.py (View on GitHub)
inputs.extend(ret["inputs"])

        if "inits" in ret.keys():
            inits.extend(ret["inits"])

        if "nodes" in ret.keys():
            nodes.extend(ret["nodes"])

    for l in params.keys():
        for i,p in enumerate(params[l]):
            name = "{}_p{}".format(l, i)
            inits.append(onnx.numpy_helper.from_array(p, name=name))

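    # Expose each metric and objective-function term as a scalar graph output
    # (an empty shape means a rank-0 tensor).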
    for metric in pb.model.metric:
        assert metric.HasField("layer_metric")
        outputs.append(onnx.helper.make_tensor_value_info(name="{}_0".format(metric.layer_metric.layer),
                                                          elem_type=lbann.onnx.ELEM_TYPE,
                                                          shape=[]))

    for term in pb.model.objective_function.layer_term:
        outputs.append(onnx.helper.make_tensor_value_info(name="{}_0".format(term.layer),
                                                          elem_type=lbann.onnx.ELEM_TYPE,
                                                          shape=[]))

    g = onnx.helper.make_graph(nodes, "graph", inputs, outputs, inits)
    o = onnx.helper.make_model(g)
    if addValueInfo:
        o = onnx.shape_inference.infer_shapes(o)

    return o, miniBatchSize