How to use the onnx.TensorProto class in onnx

To help you get started, we’ve selected a few onnx examples, based on popular ways it is used in public projects.


github onnx / sklearn-onnx / tests / test_algebra_onnx_operators.py View on Github external
def test_onnx_reversed_order_second(self):
        X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [2, 2])
        Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [2, 2])

        nodes = [
            helper.make_node('Add', ['X', 'idi'], ['temp']),
            helper.make_node('Add', ['temp', 'idi2'], ['Y'])
        ]
        graph_def = helper.make_graph(nodes, 't1', [X], [Y])
        model_def = helper.make_model(graph_def, producer_name='A')
        self.assertEqual(len(model_def.graph.output), 1)

        nodes = [
            helper.make_node('Add', ['X', 'idi'], ['temp']),
            helper.make_node('Add', ['idi2', 'temp'], ['Y'])
        ]
        graph_def = helper.make_graph(nodes, 't1', [X], [Y])
        model_def = helper.make_model(graph_def, producer_name='A')
        self.assertEqual(len(model_def.graph.output), 1)
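
The test above builds the same two-node graph twice, only swapping the order of the Add inputs, and checks that the resulting model keeps a single output. The sketch below is a minimal, self-contained version of the same pattern (names such as 'B' and 'add_graph' are illustrative, not taken from the test): TensorProto.FLOAT supplies the element type for each value info, and the finished model can be validated with onnx.checker.

import onnx
from onnx import helper, TensorProto

X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [2, 2])
B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [2, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [2, 2])

node = helper.make_node('Add', ['X', 'B'], ['Y'])            # Y = X + B
graph = helper.make_graph([node], 'add_graph', [X, B], [Y])
model = helper.make_model(graph, producer_name='example')
onnx.checker.check_model(model)                               # structural validation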
github pytorch / pytorch / caffe2 / python / trt / test_trt.py View on Github external
def _test_relu_graph(self, X, batch_size, trt_max_batch_size):
        node_def = make_node("Relu", ["X"], ["Y"])
        Y_c2 = c2.run_node(node_def, {"X": X})
        graph_def = make_graph(
            [node_def],
            name="test",
            inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT, [batch_size, 1, 3, 2])],
            outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [batch_size, 1, 3, 2])])
        model_def = make_model(graph_def, producer_name='relu-test')
        op_outputs = [x.name for x in model_def.graph.output]
        op = convert_onnx_model_to_trt_op(model_def, max_batch_size=trt_max_batch_size)
        device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
        op.device_option.CopyFrom(device_option)
        Y_trt = None
        ws = Workspace()
        with core.DeviceScope(device_option):
            ws.FeedBlob("X", X)
            ws.RunOperatorsOnce([op])
            output_values = [ws.FetchBlob(name) for name in op_outputs]
            Y_trt = namedtupledict('Outputs', op_outputs)(*output_values)
        np.testing.assert_almost_equal(Y_c2, Y_trt)
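
The Caffe2/TensorRT plumbing aside, the TensorProto part of this test is the pair of make_tensor_value_info calls that pin down the input and output element type and shape. A minimal sketch of just that part, with a symbolic batch dimension in place of a concrete batch_size (the symbol 'N' is an assumption for illustration):

import onnx
from onnx import helper, TensorProto

X = helper.make_tensor_value_info('X', TensorProto.FLOAT, ['N', 1, 3, 2])  # 'N' = dynamic batch
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, ['N', 1, 3, 2])
relu = helper.make_node('Relu', ['X'], ['Y'])
model = helper.make_model(helper.make_graph([relu], 'relu_test', [X], [Y]),
                          producer_name='relu-example')
onnx.checker.check_model(model)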
github microsoft / onnxruntime / onnxruntime / python / tools / bert / bert_model_optimization.py View on Github external
        subgraph_nodes, input_nodes = self.get_parent_nodes_and_inputs(reshape_node_after_att, [normalize_node], output_name_to_node)

        nodes_to_remove.extend(subgraph_nodes)
        nodes_to_remove.extend([reshape_node_after_att])

        input_nodes = [n for n in input_nodes if self.get_initializer(n) is None]
        if len(input_nodes) != 1:
            print("Failed. Current normalize node output", normalize_node.output[0])
            raise Exception("There should be one graph input (without initializer) linked to attention. Got:", input_nodes)
        # Here we assume that attention will get only one graph input: the mask
        self.set_mask_input(input_nodes[0])

        attention_node_name = self.create_node_name('Attention')

        weight = onnx.helper.make_tensor(name=attention_node_name + '_qkv_weight',
            data_type=TensorProto.FLOAT,
            dims=[self.hidden_size, 3 * self.hidden_size],
            vals=qkv_weight.flatten().tolist())
        self.add_initializer(weight)

        weight_input = onnx.helper.make_tensor_value_info(weight.name, TensorProto.FLOAT, [self.hidden_size, 3 * self.hidden_size])
        self.add_input(weight_input)

        bias = onnx.helper.make_tensor(name=attention_node_name + '_qkv_bias',
            data_type=TensorProto.FLOAT,
            dims=[3 * self.hidden_size],
            vals=qkv_bias.flatten().tolist())
        self.add_initializer(bias)

        bias_input = onnx.helper.make_tensor_value_info(bias.name, TensorProto.FLOAT, [3 * self.hidden_size])
        self.add_input(bias_input)
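
Here helper.make_tensor turns the fused QKV weight and bias into TensorProto initializers (data_type=TensorProto.FLOAT plus explicit dims), and a matching value info is registered as a graph input. A standalone sketch of that step with a made-up hidden size; onnx.numpy_helper.from_array is shown as an equivalent shortcut:

import numpy as np
import onnx
from onnx import helper, numpy_helper, TensorProto

hidden_size = 4
qkv_weight = np.random.rand(hidden_size, 3 * hidden_size).astype(np.float32)

# explicit construction, as in the optimizer above
weight = helper.make_tensor(name='attention_qkv_weight',
                            data_type=TensorProto.FLOAT,
                            dims=[hidden_size, 3 * hidden_size],
                            vals=qkv_weight.flatten().tolist())

# equivalent shortcut: build the TensorProto straight from the numpy array
weight_alt = numpy_helper.from_array(qkv_weight, name='attention_qkv_weight')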
github NVIDIA / tensorrt-inference-server / qa / common / gen_qa_sequence_models.py View on Github external
    onnx_control_dtype = onnx_dtype
    if onnx_dtype == onnx.TensorProto.STRING:
        onnx_control_dtype = onnx.TensorProto.INT32

    batch_dim = [] if max_batch == 0 else [None]

    onnx_input = onnx.helper.make_tensor_value_info("INPUT", onnx_dtype, batch_dim + onnx_input_shape)
    onnx_start = onnx.helper.make_tensor_value_info("START", onnx_control_dtype, batch_dim + [1])
    onnx_ready = onnx.helper.make_tensor_value_info("READY", onnx_control_dtype, batch_dim + [1])
    onnx_output = onnx.helper.make_tensor_value_info("OUTPUT", onnx_dtype, batch_dim + onnx_output_shape)

    internal_input = onnx.helper.make_node("Identity", ["INPUT"], ["_INPUT"])

    # cast int8 and int16 inputs to a higher-precision int, since the ONNX Add/Sub operators don't support those types
    # Also cast the String data type to int32
    if ((onnx_dtype == onnx.TensorProto.INT8) or (onnx_dtype == onnx.TensorProto.INT16) or
        (onnx_dtype == onnx.TensorProto.STRING)):
        internal_input = onnx.helper.make_node("Cast", ["INPUT"], ["_INPUT"], to=onnx.TensorProto.INT32)

    add = onnx.helper.make_node("Add", ["_INPUT", "START"], ["add"])
    # Take advantage of knowledge that the READY false value is 0 and true is 1
    mul = onnx.helper.make_node("Mul", ["READY", "add"], ["CAST"])
    cast = onnx.helper.make_node("Cast", ["CAST"], ["OUTPUT"], to=onnx_dtype)

    # Avoid a cast from float16 to float16
    # (bug in ONNX Runtime: a float16-to-float16 cast turns into a float16-to-float32 cast)
    if onnx_dtype == onnx.TensorProto.FLOAT16:
        cast = onnx.helper.make_node("Identity", ["CAST"], ["OUTPUT"])

    onnx_nodes = [internal_input, add, mul, cast]
    onnx_inputs = [onnx_input, onnx_start, onnx_ready]
    onnx_outputs = [onnx_output]
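
Because the TensorProto dtype constants are ordinary integers, the generator above can compare them directly (onnx_dtype == onnx.TensorProto.STRING) and pass them straight into the Cast node's "to" attribute. A condensed sketch of that dispatch, with a hypothetical helper name:

from onnx import helper, TensorProto

def cast_or_identity(src, dst, onnx_dtype):
    # Insert a Cast to INT32 for narrow/string dtypes, otherwise pass through unchanged.
    if onnx_dtype in (TensorProto.INT8, TensorProto.INT16, TensorProto.STRING):
        return helper.make_node('Cast', [src], [dst], to=TensorProto.INT32)
    return helper.make_node('Identity', [src], [dst])

node = cast_or_identity('INPUT', '_INPUT', TensorProto.INT8)   # produces a Cast node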
github pfnet-research / chainer-compiler / ch2o / ch2o / chainer2onnx.py View on Github external
        # Below: handling for the initial inputs coming from outside
        # code.InteractiveConsole({'v': self.ast}).interact()
        self.forward_arglen = len(self.ast.args.args)-1

        # At this point the initialized parameters need to be overwritten.
        # They have to be restored later so the model can still be run with Chainer,
        # so everything that must be restored is appended to restore_funcs.
        self.inits = []

        for s, v in ch.namedparams():
            s = s[1:]
            if s.find('/') != -1:
                continue
            t = helper.make_tensor_value_info(
                '/'+s, TensorProto.FLOAT, list(v.shape))
            self.inits.append(t)
            mv = getattr(ch, s)
            setattr(ch, s, t)
            env.restore_funcs.append(lambda: setattr(ch, s, mv))

        # TODO(satos): this can be removed once Yield can be compiled.
        mv = getattr(ch, 'children')
        setattr(ch, 'children', Func(lambda _, __, ___: mv()))
        env.restore_funcs.append(lambda: setattr(ch, 'children', mv))
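
In this converter each Chainer parameter is replaced by a value info whose element type is TensorProto.FLOAT and whose shape is copied from the parameter. The same pattern in isolation, with a hypothetical parameter dict standing in for ch.namedparams():

import numpy as np
from onnx import helper, TensorProto

params = {'l1/W': np.zeros((3, 4), dtype=np.float32),
          'l1/b': np.zeros((3,), dtype=np.float32)}

value_infos = [helper.make_tensor_value_info(name, TensorProto.FLOAT, list(arr.shape))
               for name, arr in params.items()]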
github microsoft / onnxruntime / onnxruntime / core / providers / nuphar / scripts / symbolic_shape_infer.py View on Github external
                out_rank = len(get_shape_from_type_proto(vi.type))
                in_shapes = [self._get_shape(node, i) for i in range(len(node.input))]
                for d in range(out_rank - (2 if node.op_type in ['MatMul', 'MatMulInteger', 'MatMulInteger16'] else 0)):
                    in_dims = [s[len(s) - out_rank + d] for s in in_shapes if len(s) + d >= out_rank]
                    if len(in_dims) > 1:
                        self._check_merged_dims(in_dims, allow_broadcast=True)

            for i_o in range(len(node.output)):
                vi = self.known_vi_[node.output[i_o]]
                out_type = vi.type
                out_type_kind = out_type.WhichOneof('value')
                # only TensorProto and SparseTensorProto have shape
                if out_type_kind != 'tensor_type' and out_type_kind != 'sparse_tensor_type':
                    continue
                out_shape = get_shape_from_type_proto(vi.type)
                out_type_undefined = out_type.tensor_type.elem_type == onnx.TensorProto.UNDEFINED
                if self.verbose_ > 2:
                    print('  {}: {} {}'.format(node.output[i_o], str(out_shape), vi.type.tensor_type.elem_type))
                    if node.output[i_o] in self.sympy_data_:
                        print('  Sympy Data: ' + str(self.sympy_data_[node.output[i_o]]))

                if None in out_shape or out_type_undefined:
                    if self.auto_merge_:
                        if node.op_type in ['Add', 'Sub', 'Mul', 'Div', 'MatMul', 'MatMulInteger', 'MatMulInteger16', 'Concat', 'Where', 'Sum']:
                            shapes = [self._get_shape(node, i) for i in range(len(node.input))]
                            if node.op_type in ['MatMul', 'MatMulInteger', 'MatMulInteger16']:
                                # only support auto merge for MatMul for dim < rank-2 when rank > 2
                                assert len(shapes[0]) > 2 and dim_idx[0] < len(shapes[0]) - 2
                                assert len(shapes[1]) > 2 and dim_idx[1] < len(shapes[1]) - 2
                        elif node.op_type == 'Expand':
                            # auto merge for cases like Expand([min(batch, 1), min(seq, 512)], [batch, seq])
                            shapes = [self._get_shape(node, 0), self._get_value(node, 1)]
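
The shape-inference code reads the element type back out of a ValueInfoProto and compares it against TensorProto.UNDEFINED to spot outputs whose type is not yet known. A small sketch of that inspection on a freshly built value info (the names are illustrative):

from onnx import helper, TensorProto

vi = helper.make_tensor_value_info('t', TensorProto.FLOAT, [2, 'batch'])

kind = vi.type.WhichOneof('value')                 # 'tensor_type'
elem_type = vi.type.tensor_type.elem_type          # TensorProto.FLOAT (an int)
undefined = elem_type == TensorProto.UNDEFINED     # False
dims = [d.dim_value if d.HasField('dim_value') else d.dim_param
        for d in vi.type.tensor_type.shape.dim]    # [2, 'batch']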
github onnx / onnx-coreml / onnx_coreml / converter.py View on Github external
    for i, input_ in enumerate(inputs):
        onnx_type = input_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.input[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.input[i])
        else:
            raise TypeError("Input must be of of type FLOAT, DOUBLE, INT32 or INT64")

    for i, output_ in enumerate(outputs):
        onnx_type = output_[1]
        if onnx_type == TensorProto.FLOAT:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.DOUBLE:
            continue
        elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
            _update_multiarray_to_int32(builder.spec.description.output[i])
        elif onnx_type == TensorProto.BOOL:
            _update_multiarray_to_float32(builder.spec.description.output[i])
        else:
            raise TypeError("Output must be of of type FLOAT, DOUBLE, INT32 or INT64")
github ezyang / onnx-pytorch / onnx_pytorch / verify.py View on Github external
def equalAndThen(self, x, y, msg, k):
        """
        Helper for implementing 'requireEqual' and 'checkEqual'.  Upon failure,
        invokes continuation 'k' with the error message.
        """
        if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto):
            self.equalAndThen(x.name, y.name, msg, k)
            # Use numpy for the comparison
            t1 = onnx.numpy_helper.to_array(x)
            t2 = onnx.numpy_helper.to_array(y)
            new_msg = "{}In embedded parameter '{}'".format(colonize(msg), x.name)
            self.equalAndThen(t1, t2, new_msg, k)
        elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
            try:
                np.testing.assert_equal(x, y)
            except AssertionError as e:
                k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
        else:
            if x != y:
                # TODO: Better algorithm for lists
                sx = str(x)
                sy = str(y)
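
Here onnx.TensorProto appears as the runtime type of an embedded parameter: the verifier recognises TensorProto instances and converts them with onnx.numpy_helper.to_array before comparing. A minimal round-trip showing the same idea:

import numpy as np
import onnx
from onnx import numpy_helper

a = numpy_helper.from_array(np.arange(6, dtype=np.float32).reshape(2, 3), name='a')
b = numpy_helper.from_array(np.arange(6, dtype=np.float32).reshape(2, 3), name='b')

assert isinstance(a, onnx.TensorProto)
np.testing.assert_equal(numpy_helper.to_array(a), numpy_helper.to_array(b))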
github facebookresearch / dlrm / dlrm_s_caffe2.py View on Github external
        # save model to a file
        with open("dlrm_s_caffe2.onnx", "w+") as dlrm_caffe2_onnx_file:
            dlrm_caffe2_onnx_file.write(str(dlrm_caffe2_onnx))

    # build protobuf with types and shapes
    if args.save_proto_types_shapes:
        # add types and shapes to protobuf
        __TYPE_MAPPING = {
            onnx.TensorProto.FLOAT: caffe2_pb2.TensorProto.FLOAT,
            onnx.TensorProto.UINT8: caffe2_pb2.TensorProto.UINT8,
            onnx.TensorProto.INT8: caffe2_pb2.TensorProto.INT8,
            onnx.TensorProto.UINT16: caffe2_pb2.TensorProto.UINT16,
            onnx.TensorProto.INT16: caffe2_pb2.TensorProto.INT16,
            onnx.TensorProto.INT32: caffe2_pb2.TensorProto.INT32,
            onnx.TensorProto.INT64: caffe2_pb2.TensorProto.INT64,
            onnx.TensorProto.STRING: caffe2_pb2.TensorProto.STRING,
            onnx.TensorProto.BOOL: caffe2_pb2.TensorProto.BOOL,
            onnx.TensorProto.FLOAT16: caffe2_pb2.TensorProto.FLOAT16,
            onnx.TensorProto.DOUBLE: caffe2_pb2.TensorProto.DOUBLE,
        }

        pnet = dlrm.parameters().net.Proto()
        arg = pnet.arg.add()
        arg.name = "input_shape_info"
        for i in pnet.external_input:
            if i in dlrm.onnx_tsd:
                onnx_dtype, shape = dlrm.onnx_tsd[i]
                t = arg.tensors.add()
                t.name = i
                t.data_type = __TYPE_MAPPING[onnx_dtype]
                t.dims.extend(shape)
            else:
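
The DLRM export builds a static dictionary keyed by onnx.TensorProto dtype constants; this works because each constant is just an integer from the TensorProto.DataType protobuf enum. A quick way to inspect those values and their names:

import onnx
from onnx import TensorProto

print(int(TensorProto.FLOAT16))                              # 10
print(onnx.TensorProto.DataType.Name(TensorProto.FLOAT16))   # 'FLOAT16'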
github microsoft / onnxruntime / onnxruntime / core / providers / nuphar / scripts / rnn_benchmark.py View on Github external
def generate_model(rnn_type, input_dim, hidden_dim, bidirectional, layers, model_name, batch_one=True, has_seq_len=False):
    model = onnx.ModelProto()
    opset = model.opset_import.add()
    opset.domain = ''  # '' selects the default ONNX operator set domain
    opset.version = 7
    num_directions = 2 if bidirectional else 1

    X = 'input'
    model.graph.input.add().CopyFrom(helper.make_tensor_value_info(X, onnx.TensorProto.FLOAT, ['s', 1 if batch_one else 'b', input_dim]))
    model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.asarray([0, 0, -1], dtype=np.int64), 'shape'))

    if has_seq_len:
        seq_len = 'seq_len'
        model.graph.input.add().CopyFrom(helper.make_tensor_value_info(seq_len, onnx.TensorProto.INT32, [1 if batch_one else 'b',]))

    gates = {'lstm':4, 'gru':3, 'rnn':1}[rnn_type]
    for i in range(layers):
        layer_input_dim = (input_dim if i == 0 else hidden_dim * num_directions)
        model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.random.rand(num_directions, gates*hidden_dim, layer_input_dim).astype(np.float32), 'W'+str(i)))
        model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.random.rand(num_directions, gates*hidden_dim, hidden_dim).astype(np.float32), 'R'+str(i)))
        model.graph.initializer.add().CopyFrom(numpy_helper.from_array(np.random.rand(num_directions, 2*gates*hidden_dim).astype(np.float32), 'B'+str(i)))
        layer_inputs = [X, 'W'+str(i), 'R'+str(i), 'B'+str(i)]
        if has_seq_len:
            layer_inputs += [seq_len]
        layer_outputs = ['layer_output_'+str(i)]
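
Rather than calling make_graph, this benchmark assembles a ModelProto field by field, using CopyFrom to append value infos and initializers whose shapes mix symbolic dimensions ('s', 'b') with concrete ones. A trimmed-down sketch of that incremental style (operator, names and opset version here are illustrative, not from the benchmark):

import numpy as np
import onnx
from onnx import helper, numpy_helper, TensorProto

model = onnx.ModelProto()
model.ir_version = onnx.IR_VERSION
model.opset_import.add().version = 11        # default ('') ONNX domain
model.graph.name = 'incremental'

model.graph.input.add().CopyFrom(
    helper.make_tensor_value_info('X', TensorProto.FLOAT, ['seq', 'batch', 8]))
model.graph.initializer.add().CopyFrom(
    numpy_helper.from_array(np.zeros((8, 8), dtype=np.float32), 'W'))
model.graph.node.add().CopyFrom(helper.make_node('MatMul', ['X', 'W'], ['Y']))
model.graph.output.add().CopyFrom(
    helper.make_tensor_value_info('Y', TensorProto.FLOAT, ['seq', 'batch', 8]))

onnx.checker.check_model(model)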