How to use onnx.TensorProto.FLOAT in onnx

To help you get started, we've selected a few onnx examples based on popular ways TensorProto.FLOAT is used in public projects. Note that TensorProto.FLOAT is not a function but an enum value: the element-type tag for float32 tensors in the ONNX protobuf schema.

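Before the project examples, here is a minimal, self-contained sketch of where the tag shows up (the names X, W, Y, and demo_graph are illustrative, not taken from the projects below): TensorProto.FLOAT is passed wherever an API needs a tensor element type, most commonly helper.make_tensor_value_info for graph inputs/outputs and helper.make_tensor for initializers.

import numpy as np
import onnx
from onnx import helper, TensorProto

# TensorProto.FLOAT tags the float32 element type on graph inputs/outputs...
X = helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 4])
Y = helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 4])

# ...and on initializers (constant weights) stored inside the model.
W = helper.make_tensor("W", TensorProto.FLOAT, [1, 4],
                       np.ones((1, 4), dtype=np.float32).flatten().tolist())

node = helper.make_node("Add", ["X", "W"], ["Y"])
graph = helper.make_graph([node], "demo_graph", [X], [Y], initializer=[W])
model = helper.make_model(graph)
onnx.checker.check_model(model)  # validates that all element types line up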

github onnx / onnx-tensorflow / test / backend / test_model.py (view on GitHub)

This backend test types the graph input and output as float32 via TensorProto.FLOAT, then checks the onnx-tensorflow Relu result against NumPy.
# Imports needed by this excerpt (a test method from a unittest.TestCase subclass):
import numpy as np
from onnx import helper, TensorProto
from onnx_tf.backend import prepare

def test_relu_node_inplace(self):
    X = np.random.randn(3, 2).astype(np.float32)
    Y_ref = np.clip(X, 0, np.inf)

    node_def = helper.make_node("Relu", ["X"], ["X1"])

    graph_def = helper.make_graph(
        [node_def],
        name="test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
        outputs=[
            helper.make_tensor_value_info("X1", TensorProto.FLOAT, [3, 2])
        ])
    tf_rep = prepare(helper.make_model(graph_def))
    output = tf_rep.run({"X": X})
    np.testing.assert_almost_equal(output.X1, Y_ref)

github onnx / tensorflow-onnx / tests / test_onnx_shape_inference.py (view on GitHub)

Here TensorProto.FLOAT supplies the dtypes that tensorflow-onnx's shape-inference test harness assigns to both MatMul inputs.
def _test_matmul_unknown_shape(self, shapes):
    # INPUT1, INPUT2 and the _create_empty_graph/_run_test_case helpers are
    # defined elsewhere in this test module.
    data_shapes = [
        [1 if s == -1 else s for s in shapes[0]],
        [1 if s == -1 else s for s in shapes[1]]
    ]
    inputs = [INPUT1, INPUT2]
    dtypes = [TensorProto.FLOAT, TensorProto.FLOAT]
    graph = self._create_empty_graph(inputs, shapes, dtypes)
    node = graph.make_node("MatMul", [INPUT1, INPUT2])
    graph.add_graph_output(node.output[0])
    self._run_test_case(graph, self._generate_random_inputs(inputs, data_shapes, dtypes))

github onnx / tensorflow-onnx / tests / test_optimizers.py (view on GitHub)

In this optimizer test, TensorProto.FLOAT types the scan output of a Loop subgraph whose rank is known but whose dimensions are not.
# node1 is defined just above this excerpt in the original test.
node2 = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=[0, 2, 3, 1])
# graph output
node3 = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
node5 = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])

graph = helper.make_graph(
    [node1, node2, node3, node4, node5],
    "loop_subgraph",
    [helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)),   # iteration_num
     helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()),     # condition
     helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ())],      # loop-carried dependency
    [helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
     helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
     helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * 3)],
)
return graph

github apache / incubator-tvm / tests / python / frontend / onnx / test_forward.py (view on GitHub)

This TVM frontend test uses TensorProto.FLOAT for the input and output of a Reshape graph whose target shape comes from an INT32 Constant.
# in_shape, get_tvm_output, and ctx_list are defined elsewhere in the
# original test module.
ref_shape = (6, 2, 4, 3)

ref_array = np.array(ref_shape)
ref_node = onnx.helper.make_node('Constant',
                                 inputs=[],
                                 outputs=['ref_in'],
                                 value=onnx.helper.make_tensor(name='const_tensor',
                                                               data_type=onnx.TensorProto.INT32,
                                                               dims=ref_array.shape,
                                                               vals=ref_array.flatten().astype(int)))
reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])

graph = helper.make_graph([ref_node, reshape_node],
                          "reshape_test",
                          inputs=[helper.make_tensor_value_info("in",
                                                                TensorProto.FLOAT, list(in_shape))],
                          outputs=[helper.make_tensor_value_info("out",
                                                                 TensorProto.FLOAT, list(ref_shape))])

model = helper.make_model(graph, producer_name='reshape_test')

for target, ctx in ctx_list():
    # the listing cast x to 'int32', which mismatches the FLOAT graph input
    # declared above; float32 matches the declared type
    x = np.random.uniform(size=in_shape).astype('float32')
    tvm_out = get_tvm_output(model, x, target, ctx, ref_shape, 'float32')

tvm.testing.assert_allclose(ref_shape, tvm_out.shape)

github apache / singa / python / singa / sonnx.py (view on GitHub)

SINGA's ONNX exporter materializes the min and max inputs of a Clip node as one-element FLOAT constant tensors.
# first we add the min and max inputs
for tmp_name in ['min', 'max']:
    node_name = op.name + ":" + tmp_name
    # modify the input of clip
    clip_node.input.append(node_name)

    node = NodeProto()
    node.name = node_name
    node.op_type = cls._rename_operators.get("Dummy", "Dummy")
    node.output.extend([node_name])

    node.attribute.extend([helper.make_attribute(
        'value', helper.make_tensor(
            name=node_name,
            data_type=TensorProto.FLOAT,
            dims=[1],
            vals=[getattr(op, tmp_name)],
        )
    )])
    nodes.append(node)

# then we add the clip op itself
nodes.append(clip_node)

return nodes

github onnx / onnx-caffe2 / onnx_caffe2 / frontend.py (view on GitHub)

onnx-caffe2 maps Caffe2 fill operators to ONNX initializer element types, with GivenTensorFill becoming TensorProto.FLOAT.
def caffe2_init_net_to_initializer(cls, init_net):
    # (a classmethod in the original source, per the cls parameter)
    initializer = []
    for op in init_net.op:
        assert not op.input
        try:
            data_type, field_name = {
                'GivenTensorFill': (TensorProto.FLOAT, 'floats'),
                'GivenTensorInt64Fill': (TensorProto.INT64, 'ints'),
                'GivenTensorIntFill': (TensorProto.INT32, 'ints'),
                'GivenTensorBoolFill': (TensorProto.BOOL, 'ints'),
                'GivenTensorStringFill': (TensorProto.STRING, 'strings'),
            }[op.type]
        except KeyError:
            raise RuntimeError(
                "Can not translate init_net with operator '{}' "
                "to initializer".format(op.type)
            )
        raw = (data_type != TensorProto.STRING)
        args = {a.name: a for a in op.arg}
        vals = getattr(args['values'], field_name)
        if raw:
            vals = np.asarray(
                vals,
            )  # the excerpt is truncated here in the original listing

github sony / nnabla / python / src / nnabla / utils / converter / onnx / exporter.py (view on GitHub)

nnabla's ONNX exporter pads a missing affine bias with a zero-filled FLOAT parameter before emitting a Gemm node.
# Method body excerpt: the opening of the if/else around this
# transposed-weight branch is not shown in the listing.
    param.data.extend(d)
    self._parameters_state[func.input[1]] = state | ParameterState.TRANSPOSED
    transB = 1
else:
    w_shape = list(self._var_dict[func.input[1]].dim[:])
    w_shape_dims = [w_shape[0], int(np.prod(w_shape) / w_shape[0])]
    proto_w_shape = self._var_dict[func.input[1]]
    del proto_w_shape.dim[:]
    proto_w_shape.dim.extend(w_shape_dims)

if len(func.input) <= 2:
    out_c = fork_name("affine_bias")
    shape = (1, )
    # tostring() is numpy's deprecated alias of tobytes()
    raw_data = np.zeros(shape).astype(np.float32).tostring()
    self._add_param(out_c, TensorProto.FLOAT, shape, raw_data)
else:
    bias_shape = list(self._var_dict[func.input[2]].dim[:])
    new_bias_shape = [np.prod(bias_shape)]
    proto_bias_shape = nnabla_pb2.Shape()
    proto_bias_shape.dim.extend(new_bias_shape)
    self._var_dict[func.input[2]] = proto_bias_shape
    out_c = func.input[2]

out = fork_name(func.output[0])

if opset == '6':
    # broadcast is needed.
    n = onnx.helper.make_node(
        "Gemm",
        [out_a, func.input[1], out_c],
        [out],
    )  # the excerpt is truncated here in the original listing

github microsoft / onnxruntime / onnxruntime / core / providers / nuphar / scripts / symbolic_shape_infer.py (view on GitHub)

ONNX Runtime's symbolic shape inference gives ZipMap outputs a sequence-of-maps type whose value tensors are FLOAT.
def _infer_ZipMap(self, node):
    map_key_type = None
    if get_attribute(node, 'classlabels_int64s') is not None:
        map_key_type = onnx.TensorProto.INT64
    elif get_attribute(node, 'classlabels_strings') is not None:
        map_key_type = onnx.TensorProto.STRING

    assert map_key_type is not None
    new_vi = onnx.ValueInfoProto()
    new_vi.name = node.output[0]
    new_vi.type.sequence_type.elem_type.map_type.value_type.tensor_type.elem_type = onnx.TensorProto.FLOAT
    new_vi.type.sequence_type.elem_type.map_type.key_type = map_key_type
    vi = self.known_vi_[node.output[0]]
    vi.CopyFrom(new_vi)

github Rapternmn / PyTorch-Onnx-Tensorrt / create_onnx.py (view on GitHub)

This YOLO-to-ONNX converter creates a FLOAT initializer and a matching input tensor from one parameter loaded out of the weights file.
"""Creates the initializers with weights from the weights file together with
		the input tensors.

		Keyword arguments:
		conv_params -- a ConvParams object
		param_category -- the category of parameters to be created ('bn' or 'conv')
		suffix -- a string determining the sub-type of above param_category (e.g.,
		'weights' or 'bias')
		"""
		param_name, param_data, param_data_shape = self._load_one_param_type(
			conv_params, param_category, suffix)

		initializer_tensor = helper.make_tensor(
			param_name, TensorProto.FLOAT, param_data_shape, param_data)
		input_tensor = helper.make_tensor_value_info(
			param_name, TensorProto.FLOAT, param_data_shape)
		return initializer_tensor, input_tensor

github microsoft / onnxruntime / onnxruntime / core / providers / nuphar / scripts / model_editor.py (view on GitHub)

ONNX Runtime's model editor declares the FLOAT state and input tensors of an LSTM Scan subgraph.
# Method body excerpt from an LSTM-to-Scan rewrite.
scan_body = onnx.GraphProto()
scan_body.name = name_prefix + '_subgraph'

nf_body = NodeFactory(out_main_graph, scan_body)
with nf_body.scoped_prefix(name_prefix) as body_scoped_prefix:
    # subgraph inputs
    X_proj_subgraph = X_proj.name + '_subgraph'
    prev_h_subgraph = name_prefix + '_h_subgraph'
    prev_c_subgraph = name_prefix + '_c_subgraph'

    seq_len_subgraph = declare_seq_len_in_subgraph(seq_len, nf_body, X_proj.name, batch_size)

    for subgraph_i in [prev_h_subgraph, prev_c_subgraph]:
        nf_body.make_value_info(subgraph_i,
                                data_type=onnx.TensorProto.FLOAT,
                                shape=(batch_size, hidden_size),
                                usage=NodeFactory.ValueInfoType.input)

    nf_body.make_value_info(X_proj_subgraph,
                            data_type=onnx.TensorProto.FLOAT,
                            shape=(batch_size, 4*hidden_size),
                            usage=NodeFactory.ValueInfoType.input)
    # subgraph nodes
    # it = f(Xt*(Wi^T) + Ht-1*(Ri^T) + Pi (.) Ct-1 + Wbi + Rbi)
    # ft = f(Xt*(Wf^T) + Ht-1*(Rf^T) + Pf (.) Ct-1 + Wbf + Rbf)
    # ct = g(Xt*(Wc^T) + Ht-1*(Rc^T) + Wbc + Rbc)
    # Ct = ft (.) Ct-1 + it (.) ct
    # ot = f(Xt*(Wo^T) + Ht-1*(Ro^T) + Po (.) Ct + Wbo + Rbo)
    # Ht = ot (.) h(Ct)
    prev_h_proj = nf_body.make_node('MatMul', [prev_h_subgraph, Rt])
    sum_x_proj_h_proj_bias = nf_body.make_node('Add', [X_proj_subgraph, prev_h_proj])
    # (the excerpt continues in the original source)