How to use onnx.onnx_pb.TensorProto.FLOAT in onnx

To help you get started, we’ve selected a few onnx examples based on popular ways it is used in public projects.

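A note on aliases before the examples: onnx.onnx_pb, onnx_pb, and onnx_proto in the snippets below are different import names for the same protobuf module, so TensorProto.FLOAT (enum value 1, i.e. float32) means the same thing in all of them. As a warm-up, here is a minimal self-contained sketch of its most common use, declaring float32 value infos and initializers (our own example, not taken from the projects below):

import onnx
from onnx import helper, TensorProto

# declare a float32 graph input and output
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])

# a float32 initializer built with the same enum value
bias = helper.make_tensor('bias', TensorProto.FLOAT, [3], [0.1, 0.2, 0.3])

node = helper.make_node('Add', ['X', 'bias'], ['Y'])
graph = helper.make_graph([node], 'demo', [X], [Y], initializer=[bias])
model = helper.make_model(graph)
onnx.checker.check_model(model)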

github microsoft / onnxconverter-common / tests / test_opt.py
    def test_optimizer(self):
        val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)

        nodes = []
        nodes[0:] = \
            [helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
                name='const0',
                data_type=onnx_proto.TensorProto.FLOAT,
                dims=val.shape,
                vals=val.flatten().astype(float)))]
        nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
        nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
        nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
        nodes[4:] = [helper.make_node('Transpose', ['max0'], ['transpose0'], perm=[0, 2, 3, 1])]
        nodes[5:] = [helper.make_node('Transpose', ['transpose0'], ['transpose1'], perm=(0, 3, 1, 2))]
        nodes[6:] = [helper.make_node('Relu', ['transpose1'], ['output0'])]

        input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
        output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])

        graph = helper.make_graph(nodes, 'test0', [input0], [output0])
        model = helper.make_model(graph)
        self.assertIsNotNone(model)
        onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
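
Note how the test graph deliberately chains two redundant Identity nodes and a pair of mutually inverse Transpose ops (perm=[0, 2, 3, 1] followed by perm=(0, 3, 1, 2)), giving the optimizer under test concrete patterns to fold away.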
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / onnx_opset / tensor.py
            squeeze_node.set_attr("axes", needs_squeeze)
            nodes.append(squeeze_node)
            input_dtype = ctx.get_dtype(node.output[0])
            ctx.set_dtype(squeeze_node.output[0], input_dtype)
            ctx.copy_shape(node.output[0], squeeze_node.output[0])

        # ONNX Slice as of opset 7 only takes float tensors ... cast if needed
        input_dtype = ctx.get_dtype(node.input[0])
        if input_dtype != onnx_pb.TensorProto.FLOAT:
            if node.inputs[0].type == "Cast" and len(ctx.find_output_consumers(node.inputs[0].output[0])) == 1:
                # override the previous cast
                cast_node = node.inputs[0]
            else:
                cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0])
                nodes.insert(0, cast_node)
            cast_node.set_attr("to", onnx_pb.TensorProto.FLOAT)
            ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT)
            ctx.copy_shape(node.input[0], cast_node.output[0])
            # undo the cast after slice
            name = utils.make_name(node.name)
            cast_node = ctx.insert_new_node_on_output("Cast", nodes[-1].output[0], name)
            cast_node.set_attr("to", input_dtype)
            ctx.set_dtype(cast_node.output[0], input_dtype)
            ctx.copy_shape(node.output[0], cast_node.output[0])
            nodes.append(cast_node)
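
The cast sandwich above is easy to reproduce with plain onnx.helper calls. A minimal sketch of the pattern (our own, with Relu standing in for the float-only op; tf2onnx performs this rewrite on its internal graph wrapper instead):

from onnx import helper, TensorProto

x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [4])
y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [4])

# cast to float32, run the op, cast the result back to the original dtype
cast_in = helper.make_node('Cast', ['x'], ['x_f32'], to=TensorProto.FLOAT)
op = helper.make_node('Relu', ['x_f32'], ['y_f32'])
cast_out = helper.make_node('Cast', ['y_f32'], ['y'], to=TensorProto.DOUBLE)

graph = helper.make_graph([cast_in, op, cast_out], 'cast_sandwich', [x], [y])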
github onnx / tensorflow-onnx / tf2onnx / onnx_opset / tensor.py
def _wrap_concat_with_cast(ctx, node):
    """wrap concat in casts for opset < 8 since it only supports."""
    supported_types = [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16]
    dtype = ctx.get_dtype(node.output[0])
    need_casting = dtype not in supported_types
    if need_casting:
        output_name = node.output[0]
        # cast each input to float
        for i, inp in enumerate(node.inputs):
            input_cast = ctx.insert_new_node_on_input(node, "Cast", node.input[i])
            input_cast.set_attr("to", onnx_pb.TensorProto.FLOAT)
            ctx.set_dtype(input_cast.output[0], onnx_pb.TensorProto.FLOAT)
        next_nodes = ctx.find_output_consumers(node.output[0])
        # cast output back to dtype unless the next op is a cast
        if next_nodes[0].type != "Cast":
            op_name = utils.make_name(node.name)
            output_cast = ctx.insert_new_node_on_output("Cast", output_name, name=op_name)
            output_cast.set_attr("to", dtype)
            ctx.set_dtype(output_cast.output[0], dtype)
            ctx.copy_shape(output_name, output_cast.output[0])
github microsoft / onnxconverter-common / onnxconverter_common / onnx_ops.py
                # add initializer
                if isinstance(max, np.ndarray):
                    if len(max.shape) == 0:
                        max = [max]
                    elif max.shape == (1,):
                        max = list(max[0]) if hasattr(max[0], '__iter__') else list(max)
                    else:
                        raise RuntimeError("max must be an array of one element.")
                else:
                    max = [max]

                max_name = scope.get_unique_variable_name('clip_max')
                if op_version < 12:
                    max = np.array(max, dtype=getattr(container, 'dtype', np.float32))
                    container.add_initializer(max_name, getattr(container, 'proto_dtype',
                                                                onnx_proto.TensorProto.FLOAT), [], [max[0]])
                else:
                    max = np.array(max)
                    container.add_initializer(max_name, NP_TYPE_TO_TENSOR_TYPE[max.dtype], [], [max[0]])
                max = max_name
            if isinstance(max, str):
                inputs.append(max)
            else:
                raise RuntimeError("Parameter 'max' must be a string or a float.")

        container.add_node('Clip', inputs, output_name, op_version=op_version,
                           **attrs)
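
For context, the op_version branch above exists because Clip changed between opsets: min/max became optional inputs at opset 11, and only from opset 12 may they be a type other than float. A hedged sketch of the resulting graph piece (names are ours):

import numpy as np
from onnx import helper, numpy_helper

# the max bound lives in an initializer; the empty string skips the
# optional 'min' input of opset >= 11 Clip
clip_max = numpy_helper.from_array(np.array(6.0, dtype=np.float32), 'clip_max')
clip_node = helper.make_node('Clip', ['x', '', 'clip_max'], ['y'])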
github microsoft / onnxconverter-common / onnxconverter_common / onnx_ops.py
    :param mode: "nearest" or "linear"
    :param scales: a float tensor for scaling (upsampling or downsampling) all input dimensions
    '''
    name = _create_name_or_use_existing_one(scope, 'Resize', operator_name)
    attrs = {'name': name}
    attrs['mode'] = mode.lower()

    inputs = [input_name]

    if container.target_opset < 11:
        op_version = 10
    else:
        op_version = 11
        roi_tensor_name = scope.get_unique_variable_name(name + '_roi')
        roi = [0.0] * len(scales) + [1.0] * len(scales)
        container.add_initializer(roi_tensor_name, onnx_proto.TensorProto.FLOAT, [2 * len(scales)], roi)
        inputs.append(roi_tensor_name)
        attrs['coordinate_transformation_mode'] = coordinate_transformation_mode
        if attrs['mode'] == 'nearest':
            attrs['nearest_mode'] = 'floor'

    scales_tensor_name = scope.get_unique_variable_name(name + '_scales')
    container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)
    inputs.append(scales_tensor_name)
    container.add_node('Resize', inputs, output_name, op_version=op_version, **attrs)
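
Putting those pieces together, the opset-11 call built above comes out roughly like this (a sketch with made-up names, doubling H and W of an NCHW input):

from onnx import helper, TensorProto

roi = helper.make_tensor('roi', TensorProto.FLOAT, [8], [0.0] * 4 + [1.0] * 4)
scales = helper.make_tensor('scales', TensorProto.FLOAT, [4], [1.0, 1.0, 2.0, 2.0])
resize = helper.make_node('Resize', ['X', 'roi', 'scales'], ['Y'],
                          mode='nearest',
                          coordinate_transformation_mode='asymmetric',
                          nearest_mode='floor')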
github microsoft / onnxruntime / onnxruntime / python / tools / quantization / quantize.py
        assert quantized_value is not None
        packed_weight_name = quantized_value.q_name
        scale_name = quantized_value.scale_name
        zero_point_name = quantized_value.zp_name

        # Update packed weight, zero point, and scale initializers
        packed_weight_np_data = np.asarray(weight.quantized_data,
            dtype=onnx.mapping.TENSOR_TYPE_TO_NP_TYPE[weight.qType]).reshape(weight.initializer.dims)
        packed_weight_initializer = onnx.numpy_helper.from_array(packed_weight_np_data, packed_weight_name)

        if weight.axis is not None:
            zero_scale_shape = [weight.initializer.dims[weight.axis]]
        else: # scale and zero point must be scalar
            zero_scale_shape = []
        zero_point_type = weight.qType
        scale_initializer = onnx.helper.make_tensor(scale_name, onnx_proto.TensorProto.FLOAT, zero_scale_shape, weight.scales)
        zero_initializer = onnx.helper.make_tensor(zero_point_name, zero_point_type, zero_scale_shape, weight.zero_points)

        self.model.graph.initializer.extend([packed_weight_initializer, scale_initializer, zero_initializer])

        # Create input for initialized scale and zeros
        packed_weight_value_info = onnx.helper.make_tensor_value_info(packed_weight_name, weight.qType,
                                        weight.initializer.dims)
        scale_value_info = onnx.helper.make_tensor_value_info(scale_name, onnx_proto.TensorProto.FLOAT, zero_scale_shape)
        zero_point_value_info = onnx.helper.make_tensor_value_info(zero_point_name,
            zero_point_type, zero_scale_shape) # zero_point is int for dequantize operator

        self.model.graph.input.extend([packed_weight_value_info, scale_value_info, zero_point_value_info])

        self._quantized_weights.append(weight)
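
The dtype detail worth noticing above: the scale tensor is always emitted as float32 (TensorProto.FLOAT), while the zero point reuses the weight's quantized type. In isolation (illustrative names and values):

from onnx import helper, TensorProto

scale = helper.make_tensor('w_scale', TensorProto.FLOAT, [], [0.0235])
zero_point = helper.make_tensor('w_zero_point', TensorProto.UINT8, [], [128])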
github onnx / onnxmltools / onnxmltools / convert / sparkml / operator_converters / aft_survival_regression.py
def convert_aft_survival_regression(scope, operator, container):
    op = operator.raw_operator

    coefficients = op.coefficients.toArray().astype(float)
    coefficients_tensor = scope.get_unique_variable_name('coefficients_tensor')
    container.add_initializer(coefficients_tensor, onnx_proto.TensorProto.FLOAT, [1, len(coefficients)], coefficients)
    intercepts = op.intercept.astype(float) if isinstance(op.intercept, collections.abc.Iterable) else [float(op.intercept)]
    intercepts_tensor = scope.get_unique_variable_name('intercepts_tensor')
    container.add_initializer(intercepts_tensor, onnx_proto.TensorProto.FLOAT, [len(intercepts)], intercepts)

    matmul_result = scope.get_unique_variable_name('matmul_result_tensor')
    apply_matmul(scope, [operator.input_full_names[0], coefficients_tensor], matmul_result, container)
    add_result = scope.get_unique_variable_name('intercept_added_tensor')
    apply_add(scope, [matmul_result, intercepts_tensor], add_result, container)
    apply_exp(scope, add_result, operator.output_full_names, container)
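
In plain numpy terms, the three apply_* calls compute the AFT mean prediction exp(X @ coefficients + intercept); an illustration with made-up values, not converter code:

import numpy as np

X = np.array([[1.0, 2.0, 3.0]])      # one row of input features
coef = np.array([0.5, -0.25, 0.1])   # op.coefficients
intercept = 1.2                      # op.intercept
prediction = np.exp(X @ coef + intercept)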
github XiaoMi / kaldi-onnx / converter / graph.py
    def make_model(self):
        _LOG.info("start making ONNX model.")
        # add placeholders
        self.init_inputs()
        output_tensor_values = []
        for name in self._outputs:
            v = helper.make_tensor_value_info(
                name,
                onnx_pb.TensorProto.FLOAT,
                self.make_onnx_shape(self._shapes[name]))
            output_tensor_values.append(v)

        onnx_nodes = []
        for node in self._nodes:
            if node.type not in ['Input', 'Output']:
                try:
                    input_names = node.inputs
                    output_names = node.outputs
                    onnx_node = helper.make_node(node.type,
                                                 input_names,
                                                 output_names,
                                                 name=node.name,
                                                 domain=self._operatorsetid,
                                                 **node.attrs)
                    onnx_nodes.append(onnx_node)
                except Exception:
                    # the snippet is truncated here in the source; re-raise so
                    # the try block stays syntactically valid (assumed behavior)
                    raise
github onnx / tensorflow-onnx / tf2onnx / onnx_opset / math.py
def make_min_or_max_op(ctx, op_type, inputs, outputs,
                       output_shapes=None, output_dtypes=None):
    # dtypes supported without casting; anything else is cast to float
    supported_dtypes = [
        onnx_pb.TensorProto.FLOAT,
        onnx_pb.TensorProto.FLOAT16,
        onnx_pb.TensorProto.DOUBLE
    ]
    target_dtype = onnx_pb.TensorProto.FLOAT
    need_cast = False
    cast_inputs = []
    for inp in inputs:
        dtype = ctx.get_dtype(inp)
        utils.make_sure(dtype is not None, "dtype of {} is None".format(inp))
        if dtype not in supported_dtypes:
            cast_inp = ctx.make_node("Cast", [inp], attr={"to": target_dtype})
            cast_inputs.append(cast_inp.output[0])
            need_cast = True
        else:
            cast_inputs.append(inp)
    node = ctx.make_node(op_type, cast_inputs, shapes=output_shapes)
    actual_outputs = node.output
    if need_cast:
        origin_dtype = ctx.get_dtype(inputs[0])
        if output_dtypes is not None: