How to use the onnx.onnx_pb module in onnx

To help you get started, we’ve selected a few onnx.onnx_pb examples based on popular ways it is used in public projects.

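onnx.onnx_pb is not a function but the module that re-exports the generated protobuf classes behind every ONNX model (ModelProto, GraphProto, NodeProto, TensorProto, and so on). A minimal sketch of the pattern that recurs in the snippets below, assuming a placeholder path model.onnx:

from onnx import onnx_pb

# Parse a serialized model with the raw protobuf class
with open('model.onnx', 'rb') as f:  # placeholder path
    model_proto = onnx_pb.ModelProto()
    model_proto.ParseFromString(f.read())

# The parsed object is the same class that onnx.load() returns
print(model_proto.ir_version)
print([i.name for i in model_proto.graph.input])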

github ANRGUSC / Jupiter / app_specific_files / demotest_backup_circe / scripts / utils / onnx2coreml.py View on Github external
import coremltools
import onnx
from onnx import onnx_pb
from onnx_coreml import convert  # assumed source of `convert` (onnx-coreml package)

for f in files:  # `files`: list of .onnx file paths, defined earlier in the script
    # 1. ONNX to CoreML
    name = 'saved_models/' + f.split('/')[-1].replace('.onnx', '')

    # Load the ONNX model
    model = onnx.load(f)

    # Check that the IR is well formed (raises an exception if not)
    onnx.checker.check_model(model)

    # Print a human-readable representation of the graph
    print(onnx.helper.printable_graph(model.graph))

    with open(f, 'rb') as model_file:
        model_proto = onnx_pb.ModelProto()
        model_proto.ParseFromString(model_file.read())
    yolov3_model = convert(model_proto, image_input_names=['0'],
                           preprocessing_args={'image_scale': 1. / 255})

    # 2. Reduce model to FP16, change outputs to DOUBLE and save
    spec = yolov3_model.get_spec()
    for i in range(2):
        spec.description.output[i].type.multiArrayType.dataType = \
            coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')

    spec = coremltools.utils.convert_neural_network_spec_weights_to_fp16(spec)
    yolov3_model = coremltools.models.MLModel(spec)

    name_out0 = spec.description.output[0].name
    name_out1 = spec.description.output[1].name
github onnx / keras-onnx / tests / test_utils.py View on Github external
cast_node.set_attr("to", onnx_pb.TensorProto.INT64)
ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT64)
ctx.copy_shape(node.name, cast_node.output[0])

# Transpose the cast output before flattening it
attrs = {'perm': [1, 0]}
transpose_node = ctx.make_node("Transpose", [cast_node.output[0]],
                               name=tf2onnx.utils.make_name(node.name), attr=attrs)

# Reshape to a 1-D tensor using a constant shape of [-1]
const_name = tf2onnx.utils.make_name(node.name)
const_array = ctx.make_const(const_name, np.array([-1], dtype=np.int64))
reshape = ctx.make_node("Reshape", [transpose_node.output[0], const_array.output[0]])
ctx.replace_input(node, node.input[1], reshape.output[0])

# Non-float inputs are cast to FLOAT for the op, then cast back afterwards
if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT,
                        onnx_pb.TensorProto.DOUBLE]:
    cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0])
    cast_node.set_attr("to", onnx_pb.TensorProto.FLOAT)
    ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT)
    ctx.copy_shape(node.name, cast_node.output[0])

    cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
                                                   name=tf2onnx.utils.make_name(node.name) + "_castback")
    cast_back_node.set_attr("to", origin_dtype)
    ctx.set_dtype(cast_back_node.output[0], origin_dtype)
    ctx.copy_shape(node.name, cast_back_node.output[0])
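
For reference, the same onnx_pb.TensorProto enum drives the "to" attribute of a plain ONNX Cast node; a minimal sketch built with onnx.helper (tensor names 'x' and 'x_int64' are illustrative):

from onnx import helper, onnx_pb

# Cast tensor 'x' to int64; the "to" attribute takes a TensorProto dtype enum
cast = helper.make_node("Cast", inputs=["x"], outputs=["x_int64"],
                        to=onnx_pb.TensorProto.INT64)
print(cast)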
github NVIDIA / mxnet_to_onnx / mx2onnx_converter / mx2onnx_converter.py View on Github external
for idx, node in enumerate(mx_graph):
    op = node["op"]
    name = node["name"]
    if log:
        print("Converting idx: %d, op: %s, name: %s" % (idx, op, name))
    converted = MxNetToONNXConverter.convert_layer(
        node,
        mx_graph=mx_graph,
        weights=weights,
        in_shape=in_shape,
        in_type=in_type,
        proc_nodes=all_processed_nodes,
        initializer=initializer
    )

    # Graph inputs/outputs arrive as ValueInfoProto, layers as NodeProto
    if isinstance(converted, onnx_pb.ValueInfoProto):
        if idx < (len(mx_graph) - 1):
            onnx_processed_inputs.append(converted)
        else:
            onnx_processed_outputs.append(converted)
    elif isinstance(converted, onnx_pb.NodeProto):
        if idx < (len(mx_graph) - 1):
            onnx_processed_nodes.append(converted)
        else:
            # The last node also defines the graph output
            onnx_processed_nodes.append(converted)
            onnx_processed_outputs.append(
                make_tensor_value_info(
                    name=converted.name,
                    elem_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')],
                    shape=(in_shape[0], -1)
                )
            )
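
The make_tensor_value_info call above pairs onnx.helper with the NP_TYPE_TO_TENSOR_TYPE table from onnx.mapping; a self-contained sketch (the name 'output' is illustrative):

import numpy as np
from onnx import helper, mapping, onnx_pb

# Build a float32 ValueInfoProto with a dynamic second dimension
vi = helper.make_tensor_value_info(
    name="output",
    elem_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')],
    shape=(1, -1))
assert isinstance(vi, onnx_pb.ValueInfoProto)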
github onnx / sklearn-onnx / skl2onnx / common / _topology.py View on Github external
def from_pb(obj):
    """
    Creates a data type from a protobuf object.
    """
    def get_shape(tt):
        return [tt.shape.dim[i].dim_value
                for i in range(len(tt.shape.dim))]

    if hasattr(obj, 'extend'):
        return [Variable.from_pb(o) for o in obj]
    name = obj.name
    if obj.type.tensor_type:
        tt = obj.type.tensor_type
        elem = tt.elem_type
        shape = get_shape(tt)
        if elem == onnx_proto.TensorProto.FLOAT:
            ty = FloatTensorType(shape)
        elif elem == onnx_proto.TensorProto.BOOL:
            ty = BooleanTensorType(shape)
        elif elem == onnx_proto.TensorProto.DOUBLE:
            ty = DoubleTensorType(shape)
        elif elem == onnx_proto.TensorProto.STRING:
            ty = StringTensorType(shape)
        elif elem == onnx_proto.TensorProto.INT64:
            ty = Int64TensorType(shape)
        elif elem == onnx_proto.TensorProto.INT32:
            ty = Int32TensorType(shape)
        else:
            raise NotImplementedError(
                "Unsupported type '{}' (elem_type={}).".format(
                    type(obj.type.tensor_type), elem))
    else:
        raise NotImplementedError("Unsupported type '{}' as "
                                  "a string ({}).".format(
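
The elem_type compared against the TensorProto constants above is a plain integer field on the protobuf; a sketch of reading it from a loaded model (model.onnx is a placeholder path, and onnx.TensorProto is the same class onnx_proto aliases here):

import onnx

model = onnx.load('model.onnx')  # placeholder path
tt = model.graph.input[0].type.tensor_type
print(tt.elem_type == onnx.TensorProto.FLOAT)
print([d.dim_value for d in tt.shape.dim])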
github onnx / keras-onnx / keras2onnx / ktf2onnx / tf2onnx / onnx_opset / tensor.py View on Github external
def _convert_shapenode_to_int64(ctx, node, input_number):
    """cast int32 shape into int64 shape."""
    name = node.input[input_number]

    cast_node = ctx.insert_new_node_on_input(node, "Cast", name)
    cast_node.set_attr("to", onnx_pb.TensorProto.INT64)
    ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT64)
    ctx.copy_shape(name, cast_node.output[0])
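
Since these dtype constants are protobuf enum values, the generated TensorProto.DataType enum converts between names and integers, which helps when debugging casts like the one above:

from onnx import onnx_pb

print(onnx_pb.TensorProto.INT64)                    # 7
print(onnx_pb.TensorProto.DataType.Name(7))         # 'INT64'
print(onnx_pb.TensorProto.DataType.Value('INT64'))  # 7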
github onnx / tensorflow-onnx / tf2onnx / rewriter / unit_rewriter_base.py View on Github external
def process_seq_length(self, rnn_props, seq_length_node):
    # output: [time step, batch size, input size]
    shape_node = self.g.make_node("Shape", [rnn_props.x_input_id])

    # LSTMCell only allows inputs of [batch size, input size], so we assume dynamic_rnn has 3 dims.
    # Slice does not support int64 in opset 7, so we cast here.
    cast_shape_node = self.g.make_node("Cast", [shape_node.output[0]],
                                       attr={"to": onnx_pb.TensorProto.FLOAT},
                                       shapes=[self.g.get_shape(shape_node.output[0])])

    batchsize_node = self.g.make_node("Slice", [cast_shape_node.output[0]],
                                      attr={"axes": [0], "starts": [1], "ends": [2]})

    # Tile's repeats must be int64
    repeat_node = self.g.make_node("Cast", [batchsize_node.output[0]],
                                   attr={"to": onnx_pb.TensorProto.INT64})

    self.all_nodes.extend([shape_node, cast_shape_node, batchsize_node, repeat_node])

    if not seq_length_node:
        timestep_node = self.g.make_node("Slice", [cast_shape_node.output[0]],
                                         attr={"axes": [0], "starts": [0], "ends": [1]})

        tile_node = self.g.make_node("Tile", [timestep_node.output[0], repeat_node.output[0]])
github onnx / sklearn-onnx / skl2onnx / common / _container.py View on Github external
def __init__(self, raw_model, dtype):
    """
    :param raw_model: *scikit-learn* model to convert
    :param dtype: numpy dtype of the converted model, one of
        np.float32, np.float64 or np.int64
    """
    self._raw_model = raw_model
    self.dtype = dtype
    if dtype == np.float32:
        self.proto_dtype = onnx_proto.TensorProto.FLOAT
    elif dtype == np.float64:
        self.proto_dtype = onnx_proto.TensorProto.DOUBLE
    elif dtype == np.int64:
        self.proto_dtype = onnx_proto.TensorProto.INT64
    else:
        raise ValueError("dtype must be np.float32, "
                         "np.float64 or np.int64.")
github onnx / tensorflow-onnx / tf2onnx / rewriter / unit_rewriter_base.py View on Github external
if not seq_length_node:
    timestep_node = self.g.make_node("Slice", [cast_shape_node.output[0]],
                                     attr={"axes": [0], "starts": [0], "ends": [1]})

    tile_node = self.g.make_node("Tile", [timestep_node.output[0], repeat_node.output[0]])

    # LSTM sequence_lens needs to be int32
    seq_length_node = self.g.make_node('Cast', [tile_node.output[0]],
                                       attr={"to": onnx_pb.TensorProto.INT32})

    self.all_nodes.extend([timestep_node, tile_node, seq_length_node])
else:
    # LSTM sequence_lens needs to be int32
    ori_seq_dtype = self.g.get_dtype(seq_length_node.name)
    if ori_seq_dtype != onnx_pb.TensorProto.INT32:
        seq_length_node = self.g.make_node('Cast', [seq_length_node.output[0]],
                                           attr={"to": onnx_pb.TensorProto.INT32})
        self.all_nodes.append(seq_length_node)

rnn_props.onnx_input_ids["sequence_lens"] = seq_length_node.output[0]
return seq_length_node, batchsize_node
github NVIDIA / mxnet_to_onnx / mx2onnx_converter / mx2onnx_converter.py View on Github external
        onnx_processed_outputs.append(converted)
    elif isinstance(converted, onnx_pb.NodeProto):
        if idx < (len(mx_graph) - 1):
            onnx_processed_nodes.append(converted)
        else:
            # The last node also defines the graph output
            onnx_processed_nodes.append(converted)
            onnx_processed_outputs.append(
                make_tensor_value_info(
                    name=converted.name,
                    elem_type=mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype('float32')],
                    shape=(in_shape[0], -1)
                )
            )
            if log:
                print("Output node is: %s" % converted.name)
    elif isinstance(converted, onnx_pb.TensorProto):
        # Standalone TensorProto values are never produced by convert_layer
        raise ValueError("Did not expect TensorProto")
    else:
        print(converted)
        raise ValueError("node is of an unrecognized type: %s" % type(converted))

    all_processed_nodes.append(converted)

graph = helper.make_graph(
    onnx_processed_nodes,
    "main",
    onnx_processed_inputs,
    onnx_processed_outputs
)
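
After make_graph, the usual final step is wrapping the graph in a ModelProto and serializing it; a minimal sketch continuing from the snippet above (the producer name and output path are placeholders):

import onnx
from onnx import helper, onnx_pb

model = helper.make_model(graph, producer_name='mx2onnx')
assert isinstance(model, onnx_pb.ModelProto)
onnx.checker.check_model(model)     # validate the assembled model
onnx.save(model, 'converted.onnx')  # placeholder output path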