How to use the onnx.save function in onnx

To help you get started, we've selected a few onnx.save examples, drawn from popular ways the function is used in public projects.

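As a quick orientation before the real-world excerpts below, here is a minimal, self-contained sketch of the typical workflow: build a ModelProto with onnx.helper, validate it, and persist it with onnx.save. The graph (a single Identity node) and file name are purely illustrative.

import onnx
from onnx import helper, TensorProto

# Build a trivial graph: one Identity node copying input X to output Y.
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 4])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 4])
node = helper.make_node('Identity', ['X'], ['Y'])
graph = helper.make_graph([node], 'minimal_graph', [X], [Y])
model = helper.make_model(graph, producer_name='onnx-save-example')

# Validate the in-memory ModelProto, then serialize it to disk.
onnx.checker.check_model(model)
onnx.save(model, 'minimal.onnx')

# onnx.load reads the file back into an equivalent ModelProto.
reloaded = onnx.load('minimal.onnx')
assert reloaded.graph.name == 'minimal_graph'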

microsoft/onnxruntime: onnxruntime/python/tools/featurizer_ops/create_test_model.py (view on GitHub)
# Create a default initializer for float32_input.
tensor_float32 = helper.make_tensor(name='Float32Input', data_type=TensorProto.FLOAT, dims=[1, 1],
                                    vals=np.array([[.0]]).astype(np.float32), raw=False)

# Make a graph.
graph_def = helper.make_graph(nodes=[bool_identity_def, string_identity_def, double_identity_def, int8_identity_def,
                                     int16_identity_def, int32_identity_def, int64_identity_def, optional_identity_def],
                              name='optional_input_graph',
                              inputs=[bool_input, string_input, double_input, int8_input, int16_input, int32_input, int64_input, float32_input],
                              outputs=[bool_output, string_output, double_output, int8_output, int16_output, int32_output, int64_output, float32_output],
                              initializer=[tensor_float32])

# Build the ModelProto; polish_model runs the checker, optimizer, and shape inference over it.
model_def = helper.make_model(graph_def, producer_name='feed_inputs_test')
final_model = onnx.utils.polish_model(model_def)
onnx.save(final_model, args.output_file)
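
Note that onnx.utils.polish_model has since been removed from the onnx package (the optimizer passes it relied on moved to the separate onnxoptimizer project). A rough equivalent for recent onnx releases, assuming only checking and shape inference are needed, is:

import onnx
import onnx.shape_inference

# Validate, infer intermediate shapes, and save (no optimizer passes).
onnx.checker.check_model(model_def)
final_model = onnx.shape_inference.infer_shapes(model_def)
onnx.save(final_model, args.output_file)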
microsoft/onnxruntime: onnxruntime/python/tools/quantization/test_calibrate.py (view on GitHub)
def test_get_intermediate_outputs(self):
    # Create a small graph: Clip followed by MatMul.
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 1, 5, 5])
    B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [1, 1, 5, 5])
    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 1, 5, 5])
    clip_node = onnx.helper.make_node('Clip', ['A'], ['B'], name='Clip')
    matmul_node = onnx.helper.make_node('MatMul', ['B', 'B'], ['C'], name='MatMul')
    graph = helper.make_graph([clip_node, matmul_node], 'test_graph_small', [A], [C])
    model = helper.make_model(graph)
    model_path = 'test_model_small.onnx'
    onnx.save(model, model_path)

    # Augment the graph with instrumentation nodes and save the result.
    augmented_model = calibrate.augment_graph(model)
    augmented_model_path = 'augmented_test_model_small.onnx'
    onnx.save(augmented_model, augmented_model_path)

    # Run inference on the augmented model to collect intermediate outputs.
    images_folder = 'test_images'
    session = onnxruntime.InferenceSession(augmented_model_path)
    (samples, channels, height, width) = session.get_inputs()[0].shape
    inputs = calibrate.load_batch(images_folder, height, width)
    intermediate_outputs = calibrate.get_intermediate_outputs(model_path, session, inputs)

    min_results, max_results = [], []
    for file in os.listdir(images_folder):
        image_filepath = 'test_images/' + file
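
Besides a filesystem path, onnx.save also accepts a writable binary stream, which can be handy in tests like the one above when no on-disk artifact is needed. A small sketch, reusing the model built above:

import io
import onnx

buffer = io.BytesIO()
onnx.save(model, buffer)       # serialize the ModelProto into the buffer

buffer.seek(0)
roundtrip = onnx.load(buffer)  # onnx.load accepts a readable stream as well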
daquexian/onnx-simplifier: onnxsim/__main__.py (view on GitHub)
print("Simplifying...")
    input_shapes = {}
    if args.input_shape is not None:
        for x in args.input_shape:
            if ':' not in x:
                input_shapes[None] = list(map(int, x.split(',')))
            else:
                pieces = x.split(':')
                # for the input name like input:0
                name, shape = ':'.join(
                    pieces[:-1]), list(map(int, pieces[-1].split(',')))
                input_shapes[name] = shape
    model_opt = onnxsim.simplify(
        args.input_model, check_n=args.check_n, perform_optimization=not args.skip_optimization, input_shapes=input_shapes)

    onnx.save(model_opt, args.output_model)
    print("Ok!")
microsoft/onnxruntime: onnxruntime/core/providers/nuphar/scripts/model_editor.py (view on GitHub)
                # Remove the initializer of the concatenated MatMul if no other node consumes it.
                if not [n for n in in_mp.graph.node if n != in_n and replaced_matmul.input[1] in n.input]:
                    nf.remove_initializer(replaced_matmul.input[1])
            elif in_sn != replaced_matmul:
                out_sg.node.add().CopyFrom(in_sn)

        scan = nf.make_node('Scan', new_inputs,
                            {'body': out_sg,
                             'scan_input_directions': scan_input_directions,
                             'scan_output_directions': scan_output_directions,
                             'num_scan_inputs': num_scan_inputs},
                            output_names=list(in_n.output))
        scan.name = in_n.name
        scan.doc_string = in_n.doc_string

    onnx.save(out_mp, output_model)
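
When an edited model like out_mp grows past the 2 GB protobuf limit, onnx.save can no longer write it as a single file. The library supports spilling tensor data to a side file for this case; a sketch, assuming a large ModelProto named large_model:

import onnx

# Write initializer data to an external file so the .onnx protobuf stays small.
onnx.save_model(large_model, 'large.onnx',
                save_as_external_data=True,
                all_tensors_to_one_file=True,
                location='large.onnx.data',  # written next to large.onnx
                size_threshold=1024)         # only tensors above 1 KiB are externalized

# onnx.load transparently pulls the external data back in.
reloaded = onnx.load('large.onnx')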
pfnet-research/chainer-compiler: scripts/quantize_model.py (view on GitHub)
# Load the ONNX model.
model_file = args.model
model = onnx.load(model_file)
del args.model

output_file = args.output
del args.output

# Quantize, forwarding the remaining CLI arguments as config.
print('Quantize config: {}'.format(vars(args)))
quantized_model = quantize.quantize(model, **vars(args))

print('Saving "{}" to "{}"'.format(model_file, output_file))

# Save the quantized model.
onnx.save(quantized_model, output_file)
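
For ONNX Runtime users, current releases expose dynamic quantization directly as onnxruntime.quantization.quantize_dynamic, which loads, quantizes, and saves in a single call; a sketch with illustrative file names:

from onnxruntime.quantization import quantize_dynamic, QuantType

# Reads model.onnx, quantizes weights to 8-bit, writes model.quant.onnx.
quantize_dynamic('model.onnx', 'model.quant.onnx',
                 weight_type=QuantType.QInt8)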
NVIDIA/tensorrt-inference-server: qa/common/gen_qa_sequence_models.py (view on GitHub)
if onnx_dtype == onnx.TensorProto.FLOAT16:
    cast = onnx.helper.make_node("Identity", ["CAST"], ["OUTPUT"])

onnx_nodes = [internal_input, add, mul, cast]
onnx_inputs = [onnx_input, onnx_start, onnx_ready]
onnx_outputs = [onnx_output]

graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs)
model_def = onnx.helper.make_model(graph_proto, producer_name="TRTIS")

try:
    os.makedirs(model_version_dir)
except OSError as ex:
    pass  # ignore existing dir

onnx.save(model_def, model_version_dir + "/model.onnx")
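
On Python 3 the try/except around os.makedirs can be replaced with exist_ok, and os.path.join keeps the output path portable. A minimal sketch of the same save step, reusing the names from the snippet above:

import os
import onnx

os.makedirs(model_version_dir, exist_ok=True)  # no-op if the directory already exists
onnx.save(model_def, os.path.join(model_version_dir, "model.onnx"))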
NVIDIA/tensorrt-inference-server: qa/common/gen_qa_reshape_models.py (view on GitHub)
    if input_shapes == output_shapes:
        onnx_nodes.append(onnx.helper.make_node("Identity", [in_name], [out_name]))
    else:
        onnx_nodes.append(onnx.helper.make_node("Shape", [out_name], [out_shape_name]))
        onnx_nodes.append(onnx.helper.make_node("Reshape", [in_name, out_shape_name], [out_name]))

graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs)
model_def = onnx.helper.make_model(graph_proto, producer_name="TRTIS")

try:
    os.makedirs(model_version_dir)
except OSError as ex:
    pass  # ignore existing dir

onnx.save(model_def, model_version_dir + "/model.onnx")
microsoft/onnxruntime: onnxruntime/core/providers/nuphar/scripts/model_quantizer.py (view on GitHub)
            out_subgraph.ClearField('node')
            scan_nf = NodeFactory(out_mp.graph, out_subgraph)
            # Remember quantized inputs that might be shared between MatMuls.
            subgraph_quantized_inputs = {} if share_input_quantization else None
            for in_sn in in_subgraph.node:
                if in_sn.op_type == 'MatMul':
                    if quantize_matmul_2d_with_weight(in_sn, in_subgraph, scan_nf, converted_weights, subgraph_quantized_inputs, qcfg_dict, export_qcfg_json, default_qcfg):
                        continue

                if in_sn.op_type == 'Slice' and len(in_sn.input) == 1:
                    upgrade_slice_op(scan_nf, in_sn)
                    continue

                out_sn = out_subgraph.node.add()
                out_sn.CopyFrom(in_sn)

    onnx.save(out_mp, output_model)
    if export_qcfg_json:
        with open(qcfg_json, 'w') as f:
            f.write(json.dumps(qcfg_dict, indent=2))
NVIDIA/tensorrt-inference-server: qa/common/gen_qa_identity_models.py (view on GitHub)
    in_name = "INPUT{}".format(io_num)
    out_name = "OUTPUT{}".format(io_num)

    onnx_inputs.append(onnx.helper.make_tensor_value_info(in_name, onnx_dtype, batch_dim + in_shape))
    onnx_outputs.append(onnx.helper.make_tensor_value_info(out_name, onnx_dtype, batch_dim + out_shape))
    onnx_nodes.append(onnx.helper.make_node("Identity", [in_name], [out_name]))

graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs)
model_def = onnx.helper.make_model(graph_proto, producer_name="TRTIS")

try:
    os.makedirs(model_version_dir)
except OSError as ex:
    pass  # ignore existing dir

onnx.save(model_def, model_version_dir + "/model.onnx")
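
After saving with any of the patterns above, it is cheap to verify the artifact on disk. onnx.checker.check_model accepts a file path as well as a ModelProto, so a round-trip check on the file just written is a one-liner:

import onnx

# Validate the serialized file directly; raises onnx.checker.ValidationError on failure.
onnx.checker.check_model(model_version_dir + "/model.onnx")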