How to use onnx - 10 common examples

To help you get started, we’ve selected a few onnx examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from the pytorch/pytorch repository — test/onnx/export_onnx_tests_filter.py (view on GitHub)
# NOTE(review): fragment — the enclosing function definition and the bindings for
# total_pass, total_fail, expect, verbose, fail_dir and _expect_dir are outside
# this excerpt, so their exact semantics cannot be confirmed here.
# Walk each subdirectory of root_dir; each one is expected to hold a generated
# ONNX test case ("model.onnx" plus one or more "test_data_set_*" directories).
for d in os.listdir(root_dir):
        dir_name = os.path.join(root_dir, d)
        if os.path.isdir(dir_name):
            # NOTE(review): `failed` is assigned but never read in this excerpt.
            failed = False
            try:
                model_file = os.path.join(dir_name, "model.onnx")
                data_dir_pattern = os.path.join(dir_name, "test_data_set_*")
                # Run the generated test for every data set on every device type.
                for data_dir in glob.glob(data_dir_pattern):
                    for device in torch.testing.get_all_device_types():
                        run_generated_test(model_file, data_dir, device)
                # Optionally regenerate the textual .expect file for this case.
                if expect:
                    expect_file = os.path.join(_expect_dir,
                                               "PyTorch-generated-{}.expect".format(d))
                    with open(expect_file, "w") as text_file:
                        model = onnx.load(model_file)
                        # Validate the model before serializing it as text.
                        onnx.checker.check_model(model)
                        # Strip doc strings so the expect file stays stable.
                        onnx.helper.strip_doc_string(model)
                        text_file.write(google.protobuf.text_format.MessageToString(model))
                total_pass += 1
            except Exception as e:
                if verbose:
                    print("The test case in {} failed!".format(dir_name))
                    traceback.print_exc()
                # Failed cases are either deleted or moved aside for inspection.
                if fail_dir is None:
                    shutil.rmtree(dir_name)
                else:
                    target_dir = os.path.join(fail_dir, d)
                    if os.path.exists(target_dir):
                        shutil.rmtree(target_dir)
                    shutil.move(dir_name, target_dir)
                total_fail += 1
    # NOTE(review): this line is indented for an outer scope not visible here.
    print("Successfully generated/updated {} test cases from PyTorch.".format(total_pass))
Example from the ANRGUSC/Jupiter repository — app_specific_files/demotest_backup_circe/scripts/utils/onnx2coreml.py (view on GitHub)
# NOTE(review): fragment — the enclosing function and the `files` / `convert` /
# `onnx_pb` bindings are outside this excerpt; the loop body is also truncated
# (the model is never saved within the visible lines).
# Convert each ONNX file to a CoreML model, then downcast weights to FP16 and
# force the first two outputs to DOUBLE.
for f in files:
        # 1. ONNX to CoreML
        # Derive an output name from the file's basename, without ".onnx".
        name = 'saved_models/' + f.split('/')[-1].replace('.onnx', '')

        # # Load the ONNX model
        model = onnx.load(f)

        # Check that the IR is well formed
        print(onnx.checker.check_model(model))

        # Print a human readable representation of the graph
        print(onnx.helper.printable_graph(model.graph))

        # Re-read the raw protobuf for the converter.
        # NOTE(review): model_file is never closed in this excerpt — a
        # `with open(f, 'rb') as ...:` block would be safer.
        model_file = open(f, 'rb')
        model_proto = onnx_pb.ModelProto()
        model_proto.ParseFromString(model_file.read())
        # NOTE(review): image_input_names=['0'] and the 1/255 scale presumably
        # match the exporter's input naming/normalization — verify against the
        # export script.
        yolov3_model = convert(model_proto, image_input_names=['0'], preprocessing_args={'image_scale': 1. / 255})

        # 2. Reduce model to FP16, change outputs to DOUBLE and save
        import coremltools

        spec = yolov3_model.get_spec()
        # Retype the first two outputs as DOUBLE multi-arrays.
        for i in range(2):
            spec.description.output[i].type.multiArrayType.dataType = \
                coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')

        # Halve weight storage by converting weights to FP16.
        spec = coremltools.utils.convert_neural_network_spec_weights_to_fp16(spec)
        yolov3_model = coremltools.models.MLModel(spec)

        name_out0 = spec.description.output[0].name
        name_out1 = spec.description.output[1].name
Example from the onnx/tensorflow-onnx repository — tests/test_optimizers.py (view on GitHub)
def _define_loop_graph(external_inputs):
    """Build a Loop body subgraph: Gather -> Transpose -> Squeeze producing a
    scan output, with the loop condition passed through unchanged as both the
    condition output and the loop-carried output.

    external_inputs: names of tensors defined outside this subgraph; the first
    one is gathered per iteration.
    """
    gather = helper.make_node("Gather", [external_inputs[0], "loop_iter_num"], ["Y0"])
    transpose = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=[0, 2, 3, 1])
    # scan output for each iteration
    squeeze = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
    cond_out = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
    carried_out = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])

    subgraph_inputs = [
        helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)),  # iteration_num
        helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()),  # condition
        helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ()),  # loop_carried
    ]
    subgraph_outputs = [
        helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
        helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
        helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * 3),
    ]

    return helper.make_graph(
        [gather, transpose, squeeze, cond_out, carried_out],
        "loop_subgraph",
        subgraph_inputs,
        subgraph_outputs,
    )
Example from the onnx/onnx-tensorflow repository — test/backend/test_node.py (view on GitHub)
def test_tile(self):
  """Check the ONNX Tile operator against np.tile on a random 4-D tensor."""
  # Older ONNX versions recorded the wrong opset for Tile, so skip there.
  if legacy_onnx_pre_ver(1, 2):
    raise unittest.SkipTest(
        "The current version of ONNX does not record correctly the opset of Tile."
    )
  tile_node = helper.make_node("Tile", ["X1", "X2"], ["Z"])
  data = self._get_rnd_float32(shape=[3, 5, 5, 3])
  reps = [1, 1, 2, 1]
  result = run_node(tile_node, [data, reps])
  np.testing.assert_allclose(result["Z"], np.tile(data, reps), rtol=1e-3)
Example from the Xilinx/finn repository — tests/test_mixed_onnx_exec.py (view on GitHub)
def test_execute_mixed_model():
    """Assemble a two-node model mixing a custom-domain op with a standard one:
    a finn MultiThreshold feeding a plain ONNX Relu."""
    mt_node = helper.make_node(
        "MultiThreshold", ["v", "thresholds"], ["out0"], domain="finn"
    )
    relu_node = helper.make_node("Relu", ["out0"], ["out1"])

    # Intermediate tensor connecting the two nodes; declared via value_info.
    out0 = helper.make_tensor_value_info("out0", TensorProto.FLOAT, [6, 3, 2, 2])

    graph_def = helper.make_graph(
        nodes=[mt_node, relu_node],
        name="test-model",
        inputs=[
            helper.make_tensor_value_info("v", TensorProto.FLOAT, [6, 3, 2, 2]),
            helper.make_tensor_value_info("thresholds", TensorProto.FLOAT, [3, 7]),
        ],
        outputs=[
            helper.make_tensor_value_info("out1", TensorProto.FLOAT, [6, 3, 2, 2])
        ],
        value_info=[out0],
    )
    model_def = helper.make_model(graph_def, producer_name="onnx-example")
Example from the apache/singa repository — test/python/test_onnx_backend.py (view on GitHub)
def test_Asinh(self):  # type: () -> None
        """Verify the Asinh operator against np.arcsinh, on both a small fixed
        vector and a random 3-D tensor."""
        asinh_node = onnx.helper.make_node(
            'Asinh',
            inputs=['x'],
            outputs=['y'],
        )

        # Fixed vector with a known expected output [-0.88137358, 0., 0.88137358].
        fixed = np.array([-1, 0, 1]).astype(np.float32)
        expect(asinh_node, inputs=[fixed], outputs=[np.arcsinh(fixed)],
               name='test_asinh_example')

        # Random tensor exercises the elementwise path over a larger shape.
        rnd = np.random.randn(3, 4, 5).astype(np.float32)
        expect(asinh_node, inputs=[rnd], outputs=[np.arcsinh(rnd)],
               name='test_asinh')
Example from the onnx/tensorflow-onnx repository — tests/test_optimizers.py (view on GitHub)
def test_identity_non_graph_output(self):
        """An Identity whose output is not a graph output should be removed by
        the optimizer (remaining_identity_num=0)."""
        nodes = [
            helper.make_node("Add", ["X", "X"], ["Y"], name="add"),
            helper.make_node("Identity", ["Y"], ["Z"], name="identity"),
            helper.make_node("Shape", ["Z"], ["Z1"], name="shape"),
        ]
        graph_inputs = [helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))]
        graph_outputs = [helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])]
        graph = helper.make_graph(nodes, "identity-test", graph_inputs, graph_outputs)

        model_proto = self.make_model(graph, producer_name="onnx-tests")
        feed = {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)}
        self.run_identity_compare(["Z1"], feed, model_proto, remaining_identity_num=0)
Example from the onnx/tensorflow-onnx repository — tests/test_optimizers.py (view on GitHub)
def test_transpose_with_squeeze2(self):
        """Transpose followed by a Squeeze of axis 1: one transpose must survive,
        with its perm merged to [0, 2, 1]."""
        trans = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans")
        # squeeze the second dim
        squeeze = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1])

        graph = helper.make_graph(
            [trans, squeeze],
            "transpose_with_squeeze",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, (3, 4, 1, 5))],
            [helper.make_tensor_value_info("Z", TensorProto.FLOAT, (3, 5, 4))],
        )

        model_proto = self.make_model(graph, producer_name="onnx-tests")
        feed = {"X": np.random.randn(3, 4, 1, 5).astype(np.float32)}
        optimized = self.run_transpose_compare(["Z"], feed, model_proto,
                                               remaining_transpose_num=1)
        self.check_transpose_perm(optimized, [0, 2, 1])
Example from the pytorch/pytorch repository — caffe2/python/trt/test_trt.py (view on GitHub)
def _test_relu_graph(self, X, batch_size, trt_max_batch_size):
        """Run a single Relu through the plain Caffe2 backend and through the
        TensorRT-converted op, and assert the outputs agree."""
        relu = make_node("Relu", ["X"], ["Y"])
        # Reference result from the plain Caffe2 backend.
        Y_c2 = c2.run_node(relu, {"X": X})

        tensor_shape = [batch_size, 1, 3, 2]
        graph_def = make_graph(
            [relu],
            name="test",
            inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT, tensor_shape)],
            outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, tensor_shape)])
        model_def = make_model(graph_def, producer_name='relu-test')
        op_outputs = [o.name for o in model_def.graph.output]

        op = convert_onnx_model_to_trt_op(model_def, max_batch_size=trt_max_batch_size)
        device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)
        op.device_option.CopyFrom(device_option)

        ws = Workspace()
        with core.DeviceScope(device_option):
            ws.FeedBlob("X", X)
            ws.RunOperatorsOnce([op])
            fetched = [ws.FetchBlob(name) for name in op_outputs]
            Y_trt = namedtupledict('Outputs', op_outputs)(*fetched)
        np.testing.assert_almost_equal(Y_c2, Y_trt)
Example from the onnx/onnx-tensorflow repository — test/backend/test_model.py (view on GitHub)
def test_relu_node_inplace(self):
    """A Relu whose output tensor reuses the input's name pattern ("X" -> "X1")
    should still round-trip through the TensorFlow backend correctly."""
    X = np.random.randn(3, 2).astype(np.float32)
    # Relu reference: clamp negatives to zero.
    expected = np.clip(X, 0, np.inf)

    relu = helper.make_node("Relu", ["X"], ["X1"])
    graph_def = helper.make_graph(
        [relu],
        name="test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [3, 2])],
        outputs=[
            helper.make_tensor_value_info("X1", TensorProto.FLOAT, [3, 2])
        ])
    tf_rep = prepare(helper.make_model(graph_def))
    result = tf_rep.run({"X": X})
    np.testing.assert_almost_equal(result.X1, expected)