How to use the onnx.checker.check_model function in onnx

To help you get started, we’ve selected a few onnx examples based on popular ways onnx.checker.check_model is used in public projects.

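Before the project examples, here is a minimal, self-contained sketch of the function itself: onnx.checker.check_model validates a ModelProto against the ONNX spec and raises onnx.checker.ValidationError when the model is malformed. All names below are illustrative, not taken from the projects that follow.

import onnx
from onnx import helper, TensorProto

# Build a one-node Identity model just to have something to validate.
node = helper.make_node('Identity', inputs=['X'], outputs=['Y'])
graph = helper.make_graph(
    [node],
    'check-model-demo',
    [helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 2])],
    [helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 2])],
)
model = helper.make_model(graph, producer_name='check-model-demo')

try:
    onnx.checker.check_model(model)  # raises ValidationError if the model is malformed
    print('The model is valid.')
except onnx.checker.ValidationError as e:
    print('The model is invalid: {}'.format(e))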

github pytorch / pytorch / test / onnx / export_onnx_tests_filter.py
    for d in os.listdir(root_dir):
        dir_name = os.path.join(root_dir, d)
        if os.path.isdir(dir_name):
            failed = False
            try:
                model_file = os.path.join(dir_name, "model.onnx")
                data_dir_pattern = os.path.join(dir_name, "test_data_set_*")
                for data_dir in glob.glob(data_dir_pattern):
                    for device in torch.testing.get_all_device_types():
                        run_generated_test(model_file, data_dir, device)
                if expect:
                    expect_file = os.path.join(_expect_dir,
                                               "PyTorch-generated-{}.expect".format(d))
                    with open(expect_file, "w") as text_file:
                        model = onnx.load(model_file)
                        onnx.checker.check_model(model)
                        onnx.helper.strip_doc_string(model)
                        text_file.write(google.protobuf.text_format.MessageToString(model))
                total_pass += 1
            except Exception as e:
                if verbose:
                    print("The test case in {} failed!".format(dir_name))
                    traceback.print_exc()
                if fail_dir is None:
                    shutil.rmtree(dir_name)
                else:
                    target_dir = os.path.join(fail_dir, d)
                    if os.path.exists(target_dir):
                        shutil.rmtree(target_dir)
                    shutil.move(dir_name, target_dir)
                total_fail += 1
    print("Successfully generated/updated {} test cases from PyTorch.".format(total_pass))
github hpi-xnor / BMXNet-v2 / tests / python-pytest / onnx / test_node.py
    def test_exports(self):
        input_shape = (2, 1, 3, 1)
        for test in export_test_cases:
            test_name, onnx_name, mx_op, attrs = test
            input_sym = mx.sym.var('data')
            outsym = mx_op(input_sym, **attrs)
            # export the MXNet symbol to an ONNX file, then reload it and validate
            converted_model = onnx_mxnet.export_model(outsym, {}, [input_shape], np.float32,
                                                      onnx_file_path=outsym.name + ".onnx")
            model = load_model(converted_model)
            checker.check_model(model)
github apache / incubator-tvm / python / tvm / relay / frontend / onnx.py
This can be helpful for some testing.

    Returns
    -------
    mod : tvm.relay.Module
        The relay module for compilation

    params : dict of str to tvm.NDArray
        The parameter dict to be used by relay
    """
    try:
        import onnx
        if hasattr(onnx.checker, 'check_model'):
            # try to use onnx's own model checker before converting the model
            try:
                onnx.checker.check_model(model)
            except onnx.onnx_cpp2py_export.checker.ValidationError as e:
                import warnings
                # the checker raises on any error, so downgrade it to a warning here
                warnings.warn(str(e))
    except ImportError:
        pass
    g = GraphProto(shape, dtype)
    graph = model.graph
    if opset is None:
        try:
            opset = model.opset_import[0].version if model.opset_import else 1
        except AttributeError:
            opset = 1
    mod, params = g.from_onnx(graph, opset)
    return mod, params
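For context, the converter above is normally reached through tvm's relay.frontend.from_onnx entry point. A usage sketch, assuming an illustrative model path and input shape:

import onnx
from tvm import relay

# 'model.onnx' and the input name/shape are placeholders for illustration.
model = onnx.load('model.onnx')
shape_dict = {'data': (1, 3, 224, 224)}
# from_onnx runs onnx.checker.check_model internally, downgrading
# validation errors to warnings as shown in the excerpt above.
mod, params = relay.frontend.from_onnx(model, shape=shape_dict)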
github onnx / sklearn-onnx / docs / examples / plot_onnx_operators.py
)

# Create the graph (GraphProto)
graph_def = helper.make_graph(
    [node_def],
    'test-model',
    [X],
    [Y],
)

# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name='onnx-example')
model_def.opset_import[0].version = 10

print('The model is:\n{}'.format(model_def))
onnx.checker.check_model(model_def)
print('The model is checked!')

#####################################
# Same example with sklearn-onnx
# ++++++++++++++++++++++++++++++
#
# Every operator has its own class in *sklearn-onnx*.
# The list is dynamically created based on the installed
# onnx package.

from skl2onnx.algebra.onnx_ops import OnnxPad  # noqa

pad = OnnxPad('X', output_names=['Y'],
              mode='constant', value=1.5,
              pads=[0, 1, 0, 1],
              op_version=2)
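The excerpt ends before the conversion step. In the full sklearn-onnx example the operator is turned into a ModelProto with to_onnx and validated exactly like the hand-built graph above; a sketch, assuming a float32 input array X:

import numpy as np

X = np.array([[0, 1], [2, 3]], dtype=np.float32)  # illustrative input
model_def = pad.to_onnx({'X': X})  # builds a ModelProto from the operator graph
onnx.checker.check_model(model_def)
print('The model is checked!')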
github ai-techsystems / dnnCompiler / scripts / read_onnx.py
    if optimize:
        print("  Optimization enabled.")
        from onnx import optimizer

        for opt_pass in optimizer.get_available_passes():
            print('    running optimization step : {}'.format(opt_pass.replace("_", " ")))
            try:
                model = optimizer.optimize(model, [opt_pass])
            except Exception as e:
                print("        optimization failed: " + str(e) + ". Abandoning this pass and trying the next.")
        print("  optimization done.")

    if checker:
        try:
            print("running ONNX model shape inference engine and verification")
            onnx.checker.check_model(model)
            from onnx import shape_inference
            model = shape_inference.infer_shapes(model)
            onnx.checker.check_model(model)
        except Exception as e:
            print("        failed, moving to next step: " + str(e))

    graph = model.graph

    self._dcGraph = dnnc.Graph()
    self._dcGraph.setName(graph.name)

    nodes = graph.node
    for node in nodes:
        dcNode = self.addOPNode(node)
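Note the pattern above: the model is validated once before shape inference and again afterwards, so inconsistencies in the value_info entries added by inference are also caught. Isolated into a minimal sketch ('model.onnx' is a placeholder path):

import onnx
from onnx import shape_inference

model = onnx.load('model.onnx')
onnx.checker.check_model(model)              # validate the raw model
model = shape_inference.infer_shapes(model)  # annotate graph.value_info
onnx.checker.check_model(model)              # re-validate the annotated model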
github microsoft / Windows-Machine-Learning / Tools / WinMLDashboard / public / convert.py
def convert_tensorflow_file(filename, opset, output_names):
    import winmltools
    import tf2onnx
    from tensorflow.core.framework import graph_pb2
    from tensorflow.python.tools import freeze_graph
    import onnx
    import tensorflow as tf

    graph_def = graph_pb2.GraphDef()
    with open(filename, 'rb') as file:
        graph_def.ParseFromString(file.read())
    g = tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=g) as sess:
        converted_model = winmltools.convert_tensorflow(sess.graph, opset, continue_on_error=True, verbose=True, output_names=output_names)
        onnx.checker.check_model(converted_model)  # validate the converted model before returning it
    return converted_model
github ruiminshen / yolo2-pytorch / convert_onnx_caffe2.py
def main():
    args = make_args()
    config = configparser.ConfigParser()
    utils.load_config(config, args.config)
    for cmd in args.modify:
        utils.modify_config(config, cmd)
    with open(os.path.expanduser(os.path.expandvars(args.logging)), 'r') as f:
        logging.config.dictConfig(yaml.load(f, Loader=yaml.SafeLoader))
    model_dir = utils.get_model_dir(config)
    model = onnx.load(model_dir + '.onnx')
    onnx.checker.check_model(model)  # validate the model before converting it to Caffe2
    init_net, predict_net = onnx_caffe2.backend.Caffe2Backend.onnx_graph_to_caffe2_net(model.graph, device='CPU')
    onnx_caffe2.helper.save_caffe2_net(init_net, os.path.join(model_dir, 'init_net.pb'))
    onnx_caffe2.helper.save_caffe2_net(predict_net, os.path.join(model_dir, 'predict_net.pb'), output_txt=True)
    logging.info(model_dir)