How to use the onnx.checker module in onnx

To help you get started, we've selected a few onnx.checker examples based on popular ways the library is used in public projects.

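Before the project snippets, here is a minimal, self-contained sketch of the basic pattern (the tiny Identity model is illustrative, not drawn from any of the projects below): build or load a ModelProto, pass it to onnx.checker.check_model, and catch onnx.checker.ValidationError if the model is malformed.

import onnx
from onnx import helper, TensorProto

# Build a trivial one-node model: Y = Identity(X).
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 3])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 3])
node = helper.make_node('Identity', ['X'], ['Y'])
graph = helper.make_graph([node], 'minimal_graph', [X], [Y])
model = helper.make_model(graph)

# check_model returns None on success and raises
# onnx.checker.ValidationError on a malformed model.
try:
    onnx.checker.check_model(model)
    print('Model is valid.')
except onnx.checker.ValidationError as e:
    print('Model is invalid:', e)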

github onnx / sklearn-onnx / tests / test_algebra_onnx_doc.py
def test_transpose2(self):
    from skl2onnx.algebra.onnx_ops import OnnxTranspose

    node = OnnxTranspose(
        OnnxTranspose(
            'X', perm=[1, 0, 2],
            op_version=onnx.defs.onnx_opset_version()),
        perm=[1, 0, 2], output_names=['Y'],
        op_version=onnx.defs.onnx_opset_version())
    X = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.float32)

    model_def = node.to_onnx({'X': X})
    onnx.checker.check_model(model_def)
    res = self.predict_with_onnxruntime(model_def, X)
    assert_almost_equal(res['Y'], X)
github opencv / open_model_zoo / tools / downloader / caffe2_to_onnx.py
output_file.parent.mkdir(parents=True, exist_ok=True)
value_info = {}
input_names = input_names.split(',')
for name, shape in zip(input_names, input_shape):
    value_info[name] = [shape[0], shape]
if predict_net.name == "":
    predict_net.name = model_name

onnx_model = Caffe2Frontend.caffe2_net_to_onnx_model(
    predict_net,
    init_net,
    value_info
)
try:
    onnx.checker.check_model(onnx_model)
    print('ONNX check passed successfully.')
    with open(str(output_file), 'wb') as f:
        f.write(onnx_model.SerializeToString())
except onnx.onnx_cpp2py_export.checker.ValidationError as exc:
    sys.exit('ONNX check failed with error: ' + str(exc))
github PRBonn / bonnetal / train / tasks / classification / modules / traceSaver.py
def export_ONNX(self):
  # convert to ONNX traced model

  # create profile
  onnx_path = os.path.join(self.new_path, "model.onnx")
  with torch.no_grad():
    print("Profiling model")
    print("saving model in ", onnx_path)
    torch.onnx.export(self.model, self.dummy_input, onnx_path)

  # check that it worked
  print("Checking that it all worked out")
  model_onnx = onnx.load(onnx_path)
  onnx.checker.check_model(model_onnx)
github NGnetLab / AITrans_DTP / rf / torch_ddpg.py
def load_onnx(self, file_name='torch_15728833105783641_target.onnx'):

    if not file_name:
        return None
    import onnx
    # import onnx_caffe2.backend as backend
    import caffe2.python.onnx.backend as backend
    print("load_onnx")
    model = onnx.load(file_name)
    print(onnx.checker.check_model(model))
    rep = backend.prepare(model, device='CPU')
    s = np.array([0.5, .00004, .3, 0.000002, .01, .006], dtype=np.float32).reshape((-1, self.s_dim))

    print(rep.run(s))
    print(self.choose_action([0.5, .00004, .3, 0.000002, .01, .006]))
    # [0.3615257142857143, 0.0035, 0.0, 0.9994467834731613, 0.0, 1.0] [0.5, 0.5, 0.0]
github onnx / onnx / onnx / bin / checker.py
import argparse

from onnx import checker, load


def check_model():  # type: () -> None
    parser = argparse.ArgumentParser('check-model')
    parser.add_argument('model_pb', type=argparse.FileType('rb'))
    args = parser.parse_args()

    model = load(args.model_pb)
    checker.check_model(model)
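Assuming the package's setup registers this function as a console script (the onnx distribution has exposed it as check-model), the checker can then be run from the shell against a serialized model file without writing any Python at all.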
github htshinichi / caffe-onnx / src / load_save_model.py
def saveonnxmodel(onnxmodel, onnx_save_path):
    try:
        onnx.checker.check_model(onnxmodel)
        onnx.save_model(onnxmodel, onnx_save_path)
        print("3. Model saved successfully to " + onnx_save_path)
    except Exception as e:
        print("3. The model has a problem and was not saved:\n", e)
github NVIDIA / mxnet_to_onnx / mx2onnx_converter / mx2onnx_converter.py
    else:
        print(converted)
        raise ValueError("node is of an unrecognized type: %s" % type(node))

    all_processed_nodes.append(converted)

graph = helper.make_graph(
    onnx_processed_nodes,
    "main",
    onnx_processed_inputs,
    onnx_processed_outputs
)

graph.initializer.extend(initializer)

checker.check_graph(graph)
return graph
github hls-fpga-machine-learning / hls4ml / onnx-to-hls / converters / keras-to-onnx.py
if not args.model: parser.error('Model file needs to be specified.')
if not args.weights: parser.error('Weights file needs to be specified.')
if not args.output: parser.error('Output file needs to be specified.')

# Load Keras model and its weights
with open(args.model, 'r') as json_file:
    keras_model = model_from_json(json_file.read())

keras_model.load_weights(args.weights)
#keras_model.summary()

# Save to ONNX format
onnx_model = onnxmltools.convert_keras(keras_model)

# Check model
checker.check_model(onnx_model)

# Infer shape
onnx_model = shape_inference.infer_shapes(onnx_model)

passes = ['fuse_matmul_add_bias_into_gemm', 'fuse_consecutive_transposes', 'fuse_transpose_into_gemm']
onnx_model = optimizer.optimize(onnx_model, passes)
onnx_model = polish_model(onnx_model)
onnxmltools.utils.save_model(onnx_model, args.output)
github apache / singa / python / singa / sonnx.py
get onnx model from singa computational graph
    Args:
        inputs: a list of input tensors (each is initialized with a name)
        y: a list of tensors, usually the outputs of the graph
    Returns:
        the onnx model
    """
    opset_id = OperatorSetIdProto()
    opset_id.version = cls._target_opset_version
    model = helper.make_model(cls.singa_to_onnx_graph(
        inputs, y, model_name="sonnx"), producer_name='sonnx',
        opset_imports=[opset_id])
    # print('The model is:\n{}'.format(model))
    model = optimizer.optimize(model)
    checker.check_model(model)
    return model
github michaelulin / pytorch-caffe2-aws-lambda / Convert-Model.py
# Evaluation Mode
model.train(False)

# Create dummy input
dummy_input = Variable(torch.randn(1, 3, 224, 224))
output_torch = model(dummy_input)

# Export ONNX model
torch.onnx.export(model, dummy_input, "model.proto", verbose=True)

# Load ONNX model (onnx.load returns a ModelProto, not a GraphProto)
onnx_model = onnx.load("model.proto")

# Check formation: use check_model on the ModelProto
# (check_graph would fail here, since it expects a GraphProto)
onnx.checker.check_model(onnx_model)

# Print graph to get blob names
onnx.helper.printable_graph(onnx_model.graph)

# Check model output
rep = backend.prepare(onnx_model, device="CPU")
output_onnx = rep.run(dummy_input.cpu().data.numpy().astype(np.float32))

# Verify the numerical correctness up to 3 decimal places
np.testing.assert_almost_equal(output_torch.data.cpu().numpy(), output_onnx[0], decimal=3)
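For completeness, a minimal hedged sketch of the check_model/check_graph distinction the fix above relies on (reusing the "model.proto" filename from this snippet):

import onnx

onnx_model = onnx.load("model.proto")

# check_model validates the whole ModelProto: IR version,
# opset imports, and the graph itself.
onnx.checker.check_model(onnx_model)

# check_graph validates only a bare GraphProto, such as the
# one nested inside the model (or the one the mx2onnx example
# above builds by hand with helper.make_graph).
onnx.checker.check_graph(onnx_model.graph)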