How to use the onnxruntime.datasets.get_example function in onnxruntime

To help you get started, we’ve selected a few onnxruntime examples based on popular ways it is used in public projects.

github onnx / sklearn-onnx / docs / examples / plot_metadata.py
"""
Metadata
========

ONNX format contains metadata related to how the
model was produced. It is useful when the model
is deployed to production to keep track of which
instance was used at a specific time.
Let's see how to do that with a simple
logistic regression model trained with
*scikit-learn*.
"""

import skl2onnx
import onnxruntime
import sklearn
import numpy
from onnxruntime import InferenceSession
import onnx
from onnxruntime.datasets import get_example

example = get_example("logreg_iris.onnx")

model = onnx.load(example)

print("doc_string={}".format(model.doc_string))
print("domain={}".format(model.domain))
print("ir_version={}".format(model.ir_version))
print("metadata_props={}".format(model.metadata_props))
print("model_version={}".format(model.model_version))
print("producer_name={}".format(model.producer_name))
print("producer_version={}".format(model.producer_version))

#############################
# With *ONNX Runtime*:

sess = InferenceSession(example)
meta = sess.get_modelmeta()
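#############################
# The preview stops here; a plausible continuation (hedged: these are
# standard ``ModelMetadata`` attributes, not recovered from the snippet)
# prints the same information through the runtime:
print("custom_metadata_map={}".format(meta.custom_metadata_map))
print("description={}".format(meta.description))
print("domain={}".format(meta.domain))
print("graph_name={}".format(meta.graph_name))
print("producer_name={}".format(meta.producer_name))
print("version={}".format(meta.version))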
github microsoft / onnxruntime / docs / python / examples / plot_metadata.py
"""
Metadata
========

ONNX format contains metadata related to how the
model was produced. It is useful when the model
is deployed to production to keep track of which
instance was used at a specific time.
Let's see how to do that with a simple 
logistic regression model trained with
*scikit-learn* and converted with *sklearn-onnx*.
"""

from onnxruntime.datasets import get_example
example = get_example("logreg_iris.onnx")

import onnx
model = onnx.load(example)

print("doc_string={}".format(model.doc_string))
print("domain={}".format(model.domain))
print("ir_version={}".format(model.ir_version))
print("metadata_props={}".format(model.metadata_props))
print("model_version={}".format(model.model_version))
print("producer_name={}".format(model.producer_name))
print("producer_version={}".format(model.producer_version))

#############################
# With *ONNX Runtime*:

from onnxruntime import InferenceSession
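#############################
# The preview cuts off after this import. As a complementary, hedged
# sketch (``onnx.helper.set_model_props`` is part of *onnx* and does not
# appear in this snippet), metadata can also be written before a model
# is saved:
onnx.helper.set_model_props(model, {"trained_on": "iris-train-v1"})
onnx.save(model, "logreg_iris_with_props.onnx")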
github microsoft / onnxruntime / docs / python / examples / plot_pipeline.py
"""
Draw a pipeline
===============

There is no other way to look into one model stored
in ONNX format than looking into its nodes with
*onnx*. This example demonstrates
how to draw a model and to retrieve it in *json*
format.

.. contents::
    :local:

Retrieve a model in JSON format
+++++++++++++++++++++++++++++++

That's the simplest way.
"""

from onnxruntime.datasets import get_example
example1 = get_example("mul_1.onnx")

import onnx
model = onnx.load(example1)  # model is a ModelProto protobuf message

print(model) 
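#################################
# Note that ``print(model)`` shows the protobuf *text* format. For real
# JSON, a minimal sketch (hedged: uses ``google.protobuf``, a dependency
# of *onnx*, which this snippet does not import):
from google.protobuf.json_format import MessageToJson
print(MessageToJson(model))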


#################################
# Draw a model with ONNX
# ++++++++++++++++++++++
# We use ``net_drawer.py``
# included in the *onnx* package.
# We use *onnx* to load the model
# in a different way than before.
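#################################
# A plausible continuation (hedged: standard ``net_drawer`` usage, not
# recovered from the truncated snippet):
from onnx.tools.net_drawer import GetPydotGraph, GetOpNodeProducer
pydot_graph = GetPydotGraph(
    model.graph, name=model.graph.name, rankdir="LR",
    node_producer=GetOpNodeProducer("docstring"))
pydot_graph.write_dot("graph.dot")  # render afterwards with graphviz dot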
github microsoft / onnxruntime / docs / python / examples / plot_backend.py
"""
ONNX Runtime Backend for ONNX
=============================

*ONNX Runtime* extends the *onnx* backend API
to run predictions using this runtime.
Let's use the API to compute the prediction
of a simple logistic regression model.
"""
import numpy as np
from onnxruntime import datasets
from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
import onnxruntime.backend as backend
from onnx import load

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, 'CPU')
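# NOTE (added): logreg_iris.onnx expects 4 features per row, so this
# 2-feature input should make ``run`` raise ``InvalidArgument``, which
# the ``except`` clause below prints.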
x = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    label, proba = rep.run(x)
    print("label={}".format(label))
    print("probabilities={}".format(proba))
except (RuntimeError, InvalidArgument) as e:
    print(e)

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
from onnxruntime import get_device
print(get_device())
github microsoft / onnxruntime / python / _downloads / 4412ae3bda7068f45094acb5373c790e / plot_load_and_predict.py
"""
Load and predict with ONNX Runtime and a very simple model
==========================================================

This example demonstrates how to load a model and compute
the output for an input vector. It also shows how to
retrieve the definition of its inputs and outputs.
"""

import onnxruntime as rt
import numpy
from onnxruntime.datasets import get_example

#########################
# Let's load a very simple model.
# The model is available on github: ``onnx...test_sigmoid``.

example1 = get_example("sigmoid.onnx")
sess = rt.InferenceSession(example1)

#########################
# Let's see the input name and shape.

input_name = sess.get_inputs()[0].name
print("input name", input_name)
input_shape = sess.get_inputs()[0].shape
print("input shape", input_shape)
input_type = sess.get_inputs()[0].type
print("input type", input_type)

#########################
# Let's see the output name and shape.

output_name = sess.get_outputs()[0].name
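#########################
# The preview stops here; a plausible continuation (hedged: mirrors the
# input inspection above and assumes ``sigmoid.onnx`` takes a
# [3, 4, 5] float32 tensor, as the printed input shape suggests):
print("output name", output_name)
print("output shape", sess.get_outputs()[0].shape)
print("output type", sess.get_outputs()[0].type)

x = numpy.random.random((3, 4, 5)).astype(numpy.float32)
res = sess.run([output_name], {input_name: x})
print(res)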
github onnx / sklearn-onnx / docs / examples / plot_backend.py
"""
ONNX Runtime Backend for ONNX
=============================

*ONNX Runtime* extends the *onnx* backend API
to run predictions using this runtime.
Let's use the API to compute the prediction
of a simple logistic regression model.
"""
import skl2onnx
import onnxruntime
import onnx
import sklearn
import numpy
from onnxruntime import get_device
import numpy as np
from onnxruntime import datasets
import onnxruntime.backend as backend
from onnx import load

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, 'CPU')
x = np.array([[-1.0, -2.0, 5.0, 6.0],
              [-1.0, -2.0, -3.0, -4.0],
              [-1.0, -2.0, 7.0, 8.0]],
             dtype=np.float32)
label, proba = rep.run(x)
print("label={}".format(label))
print("probabilities={}".format(proba))

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
print(get_device())
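########################################
# A related check (hedged: ``supports_device`` is part of
# ``onnxruntime.backend`` but does not appear in this snippet):
print(backend.supports_device('CPU'))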
github microsoft / onnxruntime / docs / python / examples / plot_profiling.py
"""
Profile the execution of a simple model
=======================================

*ONNX Runtime* can profile the execution of the model.
This example shows how to interpret the results.
"""

import onnxruntime as rt
import numpy
from onnxruntime.datasets import get_example

#########################
# Let's load a very simple model and compute some prediction.

example1 = get_example("mul_1.onnx")
sess = rt.InferenceSession(example1)
input_name = sess.get_inputs()[0].name

x = numpy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=numpy.float32)
res = sess.run(None, {input_name: x})
print(res)

#########################
# We need to enable profiling
# before running the predictions.

options = rt.SessionOptions()
options.enable_profiling = True
sess_profile = rt.InferenceSession(example1, options)
input_name = sess_profile.get_inputs()[0].name
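#########################
# A plausible continuation (hedged: ``end_profiling`` stops profiling
# and returns the name of the JSON trace file the runtime wrote):
res = sess_profile.run(None, {input_name: x})
prof_file = sess_profile.end_profiling()
print(prof_file)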