How to use the onnxruntime.backend module in onnxruntime

To help you get started, we’ve selected a few onnxruntime.backend examples, based on popular ways it is used in public projects.
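Before the project excerpts below, here is a minimal, self-contained sketch of the basic pattern: onnxruntime.backend.prepare wraps an ONNX model (a file path or a loaded ModelProto) and returns a representation whose run method executes it. The one-node Relu graph built here is purely illustrative.

import numpy as np
from onnx import helper, TensorProto
import onnxruntime.backend as backend

# Build a tiny one-node graph so the sketch is self-contained (illustrative only).
node = helper.make_node("Relu", ["x"], ["y"])
graph = helper.make_graph(
    [node], "relu_example",
    [helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 4])],
    [helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 4])],
)
model = helper.make_model(graph)

rep = backend.prepare(model, "CPU")  # load the model for CPU execution
x = np.array([[-1.0, 2.0, -3.0, 4.0]], dtype=np.float32)
print(rep.run(x)[0])  # -> [[0. 2. 0. 4.]]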


github eth-sri / eran / testing / check_models.py View on Github external
            elif dataset == 'mnist':
                input = np.array(test_input, dtype=np.float32).reshape([1, 28, 28, 1])

            if is_onnx:
                input = input.transpose(0, 3, 1, 2)
                for name, shape in output_info:
                    out_node = helper.ValueInfoProto(type=helper.TypeProto())
                    out_node.name = name
                    out_node.type.tensor_type.elem_type = model.graph.output[0].type.tensor_type.elem_type
                    if len(shape) == 4:
                        shape = [shape[0], shape[3], shape[1], shape[2]]
                    for dim_value in shape:
                        dim = out_node.type.tensor_type.shape.dim.add()
                        dim.dim_value = dim_value
                    model.graph.output.append(out_node)
                runnable = rt.prepare(model, 'CPU')
                pred = runnable.run(input)
                #print(pred)
            else:
                if not (is_saved_tf_model or is_pb_file):
                    input = np.array(test_input, dtype=np.float32)
                output_names = [e[0] for e in output_info]
                pred = sess.run(get_out_tensors(output_names), {sess.graph.get_operations()[0].name + ':0': input})
                #print(pred)
            pred_eran = np.asarray([(i+j)/2 for i, j in zip(nlb[-1], nub[-1])])
            pred_model = np.asarray(pred[-1]).reshape(-1)
            if len(pred_eran) != len(pred_model):
                tested_file.write(', '.join([dataset, network, domain, 'predictions do not have the same number of labels. ERAN: ' + str(len(pred_eran)) + ' model: ' + str(len(pred_model))]) + '\n\n\n')
                tested_file.flush()
                continue
            difference = pred_eran - pred_model
            if np.all([abs(elem) < .001 for elem in difference]):
                pass  # (excerpt truncated here in the original listing)
github apache / incubator-tvm / tests / python / frontend / onnx / test_forward.py View on Github external
def get_onnxruntime_output(model, inputs, dtype='float32'):
    """Run `model` through the onnxruntime backend and return its output(s)."""
    import onnxruntime.backend
    rep = onnxruntime.backend.prepare(model, 'CPU')
    if isinstance(inputs, list) and len(inputs) > 1:
        # Multiple inputs: feed the list through unchanged.
        ort_out = rep.run(inputs)
    else:
        # Single input: cast to the requested dtype and unwrap the first output.
        x = inputs.astype(dtype)
        ort_out = rep.run(x)[0]
    return ort_out
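A hypothetical call, assuming a small single-input model such as the Relu graph sketched at the top of this page: a bare array is cast to dtype and the first output is unwrapped, while a list of several arrays is passed through to run unchanged.

import numpy as np

x = np.array([[-1.0, 2.0, -3.0, 4.0]], dtype=np.float32)
y = get_onnxruntime_output(model, x)  # single input: returns the first output array
# For a model with several inputs, pass them as a list instead (names are placeholders):
# ys = get_onnxruntime_output(multi_input_model, [x0, x1])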
github microsoft / onnxruntime / docs / python / examples / plot_backend.py View on Github external
print("label={}".format(label))
    print("probabilities={}".format(proba))
except (RuntimeError, InvalidArgument) as e:
    print(e)

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
from onnxruntime import get_device
print(get_device())

########################################
# The backend can also directly load the model
# without using *onnx*.

rep = backend.prepare(name, 'CPU')
x = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    label, proba = rep.run(x)
    print("label={}".format(label))
    print("probabilities={}".format(proba))
except (RuntimeError, InvalidArgument) as e:
    print(e)
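Both plot_backend.py excerpts wrap rep.run(x) in a try/except: the bundled logreg_iris.onnx model expects four features per row (the sklearn-onnx variant further down feeds four-column inputs), so the two-column x here appears intended to show how onnxruntime surfaces an InvalidArgument error rather than to produce a prediction.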
github onnx / sklearn-onnx / docs / examples / plot_backend.py View on Github external
rep = backend.prepare(model, 'CPU')
x = np.array([[-1.0, -2.0, 5.0, 6.0],
              [-1.0, -2.0, -3.0, -4.0],
              [-1.0, -2.0, 7.0, 8.0]],
             dtype=np.float32)
label, proba = rep.run(x)
print("label={}".format(label))
print("probabilities={}".format(proba))

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
print(get_device())

########################################
# The backend can also directly load the model
# without using *onnx*.

rep = backend.prepare(name, 'CPU')
x = np.array([[-1.0, -2.0, -3.0, -4.0],
              [-1.0, -2.0, -3.0, -4.0],
              [-1.0, -2.0, -3.0, -4.0]],
             dtype=np.float32)
label, proba = rep.run(x)
print("label={}".format(label))
print("probabilities={}".format(proba))

#######################################
# The backend API is implemented by other frameworks
# and makes it easier to switch between multiple runtimes
# with the same API.

#################################
# **Versions used for this example**
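As the portability comment in that excerpt notes, the backend interface is defined by onnx itself, so code written against it can switch runtimes with little change. A small sketch of the portable pattern, reusing the model and x from onnx/sklearn-onnx's plot_backend.py and using supports_device to fall back to CPU (CUDA availability depends on how the onnxruntime package was built):

import onnxruntime.backend as backend

device = "CUDA" if backend.supports_device("CUDA") else "CPU"
rep = backend.prepare(model, device)
label, proba = rep.run(x)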
github opencv / open_model_zoo / tools / accuracy_checker / accuracy_checker / launcher / onnx_launcher.py View on Github external
def __init__(self, config_entry: dict, *args, **kwargs):
        super().__init__(config_entry, *args, **kwargs)

        onnx_launcher_config = LauncherConfigValidator('ONNX_Launcher', fields=self.parameters())
        onnx_launcher_config.validate(self.config)

        self.model = str(self.get_value_from_config('model'))

        device = re.match(DEVICE_REGEX, self.get_value_from_config('device').lower()).group('device')
        backend_rep = backend.prepare(model=self.model, device=device.upper())
        self._inference_session = backend_rep._session # pylint: disable=W0212
        outputs = self._inference_session.get_outputs()
        self.output_names = [output.name for output in outputs]
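The launcher above reaches into the backend's private _session attribute to get at the underlying InferenceSession. A minimal sketch of the equivalent setup through the public API, with "model.onnx" as a placeholder path:

import onnxruntime as ort

sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])  # placeholder path
output_names = [output.name for output in sess.get_outputs()]
input_name = sess.get_inputs()[0].name
# predictions = sess.run(output_names, {input_name: batch}) for a numpy float32 batch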
github onnx / sklearn-onnx / docs / examples / plot_backend.py View on Github external
"""
import skl2onnx
import onnxruntime
import onnx
import sklearn
import numpy
from onnxruntime import get_device
import numpy as np
from onnxruntime import datasets
import onnxruntime.backend as backend
from onnx import load

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, 'CPU')
x = np.array([[-1.0, -2.0, 5.0, 6.0],
              [-1.0, -2.0, -3.0, -4.0],
              [-1.0, -2.0, 7.0, 8.0]],
             dtype=np.float32)
label, proba = rep.run(x)
print("label={}".format(label))
print("probabilities={}".format(proba))

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
print(get_device())

########################################
# The backend can also directly load the model
# without using *onnx*.
github microsoft / onnxruntime / docs / python / examples / plot_backend.py View on Github external
*ONNX Runtime* extends the onnx backend API
to run predictions using this runtime.
Let's use the API to compute the prediction
of a simple logistic regression model.
"""
import numpy as np
from onnxruntime import datasets
from onnxruntime.capi.onnxruntime_pybind11_state import InvalidArgument
import onnxruntime.backend as backend
from onnx import load

name = datasets.get_example("logreg_iris.onnx")
model = load(name)

rep = backend.prepare(model, 'CPU')
x = np.array([[-1.0, -2.0]], dtype=np.float32)
try:
    label, proba = rep.run(x)
    print("label={}".format(label))
    print("probabilities={}".format(proba))
except (RuntimeError, InvalidArgument) as e:
    print(e)

########################################
# The device depends on how the package was compiled,
# GPU or CPU.
from onnxruntime import get_device
print(get_device())

########################################
# The backend can also directly load the model
# without using *onnx*.