How to use openvino - 9 common examples

To help you get started, we’ve selected a few openvino examples, based on popular ways it is used in public projects.
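All of the snippets below use the legacy Inference Engine Python API (openvino.inference_engine); IEPlugin and IENetwork.from_ir are deprecated in favor of IECore in later releases, and OpenVINO 2022.1 replaced this API with openvino.runtime, so treat these as patterns rather than copy-paste code. As a rough, hedged sketch of the flow they all share (read an IR, pick the input and output blobs, load the network onto a device, run inference), with placeholder file names, device, and image path:

import cv2
from openvino.inference_engine import IECore, IENetwork

ie = IECore()
net = IENetwork(model="model.xml", weights="model.bin")     # parse the IR
input_blob = next(iter(net.inputs))                         # assumes a single-input model
out_blob = next(iter(net.outputs))                          # assumes a single-output model
exec_net = ie.load_network(network=net, device_name="CPU")  # compile for the target device

n, c, h, w = net.inputs[input_blob].shape                   # net.input_info on 2020.x and later
frame = cv2.imread("image.jpg")
blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))  # HWC to NCHW
result = exec_net.infer(inputs={input_blob: blob})[out_blob]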


github opencv / openvino_training_extensions / training_toolbox / ssd_detector / regression_test / train_net.py
def run_ie_on_dataset(model_xml, model_bin, cpu_extension_path, images_dir, prob_threshold=0.01):
    plugin = IEPlugin(device='CPU')
    plugin.add_cpu_extension(cpu_extension_path)
    net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net, num_requests=2)
    num, chs, height, width = net.inputs[input_blob]
    del net
    cur_request_id = 0

    detection_data = []
    for image in os.listdir(images_dir):
      im_path = os.path.join(images_dir, image)
      frame = cv2.imread(im_path)
      initial_h, initial_w, _ = frame.shape
      in_frame = cv2.resize(frame, (width, height))
      in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
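The excerpt ends mid-loop (note that on later Inference Engine releases the shape unpack above needs net.inputs[input_blob].shape). A hedged sketch of how such an SSD evaluation loop typically finishes: batch the frame, run the request, and keep detections above prob_threshold. The exact record appended to detection_data is an assumption:

      in_frame = in_frame.reshape((num, chs, height, width))
      exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
      if exec_net.requests[cur_request_id].wait(-1) == 0:
        # SSD output rows are [image_id, label, conf, x_min, y_min, x_max, y_max]
        res = exec_net.requests[cur_request_id].outputs[out_blob]
        for obj in res[0][0]:
          if obj[2] > prob_threshold:
            xmin, ymin = int(obj[3] * initial_w), int(obj[4] * initial_h)
            xmax, ymax = int(obj[5] * initial_w), int(obj[6] * initial_h)
            detection_data.append((image, int(obj[1]), float(obj[2]), (xmin, ymin, xmax, ymax)))
    return detection_data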

github PINTO0309 / OpenVINO-ADAS / openvino_test_CPU.py
#!/usr/bin/env python

import sys
import cv2
import numpy as np
from PIL import Image
import time
from openvino.inference_engine import IENetwork, IEPlugin

model_xml='lrmodels/FP32/semantic-segmentation-adas-0001.xml'
model_bin='lrmodels/FP32/semantic-segmentation-adas-0001.bin'
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
seg_image = Image.open("data/input/009649.png")
palette = seg_image.getpalette() # Get a color palette
camera_width = 320
camera_height = 240
fps = ""
framepos = 0
frame_count = 0
vidfps = 0
elapsedTime = 0

#plugin = IEPlugin(device="HETERO:MYRIAD,CPU")
#plugin.set_config({"TARGET_FALLBACK": "HETERO:MYRIAD,CPU"})
#plugin.set_initial_affinity(net)

#plugin = IEPlugin(device="MYRIAD")
#plugin = IEPlugin(device="GPU")
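The commented-out lines show the device options this script switches between (HETERO, MYRIAD, GPU); the excerpt stops before any plugin is actually created. A hedged sketch of a typical continuation for this kind of segmentation demo: create a plugin, load the network, grab camera frames, and colorize the per-pixel class map with the palette read above. The capture settings and output handling are assumptions:

plugin = IEPlugin(device="CPU")                      # or one of the devices commented out above
exec_net = plugin.load(network=net)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
n, c, h, w = net.inputs[input_blob].shape

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    prepimg = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
    result = exec_net.infer(inputs={input_blob: prepimg})[out_blob]
    classmap = np.squeeze(result).astype(np.uint8)   # per-pixel class indices
    out_img = Image.fromarray(classmap, mode="P")    # colorize with the palette from above
    out_img.putpalette(palette)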

github movidius / ncappzoo / tensorflow / facenet / facenet.py
def main():
    global network_input_h, network_input_w
    ie = IECore()
    net = IENetwork(model = ir, weights = ir[:-3] + 'bin')
    input_blob = next(iter(net.inputs))
    output_blob = next(iter(net.outputs))
    
    exec_net = ie.load_network(network = net, device_name = DEVICE)
    n, c, network_input_h, network_input_w = net.inputs[input_blob].shape

    # Read the image
    validated_image = cv2.imread(validated_image_filename)
    if validated_image is None:
        print("Cannot read image.")
        exit(1)

    # Preprocess the image
    preprocessed_image = preprocess_image(validated_image)
    
    # Run the inference
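The excerpt stops just before the inference itself. With the legacy API this is typically a single synchronous call whose output blob, for FaceNet, is the face embedding; a hedged sketch of the missing lines:

    # Run a synchronous inference and read back the embedding for the validated face
    results = exec_net.infer(inputs={input_blob: preprocessed_image})
    validated_embedding = results[output_blob]

In the full sample this embedding is then compared against embeddings computed for the test images.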

github movidius / ncappzoo / apps / gesture_piarm / gesture_ssd_async.py
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
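Loading with num_requests=2 sets up the asynchronous double-buffering this demo is built around: one request runs inference on the current frame while the next frame is preprocessed and submitted to the other request. A hedged sketch of that loop; cap, the input shape (n, c, h, w), and the SSD output parsing are assumed to be handled elsewhere in the demo:

    cur_request_id, next_request_id = 0, 1
    while cap.isOpened():
        ret, next_frame = cap.read()
        if not ret:
            break
        in_frame = cv2.resize(next_frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
        exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            # parse SSD detections from res and draw or act on them here
        cur_request_id, next_request_id = next_request_id, cur_request_id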

github opencv / open_model_zoo / demos / python_demos / image_retrieval_demo / image_retrieval_demo / image_retrieval.py
def __init__(self, model_path, device, cpu_extension):
        ie = IECore()
        if device == 'CPU':
            ie.add_extension(cpu_extension, 'CPU')

        path = '.'.join(model_path.split('.')[:-1])
        self.net = IENetwork(model=path + '.xml', weights=path + '.bin')
        self.exec_net = ie.load_network(network=self.net, device_name=device)
github movidius / ncappzoo / tensorflow / SSD_Inception_V2 / gesture_ssd_async.py View on Github external
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
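This is the same IEPlugin-based setup as the gesture_piarm demo above. IEPlugin is deprecated, and on later pre-2022 releases the equivalent initialization goes through IECore, with read_network replacing the IENetwork constructor and query_network replacing get_supported_layers. A hedged sketch of that rewrite (not code from this repository; exact method availability varies by release):

    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, 'CPU')
    net = ie.read_network(model=model_xml, weights=model_bin)
    if 'CPU' in args.device:
        supported_layers = ie.query_network(network=net, device_name='CPU')
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if not_supported_layers:
            log.error("Following layers are not supported on CPU: %s", ', '.join(not_supported_layers))
            sys.exit(1)
    exec_net = ie.load_network(network=net, device_name=args.device, num_requests=2)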

github opencv / openvino_training_extensions / tensorflow_toolkit / lpr / tools / infer_ie.py
def load_ir_model(model_xml, device, plugin_dir, cpu_extension):
  model_bin = os.path.splitext(model_xml)[0] + ".bin"

  # initialize plugin
  log.info("Initializing plugin for %s device...", device)
  plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
  if cpu_extension and 'CPU' in device:
    plugin.add_cpu_extension(cpu_extension)

  # read IR
  log.info("Reading IR...")
  net = IENetwork(model=model_xml, weights=model_bin)

  if "CPU" in device:
    supported_layers = plugin.get_supported_layers(net)
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if not_supported_layers:
      log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
                device, ', '.join(not_supported_layers))
      log.error("Please try to specify cpu extensions library path in sample's command line parameters using "
                "--cpu_extension command line argument")
      sys.exit(1)
github movidius / ncappzoo / apps / gesture_piarm / gesture_ssd_async_wout_robot.py View on Github external
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
github opencv / open_model_zoo / demos / python_demos / affinity_setting_demo / affinity_setting_demo.py View on Github external
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    assert args.device.split(':')[0] == "HETERO", "This demo supports only Hetero Plugin. " \
                                                  "Please specify correct device, e.g. HETERO:FPGA,CPU"
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    net_ops = set([l.type for l in net.layers.values()])
    if not any([op == "Convolution" for op in net_ops]):
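The excerpt breaks off while verifying that the network contains at least one Convolution layer, the layer type whose affinity this demo manipulates. With the HETERO plugin, the usual next steps are to let the plugin assign default affinities for the priority list and then override individual layers by hand before loading the network. A hedged sketch; which layers get pinned to which device here is only an example:

        log.error("Demo expects a network with at least one Convolution layer")
        sys.exit(1)

    # Assign default per-layer affinities for the HETERO priority list,
    # then pin selected layers to a specific device by hand
    plugin.set_initial_affinity(net)
    for layer in net.layers.values():
        if layer.type == "Convolution":
            layer.affinity = "CPU"

    log.info("Loading network to the plugin with manual affinities...")
    exec_net = plugin.load(network=net)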