How to use the openvino.inference_engine.IECore class in openvino

To help you get started, we’ve selected a few openvino examples, based on popular ways IECore is used in public projects.

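All of the snippets below follow the same basic workflow: create an IECore, read an IR model, load it onto a device, and run inference. Here is a minimal, self-contained sketch of that workflow; the model path, input image, and device name are placeholders to substitute with your own:

import os

import cv2
from openvino.inference_engine import IECore, IENetwork

model_xml = "model.xml"                                   # placeholder IR paths
model_bin = os.path.splitext(model_xml)[0] + ".bin"

ie = IECore()                                             # create the engine core
net = IENetwork(model=model_xml, weights=model_bin)       # read the IR

input_blob = next(iter(net.inputs))                       # first input node name
output_blob = next(iter(net.outputs))                     # first output node name
n, c, h, w = net.inputs[input_blob].shape                 # NCHW input shape

exec_net = ie.load_network(network=net, device_name="CPU")

image = cv2.resize(cv2.imread("input.jpg"), (w, h))       # placeholder image
image = image.transpose((2, 0, 1)).reshape((n, c, h, w))  # HWC -> NCHW
result = exec_net.infer(inputs={input_blob: image})
print(result[output_blob].shape)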

github movidius / ncappzoo / tensorflow / facenet / facenet.py View on Github
def main():
    global network_input_h, network_input_w
    ie = IECore()
    net = IENetwork(model = ir, weights = ir[:-3] + 'bin')
    input_blob = next(iter(net.inputs))
    output_blob = next(iter(net.outputs))
    
    exec_net = ie.load_network(network = net, device_name = DEVICE)
    n, c, network_input_h, network_input_w = net.inputs[input_blob].shape

    # Read the image
    validated_image = cv2.imread(validated_image_filename)
    if validated_image is None:
        print("Cannot read image.")
        exit(1)

    # Preprocess the image
    preprocessed_image = preprocess_image(validated_image)
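
The excerpt ends after preprocessing; a hedged sketch of the inference step that would typically follow (the preprocess_image helper and any face-match threshold logic belong to the full sample and are not shown here):

    # Hypothetical continuation: run inference and take the face embedding.
    res = exec_net.infer({input_blob: preprocessed_image})
    face_embedding = res[output_blob]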
github opencv / dldt / inference-engine / ie_bridges / python / sample / style_transfer_sample / style_transfer_sample.py View on Github
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Creating Inference Engine")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
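
After the device check, the full sample goes on to load the network and run the style transfer. A condensed, hedged sketch of those steps, assuming cv2 and numpy (as np) are imported; args.input and the single-image handling are assumptions about the sample's argument parser:

    # Hypothetical continuation of the sample above.
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    n, c, h, w = net.inputs[input_blob].shape
    exec_net = ie.load_network(network=net, device_name=args.device)

    image = cv2.resize(cv2.imread(args.input[0]), (w, h))
    res = exec_net.infer(inputs={input_blob: image.transpose((2, 0, 1))})
    out_img = res[out_blob][0].transpose((1, 2, 0))       # CHW -> HWC
    cv2.imwrite("out.bmp", np.clip(out_img, 0, 255).astype(np.uint8))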
github opencv / open_model_zoo / demos / python_demos / object_detection_demo_ssd_async / object_detection_demo_ssd_async.py View on Github
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
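
What distinguishes this demo is its use of asynchronous inference. A minimal hedged sketch of the async request pattern it builds on; frame, input_blob, and out_blob stand in for values the full demo derives from the video stream and the network:

    # Hypothetical sketch: two requests let frame decoding overlap with inference.
    exec_net = ie.load_network(network=net, num_requests=2, device_name=args.device)
    cur_request_id, next_request_id = 0, 1

    exec_net.start_async(request_id=cur_request_id, inputs={input_blob: frame})
    if exec_net.requests[cur_request_id].wait(-1) == 0:   # 0 means the request finished OK
        detections = exec_net.requests[cur_request_id].outputs[out_blob]
    cur_request_id, next_request_id = next_request_id, cur_request_id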
github movidius / ncappzoo / apps / simple_classifier_py / run.py View on Github
def infer(image = '../../data/images/nps_electric_guitar.png', ir = '../../caffe/SqueezeNet/squeezenet_v1.0.xml', labels = '../../data/ilsvrc12/synset_words.txt', mean = None, top = 1):

    ####################### 1. Setup Plugin and Network #######################
    # Select the myriad plugin and IRs to be used
    ie = IECore()
    net = IENetwork(model = ir, weights = ir[:-3] + 'bin')

    # Set up the input and output blobs
    input_blob = next(iter(net.inputs))
    output_blob = next(iter(net.outputs))
    input_shape = net.inputs[input_blob].shape
    output_shape = net.outputs[output_blob].shape
    
    # Display model information
    display_info(input_shape, output_shape, image, ir, labels, mean)
    
    # Load the network and get the network shape information
    exec_net = ie.load_network(network = net, device_name = DEVICE)
    n, c, h, w = input_shape

    # Prepare the category labels for reporting results
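
The excerpt stops before the inference call; a hedged sketch of the classification step that typically follows (assumes numpy is imported as np, and that the labels file uses the synset_words.txt format of "id label" per line):

    # Hypothetical continuation: preprocess, infer, and print the top results.
    frame = cv2.resize(cv2.imread(image), (w, h)).transpose((2, 0, 1))
    res = exec_net.infer({input_blob: frame})[output_blob].squeeze()
    with open(labels) as f:
        categories = [line.split(' ', 1)[1].strip() for line in f]
    for i in np.argsort(res)[::-1][:top]:
        print("{} ({:.2f}%)".format(categories[i], res[i] * 100))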
github movidius / ncappzoo / caffe / SSD_Mobilenet / ssd_mobilenet.py View on Github
    # Window properties
    cv2.namedWindow(SSD_WINDOW_NAME, cv2.WINDOW_AUTOSIZE)
    cv2.resizeWindow(SSD_WINDOW_NAME, (640, 360))
    cv2.moveWindow(SSD_WINDOW_NAME, 10, 10)
    # Determine if using cam or image input
    if ARGS.input == 'cam':
        input_stream = 0
    else:
        input_stream = ARGS.input
        assert os.path.isfile(ARGS.input), "Specified input file doesn't exist"
    
    cap = cv2.VideoCapture(input_stream)
    
    ####################### 1. Setup Plugin and Network #######################
    # Set up the inference engine core and load the IR files
    ie = IECore()
    net = IENetwork(model = ir, weights = ir[:-3] + 'bin')

    # Get the input and output node names
    input_blob = next(iter(net.inputs))
    output_blob = next(iter(net.outputs))
    
    # Get the input and output shapes from the input/output nodes
    input_shape = net.inputs[input_blob].shape
    output_shape = net.outputs[output_blob].shape
    n, c, h, w = input_shape
    x, y, detections_count, detections_size = output_shape
        
    # Display model information
    display_info(input_shape, output_shape, input_stream, ir, labels, show_display)
    
    # Load the network and read a frame
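
The excerpt ends just before the network is loaded. A condensed, hedged sketch of the detection loop such SSD samples run; DEVICE is assumed to be defined elsewhere in the sample, and each row of the standard IE SSD output blob is [image_id, label, conf, xmin, ymin, xmax, ymax]:

    # Hypothetical continuation: load the network, then read and infer frames.
    exec_net = ie.load_network(network=net, device_name=DEVICE)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
        res = exec_net.infer({input_blob: blob})[output_blob]
        for detection in res[0][0]:
            if detection[2] > 0.5:                        # confidence threshold
                xmin = int(detection[3] * frame.shape[1])
                ymin = int(detection[4] * frame.shape[0])
                xmax = int(detection[5] * frame.shape[1])
                ymax = int(detection[6] * frame.shape[0])
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        cv2.imshow(SSD_WINDOW_NAME, frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break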
github incluit / OpenVino-Driver-Behaviour / python_utils / blindspot_assistance / object_detection_demo_ssd_async.py View on Github
    print("Welcome to Blindspot Assistance")

    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    
    if (args.sounds):
        soundWelcome = Thread(target = play, args = (path,'welcome.mp3'))
        soundLeft = Thread(target = play, args = (path,'select_left.mp3'))

    if args.sounds: soundWelcome.start()

    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"
    
    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)
    
    if args.sounds: soundWelcome.join()
    
    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
github opencv / open_model_zoo / demos / python_demos / object_detection_demo_yolov3_async / object_detection_demo_yolov3_async.py View on Github
def main():
    args = build_argparser().parse_args()

    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # ------------- 1. Plugin initialization for specified device and load extensions library if specified -------------
    log.info("Creating Inference Engine...")
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # -------------------- 2. Reading the IR generated by the Model Optimizer (.xml and .bin files) --------------------
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    # ------------------------- 3. Check that the device plugin supports all network layers -------------------------
    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(args.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                      "or --cpu_extension command line argument")
github intel-iot-devkit / store-traffic-monitor-python / Jupyter / inference.py View on Github
        :param cpu_extension: extension for the CPU device
        :param device: Target device
        :param input_size: Number of input layers
        :param output_size: Number of output layers
        :param num_requests: Number of inference requests to create. Limited to device capabilities.
        :param plugin: Plugin for specified device
        :return:  Shape of input layer
        """

        model_xml = model
        model_bin = os.path.splitext(model_xml)[0] + ".bin"
        # Plugin initialization for specified device
        # and load extensions library if specified
        if not plugin:
            log.info("Initializing plugin for {} device...".format(device))
            self.plugin = IECore()
        else:
            self.plugin = plugin

        if cpu_extension and 'CPU' in device:
            self.plugin.add_extension(cpu_extension, "CPU")

        # Read IR
        log.info("Reading IR...")
        self.net = IENetwork(model=model_xml, weights=model_bin)
        log.info("Loading IR to the plugin...")

        if "CPU" in device:
            supported_layers = self.plugin.query_network(self.net, "CPU")
            not_supported_layers = \
                [l for l in self.net.layers.keys() if l not in supported_layers]
            if len(not_supported_layers) != 0:
                log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                          format(device, ', '.join(not_supported_layers)))
                sys.exit(1)
github opencv / dldt / inference-engine / ie_bridges / python / sample / hello_query_device / hello_query_device.py View on Github
def main():
    ie = IECore()
    print("Available devices:")
    for device in ie.available_devices:
        print("\tDevice: {}".format(device))
        print("\tMetrics:")
        for metric in ie.get_metric(device, "SUPPORTED_METRICS"):
            try:
                metric_val = ie.get_metric(device, metric)
                print("\t\t{}: {}".format(metric, param_to_string(metric_val)))
            except TypeError:
                print("\t\t{}: UNSUPPORTED TYPE".format(metric))

        print("\n\tDefault values for device configuration keys:")
        for cfg in ie.get_metric(device, "SUPPORTED_CONFIG_KEYS"):
            try:
                cfg_val = ie.get_config(device, cfg)
                print("\t\t{}: {}".format(cfg, param_to_string(cfg_val)))
            except TypeError:
                print("\t\t{}: UNSUPPORTED TYPE".format(cfg))