Intel® Distribution of OpenVINO™ Toolkit
Community assistance for the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision on Intel® platforms.

OpenVINO in Python cannot be packaged into an exe with PyInstaller

zhang__guoxing
Beginner

Recently I used OpenVINO (Python API) to implement a small program. The Python script runs normally on my computer, but when I use PyInstaller to package it as a single exe, double-clicking the exe makes it stop working with no other information. Is there anything I need to pay attention to when a script that imports openvino is packaged into an exe? Or is packaging simply not supported? Thank you very much!
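For reference, the packaging step is roughly the sketch below (a sketch only: the helper and script file names are placeholders, and bundling the IR files and the CPU extension DLL via --add-data / --add-binary is an assumption about what PyInstaller may need here, not a confirmed fix):

# build_exe.py -- hypothetical helper, equivalent to running PyInstaller from
# the command line. The paths mirror the ones used in the script below and are
# placeholders for the real layout on disk ("src;dest" syntax is for Windows).
import PyInstaller.__main__

PyInstaller.__main__.run([
    "face_demo.py",            # placeholder name for the script shown below
    "--onefile",
    # bundle the IR model files next to the frozen code
    "--add-data", "inf_model/face-detection-retail-0005.xml;inf_model",
    "--add-data", "inf_model/face-detection-retail-0005.bin;inf_model",
    # the CPU extension DLL loaded by ie.add_extension()
    "--add-binary", "cpu_extension_avx2.dll;.",
])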
 

from __future__ import print_function
import sys
import cv2
import numpy as np
import logging as log
import time
from openvino.inference_engine import IENetwork, IECore

def cam_setting(cap,camera_width,camera_height):
    cap.set(cv2.CAP_PROP_FPS,30)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,camera_height)

def draw_res(res, frame, img_shape):
    # Each row of res is [image_id, label, conf, x_min, y_min, x_max, y_max] with
    # normalized coordinates; the frame is square (img_shape x img_shape), so a
    # single scale factor works for both axes.
    for i in range(res.shape[0]):
        l_pos = (int(res[i, 3] * img_shape), int(res[i, 4] * img_shape))
        r_pos = (int(res[i, 5] * img_shape), int(res[i, 6] * img_shape))
        cv2.rectangle(frame, l_pos, r_pos, (255, 255, 255), 1)
    return frame

def inf_img(exec_net, input_blob, frame, out_blob):
    image = frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
    # Start sync inference (time.perf_counter replaces time.clock, which was removed in Python 3.8)
    infer_start = time.perf_counter()
    res = exec_net.infer(inputs={input_blob: image})
    log.info("inference in synchronous mode FPS {}".format(1 / (time.perf_counter() - infer_start)))
    res = res[out_blob]
    return res

def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    model_xml = "inf_model/face-detection-retail-0005.xml"
    model_bin = "inf_model/face-detection-retail-0005.bin"

    # Plugin initialization for specified device and load extensions library if specified

    ie = IECore()
    log.info("Creating Inference Engine sucess ")
    cpu_extension = "cpu_extension_avx2.dll"
    device = "CPU"

    if cpu_extension and 'CPU' in device:
        ie.add_extension(cpu_extension, "CPU")
    # ie.add_extension(None, "CPU")
    # Read IR
    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
    net = IENetwork(model=model_xml, weights=model_bin)

    supported_layers = ie.query_network(net, "CPU")
    not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    if len(not_supported_layers) != 0:
        log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                  format("CPU", ', '.join(not_supported_layers)))
        log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
                  "or --cpu_extension command line argument")
        sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
    assert len(net.outputs) == 1, "Sample supports only single output topologies"

    log.info("Preparing input blobs")
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    # net.batch_size = len(args.input)
    net.batch_size = 1
    # img_path = "test_img/test_img.jpeg"
    # cap = cv2.VideoCapture("sample-videos-master/sample-videos-master/classroom.mp4")
    cap = cv2.VideoCapture(0)
    cam_setting(cap, 1280, 720)

    # Read and pre-process input images
    n, c, h, w = net.inputs[input_blob].shape
    print("n/c/h/w: ",net.inputs[input_blob].shape)
    images = np.ndarray(shape=(n, c, h, w))
    # Loading model to the plugin
    plugin_start_time = time.perf_counter()
    exec_net = ie.load_network(network=net, device_name="CPU")
    log.info("Loading model to the plugin use time {}".format(time.perf_counter() - plugin_start_time))

    itt = 1
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # the model expects a 300x300 input, so resize before the HWC->CHW transpose
        frame = cv2.resize(frame, (300, 300), interpolation=cv2.INTER_AREA)
        if n != 1:
            pack_img_start = time.perf_counter()
            for i in range(n):
                image = frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
                images[i] = image
            log.info("pack img as batch {} use time {}".format(net.batch_size, time.perf_counter() - pack_img_start))
            # Start sync inference
            infer_start = time.perf_counter()
            res = exec_net.infer(inputs={input_blob: images})
            log.info("inference in synchronous mode use time {}".format(time.perf_counter() - infer_start))
            log.info("Processing output blob")
            res = res[out_blob]
        else:
            if itt % 1 == 0:  # runs every frame; raise the modulus to skip frames
                res = inf_img(exec_net, input_blob, frame, out_blob)
                # keep detections with label 1 (face) and confidence above 0.80
                useful_res = res[0, 0][np.where(res[0, 0][:, 1] == 1)]
                useful_res = useful_res[useful_res[:, 2] > 0.80]
                frame = draw_res(useful_res, frame, img_shape=300)
            cv2.imshow("yourimg", frame)
            if cv2.waitKey(25) & 0xFF == ord("q"):
                break

        itt = itt + 1

    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()

 

2 Replies
Roy_A_Intel
Employee

Hi Zhang,

We haven't tried packaging with PyInstaller. However, some good news: the next release of OpenVINO, R3, will include a package manager to handle this for you.

ma__jie
Beginner

I have met the same issue. The exe file can't load the pre-trained model. Did you solve this puzzle?
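One likely cause with a --onefile build is that relative paths such as "inf_model/face-detection-retail-0005.xml" no longer resolve once the script is frozen. A minimal sketch of the usual workaround, assuming the IR files were bundled with --add-data, is to resolve paths against PyInstaller's extraction directory (sys._MEIPASS):

import os
import sys

def resource_path(rel_path):
    # PyInstaller --onefile unpacks bundled data files to a temporary folder
    # exposed as sys._MEIPASS at run time; when running the plain script,
    # fall back to the directory containing this file.
    base = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))
    return os.path.join(base, rel_path)

model_xml = resource_path("inf_model/face-detection-retail-0005.xml")
model_bin = resource_path("inf_model/face-detection-retail-0005.bin")

With a helper like this, model_xml and model_bin point at the unpacked copies both when the program is frozen and when it is run as a plain script.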
