I am working with the OpenVINO toolkit, trying to get two pieces of Python code to agree on whether the inference results are correct. Obviously the results returned by the first program must match what the second program expects as its input. The trouble is that nobody seems willing to define what these results should be. I have included both programs below in the hope that someone can make sense of this. Any ideas?
######### Command line
python /home/workspace/inference.py -m /home/workspace/models/human-pose-estimation-0001.xml -b /home/workspace/models/human-pose-estimation-0001.bin -i "images/sitting-on-car.jpg" -t "POSE" -r "s"
python /home/workspace/inference.py -m /home/workspace/models/text-detection-0004.xml -b /home/workspace/models/text-detection-0004.bin -i "images/sign.jpg" -t "TEXT" -r "s"
python /home/workspace/inference.py -m /home/workspace/models/vehicle-attributes-recognition-barrier-0039.xml -b /home/workspace/models/vehicle-attributes-recognition-barrier-0039.bin -i "images/blue-car.jpg" -t "CAR_META" -r "s"
################### First program (inference.py).
import argparse
import os
import cv2
import numpy as np
#from openvino.inference_engine import IENetwork, IECore
import time
from openvino.inference_engine import IECore,IENetwork,IEPlugin
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
def get_args():
    '''
    Gets the arguments from the command line.
    '''
    parser = argparse.ArgumentParser("Load an IR into the Inference Engine")
    # -- Create the descriptions for the commands
    m_desc = "The location of the model XML file"
    c_desc = "CPU extension file location, if applicable"
    d_desc = "Device, if not CPU (GPU, FPGA, MYRIAD)"
    i_desc = "The location of the input image"
    t_desc = "The type of model: POSE, TEXT or CAR_META"
    b_desc = "The location of the bin file"
    r_desc = "The request type"
    # -- Add required and optional groups
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    optional = parser.add_argument_group('optional arguments')
    # -- Create the arguments
    #parser.add_argument("-m", help=m_desc)
    required.add_argument("-i", help=i_desc, required=True)
    required.add_argument("-m", help=m_desc, required=True)
    required.add_argument("-t", help=t_desc, required=True)
    optional.add_argument("-c", help=c_desc, default=None)
    optional.add_argument("-d", help=d_desc, default="CPU")
    required.add_argument("-b", help=b_desc, required=True)
    required.add_argument("-r", help=r_desc, required=True)
    args = parser.parse_args()
    return args
def async_inference(exec_net, input_blob, image, args):
    print("async_inference ")
    #ie = IECore()
    #net = IENetwork(args.m, args.b)
    #ie.add_extension(extension_path=CPU_EXTENSION, device_name="CPU")
    #exec_net = ie.load_network(net, "CPU")
    exec_net.start_async(request_id=0, inputs={input_blob: image})  # exec_net documentation seems to want plugin to initialize
    #inference_output = exec_net.requests[0].async_infer({input_blob:image})
    while True:
        status = exec_net.requests[0].wait(-1)
        if status == 0:
            break
        else:
            time.sleep(1)
    #status = exec_net.wait(-1)
    #print('Request status: {}'.format(status))
    print('Request status: {}'.format(exec_net.requests))
    return exec_net
    #return status
def sync_inference(exec_net, input_blob, image, args):
    print("sync_inference ")
    #ie = IECore()
    #net = IENetwork(args.m, args.b)
    #ie.add_extension(extension_path=CPU_EXTENSION, device_name="CPU")
    #exec_net = ie.load_network(net, "CPU")
    status = exec_net.infer({input_blob: image})
    print('Request status: {}'.format(status))
    return status
def preprocessing(input_image, height, width):
    print(input_image)
    image = cv2.resize(input_image, (width, height))
    image = image.transpose((2, 0, 1))
    image = image.reshape(1, 3, height, width)
    return image
def perform_inference(exec_net, request_type, input_image, type_model, args, net):
    print("Start of perform_inference")
    # Get input image
    image = cv2.imread(input_image)
    print(input_image)
    # Extract the input shape
    #n, c, h, w = input_shape
    #ie = IECore()
    #net = IENetwork(args.m, args.b)
    input_blob = next(iter(net.inputs))
    n, c, h, w = net.inputs[input_blob].shape
    # Grab the shape of the input
    # Preprocess it (applies for the IRs from the Pre-Trained Models lesson)
    if type_model == "POSE":
        preprocessed_image = preprocessing(image, 256, 456)
    if type_model == "TEXT":
        preprocessed_image = preprocessing(image, 768, 1280)
    if type_model == "CAR_META":
        preprocessed_image = preprocessing(image, 72, 72)
    #preprocessed_image = preprocessing(image, h, w)
    # Get the input blob for the inference request
    #input_blob = next(iter(exec_net.inputs))
    # Perform either synchronous or asynchronous inference
    request_type = request_type.lower()
    if request_type == 'a':
        output = async_inference(exec_net, input_blob, preprocessed_image, args)
        print("Processing Asynchronously")
    elif request_type == 's':
        output = sync_inference(exec_net, input_blob, preprocessed_image, args)
        print("Processing Synchronously")
    else:
        print("Unknown inference request type, should be 'A' or 'S'.")
        exit(1)
    # Return the output for testing purposes
    return output
def load_to_IE(args, CPU_EXTENSION):
    ### TODO: Load the Inference Engine API
    ie = IECore()
    net = IENetwork(args.m, args.b)
    print("Before load_network")
    ie.add_extension(extension_path=CPU_EXTENSION, device_name="CPU")
    exec_net = ie.load_network(net, "CPU")  # This is where it's failing for the TEXT image
    print("After load_network")
    #exec_net = ie.load_network(net, CPU_EXTENSION)
    ### TODO: Load IR files into their related class
    ### TODO: Add a CPU extension, if applicable. It's suggested to check
    #plugin = IEPlugin(device="CPU")
    #exec_net = ie.load(network=net)
    #plugin.add_cpu_extenstions(CPU_EXTENSION)
    #exec_net = ie.query_network(network=net, device_name="CPU")
    supported_layers = ie.query_network(network=net, device_name="CPU")  # Trying to get the supported_layers here.
    #print("1. Supported layers: {} " .format(supported_layers))  # messes with exec_net
    unsupported_layers = [l for l in net.layers.keys() if l not in supported_layers]
    #print("2. Unsupported layers: {} " .format(unsupported_layers))
    if len(unsupported_layers) != 0:
        return
    ### your code for unsupported layers for practice before
    ### implementing this. Not all of the models may need it.
    ### TODO: Get the supported layers of the network
    ### TODO: Check for any unsupported layers, and let the user
    ###       know if anything is missing. Exit the program, if so.
    #print("2. exec_net: {}" .format(exec_net))
    ### TODO: Load the network into the Inference Engine
    perform_inference(exec_net, args.r, args.i, args.t, args, net)
    print("IR successfully loaded into Inference Engine.")
    return
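For what it's worth, the test program below does not call load_to_IE() and perform_inference() with the signatures I use above. Here is a minimal sketch of the interface the tests appear to expect, based purely on my reading of the test code; the .bin path derivation and the resize-to-the-network's-input-shape step are my assumptions, not anything official.
# --- Sketch only: interface inferred from the test program below ---
import os
import cv2
from openvino.inference_engine import IECore, IENetwork

def load_to_IE(model_xml, cpu_extension):
    model_bin = os.path.splitext(model_xml)[0] + ".bin"   # assumed to sit next to the .xml
    ie = IECore()
    net = IENetwork(model=model_xml, weights=model_bin)
    ie.add_extension(cpu_extension, "CPU")
    exec_net = ie.load_network(net, "CPU")
    input_blob = next(iter(net.inputs))
    input_shape = net.inputs[input_blob].shape
    # The test unpacks two values: exec_net, input_shape = load_to_IE(...)
    return exec_net, input_shape

def perform_inference(exec_net, request_type, input_image, input_shape):
    n, c, h, w = input_shape
    image = cv2.imread(input_image)
    image = cv2.resize(image, (w, h)).transpose((2, 0, 1)).reshape(1, c, h, w)
    input_blob = next(iter(exec_net.inputs))
    if request_type.lower() == "s":
        # Synchronous: the test indexes result[output_blob].shape, so return the
        # dict that exec_net.infer() gives back.
        return exec_net.infer({input_blob: image})
    # Asynchronous: the test reads exec_net.requests[0].outputs, so return the
    # ExecutableNetwork once the request has completed.
    exec_net.start_async(request_id=0, inputs={input_blob: image})
    exec_net.requests[0].wait(-1)
    return exec_net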
############# Second program
from helpers import load_to_IE, preprocessing
from inference import perform_inference
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
MODEL_PATH = "/home/workspace/models/"
OUTPUT_SHAPES = {
    "POSE": {"Mconv7_stage2_L1": (1, 38, 32, 57),
             "Mconv7_stage2_L2": (1, 19, 32, 57)},
    "TEXT": {"model/link_logits_/add": (1, 16, 192, 320),
             "model/segm_logits/add": (1, 2, 192, 320)},
    "CAR META": {"color": (1, 7, 1, 1),
                 "type": (1, 4, 1, 1)}
}
def pose_test():
    counter = 0
    model = MODEL_PATH + "human-pose-estimation-0001.xml"
    image = "images/sitting-on-car.jpg"
    counter += test(model, "POSE", image)
    return counter
def text_test():
    counter = 0
    model = MODEL_PATH + "text-detection-0004.xml"
    image = "images/sign.jpg"
    counter += test(model, "TEXT", image)
    return counter
def car_test():
    counter = 0
    model = MODEL_PATH + "vehicle-attributes-recognition-barrier-0039.xml"
    image = "images/blue-car.jpg"
    counter += test(model, "CAR META", image)
    return counter
def test(model, model_type, image):
    # Synchronous Test
    counter = 0
    try:
        # Load IE separately to check InferRequest latency
        exec_net, input_shape = load_to_IE(model, CPU_EXTENSION)
        result = perform_inference(exec_net, "S", image, input_shape)
        output_blob = next(iter(exec_net.outputs))
        # Check for matching output shape to expected
        assert result[output_blob].shape == OUTPUT_SHAPES[model_type][output_blob]
        # Check latency is > 0; i.e. a request occurred
        assert exec_net.requests[0].latency > 0.0
        counter += 1
    except:
        print("Synchronous Inference failed for {} Model.".format(model_type))
    # Asynchronous Test
    try:
        # Load IE separately to check InferRequest latency
        exec_net, input_shape = load_to_IE(model, CPU_EXTENSION)
        exec_net = perform_inference(exec_net, "A", image, input_shape)
        output_blob = next(iter(exec_net.outputs))
        # Check for matching output shape to expected
        assert exec_net.requests[0].outputs[output_blob].shape == OUTPUT_SHAPES[model_type][output_blob]
        # Check latency is > 0; i.e. a request occurred
        assert exec_net.requests[0].latency > 0.0
        counter += 1
    except:
        print("Asynchronous Inference failed for {} Model.".format(model_type))
    return counter
def feedback(tests_passed):
    print("You passed {} of 6 tests.".format(int(tests_passed)))
    if tests_passed == 3:
        print("Congratulations!")
    else:
        print("See above for additional feedback.")
def main():
    counter = pose_test() + text_test() + car_test()
    feedback(counter)
if __name__ == "__main__":
    main()
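As far as I can tell, the only definition of a "correct" result in this test is the OUTPUT_SHAPES table plus a non-zero request latency. A quick way to dump what a synchronous run actually returns, so it can be compared against that table (assuming result is the dict handed back by exec_net.infer(), as in sync_inference() in the first program):
# result is the dict returned by exec_net.infer() in sync_inference()
for blob_name, data in result.items():
    print(blob_name, data.shape)
# For POSE the test expects:
#   Mconv7_stage2_L1 (1, 38, 32, 57)
#   Mconv7_stage2_L2 (1, 19, 32, 57)
# plus exec_net.requests[0].latency > 0.0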
________________________________________
Thanks for your input.
Randy
Email: rseedle@yahoo.com