# --- imports -----------------------------------------------------------------
from imutils.video import VideoStream
from imutils.video import FPS
from imutils.video import FileVideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os
import json
import sys
from openvino.inference_engine import IENetwork
from openvino.inference_engine import IECore

# --- load the person-detection-action-recognition model ----------------------
print("load model in NCS....")
ie = IECore()
net = ie.read_network(model="person-detection-action-recognition-0006.xml",
                      weights="person-detection-action-recognition-0006.bin")
print(net.outputs)

# Network I/O blob names: one input blob plus the detection / action-head
# output blobs this model exposes.
input_blob = next(iter(net.inputs))
out_blob_h1a1 = "ActionNet/action_heads/out_head_1_anchor_1"
out_blob_h2a1 = "ActionNet/action_heads/out_head_2_anchor_1"
out_blob_h2a2 = "ActionNet/action_heads/out_head_2_anchor_2"
out_blob_h2a3 = "ActionNet/action_heads/out_head_2_anchor_3"
out_blob_h2a4 = "ActionNet/action_heads/out_head_2_anchor_4"
out_blob_conf = "ActionNet/out_detection_conf"
out_blob_loc = "ActionNet/out_detection_loc"

# Load the network onto the plugin (MYRIAD = Intel Neural Compute Stick).
exec_net = ie.load_network(network=net, device_name="MYRIAD")
print("model loaded correctly...")
print("[INFO] starting video stream...")

# NOTE(review): the original kept several commented-out capture variants
# (picamera / cv2.VideoCapture on local .mp4/.m4v files / FileVideoStream);
# this version operates on a single still image instead of a stream.
time.sleep(2.0)

initial_w = 600
initial_h = 450
cont = 0

# Read the test image and convert it into the layout the network expects:
# resize to the model's (w, h), then HWC -> CHW, then add the batch dim.
frame = cv2.imread("test.jpg")
n, c, h, w = net.inputs[input_blob].shape
image = cv2.resize(frame, (w, h))
image = image.transpose((2, 0, 1))  # HWC -> CHW
image = image.reshape((n, c, h, w))
# --- run inference and dump every output blob to a text file -----------------
# Each network output blob is written (via print, as in the original) to its
# own text file for offline inspection.
_DUMP_FILES = [
    (out_blob_h1a1, "h1a1.txt"),
    (out_blob_h2a1, "h2a1.txt"),
    (out_blob_h2a2, "h2a2.txt"),
    (out_blob_h2a3, "h2a3.txt"),
    (out_blob_h2a4, "h2a4.txt"),
    (out_blob_loc, "location.txt"),
    (out_blob_conf, "conf.txt"),
]

# BUG FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
# time.perf_counter() is the documented replacement for interval timing.
start_time = time.perf_counter()

# Kick off an asynchronous inference request and block until it completes.
req_handle = exec_net.start_async(request_id=0, inputs={input_blob: image})
status = req_handle.wait()

# Dump each output tensor.  The original repeated the open/print/close
# sequence seven times and mislabelled the h2a1 blob as "shape h1a1" on
# stdout; the loop prints the actual blob name and `with` guarantees each
# file is closed even if a blob lookup raises.
for blob_name, out_path in _DUMP_FILES:
    res = req_handle.outputs[blob_name]
    print("dumping", blob_name, "->", out_path)
    with open(out_path, "w") as dump_file:
        print(res, file=dump_file)

sys.exit()