Intel® Distribution of OpenVINO™ Toolkit
Community assistance about the Intel® Distribution of OpenVINO™ toolkit, OpenCV, and all aspects of computer vision on Intel® platforms.

How to configure inference on NCS2

rnd-amg
Beginner

Hi Team,

I want to configure inference to run on my NCS2. How should I set up the code, and which lines must I change?

 

# TechVidvan Vehicle counting and Classification

# Import necessary packages


import cv2
import csv
import collections
import numpy as np
from tracker import *
import json
from datetime import datetime
import time
import threading
import aiohttp
import asyncio
import requests



# Initialize Tracker
tracker = EuclideanDistTracker()

# Initialize the videocapture object
cap = cv2.VideoCapture('data/sarinah5(cutted)2.mp4')
# cap = cv2.VideoCapture(0)
input_size = 320

# Detection confidence threshold
confThreshold = 0.2
nmsThreshold = 0.3

font_color = (0, 0, 255)
font_size = 0.5
font_thickness = 2

# Middle cross line position
middle_line_position = 225
up_line_position = middle_line_position - 30
down_line_position = middle_line_position + 30

# line 1 ===============

line1_start_horizontal_gap = 510
line1_start_vertical_gap = 230

line1_end_horizontal_gap = 450
line1_end_vertical_gap = 300

#===============================




#line 2===============

line2_start_horizontal_gap = 460
line2_start_vertical_gap = 325

line2_end_horizontal_gap = 590
line2_end_vertical_gap = 290

#===============================

# validation line ===============
vertical_validation_width_gap = 420
#===============================

# vertical line 2 ===============
vertical_width_gap_2 = 520
#===============================

# horizontal line 3 ===============
horizontal_height_gap_3 = 230
#===============================

# horizontal line 4 ===============
horizontal_height_gap_4 = 340
#===============================



# validation line, right side ===============
vertical_validation_width_gap_right_side = 160
#===============================

# vertical line 2 from right side ===============
vertical_width_gap_2_right_side = 250
#===============================

# horizontal line 3 from right side ===============
horizontal_height_gap_3_right_side = 240
#===============================

# horizontal line 4 from right side ===============
horizontal_height_gap_4_right_side = 300
#===============================



# counter line ==================
couter_line_left_start_horizontal_val = 460
couter_line_left_start_vertical_val = 325

couter_line_left_end_horizontal_val = 580
couter_line_left_end_vertical_val = 260
#===============================

# vertical line 4 ===============
left_start_horizontal_val = 390
left_start_vertical_val = 320

left_end_horizontal_val = 520
left_end_vertical_val = 250
#===============================


# Store Coco Names in a list
classesFile = "coco.names"
classNames = open(classesFile).read().strip().split('\n')
print(classNames)
print(len(classNames))

# class index for our required detection classes
required_class_index = [2, 0, 5, 7] #car, person, bus, truck

detected_classNames = []

# Model Files
modelConfiguration = 'yolov3-320.cfg'
modelWeights = 'yolov3-320.weights'

# configure the network model
net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)

# Configure the network backend and target device
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
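# NOTE: to run this model on an Intel NCS2 instead of a CUDA GPU, the two
# setPreferable* calls above are the lines to change. A minimal sketch,
# assuming your OpenCV build includes Inference Engine (OpenVINO) support:
# net.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)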

# Define random colour for each class
np.random.seed(42)
colors = np.random.randint(0, 255, size=(len(classNames), 3), dtype='uint8')


# Function for finding the center of a rectangle
def find_center(x, y, w, h):
    x1 = int(w / 2)
    y1 = int(h / 2)
    cx = x + x1
    cy = y + y1
    return cx, cy


# Lists for storing vehicle count information
temp_up_list = []
temp_down_list = []
temp_left_list = []
temp_right_list = []
up_list = [0, 0, 0, 0]
down_list = [0, 0, 0, 0]
left_list = [0, 0, 0, 0]   # car, person, bus, truck
right_list = [0, 0, 0, 0]  # car, person, bus, truck
noted_ids = []
noted_ids_right = []

# Function for counting vehicles
def count_vehicle(box_id, img):
    x, y, w, h, id, index = box_id

    # Find the center of the rectangle for detection
    center = find_center(x, y, w, h)
    ix, iy = center

    # 12th attempt (kept for reference) ==============================
    # if (ix >= left_start_horizontal_val-8 and iy >= left_start_vertical_val-8) or (ix >= left_end_horizontal_val-8 and iy >= left_end_vertical_val-8):
    #     if id not in temp_left_list and id not in noted_ids:
    #         temp_left_list.append(id)
    #         print('added', id)
    #         print(temp_left_list)
    #         print("\n")
    #         left_list[index] = left_list[index] + 1
    # elif ix >= couter_line_left_start_horizontal_val-40 and iy >= couter_line_left_start_vertical_val-25 or ix > couter_line_left_end_horizontal_val-10 and iy >= couter_line_left_end_vertical_val-10:
    #     if ix > couter_line_left_start_horizontal_val-10:
    #         print('yes')
    #         if id in temp_left_list:
    #             temp_left_list.remove(id)
    #             noted_ids.append(id)
    #             print('removed', id)
    #             print(temp_left_list)
    #             print("\n")
    #             left_list[index] = left_list[index] + 1
    # elif ix >= couter_line_left_start_horizontal_val-8 and iy >= couter_line_left_start_vertical_val-8 or ix > couter_line_left_end_horizontal_val-8 and iy >= couter_line_left_end_vertical_val-8:
    #     if ix > couter_line_left_start_horizontal_val-40:
    #         print('yes')
    #         if id in temp_left_list:
    #             temp_left_list.remove(id)
    #             noted_ids.append(id)
    #             print('removed', id)
    #             print(temp_left_list)
    #             print("\n")
    #             left_list[index] = left_list[index] + 1
    # =================================================================

    # 13th attempt ==============================
    # Validate a vehicle coming from the left side
    if ix > vertical_validation_width_gap and ix < vertical_width_gap_2:
        if id not in temp_left_list and id not in noted_ids:
            temp_left_list.append(id)

    # Count a vehicle coming from the left side
    elif ix > vertical_width_gap_2 and iy > horizontal_height_gap_3_right_side and iy < horizontal_height_gap_4_right_side:
        if id in temp_left_list:
            temp_left_list.remove(id)
            noted_ids.append(id)
            left_list[index] = left_list[index] + 1  # car, person, bus, truck
            write_to_log('left', index)

    # Validate a vehicle coming from the right side
    elif ix > vertical_validation_width_gap_right_side and ix < vertical_width_gap_2_right_side and iy > horizontal_height_gap_3_right_side and iy < horizontal_height_gap_4_right_side:
        if id not in temp_right_list and id not in noted_ids_right:
            temp_right_list.append(id)

    # Count a vehicle coming from the right side
    elif ix < vertical_validation_width_gap_right_side and iy > horizontal_height_gap_3_right_side and iy < horizontal_height_gap_4_right_side:
        if id in temp_right_list:
            temp_right_list.remove(id)
            noted_ids_right.append(id)
            right_list[index] = right_list[index] + 1
            write_to_log('right', index)
    # =================================================================

    # Draw a circle at the center of the rectangle
    cv2.circle(img, center, 2, (0, 0, 255), -1)
    # print(up_list, down_list)

# Function for finding the detected objects from the network output
def postProcess(outputs, img):
    global detected_classNames
    height, width = img.shape[:2]
    boxes = []
    classIds = []
    confidence_scores = []
    detection = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if classId in required_class_index:
                if confidence > confThreshold:
                    # print(classId)
                    w, h = int(det[2] * width), int(det[3] * height)
                    x, y = int((det[0] * width) - w / 2), int((det[1] * height) - h / 2)
                    boxes.append([x, y, w, h])
                    classIds.append(classId)
                    confidence_scores.append(float(confidence))

    # Apply Non-Max Suppression
    indices = cv2.dnn.NMSBoxes(
        boxes, confidence_scores, confThreshold, nmsThreshold)
    # When using a webcam, guard against empty detections with the if below
    # if len(indices) > 0:
    # print(classIds)

    for i in indices.flatten():
        x, y, w, h = boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]
        # print(x, y, w, h)

        color = [int(c) for c in colors[classIds[i]]]
        name = classNames[classIds[i]]
        detected_classNames.append(name)
        # Draw class name and confidence score
        cv2.putText(img, f'{name.upper()} {int(confidence_scores[i]*100)}%',
                    (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

        # Draw bounding rectangle
        cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
        detection.append([x, y, w, h, required_class_index.index(classIds[i])])

    # Update the tracker for each object
    boxes_ids = tracker.update(detection)
    for box_id in boxes_ids:
        count_vehicle(box_id, img)


def realTime():
    while True:
        success, img = cap.read()
        if not success:
            break
        img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
        ih, iw, channels = img.shape
        blob = cv2.dnn.blobFromImage(
            img, 1 / 255, (input_size, input_size), [0, 0, 0], 1, crop=False)

        # Set the input of the network
        net.setInput(blob)
        layersNames = net.getLayerNames()
        # flatten() keeps this working across OpenCV versions
        outputNames = [layersNames[i - 1]
                       for i in net.getUnconnectedOutLayers().flatten()]
        # Feed data to the network
        outputs = net.forward(outputNames)

        # Find the objects from the network output
        postProcess(outputs, img)

        # End user line
        cv2.line(img, (vertical_width_gap_2, 230), (vertical_width_gap_2, 340), (124, 252, 0), 2)
        cv2.line(img, (520, horizontal_height_gap_3), (iw, horizontal_height_gap_3), (124, 252, 0), 2)
        cv2.line(img, (520, horizontal_height_gap_4), (iw, horizontal_height_gap_4), (124, 252, 0), 2)

        # End user line, right side
        cv2.line(img, (vertical_validation_width_gap_right_side, horizontal_height_gap_3_right_side), (vertical_validation_width_gap_right_side, horizontal_height_gap_4_right_side), (124, 252, 0), 2)
        cv2.line(img, (0, horizontal_height_gap_3_right_side), (vertical_validation_width_gap_right_side, horizontal_height_gap_3_right_side), (124, 252, 0), 2)
        cv2.line(img, (0, horizontal_height_gap_4_right_side), (vertical_validation_width_gap_right_side, horizontal_height_gap_4_right_side), (124, 252, 0), 2)

        # Validation and counting lines, right side
        cv2.line(img, (vertical_validation_width_gap_right_side, 0), (vertical_validation_width_gap_right_side, ih), (255, 0, 0), 2)
        cv2.line(img, (vertical_width_gap_2_right_side, 0), (vertical_width_gap_2_right_side, ih), (255, 0, 0), 2)
        cv2.line(img, (0, horizontal_height_gap_3_right_side), (iw, horizontal_height_gap_3_right_side), (255, 0, 0), 2)
        # cv2.line(img, (0, horizontal_height_gap_3_right_side), (vertical_validation_width_gap_right_side, horizontal_height_gap_3_right_side), (255, 0, 0), 2)
        # cv2.line(img, (0, horizontal_height_gap_4_right_side), (vertical_validation_width_gap_right_side, horizontal_height_gap_4_right_side), (255, 0, 0), 2)

        # Draw counting texts in the frame
        cv2.putText(img, "Left", (140, 10), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_color, font_thickness)
        cv2.putText(img, "Right", (220, 10), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_color, font_thickness)

        cv2.putText(img, "Car:  " + str(left_list[0]) + " " + str(right_list[0]), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_color, font_thickness)
        cv2.putText(img, "Motorbike:  " + str(left_list[1]) + " " + str(right_list[1]), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_color, font_thickness)
        cv2.putText(img, "Bus:  " + str(left_list[2]) + " " + str(right_list[2]), (10, 80), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_color, font_thickness)
        cv2.putText(img, "Truck:  " + str(left_list[3]) + " " + str(right_list[3]), (10, 100), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_color, font_thickness)

        # Show the frames
        cv2.imshow('Output', img)

        if cv2.waitKey(1) == ord('q'):
            break
    # Write the vehicle counting information to a file and save it
    with open("data/data.csv", 'w') as f1:
        cwriter = csv.writer(f1)
        cwriter.writerow(['Direction', 'car', 'motorbike', 'bus', 'truck'])
        # This script counts left/right traffic, so save those lists
        # (the original up/down lists are never updated)
        left_list.insert(0, "Left")
        right_list.insert(0, "Right")
        cwriter.writerow(left_list)
        cwriter.writerow(right_list)
    # print("Data saved at 'data.csv'")

    # Finally release the capture object and destroy all active windows
    cap.release()
    cv2.destroyAllWindows()

last_minute = '0'
temp = {}
flag_write = False


def init_temp():
    global flag_write
    now = datetime.now()
    current_time = now.strftime("%H:%M")
    current_time_epoch = time.time()
    current_time_epoch_int = int(current_time_epoch)
    get_date_time_today = datetime.today().strftime('%Y-%m-%d')

    temp["time"] = current_time_epoch_int
    # temp["date"] = get_date_time_today
    # temp["time"] = current_time
    temp["ltr_car"] = 0
    temp["ltr_motobike"] = 0
    temp["ltr_bus"] = 0
    temp["ltr_truck"] = 0
    temp["rtl_car"] = 0
    temp["rtl_motobike"] = 0
    temp["rtl_bus"] = 0
    temp["rtl_truck"] = 0
    # temp["did"] = config["did"]
def write_to_log(position, index):
    global last_minute
    global temp
    global flag_write

    get_date_time_today = datetime.today().strftime('%Y-%m-%d')
    now = datetime.now()
    current_time = now.strftime("%H:%M")
    print('now', now)
    print('timestamp', now.timestamp())

    if temp == {}:
        init_temp()
        if position == 'left':
            if index == 0:
                temp["ltr_car"] = temp["ltr_car"] + 1
            elif index == 1:
                temp["ltr_motobike"] = temp["ltr_motobike"] + 1
            elif index == 2:
                temp["ltr_bus"] = temp["ltr_bus"] + 1
            elif index == 3:
                temp["ltr_truck"] = temp["ltr_truck"] + 1

        elif position == 'right':
            if index == 0:
                temp["rtl_car"] = temp["rtl_car"] + 1
            if index == 1:
                temp["rtl_motobike"] = temp["rtl_motobike"] + 1
            if index == 2:
                temp["rtl_bus"] = temp["rtl_bus"] + 1
            if index == 3:
                temp["rtl_truck"] = temp["rtl_truck"] + 1

        last_minute = current_time
    else:
        if position == 'left':
            if index == 0:
                temp["ltr_car"] = temp["ltr_car"] + 1
            elif index == 1:
                temp["ltr_motobike"] = temp["ltr_motobike"] + 1
            elif index == 2:
                temp["ltr_bus"] = temp["ltr_bus"] + 1
            elif index == 3:
                temp["ltr_truck"] = temp["ltr_truck"] + 1

        elif position == 'right':
            if index == 0:
                temp["rtl_car"] = temp["rtl_car"] + 1
            if index == 1:
                temp["rtl_motobike"] = temp["rtl_motobike"] + 1
            if index == 2:
                temp["rtl_bus"] = temp["rtl_bus"] + 1
            if index == 3:
                temp["rtl_truck"] = temp["rtl_truck"] + 1

        # Flush the accumulated counts to the log once per minute
        if current_time != last_minute:
            with open("sample.json", "a") as outfile:
                json.dump(temp, outfile)
                outfile.write('\n')
            init_temp()
            last_minute = current_time

    print(temp)
    print('last_minute_2', last_minute)
    print('===========================> \n')

async def printit():
    print("this function runs every 10 seconds")
    # + config["did"] + '&name=' + config["name"]
    myobj = {'text': 'haloo!'}
    # async.post(url, hooks={'response': do_something})
    # x = requests.post(url, data=myobj)
    # print(x.text)

    # NOTE: 'url' is not defined anywhere in this script; it must be set
    # before this coroutine is enabled in __main__ below.
    session = aiohttp.ClientSession()
    responses = await session.post(url, data=b'data')
    # print(responses.text)
    # await session.close()

    threading.Timer(10.0, printit).start()

if __name__ == '__main__':
    # asyncio.run(printit())
    realTime()

# from_static_image(image_file)
Regards,
rnd
Wan_Intel
Moderator

Hi Rnd-amg,

Thanks for reaching out to us.

For your information, you may refer to the following Inference Engine samples from the OpenVINO™ toolkit 2021.4.2 to configure inference on the Intel® Neural Compute Stick 2 (Intel® NCS2):

· Object Detection SSD C++ Sample

· Hello Classification C++ Sample

For more information, please refer to Use Inference Engine API to Implement Inference Pipeline; a minimal Python sketch of that pipeline follows.
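As a sketch only (assuming OpenVINO™ 2021.4 with the Python Inference Engine API and a model already converted to IR format; model.xml and model.bin below are placeholder file names), targeting the NCS2 comes down to loading the network on the MYRIAD device:

import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
# Read the IR files produced by the Model Optimizer (placeholder names)
net = ie.read_network(model="model.xml", weights="model.bin")
# "MYRIAD" is the device name for the Intel NCS2
exec_net = ie.load_network(network=net, device_name="MYRIAD")

# Prepare a dummy input matching the network's expected NCHW shape
input_blob = next(iter(net.input_info))
n, c, h, w = net.input_info[input_blob].input_data.shape
dummy = np.zeros((n, c, h, w), dtype=np.float32)

# Run synchronous inference on the NCS2
result = exec_net.infer({input_blob: dummy})

The same idea applies to the OpenCV DNN code you posted: keep the pipeline as it is and only change the backend/target calls, as noted in the comment added after net.setPreferableTarget above.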

 

 

Regards,

Wan


Wan_Intel
Moderator

Hi Rnd-amg,

Thanks for your question.

This thread will no longer be monitored since we have provided the requested information.

If you need any additional information from Intel, please submit a new question.

 

 

Best regards,

Wan

