Community
cancel
Showing results for 
Search instead for 
Did you mean: 
pkhan10
New Contributor I
331 Views

[REQUEST_BUSY] ERROR

hello,
I wrote a small class to automate multiple buffer requests,
but the first request always ends up in a REQUEST_BUSY error...
It's a quite random error...
Let me know what inputs you need.

196     def predict(self,frame):
    197         self.preprocess_frame(frame)
--> 198         self.exec_net.start_async(request_id=self.cursor_id, inputs={self.input_blob: self.in_frame})
    199         self.frames_buffer[self.cursor_id] = frame
    200         self.cursor_id = (self.cursor_id+1)% self.buffer_shape

ie_api.pyx in openvino.inference_engine.ie_api.ExecutableNetwork.start_async()

ie_api.pyx in openvino.inference_engine.ie_api.InferRequest.async_infer()

ie_api.pyx in openvino.inference_engine.ie_api.InferRequest.async_infer()

ie_api.pyx in openvino.inference_engine.ie_api.InferRequest._fill_inputs()

ie_api.pyx in openvino.inference_engine.ie_api.InferRequest.inputs.__get__()

ie_api.pyx in openvino.inference_engine.ie_api.InferRequest._get_blob_buffer()

RuntimeError: [REQUEST_BUSY] 

 

0 Kudos
2 Replies
Shubha_R_Intel
Employee
331 Views

Dear khandelwal, prateek,

Please see how classification_sample_async.py does it. Your code is missing 'wait'.

Hope it helps,

Thanks,

Shubha

pkhan10
New Contributor I
331 Views

Hello Shubha,
I am still not able to understand the logic here.
I referred to the SSD demo async example.

sharing the class here.
As you can see, I have used wait when requesting the output.

 

class async_infer(object):
    """Round-robin asynchronous inference over a fixed pool of OpenVINO
    infer requests.

    Frames are submitted with ``predict`` into a ring of ``buffer_shape``
    request slots; results are read back with ``postprocess_op``. The
    original frame (and an optional attribute) for each slot is kept so
    the caller can pair outputs with their inputs.
    """

    def __init__(self, buffer_shape=4):
        """
        buffer_shape : number of parallel infer requests (ring-buffer size).
        """
        self.buffer_shape = buffer_shape
        self.frames_buffer = [0] * buffer_shape  # original frame per slot
        self.attrs = [0] * buffer_shape          # optional per-frame attribute per slot
        self.cursor_id = 0                       # next request slot to submit into
        self.in_frame = None                     # last preprocessed input tensor
        self.frame_processed = 0                 # total frames submitted so far

    def load_model(self, cpu_exension_path, model_path, device):
        """Load the IR model and create ``buffer_shape`` infer requests.

        cpu_exension_path : path to the CPU extension library.
            NOTE(review): misspelled name kept for backward compatibility
            with existing callers.
        model_path : path to the model .xml (the .bin is derived from it).
        device : OpenVINO device string, e.g. "CPU".
        """
        self.model_xml = model_path
        self.model_bin = os.path.splitext(self.model_xml)[0] + ".bin"
        self.Device = device
        self.cpu_exension_path = cpu_exension_path
        try:
            self.plugin = IEPlugin(device=self.Device)
        except Exception as e:
            # NOTE(review): swallowing here means the next line fails with
            # AttributeError; kept as-is to preserve the original contract.
            print("this " + device + " Not available")
            print(e)
        self.plugin.add_cpu_extension(self.cpu_exension_path)
        net = IENetwork(model=self.model_xml, weights=self.model_bin)
        self.input_blob = next(iter(net.inputs))
        self.out_blob = list(net.outputs.keys())
        self.batch_size, self.channel, self.height, self.width = net.inputs[self.input_blob].shape
        # One request per ring-buffer slot, so request_id == cursor_id.
        self.exec_net = self.plugin.load(network=net, num_requests=self.buffer_shape)
        del net

    def preprocess_frame(self, frame):
        """Resize ``frame`` to the network input size, convert HWC -> CHW,
        and stash the NCHW tensor in ``self.in_frame``.
        """
        in_frame = cv2.resize(frame.copy(), (self.width, self.height))
        in_frame = in_frame.transpose((2, 0, 1))  # HWC -> CHW
        in_frame = in_frame.reshape((self.batch_size, self.channel, self.height, self.width))
        self.in_frame = in_frame

    def predict_sync(self, frame):
        """Run a blocking inference on request 0 and return the list of
        output blobs (one per entry in ``self.out_blob``).
        """
        self.preprocess_frame(frame)
        self.exec_net.requests[0].infer({self.input_blob: self.in_frame})
        return [self.exec_net.requests[0].outputs[node] for node in self.out_blob]

    def predict(self, frame, attr=None):
        """Submit one frame asynchronously into the next ring-buffer slot.

        attr : optional attribute to remember alongside the frame.

        Fix for [REQUEST_BUSY]: a request slot must not be re-submitted
        while its previous inference is still running, so we wait() on the
        slot before calling start_async (cf. classification_sample_async).
        """
        self.preprocess_frame(frame)
        try:
            # Block until the slot we are about to reuse is idle; on a
            # never-started request this returns immediately.
            self.exec_net.requests[self.cursor_id].wait(-1)
            self.exec_net.start_async(request_id=self.cursor_id,
                                      inputs={self.input_blob: self.in_frame})
            self.frames_buffer[self.cursor_id] = frame
            if attr is not None:
                self.attrs[self.cursor_id] = attr
            self.cursor_id = (self.cursor_id + 1) % self.buffer_shape
            self.frame_processed += 1
        except Exception as e:
            # Best-effort submission: report and drop the frame (original behavior).
            print(e)

    def postprocess_op(self, request_id=None):
        """Wait for and fetch the results of one request slot.

        request_id : slot to read; defaults to ``self.cursor_id``, which —
            once the ring is full — is the oldest in-flight request.

        Returns (frame, attr, outputs) on success; returns None (implicitly)
        if wait() reports a non-zero status.
        """
        if request_id is None:
            request_id = self.cursor_id
        if self.exec_net.requests[request_id].wait(-1) == 0:
            self.output = [self.exec_net.requests[request_id].outputs[node]
                           for node in self.out_blob]
            op_frame = self.frames_buffer[request_id]
            attr = self.attrs[request_id]
            return op_frame, attr, self.output

 

Reply