The Python version runs and gives correct results:
import cv2
import numpy as np

# letterbox: scale to (new_w, new_h), then pad into a (net_h, net_w) canvas
resized = cv2.resize(image[:, :, ::-1] / 255., (new_w, new_h))  # BGR -> RGB, normalize to [0, 1]
new_image = np.ones((net_h, net_w, 3)) * 0.5  # gray padding
new_image[(net_h - new_h) // 2:(net_h + new_h) // 2, (net_w - new_w) // 2:(net_w + new_w) // 2, :] = resized
new_image = np.expand_dims(new_image, 0)  # add a batch dimension
batch_input = new_image  # batch_input holds normalized float values
batch_input = batch_input.transpose(0, 3, 1, 2)  # NHWC -> NCHW
self.model.start_async(request_id=self.inferred_cnt, inputs={self.input_blob: batch_input})
The C++ version fails:
InferenceEngine::InputsDataMap inputInfo_vehicle(network_vehicle.getInputsInfo());
InferenceEngine::InputInfo::Ptr& inputInfoFirst_vehicle = inputInfo_vehicle.begin()->second;
inputInfoFirst_vehicle->setPrecision(InferenceEngine::Precision::FP16); // FP16 fails; U8 succeeds
inputInfoFirst_vehicle->getInputData()->setLayout(InferenceEngine::Layout::NCHW);
inputName_vehicle = inputInfo_vehicle.begin()->first;
exenet_vehicle = inferplugin.LoadNetwork(network_vehicle, "CPU", config); // fails with FP16, succeeds with U8
// and the routine that fills the input blob:
virtual void setImage(const cv::Mat& cvimg)
{
    uint8_t* blob_data;
    InferenceEngine::Blob::Ptr inputBlob;
    InferenceEngine::SizeVector blobSize;
    size_t width, height, channels;

    inputBlob = m_request->GetBlob(m_inputName);
    matU8ToBlob<uint8_t>(cvimg, inputBlob);
    blobSize = inputBlob->getTensorDesc().getDims();
    width = blobSize[3];
    height = blobSize[2];
    channels = blobSize[1];
    blob_data = inputBlob->buffer().as<uint8_t*>();
    for (size_t c = 0; c < 3; c++)
    {
        for (size_t h = 0; h < height; h++)
        {
            for (size_t w = 0; w < width; w++)
            {
                // fails here: this assignment causes an invalid memory access
                blob_data[c * width * height + h * width + w] = cvimg.at<float>(0, h, w * 3 + c);
            }
        }
    }
}
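
For comparison, here is a minimal sketch of a setImage that mirrors the Python preprocessing, assuming the input precision is set to FP32 (see the reply below) and that cvimg is a CV_8UC3 BGR image already resized to the network's input dimensions; m_request and m_inputName are reused from the snippet above:

// Sketch only: assumes setPrecision(InferenceEngine::Precision::FP32) was
// called on the input, and that cvimg is CV_8UC3 (BGR) at the network size.
virtual void setImage(const cv::Mat& cvimg)
{
    InferenceEngine::Blob::Ptr inputBlob = m_request->GetBlob(m_inputName);
    InferenceEngine::SizeVector blobSize = inputBlob->getTensorDesc().getDims();
    size_t channels = blobSize[1];
    size_t height = blobSize[2];
    size_t width = blobSize[3];

    // With an FP32 input, the buffer must be written as float, not uint8_t.
    float* blob_data = inputBlob->buffer().as<float*>();

    for (size_t c = 0; c < channels; c++)
        for (size_t h = 0; h < height; h++)
            for (size_t w = 0; w < width; w++)
                // HWC (OpenCV) -> NCHW (blob); reverse the channel index to
                // mirror the Python BGR -> RGB flip, and normalize to [0, 1]
                blob_data[c * width * height + h * width + w] =
                    cvimg.at<cv::Vec3b>((int)h, (int)w)[channels - 1 - c] / 255.0f;
}

The original loop likely crashes because it reads a 2-D CV_8UC3 Mat through the three-index at<float> overload, which computes addresses outside the image buffer.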
Dear stone, jesse,
Replace InferenceEngine::Precision::FP16 with:
InferenceEngine::Precision::I32 or InferenceEngine::Precision::FP32
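
Applied to the code above, that would be a one-line change (illustrative):

inputInfoFirst_vehicle->setPrecision(InferenceEngine::Precision::FP32); // instead of FP16

Note that with an FP32 input the blob buffer must then be filled as float* rather than uint8_t*, as in the sketch earlier in this thread.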
Hope it helps,
Thanks,
Shubha
