Hi, I am trying to customize C:\Program Files (x86)\IntelSWTools\openvino\inference_engine\samples\cpp\object_detection_sample_ssd so that it can be built as a DLL.
Below is my code:
__declspec(dllexport) void Classify_Image(unsigned char* img_pointer, long data_len,
                                          char* out_result, int length_of_out_result, int top_n_results)
{
    std::vector<unsigned char> inputImageBytes(img_pointer, img_pointer + data_len);
    cv::Mat image = cv::imdecode(inputImageBytes, cv::IMREAD_COLOR);
    //cv::imwrite("lala.jpg", image);
    if (inputInfo == nullptr) {
        initialize();
    }
    // --------------------------- 8. Create infer request -------------------------------------------------
    //slog::info << "Create infer request" << slog::endl;
    InferRequest infer_request = executable_network.CreateInferRequest();
    // -----------------------------------------------------------------------------------------------------
    std::vector<std::shared_ptr<unsigned char>> imagesData, originalImagesData;
    std::vector<size_t> imageWidths, imageHeights;
    //FormatReader::ReaderPtr reader("C:\\Users\\LaserTrac\\Desktop\\la.jpg");
    cv::Mat dst;
    cv::resize(image, dst, cv::Size(400, 225));
    cv::imwrite("lala_m.jpg", dst);
    //std::shared_ptr<unsigned char> originalData(image.data);
    std::shared_ptr<unsigned char> originalData(image.data);
    std::shared_ptr<unsigned char> data1(dst.data);
    //
    originalImagesData.push_back(originalData);
    imagesData.push_back(data1);
    imageWidths.push_back(1280);
    imageHeights.push_back(720);
    if (imagesData.empty()) throw std::logic_error("Valid input images were not found!");
    size_t batchSize = network.getBatchSize();
    if (batchSize != imagesData.size()) {
        slog::warn << "Number of images " + std::to_string(imagesData.size()) +
            " doesn't match batch size " + std::to_string(batchSize) << slog::endl;
        batchSize = std::min(batchSize, imagesData.size());
        slog::warn << "Number of images to be processed is " << std::to_string(batchSize) << slog::endl;
    }
    ///** Creating input blob **/
    Blob::Ptr imageInput = infer_request.GetBlob(imageInputName);
    ///** Filling input tensor with images. First b channel, then g and r channels **/
    MemoryBlob::Ptr mimage = as<MemoryBlob>(imageInput);
    if (!mimage) {
        slog::err << "We expect image blob to be inherited from MemoryBlob, but by fact we were not able "
                     "to cast imageInput to MemoryBlob" << slog::endl;
        return;
    }
    //// locked memory holder should be alive all time while access to its buffer happens
    auto minputHolder = mimage->wmap();
    size_t num_channels = mimage->getTensorDesc().getDims()[1];
    size_t image_size = mimage->getTensorDesc().getDims()[3] * mimage->getTensorDesc().getDims()[2];
    unsigned char *data = minputHolder.as<unsigned char *>();
    /** Iterate over all input images **/
    for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
        /** Iterate over all pixel in image (b,g,r) **/
        for (size_t pid = 0; pid < image_size; pid++) {
            /** Iterate over all channels **/
            for (size_t ch = 0; ch < num_channels; ++ch) {
                /** [images stride + channels stride + pixel id ] all in bytes **/
                data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid * num_channels + ch];
            }
        }
    }
    if (imInfoInputName != "") {
        Blob::Ptr input2 = infer_request.GetBlob(imInfoInputName);
        auto imInfoDim = inputsInfo.find(imInfoInputName)->second->getTensorDesc().getDims()[1];
        /** Fill input tensor with values **/
        MemoryBlob::Ptr minput2 = as<MemoryBlob>(input2);
        if (!minput2) {
            slog::err << "We expect input2 blob to be inherited from MemoryBlob, but by fact we were not able "
                         "to cast input2 to MemoryBlob" << slog::endl;
            return;
        }
        // locked memory holder should be alive all time while access to its buffer happens
        auto minput2Holder = minput2->wmap();
        float *p = minput2Holder.as<PrecisionTrait<Precision::FP32>::value_type *>();
        for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
            p[image_id * imInfoDim + 0] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[2]);
            p[image_id * imInfoDim + 1] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[3]);
            for (size_t k = 2; k < imInfoDim; k++) {
                p[image_id * imInfoDim + k] = 1.0f;  // all scale factors are set to 1.0
            }
        }
    }
    // -----------------------------------------------------------------------------------------------------
    // --------------------------- 10. Do inference ---------------------------------------------------------
    slog::info << "Start inference" << slog::endl;
    infer_request.Infer();
    // -----------------------------------------------------------------------------------------------------
    // --------------------------- 11. Process output -------------------------------------------------------
    slog::info << "Processing output blobs" << slog::endl;
    const Blob::Ptr output_blob = infer_request.GetBlob(outputName);
    MemoryBlob::CPtr moutput = as<MemoryBlob>(output_blob);
    if (!moutput) {
        throw std::logic_error("We expect output to be inherited from MemoryBlob, "
                               "but by fact we were not able to cast output to MemoryBlob");
    }
    // locked memory holder should be alive all time while access to its buffer happens
    auto moutputHolder = moutput->rmap();
    const float *detection = moutputHolder.as<const PrecisionTrait<Precision::FP32>::value_type *>();
    std::vector<std::vector<int> > boxes(batchSize);
    std::vector<std::vector<int> > classes(batchSize);
    ///* Each detection has image_id that denotes processed image */
    std::string result = "OB_DATA=";
    int num_detect = 0;
    for (int curProposal = 0; curProposal < maxProposalCount; curProposal++) {
        auto image_id = static_cast<int>(detection[curProposal * objectSize + 0]);
        if (image_id < 0) {
            slog::info << "ends with break " << slog::endl;
            break;
        }
        float confidence = detection[curProposal * objectSize + 2];
        auto label = static_cast<int>(detection[curProposal * objectSize + 1]);
        auto xmin = static_cast<int>(detection[curProposal * objectSize + 3] * imageWidths[image_id]);
        auto ymin = static_cast<int>(detection[curProposal * objectSize + 4] * imageHeights[image_id]);
        auto xmax = static_cast<int>(detection[curProposal * objectSize + 5] * imageWidths[image_id]);
        auto ymax = static_cast<int>(detection[curProposal * objectSize + 6] * imageHeights[image_id]);
        std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence <<
            " (" << xmin << "," << ymin << ")-(" << xmax << "," << ymax << ")" << " batch id : " << image_id;
        if (confidence > 0.5) {
            num_detect += 1;
            result += std::to_string(confidence);
            result += "," + std::to_string(label);
            result += "," + std::to_string(xmin);
            result += "," + std::to_string(ymin);
            result += "," + std::to_string(xmax);
            result += "," + std::to_string(ymax);
            /** Drawing only objects with >50% probability **/
            classes[image_id].push_back(label);
            boxes[image_id].push_back(xmin);
            boxes[image_id].push_back(ymin);
            boxes[image_id].push_back(xmax - xmin);
            boxes[image_id].push_back(ymax - ymin);
            //std::cout << " WILL BE PRINTED!";
            /*std::cout << std::endl;*/
            //slog::info << " add prediction" << slog::endl;
        }
        result += ";";
        std::cout << std::endl;
    }
    data1.reset();
    originalData.reset();
    //dst.release();
    //image.release();
    length_of_out_result = (int)result.size();
    std::copy(result.begin(), result.end(), out_result);
    out_result[std::min(length_of_out_result - 1, (int)result.size())] = 0;
    std::cout << "end code" << std::endl;
}
In the above code I have only replaced FormatReader with cv::Mat so that the image can be passed in dynamically. With FormatReader the code works correctly, but I want to load the image dynamically from C#, so could someone please have a look?
When I call the DLL from C# it executes fine and I can see the logs in the Visual Studio console, but after printing "end code" it gives an error.
I have also checked with FormatReader.h from the sample code, using the physical path of an image file, and the code works fine; I can also receive the output in the C# code. Alternatively, is there some way to use FormatReader from the samples with the (unsigned char*) approach above?
Please help.
Hi Amit,
Please refer to the Hello Classification C++ Sample, which loads the image from a cv::Mat for inference.
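For reference, a minimal sketch of that pattern. It assumes the samples helper header samples/ocv_common.hpp (which provides wrapMat2Blob()) is on the include path; model_path is a placeholder for your IR .xml file, and inputImageBytes is the byte buffer from your Classify_Image code:

    #include <inference_engine.hpp>
    #include <opencv2/opencv.hpp>
    #include <samples/ocv_common.hpp>   // provides wrapMat2Blob()

    using namespace InferenceEngine;

    Core ie;
    CNNNetwork network = ie.ReadNetwork(model_path);   // model_path: path to the IR .xml (placeholder)

    // Configure the input so an interleaved BGR U8 cv::Mat can be fed directly
    std::string input_name = network.getInputsInfo().begin()->first;
    InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
    input_info->setLayout(Layout::NHWC);
    input_info->setPrecision(Precision::U8);

    ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
    InferRequest infer_request = executable_network.CreateInferRequest();

    // Decode the byte buffer received from C# and wrap the cv::Mat as a Blob,
    // so no manual per-pixel copy loop is needed
    cv::Mat image = cv::imdecode(inputImageBytes, cv::IMREAD_COLOR);
    Blob::Ptr img_blob = wrapMat2Blob(image);           // from samples/ocv_common.hpp
    infer_request.SetBlob(input_name, img_blob);
    infer_request.Infer();

Note that wrapMat2Blob() wraps the Mat's data rather than copying it, so the cv::Mat must stay alive until Infer() has returned.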
Regards,
Munesh
Thanks for your reply. It is working now with dynamic images.
Thank you.