添加opencv调onnx

This commit is contained in:
zjc-zjc-123 2024-11-25 12:25:43 +08:00
parent c500926db1
commit 119f01ae88
4 changed files with 79 additions and 81 deletions

View File

@ -71,9 +71,9 @@ add_subdirectory(tests)
#add_executable(onnx src/Matrox/onnx_running.cpp)
#target_link_libraries(onnx Qt6::Widgets ${MIL_LIBS})
#
#add_executable(opencv_onnx opencv_onnx.cpp)
## OpenCV Qt
#target_link_libraries(opencv_onnx Qt6::Widgets ${OpenCV_LIBS} comdlg32)
add_executable(opencv_onnx opencv_onnx.cpp)
# OpenCV Qt
target_link_libraries(opencv_onnx Qt6::Widgets ${OpenCV_LIBS} comdlg32)
#
#
#add_executable(create_mask src/Matrox/mask.cpp)

View File

@ -85,8 +85,8 @@ int main() {
Timer timer1;
// 加载模型
cv::dnn::Net net = cv::dnn::readNetFromONNX(modelPath);
// net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA); // 设置为使用 CUDA 后端
// net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); // 设置为在 GPU 上运行
net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA); // 设置为使用 CUDA 后端
net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); // 设置为在 GPU 上运行
timer1.printElapsedTime("Time to load the model");
// 读取输入图像
@ -112,85 +112,86 @@ int main() {
timer1.printElapsedTime("Time to preprocessing");
timer1.restart();
// 推理模型
cv::Mat output = net.forward();
for(int j = 0; j <30; j++) {
// 推理模型
cv::Mat output = net.forward();
// 处理输出数据
std::vector<Detection> detections;
float* data = (float*)output.data;
for (int i = 0; i < 25200; ++i) {
float confidence = data[i * 6 + 4]; // 置信度
if (confidence >= CONFIDENCE_THRESHOLD) {
// 获取检测框并映射到图像坐标
// Remove the unnecessary multiplication
float cx = data[i * 6];
float cy = data[i * 6 + 1];
float w = data[i * 6 + 2];
float h = data[i * 6 + 3];
// 处理输出数据
std::vector<Detection> detections;
float* data = (float*)output.data;
for (int i = 0; i < 25200; ++i) {
float confidence = data[i * 6 + 4]; // 置信度
if (confidence >= CONFIDENCE_THRESHOLD) {
// 获取检测框并映射到图像坐标
// Remove the unnecessary multiplication
float cx = data[i * 6];
float cy = data[i * 6 + 1];
float w = data[i * 6 + 2];
float h = data[i * 6 + 3];
// If needed, adjust for differences between input image size and model input size
// Since they are the same in your case, this step can be omitted or kept as is
cx = cx * inputImage.cols / INPUT_WIDTH;
cy = cy * inputImage.rows / INPUT_HEIGHT;
w = w * inputImage.cols / INPUT_WIDTH;
h = h * inputImage.rows / INPUT_HEIGHT;
// If needed, adjust for differences between input image size and model input size
// Since they are the same in your case, this step can be omitted or kept as is
cx = cx * inputImage.cols / INPUT_WIDTH;
cy = cy * inputImage.rows / INPUT_HEIGHT;
w = w * inputImage.cols / INPUT_WIDTH;
h = h * inputImage.rows / INPUT_HEIGHT;
// Proceed with the rest of your code
int left = static_cast<int>(cx - w / 2);
int top = static_cast<int>(cy - h / 2);
int width = static_cast<int>(w);
int height = static_cast<int>(h);
// Proceed with the rest of your code
int left = static_cast<int>(cx - w / 2);
int top = static_cast<int>(cy - h / 2);
int width = static_cast<int>(w);
int height = static_cast<int>(h);
// Ensure coordinates are within image bounds
left = std::max(0, std::min(left, inputImage.cols - 1));
top = std::max(0, std::min(top, inputImage.rows - 1));
width = std::min(width, inputImage.cols - left);
height = std::min(height, inputImage.rows - top);
// Ensure coordinates are within image bounds
left = std::max(0, std::min(left, inputImage.cols - 1));
top = std::max(0, std::min(top, inputImage.rows - 1));
width = std::min(width, inputImage.cols - left);
height = std::min(height, inputImage.rows - top);
// Add detection
detections.push_back({cv::Rect(left, top, width, height), confidence});
// Add detection
detections.push_back({cv::Rect(left, top, width, height), confidence});
}
}
}
// 非极大值抑制
std::vector<int> indices;
std::vector<cv::Rect> boxes;
std::vector<float> scores;
for (const auto& detection : detections) {
boxes.push_back(detection.box);
// 非极大值抑制
std::vector<int> indices;
std::vector<cv::Rect> boxes;
std::vector<float> scores;
for (const auto& detection : detections) {
boxes.push_back(detection.box);
scores.push_back(detection.confidence);
}
cv::dnn::NMSBoxes(boxes, scores, CONFIDENCE_THRESHOLD, NMS_THRESHOLD, indices);
std::cout << "Number of detections after NMS: " << indices.size() << std::endl;
if (indices.empty()) {
std::cout << "No boxes passed NMS." << std::endl;
}
for (int idx : indices) {
Detection detection = detections[idx];
std::cout << "Drawing box at: (" << detection.box.x << ", " << detection.box.y
<< "), width: " << detection.box.width << ", height: " << detection.box.height << std::endl;
drawDetections(inputImage, {detection});
}
std::vector<Detection> finalDetections;
for (int idx : indices) {
finalDetections.push_back(detections[idx]);
}
for (int i = 0; i < 25200; ++i) {
float confidence = data[i * 6 + 4];
if (confidence >= CONFIDENCE_THRESHOLD) {
std::cout << "Detection " << i << ": confidence=" << confidence << std::endl;
scores.push_back(detection.confidence);
}
cv::dnn::NMSBoxes(boxes, scores, CONFIDENCE_THRESHOLD, NMS_THRESHOLD, indices);
std::cout << "Number of detections after NMS: " << indices.size() << std::endl;
if (indices.empty()) {
std::cout << "No boxes passed NMS." << std::endl;
}
for (int idx : indices) {
Detection detection = detections[idx];
std::cout << "Drawing box at: (" << detection.box.x << ", " << detection.box.y
<< "), width: " << detection.box.width << ", height: " << detection.box.height << std::endl;
drawDetections(inputImage, {detection});
}
std::vector<Detection> finalDetections;
for (int idx : indices) {
finalDetections.push_back(detections[idx]);
}
for (int i = 0; i < 25200; ++i) {
float confidence = data[i * 6 + 4];
if (confidence >= CONFIDENCE_THRESHOLD) {
// std::cout << "Detection " << i << ": confidence=" << confidence << std::endl;
}
}
// 绘制检测框并显示图像
drawDetections(image, finalDetections);
timer1.printElapsedTime("Time to run inference");
}
// 绘制检测框并显示图像
drawDetections(image, finalDetections);
timer1.printElapsedTime("Time to run inference");
cv::imshow("Detections", inputImage);
cv::waitKey(0);

View File

@ -198,10 +198,10 @@ void TemplateMatcher::LoadTemplate(TemplateMatcher& matcher, std::map<std::strin
"C:\\Users\\zjc\\Desktop\\templates\\template5.png",
"C:\\Users\\zjc\\Desktop\\templates\\template6.png",
},
{0, 20, 30, 10,10,10}, // offsetX
{0, 20, 30, 10,10,10}, // offsetY
{100, 60, 60, 66,66,66}, // sizeX
{100, 60, 60, 66,66,66}, // sizeY
{0, 0, 0, 0,0,0}, // offsetX
{0, 0, 0, 0,0,0}, // offsetY
{100, 80, 200, 96,96,96}, // sizeX
{100, 80, 86, 96,96,96}, // sizeY
{M_COLOR_RED, M_COLOR_GREEN, M_COLOR_BLUE, M_COLOR_GREEN,M_COLOR_BLUE,M_COLOR_BLUE} // drawColor
);
}
@ -215,11 +215,8 @@ void TemplateMatcher::FindTemplates( const MIL_ID& inputImage, MIL_ID& outputIma
cout << "Template matching completed.\n";
}
//TODO: 1加入加载多个模板的功能 已
//TODO: 2加入配置文件解析功能解析后的文件与当前的para map<string, int>兼容
// 配置文件当中加入是否显示参数,能调控加载模板的过程是否显示。已
//TODO: 3修改当前的代码使模板匹配不出错 已
//TODO: 4成立模板文件夹能够加载文件夹下的全部模板并实现检测 已
//TODO: 1加入加载多个模板的功能 已 + 加入配置文件
//TODO: 5制作标准结构的函数例如matcher.findModels(MIL_ID inputImage, MIL_ID output_image, map);
////未实现,因为加载和寻找分开后,要对加载和寻找函数传入类成员,无法统一,其余可用到的参数统一,加一个类成员即可。
//TODO: 6完善相应部分的手册 已

View File

@ -8,7 +8,7 @@
#include "Matrox/utils.h"
#include "Matrox/template_matching.h"
#define IMAGE_PATH MIL_TEXT("C:\\Users\\zjc\\Desktop\\8.bmp")
#define IMAGE_PATH MIL_TEXT("C:\\Users\\zjc\\Desktop\\cotton_image_new\\357.bmp")
#define SAVE_PATH MIL_TEXT("C:\\Users\\zjc\\Desktop\\suspect.png")