四象极限

XinJiang1 2024-12-24 01:55:43 +08:00
parent a4d8c793b8
commit 863ff21036
14 changed files with 5545 additions and 96 deletions


@ -3,7 +3,7 @@
#include <QTimer>
// Debug Options
#define GlobalDebug 0 // Globally enable Debug output; printing slows down processing
#define GlobalDebug 1 // Globally enable Debug output; printing slows down processing
#define DebugDetection 0 // Note: enabling this build option stores images, making processing very slow
#define DebugDetectionTime 1 // Whether to print processing times
#define DebugLowerMacCOM 0 // Whether to print information about communication with the lower machine
@ -143,48 +143,49 @@ bool iniCamera()
MIL_INT ProcessingFunction0(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
{
// CallBackTimer0.printElapsedTime("Call Back 0 reach");
// CallBackTimer0.restart();
#if(GlobalDebug)
qDebug()<<"回调1";
#endif
int camera_id = 0;
#if(GlobalDebug && DebugDetectionTime)
Timer timer_detection_time;
timer_detection_time.restart();
#endif
MdigGetHookInfo(HookId, M_MODIFIED_BUFFER + M_BUFFER_ID, &ModifiedBufferId0);
{
QMutexLocker locker(&gDispPicMutex0);
gDispCurrentPicId0 = ModifiedBufferId0;
}
// Copy the image for storage
MbufCopy(ModifiedBufferId0, MilImage0);
cv::Mat img = ImageUtils::mil2Mat(MilImage0);
// Push the image into the recognition queue
ImageData recognitionData;
recognitionData.camera_id = camera_id;
recognitionData.image = img;
g_img_Queue[camera_id]->enqueue(recognitionData);
qDebug() << "Callback0: Enqueued image for recognition";
if (SaveImg_Flag)
{
// Copy the image for storage
MbufCopy(ModifiedBufferId0, MilImage0);
cv::Mat img = ImageUtils::mil2Mat(MilImage0);
// Push the image data into the storage queue
ImageData data;
data.camera_id = 0;
data.image = img.clone(); // make sure the image data is copied
g_storageQueue.enqueue(data);
qDebug() << "Callback0: Enqueued image for camera 0";
FuncCount0++;
}
#if(GlobalDebug)
qDebug()<<"回调1";
#endif
// Copy the image for vivid-color detection
MbufCopy(ModifiedBufferId0,MilImage_Color0);
MIL_UNIQUE_BUF_ID MimResizedestination = MbufAllocColor(MilSystem, 3, 2048, 512, 8+M_UNSIGNED, M_IMAGE+M_PROC+M_DISP, M_UNIQUE_ID);
MbufClear(MimResizedestination, M_COLOR_BLACK);
MimResize(MilImage_Color0, MimResizedestination, 0.5, 1 , M_DEFAULT);
// Mirror-flip the image
MIL_UNIQUE_BUF_ID MimFlipDedtination = MbufClone(MimResizedestination, M_DEFAULT, M_DEFAULT, M_DEFAULT, M_DEFAULT, M_DEFAULT, M_DEFAULT, M_UNIQUE_ID);
MbufClear(MimFlipDedtination, M_COLOR_BLACK);
MimFlip(MimResizedestination, MimFlipDedtination, M_FLIP_HORIZONTAL, M_DEFAULT);
@ -192,30 +193,42 @@ MIL_INT ProcessingFunction0(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
//Vivid-color detection mask
high_sat_detect(MimFlipDedtination, detection_result0, params);
//ONNX detection mask
// MbufCopy(ModifiedBufferId0,MilImage_Onnx0);
// cv::Mat image = ImageUtils::mil2Mat(MilImage_Onnx0);
// cv::Mat Img_Onnx;
// std::vector<Detection> result = runner.predict(image);
// Img_Onnx = runner.postProcess(result, image);
// Convert the Matrox detection result to an OpenCV Mat
cv::Mat matrox_mask = ImageUtils::mil2Mat(detection_result0);
cv::threshold(matrox_mask, matrox_mask, 128, 255, cv::THRESH_BINARY); // make sure it is binary
// std::vector<std::vector<uint8_t>> mask_Onnx1 = generateMaskFromImage2(Img_Onnx, widthBlocks, heightBlocks, sizeThreshold);
// Fetch the deep-learning detection result
ImageData dl_data;
cv::Mat merged_mat_mask;
if(!g_result_Queue[camera_id]->dequeue(dl_data))
{
qWarning() << "Received empty result from Onnx for camera" << camera_id;
// If there is no deep-learning result, fall back to the Matrox result
merged_mat_mask = matrox_mask;
}
else
{
// Convert the deep-learning result to an OpenCV Mat
cv::Mat dl_mask = dl_data.image;
cv::threshold(dl_mask, dl_mask, 128, 255, cv::THRESH_BINARY); // make sure it is binary
cv::resize(dl_mask, dl_mask, matrox_mask.size(), 0, 0, cv::INTER_NEAREST);
// Merge the Matrox result with the deep-learning result (logical OR)
cv::Mat combined_mask;
cv::bitwise_or(matrox_mask, dl_mask, combined_mask);
merged_mat_mask = combined_mask;
}
// Update the current Img MIL id
{
QMutexLocker locker(&gMaskMutex0);
gMask0 = detection_result0;
}
auto [mask_tmp, newTail] = generateMaskWithTail(detection_result0, tail_0, widthBlocks, heightBlocks, sizeThreshold, rowRange, skipLeftCols, skipRightCols);
tail_0 = newTail;
vector<vector<uint8_t>> merged_mask;
merged_mask = ImageUtils::mergeMasks(mask_tmp, mask_tmp); // merge the result of onnx and high sat
mask_0 = merged_mask;
mask_0 = generateMaskFromMatImage(merged_mat_mask, widthBlocks, heightBlocks, sizeThreshold);
#if(GlobalDebug && DebugDetectionTime)
timer_detection_time.printElapsedTime("Time spent in callback func 0");
#endif
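// Signal callback 1 that mask_0 is ready; callback 1 waits on detection_ready before merging the two cameras' masks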
detection_ready.release();
MbufFree(detection_result0);
@ -225,62 +238,78 @@ MIL_INT ProcessingFunction0(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
MIL_INT ProcessingFunction1(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
{
// CallBackTimer1.printElapsedTime("Call Back 1 reached");
// CallBackTimer1.restart();
int camera_id = 1;
// some debug info
#if(GlobalDebug && DebugDetectionTime)
Timer timer_detection_time;
timer_detection_time.restart();
#endif
// FuncCount1++;
MdigGetHookInfo(HookId, M_MODIFIED_BUFFER + M_BUFFER_ID, &ModifiedBufferId1);
// Update the current Img MIL id
// Update the current Img MIL id for display
{
QMutexLocker locker(&gDispPicMutex1);
gDispCurrentPicId1 = ModifiedBufferId1;
}
// Convert back to an OpenCV image
MbufCopy(ModifiedBufferId1, MilImage1);
cv::Mat img = ImageUtils::mil2Mat(MilImage1);
// Push the image into the recognition queue
ImageData recognitionData;
recognitionData.camera_id = 1;
recognitionData.image = img;
g_img_Queue[1]->enqueue(recognitionData);
qDebug() << "Callback1: Enqueued image for recognition";
// Push the image data into the storage queue
if (SaveImg_Flag)
{
// Copy the image for storage
MbufCopy(ModifiedBufferId1, MilImage1);
cv::Mat img = ImageUtils::mil2Mat(MilImage1);
// Push the image data into the storage queue
ImageData data;
data.camera_id = 1;
data.image = img.clone(); // make sure the image data is copied
g_storageQueue.enqueue(data);
qDebug() << "Callback1: Enqueued image for camera 1";
FuncCount1++;
}
#if (GlobalDebug)
qDebug()<<"回调2";
#endif
//Copy the image for vivid-color detection
MbufCopy(ModifiedBufferId1,MilImage_Color1);
// //Copy the ONNX detection image
// MbufCopy(ModifiedBufferId1,MilImage_Onnx1);
MIL_UNIQUE_BUF_ID MimResizedestination = MbufAllocColor(MilSystem, 3, 2048, 512, 8+M_UNSIGNED, M_IMAGE+M_PROC+M_DISP, M_UNIQUE_ID);
MbufClear(MimResizedestination, M_COLOR_BLACK);
MimResize(MilImage_Color1, MimResizedestination, 0.5, 1 , M_DEFAULT);
#if(GlobalDebug && DebugDetectionTime)
Timer timer2;
#endif
//Vivid-color detection mask
//Copy the image for vivid-color detection
MbufCopy(ModifiedBufferId1,MilImage_Color1);
MIL_UNIQUE_BUF_ID MimResizedestination = MbufAllocColor(MilSystem, 3, 2048, 512, 8+M_UNSIGNED, M_IMAGE+M_PROC+M_DISP, M_UNIQUE_ID);
MbufClear(MimResizedestination, M_COLOR_BLACK);
MimResize(MilImage_Color1, MimResizedestination, 0.5, 1 , M_DEFAULT);
// Vivid-color detection mask
high_sat_detect(MimResizedestination, detection_result1, params);
// Convert the Matrox detection result to an OpenCV Mat
cv::Mat matrox_mask = ImageUtils::mil2Mat(detection_result1);
cv::threshold(matrox_mask, matrox_mask, 128, 255, cv::THRESH_BINARY); // make sure it is binary
// Fetch the deep-learning detection result
ImageData dl_data;
cv::Mat merged_mat_mask;
if(!g_result_Queue[camera_id]->dequeue(dl_data))
{
qWarning() << "Receive empty result from Onnx for camera" << camera_id;
// 如果没有深度学习的检测结果,使用 Matrox 的检测结果
merged_mat_mask = matrox_mask;
}
else
{
// Convert the deep-learning result to an OpenCV Mat
cv::Mat dl_mask = dl_data.image;
cv::threshold(dl_mask, dl_mask, 128, 255, cv::THRESH_BINARY); // make sure it is binary
cv::resize(dl_mask, dl_mask, matrox_mask.size(), 0, 0, cv::INTER_NEAREST);
// Merge the Matrox result with the deep-learning result (logical OR)
cv::Mat combined_mask;
cv::bitwise_or(matrox_mask, dl_mask, combined_mask);
merged_mat_mask = combined_mask;
}
#if(GlobalDebug && DebugDetectionTime)
timer2.printElapsedTime("Algorithm Spent: ");
timer2.printElapsedTime("Algorithm Paralled Running Spent: ");
#endif
#if(GlobalDebug && DebugDetection)
@ -294,11 +323,7 @@ MIL_INT ProcessingFunction1(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
QMutexLocker locker(&gMaskMutex1);
gMask1 = detection_result1;
}
auto [mask_tmp, newTail] = generateMaskWithTail(detection_result1, tail_1, widthBlocks, heightBlocks, sizeThreshold, rowRange, skipLeftCols, skipRightCols);
tail_1 = newTail;
mask_1 = mask_tmp;
mask_1 = generateMaskFromMatImage(merged_mat_mask, widthBlocks, heightBlocks, sizeThreshold);
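// Block here until callback 0 has released detection_ready, i.e. mask_0 is ready to be merged with mask_1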
detection_ready.acquire();
vector<vector<uint8_t>> merged_mask;
vector<vector<uint8_t>> mask_tail;
@ -311,7 +336,7 @@ MIL_INT ProcessingFunction1(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
auto mask_expaned = expandMaskHorizontally(merged_mask, expansionRaidus);
// Pad the left and right of the result with 0 so the object size meets the requirement
PadColumns(mask_expaned,padLeft,padRight,0);
// Expand the mask to a suitable size
// Expand the mask to a size suitable for sending
std::vector<std::vector<uint8_t>> mask_Total = expandArray(mask_expaned,64);
// save masks
#if(GlobalDebug && DebugDetection)
@ -326,7 +351,6 @@ MIL_INT ProcessingFunction1(MIL_INT HookType, MIL_ID HookId, void *HookDataPtr)
qWarning()<<"下位机发送失败";
}
#if(GlobalDebug && DebugDetectionTime)
timer_detection_time.printElapsedTime("Time of Processing From Get into Ca"
"..0llBack to Sent to Lower Mac");
@ -676,7 +700,7 @@ std::vector<std::vector<uint8_t>> generateMaskFromImage(const MIL_ID& inputImage
}
std::vector<std::vector<uint8_t>> generateMaskFromImage2(const cv::Mat& image, int widthBlocks, int heightBlocks, int thresholds= 10) {
std::vector<std::vector<uint8_t>> generateMaskFromMatImage(const cv::Mat& image, int widthBlocks, int heightBlocks, int thresholds= 10) {
// Make sure the image is binary
cv::threshold(image, image, 128, 255, cv::THRESH_BINARY);
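The remainder of generateMaskFromMatImage falls outside this hunk. A minimal sketch of the block-grid logic it presumably implements, assuming each grid cell is flagged when its count of foreground pixels exceeds thresholds (the helper below is illustrative, not the committed code):

// Sketch only: divide the binarized image into a widthBlocks x heightBlocks grid
// and flag each cell whose foreground-pixel count exceeds the threshold.
#include <opencv2/opencv.hpp>
#include <vector>
std::vector<std::vector<uint8_t>> maskFromMatSketch(const cv::Mat& bin, int widthBlocks, int heightBlocks, int thresholds) {
    std::vector<std::vector<uint8_t>> mask(heightBlocks, std::vector<uint8_t>(widthBlocks, 0));
    const int cellW = bin.cols / widthBlocks;   // trailing pixels are ignored in this sketch
    const int cellH = bin.rows / heightBlocks;
    for (int by = 0; by < heightBlocks; ++by) {
        for (int bx = 0; bx < widthBlocks; ++bx) {
            cv::Rect cell(bx * cellW, by * cellH, cellW, cellH);
            if (cv::countNonZero(bin(cell)) > thresholds)   // bin must be single-channel and binary
                mask[by][bx] = 1;
        }
    }
    return mask;
}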


@ -27,7 +27,7 @@ extern MIL_ID MilApplication;
extern MIL_ID MilSystem;
std::vector<std::vector<uint8_t>> generateMaskFromImage(const MIL_ID& inputImage, int widthBlocks, int heightBlocks, int thresholds);
std::vector<std::vector<uint8_t>> generateMaskFromImage2(const cv::Mat& image, int widthBlocks, int heightBlocks, int thresholds);
std::vector<std::vector<uint8_t>> generateMaskFromMatImage(const cv::Mat& image, int widthBlocks, int heightBlocks, int thresholds);
void PadColumns(std::vector<std::vector<uint8_t>>& data, int pad_left, int pad_right, uint8_t fill_value );

config/1.dcf (new file, 2356 lines; diff too large to show)

config/2.dcf (new file, 2354 lines; diff too large to show)


@ -0,0 +1,52 @@
# Green color parameters
green_L_min = 18
green_L_max = 58
green_a_min = -35
green_a_max = -11
green_b_min = -7
green_b_max = 24
# Blue color parameters
blue_L_min = 20
blue_L_max = 43
blue_a_min = -13
blue_a_max = 22
blue_b_min = -48
blue_b_max = -3
# Orange color parameters
orange_L_min = 62
orange_L_max = 77
orange_a_min = 7
orange_a_max = 15
orange_b_min = 30
orange_b_max = 48
# Black color parameters
black_L_min = 1
black_L_max = 11
black_a_min = -5
black_a_max = 2
black_b_min = -3
black_b_max = 6
# Red color parameters
red_L_min = 20
red_L_max = 44
red_a_min = 10
red_a_max = 30
red_b_min = -99
red_b_max = 32
# Purple color parameters
purple_L_min = 35
purple_L_max = 72
purple_a_min = 12
purple_a_max = 22
purple_b_min = -48
purple_b_max = 1
# Other parameters
lab_denoising = 1
saturation_threshold = 150
saturation_denoising = 1
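These Lab and saturation thresholds are plain key = value pairs. A minimal sketch of how such a file could be parsed, assuming '#' lines are comments; the function below is illustrative and not part of this commit:

// Sketch only: read "key = value" lines (e.g. green_L_min = 18) and skip '#' comments.
#include <fstream>
#include <map>
#include <sstream>
#include <string>
std::map<std::string, int> loadColorParamsSketch(const std::string& path) {
    std::map<std::string, int> params;
    std::ifstream in(path);
    std::string line;
    while (std::getline(in, line)) {
        if (line.empty() || line[0] == '#') continue;   // skip comment lines
        std::istringstream ls(line);
        std::string key, eq;
        int value;
        if (ls >> key >> eq >> value && eq == "=")
            params[key] = value;                        // e.g. params["saturation_threshold"] = 150
    }
    return params;
}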

config/dimo_369_640.onnx (new binary file; contents not shown)


@ -10,6 +10,7 @@ CONFIG += c++17
SOURCES += \
camera.cpp \
detectionworker.cpp \
globals.cpp \
img_utils.cpp \
main.cpp \
@ -19,6 +20,7 @@ SOURCES += \
HEADERS += \
camera.h \
detectionworker.h \
globals.h \
img_utils.h \
onnxrunner.h \


@ -1,22 +1,40 @@
// globals.cpp
#include "globals.h"
// Image display
// Initialize the image-display mutexes and MIL_IDs
QMutex gDispPicMutex0;
MIL_ID gDispCurrentPicId0 = 0;
QMutex gDispPicMutex1;
MIL_ID gDispCurrentPicId1 = 0;
// Detection results
// Initialize the mask mutexes and MIL_IDs
QMutex gMaskMutex0;
MIL_ID gMask0 = 0;
QMutex gMaskMutex1;
MIL_ID gMask1 = 0;
// Synchronization of the two cameras' results
// Initialize the detection-ready semaphore
QSemaphore detection_ready(0);
// Initialize the global storage queue
ThreadSafeQueue<ImageData> g_storageQueue;
// Initialize the global image and result queues
ThreadSafeQueue<ImageData>* g_img_Queue[2] = { nullptr, nullptr };
ThreadSafeQueue<ImageData>* g_result_Queue[2] = { nullptr, nullptr };
// Initialize the global recognition-thread pointers
std::thread* g_recognitionThread[2] = { nullptr, nullptr };
// Initialize the thread-control flag pointers
std::atomic<bool>* g_recognitionRunning[2] = { nullptr, nullptr };
// Initialize the global array of ONNXRunner instances
ONNXRunner* g_runner_array[2] = { nullptr, nullptr };
// Functions that return the save directory and the config directory
QString getSaveDirectory() {
QDir appDir(QCoreApplication::applicationDirPath());
QString saveDir = appDir.filePath("images");
@ -28,16 +46,120 @@ QString getSaveDirectory() {
return saveDir;
}
QString getConfigDirectory() {
QDir appDir(QCoreApplication::applicationDirPath());
QString saveDir = appDir.filePath("config");
// Get the application's directory path
QString appPath = QCoreApplication::applicationDirPath();
QDir appDir(appPath);
if (!appDir.exists(saveDir)) {
appDir.mkdir("config"); // create the directory if it does not exist
// Get the absolute path of the config directory
QString configDir = appDir.absoluteFilePath("config");
// Check whether the config directory exists
if (!appDir.exists("config")) {
// Try to create the config directory
if (!appDir.mkdir("config")) {
qCritical() << "Failed to create the config directory:" << configDir;
// Depending on requirements, return an empty string, throw an exception, or handle the error in another way
return QString();
}
}
return saveDir;
return configDir;
}
ThreadSafeQueue<ImageData> g_storageQueue;
// Initialize the global image and result queues
void initializeGlobalQueues() {
for(int i = 0; i < 2; ++i)
{
g_img_Queue[i] = new ThreadSafeQueue<ImageData>();
g_result_Queue[i] = new ThreadSafeQueue<ImageData>();
}
}
// Initialize the thread-control flags and recognition threads
void initializeGlobalThreads() {
for(int i = 0; i < 2; ++i)
{
g_recognitionRunning[i] = new std::atomic<bool>(false);
g_recognitionThread[i] = nullptr;
}
}
// Initialize the global array of ONNXRunner instances
void initializeGlobalONNXRunner() {
for(int i = 0; i < 2; ++i)
{
g_runner_array[i] = new ONNXRunner();
std::string modelPath = (getConfigDirectory() + "/dimo_369_640.onnx").toStdString();
qDebug() << modelPath;
g_runner_array[i]->load(modelPath);
}
}
// Clean up the global image and result queues
void cleanupGlobalQueues() {
for(int i = 0; i < 2; ++i)
{
if(g_img_Queue[i])
{
g_img_Queue[i]->stop();
delete g_img_Queue[i];
g_img_Queue[i] = nullptr;
}
if(g_result_Queue[i])
{
g_result_Queue[i]->stop();
delete g_result_Queue[i];
g_result_Queue[i] = nullptr;
}
}
}
// Clean up the thread-control flags and recognition threads
void cleanupGlobalThreads() {
for(int i = 0; i < 2; ++i)
{
if(g_recognitionRunning[i])
{
delete g_recognitionRunning[i];
g_recognitionRunning[i] = nullptr;
}
if(g_recognitionThread[i])
{
if(g_recognitionThread[i]->joinable())
{
g_recognitionThread[i]->join();
}
delete g_recognitionThread[i];
g_recognitionThread[i] = nullptr;
}
}
}
// Clean up the global array of ONNXRunner instances
void cleanupGlobalONNXRunner() {
for(int i = 0; i < 2; ++i)
{
if(g_runner_array[i])
{
delete g_runner_array[i];
g_runner_array[i] = nullptr;
}
}
}
// Called at program initialization
struct GlobalsInitializer {
GlobalsInitializer() {
initializeGlobalQueues();
initializeGlobalThreads();
initializeGlobalONNXRunner(); // newly added
}
~GlobalsInitializer() {
cleanupGlobalThreads();
cleanupGlobalQueues();
cleanupGlobalONNXRunner(); // newly added
}
} globalsInitializerInstance; // global instance; ensures cleanup when the program exits


@ -8,6 +8,13 @@
#include <QSemaphore>
#include <QDir>
#include <QCoreApplication>
#include <queue>
#include <mutex>
#include <condition_variable>
#include <optional>
#include <thread>
#include <atomic>
#include "onnxrunner.h"
// Image display 0
extern QMutex gDispPicMutex0;
@ -17,14 +24,17 @@ extern MIL_ID gDispCurrentPicId0;
extern QMutex gDispPicMutex1;
extern MIL_ID gDispCurrentPicId1;
// Mask mutexes and IDs
extern QMutex gMaskMutex0;
extern MIL_ID gMask0;
extern QMutex gMaskMutex1;
extern MIL_ID gMask1;
// Detection-ready semaphore
extern QSemaphore detection_ready;
// Functions that return the save directory and the config directory
QString getSaveDirectory();
QString getConfigDirectory();
@ -35,6 +45,7 @@ struct ImageData
cv::Mat image;
};
// Thread-safe queue template class
template <typename T>
class ThreadSafeQueue{
public:
@ -61,6 +72,20 @@ public:
return true;
}
// Peek at the first element of the queue without removing it
bool front(T& item)
{
std::unique_lock<std::mutex> lock(mutex_);
while (queue_.empty() && !stop_)
{
cond_var_.wait(lock);
}
if (queue_.empty())
return false;
item = queue_.front(); // copy only, do not remove
return true;
}
// Stop the queue and wake up all waiting threads
void stop()
{
@ -69,6 +94,13 @@ public:
cond_var_.notify_all();
}
// Check whether the queue is empty
bool empty()
{
std::lock_guard<std::mutex> lock(mutex_);
return queue_.empty();
}
private:
std::queue<T> queue_;
std::mutex mutex_;
@ -76,7 +108,20 @@ private:
bool stop_ = false;
};
// Global storage queue
extern ThreadSafeQueue<ImageData> g_storageQueue;
// Global image and result queues
extern ThreadSafeQueue<ImageData>* g_img_Queue[2];
extern ThreadSafeQueue<ImageData>* g_result_Queue[2];
// Global recognition threads
extern std::thread* g_recognitionThread[2];
// Thread-control flags
extern std::atomic<bool>* g_recognitionRunning[2];
// Global array of ONNXRunner instances
extern ONNXRunner* g_runner_array[2];
#endif // GLOBALS_H
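For reference, a minimal usage sketch of the ThreadSafeQueue declared above (illustrative only; in this commit the camera callbacks are the producers and the detection worker and storage threads are the consumers):

// Sketch only: one producer and one blocking consumer of ThreadSafeQueue<ImageData>.
// dequeue() is expected to block until an item arrives and to return false after stop(),
// mirroring the front() implementation shown above.
void producerSketch(ThreadSafeQueue<ImageData>& q, const cv::Mat& frame, int cam) {
    ImageData d;
    d.camera_id = cam;
    d.image = frame.clone();          // deep copy so the queue owns its pixels
    q.enqueue(d);
}
void consumerSketch(ThreadSafeQueue<ImageData>& q) {
    ImageData d;
    while (q.dequeue(d)) {            // returns false once stop() has been called
        // ... process d.image for camera d.camera_id ...
    }
}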


@ -61,7 +61,13 @@ cv::Mat ONNXRunner::createDetectionMask(const cv::Mat& originalImage, const std:
// Load the ONNX model
cv::dnn::Net ONNXRunner::loadModel(const std::string& modelPath) {
// Load the ONNX model
cv::dnn::Net net = cv::dnn::readNetFromONNX(modelPath);
if (net.empty()) {
qCritical() << "加载ONNX模型失败:" << QString::fromStdString(modelPath);
} else {
qDebug() << "成功加载ONNX模型:" << QString::fromStdString(modelPath);
}
net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA); // Use CUDA backend
net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); // Run on GPU
return net;
@ -134,13 +140,39 @@ std::vector<Detection> ONNXRunner::applyNMS(std::vector<Detection>& detections)
void ONNXRunner::load(const std::string& modelPath) {
QString qModelPath = QString::fromStdString(modelPath);
// Check that the model file exists
if (!QFile::exists(qModelPath)) {
qCritical() << "ONNX model file does not exist:" << qModelPath;
// Depending on requirements, throw an exception, set a status flag, or handle the error in another way
return;
}
// Load the model
this->net = ONNXRunner::loadModel(modelPath);
// Check that the model was loaded successfully
if (this->net.empty()) {
qCritical() << "Failed to load the ONNX model:" << qModelPath;
// Depending on requirements, throw an exception, set a status flag, or handle the error in another way
return;
}
// Create an empty input matrix as warm-up data (assumes the model input is an RGB image)
cv::Mat dummyInput = cv::Mat::zeros(INPUT_HEIGHT, INPUT_WIDTH, CV_8UC3); // 640x640 all-zero matrix
// Call predict to warm up
this->predict(dummyInput);
}; // Load the model
// Call predict to warm up, so the first real inference does not pay the model-loading latency
try {
for(int i = 0; i < 10; i++)
this->predict(dummyInput);
qDebug() << "ONNX模型预热完成";
}
catch (const std::exception& e) {
qCritical() << "ONNXRunner::predict 异常:" << e.what();
// 处理异常,例如记录日志或设置状态标志
}
};
std::vector<Detection> ONNXRunner::predict(const cv::Mat &image) {
cv::dnn::Net net = this->net;


@ -4,6 +4,7 @@
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <iostream>
#include <QFile>
#include <vector>
#include <chrono>


@ -13,6 +13,7 @@
#include <QPixmap>
#include <QDateTime>
#include <img_utils.h>
#include <detectionworker.h>
using namespace std;
@ -211,6 +212,29 @@ void Widget::on_btn_stop_clicked()
gDispCurrentPicId1 = 0;
}
ui->camera_1_img->clear();
// Stop the detection worker threads
for(int i = 0; i < 2; ++i)
{
g_recognitionRunning[i]->store(false);
g_img_Queue[i]->stop(); // stop the queue to wake the thread
}
// Wait for the detection worker threads to finish
for(int i = 0; i < 2; ++i)
{
if(g_recognitionThread[i] && g_recognitionThread[i]->joinable())
{
g_recognitionThread[i]->join();
delete g_recognitionThread[i];
g_recognitionThread[i] = nullptr;
}
if(g_recognitionRunning[i])
{
delete g_recognitionRunning[i];
g_recognitionRunning[i] = nullptr;
}
}
DestoryCamera();
DestoryLowMac();
}
@ -219,7 +243,14 @@ void Widget::on_btn_stop_clicked()
void Widget::on_btn_start_clicked()
{
this->isCamRunning = true;
// Start the detection worker threads
for(int i = 0; i < 2; ++i)
{
g_recognitionRunning[i]->store(true);
g_recognitionThread[i] = new std::thread(detectionWorker, i);
}
Start_camera();
}
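detectionworker.cpp is not part of this excerpt. Based on how the globals above are used, a minimal sketch of what detectionWorker(i) presumably does (the body and the createDetectionMask call are assumptions; only the names are taken from this diff):

// Sketch only: pull frames from g_img_Queue[id], run the ONNX model, and push the
// resulting mask into g_result_Queue[id] for the camera callback to merge.
void detectionWorkerSketch(int id) {
    ImageData in;
    while (g_recognitionRunning[id]->load() && g_img_Queue[id]->dequeue(in)) {
        std::vector<Detection> dets = g_runner_array[id]->predict(in.image);
        ImageData out;
        out.camera_id = id;
        out.image = g_runner_array[id]->createDetectionMask(in.image, dets);  // assumed signature
        g_result_Queue[id]->enqueue(out);
    }
}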