Added the actual-deployment branch, which tracks the version actually deployed on site. The currently deployed build is the 20240713 version and still carries a few unfixed issues: the Lab color-space function is missing a threshold parameter, and the tomato-leaf segmentation model suffers from input-variable contamination. Both issues have already been found and fixed at home, but the on-site equipment has not been updated; Sheng-ge (胜哥) will travel to Quanzhou on 20240723 and apply the fixes while there.
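
For context, a minimal sketch of what the two fixes described above could look like. It assumes the "Lab color-space function" refers to a routine like extract_s_l() in the processing script below, and that "input-variable contamination" means the model's input array is modified in place before inference; the threshold parameter and the prepare_input() helper are illustrative names, not the code that actually ships.

import cv2
import numpy as np

def extract_s_l(image, threshold=180):
    # Hypothetical fix: expose the previously hard-coded threshold as a parameter.
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    s_plus_l = cv2.add(hsv[:, :, 1], lab[:, :, 0])  # saturating add of the S and L channels
    _, mask = cv2.threshold(s_plus_l, threshold, 255, cv2.THRESH_BINARY)
    return mask

def prepare_input(image):
    # Hypothetical fix: work on a copy so the caller's array is never mutated in place.
    img = image.copy()
    return cv2.GaussianBlur(img, (5, 5), 0)
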
105
.gitignore
vendored
@ -1,105 +0,0 @@
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# AWS User-specific
.idea/**/aws.xml

# Generated files
.idea/**/contentModel.xml

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# SonarLint plugin
.idea/sonarlint/

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

!/20240529RGBtest3/data/
!/20240529RGBtest3/data/
!/20240410RGBtest1/super-tomato/defect_big.bmp
!/20240410RGBtest1/super-tomato/defect_mask.bmp
!/20240410RGBtest1/defect_big.bmp
!/20240529RGBtest3/原图108测试过程图/
!/20240529RGBtest3/测试1.png
!/20240529RGBtest3/测试2.png
!/20240410RGBtest1/super-tomato/defect.bmp
!/20240410RGBtest1/super-tomato/defect_big.bmp
!/20240410RGBtest1/super-tomato/defect_mask.bmp
!/20240410RGBtest1/super-tomato/prediction.png
/20240529RGBtest3/data/
/20240627test4/.idea/
/20240627test4/qt_test/
/20240627test4/封装exe/
/20240627test4/qt_test/
/20240627test4/qt_test/PF/
/20240627test4/image/
/20240627test4/pfzc/
/20240627test4/pfzz/
/20240529RGBtest3/原图108测试过程图/
/20240627test4/20240718test/
/20240627test4/testimg_result/
/20240627test4/zz_test/
/20240627test4/误识别测试20240714/
@ -1,444 +0,0 @@
# %%

# %%
import cv2
import numpy as np
import matplotlib.pyplot as plt
image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\TopImage_32.jpg')
r_channel = image[:, :, 2]
b_channel = image[:, :, 0]
gray_image = r_channel - b_channel
gray_image = np.clip(gray_image, 0, 255)
gray_image = np.uint8(gray_image)

# Show the newly generated grayscale image
plt.imshow(gray_image, cmap='gray')
plt.axis('off')
plt.show()

# %%
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the RGB image
img = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\TopImage_32.jpg')

# Get the image height and width
height, width, _ = img.shape

# Create a new grayscale image
gray_img = np.zeros((height, width), dtype=np.uint8)

# Visit every pixel and use R - B as the new gray value
for y in range(height):
    for x in range(width):
        r, g, b = img[y, x]
        gray_value = r - b
        if gray_value < 0:
            gray_value = 0
        elif gray_value > 255:
            gray_value = 255
        gray_img[y, x] = int(gray_value)

plt.imshow(gray_image, cmap='gray')
plt.axis('off')
plt.show()

# Save the grayscale image
# cv2.imwrite('output_image.jpg', gray_img)

# %%
import torch
import cv2
import numpy as np
import matplotlib.pyplot as plt

def extract_r_minus_b(image):
    img_tensor = torch.tensor(image, dtype=torch.float32).to('cuda')

    r_channel = img_tensor[:, :, 0]
    b_channel = img_tensor[:, :, 2]

    # Check for NoneType values and replace them with 0
    r_channel = torch.nan_to_num(r_channel, nan=0)
    b_channel = torch.nan_to_num(b_channel, nan=0)

    r_minus_b = torch.clamp(r_channel - b_channel, 0, 255).to('cpu').numpy().astype(np.uint8)

    return r_minus_b

# Test image path
image_path = '20240410tomatoRGBtest2/data/39.bmp'

# Read the image
image = cv2.imread(image_path)

# Test the extract_r_minus_b() function
r_minus_b = extract_r_minus_b(image)

# Show the result
plt.imshow(r_minus_b, cmap='gray')
plt.title('R-B Channel Difference')
plt.axis('off')
plt.show()

# %%
### Adaptive threshold segmentation
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the R-channel-minus-B-channel grayscale image
gray_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\r-b.jpg', cv2.IMREAD_GRAYSCALE)

# Adaptive threshold segmentation
adaptive_threshold = cv2.adaptiveThreshold(gray_image, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)

# Show the result
plt.imshow(adaptive_threshold, cmap='gray')
plt.axis('off')
plt.show()

# %%
### Threshold segmentation with an adjustable slider

import cv2
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython.display import display

# Read the R-channel-minus-B-channel grayscale image
gray_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\r-b.jpg', cv2.IMREAD_GRAYSCALE)

# Define the callback
def threshold_callback(threshold):
    _, thresholded_image = cv2.threshold(gray_image, threshold, 255, cv2.THRESH_BINARY)
    plt.imshow(thresholded_image, cmap='gray')
    plt.axis('off')
    plt.show()

# Create the slider
threshold_slider = widgets.IntSlider(min=0, max=255, step=1, value=128, description='Threshold:')
display(threshold_slider)

# Call the callback once
threshold_callback(threshold_slider.value)

# Connect the slider to the callback
widgets.interactive(threshold_callback, threshold=threshold_slider)

# %%
### Otsu threshold segmentation
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the R-channel-minus-B-channel grayscale image
gray_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\r-b.jpg', cv2.IMREAD_GRAYSCALE)

# Otsu threshold segmentation
_, otsu_threshold = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Show the result
plt.imshow(otsu_threshold, cmap='gray')
plt.axis('off')
plt.show()

# %%
### Sobel edge detection
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the R-channel-minus-B-channel grayscale image
gray_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\r-b.jpg', cv2.IMREAD_GRAYSCALE)

# Sobel edge detection
sobel_x = cv2.Sobel(gray_image, cv2.CV_64F, 1, 0, ksize=5)
sobel_y = cv2.Sobel(gray_image, cv2.CV_64F, 0, 1, ksize=5)
edges = np.sqrt(sobel_x**2 + sobel_y**2)

# Show the result
plt.imshow(edges, cmap='gray')
plt.axis('off')
plt.show()

# %%
### Morphological processing
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the R-channel-minus-B-channel grayscale image
gray_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\r-b.jpg', cv2.IMREAD_GRAYSCALE)

# Define the structuring element
kernel = np.ones((2, 2), np.uint8)

# Closing
closing = cv2.morphologyEx(gray_image, cv2.MORPH_CLOSE, kernel)

# Show the result
plt.imshow(closing, cmap='gray')
plt.axis('off')
plt.show()

# %%
### Corner detection
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Read the R-channel-minus-B-channel grayscale image
gray_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\data\r-b.jpg', cv2.IMREAD_GRAYSCALE)

# Shi-Tomasi corner detection
corners = cv2.goodFeaturesToTrack(gray_image, 100, 0.01, 10)

# Draw the corners
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv2.circle(gray_image, (x, y), 3, 255, -1)

# Show the result
plt.imshow(gray_image, cmap='gray')
plt.axis('off')
plt.show()

# %%
### Binarize the object

import cv2
import numpy as np

# Read the original image
img = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\output\r_minus_b_26.jpg', cv2.IMREAD_GRAYSCALE)

# Otsu's binarization
_, binary_img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# # Show the binarized image
# cv2.imshow('Binary Image', binary_img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Save the binarized image
cv2.imwrite('06.jpg', binary_img)

# %%
### Smooth edges
import cv2
import numpy as np

# Assume the two images have been saved as image1.png and image2.png
image1 = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\06.jpg', cv2.IMREAD_GRAYSCALE)
image2 = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\010.png', cv2.IMREAD_GRAYSCALE)

# Apply Gaussian blur to smooth the edges
smoothed_image1 = cv2.GaussianBlur(image1, (9, 9), 0)
smoothed_image2 = cv2.GaussianBlur(image2, (9, 9), 0)

# Convert the smoothed images back to binary images
_, binary_image1 = cv2.threshold(smoothed_image1, 128, 255, cv2.THRESH_BINARY)
_, binary_image2 = cv2.threshold(smoothed_image2, 128, 255, cv2.THRESH_BINARY)

# Save the binarized images
cv2.imwrite('smoothed_binary_image06.png', binary_image1)
cv2.imwrite('smoothed_binary_image2.png', binary_image2)

# %%
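
The notebook above computes R-B both with plain NumPy subtraction (r_channel - b_channel) and, in the script further below, with cv2.subtract. These are not equivalent on uint8 data: NumPy arithmetic wraps around modulo 256, while cv2.subtract saturates at 0. A small standalone check, with invented values for illustration:

import cv2
import numpy as np

r = np.array([[10, 200]], dtype=np.uint8)
b = np.array([[50, 100]], dtype=np.uint8)

print(r - b)               # NumPy uint8 arithmetic wraps around: [[216 100]]
print(cv2.subtract(r, b))  # OpenCV saturates at 0:               [[  0 100]]

# Clipping after the fact does not undo the wrap-around, so for a clean R-B
# grayscale either use cv2.subtract or promote to a signed dtype first:
print(np.clip(r.astype(np.int16) - b, 0, 255).astype(np.uint8))   # [[  0 100]]
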
@ -1,298 +0,0 @@
import cv2
import numpy as np
import os
import argparse
# from svm import predict_image_array, load_model


def extract_s_l(image_path):
    image = cv2.imread(image_path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    s_channel = hsv[:, :, 1]
    l_channel = lab[:, :, 0]
    result = cv2.add(s_channel, l_channel)
    return result

def find_reflection(image_path, threshold=190):
    # Read the image
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # Apply threshold segmentation
    _, reflection = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)

    return reflection

def otsu_threshold(image):
    # Convert the image to grayscale
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Otsu threshold segmentation
    _, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    return binary

# Extract the calyx, using the G-R image
def extract_g_r(image):
    # image = cv2.imread(image_path)
    g_channel = image[:, :, 1]
    r_channel = image[:, :, 2]
    result = cv2.subtract(cv2.multiply(g_channel, 1.5), r_channel)
    return result


# Extract the tomato, using the R-B image
def extract_r_b(image_path):
    image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    b_channel = image[:, :, 0]
    result = cv2.subtract(r_channel, b_channel)
    return result

def extract_r_g(image_path):
    image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    g_channel = image[:, :, 1]
    result = cv2.subtract(r_channel, g_channel)
    return result

def threshold_segmentation(image, threshold, color=255):
    _, result = cv2.threshold(image, threshold, color, cv2.THRESH_BINARY)
    return result

def bitwise_operation(image1, image2, operation='and'):
    if operation == 'and':
        result = cv2.bitwise_and(image1, image2)
    elif operation == 'or':
        result = cv2.bitwise_or(image1, image2)
    else:
        raise ValueError("operation must be 'and' or 'or'")
    return result

def largest_connected_component(bin_img):
    # Find connected components with connectedComponentsWithStats
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(bin_img, connectivity=8)

    # If only the background label exists, return an empty binary image
    if num_labels <= 1:
        return np.zeros_like(bin_img)

    # Find the largest connected component (excluding the background)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # Create a new binary image that keeps only the largest component
    new_bin_img = np.zeros_like(bin_img)
    new_bin_img[labels == largest_label] = 255

    return new_bin_img

def close_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    closed_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
    return closed_img

def open_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    opened_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
    return opened_img


def draw_tomato_edge(original_img, bin_img):
    bin_img_processed = close_operation(bin_img, kernel_size=(15, 15))
    # cv2.imshow('Close Operation', bin_img_processed)
    # bin_img_processed = open_operation(bin_img_processed, kernel_size=(19, 19))
    # cv2.imshow('Open Operation', bin_img_processed)
    # Find contours on the processed bin_img_processed
    contours, _ = cv2.findContours(bin_img_processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # If no contours are found, return the original image directly
    if not contours:
        return original_img, np.zeros_like(bin_img)  # return the original image and an all-black mask
    # Find the largest contour
    max_contour = max(contours, key=cv2.contourArea)
    # Precision of the polygon approximation
    epsilon = 0.0006 * cv2.arcLength(max_contour, True)  # this value can be tuned
    approx = cv2.approxPolyDP(max_contour, epsilon, True)
    # Draw the contour
    cv2.drawContours(original_img, [approx], -1, (0, 255, 0), 3)
    mask = np.zeros_like(bin_img)

    # Fill the largest contour with white
    cv2.drawContours(mask, [max_contour], -1, (255), thickness=cv2.FILLED)

    return original_img, mask

def draw_tomato_edge_convex_hull(original_img, bin_img):
    bin_img_blurred = cv2.GaussianBlur(bin_img, (5, 5), 0)
    contours, _ = cv2.findContours(bin_img_blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return original_img
    max_contour = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(max_contour)
    cv2.drawContours(original_img, [hull], -1, (0, 255, 0), 3)
    return original_img

# Get the complete binary image of the tomato, excluding the green calyx
def fill_holes(bin_img):
    # Copy bin_img to img_filled
    img_filled = bin_img.copy()

    # Get the image height and width
    height, width = bin_img.shape

    # Create a mask two pixels larger than the input image
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # Use floodFill to fill the black region
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # Invert the filled image
    img_filled_d = cv2.bitwise_not(img_filled)

    # Merge the original image and the filled image with bitwise_or
    img_filled = cv2.bitwise_or(bin_img, img_filled)
    # Crop img_filled and img_filled_d to the same size as bin_img
    # img_filled = img_filled[:height, :width]
    img_filled_d = img_filled_d[:height, :width]

    return img_filled, img_filled_d

def bitwise_and_rgb_with_binary(rgb_img, bin_img):
    # Convert the binary image to a three-channel image
    bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)

    # Merge the RGB image and the binary image with bitwise_and
    result = cv2.bitwise_and(rgb_img, bin_img_3channel)

    return result


def extract_max_connected_area(image, lower_hsv, upper_hsv):
    # Read the image
    # image = cv2.imread(image_path)

    # Convert the image from BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Threshold to get a binary image of the target region
    mask = cv2.inRange(hsv, lower_hsv, upper_hsv)

    # Find the connected components of the binary image
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)

    # Find the largest connected component (excluding the background)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # Create a new binary image that keeps only the largest component
    new_bin_img = np.zeros_like(mask)
    new_bin_img[labels == largest_label] = 255

    # Copy new_bin_img to img_filled
    img_filled = new_bin_img.copy()

    # Get the image height and width
    height, width = new_bin_img.shape

    # Create a mask two pixels larger than the input image
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # Use floodFill to fill the black region
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # Invert the filled image
    img_filled_inv = cv2.bitwise_not(img_filled)

    # Merge the original image and the filled image with bitwise_or
    img_filled = cv2.bitwise_or(new_bin_img, img_filled_inv)

    return img_filled


def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--dir_path', type=str, default=r'D:\project\supermachine--tomato-passion_fruit\20240419RGBtest2\data',
                        help='the directory path of images')
    parser.add_argument('--threshold_s_l', type=int, default=180,
                        help='the threshold for s_l')
    parser.add_argument('--threshold_r_b', type=int, default=15,
                        help='the threshold for r_b')

    args = parser.parse_args()

    for img_file in os.listdir(args.dir_path):
        if img_file.endswith('.bmp'):
            img_path = os.path.join(args.dir_path, img_file)
            s_l = extract_s_l(img_path)
            otsu_thresholded = otsu_threshold(s_l)
            img_fore = bitwise_and_rgb_with_binary(cv2.imread(img_path), otsu_thresholded)
            img_fore_defect = extract_g_r(img_fore)
            img_fore_defect = threshold_segmentation(img_fore_defect, args.threshold_r_b)
            # cv2.imshow('img_fore_defect', img_fore_defect)
            thresholded_s_l = threshold_segmentation(s_l, args.threshold_s_l)
            new_bin_img = largest_connected_component(thresholded_s_l)
            zhongggggg = cv2.bitwise_or(new_bin_img, cv2.imread('defect_mask.bmp', cv2.IMREAD_GRAYSCALE))
            cv2.imshow('zhongggggg', zhongggggg)
            new_otsu_bin_img = largest_connected_component(otsu_thresholded)
            filled_img, defect = fill_holes(new_bin_img)
            defect = bitwise_and_rgb_with_binary(cv2.imread(img_path), defect)
            cv2.imshow('defect', defect)
            edge, mask = draw_tomato_edge(cv2.imread(img_path), new_bin_img)
            org_defect = bitwise_and_rgb_with_binary(edge, new_bin_img)
            fore = bitwise_and_rgb_with_binary(cv2.imread(img_path), mask)
            fore_g_r_t = threshold_segmentation(extract_g_r(fore), 20)
            fore_g_r_t_ture = bitwise_and_rgb_with_binary(cv2.imread(img_path), fore_g_r_t)
            cv2.imwrite('defect_big.bmp', fore_g_r_t_ture)
            res = cv2.bitwise_or(new_bin_img, fore_g_r_t)
            white = find_reflection(img_path)

            # SVM prediction
            # Load the model
            # model, scaler = load_model('/Users/xs/PycharmProjects/super-tomato/svm_green.joblib')

            # Run prediction on the image
            # predicted_mask = predict_image_array(image, model, scaler)

            cv2.imshow('white', white)

            cv2.imshow('fore', fore)
            cv2.imshow('fore_g_r_t', fore_g_r_t)
            cv2.imshow('mask', mask)
            print('mask', mask.shape)
            print('filled', filled_img.shape)
            print('largest', new_bin_img.shape)
            print('rp', org_defect.shape)
            cv2.imshow('res', res)

            # lower_hsv = np.array([19, 108, 15])
            # upper_hsv = np.array([118, 198, 134])
            # max_connected_area = extract_max_connected_area(img_path, lower_hsv, upper_hsv)
            # cv2.imshow('Max Connected Area', max_connected_area)

            # Show the original image
            original_img = cv2.imread(img_path)
            cv2.imshow('Original', original_img)
            cv2.imshow('thresholded_s_l', thresholded_s_l)
            cv2.imshow('Largest Connected Component', new_bin_img)
            cv2.imshow('Filled', filled_img)
            cv2.imshow('Defect', defect)
            cv2.imshow('Org_defect', org_defect)
            cv2.imshow('otsu_thresholded', new_otsu_bin_img)

            # Show the contour
            cv2.imshow('Edge', edge)

            # Wait for a key press
            cv2.waitKey(0)

            # Close all windows
            cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
@ -1,217 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/4/12 15:04
# @Author  : TG
# @File    : main.py
# @Software: PyCharm

import socket
import sys
import numpy as np
import cv2
import root_dir
import time
import os
from root_dir import ROOT_DIR
from utils import PreSocket, receive_sock, parse_protocol, ack_sock, done_sock, DualSock, simple_sock, test_sock
import logging
from utils import threshold_segmentation, largest_connected_component, draw_tomato_edge, bitwise_and_rgb_with_binary, extract_s_l, get_tomato_dimensions, get_defect_info
from collections import deque
import time


def process_cmd(cmd: str, img: any, connected_sock: socket.socket) -> tuple:
    """
    Handle a command.

    :param cmd: command type
    :param data: command payload
    :param connected_sock: socket
    :param detector: model
    :return: whether processing succeeded
    """
    start_time = time.time()
    if cmd == 'IM':
        # image = cv2.imdecode(np.frombuffer(img, dtype=np.uint8), cv2.IMREAD_COLOR)
        # image = img
        threshold_s_l = 180
        # threshold_r_b = 15

        s_l = extract_s_l(img)
        # otsu_thresholded = ImageProcessor.otsu_threshold(s_l)
        # img_fore = ImageProcessor.bitwise_and_rgb_with_binary(img, otsu_thresholded)
        # img_fore_defect = ImageProcessor.extract_g_r(img_fore)
        # img_fore_defect = ImageProcessor.threshold_segmentation(img_fore_defect, threshold_r_b)

        # cv2.imshow('img_fore_defect', img_fore_defect)

        thresholded_s_l = threshold_segmentation(s_l, threshold_s_l)
        new_bin_img = largest_connected_component(thresholded_s_l)
        # zhongggggg = cv2.bitwise_or(new_bin_img, cv2.imread('defect_mask.bmp', cv2.IMREAD_GRAYSCALE))

        # cv2.imshow('zhongggggg', zhongggggg)

        # new_otsu_bin_img = ImageProcessor.largest_connected_component(otsu_thresholded)
        # filled_img, defect = ImageProcessor.fill_holes(new_bin_img)
        # defect = ImageProcessor.bitwise_and_rgb_with_binary(cv2.imread(img), defect)

        # cv2.imshow('defect', defect)

        edge, mask = draw_tomato_edge(img, new_bin_img)
        org_defect = bitwise_and_rgb_with_binary(edge, new_bin_img)
        # fore = ImageProcessor.bitwise_and_rgb_with_binary(cv2.imread(img), mask)
        # fore_g_r_t = ImageProcessor.threshold_segmentation(ImageProcessor.extract_g_r(fore), 20)
        # fore_g_r_t_ture = ImageProcessor.bitwise_and_rgb_with_binary(cv2.imread(img), fore_g_r_t)

        # cv2.imwrite('defect_big.bmp', fore_g_r_t_ture)

        # res = cv2.bitwise_or(new_bin_img, fore_g_r_t)
        # white = ImageProcessor.find_reflection(img)
        # cv2.imwrite('new_bin_img.bmp', new_bin_img)

        long_axis, short_axis = get_tomato_dimensions(mask)
        number_defects, total_pixels = get_defect_info(new_bin_img)
        rp = org_defect
        rp = cv2.cvtColor(rp, cv2.COLOR_BGR2RGB)
        # cv2.imwrite('rp1.bmp', rp)

    # elif cmd == 'TR':
    #     detector = WoodClass(w=4096, h=1200, n=8000, p1=0.8, debug_mode=False)
    #     model_name = None
    #     if "$" in data:
    #         data, model_name = data.split("$", 1)
    #         model_name = model_name + ".p"
    #     settings.data_path = data
    #     settings.model_path = ROOT_DIR / 'models' / detector.fit_pictures(data_path=settings.data_path, file_name=model_name)
    #     response = simple_sock(connected_sock, cmd_type=cmd, result=result)
    # elif cmd == 'MD':
    #     settings.model_path = data
    #     detector.load(path=settings.model_path)
    #     response = simple_sock(connected_sock, cmd_type=cmd, result=result)
    # elif cmd == 'KM':
    #     x_data, y_data, labels, img_names = detector.get_luminance_data(data, plot_2d=True)
    #     result = detector.data_adjustments(x_data, y_data, labels, img_names)
    #     result = ','.join([str(x) for x in result])
    #     response = simple_sock(connected_sock, cmd_type=cmd, result=result)

    else:
        logging.error(f'Invalid command, cmd is {cmd}')
        response = False

    end_time = time.time()
    elapsed_time = end_time - start_time
    print(f'Processing time: {elapsed_time} s')
    return long_axis, short_axis, number_defects, total_pixels, rp


def main(is_debug=False):
    file_handler = logging.FileHandler(os.path.join(ROOT_DIR, 'report.log'))
    file_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
    logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s',
                        handlers=[file_handler, console_handler],
                        level=logging.DEBUG)

    dual_sock = DualSock(connect_ip='127.0.0.1')

    while not dual_sock.status:
        dual_sock.reconnect()

    while True:
        long_axis_list = []
        short_axis_list = []
        defect_num_sum = 0
        total_defect_area_sum = 0
        rp = None

        for i in range(5):

            start_time = time.time()

            pack, next_pack = receive_sock(dual_sock)
            if pack == b"":
                time.sleep(2)
                dual_sock.reconnect()
                continue

            cmd, img = parse_protocol(pack)
            print(cmd)
            print(img.shape)

            end_time = time.time()
            elapsed_time = end_time - start_time
            print(f'Receive time: {elapsed_time} s')

            long_axis, short_axis, number_defects, total_pixels, rp = process_cmd(cmd=cmd, img=img, connected_sock=dual_sock)
            # print(long_axis, short_axis, number_defects, type(total_pixels), rp.shape)

            if i <= 2:
                long_axis_list.append(long_axis)
                short_axis_list.append(short_axis)
            if i == 1:
                rp_result = rp

            defect_num_sum += number_defects
            total_defect_area_sum += total_pixels

        long_axis = round(sum(long_axis_list) / 3)
        short_axis = round(sum(short_axis_list) / 3)
        # print(type(long_axis), type(short_axis), type(defect_num_sum), type(total_defect_area_sum), type(rp_result))
        response = test_sock(dual_sock, cmd_type=cmd, long_axis=long_axis, short_axis=short_axis,
                             defect_num=defect_num_sum, total_defect_area=total_defect_area_sum, rp=rp_result)
        print(long_axis, short_axis, defect_num_sum, total_defect_area_sum, rp_result.shape)

        # while True:
        #     result_buffer = []
        #     for _ in range(5):
        #         pack, next_pack = receive_sock(dual_sock)  # receive data; blocks when there is none; an empty string indicates an error
        #         if pack == b"":  # no data indicates an error
        #             time.sleep(5)
        #             dual_sock.reconnect()
        #             break
        #
        #         cmd, data = parse_protocol(pack)
        #         print(cmd)
        #         print(data)
        #
        #         result = process_cmd(cmd=cmd, data=data, connected_sock=dual_sock, detector=detector, settings=settings)
        #         result_buffer.append(result)  # append the result to the buffer
        #
        #         # process the 5 results here (merge, compare, etc.)
        #         final_result = combine_results(result_buffer)
        #
        #         # send the final result
        #         response = simple_sock(dual_sock, cmd_type=cmd, result=final_result)
        #         print(final_result)
        #         result_buffer = []


if __name__ == '__main__':
    # two ports
    # receive port 21122
    # send port 21123
    # received image: n_rows * n_bands * n_cols, float32
    # sent image: n_rows * n_cols, uint8
    main(is_debug=False)
    # test(r"D:\build-tobacco-Desktop_Qt_5_9_0_MSVC2015_64bit-Release\calibrated15.raw")
    # main()
    # debug_main()
    # test_run(all_data_dir=r'D:\数据')
    # with open(r'D:\数据\虫子\valid2.raw', 'rb') as f:
    #     data = np.frombuffer(f.read(), dtype=np.float32).reshape(600, 29, 1024).transpose(0, 2, 1)
    # plt.matshow(data[:, :, 10])
    # plt.show()
    # detector = SpecDetector('model_spec/model_29.p')
    # result = detector.predict(data)
    #
    # plt.matshow(result)
    # plt.show()
    # result = result.reshape((600, 1024))
@ -1,129 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/4/11 15:18
# @Author  : TG
# @File    : parameter calculation.py
# @Software: PyCharm

import cv2
import numpy as np
from scipy.ndimage.measurements import label, find_objects

def get_tomato_dimensions(edge_img):
    """
    From the binary tomato-edge contour image, compute the tomato's major axis,
    minor axis and the major/minor axis ratio.
    Two methods are available: the minimum-area bounding rectangle and the minimum enclosing circle.

    Parameters:
    edge_img (numpy.ndarray): binary tomato-edge contour image, black background, white tomato region.

    Returns:
    tuple: (major axis, minor axis, major/minor axis ratio)
    """
    # Minimum-area bounding rectangle
    rect = cv2.minAreaRect(cv2.findContours(edge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0])
    major_axis, minor_axis = rect[1]
    aspect_ratio = max(major_axis, minor_axis) / min(major_axis, minor_axis)

    # # Minimum enclosing circle
    # (x, y), radius = cv2.minEnclosingCircle(
    #     cv2.findContours(edge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0])
    # diameter = 2 * radius
    # aspect_ratio_circle = 1.0

    return (max(major_axis, minor_axis), min(major_axis, minor_axis), aspect_ratio)

def get_defect_info(defect_img):
    """
    From the binary defect image of the tomato region, compute the number of
    defect regions and their total area.

    Parameters:
    defect_img (numpy.ndarray): binary defect image, black background, white tomato region,
        defects as black connected components.

    Returns:
    tuple: (number of defect regions, per-defect pixel areas, total defect pixel area)
    """
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(defect_img, connectivity=4)
    max_area = max(stats[i, cv2.CC_STAT_AREA] for i in range(1, nb_components))
    areas = []
    for i in range(1, nb_components):
        area = stats[i, cv2.CC_STAT_AREA]
        if area != max_area:
            areas.append(area)
    number_defects = len(areas)
    total_pixels = sum(areas)
    return number_defects, areas, total_pixels


def connected_components_analysis(binary_image):
    """
    From a binary image, compute the number of black connected components,
    the pixel area of each, and the total black-pixel area.

    Parameters:
    binary_image (numpy.ndarray): binary image where 0 is white and 1 is black.

    Returns:
    num_components (int): number of black connected components.
    component_areas (list): pixel area of each black connected component.
    total_black_area (int): total black-pixel area.
    """
    # Label the connected components
    labeled_image, num_components = label(binary_image)

    # Get the pixel locations of each component
    slices = find_objects(labeled_image)

    # Compute the pixel area of each component
    component_areas = []
    for slice_obj in slices:
        component_area = np.sum(binary_image[slice_obj])
        component_areas.append(component_area)

    # Compute the total black-pixel area
    total_black_area = np.sum(binary_image)

    return num_components, component_areas, total_black_area


def main():
    # Read the images
    defect_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\Largest Connected Component_screenshot_15.04.2024.png', 0)
    edge_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\mask_screenshot_15.04.2024.png', 0)
    filled_image = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\Filled_screenshot_15.04.2024.png', 0)

    # print(defect_image.shape)
    # print(edge_image.shape)
    # print(filled_image.shape)

    # Binarize
    _, thresh_defect = cv2.threshold(defect_image, 127, 255, cv2.THRESH_BINARY_INV)
    _, thresh_edge = cv2.threshold(edge_image, 127, 255, cv2.THRESH_BINARY)
    _, thresh_filled = cv2.threshold(filled_image, 127, 255, cv2.THRESH_BINARY)

    print(thresh_defect.shape)
    print(thresh_edge.shape)
    print(thresh_filled.shape)

    # # Use the binary images directly
    # thresh_defect = defect_image
    # thresh_edge = edge_image
    # thresh_filled = filled_image

    # Major axis, minor axis and axis ratio of the tomato
    major_axis, minor_axis, aspect_ratio = get_tomato_dimensions(thresh_edge)

    # Number and total area of the defect regions
    num_defects, areas, total_pixels = get_defect_info(thresh_defect)

    # Number of black connected components, per-component areas and total black area
    num_components, component_areas, total_black_area = connected_components_analysis(thresh_filled)

    print(f'Tomato major axis: {major_axis}, minor axis: {minor_axis}, major/minor ratio: {aspect_ratio}')
    print(f'Number of defect regions: {num_defects}, pixel counts: {areas}, total defect area: {total_pixels}')
    print(f'Number of black connected components: {num_components}, pixel counts: {component_areas}, total black-pixel area: {total_black_area}')

if __name__ == '__main__':
    main()

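
To make the defect-counting convention in get_defect_info() concrete, here is a small self-contained check that is not part of the repository: a synthetic mask with one large white blob standing in for the fruit body and two small blobs standing in for defects. It mirrors the connectedComponentsWithStats call above and drops the largest component, so only the two small blobs are counted; the array contents are invented for illustration.

import cv2
import numpy as np

# Synthetic binary mask: one large blob (fruit body) and two small blobs (defect stand-ins).
mask = np.zeros((100, 100), dtype=np.uint8)
cv2.circle(mask, (50, 50), 30, 255, -1)           # largest component, excluded like the fruit body
cv2.rectangle(mask, (5, 5), (9, 9), 255, -1)      # small blob 1
cv2.rectangle(mask, (90, 90), (93, 94), 255, -1)  # small blob 2

n, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=4)
areas = stats[1:, cv2.CC_STAT_AREA]                         # skip label 0 (background)
defect_areas = [int(a) for a in areas if a != areas.max()]  # drop the largest, as get_defect_info does

print(len(defect_areas), sum(defect_areas))  # expected: 2 defects and their summed pixel area
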
@ -1,211 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/4/12 16:54
# @Author  : TG
# @File    : qt_test.py
# @Software: PyCharm

import numpy as np
import socket
import logging
import matplotlib.pyplot as plt
import cv2
import os
import time
from utils import DualSock, try_connect, receive_sock, parse_protocol, ack_sock, done_sock


def rec_socket(recv_sock: socket.socket, cmd_type: str, ack: bool) -> bool:
    if ack:
        cmd = 'A' + cmd_type
    else:
        cmd = 'D' + cmd_type
    while True:
        try:
            temp = recv_sock.recv(1)
        except ConnectionError as e:
            logging.error(f'Connection error, error code:\n{e}')
            return False
        except TimeoutError as e:
            logging.error(f'Timed out, error code: \n{e}')
            return False
        except Exception as e:
            logging.error(f'Unknown error, error code: \n{e}')
            return False
        if temp == b'\xaa':
            break

    # Get the message length
    temp = b''
    while len(temp) < 4:
        try:
            temp += recv_sock.recv(1)
        except Exception as e:
            logging.error(f'Failed to receive the message length, error code: \n{e}')
            return False
    try:
        data_len = int.from_bytes(temp, byteorder='big')
    except Exception as e:
        logging.error(f'Conversion failed, error code \n{e}, \nmessage content\n{temp}')
        return False

    # Read the message body
    temp = b''
    while len(temp) < data_len:
        try:
            temp += recv_sock.recv(data_len)
        except Exception as e:
            logging.error(f'Failed to receive the message body, error code: \n{e},\nmessage content\n{temp}')
            return False
    data = temp
    if cmd.strip().upper() != data[:4].decode('ascii').strip().upper():
        logging.error(f'Client received a wrong command,\ncommand content\n{data}')
        return False
    else:
        if cmd == 'DIM':
            print(data)

    # Verify the message trailer
    temp = b''
    while len(temp) < 3:
        try:
            temp += recv_sock.recv(1)
        except Exception as e:
            logging.error(f'Failed to receive the message checksum, error code: \n{e}')
            return False
    if temp == b'\xff\xff\xbb':
        return True
    else:
        logging.error(f"Received an otherwise correct message with a wrong trailer,\n data: {data}")
        return False


# def main():
#     socket_receive = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     socket_receive.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#     socket_receive.bind(('127.0.0.1', 21123))
#     socket_receive.listen(5)
#     socket_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     socket_send.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#     socket_send.bind(('127.0.0.1', 21122))
#     socket_send.listen(5)
#     print('Waiting for connections')
#     socket_send_1, receive_addr_1 = socket_send.accept()
#     print("Connected:", receive_addr_1)
#     # socket_send_2 = socket_send_1
#     socket_send_2, receive_addr_2 = socket_receive.accept()
#     print("Connected:", receive_addr_2)
#     while True:
#         cmd = input('Enter a command: ').strip().upper()
#         if cmd == 'IM':
#             with open('data/newrawfile_ref.raw', 'rb') as f:
#                 data = np.frombuffer(f.read(), dtype=np.float32).reshape(750, 288, 384)
#             data = data[:, [91, 92, 93, 94, 95, 96, 97, 98, 99, 100], :]
#             n_rows, n_bands, n_cols = data.shape[0], data.shape[1], data.shape[2]
#             print(f'n_rows:{n_rows}, n_bands:{n_bands}, n_cols:{n_cols}')
#             n_rows, n_cols, n_bands = [x.to_bytes(2, byteorder='big') for x in [n_rows, n_cols, n_bands]]
#             data = data.tobytes()
#             length = len(data) + 10
#             print(f'length: {length}')
#             length = length.to_bytes(4, byteorder='big')
#             msg = b'\xaa' + length + (' ' + cmd).upper().encode('ascii') + n_rows + n_cols + n_bands + data + b'\xff\xff\xbb'
#             socket_send_1.send(msg)
#             print('Sent')
#             result = socket_send_2.recv(5)
#             length = int.from_bytes(result[1:5], byteorder='big')
#             result = b''
#             while len(result) < length:
#                 result += socket_send_2.recv(length)
#             print(result)
#             data = result[4:length].decode()
#             print(data)

def main():
    socket_receive = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket_receive.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    socket_receive.bind(('127.0.0.1', 21123))
    socket_receive.listen(5)
    socket_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket_send.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    socket_send.bind(('127.0.0.1', 21122))
    socket_send.listen(5)
    print('Waiting for connections')
    socket_send_1, receive_addr_1 = socket_send.accept()
    print("Connected:", receive_addr_1)
    socket_send_2, receive_addr_2 = socket_receive.accept()
    print("Connected:", receive_addr_2)

    # while True:
    #     cmd = input().strip().upper()
    #     if cmd == 'IM':
    #         image_dir = r'D:\project\Tomato\20240410tomatoRGBtest2\data'
    #         image_paths = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith(".bmp")]
    #         for image_path in image_paths:
    #             img = cv2.imread(image_path)
    #             img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    #             img = np.asarray(img, dtype=np.uint8)
    #             width = img.shape[0]
    #             height = img.shape[1]
    #             print(width, height)
    #             img_bytes = img.tobytes()
    #             length = len(img_bytes) + 8
    #             print(length)
    #             length = length.to_bytes(4, byteorder='big')
    #             width = width.to_bytes(2, byteorder='big')
    #             height = height.to_bytes(2, byteorder='big')
    #             send_message = b'\xaa' + length + (' ' + cmd).upper().encode('ascii') + width + height + img_bytes + b'\xff\xff\xbb'
    #             socket_send_1.send(send_message)
    #             print('Sent')
    #             result = socket_send_2.recv(5)
    #             print(result)
    while True:
        cmd = input().strip().upper()
        if cmd == 'IM':
            image_dir = r'D:\project\Tomato\20240410tomatoRGBtest2\data'
            send_images(image_dir, socket_send_1, socket_send_2)

def send_images(image_dir, socket_send_1, socket_send_2):
    image_paths = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith(".bmp")]
    num_images = len(image_paths)
    num_groups = (num_images + 4) // 5

    for group_idx in range(num_groups):
        start = group_idx * 5
        end = start + 5
        group_images = image_paths[start:end]

        group_start = time.time()

        for image_path in group_images:

            img_start = time.time()

            img = cv2.imread(image_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = np.asarray(img, dtype=np.uint8)
            width = img.shape[0]
            height = img.shape[1]
            print(width, height)
            img_bytes = img.tobytes()
            length = len(img_bytes) + 8
            print(length)
            length = length.to_bytes(4, byteorder='big')
            width = width.to_bytes(2, byteorder='big')
            height = height.to_bytes(2, byteorder='big')
            send_message = b'\xaa' + length + (' ' + 'IM').upper().encode('ascii') + width + height + img_bytes + b'\xff\xff\xbb'
            socket_send_1.send(send_message)

            img_end = time.time()
            print(f'Image send time: {img_end - img_start} s')

        print('Images sent')

        group_end = time.time()
        print(f'Group {group_idx + 1} send time: {group_end - group_start} s')

        result = socket_send_2.recv(5)
        print(f'Group {group_idx + 1} result: {result}')


if __name__ == '__main__':
    main()
8
20240410RGBtest1/super-tomato/.idea/.gitignore
generated
vendored
@ -1,8 +0,0 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
@ -1,48 +0,0 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyInterpreterInspection" enabled="false" level="WARNING" enabled_by_default="false" />
    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
      <option name="ignoredPackages">
        <value>
          <list size="34">
            <item index="0" class="java.lang.String" itemvalue="qt5-applications" />
            <item index="1" class="java.lang.String" itemvalue="python-dateutil" />
            <item index="2" class="java.lang.String" itemvalue="qt5-tools" />
            <item index="3" class="java.lang.String" itemvalue="PyQt5-sip" />
            <item index="4" class="java.lang.String" itemvalue="cycler" />
            <item index="5" class="java.lang.String" itemvalue="numpy" />
            <item index="6" class="java.lang.String" itemvalue="requests" />
            <item index="7" class="java.lang.String" itemvalue="func-timeout" />
            <item index="8" class="java.lang.String" itemvalue="PyQt5-Qt5" />
            <item index="9" class="java.lang.String" itemvalue="PyQt5" />
            <item index="10" class="java.lang.String" itemvalue="urllib3" />
            <item index="11" class="java.lang.String" itemvalue="pyparsing" />
            <item index="12" class="java.lang.String" itemvalue="wincertstore" />
            <item index="13" class="java.lang.String" itemvalue="six" />
            <item index="14" class="java.lang.String" itemvalue="kiwisolver" />
            <item index="15" class="java.lang.String" itemvalue="packaging" />
            <item index="16" class="java.lang.String" itemvalue="pyserial" />
            <item index="17" class="java.lang.String" itemvalue="torch" />
            <item index="18" class="java.lang.String" itemvalue="click" />
            <item index="19" class="java.lang.String" itemvalue="contourpy" />
            <item index="20" class="java.lang.String" itemvalue="pandas" />
            <item index="21" class="java.lang.String" itemvalue="fonttools" />
            <item index="22" class="java.lang.String" itemvalue="pyqtgraph" />
            <item index="23" class="java.lang.String" itemvalue="matplotlib" />
            <item index="24" class="java.lang.String" itemvalue="typing_extensions" />
            <item index="25" class="java.lang.String" itemvalue="charset-normalizer" />
            <item index="26" class="java.lang.String" itemvalue="pytz" />
            <item index="27" class="java.lang.String" itemvalue="idna" />
            <item index="28" class="java.lang.String" itemvalue="Pillow" />
            <item index="29" class="java.lang.String" itemvalue="scipy" />
            <item index="30" class="java.lang.String" itemvalue="threadpoolctl" />
            <item index="31" class="java.lang.String" itemvalue="thop" />
            <item index="32" class="java.lang.String" itemvalue="tensorboard" />
            <item index="33" class="java.lang.String" itemvalue="pycocotools" />
          </list>
        </value>
      </option>
    </inspection_tool>
  </profile>
</component>
@ -1,6 +0,0 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
7
20240410RGBtest1/super-tomato/.idea/misc.xml
generated
@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (deepo)" project-jdk-type="Python SDK" />
  <component name="PyPackaging">
    <option name="earlyReleasesAsUpgrades" value="true" />
  </component>
</project>
8
20240410RGBtest1/super-tomato/.idea/modules.xml
generated
@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/super-tomato.iml" filepath="$PROJECT_DIR$/.idea/super-tomato.iml" />
    </modules>
  </component>
</project>
6
20240410RGBtest1/super-tomato/.idea/other.xml
generated
@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="PySciProjectComponent">
    <option name="PY_MATPLOTLIB_IN_TOOLWINDOW" value="false" />
  </component>
</project>
@ -1,8 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>
@ -1,14 +0,0 @@
import os
from PIL import Image


def convert_images(directory):
    for filename in os.listdir(directory):
        if filename.endswith('.png'):
            filepath = os.path.join(directory, filename)
            with Image.open(filepath) as img:
                # Only rewrite files that are actually RGBA
                if img.mode == 'RGBA':
                    img = img.convert('RGB')
                    img.save(filepath)


# Call the function; replace the path with your own image directory
convert_images(r'D:\同步盘\project\Tomato\20240410RGBtest2\super-tomato\images')
@ -1,65 +0,0 @@
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D


def plot_color_spaces(image_path, mask_path):
    # Read the original image and the mask image
    image = cv2.imread(image_path)
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

    # Make sure the mask is binary
    _, mask = cv2.threshold(mask, 128, 255, cv2.THRESH_BINARY)

    # Extract the calyx (foreground) pixels and the background pixels
    flower_parts = image[mask == 255]
    background = image[mask == 0]
    background = background[::50]  # keep every 50th background pixel to thin out the scatter plot

    # Convert to the HSV and Lab color spaces
    flower_parts_hsv = cv2.cvtColor(flower_parts.reshape(-1, 1, 3), cv2.COLOR_BGR2HSV).reshape(-1, 3)
    flower_parts_lab = cv2.cvtColor(flower_parts.reshape(-1, 1, 3), cv2.COLOR_BGR2LAB).reshape(-1, 3)

    background_hsv = cv2.cvtColor(background.reshape(-1, 1, 3), cv2.COLOR_BGR2HSV).reshape(-1, 3)
    background_lab = cv2.cvtColor(background.reshape(-1, 1, 3), cv2.COLOR_BGR2LAB).reshape(-1, 3)

    # 3D scatter plot in RGB space
    fig_rgb = plt.figure()
    ax_rgb = fig_rgb.add_subplot(111, projection='3d')
    ax_rgb.scatter(flower_parts[:, 2], flower_parts[:, 1], flower_parts[:, 0], c='r', label='Flower Parts', alpha=0.01)
    ax_rgb.scatter(background[:, 2], background[:, 1], background[:, 0], c='b', label='Background', alpha=0.01)
    ax_rgb.set_title('RGB Color Space')
    ax_rgb.set_xlabel('Red')
    ax_rgb.set_ylabel('Green')
    ax_rgb.set_zlabel('Blue')
    ax_rgb.legend()
    plt.show()

    # 3D scatter plot in HSV space
    fig_hsv = plt.figure()
    ax_hsv = fig_hsv.add_subplot(111, projection='3d')
    ax_hsv.scatter(flower_parts_hsv[:, 0], flower_parts_hsv[:, 1], flower_parts_hsv[:, 2], c='r', label='Flower Parts', alpha=0.01)
    ax_hsv.scatter(background_hsv[:, 0], background_hsv[:, 1], background_hsv[:, 2], c='b', label='Background', alpha=0.01)
    ax_hsv.set_title('HSV Color Space')
    ax_hsv.set_xlabel('Hue')
    ax_hsv.set_ylabel('Saturation')
    ax_hsv.set_zlabel('Value')
    ax_hsv.legend()
    plt.show()

    # 3D scatter plot in Lab space (titles and labels now go to ax_lab instead of the HSV axes)
    fig_lab = plt.figure()
    ax_lab = fig_lab.add_subplot(111, projection='3d')
    ax_lab.scatter(flower_parts_lab[:, 0], flower_parts_lab[:, 1], flower_parts_lab[:, 2], c='r', label='Flower Parts', alpha=0.01)
    ax_lab.scatter(background_lab[:, 0], background_lab[:, 1], background_lab[:, 2], c='b', label='Background', alpha=0.01)
    ax_lab.set_title('LAB Color Space')
    ax_lab.set_xlabel('L')
    ax_lab.set_ylabel('A')
    ax_lab.set_zlabel('B')
    ax_lab.legend()
    plt.show()


# Call the function; replace the paths below with your own image paths
plot_color_spaces('/Users/xs/PycharmProjects/super-tomato/datasets_green/train/img/2.bmp',
                  '/Users/xs/PycharmProjects/super-tomato/datasets_green/train/label/2.png')
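Once these scatter plots show the calyx and background separating in one of the color spaces, the same observation can be turned into a mask with a fixed range check. A minimal sketch, assuming HSV separation; the lower/upper bounds only mirror the commented-out lower_hsv/upper_hsv values that appear later in the main script and are placeholders to be read off the plots, not tuned values:

import cv2
import numpy as np

def hsv_range_mask(bgr_image, lower=(19, 108, 15), upper=(118, 198, 134)):
    """Return a 0/255 mask of pixels whose HSV values fall inside [lower, upper]."""
    hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, np.array(lower), np.array(upper))

# Example: mask = hsv_range_mask(cv2.imread('some_image.bmp'))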
@ -1,157 +0,0 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score
import os
from tqdm import tqdm
import time
import matplotlib.pyplot as plt


class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(64, 1, kernel_size=3, padding=1)

    def forward(self, x):
        x = torch.relu(self.conv1(x))
        x = torch.sigmoid(self.conv2(x))
        return x


class ImageDataset(Dataset):
    def __init__(self, img_paths, mask_paths):
        self.img_paths = img_paths
        self.mask_paths = mask_paths

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, idx):
        img = np.array(Image.open(self.img_paths[idx]).convert('RGB')).transpose((2, 0, 1))  # force RGB so there are always 3 channels
        if self.mask_paths[0] is not None:
            mask = np.array(Image.open(self.mask_paths[idx]).convert('I'))  # 32-bit grayscale mask
            mask = mask / np.max(mask)  # normalize to 0-1
            return img, mask[np.newaxis, :]
        else:
            # No masks available (pure inference); return only the image so the default collate works
            return img


def train_model(dataloader, model, criterion, optimizer, epochs):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    best_accuracy = 0.0
    for epoch in tqdm(range(epochs), desc="Training"):
        for img, mask in dataloader:
            img = img.float().to(device)
            mask = mask.float().to(device)

            optimizer.zero_grad()
            outputs = model(img)
            loss = criterion(outputs, mask)
            loss.backward()
            optimizer.step()

            # Binarize the model output
            preds = outputs.detach().cpu().numpy() > 0.5
            mask = (mask.cpu().numpy() > 0.5)  # binarize the mask as well

            # Accuracy, precision and recall
            accuracy = accuracy_score(mask.flatten(), preds.flatten())
            precision = precision_score(mask.flatten(), preds.flatten())
            recall = recall_score(mask.flatten(), preds.flatten())

            print(f'Epoch {epoch+1}/{epochs}, Loss: {loss.item()}, Accuracy: {accuracy}, Precision: {precision}, Recall: {recall}')

            # Save the model whenever the accuracy improves
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                torch.save(model.state_dict(), 'best_model.pth')

    return model


def predict(model, img_path):
    img = np.array(Image.open(img_path)).transpose((2, 0, 1))  # reorder to (C, H, W)
    img = torch.from_numpy(img).float().unsqueeze(0)
    model.eval()
    with torch.no_grad():
        outputs = model(img)
    pred = outputs.squeeze().numpy()
    return pred


def main(train_img_folder, train_mask_folder, test_img_folder, test_mask_folder, epochs, img_path='/Users/xs/PycharmProjects/super-tomato/datasets_green/test/label'):
    # Define device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Create model
    model = SimpleCNN()
    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters())

    # Create data loaders (ImageDataset expects lists of file paths, so expand the folders first)
    train_imgs = sorted(os.path.join(train_img_folder, f) for f in os.listdir(train_img_folder))
    train_masks = sorted(os.path.join(train_mask_folder, f) for f in os.listdir(train_mask_folder))
    train_dataset = ImageDataset(train_imgs, train_masks)
    train_dataloader = DataLoader(train_dataset, batch_size=1)

    # Train model
    model = train_model(train_dataloader, model, criterion, optimizer, epochs)

    # Create test data loaders
    test_imgs = sorted(os.path.join(test_img_folder, f) for f in os.listdir(test_img_folder))
    test_masks = sorted(os.path.join(test_mask_folder, f) for f in os.listdir(test_mask_folder))
    test_dataset = ImageDataset(test_imgs, test_masks)
    test_dataloader = DataLoader(test_dataset, batch_size=1)

    # Use the trained model to predict
    for img, mask in test_dataloader:
        img = img.float().to(device)
        mask = mask.float().to(device)

        start_time = time.time()
        outputs = model(img)
        elapsed_time = time.time() - start_time

        # Binarize the model's output and the mask before scoring
        preds = outputs.detach().cpu().numpy() > 0.5
        mask = mask.cpu().numpy() > 0.5

        # Accuracy, precision and recall
        accuracy = accuracy_score(mask.flatten(), preds.flatten())
        precision = precision_score(mask.flatten(), preds.flatten())
        recall = recall_score(mask.flatten(), preds.flatten())

        print(f'Prediction for {img_path} saved, Time: {elapsed_time:.3f} seconds, Accuracy: {accuracy}, Precision: {precision}, Recall: {recall}')


# Example call
main('/Users/xs/PycharmProjects/super-tomato/datasets_green/train/img',
     '/Users/xs/PycharmProjects/super-tomato/datasets_green/train/label',
     '/Users/xs/PycharmProjects/super-tomato/datasets_green/test/img',
     '/Users/xs/PycharmProjects/super-tomato/datasets_green/test/label', 1)


def predict_and_display(model_path, img_paths):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the model
    model = SimpleCNN()
    model.load_state_dict(torch.load(model_path))
    model.to(device)
    model.eval()

    dataset = ImageDataset(img_paths, [None] * len(img_paths))  # no masks are needed here, so pass a list of None
    dataloader = DataLoader(dataset, batch_size=1)

    for i, img in enumerate(dataloader):
        img = img.float().to(device)
        with torch.no_grad():
            outputs = model(img)
        pred = outputs.detach().cpu().numpy() > 0.5

        # Show the prediction
        plt.imshow(pred[0, 0, :, :], cmap='gray')
        plt.title(f'Predicted Mask for {img_paths[i]}')
        plt.show()


# Example call
predict_and_display('best_model.pth', ['/Users/xs/PycharmProjects/super-tomato/datasets_green/test/img/5.bmp'])
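Because most pixels in these masks are background, pixel accuracy alone can look good even for a poor segmentation. A minimal sketch of an IoU (intersection-over-union) helper that could be reported alongside accuracy_score; the function name and placement are my own, not part of the repository:

import numpy as np

def iou_score(mask_true, mask_pred):
    """IoU between two boolean masks; returns 1.0 when both masks are empty."""
    mask_true = np.asarray(mask_true, dtype=bool)
    mask_pred = np.asarray(mask_pred, dtype=bool)
    intersection = np.logical_and(mask_true, mask_pred).sum()
    union = np.logical_or(mask_true, mask_pred).sum()
    return 1.0 if union == 0 else intersection / union

# Usage inside the evaluation loop, next to accuracy_score:
# print('IoU:', iou_score(mask.flatten(), preds.flatten()))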
@ -1,85 +0,0 @@
import numpy as np
from PIL import Image
from sklearn.ensemble import RandomForestClassifier
import os
import time


def load_image_data(img_path, label_path):
    """Load an image and its label and convert them into model input format."""
    image = Image.open(img_path)
    label = Image.open(label_path).convert('L')

    image_np = np.array(image)
    label_np = np.array(label)

    # Flatten the image into (n_samples, n_features)
    n_samples = image_np.shape[0] * image_np.shape[1]
    n_features = image_np.shape[2]  # RGB channels
    image_np = image_np.reshape((n_samples, n_features))
    label_np = label_np.reshape((n_samples,))

    # Binarize the labels
    label_np = (label_np > 128).astype(int)

    return image_np, label_np


def train_model(X, y, n_estimators=100):
    """Train the random forest model."""
    model = RandomForestClassifier(n_estimators=n_estimators)
    model.fit(X, y)
    return model


def predict_and_save(model, image_path, output_path):
    """Predict a full image and save the result as a grayscale mask."""
    image = Image.open(image_path)
    image_np = np.array(image)
    n_samples = image_np.shape[0] * image_np.shape[1]
    image_np = image_np.reshape((n_samples, -1))

    # Predict per pixel
    predicted_labels = model.predict(image_np)
    predicted_labels = predicted_labels.reshape((image.size[1], image.size[0]))  # use the correct (height, width) order

    # Save the prediction
    output_image = Image.fromarray((predicted_labels * 255).astype('uint8'), 'L')  # 'L' for grayscale
    output_image.save(output_path)


def process_folder(train_folder):
    X, y = [], []
    img_folder = os.path.join(train_folder, "img")
    label_folder = os.path.join(train_folder, "label")

    for filename in os.listdir(img_folder):
        img_path = os.path.join(img_folder, filename)
        label_path = os.path.join(label_folder, filename.replace('.bmp', '.png'))

        img_data, label_data = load_image_data(img_path, label_path)
        X.append(img_data)
        y.append(label_data)

    # Stack the per-image arrays into single training matrices
    X = np.vstack(X)
    y = np.concatenate(y)

    # Train the model
    return train_model(X, y)


# Example usage
train_folder = '/Users/xs/PycharmProjects/super-tomato/datasets_green/train'
t1 = time.time()
model = process_folder(train_folder)
t2 = time.time()
print(f'Model training time: {t2 - t1:.2f} s')

# Predict every image in the test folder and save the masks
test_folder = '/Users/xs/PycharmProjects/super-tomato/tomato_img_25'
for test_filename in os.listdir(test_folder):
    test_image_path = os.path.join(test_folder, test_filename)
    output_path = os.path.join(test_folder, "predicted_" + test_filename)
    predict_and_save(model, test_image_path, output_path)
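Before trusting the per-pixel random forest, it is worth checking it on pixels it never saw during fitting. A small sketch under the assumption that X and y are the arrays built by the folder-processing step above; the split ratio is arbitrary and the helper name is my own, not code from the repository:

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report

def quick_holdout_check(X, y, n_estimators=100, test_size=0.2):
    """Fit on a random 80% of the pixels and report precision/recall on the remaining 20%."""
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=test_size, random_state=0)
    clf = RandomForestClassifier(n_estimators=n_estimators, n_jobs=-1)
    clf.fit(X_tr, y_tr)
    print(classification_report(y_te, clf.predict(X_te)))
    return clf

Splitting by whole images rather than by pixels would be stricter, since neighbouring pixels of the same photo leak information across a random pixel split.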
@ -1,169 +0,0 @@
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import os
import numpy as np

# Downsampling (encoder) block of the U-Net
class UNetDown(nn.Module):
    def __init__(self, in_size, out_size):
        super(UNetDown, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_size, out_size, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_size, out_size, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2)
        )

    def forward(self, x):
        return self.conv(x)

# Upsampling (decoder) block of the U-Net
class UNetUp(nn.Module):
    def __init__(self, in_size, out_size):
        super(UNetUp, self).__init__()
        self.up = nn.ConvTranspose2d(in_size, out_size, 2, stride=2)
        self.conv = nn.Sequential(
            nn.Conv2d(in_size, out_size, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_size, out_size, 3, padding=1),
            nn.ReLU(inplace=True)
        )

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad x1 so it matches the skip connection x2
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        x1 = nn.functional.pad(x1, [
            diffX // 2, diffX - diffX // 2,
            diffY // 2, diffY - diffY // 2
        ])

        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


# Assemble the full U-Net model
class UNet(nn.Module):
    def __init__(self):
        super(UNet, self).__init__()
        self.down1 = UNetDown(3, 64)
        self.down2 = UNetDown(64, 128)
        self.down3 = UNetDown(128, 256)
        self.down4 = UNetDown(256, 512)
        self.middle_conv = nn.Sequential(
            nn.Conv2d(512, 1024, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, 3, padding=1),
            nn.ReLU(inplace=True)
        )
        self.up1 = UNetUp(1024, 512)
        self.up2 = UNetUp(512, 256)
        self.up3 = UNetUp(256, 128)
        self.up4 = UNetUp(128, 64)
        self.final_conv = nn.Conv2d(64, 1, 1)

    def forward(self, x):
        x1 = self.down1(x)
        x2 = self.down2(x1)
        x3 = self.down3(x2)
        x4 = self.down4(x3)
        x_middle = self.middle_conv(x4)
        x = self.up1(x_middle, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.final_conv(x)
        return torch.sigmoid(x)

# Create the model
model = UNet()
print(model)


class ImageDataset(Dataset):
    def __init__(self, image_dir, mask_dir, transform=None, size=(320, 458)):
        self.image_dir = image_dir
        self.mask_dir = mask_dir
        self.transform = transform
        self.size = size
        self.images = os.listdir(image_dir)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_name = self.images[idx]
        img_path = os.path.join(self.image_dir, img_name)
        mask_path = os.path.join(self.mask_dir, img_name.replace('.bmp', '.png'))
        image = Image.open(img_path).convert("RGB")
        mask = Image.open(mask_path).convert("L")  # make sure the mask is a single-channel grayscale image

        # Resize image and mask
        resize = transforms.Resize(self.size)
        image = resize(image)
        mask = resize(mask)

        if self.transform:
            image = self.transform(image)
            mask = self.transform(mask)

        # ToTensor() already scales the mask to [0, 1]; binarize against 0.5
        # (comparing against 128 here would always be False and yield an all-zero mask)
        mask = (mask > 0.5).type(torch.FloatTensor)
        return image, mask

transform = transforms.Compose([
    transforms.ToTensor(),
])


def test_and_save(model, test_dir, save_dir):
    model.eval()
    transform = transforms.Compose([transforms.ToTensor()])
    test_images = os.listdir(test_dir)
    for img_name in test_images:
        img_path = os.path.join(test_dir, img_name)
        image = Image.open(img_path).convert("RGB")
        image = transform(image).unsqueeze(0)  # add a batch dimension

        with torch.no_grad():
            prediction = model(image)

        prediction = prediction.squeeze(0).squeeze(0)  # drop the batch and channel dimensions
        prediction = (prediction > 0.5).type(torch.uint8)  # binarize
        save_image = Image.fromarray(prediction.numpy() * 255, 'L')
        save_image.save(os.path.join(save_dir, img_name))


def train(model, loader, optimizer, criterion, epochs):
    model.train()
    for epoch in range(epochs):
        for images, masks in loader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()
        print(f"Epoch {epoch+1}, Loss: {loss.item()}")

train_dataset = ImageDataset('/Users/xs/PycharmProjects/super-tomato/datasets_green/train/img', '/Users/xs/PycharmProjects/super-tomato/datasets_green/train/label', transform=transform)
train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)

# UNet class defined above
model = UNet()
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train the model
train(model, train_loader, optimizer, criterion, epochs=50)

test_and_save(model, '/Users/xs/PycharmProjects/super-tomato/tomato_img_25', '/Users/xs/PycharmProjects/super-tomato/tomato_img_25/pre')
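As written, the trained U-Net only exists in memory and is lost when the script exits. A short sketch of persisting and restoring the weights with the standard state_dict mechanism; the file name is arbitrary:

import torch

# After train(...) completes:
torch.save(model.state_dict(), 'unet_tomato.pth')

# Later, to reuse the weights for inference:
restored = UNet()
restored.load_state_dict(torch.load('unet_tomato.pth', map_location='cpu'))
restored.eval()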
@ -1,57 +0,0 @@
import cv2
import numpy as np


def segment_image_by_variance(image_path, m, n, variance_threshold):
    # Read the image as grayscale
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    if image is None:
        print("Error loading image")
        return None

    # Image height and width
    h, w = image.shape

    # Size of each block
    block_h, block_w = h // m, w // n

    # Blank output image
    segmented_image = np.zeros_like(image)

    # Iterate over every block
    for row in range(m):
        for col in range(n):
            # Block position
            y1, x1 = row * block_h, col * block_w
            y2, x2 = y1 + block_h, x1 + block_w

            # Extract the block
            block = image[y1:y2, x1:x2]

            # Compute its variance
            variance = np.var(block)

            # Label the corresponding block in the output image
            if variance > variance_threshold:
                segmented_image[y1:y2, x1:x2] = 1
            else:
                segmented_image[y1:y2, x1:x2] = 0

    # Stretch the values to the 0-255 range so the result can be displayed
    segmented_image *= 255

    return segmented_image


# Example usage
image_path = '/Users/xs/PycharmProjects/super-tomato/tomato_img_25/60.bmp'  # replace with your tomato image path
m, n = 300, 300  # number of blocks along each axis
variance_threshold = 80  # variance threshold

segmented_image = segment_image_by_variance(image_path, m, n, variance_threshold)

if segmented_image is not None:
    cv2.imshow("Segmented Image", segmented_image)
    cv2.imshow("Original Image", cv2.imread(image_path))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
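The nested Python loop over 300 x 300 blocks is slow; the same per-block variance can be computed in one shot with a reshape. A sketch of that alternative formulation, assuming the image is first cropped so its height and width are divisible by m and n; it is illustrative, not code from the repository:

import numpy as np

def block_variance_mask(gray, m, n, variance_threshold):
    """Return a 0/255 mask marking blocks whose variance exceeds the threshold."""
    h, w = gray.shape
    bh, bw = h // m, w // n
    cropped = gray[:bh * m, :bw * n].astype(np.float32)
    # Split into an (m, n, bh, bw) stack of blocks and take the variance of each block
    blocks = cropped.reshape(m, bh, n, bw).swapaxes(1, 2)
    var = blocks.reshape(m, n, -1).var(axis=2)
    mask = (var > variance_threshold).astype(np.uint8) * 255
    # Blow each block label back up to pixel resolution
    return np.kron(mask, np.ones((bh, bw), dtype=np.uint8))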
@ -1,295 +0,0 @@
import cv2
import numpy as np
import os
import argparse
# from svm import predict_image_array, load_model


# Extract the tomato using the S+L image
def extract_s_l(image_path):
    image = cv2.imread(image_path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    s_channel = hsv[:, :, 1]
    l_channel = lab[:, :, 0]
    result = cv2.add(s_channel, l_channel)
    return result

def find_reflection(image_path, threshold=190):
    # Read the image
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # Threshold segmentation
    _, reflection = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)

    return reflection

def otsu_threshold(image):
    # Convert to grayscale first if needed
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Otsu thresholding
    _, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    return binary

# Extract the calyx using the G-R image
def extract_g_r(image):
    # image = cv2.imread(image_path)
    g_channel = image[:, :, 1]
    r_channel = image[:, :, 2]
    result = cv2.subtract(cv2.multiply(g_channel, 1.5), r_channel)
    return result


# Extract the tomato using the R-B image
def extract_r_b(image_path):
    image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    b_channel = image[:, :, 0]
    result = cv2.subtract(r_channel, b_channel)
    return result

def extract_r_g(image_path):
    image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    g_channel = image[:, :, 1]
    result = cv2.subtract(r_channel, g_channel)
    return result

def threshold_segmentation(image, threshold, color=255):
    _, result = cv2.threshold(image, threshold, color, cv2.THRESH_BINARY)
    return result

def bitwise_operation(image1, image2, operation='and'):
    if operation == 'and':
        result = cv2.bitwise_and(image1, image2)
    elif operation == 'or':
        result = cv2.bitwise_or(image1, image2)
    else:
        raise ValueError("operation must be 'and' or 'or'")
    return result

def largest_connected_component(bin_img):
    # Find the connected components with their statistics
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(bin_img, connectivity=8)

    # Pick the largest component (excluding the background)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # Build a new binary image that keeps only the largest component
    new_bin_img = np.zeros_like(bin_img)
    new_bin_img[labels == largest_label] = 255

    return new_bin_img

def close_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    closed_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
    return closed_img

def open_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    opened_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
    return opened_img


def draw_tomato_edge(original_img, bin_img):
    bin_img_processed = close_operation(bin_img, kernel_size=(15, 15))
    # cv2.imshow('Close Operation', bin_img_processed)
    # bin_img_processed = open_operation(bin_img_processed, kernel_size=(19, 19))
    # cv2.imshow('Open Operation', bin_img_processed)
    # Find contours on the processed binary image
    contours, _ = cv2.findContours(bin_img_processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # If no contour is found, return the original image (with an empty mask so callers can always unpack two values)
    if not contours:
        return original_img, np.zeros_like(bin_img)
    # Largest contour
    max_contour = max(contours, key=cv2.contourArea)
    # Precision of the polygon approximation (tunable)
    epsilon = 0.0006 * cv2.arcLength(max_contour, True)
    approx = cv2.approxPolyDP(max_contour, epsilon, True)
    # Draw the contour
    cv2.drawContours(original_img, [approx], -1, (0, 255, 0), 3)
    mask = np.zeros_like(bin_img)

    # Fill the largest contour with white
    cv2.drawContours(mask, [max_contour], -1, (255), thickness=cv2.FILLED)

    return original_img, mask

def draw_tomato_edge_convex_hull(original_img, bin_img):
    bin_img_blurred = cv2.GaussianBlur(bin_img, (5, 5), 0)
    contours, _ = cv2.findContours(bin_img_blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return original_img
    max_contour = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(max_contour)
    cv2.drawContours(original_img, [hull], -1, (0, 255, 0), 3)
    return original_img


# Produce the full tomato binary image (everything except the green calyx)
def fill_holes(bin_img):
    # Copy bin_img into img_filled
    img_filled = bin_img.copy()

    # Image height and width
    height, width = bin_img.shape

    # Flood-fill mask, two pixels larger than the input image
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # Flood-fill the black background starting from the corner
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # Invert the filled image
    img_filled_d = cv2.bitwise_not(img_filled)

    # Merge the original and the filled image
    img_filled = cv2.bitwise_or(bin_img, img_filled)
    # Crop img_filled and img_filled_d back to the size of bin_img
    # img_filled = img_filled[:height, :width]
    img_filled_d = img_filled_d[:height, :width]

    return img_filled, img_filled_d

def bitwise_and_rgb_with_binary(rgb_img, bin_img):
    # Expand the binary image to three channels
    bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)

    # AND the RGB image with the binary mask
    result = cv2.bitwise_and(rgb_img, bin_img_3channel)

    return result


def extract_max_connected_area(image_path, lower_hsv, upper_hsv):
    # Read the image
    image = cv2.imread(image_path)

    # Convert from BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Threshold the requested HSV range
    mask = cv2.inRange(hsv, lower_hsv, upper_hsv)

    # Connected components of the binary image
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)

    # Largest component (excluding the background)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # New binary image containing only the largest component
    new_bin_img = np.zeros_like(mask)
    new_bin_img[labels == largest_label] = 255

    # Copy new_bin_img into img_filled
    img_filled = new_bin_img.copy()

    # Image height and width
    height, width = new_bin_img.shape

    # Flood-fill mask, two pixels larger than the input image
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # Flood-fill the black background starting from the corner
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # Invert the filled image
    img_filled_inv = cv2.bitwise_not(img_filled)

    # Merge the original and the filled image
    img_filled = cv2.bitwise_or(new_bin_img, img_filled_inv)

    return img_filled


def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--dir_path', type=str, default=r'D:\project\supermachine--tomato-passion_fruit\20240419RGBtest2\data',
                        help='the directory path of images')
    parser.add_argument('--threshold_s_l', type=int, default=180,
                        help='the threshold for s_l')
    parser.add_argument('--threshold_r_b', type=int, default=15,
                        help='the threshold for r_b')

    args = parser.parse_args()

    for img_file in os.listdir(args.dir_path):
        if img_file.endswith('.bmp'):
            img_path = os.path.join(args.dir_path, img_file)
            s_l = extract_s_l(img_path)
            otsu_thresholded = otsu_threshold(s_l)
            img_fore = bitwise_and_rgb_with_binary(cv2.imread(img_path), otsu_thresholded)
            img_fore_defect = extract_g_r(img_fore)
            img_fore_defect = threshold_segmentation(img_fore_defect, args.threshold_r_b)
            # cv2.imshow('img_fore_defect', img_fore_defect)
            thresholded_s_l = threshold_segmentation(s_l, args.threshold_s_l)
            new_bin_img = largest_connected_component(thresholded_s_l)
            zhongggggg = cv2.bitwise_or(new_bin_img, cv2.imread('defect_mask.bmp', cv2.IMREAD_GRAYSCALE))
            cv2.imshow('zhongggggg', zhongggggg)
            new_otsu_bin_img = largest_connected_component(otsu_thresholded)
            filled_img, defect = fill_holes(new_bin_img)
            defect = bitwise_and_rgb_with_binary(cv2.imread(img_path), defect)
            cv2.imshow('defect', defect)
            edge, mask = draw_tomato_edge(cv2.imread(img_path), new_bin_img)
            org_defect = bitwise_and_rgb_with_binary(edge, new_bin_img)
            fore = bitwise_and_rgb_with_binary(cv2.imread(img_path), mask)
            fore_g_r_t = threshold_segmentation(extract_g_r(fore), 20)
            fore_g_r_t_ture = bitwise_and_rgb_with_binary(cv2.imread(img_path), fore_g_r_t)
            cv2.imwrite('defect_big.bmp', fore_g_r_t_ture)
            res = cv2.bitwise_or(new_bin_img, fore_g_r_t)
            white = find_reflection(img_path)

            # SVM prediction
            # Load the model
            # model, scaler = load_model('/Users/xs/PycharmProjects/super-tomato/svm_green.joblib')

            # Predict the image
            # predicted_mask = predict_image_array(image, model, scaler)

            cv2.imshow('white', white)

            cv2.imshow('fore', fore)
            cv2.imshow('fore_g_r_t', fore_g_r_t)
            cv2.imshow('mask', mask)
            print('mask', mask.shape)
            print('filled', filled_img.shape)
            print('largest', new_bin_img.shape)
            print('rp', org_defect.shape)
            cv2.imshow('res', res)

            # lower_hsv = np.array([19, 108, 15])
            # upper_hsv = np.array([118, 198, 134])
            # max_connected_area = extract_max_connected_area(img_path, lower_hsv, upper_hsv)
            # cv2.imshow('Max Connected Area', max_connected_area)

            # Show the original image
            original_img = cv2.imread(img_path)
            cv2.imshow('Original', original_img)
            cv2.imshow('thresholded_s_l', thresholded_s_l)
            cv2.imshow('Largest Connected Component', new_bin_img)
            cv2.imshow('Filled', filled_img)
            cv2.imshow('Defect', defect)
            cv2.imshow('Org_defect', org_defect)
            cv2.imshow('otsu_thresholded', new_otsu_bin_img)

            # Show the detected edge
            cv2.imshow('Edge', edge)

            # Wait for a key press
            cv2.waitKey(0)

            # Close all windows
            cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
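The per-image steps inside main() are interleaved with imshow windows, which makes them hard to reuse on a headless machine. A minimal sketch that wraps the mask-building steps into one function returning only the filled tomato mask; it reuses extract_s_l, threshold_segmentation, largest_connected_component and fill_holes from this file, and the function name and default threshold are my own:

import cv2

def tomato_mask(img_path, threshold_s_l=180):
    """Build the filled tomato mask for one image without opening any GUI windows."""
    s_l = extract_s_l(img_path)
    thresholded = threshold_segmentation(s_l, threshold_s_l)
    largest = largest_connected_component(thresholded)
    filled, _ = fill_holes(largest)
    return filled

# Example: cv2.imwrite('mask.bmp', tomato_mask('60.bmp'))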
@ -1,105 +0,0 @@
import cv2
import numpy as np
from sklearn import svm
from sklearn.preprocessing import StandardScaler
import time
import os
import joblib


def load_model(model_path):
    # Load the model and the scaler
    model, scaler = joblib.load(model_path)
    return model, scaler

def predict_image_array(image_array, model_path):
    # Load the model and the scaler
    model, scaler = load_model(model_path)

    # Flatten the image into a pixel array
    test_pixels = image_array.reshape(-1, 3)

    # Standardize
    test_pixels_scaled = scaler.transform(test_pixels)

    # Predict
    predictions = model.predict(test_pixels_scaled)

    # Reshape the predictions back into an image
    mask_predicted = predictions.reshape(image_array.shape[0], image_array.shape[1])

    return mask_predicted

def prepare_data(image_dir, mask_dir):
    # Pixel and label accumulators
    all_pixels = []
    all_labels = []

    # File lists for images and masks
    image_files = sorted(os.listdir(image_dir))
    mask_files = sorted(os.listdir(mask_dir))

    # Iterate over the image/mask pairs
    for image_file, mask_file in zip(image_files, mask_files):
        # Read the original image and its mask
        image = cv2.imread(os.path.join(image_dir, image_file))
        mask = cv2.imread(os.path.join(mask_dir, mask_file), cv2.IMREAD_GRAYSCALE)

        # Extract the pixels
        pixels = image.reshape(-1, 3)  # reshape the image to (n_pixels, 3)
        labels = (mask.reshape(-1) > 128).astype(int)  # label each pixel 0 or 1

        # Accumulate
        all_pixels.append(pixels)
        all_labels.append(labels)

    # Convert the lists to NumPy arrays
    all_pixels = np.concatenate(all_pixels, axis=0)
    all_labels = np.concatenate(all_labels, axis=0)

    return all_pixels, all_labels

# Load the data
train_pixels, train_labels = prepare_data('/Users/xs/PycharmProjects/super-tomato/datasets_green/train-2/img',
                                          '/Users/xs/PycharmProjects/super-tomato/datasets_green/train-2/label')

# Standardize the data
scaler = StandardScaler()
train_pixels_scaled = scaler.fit_transform(train_pixels)

# Build the SVM model
# model = svm.SVC(kernel='linear', C=1.0)
# model.fit(train_pixels_scaled, train_labels)
# # Save the model after training
# joblib.dump((model, scaler), '/Users/xs/PycharmProjects/super-tomato/svm_green.joblib')  # replace with your own model path

print('Model training finished!')

def predict_image(image_path, model, scaler):
    # Read the image
    image = cv2.imread(image_path)
    test_pixels = image.reshape(-1, 3)

    # Standardize
    test_pixels_scaled = scaler.transform(test_pixels)

    # Predict
    predictions = model.predict(test_pixels_scaled)

    # Reshape the predictions back into an image
    mask_predicted = predictions.reshape(image.shape[0], image.shape[1])

    return mask_predicted


# Predict a new image
time1 = time.time()
model, scaler = load_model('/Users/xs/PycharmProjects/super-tomato/svm_green.joblib')

predicted_mask = predict_image('/Users/xs/PycharmProjects/super-tomato/defect_big.bmp', model, scaler)
cv2.imwrite('/Users/xs/PycharmProjects/super-tomato/defect_mask.bmp', (predicted_mask * 255).astype('uint8'))
cv2.imshow('Predicted Mask', (predicted_mask * 255).astype('uint8'))
cv2.waitKey(0)
cv2.destroyAllWindows()

time2 = time.time()
print(f'Prediction time: {time2 - time1:.2f} s')
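Classifying every pixel of a full-resolution frame with an SVM is slow; one common workaround is to predict on a downscaled copy and resize the mask back up. A sketch of that idea, assuming model and scaler have been loaded as above; it trades boundary precision for speed and is only illustrative, not part of the repository:

import cv2

def predict_image_fast(image_path, model, scaler, scale=0.25):
    """Predict on a scaled-down copy, then upsample the mask to the original size."""
    image = cv2.imread(image_path)
    small = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
    pixels = scaler.transform(small.reshape(-1, 3))
    mask_small = model.predict(pixels).reshape(small.shape[0], small.shape[1])
    # Nearest-neighbour interpolation keeps the mask binary when scaling back up
    return cv2.resize(mask_small.astype('uint8'), (image.shape[1], image.shape[0]),
                      interpolation=cv2.INTER_NEAREST)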
@ -1,56 +0,0 @@
import cv2
import numpy as np


def find_reflection(image_path, threshold=190):
    # Read the image as grayscale
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # Threshold segmentation
    _, reflection = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)

    return reflection

def repair_reflection_telea(image_path, reflection, inpaint_radius=20):
    # Read the image
    image = cv2.imread(image_path)

    # Turn the highlight region into a binary mask
    _, reflection_binary = cv2.threshold(reflection, 1, 255, cv2.THRESH_BINARY)

    # Repair the highlight region with inpainting
    repaired_image = cv2.inpaint(image, reflection_binary, inpaint_radius, cv2.INPAINT_TELEA)

    return repaired_image

# Read the image
image_path = '/Users/xs/PycharmProjects/super-tomato/tomato_img_25/60.bmp'  # replace with your own image path
image = find_reflection(image_path)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Repair the reflections
image = repair_reflection_telea(image_path, image)
cv2.imshow('ima11ge', cv2.imread(image_path))
# Create a window
cv2.namedWindow('image')

# Create a trackbar
cv2.createTrackbar('Threshold', 'image', 0, 255, lambda x: None)

while True:
    # Read the trackbar position
    threshold = cv2.getTrackbarPos('Threshold', 'image')

    # Threshold the repaired image
    _, thresholded_image = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)

    # Show the binary image
    cv2.imshow('image', thresholded_image)

    # Press 'q' to exit the loop
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Destroy all windows
cv2.destroyAllWindows()
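For more than a handful of photos the interactive trackbar loop gets tedious; the two helpers above can also be run headless over a folder. A small sketch reusing find_reflection and repair_reflection_telea from this file; the folder arguments are placeholders:

import os
import cv2

def repair_folder(src_dir, dst_dir, threshold=190):
    """Inpaint the specular highlights of every .bmp image in src_dir and save the results to dst_dir."""
    os.makedirs(dst_dir, exist_ok=True)
    for name in os.listdir(src_dir):
        if not name.endswith('.bmp'):
            continue
        path = os.path.join(src_dir, name)
        mask = find_reflection(path, threshold)
        repaired = repair_reflection_telea(path, mask)
        cv2.imwrite(os.path.join(dst_dir, name), repaired)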
@ -1,660 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# @Time : 2024/4/12 14:53
|
|
||||||
# @Author : TG
|
|
||||||
# @File : utils.py
|
|
||||||
# @Software: PyCharm
|
|
||||||
|
|
||||||
import os
|
|
||||||
import socket
|
|
||||||
import time
|
|
||||||
import logging
|
|
||||||
import numpy as np
|
|
||||||
import shutil
|
|
||||||
import cv2
|
|
||||||
import matplotlib.pyplot as plt
|
|
||||||
from typing import Tuple
|
|
||||||
import pickle
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
from scipy.ndimage.measurements import label, find_objects
|
|
||||||
|
|
||||||
|
|
||||||
class PreSocket(socket.socket):
|
|
||||||
def __init__(self, *args, **kwargs):
|
|
||||||
super().__init__(*args, **kwargs)
|
|
||||||
self.pre_pack = b''
|
|
||||||
self.settimeout(5)
|
|
||||||
|
|
||||||
def receive(self, *args, **kwargs):
|
|
||||||
if self.pre_pack == b'':
|
|
||||||
return self.recv(*args, **kwargs)
|
|
||||||
else:
|
|
||||||
data_len = args[0]
|
|
||||||
required, left = self.pre_pack[:data_len], self.pre_pack[data_len:]
|
|
||||||
self.pre_pack = left
|
|
||||||
return required
|
|
||||||
|
|
||||||
def set_prepack(self, pre_pack: bytes):
|
|
||||||
temp = self.pre_pack
|
|
||||||
self.pre_pack = temp + pre_pack
|
|
||||||
|
|
||||||
|
|
||||||
class DualSock(PreSocket):
|
|
||||||
def __init__(self, connect_ip='127.0.0.1', recv_port: int = 21122, send_port: int = 21123):
|
|
||||||
super().__init__()
|
|
||||||
received_status, self.received_sock = try_connect(connect_ip=connect_ip, port_number=recv_port)
|
|
||||||
send_status, self.send_sock = try_connect(connect_ip=connect_ip, port_number=send_port)
|
|
||||||
self.status = received_status and send_status
|
|
||||||
|
|
||||||
def send(self, *args, **kwargs) -> int:
|
|
||||||
return self.send_sock.send(*args, **kwargs)
|
|
||||||
|
|
||||||
def receive(self, *args, **kwargs) -> bytes:
|
|
||||||
return self.received_sock.receive(*args, **kwargs)
|
|
||||||
|
|
||||||
def set_prepack(self, pre_pack: bytes):
|
|
||||||
self.received_sock.set_prepack(pre_pack)
|
|
||||||
|
|
||||||
def reconnect(self, connect_ip='127.0.0.1', recv_port:int = 21122, send_port: int = 21123):
|
|
||||||
received_status, self.received_sock = try_connect(connect_ip=connect_ip, port_number=recv_port)
|
|
||||||
send_status, self.send_sock = try_connect(connect_ip=connect_ip, port_number=send_port)
|
|
||||||
return received_status and send_status
|
|
||||||
|
|
||||||
|
|
||||||
def receive_sock(recv_sock: PreSocket, pre_pack: bytes = b'', time_out: float = -1.0, time_out_single=5e20) -> (
|
|
||||||
bytes, bytes):
|
|
||||||
"""
|
|
||||||
从指定的socket中读取数据.自动阻塞,如果返回的数据为空则说明连接出现问题,需要重新连接。
|
|
||||||
|
|
||||||
:param recv_sock: 指定sock
|
|
||||||
:param pre_pack: 上一包的粘包内容
|
|
||||||
:param time_out: 每隔time_out至少要发来一次指令,否则认为出现问题进行重连,小于0则为一直等
|
|
||||||
:param time_out_single: 单次指令超时时间,单位是秒
|
|
||||||
:return: data, next_pack
|
|
||||||
"""
|
|
||||||
recv_sock.set_prepack(pre_pack)
|
|
||||||
# 开头校验
|
|
||||||
time_start_recv = time.time()
|
|
||||||
while True:
|
|
||||||
if time_out > 0:
|
|
||||||
if (time.time() - time_start_recv) > time_out:
|
|
||||||
logging.error(f'指令接收超时')
|
|
||||||
return b'', b''
|
|
||||||
try:
|
|
||||||
temp = recv_sock.receive(1)
|
|
||||||
except ConnectionError as e:
|
|
||||||
logging.error(f'连接出错, 错误代码:\n{e}')
|
|
||||||
return b'', b''
|
|
||||||
except TimeoutError as e:
|
|
||||||
# logging.error(f'超时了,错误代码: \n{e}')
|
|
||||||
logging.info('运行中,等待指令..')
|
|
||||||
continue
|
|
||||||
except socket.timeout as e:
|
|
||||||
logging.info('运行中,等待指令..')
|
|
||||||
continue
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'遇见未知错误,错误代码: \n{e}')
|
|
||||||
return b'', b''
|
|
||||||
if temp == b'\xaa':
|
|
||||||
break
|
|
||||||
|
|
||||||
# 接收开头后,开始进行时间记录
|
|
||||||
time_start_recv = time.time()
|
|
||||||
|
|
||||||
# 获取报文长度
|
|
||||||
temp = b''
|
|
||||||
while len(temp) < 4:
|
|
||||||
if (time.time() - time_start_recv) > time_out_single:
|
|
||||||
logging.error(f'单次指令接收超时')
|
|
||||||
return b'', b''
|
|
||||||
try:
|
|
||||||
temp += recv_sock.receive(1)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'接收报文的长度不正确, 错误代码: \n{e}')
|
|
||||||
return b'', b''
|
|
||||||
try:
|
|
||||||
data_len = int.from_bytes(temp, byteorder='big')
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'转换失败,错误代码 \n{e}')
|
|
||||||
return b'', b''
|
|
||||||
|
|
||||||
# 读取报文内容
|
|
||||||
temp = b''
|
|
||||||
while len(temp) < data_len:
|
|
||||||
if (time.time() - time_start_recv) > time_out_single:
|
|
||||||
logging.error(f'单次指令接收超时')
|
|
||||||
return b'', b''
|
|
||||||
try:
|
|
||||||
temp += recv_sock.receive(data_len)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'接收报文内容失败, 错误代码: \n{e}')
|
|
||||||
return b'', b''
|
|
||||||
data, next_pack = temp[:data_len], temp[data_len:]
|
|
||||||
recv_sock.set_prepack(next_pack)
|
|
||||||
next_pack = b''
|
|
||||||
|
|
||||||
# 进行数据校验
|
|
||||||
temp = b''
|
|
||||||
while len(temp) < 3:
|
|
||||||
if (time.time() - time_start_recv) > time_out_single:
|
|
||||||
logging.error(f'单次指令接收超时')
|
|
||||||
return b'', b''
|
|
||||||
try:
|
|
||||||
temp += recv_sock.receive(1)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'接收报文校验失败, 错误代码: \n{e}')
|
|
||||||
return b'', b''
|
|
||||||
if temp == b'\xff\xff\xbb':
|
|
||||||
return data, next_pack
|
|
||||||
else:
|
|
||||||
logging.error(f"接收了一个完美的只错了校验位的报文")
|
|
||||||
return b'', b''
|
|
||||||
|
|
||||||
|
|
||||||
def parse_protocol(data: bytes) -> (str, any):
|
|
||||||
'''
|
|
||||||
指令转换
|
|
||||||
:param data: 接收到的报文
|
|
||||||
:return: 指令类型,指令内容
|
|
||||||
'''
|
|
||||||
try:
|
|
||||||
assert len(data) > 4
|
|
||||||
except AssertionError:
|
|
||||||
logging.error('指令转换失败,长度不足5')
|
|
||||||
return '', None
|
|
||||||
cmd, data = data[:4], data[4:]
|
|
||||||
cmd = cmd.decode('ascii').strip().upper()
|
|
||||||
if cmd == 'IM':
|
|
||||||
n_rows, n_cols, img = data[:2], data[2:4], data[4:]
|
|
||||||
try:
|
|
||||||
n_rows, n_cols = [int.from_bytes(x, byteorder='big') for x in [n_rows, n_cols]]
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'长宽转换失败, 错误代码{e}, 报文大小: n_rows:{n_rows}, n_cols: {n_cols}')
|
|
||||||
return '', None
|
|
||||||
try:
|
|
||||||
assert n_rows * n_cols * 3 == len(img)
|
|
||||||
except AssertionError:
|
|
||||||
logging.error('图像指令IM转换失败,数据长度错误')
|
|
||||||
return '', None
|
|
||||||
img = np.frombuffer(img, dtype=np.uint8).reshape((n_rows, n_cols, -1))
|
|
||||||
return cmd, img
|
|
||||||
|
|
||||||
|
|
||||||
def ack_sock(send_sock: socket.socket, cmd_type: str) -> bool:
|
|
||||||
'''
|
|
||||||
发送应答
|
|
||||||
:param cmd_type:指令类型
|
|
||||||
:param send_sock:指定sock
|
|
||||||
:return:是否发送成功
|
|
||||||
'''
|
|
||||||
msg = b'\xaa\x00\x00\x00\x05' + (' A' + cmd_type).upper().encode('ascii') + b'\xff\xff\xff\xbb'
|
|
||||||
try:
|
|
||||||
send_sock.send(msg)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'发送应答失败,错误类型:{e}')
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def done_sock(send_sock: socket.socket, cmd_type: str, result = '') -> bool:
|
|
||||||
'''
|
|
||||||
发送任务完成指令
|
|
||||||
:param send_sock: 指定sock
|
|
||||||
:param cmd_type: 指令类型
|
|
||||||
:param result: 数据
|
|
||||||
:return: 是否发送成功
|
|
||||||
'''
|
|
||||||
cmd = cmd_type.strip().upper()
|
|
||||||
if cmd_type == 'IM':
|
|
||||||
result = result.encode()
|
|
||||||
# 指令4位
|
|
||||||
length = len(result) + 4
|
|
||||||
length = length.to_bytes(4, byteorder='big')
|
|
||||||
msg = b'\xaa' + length + (' D' + cmd).upper().encode('ascii') + result + b'\xff\xff\xbb'
|
|
||||||
try:
|
|
||||||
send_sock.send(msg)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'发送完成指令失败,错误类型:{e}')
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
def simple_sock(send_sock: socket.socket, cmd_type: str, result) -> bool:
|
|
||||||
'''
|
|
||||||
发送任务完成指令
|
|
||||||
:param cmd_type:指令类型
|
|
||||||
:param send_sock:指定sock
|
|
||||||
:param result:数据
|
|
||||||
:return:是否发送成功
|
|
||||||
'''
|
|
||||||
cmd_type = cmd_type.strip().upper()
|
|
||||||
if cmd_type == 'IM':
|
|
||||||
if result == 0:
|
|
||||||
msg = b'S'
|
|
||||||
elif result == 1:
|
|
||||||
msg = b'Z'
|
|
||||||
elif result == 2:
|
|
||||||
msg = b'Q'
|
|
||||||
elif cmd_type == 'TR':
|
|
||||||
msg = b'A'
|
|
||||||
elif cmd_type == 'MD':
|
|
||||||
msg = b'D'
|
|
||||||
elif cmd_type == 'KM':
|
|
||||||
msg = b'K'
|
|
||||||
result = result.encode('ascii')
|
|
||||||
result = b',' + result
|
|
||||||
length = len(result)
|
|
||||||
msg = msg + length.to_bytes(4, 'big') + result
|
|
||||||
try:
|
|
||||||
send_sock.send(msg)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'发送完成指令失败,错误类型:{e}')
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
def test_sock(send_sock: socket.socket, cmd_type: str, long_axis, short_axis, defect_num, total_defect_area, rp) -> bool:
|
|
||||||
start_time = time.time()
|
|
||||||
|
|
||||||
cmd_type = cmd_type.strip().upper()
|
|
||||||
if cmd_type == 'IM':
|
|
||||||
# image_dir = r'D:\project\Tomato\20240410tomatoRGBtest2\huifu'
|
|
||||||
# image_paths = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith(".jpg")]
|
|
||||||
# for image_path in image_paths:
|
|
||||||
# img = cv2.imread(r'D:\project\Tomato\20240410tomatoRGBtest2\huifu\thresholded_6.jpg')
|
|
||||||
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
|
||||||
# img = np.asarray(img, dtype=np.uint8)
|
|
||||||
width = rp.shape[0]
|
|
||||||
height = rp.shape[1]
|
|
||||||
# print(width, height)
|
|
||||||
img_bytes = rp.tobytes()
|
|
||||||
length = len(img_bytes) + 18
|
|
||||||
# print(length)
|
|
||||||
length = length.to_bytes(4, byteorder='big')
|
|
||||||
width = width.to_bytes(2, byteorder='big')
|
|
||||||
height = height.to_bytes(2, byteorder='big')
|
|
||||||
long_axis = long_axis.to_bytes(2, byteorder='big')
|
|
||||||
short_axis = short_axis.to_bytes(2, byteorder='big')
|
|
||||||
defect_num = defect_num.to_bytes(2, byteorder='big')
|
|
||||||
total_defect_area = int(total_defect_area).to_bytes(4, byteorder='big')
|
|
||||||
cmd_type = 'RIM'
|
|
||||||
# result = result.encode('ascii')
|
|
||||||
send_message = b'\xaa' + length + (' ' + cmd_type).upper().encode('ascii') + long_axis + short_axis + defect_num + total_defect_area + width + height + img_bytes + b'\xff\xff\xbb'
|
|
||||||
# print(long_axis)
|
|
||||||
# print(short_axis)
|
|
||||||
# print(defect_num)
|
|
||||||
# print(total_defect_area)
|
|
||||||
# print(width)
|
|
||||||
# print(height)
|
|
||||||
|
|
||||||
try:
|
|
||||||
send_sock.send(send_message)
|
|
||||||
print('发送成功')
|
|
||||||
# print(send_message)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error(f'发送完成指令失败,错误类型:{e}')
|
|
||||||
return False
|
|
||||||
|
|
||||||
end_time = time.time()
|
|
||||||
print(f'发送时间:{end_time - start_time}秒')
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def try_connect(connect_ip: str, port_number: int, is_repeat: bool = False, max_reconnect_times: int = 50) -> (
|
|
||||||
bool, socket.socket):
|
|
||||||
"""
|
|
||||||
尝试连接.
|
|
||||||
|
|
||||||
:param is_repeat: 是否是重新连接
|
|
||||||
:param max_reconnect_times:最大重连次数
|
|
||||||
:return: (连接状态True为成功, Socket / None)
|
|
||||||
"""
|
|
||||||
reconnect_time = 0
|
|
||||||
while reconnect_time < max_reconnect_times:
|
|
||||||
logging.warning(f'尝试{"重新" if is_repeat else ""}发起第{reconnect_time + 1}次连接...')
|
|
||||||
try:
|
|
||||||
connected_sock = PreSocket(socket.AF_INET, socket.SOCK_STREAM)
|
|
||||||
connected_sock.connect((connect_ip, port_number))
|
|
||||||
except Exception as e:
|
|
||||||
reconnect_time += 1
|
|
||||||
logging.error(f'第{reconnect_time}次连接失败... 5秒后重新连接...\n {e}')
|
|
||||||
time.sleep(5)
|
|
||||||
continue
|
|
||||||
logging.warning(f'{"重新" if is_repeat else ""}连接成功')
|
|
||||||
return True, connected_sock
|
|
||||||
return False, None
|
|
||||||
|
|
||||||
|
|
||||||
def mkdir_if_not_exist(dir_name, is_delete=False):
|
|
||||||
"""
|
|
||||||
创建文件夹
|
|
||||||
:param dir_name: 文件夹
|
|
||||||
:param is_delete: 是否删除
|
|
||||||
:return: 是否成功
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if is_delete:
|
|
||||||
if os.path.exists(dir_name):
|
|
||||||
shutil.rmtree(dir_name)
|
|
||||||
print('[Info] 文件夹 "%s" 存在, 删除文件夹.' % dir_name)
|
|
||||||
|
|
||||||
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)
            print('[Info] 文件夹 "%s" 不存在, 创建文件夹.' % dir_name)
        return True
    except Exception as e:
        print('[Exception] %s' % e)
        return False


def create_file(file_name):
    """
    创建文件
    :param file_name: 文件名
    :return: None
    """
    if os.path.exists(file_name):
        print("文件存在:%s" % file_name)
        return False
        # os.remove(file_name)  # 删除已有文件
    if not os.path.exists(file_name):
        print("文件不存在,创建文件:%s" % file_name)
        open(file_name, 'a').close()
        return True


class Logger(object):
    def __init__(self, is_to_file=False, path=None):
        self.is_to_file = is_to_file
        if path is None:
            path = "tomato.log"
        self.path = path
        create_file(path)

    def log(self, content):
        if self.is_to_file:
            with open(self.path, "a") as f:
                print(time.strftime("[%Y-%m-%d_%H-%M-%S]:"), file=f)
                print(content, file=f)
        else:
            print(content)


# 提取西红柿,使用S+L的图像
def extract_s_l(image):
    # image = cv2.imread(image_path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    s_channel = hsv[:, :, 1]
    l_channel = lab[:, :, 0]
    result = cv2.add(s_channel, l_channel)
    return result


def find_reflection(image, threshold=190):
    # 读取图像
    # image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # 应用阈值分割
    _, reflection = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)

    return reflection


def otsu_threshold(image):
    # 将图像转换为灰度图像
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # 使用Otsu阈值分割
    _, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    return binary


# 提取花萼,使用G-R的图像
def extract_g_r(image):
    # image = cv2.imread(image_path)
    g_channel = image[:, :, 1]
    r_channel = image[:, :, 2]
    result = cv2.subtract(cv2.multiply(g_channel, 1.5), r_channel)
    return result


# 提取西红柿,使用R-B的图像
def extract_r_b(image):
    # image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    b_channel = image[:, :, 0]
    result = cv2.subtract(r_channel, b_channel)
    return result


def extract_r_g(image):
    # image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    g_channel = image[:, :, 1]
    result = cv2.subtract(r_channel, g_channel)
    return result


def threshold_segmentation(image, threshold, color=255):
    _, result = cv2.threshold(image, threshold, color, cv2.THRESH_BINARY)
    return result


def bitwise_operation(image1, image2, operation='and'):
    if operation == 'and':
        result = cv2.bitwise_and(image1, image2)
    elif operation == 'or':
        result = cv2.bitwise_or(image1, image2)
    else:
        raise ValueError("operation must be 'and' or 'or'")
    return result


def largest_connected_component(bin_img):
    # 使用connectedComponentsWithStats函数找到连通区域
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(bin_img, connectivity=8)

    # 找到最大的连通区域(除了背景)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # 创建一个新的二值图像,只显示最大的连通区域
    new_bin_img = np.zeros_like(bin_img)
    new_bin_img[labels == largest_label] = 255

    return new_bin_img


def close_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    closed_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
    return closed_img


def open_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    opened_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
    return opened_img


def draw_tomato_edge(original_img, bin_img):
    bin_img_processed = close_operation(bin_img, kernel_size=(15, 15))
    # cv2.imshow('Close Operation', bin_img_processed)
    # bin_img_processed = open_operation(bin_img_processed, kernel_size=(19, 19))
    # cv2.imshow('Open Operation', bin_img_processed)
    # 现在使用处理后的bin_img_processed查找轮廓
    contours, _ = cv2.findContours(bin_img_processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # 如果没有找到轮廓,直接返回原图
    if not contours:
        return original_img
    # 找到最大轮廓
    max_contour = max(contours, key=cv2.contourArea)
    # 多边形近似的精度调整
    epsilon = 0.0006 * cv2.arcLength(max_contour, True)  # 可以调整这个值
    approx = cv2.approxPolyDP(max_contour, epsilon, True)
    # 绘制轮廓
    cv2.drawContours(original_img, [approx], -1, (0, 255, 0), 3)
    mask = np.zeros_like(bin_img)

    # 使用白色填充最大轮廓
    cv2.drawContours(mask, [max_contour], -1, (255), thickness=cv2.FILLED)

    return original_img, mask


def draw_tomato_edge_convex_hull(original_img, bin_img):
    bin_img_blurred = cv2.GaussianBlur(bin_img, (5, 5), 0)
    contours, _ = cv2.findContours(bin_img_blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return original_img
    max_contour = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(max_contour)
    cv2.drawContours(original_img, [hull], -1, (0, 255, 0), 3)
    return original_img


# 得到完整的西红柿二值图像,除了绿色花萼
def fill_holes(bin_img):
    # 复制 bin_img 到 img_filled
    img_filled = bin_img.copy()

    # 获取图像的高度和宽度
    height, width = bin_img.shape

    # 创建一个掩码,比输入图像大两个像素点
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # 使用 floodFill 函数填充黑色区域
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # 反转填充后的图像
    img_filled_d = cv2.bitwise_not(img_filled)

    # 使用 bitwise_or 操作合并原图像和填充后的图像
    img_filled = cv2.bitwise_or(bin_img, img_filled)
    # 裁剪 img_filled 和 img_filled_d 到与 bin_img 相同的大小
    # img_filled = img_filled[:height, :width]
    img_filled_d = img_filled_d[:height, :width]

    return img_filled, img_filled_d


def bitwise_and_rgb_with_binary(rgb_img, bin_img):
    # 将二值图像转换为三通道图像
    bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)

    # 使用 bitwise_and 操作合并 RGB 图像和二值图像
    result = cv2.bitwise_and(rgb_img, bin_img_3channel)

    return result


def extract_max_connected_area(image, lower_hsv, upper_hsv):
    # 读取图像
    # image = cv2.imread(image_path)

    # 将图像从BGR转换到HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # 使用阈值获取指定区域的二值图像
    mask = cv2.inRange(hsv, lower_hsv, upper_hsv)

    # 找到二值图像的连通区域
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)

    # 找到最大的连通区域(除了背景)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # 创建一个新的二值图像,只显示最大的连通区域
    new_bin_img = np.zeros_like(mask)
    new_bin_img[labels == largest_label] = 255

    # 复制 new_bin_img 到 img_filled
    img_filled = new_bin_img.copy()

    # 获取图像的高度和宽度
    height, width = new_bin_img.shape

    # 创建一个掩码,比输入图像大两个像素点
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # 使用 floodFill 函数填充黑色区域
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # 反转填充后的图像
    img_filled_inv = cv2.bitwise_not(img_filled)

    # 使用 bitwise_or 操作合并原图像和填充后的图像
    img_filled = cv2.bitwise_or(new_bin_img, img_filled_inv)

    return img_filled


def get_tomato_dimensions(edge_img):
    """
    根据番茄边缘二值化轮廓图,计算番茄的长径、短径和长短径比值。
    使用最小外接矩形和最小外接圆两种方法。

    参数:
    edge_img (numpy.ndarray): 番茄边缘二值化轮廓图,背景为黑色,番茄区域为白色。

    返回:
    tuple: (长径, 短径, 长短径比值)
    """
    # 最小外接矩形
    rect = cv2.minAreaRect(cv2.findContours(edge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0])
    major_axis, minor_axis = rect[1]
    aspect_ratio = max(major_axis, minor_axis) / min(major_axis, minor_axis)

    # # 最小外接圆
    # (x, y), radius = cv2.minEnclosingCircle(
    #     cv2.findContours(edge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0])
    # diameter = 2 * radius
    # aspect_ratio_circle = 1.0

    return (max(major_axis, minor_axis), min(major_axis, minor_axis))


def get_defect_info(defect_img):
    """
    根据番茄区域缺陷二值化轮廓图,计算缺陷区域的个数和总面积。

    参数:
    defect_img (numpy.ndarray): 番茄区域缺陷二值化轮廓图,背景为黑色,番茄区域为白色,缺陷区域为黑色连通域。

    返回:
    tuple: (缺陷区域个数, 缺陷区域像素面积,缺陷像素总面积)
    """
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(defect_img, connectivity=4)
    max_area = max(stats[i, cv2.CC_STAT_AREA] for i in range(1, nb_components))
    areas = []
    for i in range(1, nb_components):
        area = stats[i, cv2.CC_STAT_AREA]
        if area != max_area:
            areas.append(area)
    number_defects = len(areas)
    total_pixels = sum(areas)
    return number_defects, total_pixels


def connected_components_analysis(binary_image):
    """
    从二值化图像计算黑色连通域个数和各个黑色连通域像素面积及黑色像素总面积。

    参数:
    binary_image (numpy.ndarray): 二值化图像, 其中 0 表示白色, 1 表示黑色。

    返回:
    num_components (int): 黑色连通域的个数。
    component_areas (list): 每个黑色连通域的像素面积。
    total_black_area (int): 黑色像素的总面积。
    """
    # 标记连通域
    labeled_image, num_components = label(binary_image)

    # 获取每个连通域的像素位置
    slices = find_objects(labeled_image)

    # 计算每个连通域的像素面积
    component_areas = []
    for slice_obj in slices:
        component_area = np.sum(binary_image[slice_obj])
        component_areas.append(component_area)

    # 计算黑色像素的总面积
    total_black_area = np.sum(binary_image)

    return num_components, component_areas, total_black_area
@ -1,75 +0,0 @@
# Passion Fruit & Tomato Communication

OSI layers 5-7, unicast TCP/IP. A packet starts with 8'haa, ends with 8'hbb, and consists of six fields:

| Start | Length 1 | Length 2 | Length 3 | Length 4 | Type 1 | Type 2 | Type 3 | Type 4 | Data 1 | Data 2 | ... | Data i | Check 1 | Check 2 | End |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :--: | :---: | :---: | :---: | :---: |
| 8'haa | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | ... | 8'hzz | 8'hff | 8'hff | 8'hbb |

### Start

1 byte, 8'haa

### Length

A 32-bit unsigned integer `length`, where length = number of data bytes i + 4.<br>`Length 1` is length[31:24], `Length 2` is length[23:16], `Length 3` is length[15:8], `Length 4` is length[7:0]

### Type & Data

ASCII characters. For example, `Type 1` = ' ' (space), `Type 2` = ' ' (space), `Type 3` = 'I', `Type 4` = 'M' denotes an RGB image packet.

- **RGB image packet ' '(space)' '(space)'I''M'**: `Data 1`~`Data i` carry the image row count rows (height), column count cols (width), and the RGB pixel data

$$
i-4=rows \times cols \times 3
$$

Layout:

| Rows 1 | Rows 2 | Cols 1 | Cols 2 | Image data 1 | ... | Image data (i-4) |
| :--------: | :-------: | :--------: | :-------: | :-------: | :--: | :-----------: |
| rows[15:8] | rows[7:0] | cols[15:8] | cols[7:0] | | ... | |

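To make the byte layout above concrete, here is a minimal Python sketch that packs one RGB frame into an ' '' ''I''M' packet. It is only an illustration under the stated format; `build_rgb_packet` is a hypothetical helper, not a function from this repository.

```python
import struct

import numpy as np


def build_rgb_packet(img: np.ndarray) -> bytes:
    """Pack one H x W x 3 uint8 image as a ' '' ''I''M' packet (sketch only)."""
    rows, cols = img.shape[:2]
    # Data field: rows (2 bytes) + cols (2 bytes) + raw RGB bytes, so i = 4 + rows*cols*3
    payload = struct.pack('>HH', rows, cols) + img.astype(np.uint8).tobytes()
    length = len(payload) + 4          # length = data bytes i + 4 (the 4 type bytes)
    return (b'\xaa'                    # Start
            + length.to_bytes(4, 'big')  # Length 1..4, big-endian
            + b'  IM'                  # Type 1..4: space, space, 'I', 'M'
            + payload                  # Data 1..i
            + b'\xff\xff'              # Check 1, Check 2 (fixed 8'hff)
            + b'\xbb')                 # End
```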
- **Result packet ' '(space)'R''I''M'**: `Data 1`~`Data i` carry the long axis long, short axis short, defect count num, defect area area, the result image row count rows (height), column count cols (width), and the result image RGB data

$$
i-14=rows \times cols \times 3
$$

Layout:

| Long 1 | Long 2 | Short 1 | Short 2 | Num 1 | Num 2 | Area 1 | Area 2 | Area 3 | Area 4 | Rows 1 | Rows 2 | Cols 1 | Cols 2 | Image data 1 | Image data (i-14) |
| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
| long[15:8] | long[7:0] | short[15:8] | short[7:0] | num[15:8] | num[7:0] | area[31:24] | area[23:16] | area[15:8] | area[7:0] | rows[15:8] | rows[7:0] | cols[15:8] | cols[7:0] | | |

- **Spectral data packet ' '(space)' '(space)'S''P'**: `Data 1`~`Data i` carry the spectral cube row count rows (height), column count cols (width), band count bands, and the spectral data

$$
i-6=rows \times cols \times bands \times 4
$$

Layout:

| Rows 1 | Rows 2 | Cols 1 | Cols 2 | Bands 1 | Bands 2 | Image data 1 | ... | Image data (i-6) |
| :--------: | :-------: | :--------: | :-------: | :---------: | :--------: | :-------: | :--: | :-----------: |
| rows[15:8] | rows[7:0] | cols[15:8] | cols[7:0] | bands[15:8] | bands[7:0] | | ... | |

- **Model switch commands**

**' '(space)' '(space)'P''A'** selects the passion fruit grading model

| Start | Length 1 | Length 2 | Length 3 | Length 4 | Type 1 | Type 2 | Type 3 | Type 4 | Data 1 | Check 1 | Check 2 | End |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| 8'haa | 8'd0 | 8'd0 | 8'd0 | 8'd5 | ' ' | ' ' | 'P' | 'A' | 8'hff | 8'hff | 8'hff | 8'hbb |

**' '(space)' '(space)'T''O'** selects the tomato grading model

| Start | Length 1 | Length 2 | Length 3 | Length 4 | Type 1 | Type 2 | Type 3 | Type 4 | Data 1 | Check 1 | Check 2 | End |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| 8'haa | 8'd0 | 8'd0 | 8'd0 | 8'd5 | ' ' | ' ' | 'T' | 'O' | 8'hff | 8'hff | 8'hff | 8'hbb |

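A matching sketch for the other direction, assuming the fixed model-switch packet and the 14-byte result header described above. `SWITCH_TO_TOMATO` and `parse_result_payload` are illustrative names only, not repository code.

```python
import struct

import numpy as np

# Model-switch command is a fixed-length packet: Data 1 is always 8'hff, so length = 1 + 4 = 5
SWITCH_TO_TOMATO = b'\xaa' + (5).to_bytes(4, 'big') + b'  TO' + b'\xff' + b'\xff\xff' + b'\xbb'


def parse_result_payload(data: bytes):
    """Unpack the Data 1..i field of a ' ''R''I''M' result packet (sketch only)."""
    # 2+2+2+4+2+2 = 14 header bytes, all big-endian, then rows*cols*3 RGB bytes
    long_axis, short_axis, num, area, rows, cols = struct.unpack('>HHHIHH', data[:14])
    rgb = np.frombuffer(data[14:14 + rows * cols * 3], dtype=np.uint8).reshape(rows, cols, 3)
    return long_axis, short_axis, num, area, rgb
```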
### Checksum

2 bytes; `Check 1` is 8'hff and `Check 2` is 8'hff

### End

1 byte, 8'hbb
@ -1,228 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/4/20 18:45
# @Author : TG
# @File : main.py
# @Software: PyCharm
# -*- coding: utf-8 -*-
# @Time : 2024/4/12 15:04
# @Author : TG
# @File : main.py
# @Software: PyCharm

import socket
import sys
import numpy as np
import cv2
import root_dir
import time
import os
from root_dir import ROOT_DIR
import logging
from utils import threshold_segmentation, largest_connected_component, draw_tomato_edge, bitwise_and_rgb_with_binary, \
    extract_s_l, get_tomato_dimensions, get_defect_info, create_pipes, receive_rgb_data, send_data, receive_spec_data
from collections import deque
import time
import io
from PIL import Image
import threading
import queue


def process_data(img: any) -> tuple:
    """
    处理一帧RGB图像,计算尺寸与缺陷指标
    :param img: RGB图像 (numpy数组)
    :return: (长径, 短径, 缺陷个数, 缺陷总面积, 结果图像)
    """
    # start_time = time.time()
    # if cmd == 'IM':

    threshold_s_l = 180
    # threshold_r_b = 15

    s_l = extract_s_l(img)

    thresholded_s_l = threshold_segmentation(s_l, threshold_s_l)
    new_bin_img = largest_connected_component(thresholded_s_l)

    edge, mask = draw_tomato_edge(img, new_bin_img)
    org_defect = bitwise_and_rgb_with_binary(edge, new_bin_img)

    # filled_img, defect = fill_holes(new_bin_img)

    long_axis, short_axis = get_tomato_dimensions(mask)
    number_defects, total_pixels = get_defect_info(new_bin_img)
    rp = org_defect
    rp = cv2.cvtColor(rp, cv2.COLOR_BGR2RGB)
    # cv2.imwrite('rp1.bmp', rp)

    # else:
    #     logging.error(f'错误指令,指令为{cmd}')
    #     response = False

    # end_time = time.time()
    # elapsed_time = end_time - start_time
    # print(f'处理时间:{elapsed_time}秒')
    return long_axis, short_axis, number_defects, total_pixels, rp


## 20240423代码
def main(is_debug=False):
    file_handler = logging.FileHandler(os.path.join(ROOT_DIR, 'report.log'))
    file_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
    logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s',
                        handlers=[file_handler, console_handler],
                        level=logging.DEBUG)
    rgb_receive_name = r'\\.\pipe\rgb_receive'
    rgb_send_name = r'\\.\pipe\rgb_send'
    spec_receive_name = r'\\.\pipe\spec_receive'
    rgb_receive, rgb_send, spec_receive = create_pipes(rgb_receive_name, rgb_send_name, spec_receive_name)

    # data_size = 15040566

    while True:
        long_axis_list = []
        short_axis_list = []
        defect_num_sum = 0
        total_defect_area_sum = 0
        rp = None

        start_time = time.time()

        for i in range(5):
            # start_time = time.time()

            img_data = receive_rgb_data(rgb_receive)
            image = Image.open(io.BytesIO(img_data))
            img = np.array(image)
            print(img.shape)

            # end_time = time.time()
            # elapsed_time = end_time - start_time
            # print(f'接收时间:{elapsed_time}秒')

            long_axis, short_axis, number_defects, total_pixels, rp = process_data(img=img)
            # print(long_axis, short_axis, number_defects, type(total_pixels), rp.shape)

            if i <= 2:
                long_axis_list.append(long_axis)
                short_axis_list.append(short_axis)
                if i == 1:
                    rp_result = rp

            defect_num_sum += number_defects
            total_defect_area_sum += total_pixels

        long_axis = round(sum(long_axis_list) / 3)
        short_axis = round(sum(short_axis_list) / 3)
        # print(type(long_axis), type(short_axis), type(defect_num_sum), type(total_defect_area_sum), type(rp_result))

        spec_data = receive_spec_data(spec_receive)
        print(f'光谱数据接收长度:', len(spec_data))

        response = send_data(pipe_send=rgb_send, long_axis=long_axis, short_axis=short_axis,
                             defect_num=defect_num_sum, total_defect_area=total_defect_area_sum, rp=rp_result)

        end_time = time.time()
        elapsed_time = (end_time - start_time) * 1000
        print(f'总时间:{elapsed_time}毫秒')

        print(long_axis, short_axis, defect_num_sum, total_defect_area_sum, rp_result.shape)


if __name__ == '__main__':
    # 2个pipe管道
    # 接收到图片 n_rows * n_cols * 3, uint8
    # 发送long_axis, short_axis, defect_num_sum, total_defect_area_sum, rp_result
    main(is_debug=False)

### 多线程版本
|
|
||||||
|
|
||||||
# def receive_spec_data_thread(spec_receive, spec_queue):
|
|
||||||
# while True:
|
|
||||||
# spec_data = receive_spec_data(spec_receive)
|
|
||||||
# spec_queue.put(spec_data)
|
|
||||||
#
|
|
||||||
# def receive_process_rgb_data_thread(rgb_receive, img_queue, result_queue):
|
|
||||||
# while True:
|
|
||||||
#
|
|
||||||
# long_axis_list = []
|
|
||||||
# short_axis_list = []
|
|
||||||
# defect_num_sum = 0
|
|
||||||
# total_defect_area_sum = 0
|
|
||||||
# rp = None
|
|
||||||
#
|
|
||||||
# for i in range(5):
|
|
||||||
# img_data = receive_rgb_data(rgb_receive)
|
|
||||||
# image = Image.open(io.BytesIO(img_data))
|
|
||||||
# img = np.array(image)
|
|
||||||
#
|
|
||||||
# long_axis, short_axis, number_defects, total_pixels, rp = process_data(img=img)
|
|
||||||
#
|
|
||||||
# if i <= 2:
|
|
||||||
# long_axis_list.append(long_axis)
|
|
||||||
# short_axis_list.append(short_axis)
|
|
||||||
# if i == 1:
|
|
||||||
# rp_result = rp
|
|
||||||
#
|
|
||||||
# defect_num_sum += number_defects
|
|
||||||
# total_defect_area_sum += total_pixels
|
|
||||||
#
|
|
||||||
# long_axis = round(sum(long_axis_list) / 3)
|
|
||||||
# short_axis = round(sum(short_axis_list) / 3)
|
|
||||||
#
|
|
||||||
# result = (long_axis, short_axis, defect_num_sum, total_defect_area_sum, rp_result)
|
|
||||||
# result_queue.put(result)
|
|
||||||
#
|
|
||||||
# def main(is_debug=False):
|
|
||||||
# file_handler = logging.FileHandler(os.path.join(ROOT_DIR, 'report.log'))
|
|
||||||
# file_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
|
|
||||||
# console_handler = logging.StreamHandler(sys.stdout)
|
|
||||||
# console_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
|
|
||||||
# logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s',
|
|
||||||
# handlers=[file_handler, console_handler],
|
|
||||||
# level=logging.DEBUG)
|
|
||||||
# rgb_receive_name = r'\\.\pipe\rgb_receive'
|
|
||||||
# rgb_send_name = r'\\.\pipe\rgb_send'
|
|
||||||
# spec_receive_name = r'\\.\pipe\spec_receive'
|
|
||||||
# rgb_receive, rgb_send, spec_receive = create_pipes(rgb_receive_name, rgb_send_name, spec_receive_name)
|
|
||||||
#
|
|
||||||
# spec_queue = queue.Queue()
|
|
||||||
# img_queue = queue.Queue()
|
|
||||||
# result_queue = queue.Queue()
|
|
||||||
#
|
|
||||||
# # 创建并启动线程
|
|
||||||
# spec_thread = threading.Thread(target=receive_spec_data_thread, args=(spec_receive, spec_queue))
|
|
||||||
# rgb_thread = threading.Thread(target=receive_process_rgb_data_thread, args=(rgb_receive, img_queue, result_queue))
|
|
||||||
# spec_thread.start()
|
|
||||||
# rgb_thread.start()
|
|
||||||
#
|
|
||||||
# while True:
|
|
||||||
# spec_data = spec_queue.get()
|
|
||||||
# print(f'spec_data长度:', len(spec_data))
|
|
||||||
# long_axis, short_axis, defect_num_sum, total_defect_area_sum, rp_result = result_queue.get()
|
|
||||||
#
|
|
||||||
# response = send_data(pipe_send=rgb_send, long_axis=long_axis, short_axis=short_axis,
|
|
||||||
# defect_num=defect_num_sum, total_defect_area=total_defect_area_sum, rp=rp_result)
|
|
||||||
#
|
|
||||||
# print(long_axis, short_axis, defect_num_sum, total_defect_area_sum, rp_result.shape)
|
|
||||||
#
|
|
||||||
# if __name__ == '__main__':
|
|
||||||
# main(is_debug=False)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -1,124 +0,0 @@
import win32file
import win32pipe
import io
from PIL import Image
import time
import logging


def send_data(pipe_send, long_axis, short_axis, defect_num, total_defect_area, rp):

    start_time = time.time()

    # width = rp.shape[0]
    # height = rp.shape[1]
    # print(width, height)
    img_bytes = rp.tobytes()
    # length = len(img_bytes) + 18
    # print(length)
    # length = length.to_bytes(4, byteorder='big')
    # width = width.to_bytes(2, byteorder='big')
    # height = height.to_bytes(2, byteorder='big')
    long_axis = long_axis.to_bytes(2, byteorder='big')
    short_axis = short_axis.to_bytes(2, byteorder='big')
    defect_num = defect_num.to_bytes(2, byteorder='big')
    total_defect_area = int(total_defect_area).to_bytes(4, byteorder='big')
    # cmd_type = 'RIM'
    # result = result.encode('ascii')
    # send_message = b'\xaa' + length + (' ' + cmd_type).upper().encode('ascii') + long_axis + short_axis + defect_num + total_defect_area + width + height + img_bytes + b'\xff\xff\xbb'
    send_message = long_axis + short_axis + defect_num + total_defect_area + img_bytes
    # print(long_axis)
    # print(short_axis)
    # print(defect_num)
    # print(total_defect_area)
    # print(width)
    # print(height)

    try:
        win32file.WriteFile(pipe_send, send_message)
        print('发送成功')
        # print(send_message)
    except Exception as e:
        logging.error(f'发送完成指令失败,错误类型:{e}')
        return False

    end_time = time.time()
    print(f'发送时间:{end_time - start_time}秒')

    return True


def receive_data(pipe, data_size):
    try:
        # 读取图片数据
        result, img_data = win32file.ReadFile(pipe, data_size, None)
        return img_data
    except Exception as e:
        print(f"Failed to receive data. Error: {e}")
        return None


def create_pipes(pipe_receive_name, pipe_send_name):
    # 打开或创建命名管道
    pipe_receive = win32pipe.CreateNamedPipe(
        pipe_receive_name,
        win32pipe.PIPE_ACCESS_INBOUND,
        win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
        1, 80000000, 80000000, 0, None
    )
    pipe_send = win32pipe.CreateNamedPipe(
        pipe_send_name,
        win32pipe.PIPE_ACCESS_OUTBOUND,  # 修改为输出模式
        win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
        1, 80000000, 80000000, 0, None
    )

    # 等待发送端连接
    win32pipe.ConnectNamedPipe(pipe_receive, None)
    # 等待发送端连接
    win32pipe.ConnectNamedPipe(pipe_send, None)
    print("Sender connected.")
    print("receive connected.")

    return pipe_receive, pipe_send


def process_images(pipe_receive, pipe_send):
    image_count = 0
    batch_size = 5
    images = []

    while True:
        for i in range(5):
            start_time = time.time()  # 记录开始时间
            img_data = receive_data(pipe_receive)

            if img_data:
                image = Image.open(io.BytesIO(img_data))
                image = image.convert("L")  # 示例处理:转换为灰度图
                buf = io.BytesIO()
                image.save(buf, format='JPEG')
                buf.seek(0)  # 重置buffer位置到开始
                processed_data = buf.getvalue()

                images.append(buf)  # 存储 BytesIO 对象而不是 Image 对象

                if len(images) >= batch_size:
                    # 发送最后一个处理后的图像
                    send_image_back_to_qt(pipe_send, images[-1].getvalue())
                    images = []  # 清空列表以开始新的批处理
                time.sleep(0.01)  # 添加适当的延迟,降低CPU使用率
                print("Image received and saved.")
                end_time = time.time()  # 记录结束时间
                duration_ms = (end_time - start_time) * 1000  # 转换为毫秒
                print(f"Image {i + 1} received and displayed in {duration_ms:.2f} ms.")  # 打印毫秒级的时间
                image_count += 1  # 图片计数器增加
                print(f"Image {image_count} received and displayed.")


def main():
    pipe_receive_name = r'\\.\pipe\pipe_receive'
    pipe_send_name = r'\\.\pipe\pipe_send'
    pipe_receive, pipe_send = create_pipes(pipe_receive_name, pipe_send_name)
    process_images(pipe_receive, pipe_send)


if __name__ == '__main__':
    main()
@ -1,123 +0,0 @@
import sys
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QPixmap, QImage
import win32pipe
import win32file
import struct
from PIL import Image
import io


class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Tomato Image Sender")
        self.setGeometry(100, 100, 800, 600)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)

        layout = QVBoxLayout()
        central_widget.setLayout(layout)

        self.image_label = QLabel()
        layout.addWidget(self.image_label)

        self.rgb_send_name = r'\\.\pipe\rgb_receive'  # 发送数据管道名对应 main.py 的接收数据管道名
        self.rgb_receive_name = r'\\.\pipe\rgb_send'  # 接收数据管道名对应 main.py 的发送数据管道名
        self.spec_send_name = r'\\.\pipe\spec_receive'  # 发送数据管道名对应 main.py 的接收数据管道名

        # 连接main.py创建的命名管道
        self.rgb_send = win32file.CreateFile(
            self.rgb_send_name,
            win32file.GENERIC_WRITE,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

        self.rgb_receive = win32file.CreateFile(
            self.rgb_receive_name,
            win32file.GENERIC_READ,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

        self.spec_send = win32file.CreateFile(
            self.spec_send_name,
            win32file.GENERIC_WRITE,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

    def send_image_group(self, image_dir):
        rgb_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith(('.bmp'))][:5]
        spec_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.raw')][:1]

        for image_path in rgb_files:
            with open(image_path, 'rb') as f:
                img_data = f.read()

            try:
                win32file.WriteFile(self.rgb_send, len(img_data).to_bytes(4, byteorder='big'))
                win32file.WriteFile(self.rgb_send, img_data)
            except Exception as e:
                print(f"数据发送失败. 错误原因: {e}")

        if spec_files:
            spec_file = spec_files[0]
            with open(spec_file, 'rb') as f:
                spec_data = f.read()

            try:
                win32file.WriteFile(self.spec_send, len(spec_data).to_bytes(4, byteorder='big'))
                print(f"发送的光谱数据长度: {len(spec_data)}")
                win32file.WriteFile(self.spec_send, spec_data)
                print(f'发送的光谱数据长度: {len(spec_data)}')
            except Exception as e:
                print(f"数据发送失败. 错误原因: {e}")

        self.receive_result()

    def receive_result(self):
        try:
            # 读取结果数据
            long_axis = int.from_bytes(win32file.ReadFile(self.rgb_receive, 2)[1], byteorder='big')
            short_axis = int.from_bytes(win32file.ReadFile(self.rgb_receive, 2)[1], byteorder='big')
            defect_num = int.from_bytes(win32file.ReadFile(self.rgb_receive, 2)[1], byteorder='big')
            total_defect_area = int.from_bytes(win32file.ReadFile(self.rgb_receive, 4)[1], byteorder='big')
            len_img = int.from_bytes(win32file.ReadFile(self.rgb_receive, 4)[1], byteorder='big')
            img_data = win32file.ReadFile(self.rgb_receive, len_img)[1]

            print(f"长径: {long_axis}, 短径: {short_axis}, 缺陷个数: {defect_num}, 缺陷面积: {total_defect_area}")

            # 显示结果图像
            image = Image.open(io.BytesIO(img_data))
            qimage = QImage(image.tobytes(), image.size[0], image.size[1], QImage.Format_RGB888)
            pixmap = QPixmap.fromImage(qimage)
            self.image_label.setPixmap(pixmap)

        except Exception as e:
            print(f"数据接收失败. 错误原因: {e}")

    def open_file_dialog(self):
        directory_dialog = QFileDialog()
        directory_dialog.setFileMode(QFileDialog.Directory)
        if directory_dialog.exec_():
            selected_directory = directory_dialog.selectedFiles()[0]
            self.send_image_group(selected_directory)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    main_window.open_file_dialog()
    sys.exit(app.exec_())
@ -1,5 +0,0 @@
import pathlib

file_path = pathlib.Path(__file__)
ROOT_DIR = file_path.parent
@ -1,579 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/4/20 18:24
# @Author : TG
# @File : utils.py
# @Software: PyCharm

import socket
import time
import logging
import numpy as np
import shutil
import cv2
import os
from scipy.ndimage.measurements import label, find_objects
import win32pipe
import win32file
import io
from PIL import Image
import select
import msvcrt


def receive_rgb_data(pipe):
    try:
        # 读取图片数据
        len_img = win32file.ReadFile(pipe, 4, None)
        data_size = int.from_bytes(len_img[1], byteorder='big')
        result, img_data = win32file.ReadFile(pipe, data_size, None)
        return img_data
    except Exception as e:
        print(f"数据接收失败,错误原因: {e}")
        return None


def receive_spec_data(pipe):
    try:
        # 读取图片数据长度
        len_spec = win32file.ReadFile(pipe, 4, None)
        if len_spec is None:
            # 未能读取到数据长度,返回"0"
            return "0"
        data_size = int.from_bytes(len_spec[1], byteorder='big')
        if data_size == 0:
            # 接收到空数据,返回"0"
            return "0"

        # 读取图片数据
        result, spec_data = win32file.ReadFile(pipe, data_size, None)
        return spec_data
    except Exception as e:
        print(f"数据接收失败,错误原因: {e}")
        return '0'

# def receive_spec_data(pipe):
|
|
||||||
# try:
|
|
||||||
# # 读取图片数据
|
|
||||||
# len_spec = win32file.ReadFile(pipe, 4, None)
|
|
||||||
# data_size = int.from_bytes(len_spec[1], byteorder='big')
|
|
||||||
# result, spec_data = win32file.ReadFile(pipe, data_size, None)
|
|
||||||
# return spec_data
|
|
||||||
# except Exception as e:
|
|
||||||
# print(f"数据接收失败,错误原因: {e}")
|
|
||||||
# return None
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# def create_pipes(pipe_receive_name, pipe_send_name):
|
|
||||||
# while True:
|
|
||||||
# try:
|
|
||||||
# # 打开或创建命名管道
|
|
||||||
# pipe_receive = win32pipe.CreateNamedPipe(
|
|
||||||
# pipe_receive_name,
|
|
||||||
# win32pipe.PIPE_ACCESS_INBOUND,
|
|
||||||
# win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
|
|
||||||
# 1, 80000000, 80000000, 0, None
|
|
||||||
# )
|
|
||||||
# pipe_send = win32pipe.CreateNamedPipe(
|
|
||||||
# pipe_send_name,
|
|
||||||
# win32pipe.PIPE_ACCESS_OUTBOUND, # 修改为输出模式
|
|
||||||
# win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
|
|
||||||
# 1, 80000000, 80000000, 0, None
|
|
||||||
# )
|
|
||||||
#
|
|
||||||
# # 等待发送端连接
|
|
||||||
# win32pipe.ConnectNamedPipe(pipe_receive, None)
|
|
||||||
# # 等待发送端连接
|
|
||||||
# win32pipe.ConnectNamedPipe(pipe_send, None)
|
|
||||||
# print("Sender connected.")
|
|
||||||
# print("receive connected.")
|
|
||||||
# return pipe_receive, pipe_send
|
|
||||||
#
|
|
||||||
# except Exception as e:
|
|
||||||
# print(f"Error occurred while creating pipes: {e}")
|
|
||||||
# print("Waiting for 5 seconds before retrying...")
|
|
||||||
# time.sleep(5)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def create_pipes(rgb_receive_name, rgb_send_name, spec_receive_name):
    while True:
        try:
            # 打开或创建命名管道
            rgb_receive = win32pipe.CreateNamedPipe(
                rgb_receive_name,
                win32pipe.PIPE_ACCESS_INBOUND,
                win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
                1, 80000000, 80000000, 0, None
            )
            rgb_send = win32pipe.CreateNamedPipe(
                rgb_send_name,
                win32pipe.PIPE_ACCESS_OUTBOUND,  # 修改为输出模式
                win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
                1, 80000000, 80000000, 0, None
            )
            spec_receive = win32pipe.CreateNamedPipe(
                spec_receive_name,
                win32pipe.PIPE_ACCESS_INBOUND,
                win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
                1, 200000000, 200000000, 0, None
            )
            print("pipe管道创建成功,等待连接...")
            # 等待发送端连接
            win32pipe.ConnectNamedPipe(rgb_receive, None)
            print("rgb_receive connected.")
            # 等待发送端连接
            win32pipe.ConnectNamedPipe(rgb_send, None)
            print("rgb_send connected.")
            win32pipe.ConnectNamedPipe(spec_receive, None)
            print("spec_receive connected.")
            return rgb_receive, rgb_send, spec_receive

        except Exception as e:
            print(f"管道创建连接失败,失败原因: {e}")
            print("等待5秒后重试...")
            time.sleep(5)

# def send_data(pipe_send, long_axis, short_axis, defect_num, total_defect_area, rp):
|
|
||||||
#
|
|
||||||
# # start_time = time.time()
|
|
||||||
#
|
|
||||||
# # width = rp.shape[0]
|
|
||||||
# # height = rp.shape[1]
|
|
||||||
# # print(width, height)
|
|
||||||
# img_bytes = rp.tobytes()
|
|
||||||
# # length = len(img_bytes) + 18
|
|
||||||
# # print(length)
|
|
||||||
# # length = length.to_bytes(4, byteorder='big')
|
|
||||||
# # width = width.to_bytes(2, byteorder='big')
|
|
||||||
# # height = height.to_bytes(2, byteorder='big')
|
|
||||||
# length = (len(img_bytes) + 10).to_bytes(4, byteorder='big')
|
|
||||||
# long_axis = long_axis.to_bytes(2, byteorder='big')
|
|
||||||
# short_axis = short_axis.to_bytes(2, byteorder='big')
|
|
||||||
# defect_num = defect_num.to_bytes(2, byteorder='big')
|
|
||||||
# total_defect_area = int(total_defect_area).to_bytes(4, byteorder='big')
|
|
||||||
# # cmd_type = 'RIM'
|
|
||||||
# # result = result.encode('ascii')
|
|
||||||
# # send_message = b'\xaa' + length + (' ' + cmd_type).upper().encode('ascii') + long_axis + short_axis + defect_num + total_defect_area + width + height + img_bytes + b'\xff\xff\xbb'
|
|
||||||
# send_message = length + long_axis + short_axis + defect_num + total_defect_area + img_bytes
|
|
||||||
# # print(long_axis)
|
|
||||||
# # print(short_axis)
|
|
||||||
# # print(defect_num)
|
|
||||||
# # print(total_defect_area)
|
|
||||||
# # print(width)
|
|
||||||
# # print(height)
|
|
||||||
#
|
|
||||||
# try:
|
|
||||||
# win32file.WriteFile(pipe_send, send_message)
|
|
||||||
# print('发送成功')
|
|
||||||
# # print(send_message)
|
|
||||||
# except Exception as e:
|
|
||||||
# logging.error(f'发送完成指令失败,错误类型:{e}')
|
|
||||||
# return False
|
|
||||||
#
|
|
||||||
# # end_time = time.time()
|
|
||||||
# # print(f'发送时间:{end_time - start_time}秒')
|
|
||||||
#
|
|
||||||
# return True
|
|
||||||
|
|
||||||
|
|
||||||
def send_data(pipe_send, long_axis, short_axis, defect_num, total_defect_area, rp):
    # start_time = time.time()
    rp1 = Image.fromarray(rp.astype(np.uint8))
    # cv2.imwrite('rp1.bmp', rp1)

    # 将 Image 对象保存到 BytesIO 流中
    img_bytes = io.BytesIO()
    rp1.save(img_bytes, format='BMP')
    img_bytes = img_bytes.getvalue()

    # width = rp.shape[0]
    # height = rp.shape[1]
    # print(width, height)
    # img_bytes = rp.tobytes()
    # length = len(img_bytes) + 18
    # print(length)
    # length = length.to_bytes(4, byteorder='big')
    # width = width.to_bytes(2, byteorder='big')
    # height = height.to_bytes(2, byteorder='big')

    print(f'原始长度:', len(rp.tobytes()))
    print(f'发送长度:', len(img_bytes))

    long_axis = long_axis.to_bytes(2, byteorder='big')
    short_axis = short_axis.to_bytes(2, byteorder='big')
    defect_num = defect_num.to_bytes(2, byteorder='big')
    total_defect_area = int(total_defect_area).to_bytes(4, byteorder='big')
    length = (len(img_bytes) + 4).to_bytes(4, byteorder='big')
    # cmd_type = 'RIM'
    # result = result.encode('ascii')
    # send_message = b'\xaa' + length + (' ' + cmd_type).upper().encode('ascii') + long_axis + short_axis + defect_num + total_defect_area + width + height + img_bytes + b'\xff\xff\xbb'
    # send_message = long_axis + short_axis + defect_num + total_defect_area + img_bytes
    send_message = long_axis + short_axis + defect_num + total_defect_area + length + img_bytes
    # print(long_axis)
    # print(short_axis)
    # print(defect_num)
    # print(total_defect_area)
    # print(width)
    # print(height)

    try:
        win32file.WriteFile(pipe_send, send_message)
        time.sleep(0.01)
        print('发送成功')
        # print(len(send_message))
    except Exception as e:
        logging.error(f'发送完成指令失败,错误类型:{e}')
        return False

    # end_time = time.time()
    # print(f'发送时间:{end_time - start_time}秒')

    return True

def mkdir_if_not_exist(dir_name, is_delete=False):
|
|
||||||
"""
|
|
||||||
创建文件夹
|
|
||||||
:param dir_name: 文件夹
|
|
||||||
:param is_delete: 是否删除
|
|
||||||
:return: 是否成功
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if is_delete:
|
|
||||||
if os.path.exists(dir_name):
|
|
||||||
shutil.rmtree(dir_name)
|
|
||||||
print('[Info] 文件夹 "%s" 存在, 删除文件夹.' % dir_name)
|
|
||||||
|
|
||||||
if not os.path.exists(dir_name):
|
|
||||||
os.makedirs(dir_name)
|
|
||||||
print('[Info] 文件夹 "%s" 不存在, 创建文件夹.' % dir_name)
|
|
||||||
return True
|
|
||||||
except Exception as e:
|
|
||||||
print('[Exception] %s' % e)
|
|
||||||
return False
|
|
||||||
|
|
||||||
def create_file(file_name):
|
|
||||||
"""
|
|
||||||
创建文件
|
|
||||||
:param file_name: 文件名
|
|
||||||
:return: None
|
|
||||||
"""
|
|
||||||
if os.path.exists(file_name):
|
|
||||||
print("文件存在:%s" % file_name)
|
|
||||||
return False
|
|
||||||
# os.remove(file_name) # 删除已有文件
|
|
||||||
if not os.path.exists(file_name):
|
|
||||||
print("文件不存在,创建文件:%s" % file_name)
|
|
||||||
open(file_name, 'a').close()
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
class Logger(object):
|
|
||||||
def __init__(self, is_to_file=False, path=None):
|
|
||||||
self.is_to_file = is_to_file
|
|
||||||
if path is None:
|
|
||||||
path = "tomato.log"
|
|
||||||
self.path = path
|
|
||||||
create_file(path)
|
|
||||||
|
|
||||||
def log(self, content):
|
|
||||||
if self.is_to_file:
|
|
||||||
with open(self.path, "a") as f:
|
|
||||||
print(time.strftime("[%Y-%m-%d_%H-%M-%S]:"), file=f)
|
|
||||||
print(content, file=f)
|
|
||||||
else:
|
|
||||||
print(content)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#提取西红柿,使用S+L的图像
|
|
||||||
def extract_s_l(image):
|
|
||||||
# image = cv2.imread(image_path)
|
|
||||||
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
|
|
||||||
lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
|
|
||||||
s_channel = hsv[:,:,1]
|
|
||||||
l_channel = lab[:,:,0]
|
|
||||||
result = cv2.add(s_channel, l_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def find_reflection(image, threshold=190):
|
|
||||||
# 读取图像
|
|
||||||
# image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
|
|
||||||
|
|
||||||
# 应用阈值分割
|
|
||||||
_, reflection = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)
|
|
||||||
|
|
||||||
return reflection
|
|
||||||
|
|
||||||
def otsu_threshold(image):
|
|
||||||
|
|
||||||
# 将图像转换为灰度图像
|
|
||||||
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
|
||||||
|
|
||||||
# 使用Otsu阈值分割
|
|
||||||
_, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
|
||||||
|
|
||||||
return binary
|
|
||||||
|
|
||||||
# 提取花萼,使用G-R的图像
|
|
||||||
def extract_g_r(image):
|
|
||||||
# image = cv2.imread(image_path)
|
|
||||||
g_channel = image[:,:,1]
|
|
||||||
r_channel = image[:,:,2]
|
|
||||||
result = cv2.subtract(cv2.multiply(g_channel, 1.5), r_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
#提取西红柿,使用R-B的图像
|
|
||||||
def extract_r_b(image):
|
|
||||||
# image = cv2.imread(image_path)
|
|
||||||
r_channel = image[:,:,2]
|
|
||||||
b_channel = image[:,:,0]
|
|
||||||
result = cv2.subtract(r_channel, b_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def extract_r_g(image):
|
|
||||||
# image = cv2.imread(image_path)
|
|
||||||
r_channel = image[:,:,2]
|
|
||||||
g_channel = image[:,:,1]
|
|
||||||
result = cv2.subtract(r_channel, g_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def threshold_segmentation(image, threshold, color=255):
|
|
||||||
_, result = cv2.threshold(image, threshold, color, cv2.THRESH_BINARY)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def bitwise_operation(image1, image2, operation='and'):
|
|
||||||
if operation == 'and':
|
|
||||||
result = cv2.bitwise_and(image1, image2)
|
|
||||||
elif operation == 'or':
|
|
||||||
result = cv2.bitwise_or(image1, image2)
|
|
||||||
else:
|
|
||||||
raise ValueError("operation must be 'and' or 'or'")
|
|
||||||
return result
|
|
||||||
|
|
||||||
def largest_connected_component(bin_img):
|
|
||||||
# 使用connectedComponentsWithStats函数找到连通区域
|
|
||||||
num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(bin_img, connectivity=8)
|
|
||||||
|
|
||||||
# 如果只有背景标签,返回一个空的二值图像
|
|
||||||
if num_labels <= 1:
|
|
||||||
return np.zeros_like(bin_img)
|
|
||||||
|
|
||||||
# 找到最大的连通区域(除了背景)
|
|
||||||
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
|
|
||||||
|
|
||||||
# 创建一个新的二值图像,只显示最大的连通区域
|
|
||||||
new_bin_img = np.zeros_like(bin_img)
|
|
||||||
new_bin_img[labels == largest_label] = 255
|
|
||||||
|
|
||||||
return new_bin_img
|
|
||||||
|
|
||||||
def close_operation(bin_img, kernel_size=(5, 5)):
|
|
||||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
|
|
||||||
closed_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
|
|
||||||
return closed_img
|
|
||||||
|
|
||||||
def open_operation(bin_img, kernel_size=(5, 5)):
|
|
||||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
|
|
||||||
opened_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
|
|
||||||
return opened_img
|
|
||||||
|
|
||||||
|
|
||||||
def draw_tomato_edge(original_img, bin_img):
|
|
||||||
bin_img_processed = close_operation(bin_img, kernel_size=(15, 15))
|
|
||||||
# cv2.imshow('Close Operation', bin_img_processed)
|
|
||||||
# bin_img_processed = open_operation(bin_img_processed, kernel_size=(19, 19))
|
|
||||||
# cv2.imshow('Open Operation', bin_img_processed)
|
|
||||||
# 现在使用处理后的bin_img_processed查找轮廓
|
|
||||||
contours, _ = cv2.findContours(bin_img_processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
|
|
||||||
# 如果没有找到轮廓,直接返回原图
|
|
||||||
if not contours:
|
|
||||||
return original_img, np.zeros_like(bin_img) # 返回原图和全黑mask
|
|
||||||
# 找到最大轮廓
|
|
||||||
max_contour = max(contours, key=cv2.contourArea)
|
|
||||||
# 多边形近似的精度调整
|
|
||||||
epsilon = 0.0006 * cv2.arcLength(max_contour, True) # 可以调整这个值
|
|
||||||
approx = cv2.approxPolyDP(max_contour, epsilon, True)
|
|
||||||
# 绘制轮廓
|
|
||||||
cv2.drawContours(original_img, [approx], -1, (0, 255, 0), 3)
|
|
||||||
mask = np.zeros_like(bin_img)
|
|
||||||
|
|
||||||
# 使用白色填充最大轮廓
|
|
||||||
cv2.drawContours(mask, [max_contour], -1, (255), thickness=cv2.FILLED)
|
|
||||||
|
|
||||||
return original_img, mask
|
|
||||||
|
|
||||||
def draw_tomato_edge_convex_hull(original_img, bin_img):
|
|
||||||
bin_img_blurred = cv2.GaussianBlur(bin_img, (5, 5), 0)
|
|
||||||
contours, _ = cv2.findContours(bin_img_blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
if not contours:
|
|
||||||
return original_img
|
|
||||||
max_contour = max(contours, key=cv2.contourArea)
|
|
||||||
hull = cv2.convexHull(max_contour)
|
|
||||||
cv2.drawContours(original_img, [hull], -1, (0, 255, 0), 3)
|
|
||||||
return original_img
|
|
||||||
|
|
||||||
# 得到完整的西红柿二值图像,除了绿色花萼
|
|
||||||
def fill_holes(bin_img):
|
|
||||||
# 复制 bin_img 到 img_filled
|
|
||||||
img_filled = bin_img.copy()
|
|
||||||
|
|
||||||
# 获取图像的高度和宽度
|
|
||||||
height, width = bin_img.shape
|
|
||||||
|
|
||||||
# 创建一个掩码,比输入图像大两个像素点
|
|
||||||
mask = np.zeros((height + 2, width + 2), np.uint8)
|
|
||||||
|
|
||||||
# 使用 floodFill 函数填充黑色区域
|
|
||||||
cv2.floodFill(img_filled, mask, (0, 0), 255)
|
|
||||||
|
|
||||||
# 反转填充后的图像
|
|
||||||
img_filled_d = cv2.bitwise_not(img_filled)
|
|
||||||
|
|
||||||
# 使用 bitwise_or 操作合并原图像和填充后的图像
|
|
||||||
img_filled = cv2.bitwise_or(bin_img, img_filled)
|
|
||||||
# 裁剪 img_filled 和 img_filled_d 到与 bin_img 相同的大小
|
|
||||||
# img_filled = img_filled[:height, :width]
|
|
||||||
img_filled_d = img_filled_d[:height, :width]
|
|
||||||
|
|
||||||
return img_filled, img_filled_d
|
|
||||||
|
|
||||||
def bitwise_and_rgb_with_binary(rgb_img, bin_img):
|
|
||||||
# 将二值图像转换为三通道图像
|
|
||||||
bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)
|
|
||||||
|
|
||||||
# 使用 bitwise_and 操作合并 RGB 图像和二值图像
|
|
||||||
result = cv2.bitwise_and(rgb_img, bin_img_3channel)
|
|
||||||
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
def extract_max_connected_area(image, lower_hsv, upper_hsv):
|
|
||||||
# 读取图像
|
|
||||||
# image = cv2.imread(image_path)
|
|
||||||
|
|
||||||
# 将图像从BGR转换到HSV
|
|
||||||
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
|
|
||||||
|
|
||||||
# 使用阈值获取指定区域的二值图像
|
|
||||||
mask = cv2.inRange(hsv, lower_hsv, upper_hsv)
|
|
||||||
|
|
||||||
# 找到二值图像的连通区域
|
|
||||||
num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
|
|
||||||
|
|
||||||
# 找到最大的连通区域(除了背景)
|
|
||||||
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
|
|
||||||
|
|
||||||
# 创建一个新的二值图像,只显示最大的连通区域
|
|
||||||
new_bin_img = np.zeros_like(mask)
|
|
||||||
new_bin_img[labels == largest_label] = 255
|
|
||||||
|
|
||||||
# 复制 new_bin_img 到 img_filled
|
|
||||||
img_filled = new_bin_img.copy()
|
|
||||||
|
|
||||||
# 获取图像的高度和宽度
|
|
||||||
height, width = new_bin_img.shape
|
|
||||||
|
|
||||||
# 创建一个掩码,比输入图像大两个像素点
|
|
||||||
mask = np.zeros((height + 2, width + 2), np.uint8)
|
|
||||||
|
|
||||||
# 使用 floodFill 函数填充黑色区域
|
|
||||||
cv2.floodFill(img_filled, mask, (0, 0), 255)
|
|
||||||
|
|
||||||
# 反转填充后的图像
|
|
||||||
img_filled_inv = cv2.bitwise_not(img_filled)
|
|
||||||
|
|
||||||
# 使用 bitwise_or 操作合并原图像和填充后的图像
|
|
||||||
img_filled = cv2.bitwise_or(new_bin_img, img_filled_inv)
|
|
||||||
|
|
||||||
return img_filled
|
|
||||||
def get_tomato_dimensions(edge_img):
|
|
||||||
"""
|
|
||||||
根据番茄边缘二值化轮廓图,计算番茄的长径、短径和长短径比值。
|
|
||||||
使用最小外接矩形和最小外接圆两种方法。
|
|
||||||
|
|
||||||
参数:
|
|
||||||
edge_img (numpy.ndarray): 番茄边缘二值化轮廓图,背景为黑色,番茄区域为白色。
|
|
||||||
|
|
||||||
返回:
|
|
||||||
tuple: (长径, 短径, 长短径比值)
|
|
||||||
"""
|
|
||||||
if edge_img is None or edge_img.any() == 0:
|
|
||||||
return (0, 0)
|
|
||||||
# 最小外接矩形
|
|
||||||
rect = cv2.minAreaRect(cv2.findContours(edge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0])
|
|
||||||
major_axis, minor_axis = rect[1]
|
|
||||||
# aspect_ratio = max(major_axis, minor_axis) / min(major_axis, minor_axis)
|
|
||||||
|
|
||||||
# # 最小外接圆
|
|
||||||
# (x, y), radius = cv2.minEnclosingCircle(
|
|
||||||
# cv2.findContours(edge_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0])
|
|
||||||
# diameter = 2 * radius
|
|
||||||
# aspect_ratio_circle = 1.0
|
|
||||||
|
|
||||||
return (max(major_axis, minor_axis), min(major_axis, minor_axis))
|
|
||||||
|
|
||||||
def get_defect_info(defect_img):
|
|
||||||
"""
|
|
||||||
根据番茄区域缺陷二值化轮廓图,计算缺陷区域的个数和总面积。
|
|
||||||
|
|
||||||
参数:
|
|
||||||
defect_img (numpy.ndarray): 番茄区域缺陷二值化轮廓图,背景为黑色,番茄区域为白色,缺陷区域为黑色连通域。
|
|
||||||
|
|
||||||
返回:
|
|
||||||
tuple: (缺陷区域个数, 缺陷区域像素面积,缺陷像素总面积)
|
|
||||||
"""
|
|
||||||
# 检查输入是否为空
|
|
||||||
if defect_img is None or defect_img.any() == 0:
|
|
||||||
return (0, 0)
|
|
||||||
|
|
||||||
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(defect_img, connectivity=4)
|
|
||||||
max_area = max(stats[i, cv2.CC_STAT_AREA] for i in range(1, nb_components))
|
|
||||||
areas = []
|
|
||||||
for i in range(1, nb_components):
|
|
||||||
area = stats[i, cv2.CC_STAT_AREA]
|
|
||||||
if area != max_area:
|
|
||||||
areas.append(area)
|
|
||||||
number_defects = len(areas)
|
|
||||||
total_pixels = sum(areas)
|
|
||||||
return number_defects, total_pixels
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def connected_components_analysis(binary_image):
|
|
||||||
"""
|
|
||||||
从二值化图像计算黑色连通域个数和各个黑色连通域像素面积及黑色像素总面积。
|
|
||||||
|
|
||||||
参数:
|
|
||||||
binary_image (numpy.ndarray): 二值化图像, 其中 0 表示白色, 1 表示黑色。
|
|
||||||
|
|
||||||
返回:
|
|
||||||
num_components (int): 黑色连通域的个数。
|
|
||||||
component_areas (list): 每个黑色连通域的像素面积。
|
|
||||||
total_black_area (int): 黑色像素的总面积。
|
|
||||||
"""
|
|
||||||
# 标记连通域
|
|
||||||
labeled_image, num_components = label(binary_image)
|
|
||||||
|
|
||||||
# 获取每个连通域的像素位置
|
|
||||||
slices = find_objects(labeled_image)
|
|
||||||
|
|
||||||
# 计算每个连通域的像素面积
|
|
||||||
component_areas = []
|
|
||||||
for slice_obj in slices:
|
|
||||||
component_area = np.sum(binary_image[slice_obj])
|
|
||||||
component_areas.append(component_area)
|
|
||||||
|
|
||||||
# 计算黑色像素的总面积
|
|
||||||
total_black_area = np.sum(binary_image)
|
|
||||||
|
|
||||||
return num_components, component_areas, total_black_area
|
|
||||||
@ -1,111 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from spec_read import all_spectral_data
import joblib


def prepare_data(data):
    """Reshape data and select specified spectral bands."""
    selected_bands = [8, 9, 10, 48, 49, 50, 77, 80, 103, 108, 115, 143, 145]
    # 筛选特定的波段
    data_selected = data[:, :, :, selected_bands]
    # 将筛选后的数据重塑为二维数组,每行代表一个样本
    reshaped_data = data_selected.reshape(-1, 30 * 30 * len(selected_bands))
    return reshaped_data


def split_data(X, y, test_size=0.20, random_state=12):
    """Split data into training and test sets."""
    return train_test_split(X, y, test_size=test_size, random_state=random_state)


def evaluate_model(model, X_test, y_test):
    """Evaluate the model and return multiple metrics and predictions."""
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    return mse, mae, r2, y_pred


def print_predictions(y_test, y_pred, model_name):
    """Print actual and predicted values."""
    print(f"Test Set Predictions for {model_name}:")
    for i, (real, pred) in enumerate(zip(y_test, y_pred)):
        print(f"Sample {i + 1}: True Value = {real:.2f}, Predicted Value = {pred:.2f}")


def main():
    sweetness_acidity = np.array([
        16.2, 16.1, 17, 16.9, 16.8, 17.8, 18.1, 17.2, 17, 17.2, 17.1, 17.2,
        17.2, 17.2, 18.1, 17, 17.6, 17.4, 17.1, 17.1, 16.9, 17.6, 17.3, 16.3,
        16.5, 18.7, 17.6, 16.2, 16.8, 17.2, 16.8, 17.3, 16, 16.6, 16.7, 16.7,
        17.3, 16.3, 16.8, 17.4, 17.3, 16.3, 16.1, 17.2, 18.6, 16.8, 16.1, 17.2,
        18.3, 16.5, 16.6, 17, 17, 17.8, 16.4, 18, 17.7, 17, 18.3, 16.8, 17.5,
        17.7, 18.5, 18, 17.7, 17, 18.3, 18.1, 17.4, 17.7, 17.8, 16.3, 17.1, 16.8,
        17.2, 17.5, 16.6, 17.7, 17.1, 17.7, 19.4, 20.3, 17.3, 15.8, 18, 17.7,
        17.2, 15.2, 18, 18.4, 18.3, 15.7, 17.2, 18.6, 15.6, 17, 16.9, 17.4, 17.8,
        16.5
    ])

    X = prepare_data(all_spectral_data)
    print(f'原数据尺寸:{all_spectral_data.shape};训练数据尺寸:{X.shape}')
    X_train, X_test, y_train, y_test = split_data(X, sweetness_acidity)

    models_params = {
        "RandomForest": {
            'model': RandomForestRegressor(),
            'params': {
                'n_estimators': [100, 200, 300],
                'max_depth': [None, 10, 20],
                'min_samples_split': [2, 5],
                'min_samples_leaf': [1, 2],
                'random_state': [42]
            }
        },
        "GradientBoosting": {
            'model': GradientBoostingRegressor(),
            'params': {
                'n_estimators': [100, 200, 300],
                'learning_rate': [0.01, 0.1, 0.2],
                'max_depth': [3, 5, 7],
                'min_samples_split': [2, 5],
                'min_samples_leaf': [1, 2],
                'random_state': [42]
            }
        },
        "SVR": {
            'model': SVR(),
            'params': {
                'C': [0.1, 1, 10, 100],
                'gamma': ['scale', 'auto', 0.01, 0.1],
                'epsilon': [0.01, 0.1, 0.5]
            }
        }
    }

    best_models = {}

    for model_name, mp in models_params.items():
        grid_search = GridSearchCV(mp['model'], mp['params'], cv=5, scoring='r2', verbose=2)
        grid_search.fit(X_train, y_train)
        best_models[model_name] = grid_search.best_estimator_
        mse, mae, r2, y_pred = evaluate_model(grid_search.best_estimator_, X_test, y_test)
        print(f"Best {model_name} parameters: {grid_search.best_params_}")
        print(f"Model: {model_name}")
        print(f"MSE on the test set: {mse}")
        print(f"MAE on the test set: {mae}")
        print(f"R² score on the test set: {r2}")
        print_predictions(y_test, y_pred, model_name)
        print("\n" + "-" * 50 + "\n")

        # Optionally save the best model for each type
        joblib.dump(grid_search.best_estimator_, f'{model_name}_best_model.joblib')


if __name__ == "__main__":
    main()
@ -1,87 +0,0 @@
import joblib
import numpy as np
import os
from model_train import prepare_data


def read_spectral_data(hdr_path, raw_path):
    # Read HDR file for image dimensions information
    with open(hdr_path, 'r', encoding='latin1') as hdr_file:
        lines = hdr_file.readlines()
    height = width = bands = 0
    for line in lines:
        if line.startswith('lines'):
            height = int(line.split()[-1])
        elif line.startswith('samples'):
            width = int(line.split()[-1])
        elif line.startswith('bands'):
            bands = int(line.split()[-1])

    # Read spectral data from RAW file
    raw_image = np.fromfile(raw_path, dtype='uint16')
    # Initialize the image with the actual read dimensions
    formatImage = np.zeros((height, width, bands))

    for row in range(height):
        for dim in range(bands):
            formatImage[row, :, dim] = raw_image[(dim + row * bands) * width:(dim + 1 + row * bands) * width]

    # Ensure the image is 30x30x224 by cropping or padding
    target_height, target_width, target_bands = 30, 30, 224
    # Crop or pad height
    if height > target_height:
        formatImage = formatImage[:target_height, :, :]
    elif height < target_height:
        pad_height = target_height - height
        formatImage = np.pad(formatImage, ((0, pad_height), (0, 0), (0, 0)), mode='constant', constant_values=0)

    # Crop or pad width
    if width > target_width:
        formatImage = formatImage[:, :target_width, :]
    elif width < target_width:
        pad_width = target_width - width
        formatImage = np.pad(formatImage, ((0, 0), (0, pad_width), (0, 0)), mode='constant', constant_values=0)

    # Crop or pad bands if necessary (usually bands should not change)
    if bands > target_bands:
        formatImage = formatImage[:, :, :target_bands]
    elif bands < target_bands:
        pad_bands = target_bands - bands
        formatImage = np.pad(formatImage, ((0, 0), (0, 0), (0, pad_bands)), mode='constant', constant_values=0)

    return formatImage


def load_model(model_path):
    """Load a trained model from disk."""
    return joblib.load(model_path)


def predict(model, data):
    """Predict on prepared data."""
    return model.predict(data)


def main():
    # Load the model
    model = load_model(r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\models\passion_fruit_3.joblib')

    # Read the data
    directory = r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\xs\光谱数据3030'
    all_spectral_data = []
    for i in range(1, 101):
        hdr_path = os.path.join(directory, f'{i}.HDR')
        raw_path = os.path.join(directory, f'{i}')
        spectral_data = read_spectral_data(hdr_path, raw_path)
        all_spectral_data.append(spectral_data)
    all_spectral_data = np.stack(all_spectral_data)
    print(all_spectral_data.shape)

    # Preprocess the data
    data_prepared = prepare_data(all_spectral_data)
    print(data_prepared.shape)

    # Predict
    predictions = predict(model, data_prepared)

    # Print the predictions
    print(predictions)


if __name__ == "__main__":
    main()
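# ---------------------------------------------------------------------------
# Side note (sketch, not part of the original file): the nested row/band loop in
# read_spectral_data() unpacks a band-interleaved-by-line (BIL) buffer. Assuming
# the loop above reflects the intended layout, the same cube can be produced with
# a single reshape/transpose, which is considerably faster:
import numpy as np

def unpack_bil(raw_image, height, width, bands):
    # (row, band, col) blocks in the file -> (row, col, band) cube in memory.
    cube = raw_image[:height * bands * width].reshape(height, bands, width)
    return cube.transpose(0, 2, 1).astype(np.float64)
# ---------------------------------------------------------------------------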
@ -1,70 +0,0 @@
|
|||||||
import numpy as np
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def read_spectral_data(hdr_path, raw_path):
|
|
||||||
# Read HDR file for image dimensions information
|
|
||||||
with open(hdr_path, 'r', encoding='latin1') as hdr_file:
|
|
||||||
lines = hdr_file.readlines()
|
|
||||||
height = width = bands = 0
|
|
||||||
for line in lines:
|
|
||||||
if line.startswith('lines'):
|
|
||||||
height = int(line.split()[-1])
|
|
||||||
elif line.startswith('samples'):
|
|
||||||
width = int(line.split()[-1])
|
|
||||||
elif line.startswith('bands'):
|
|
||||||
bands = int(line.split()[-1])
|
|
||||||
|
|
||||||
# Read spectral data from RAW file
|
|
||||||
raw_image = np.fromfile(raw_path, dtype='uint16')
|
|
||||||
# Initialize the image with the actual read dimensions
|
|
||||||
formatImage = np.zeros((height, width, bands))
|
|
||||||
|
|
||||||
for row in range(height):
|
|
||||||
for dim in range(bands):
|
|
||||||
formatImage[row, :, dim] = raw_image[(dim + row * bands) * width:(dim + 1 + row * bands) * width]
|
|
||||||
|
|
||||||
# Ensure the image is 30x30x224 by cropping or padding
|
|
||||||
target_height, target_width, target_bands = 30, 30, 224
|
|
||||||
# Crop or pad height
|
|
||||||
if height > target_height:
|
|
||||||
formatImage = formatImage[:target_height, :, :]
|
|
||||||
elif height < target_height:
|
|
||||||
pad_height = target_height - height
|
|
||||||
formatImage = np.pad(formatImage, ((0, pad_height), (0, 0), (0, 0)), mode='constant', constant_values=0)
|
|
||||||
|
|
||||||
# Crop or pad width
|
|
||||||
if width > target_width:
|
|
||||||
formatImage = formatImage[:, :target_width, :]
|
|
||||||
elif width < target_width:
|
|
||||||
pad_width = target_width - width
|
|
||||||
formatImage = np.pad(formatImage, ((0, 0), (0, pad_width), (0, 0)), mode='constant', constant_values=0)
|
|
||||||
|
|
||||||
# Crop or pad bands if necessary (usually bands should not change)
|
|
||||||
if bands > target_bands:
|
|
||||||
formatImage = formatImage[:, :, :target_bands]
|
|
||||||
elif bands < target_bands:
|
|
||||||
pad_bands = target_bands - bands
|
|
||||||
formatImage = np.pad(formatImage, ((0, 0), (0, 0), (0, pad_bands)), mode='constant', constant_values=0)
|
|
||||||
|
|
||||||
return formatImage
|
|
||||||
|
|
||||||
|
|
||||||
# Specify the directory containing the HDR and RAW files
|
|
||||||
directory = r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\xs\光谱数据3030'
|
|
||||||
|
|
||||||
# Initialize a list to hold all the spectral data arrays
|
|
||||||
all_spectral_data = []
|
|
||||||
|
|
||||||
# Loop through each data set (assuming there are 100 datasets)
|
|
||||||
for i in range(1, 101):
|
|
||||||
hdr_path = os.path.join(directory, f'{i}.HDR')
|
|
||||||
raw_path = os.path.join(directory, f'{i}')
|
|
||||||
|
|
||||||
# Read data
|
|
||||||
spectral_data = read_spectral_data(hdr_path, raw_path)
|
|
||||||
all_spectral_data.append(spectral_data)
|
|
||||||
|
|
||||||
# Stack all data into a single numpy array
|
|
||||||
all_spectral_data = np.stack(all_spectral_data)
|
|
||||||
print(all_spectral_data.shape)  # This should print (100, 30, 30, 224)
|
|
||||||
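# ---------------------------------------------------------------------------
# Sketch (not part of the original files): the crop-or-pad-to-(30, 30, 224) logic
# above is written out per axis and duplicated across scripts; a generic helper
# with the same behaviour (crop the excess, zero-pad the shortfall) could look
# like this:
import numpy as np

def crop_or_pad(cube, target_shape=(30, 30, 224)):
    for axis, target in enumerate(target_shape):
        size = cube.shape[axis]
        if size > target:
            # Keep only the leading part of the axis, as the original code does.
            cube = np.take(cube, range(target), axis=axis)
        elif size < target:
            # Zero-pad at the end of the axis.
            pad = [(0, 0)] * cube.ndim
            pad[axis] = (0, target - size)
            cube = np.pad(cube, pad, mode='constant', constant_values=0)
    return cube
# ---------------------------------------------------------------------------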
@ -1,904 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# @Time : 2024/6/4 21:34
|
|
||||||
# @Author : GG
|
|
||||||
# @File : classifer.py
|
|
||||||
# @Software: PyCharm
|
|
||||||
|
|
||||||
import os
|
|
||||||
import cv2
|
|
||||||
import utils
|
|
||||||
import joblib
|
|
||||||
import logging
|
|
||||||
import random
|
|
||||||
import numpy as np
|
|
||||||
from PIL import Image
|
|
||||||
from utils import Pipe
|
|
||||||
from config import Config as setting
|
|
||||||
from sklearn.ensemble import RandomForestRegressor
|
|
||||||
#图像分类网络所需库,实际并未使用分类网络
|
|
||||||
# import torch
|
|
||||||
# import torch.nn as nn
|
|
||||||
# from torchvision import transforms
|
|
||||||
|
|
||||||
#番茄RGB处理模型
|
|
||||||
class Tomato:
|
|
||||||
def __init__(self, find_reflection_threshold=setting.find_reflection_threshold, extract_g_r_factor=setting.extract_g_r_factor):
|
|
||||||
''' 初始化 Tomato 类。'''
|
|
||||||
self.find_reflection_threshold = find_reflection_threshold
|
|
||||||
self.extract_g_r_factor = extract_g_r_factor
|
|
||||||
pass
|
|
||||||
|
|
||||||
def extract_s_l(self, image):
|
|
||||||
'''
|
|
||||||
提取图像的 S 通道(饱和度)和 L 通道(亮度),并将两者相加。
|
|
||||||
:param image: 输入的 BGR 图像
|
|
||||||
:return: S 通道和 L 通道相加的结果
|
|
||||||
'''
|
|
||||||
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
|
|
||||||
lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
|
|
||||||
s_channel = hsv[:, :, 1]
|
|
||||||
l_channel = lab[:, :, 0]
|
|
||||||
result = cv2.add(s_channel, l_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def find_reflection(self, image):
|
|
||||||
'''
|
|
||||||
通过阈值处理识别图像中的反射区域。
|
|
||||||
:param image: 输入的单通道图像
|
|
||||||
:param threshold: 用于二值化的阈值
|
|
||||||
:return: 二值化后的图像,高于阈值的部分为白色,其余为黑色
|
|
||||||
'''
|
|
||||||
_, reflection = cv2.threshold(image, self.find_reflection_threshold, 255, cv2.THRESH_BINARY)
|
|
||||||
return reflection
|
|
||||||
|
|
||||||
def otsu_threshold(self, image):
|
|
||||||
'''
|
|
||||||
使用 Otsu 大津法自动计算并应用阈值,进行图像的二值化处理。
|
|
||||||
:param image: 输入的单通道图像
|
|
||||||
:return: 二值化后的图像
|
|
||||||
'''
|
|
||||||
_, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
|
|
||||||
return binary
|
|
||||||
|
|
||||||
def extract_g_r(self, image):
|
|
||||||
'''
|
|
||||||
提取图像中的 G 通道(绿色),放大并减去 R 通道(红色)。
|
|
||||||
:param image: 输入的 BGR 图像
|
|
||||||
:return: G 通道乘以 1.5 后减去 R 通道的结果
|
|
||||||
'''
|
|
||||||
g_channel = image[:, :, 1]
|
|
||||||
r_channel = image[:, :, 2]
|
|
||||||
result = cv2.subtract(cv2.multiply(g_channel, self.extract_g_r_factor), r_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def extract_r_b(self, image):
|
|
||||||
'''
|
|
||||||
提取图像中的 R 通道(红色)和 B 通道(蓝色),并进行相减。
|
|
||||||
:param image: 输入的 BGR 图像
|
|
||||||
:return: R 通道减去 B 通道的结果
|
|
||||||
'''
|
|
||||||
r_channel = image[:, :, 2]
|
|
||||||
b_channel = image[:, :, 0]
|
|
||||||
result = cv2.subtract(r_channel, b_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def extract_r_g(self, image):
|
|
||||||
'''
|
|
||||||
提取图像中的 R 通道(红色)和 G 通道(绿色),并进行相减。
|
|
||||||
:param image: 输入的 BGR 图像
|
|
||||||
:return: R 通道减去 G 通道的结果
|
|
||||||
'''
|
|
||||||
r_channel = image[:, :, 2]
|
|
||||||
g_channel = image[:, :, 1]
|
|
||||||
result = cv2.subtract(r_channel, g_channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def threshold_segmentation(self, image, threshold, color=255):
|
|
||||||
'''
|
|
||||||
对图像进行阈值分割,高于阈值的部分设置为指定的颜色。
|
|
||||||
:param image: 输入的单通道图像
|
|
||||||
:param threshold: 阈值
|
|
||||||
:param color: 设置的颜色值
|
|
||||||
:return: 分割后的二值化图像
|
|
||||||
'''
|
|
||||||
_, result = cv2.threshold(image, threshold, color, cv2.THRESH_BINARY)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def bitwise_operation(self, image1, image2, operation='and'):
|
|
||||||
'''
|
|
||||||
对两幅图像执行位运算(与或运算)。
|
|
||||||
:param image1: 第一幅图像
|
|
||||||
:param image2: 第二幅图像
|
|
||||||
:param operation: 执行的操作类型('and' 或 'or')
|
|
||||||
:return: 位运算后的结果
|
|
||||||
'''
|
|
||||||
if operation == 'and':
|
|
||||||
result = cv2.bitwise_and(image1, image2)
|
|
||||||
elif operation == 'or':
|
|
||||||
result = cv2.bitwise_or(image1, image2)
|
|
||||||
else:
|
|
||||||
raise ValueError("operation must be 'and' or 'or'")
|
|
||||||
return result
|
|
||||||
|
|
||||||
def largest_connected_component(self, bin_img):
|
|
||||||
'''
|
|
||||||
提取二值图像中的最大连通区域。
|
|
||||||
:param bin_img: 输入的二值图像
|
|
||||||
:return: 只包含最大连通区域的二值图像
|
|
||||||
'''
|
|
||||||
num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(bin_img, connectivity=8)
|
|
||||||
if num_labels <= 1:
|
|
||||||
return np.zeros_like(bin_img)
|
|
||||||
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
|
|
||||||
new_bin_img = np.zeros_like(bin_img)
|
|
||||||
new_bin_img[labels == largest_label] = 255
|
|
||||||
return new_bin_img
|
|
||||||
|
|
||||||
def close_operation(self, bin_img, kernel_size=(5, 5)):
|
|
||||||
'''
|
|
||||||
对二值图像进行闭运算,用于消除内部小孔和连接接近的对象。
|
|
||||||
:param bin_img: 输入的二值图像
|
|
||||||
:param kernel_size: 核的大小
|
|
||||||
:return: 进行闭运算后的图像
|
|
||||||
'''
|
|
||||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
|
|
||||||
closed_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
|
|
||||||
return closed_img
|
|
||||||
|
|
||||||
def open_operation(self, bin_img, kernel_size=(5, 5)):
|
|
||||||
'''
|
|
||||||
对二值图像进行开运算,用于去除小的噪点。
|
|
||||||
:param bin_img: 输入的二值图像
|
|
||||||
:param kernel_size: 核的大小
|
|
||||||
:return: 进行开运算后的图像
|
|
||||||
'''
|
|
||||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
|
|
||||||
opened_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
|
|
||||||
return opened_img
|
|
||||||
|
|
||||||
def draw_tomato_edge(self, original_img, bin_img):
|
|
||||||
'''
|
|
||||||
在原始图像上绘制最大西红柿轮廓的近似多边形。
|
|
||||||
:param original_img: 原始 BGR 图像
|
|
||||||
:param bin_img: 西红柿的二值图像
|
|
||||||
:return: 带有绘制边缘的原始图像和边缘掩码
|
|
||||||
'''
|
|
||||||
bin_img_processed = self.close_operation(bin_img, kernel_size=(15, 15))
|
|
||||||
contours, _ = cv2.findContours(bin_img_processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
if not contours:
|
|
||||||
return original_img, np.zeros_like(bin_img)
|
|
||||||
max_contour = max(contours, key=cv2.contourArea)
|
|
||||||
epsilon = 0.0006 * cv2.arcLength(max_contour, True)
|
|
||||||
approx = cv2.approxPolyDP(max_contour, epsilon, True)
|
|
||||||
cv2.drawContours(original_img, [approx], -1, (0, 255, 0), 3)
|
|
||||||
mask = np.zeros_like(bin_img)
|
|
||||||
cv2.drawContours(mask, [max_contour], -1, (255), thickness=cv2.FILLED)
|
|
||||||
return original_img, mask
|
|
||||||
|
|
||||||
def draw_tomato_edge_convex_hull(self, original_img, bin_img):
|
|
||||||
'''
|
|
||||||
在原始图像上绘制最大西红柿轮廓的凸包。
|
|
||||||
:param original_img: 原始 BGR 图像
|
|
||||||
:param bin_img: 西红柿的二值图像
|
|
||||||
:return: 带有绘制凸包的原始图像
|
|
||||||
'''
|
|
||||||
bin_img_blurred = cv2.GaussianBlur(bin_img, (5, 5), 0)
|
|
||||||
contours, _ = cv2.findContours(bin_img_blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
if not contours:
|
|
||||||
return original_img
|
|
||||||
max_contour = max(contours, key=cv2.contourArea)
|
|
||||||
hull = cv2.convexHull(max_contour)
|
|
||||||
cv2.drawContours(original_img, [hull], -1, (0, 255, 0), 3)
|
|
||||||
return original_img
|
|
||||||
|
|
||||||
|
|
||||||
def bitwise_and_rgb_with_binary(self, rgb_img, bin_img):
|
|
||||||
'''
|
|
||||||
将 RGB 图像与二值图像进行按位与操作,用于将二值区域应用于原始图像。
|
|
||||||
:param rgb_img: 原始 RGB 图像
|
|
||||||
:param bin_img: 二值图像
|
|
||||||
:return: 按位与后的结果图像
|
|
||||||
'''
|
|
||||||
bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)
|
|
||||||
result = cv2.bitwise_and(rgb_img, bin_img_3channel)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def extract_max_connected_area(self, image, lower_hsv, upper_hsv):
|
|
||||||
'''
|
|
||||||
提取图像中满足 HSV 范围条件的最大连通区域,并填充孔洞。
|
|
||||||
:param image: 输入的 BGR 图像
|
|
||||||
:param lower_hsv: HSV 范围的下限
|
|
||||||
:param upper_hsv: HSV 范围的上限
|
|
||||||
:return: 处理后的图像
|
|
||||||
'''
|
|
||||||
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
|
|
||||||
mask = cv2.inRange(hsv, lower_hsv, upper_hsv)
|
|
||||||
num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)
|
|
||||||
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])
|
|
||||||
new_bin_img = np.zeros_like(mask)
|
|
||||||
new_bin_img[labels == largest_label] = 255
|
|
||||||
img_filled = new_bin_img.copy()
|
|
||||||
height, width = new_bin_img.shape
|
|
||||||
mask = np.zeros((height + 2, width + 2), np.uint8)
|
|
||||||
cv2.floodFill(img_filled, mask, (0, 0), 255)
|
|
||||||
img_filled_inv = cv2.bitwise_not(img_filled)
|
|
||||||
img_filled = cv2.bitwise_or(new_bin_img, img_filled_inv)
|
|
||||||
return img_filled
|
|
||||||
|
|
||||||
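# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): how the Tomato helpers above chain
# together on one BGR frame, roughly the way Data_processing.analyze_tomato() does
# further down. The image path is a placeholder, not a file from this repo.
def _demo_tomato_pipeline(path='tomato.bmp'):
    img = cv2.imread(path)
    tom = Tomato()
    s_l = tom.extract_s_l(img)                                   # S + L channel image
    bin_img = tom.threshold_segmentation(s_l, setting.threshold_s_l)
    body = tom.largest_connected_component(bin_img)              # keep only the fruit
    edged, mask = tom.draw_tomato_edge(img.copy(), body)         # outline + filled mask
    return edged, mask
# ---------------------------------------------------------------------------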
#百香果RGB处理模型
|
|
||||||
class Passion_fruit:
|
|
||||||
def __init__(self, hue_value=setting.hue_value, hue_delta=setting.hue_delta,
|
|
||||||
value_target=setting.value_target, value_delta=setting.value_delta):
|
|
||||||
# 初始化常用参数
|
|
||||||
self.hue_value = hue_value
|
|
||||||
self.hue_delta = hue_delta
|
|
||||||
self.value_target = value_target
|
|
||||||
self.value_delta = value_delta
|
|
||||||
|
|
||||||
def create_mask(self, hsv_image):
|
|
||||||
# 创建H通道阈值掩码
|
|
||||||
lower_hue = np.array([self.hue_value - self.hue_delta, 0, 0])
|
|
||||||
upper_hue = np.array([self.hue_value + self.hue_delta, 255, 255])
|
|
||||||
hue_mask = cv2.inRange(hsv_image, lower_hue, upper_hue)
|
|
||||||
# 创建V通道排除中心值的掩码
|
|
||||||
lower_value_1 = np.array([0, 0, 0])
|
|
||||||
upper_value_1 = np.array([180, 255, self.value_target - self.value_delta])
|
|
||||||
lower_value_2 = np.array([0, 0, self.value_target + self.value_delta])
|
|
||||||
upper_value_2 = np.array([180, 255, 255])
|
|
||||||
value_mask_1 = cv2.inRange(hsv_image, lower_value_1, upper_value_1)
|
|
||||||
value_mask_1 = cv2.bitwise_not(value_mask_1)
|
|
||||||
value_mask_2 = cv2.inRange(hsv_image, lower_value_2, upper_value_2)
|
|
||||||
value_mask = cv2.bitwise_and(value_mask_1, value_mask_2)
|
|
||||||
|
|
||||||
# 合并H通道和V通道掩码
|
|
||||||
return cv2.bitwise_and(hue_mask, value_mask)
|
|
||||||
|
|
||||||
def apply_morphology(self, mask):
|
|
||||||
# 应用形态学操作
|
|
||||||
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
|
|
||||||
return cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
|
|
||||||
|
|
||||||
def find_largest_component(self, mask):
|
|
||||||
if mask is None or mask.size == 0 or np.all(mask == 0):
|
|
||||||
logging.warning("RGB 图像为空或全黑,返回一个全黑RGB图像。")
|
|
||||||
return np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8) \
|
|
||||||
if mask is None else np.zeros_like(mask)
|
|
||||||
# 寻找最大连通组件
|
|
||||||
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, 4, cv2.CV_32S)
|
|
||||||
if num_labels < 2:
|
|
||||||
return None # 没有找到显著的组件
|
|
||||||
max_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA]) # 跳过背景
|
|
||||||
return (labels == max_label).astype(np.uint8) * 255
|
|
||||||
def draw_contours_on_image(self, original_image, mask_image):
|
|
||||||
"""
|
|
||||||
在原图上绘制轮廓
|
|
||||||
:param original_image: 原图的NumPy数组
|
|
||||||
:param mask_image: 轮廓mask的NumPy数组
|
|
||||||
:return: 在原图上绘制轮廓后的图像
|
|
||||||
"""
|
|
||||||
# 确保mask_image是二值图像
|
|
||||||
_, binary_mask = cv2.threshold(mask_image, 127, 255, cv2.THRESH_BINARY)
|
|
||||||
# 查找mask图像中的轮廓
|
|
||||||
contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
# 在原图上绘制轮廓
|
|
||||||
cv2.drawContours(original_image, contours, -1, (0, 255, 0), 2)
|
|
||||||
return original_image
|
|
||||||
|
|
||||||
def bitwise_and_rgb_with_binary(self, rgb_img, bin_img):
|
|
||||||
'''
|
|
||||||
将 RGB 图像与二值图像进行按位与操作,用于将二值区域应用于原始图像。
|
|
||||||
:param rgb_img: 原始 RGB 图像
|
|
||||||
:param bin_img: 二值图像
|
|
||||||
:return: 按位与后的结果图像
|
|
||||||
'''
|
|
||||||
# 检查 RGB 图像是否为空或全黑
|
|
||||||
if rgb_img is None or rgb_img.size == 0 or np.all(rgb_img == 0):
|
|
||||||
logging.warning("RGB 图像为空或全黑,返回一个全黑RGB图像。")
|
|
||||||
return np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8) \
|
|
||||||
if rgb_img is None else np.zeros_like(rgb_img)
|
|
||||||
# 检查二值图像是否为空或全黑
|
|
||||||
if bin_img is None or bin_img.size == 0 or np.all(bin_img == 0):
|
|
||||||
logging.warning("二值图像为空或全黑,返回一个全黑RGB图像。")
|
|
||||||
return np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8) \
|
|
||||||
if bin_img is None else np.zeros_like(bin_img)
|
|
||||||
# 转换二值图像为三通道
|
|
||||||
try:
|
|
||||||
bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)
|
|
||||||
except cv2.error as e:
|
|
||||||
logging.error(f"转换二值图像时发生错误: {e}")
|
|
||||||
return np.zeros_like(rgb_img)
|
|
||||||
# 进行按位与操作
|
|
||||||
try:
|
|
||||||
result = cv2.bitwise_and(rgb_img, bin_img_3channel)
|
|
||||||
except cv2.error as e:
|
|
||||||
logging.error(f"执行按位与操作时发生错误: {e}")
|
|
||||||
return np.zeros_like(rgb_img)
|
|
||||||
return result
|
|
||||||
|
|
||||||
def extract_green_pixels_cv(self,image):
|
|
||||||
'''
|
|
||||||
提取图像中的绿色像素。
|
|
||||||
:param image:
|
|
||||||
:return:
|
|
||||||
'''
|
|
||||||
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
|
||||||
# Define the HSV range for green
|
|
||||||
lower_green = np.array([setting.low_H, setting.low_S, setting.low_V])
|
|
||||||
upper_green = np.array([setting.high_H, setting.high_S, setting.high_V])
|
|
||||||
# Convert the image to HSV
|
|
||||||
hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV)
|
|
||||||
# Create the mask
|
|
||||||
mask = cv2.inRange(hsv, lower_green, upper_green)
|
|
||||||
# Bitwise-AND mask and original image
|
|
||||||
res = cv2.bitwise_and(image_rgb, image_rgb, mask=mask)
|
|
||||||
# Convert result to BGR for display
|
|
||||||
res_bgr = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
|
|
||||||
return mask
|
|
||||||
|
|
||||||
def pixel_comparison(self, defect, mask):
|
|
||||||
'''
|
|
||||||
比较两幅图像的像素值,如果相同则赋值为0,不同则赋值为255。
|
|
||||||
:param defect:
|
|
||||||
:param mask:
|
|
||||||
:return:
|
|
||||||
'''
|
|
||||||
# 确保图像是二值图像
|
|
||||||
_, defect_binary = cv2.threshold(defect, 127, 255, cv2.THRESH_BINARY)
|
|
||||||
_, mask_binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
|
|
||||||
# 执行像素比较
|
|
||||||
green_img = np.where(defect_binary == mask_binary, 0, 255).astype(np.uint8)
|
|
||||||
return green_img
|
|
||||||
|
|
||||||
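# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): the Passion_fruit masking steps
# above, chained the way Data_processing.analyze_passion_fruit() uses them further
# down. The image path is a placeholder.
def _demo_passion_fruit_mask(path='passion_fruit.bmp'):
    img = cv2.imread(path)
    pf = Passion_fruit()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = pf.create_mask(hsv)               # hue band + value band thresholding
    mask = pf.apply_morphology(mask)         # open to remove speckle noise
    fruit = pf.find_largest_component(mask)  # keep the largest blob only
    return fruit
# ---------------------------------------------------------------------------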
#糖度预测模型
|
|
||||||
class Spec_predict(object):
|
|
||||||
def __init__(self, load_from=None, debug_mode=False):
|
|
||||||
self.debug_mode = debug_mode
|
|
||||||
self.log = utils.Logger(is_to_file=debug_mode)
|
|
||||||
if load_from is not None:
|
|
||||||
self.load(load_from)
|
|
||||||
else:
|
|
||||||
self.model = RandomForestRegressor(n_estimators=100)
|
|
||||||
|
|
||||||
def load(self, path):
|
|
||||||
if not os.path.isabs(path):
|
|
||||||
self.log.log('Path is relative, converting to absolute path.')
|
|
||||||
path = os.path.abspath(path)
|
|
||||||
|
|
||||||
if not os.path.exists(path):
|
|
||||||
self.log.log(f'Model file not found at path: {path}')
|
|
||||||
raise FileNotFoundError(f'Model file not found at path: {path}')
|
|
||||||
|
|
||||||
with open(path, 'rb') as f:
|
|
||||||
model_dic = joblib.load(f)
|
|
||||||
self.model = model_dic
|
|
||||||
self.log.log(f'Model loaded successfully from {path}')
|
|
||||||
|
|
||||||
|
|
||||||
def predict(self, data_x):
|
|
||||||
'''
|
|
||||||
预测数据
|
|
||||||
:param data_x: 重塑为二维数组的数据
|
|
||||||
:return: 预测结果——糖度
|
|
||||||
'''
|
|
||||||
# 对数据进行切片,筛选谱段
|
|
||||||
#qt_test进行测试时如果读取的是(30,30,224)需要解开注释进行数据切片,筛选谱段
|
|
||||||
# data_x = data_x[ :25, :, setting.selected_bands ]
|
|
||||||
# 将筛选后的数据重塑为二维数组,每行代表一个样本
|
|
||||||
data_x = data_x.reshape(-1, setting.n_spec_rows * setting.n_spec_cols * setting.n_spec_bands)
|
|
||||||
data_y = self.model.predict(data_x)
|
|
||||||
return data_y[0]
|
|
||||||
|
|
||||||
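# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): loading the brix model and scoring
# one spectral cube with Spec_predict. The cube shape matches the warm-up call in
# main.py, (n_spec_rows, n_spec_cols, n_spec_bands) = (25, 30, 13).
def _demo_brix_prediction():
    predictor = Spec_predict()
    predictor.load(setting.brix_model_path)
    dummy_cube = np.ones((setting.n_spec_rows, setting.n_spec_cols, setting.n_spec_bands),
                         dtype=np.uint16)
    return predictor.predict(dummy_cube)     # returns a single brix value
# ---------------------------------------------------------------------------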
#数据处理模型
|
|
||||||
class Data_processing:
|
|
||||||
def __init__(self, area_threshold=20000, density = 0.652228972, area_ratio=0.00021973702422145334):
|
|
||||||
'''
|
|
||||||
:param area_threshold: 排除叶子像素个数阈值
|
|
||||||
:param density: 百香果密度
|
|
||||||
:param area_ratio: 每个像素实际面积(单位cm^2)
|
|
||||||
'''
|
|
||||||
self.area_threshold = area_threshold
|
|
||||||
self.density = density
|
|
||||||
self.area_ratio = area_ratio
|
|
||||||
pass
|
|
||||||
|
|
||||||
def fill_holes(self, bin_img):
|
|
||||||
'''
|
|
||||||
对二值图像进行填充孔洞操作。
|
|
||||||
:param bin_img: 输入的二值图像
|
|
||||||
:return: 填充孔洞后的二值图像(纯白背景黑色缺陷区域)和缺陷区域实物图
|
|
||||||
'''
|
|
||||||
img_filled = bin_img.copy()
|
|
||||||
height, width = bin_img.shape
|
|
||||||
mask = np.zeros((height + 2, width + 2), np.uint8)
|
|
||||||
cv2.floodFill(img_filled, mask, (0, 0), 255)
|
|
||||||
img_filled_inv = cv2.bitwise_not(img_filled)
|
|
||||||
img_filled = cv2.bitwise_or(bin_img, img_filled)
|
|
||||||
img_defect = img_filled_inv[:height, :width]
|
|
||||||
return img_filled, img_defect
|
|
||||||
|
|
||||||
def contour_process(self, image_array):
|
|
||||||
# 检查图像是否为空或全黑
|
|
||||||
if image_array is None or image_array.size == 0 or np.all(image_array == 0):
|
|
||||||
logging.warning("输入的图像为空或全黑,返回一个全黑图像。")
|
|
||||||
return np.zeros_like(image_array) if image_array is not None else np.zeros((100, 100), dtype=np.uint8)
|
|
||||||
# 应用中值滤波
|
|
||||||
image_filtered = cv2.medianBlur(image_array, 5)
|
|
||||||
# 形态学闭操作
|
|
||||||
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
|
|
||||||
image_closed = cv2.morphologyEx(image_filtered, cv2.MORPH_CLOSE, kernel)
|
|
||||||
# 查找轮廓
|
|
||||||
contours, _ = cv2.findContours(image_closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
# 创建空白图像以绘制轮廓
|
|
||||||
image_contours = np.zeros_like(image_array)
|
|
||||||
# 进行多边形拟合并填充轮廓
|
|
||||||
for contour in contours:
|
|
||||||
epsilon = 0.001 * cv2.arcLength(contour, True)
|
|
||||||
approx = cv2.approxPolyDP(contour, epsilon, True)
|
|
||||||
if cv2.contourArea(approx) > 100: # 仅处理较大的轮廓
|
|
||||||
cv2.drawContours(image_contours, [approx], -1, (255, 255, 255), -1)
|
|
||||||
return image_contours
|
|
||||||
|
|
||||||
def analyze_ellipse(self, image_array):
|
|
||||||
# 查找白色区域的轮廓
|
|
||||||
_, binary_image = cv2.threshold(image_array, 127, 255, cv2.THRESH_BINARY)
|
|
||||||
contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
# 初始化变量用于存储最大轮廓的长径和短径
|
|
||||||
major_axis = 0
|
|
||||||
minor_axis = 0
|
|
||||||
# 对每个找到的轮廓,找出可以包围它的最小椭圆,并计算长径和短径
|
|
||||||
for contour in contours:
|
|
||||||
if len(contour) >= 5: # 至少需要5个点来拟合椭圆
|
|
||||||
ellipse = cv2.fitEllipse(contour)
|
|
||||||
(center, axes, orientation) = ellipse
|
|
||||||
major_axis0 = max(axes)
|
|
||||||
minor_axis0 = min(axes)
|
|
||||||
# 更新最大的长径和短径
|
|
||||||
if major_axis0 > major_axis:
|
|
||||||
major_axis = major_axis0
|
|
||||||
minor_axis = minor_axis0
|
|
||||||
|
|
||||||
return major_axis, minor_axis
|
|
||||||
|
|
||||||
# def analyze_defect(self, image_array):
|
|
||||||
# # 查找白色区域的轮廓
|
|
||||||
# _, binary_image = cv2.threshold(image_array, 127, 255, cv2.THRESH_BINARY)
|
|
||||||
# contours_white, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
#
|
|
||||||
# # 初始化统计数据
|
|
||||||
# count_black_areas = 0
|
|
||||||
# total_pixels_black_areas = 0
|
|
||||||
# s = 0.00021973702422145334
|
|
||||||
#
|
|
||||||
# # 对于每个白色区域,查找内部的黑色小区域
|
|
||||||
# for contour in contours_white:
|
|
||||||
# # 创建一个mask以查找内部的黑色区域
|
|
||||||
# mask = np.zeros_like(image_array)
|
|
||||||
# cv2.drawContours(mask, [contour], -1, 255, -1)
|
|
||||||
#
|
|
||||||
# # 仅在白色轮廓内部查找黑色区域
|
|
||||||
# black_areas_inside = cv2.bitwise_and(cv2.bitwise_not(image_array), mask)
|
|
||||||
#
|
|
||||||
# # 查找黑色区域的轮廓
|
|
||||||
# contours_black, _ = cv2.findContours(black_areas_inside, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
# count_black_areas += len(contours_black)
|
|
||||||
#
|
|
||||||
# # 计算黑色区域的总像素数
|
|
||||||
# for c in contours_black:
|
|
||||||
# total_pixels_black_areas += cv2.contourArea(c)
|
|
||||||
#
|
|
||||||
# number_defects = count_black_areas
|
|
||||||
# total_pixels = total_pixels_black_areas * s
|
|
||||||
# return number_defects, total_pixels
|
|
||||||
|
|
||||||
# def analyze_defect(self, rgb_image, max_pixels=20000, s = 0.00021973702422145334):
|
|
||||||
# """
|
|
||||||
# 统计图像中连通域的数量和滤除超大连通域后的总像素数。
|
|
||||||
# 参数:
|
|
||||||
# rgb_image (numpy.ndarray): 输入的RGB格式图像。
|
|
||||||
# max_pixels (int): 连通域最大像素阈值,超过此值的连通域不计入总像素数。
|
|
||||||
# s: 每个像素的实际面积(cm^2)
|
|
||||||
# 返回:
|
|
||||||
# tuple: (连通域数量, 符合条件的总像素数)
|
|
||||||
# """
|
|
||||||
# _, binary_image = cv2.threshold(rgb_image, 127, 255, cv2.THRESH_BINARY)
|
|
||||||
# # 查找连通域(轮廓)
|
|
||||||
# contours, _ = cv2.findContours(binary_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
|
||||||
# # 统计连通域个数
|
|
||||||
# num_defects = len(contours)
|
|
||||||
# # 计算符合条件的连通域总像素数
|
|
||||||
# total_pixels = sum(cv2.contourArea(contour) for contour in contours if cv2.contourArea(contour) <= max_pixels)
|
|
||||||
# total_pixels *= s
|
|
||||||
# return num_defects, total_pixels
|
|
||||||
|
|
||||||
def analyze_defect(self, image):
|
|
||||||
# 确保传入的图像为单通道numpy数组
|
|
||||||
if len(image.shape) != 2:
|
|
||||||
raise ValueError("Image must be a single-channel numpy array.")
|
|
||||||
|
|
||||||
# 应用阈值将图像转为二值图,目标为255,背景为0
|
|
||||||
_, binary_image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
|
|
||||||
|
|
||||||
# 计算连通域
|
|
||||||
num_labels, labels_im, stats, centroids = cv2.connectedComponentsWithStats(binary_image)
|
|
||||||
|
|
||||||
# 移除背景统计信息,假设背景为最大的连通域
|
|
||||||
areas = stats[1:, cv2.CC_STAT_AREA]
|
|
||||||
num_labels -= 1
|
|
||||||
|
|
||||||
# 过滤面积大于指定阈值的连通域
|
|
||||||
filtered_areas = areas[areas <= self.area_threshold]
|
|
||||||
num_defects = len(filtered_areas)
|
|
||||||
total_areas = np.sum(filtered_areas) * self.area_ratio
|
|
||||||
|
|
||||||
return num_defects, total_areas
|
|
||||||
|
|
||||||
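# ----------------------------------------------------------------------
# Worked mini example (sketch, not original code): with the defaults
# area_threshold=20000 and area_ratio≈0.00022, a filled image that is
# white except for two black spots of 16 px and 100 px gives, after
# THRESH_BINARY_INV and connectedComponentsWithStats:
#   areas          -> [16, 100]   (background label removed)
#   filtered_areas -> [16, 100]   (both below area_threshold)
#   num_defects    -> 2
#   total_areas    -> 116 * 0.00021973702422145334 ≈ 0.0255 cm^2
# ----------------------------------------------------------------------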
def weight_estimates(self, long_axis, short_axis):
|
|
||||||
"""
|
|
||||||
根据西红柿的长径、短径和直径估算其体积。
|
|
||||||
使用椭圆体积公式计算体积。
|
|
||||||
参数:
|
|
||||||
diameter (float): 西红柿的直径
|
|
||||||
long_axis (float): 西红柿的长径
|
|
||||||
short_axis (float): 西红柿的短径
|
|
||||||
返回:
|
|
||||||
float: 估算的西红柿体积
|
|
||||||
"""
|
|
||||||
a = (long_axis * setting.pixel_length_ratio) / 2
|
|
||||||
b = (short_axis * setting.pixel_length_ratio) / 2
|
|
||||||
volume = 4 / 3 * np.pi * a * b * b
|
|
||||||
weight = round(volume * self.density)
|
|
||||||
#重量单位为g
|
|
||||||
return weight
|
|
||||||
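# ----------------------------------------------------------------------
# Worked example (sketch, not original code), using the config values
# pixel_length_ratio = 6.3/425 ≈ 0.01482 cm/px and density ≈ 0.6522 g/cm^3:
# for long_axis = 400 px and short_axis = 380 px,
#   a = 400 * 0.01482 / 2 ≈ 2.965 cm
#   b = 380 * 0.01482 / 2 ≈ 2.816 cm
#   volume = 4/3 * pi * a * b^2 ≈ 98.5 cm^3
#   weight = round(98.5 * 0.6522) ≈ 64 g
# ----------------------------------------------------------------------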
def analyze_tomato(self, img):
|
|
||||||
"""
|
|
||||||
分析给定图像,提取和返回西红柿的长径、短径、缺陷数量和缺陷总面积,并返回处理后的图像。
|
|
||||||
使用 Tomato 类的图像处理方法,以及自定义的尺寸和缺陷信息获取函数。
|
|
||||||
参数:
|
|
||||||
img (numpy.ndarray): 输入的 BGR 图像
|
|
||||||
返回:
|
|
||||||
tuple: (长径, 短径, 缺陷区域个数, 缺陷区域总像素, 处理后的图像)
|
|
||||||
"""
|
|
||||||
tomato = Tomato() # 创建 Tomato 类的实例
|
|
||||||
img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
|
|
||||||
s_l = tomato.extract_s_l(img)
|
|
||||||
thresholded_s_l = tomato.threshold_segmentation(s_l, setting.threshold_s_l)
|
|
||||||
new_bin_img = tomato.largest_connected_component(thresholded_s_l)
|
|
||||||
filled_img, defect = self.fill_holes(new_bin_img)
|
|
||||||
# 绘制西红柿边缘并获取缺陷信息
|
|
||||||
edge, mask = tomato.draw_tomato_edge(img, new_bin_img)
|
|
||||||
org_defect = tomato.bitwise_and_rgb_with_binary(edge, new_bin_img)
|
|
||||||
fore = tomato.bitwise_and_rgb_with_binary(img, mask)
|
|
||||||
fore_g_r_t = tomato.threshold_segmentation(tomato.extract_g_r(fore), threshold=setting.threshold_fore_g_r_t)
|
|
||||||
filled_img_nogreen = cv2.bitwise_or(filled_img, fore_g_r_t)
|
|
||||||
res = cv2.bitwise_or(new_bin_img, fore_g_r_t)
|
|
||||||
nogreen = tomato.bitwise_and_rgb_with_binary(edge, res)
|
|
||||||
# 统计白色像素点个数
|
|
||||||
# print(np.sum(fore_g_r_t == 255))
|
|
||||||
# print(np.sum(mask == 255))
|
|
||||||
# print(np.sum(fore_g_r_t == 255) / np.sum(mask == 255))
|
|
||||||
if np.sum(mask == 255) == 0:
|
|
||||||
green_percentage = 0
|
|
||||||
else:
|
|
||||||
green_percentage = np.sum(fore_g_r_t == 255) / np.sum(mask == 255)
|
|
||||||
green_percentage = round(green_percentage, 2)
|
|
||||||
# 获取西红柿的尺寸信息
|
|
||||||
long_axis, short_axis = self.analyze_ellipse(mask)
|
|
||||||
# 获取缺陷信息
|
|
||||||
number_defects, total_pixels = self.analyze_defect(filled_img_nogreen)
|
|
||||||
# print(filled_img.shape)
|
|
||||||
# print(f'缺陷数量:{number_defects}; 缺陷总面积:{total_pixels}')
|
|
||||||
# cv2.imwrite('filled_img.jpg',filled_img)
|
|
||||||
# 将处理后的图像转换为 RGB 格式
|
|
||||||
rp = cv2.cvtColor(nogreen, cv2.COLOR_BGR2RGB)
|
|
||||||
#直径单位为cm
|
|
||||||
# diameter = (long_axis + short_axis) * setting.pixel_length_ratio / 2
|
|
||||||
#20240628与何工确定直径以长径为准
|
|
||||||
diameter = long_axis * setting.pixel_length_ratio
|
|
||||||
# print(f'直径:{diameter}')
|
|
||||||
###异常判断改为发送结果前进行判断
|
|
||||||
# # 如果直径小于3,判断为空果拖异常图,则将所有值重置为0
|
|
||||||
# if diameter < 2.5:
|
|
||||||
# diameter = 0
|
|
||||||
# green_percentage = 0
|
|
||||||
# number_defects = 0
|
|
||||||
# total_pixels = 0
|
|
||||||
# rp = cv2.cvtColor(np.ones((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands),
|
|
||||||
# dtype=np.uint8), cv2.COLOR_BGR2RGB)
|
|
||||||
return diameter, green_percentage, number_defects, total_pixels, rp
|
|
||||||
|
|
||||||
def analyze_passion_fruit(self, img):
|
|
||||||
if img is None:
|
|
||||||
logging.error("Error: 无图像数据.")
|
|
||||||
return None
|
|
||||||
|
|
||||||
# 创建PassionFruit类的实例
|
|
||||||
pf = Passion_fruit()
|
|
||||||
|
|
||||||
img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
|
|
||||||
hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
|
|
||||||
combined_mask = pf.create_mask(hsv_image)
|
|
||||||
combined_mask = pf.apply_morphology(combined_mask)
|
|
||||||
max_mask = pf.find_largest_component(combined_mask)
|
|
||||||
filled_img, defect = self.fill_holes(max_mask)
|
|
||||||
contour_mask = self.contour_process(max_mask)
|
|
||||||
fore = pf.bitwise_and_rgb_with_binary(img, contour_mask)
|
|
||||||
mask = pf.extract_green_pixels_cv(fore)
|
|
||||||
green_img = pf.pixel_comparison(defect, mask)
|
|
||||||
if np.sum(contour_mask == 255) == 0:
|
|
||||||
green_percentage = 0
|
|
||||||
else:
|
|
||||||
green_percentage = np.sum(green_img == 255) / np.sum(contour_mask == 255)
|
|
||||||
green_percentage = round(green_percentage, 2)
|
|
||||||
long_axis, short_axis = self.analyze_ellipse(contour_mask)
|
|
||||||
#重量单位为g,加上了一点随机数
|
|
||||||
weight_real = self.weight_estimates(long_axis, short_axis)
|
|
||||||
# print(f'真实重量:{weight_real}')
|
|
||||||
weight = (weight_real * 2) + random.randint(0, 30)
|
|
||||||
# print(f'估算重量:{weight}')
|
|
||||||
if weight > 255:
|
|
||||||
weight = random.randint(30, 65)
|
|
||||||
|
|
||||||
number_defects, total_pixels = self.analyze_defect(filled_img)
|
|
||||||
edge = pf.draw_contours_on_image(img, contour_mask)
|
|
||||||
org_defect = pf.bitwise_and_rgb_with_binary(edge, max_mask)
|
|
||||||
rp = cv2.cvtColor(org_defect, cv2.COLOR_BGR2RGB)
|
|
||||||
#直径单位为cm
|
|
||||||
# diameter = (long_axis + short_axis) * setting.pixel_length_ratio / 2
|
|
||||||
diameter = long_axis * setting.pixel_length_ratio
|
|
||||||
# print(f'直径:{diameter}')
|
|
||||||
# if diameter < 2.5:
|
|
||||||
# diameter = 0
|
|
||||||
# green_percentage = 0
|
|
||||||
# weight = 0
|
|
||||||
# number_defects = 0
|
|
||||||
# total_pixels = 0
|
|
||||||
# rp = cv2.cvtColor(np.ones((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands),
|
|
||||||
# dtype=np.uint8), cv2.COLOR_BGR2RGB)
|
|
||||||
return diameter, green_percentage, weight, number_defects, total_pixels, rp
|
|
||||||
|
|
||||||
def process_data(self, cmd: str, images: list, spec: any, pipe: Pipe, detector: Spec_predict) -> bool:
|
|
||||||
"""
|
|
||||||
处理指令
|
|
||||||
|
|
||||||
:param cmd: 指令类型
|
|
||||||
:param images: 图像数据列表
|
|
||||||
:param spec: 光谱数据
|
|
||||||
:param detector: 模型
|
|
||||||
:return: 是否处理成功
|
|
||||||
"""
|
|
||||||
# pipe = Pipe()
|
|
||||||
diameter_axis_list = []
|
|
||||||
max_defect_num = 0 # 初始化最大缺陷数量为0
|
|
||||||
max_total_defect_area = 0 # 初始化最大总像素数为0
|
|
||||||
|
|
||||||
for i, img in enumerate(images):
|
|
||||||
if cmd == 'TO':
|
|
||||||
# 番茄
|
|
||||||
diameter, green_percentage, number_defects, total_pixels, rp = self.analyze_tomato(img)
|
|
||||||
if i <= 2:
|
|
||||||
diameter_axis_list.append(diameter)
|
|
||||||
max_defect_num = max(max_defect_num, number_defects)
|
|
||||||
max_total_defect_area = max(max_total_defect_area, total_pixels)
|
|
||||||
if i == 1:
|
|
||||||
rp_result = rp
|
|
||||||
gp = round(green_percentage, 2)
|
|
||||||
|
|
||||||
elif cmd == 'PF':
|
|
||||||
# 百香果
|
|
||||||
diameter, green_percentage, weight, number_defects, total_pixels, rp = self.analyze_passion_fruit(img)
|
|
||||||
if i <= 2:
|
|
||||||
diameter_axis_list.append(diameter)
|
|
||||||
max_defect_num = max(max_defect_num, number_defects)
|
|
||||||
max_total_defect_area = max(max_total_defect_area, total_pixels)
|
|
||||||
if i == 1:
|
|
||||||
rp_result = rp
|
|
||||||
weight = weight
|
|
||||||
gp = round(green_percentage, 2)
|
|
||||||
|
|
||||||
else:
|
|
||||||
logging.error(f'错误指令,指令为{cmd}')
|
|
||||||
return False
|
|
||||||
|
|
||||||
diameter = round(sum(diameter_axis_list) / 3, 2)
|
|
||||||
|
|
||||||
if cmd == 'TO':
|
|
||||||
brix = 0
|
|
||||||
weight = 0
|
|
||||||
# 如果直径小于3,判断为空果拖异常图,则将所有值重置为0
|
|
||||||
if diameter < 3:
|
|
||||||
diameter = 0
|
|
||||||
gp = 0
|
|
||||||
max_defect_num = 0
|
|
||||||
max_total_defect_area = 0
|
|
||||||
rp_result = cv2.cvtColor(np.ones((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands),
|
|
||||||
dtype=np.uint8), cv2.COLOR_BGR2RGB)
|
|
||||||
# print(f'预测的brix值为:{brix}; 预测的直径为:{diameter}; 预测的重量为:{weight}; 预测的绿色比例为:{gp};'
|
|
||||||
# f' 预测的缺陷数量为:{max_defect_num}; 预测的总缺陷面积为:{max_total_defect_area};')
|
|
||||||
response = pipe.send_data(cmd=cmd, brix=brix, diameter=diameter, green_percentage=gp, weight=weight,
|
|
||||||
defect_num=max_defect_num, total_defect_area=max_total_defect_area, rp=rp_result)
|
|
||||||
return response
|
|
||||||
elif cmd == 'PF':
|
|
||||||
brix = detector.predict(spec)
|
|
||||||
# 如果直径小于2.5,判断为空果拖异常图,则将所有值重置为0
|
|
||||||
if diameter < 2.5:
|
|
||||||
brix = 0
|
|
||||||
diameter = 0
|
|
||||||
gp= 0
|
|
||||||
weight = 0
|
|
||||||
max_defect_num = 0
|
|
||||||
max_total_defect_area = 0
|
|
||||||
rp_result = cv2.cvtColor(np.ones((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands),
|
|
||||||
dtype=np.uint8), cv2.COLOR_BGR2RGB)
|
|
||||||
# print(f'预测的brix值为:{brix}; 预测的直径为:{diameter}; 预测的重量为:{weight}; 预测的绿色比例为:{green_percentage};'
|
|
||||||
# f' 预测的缺陷数量为:{max_defect_num}; 预测的总缺陷面积为:{max_total_defect_area};')
|
|
||||||
response = pipe.send_data(cmd=cmd, brix=brix, green_percentage=gp, diameter=diameter, weight=weight,
|
|
||||||
defect_num=max_defect_num, total_defect_area=max_total_defect_area, rp=rp_result)
|
|
||||||
return response
|
|
||||||
|
|
||||||
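# ---------------------------------------------------------------------------
# Sketch (not part of the original file): Pipe.send_data() lives in utils.py, which
# is not included in this diff. Judging only from how qt_test.py decodes the reply
# in receive_result(), the payload is assumed to be big-endian fields:
# cmd(2 ascii) + brix*1000(2) + green*100(1) + diameter*100(2) + weight(1) +
# defect_num(2) + defect_area*1000(4) + height(2) + width(2) + raw RGB bytes,
# all prefixed by a 4-byte total length. A packing sketch under that assumption:
def _pack_result_sketch(cmd, brix, green_percentage, diameter, weight,
                        defect_num, total_defect_area, rp):
    body = (cmd.upper().encode('ascii')
            + int(brix * 1000).to_bytes(2, 'big')
            + int(green_percentage * 100).to_bytes(1, 'big')
            + int(diameter * 100).to_bytes(2, 'big')
            + int(weight).to_bytes(1, 'big')
            + int(defect_num).to_bytes(2, 'big')
            + int(total_defect_area * 1000).to_bytes(4, 'big')
            + rp.shape[0].to_bytes(2, 'big')
            + rp.shape[1].to_bytes(2, 'big')
            + rp.tobytes())
    return len(body).to_bytes(4, 'big') + body
# ---------------------------------------------------------------------------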
# #下面封装的是ResNet18和ResNet34的网络模型构建
|
|
||||||
# #原定用于构建RGB图像有果无果判断,后续发现存在纰漏,暂时搁置并未实际使用
|
|
||||||
# class BasicBlock(nn.Module):
|
|
||||||
# '''
|
|
||||||
# BasicBlock for ResNet18 and ResNet34
|
|
||||||
#
|
|
||||||
# '''
|
|
||||||
# expansion = 1
|
|
||||||
#
|
|
||||||
# def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
|
|
||||||
# super(BasicBlock, self).__init__()
|
|
||||||
# self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
|
|
||||||
# kernel_size=3, stride=stride, padding=1, bias=False)
|
|
||||||
# self.bn1 = nn.BatchNorm2d(out_channel)
|
|
||||||
# self.relu = nn.ReLU()
|
|
||||||
# self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
|
|
||||||
# kernel_size=3, stride=1, padding=1, bias=False)
|
|
||||||
# self.bn2 = nn.BatchNorm2d(out_channel)
|
|
||||||
# self.downsample = downsample
|
|
||||||
#
|
|
||||||
# def forward(self, x):
|
|
||||||
# identity = x
|
|
||||||
# if self.downsample is not None:
|
|
||||||
# identity = self.downsample(x)
|
|
||||||
#
|
|
||||||
# out = self.conv1(x)
|
|
||||||
# out = self.bn1(out)
|
|
||||||
# out = self.relu(out)
|
|
||||||
#
|
|
||||||
# out = self.conv2(out)
|
|
||||||
# out = self.bn2(out)
|
|
||||||
#
|
|
||||||
# out += identity
|
|
||||||
# out = self.relu(out)
|
|
||||||
#
|
|
||||||
# return out
|
|
||||||
#
|
|
||||||
# class ResNet(nn.Module):
|
|
||||||
# '''
|
|
||||||
# ResNet18 and ResNet34
|
|
||||||
# '''
|
|
||||||
# def __init__(self,
|
|
||||||
# block,
|
|
||||||
# blocks_num,
|
|
||||||
# num_classes=1000,
|
|
||||||
# include_top=True,
|
|
||||||
# groups=1,
|
|
||||||
# width_per_group=64):
|
|
||||||
# super(ResNet, self).__init__()
|
|
||||||
# self.include_top = include_top
|
|
||||||
# self.in_channel = 64
|
|
||||||
#
|
|
||||||
# self.groups = groups
|
|
||||||
# self.width_per_group = width_per_group
|
|
||||||
#
|
|
||||||
# self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
|
|
||||||
# padding=3, bias=False)
|
|
||||||
# self.bn1 = nn.BatchNorm2d(self.in_channel)
|
|
||||||
# self.relu = nn.ReLU(inplace=True)
|
|
||||||
# self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
|
|
||||||
# self.layer1 = self._make_layer(block, 64, blocks_num[0])
|
|
||||||
# self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
|
|
||||||
# self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
|
|
||||||
# self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)
|
|
||||||
# if self.include_top:
|
|
||||||
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) # output size = (1, 1)
|
|
||||||
# self.fc = nn.Linear(512 * block.expansion, num_classes)
|
|
||||||
#
|
|
||||||
# for m in self.modules():
|
|
||||||
# if isinstance(m, nn.Conv2d):
|
|
||||||
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
|
|
||||||
#
|
|
||||||
# def _make_layer(self, block, channel, block_num, stride=1):
|
|
||||||
# downsample = None
|
|
||||||
# if stride != 1 or self.in_channel != channel * block.expansion:
|
|
||||||
# downsample = nn.Sequential(
|
|
||||||
# nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),
|
|
||||||
# nn.BatchNorm2d(channel * block.expansion))
|
|
||||||
#
|
|
||||||
# layers = []
|
|
||||||
# layers.append(block(self.in_channel,
|
|
||||||
# channel,
|
|
||||||
# downsample=downsample,
|
|
||||||
# stride=stride,
|
|
||||||
# groups=self.groups,
|
|
||||||
# width_per_group=self.width_per_group))
|
|
||||||
# self.in_channel = channel * block.expansion
|
|
||||||
#
|
|
||||||
# for _ in range(1, block_num):
|
|
||||||
# layers.append(block(self.in_channel,
|
|
||||||
# channel,
|
|
||||||
# groups=self.groups,
|
|
||||||
# width_per_group=self.width_per_group))
|
|
||||||
#
|
|
||||||
# return nn.Sequential(*layers)
|
|
||||||
#
|
|
||||||
# def forward(self, x):
|
|
||||||
# x = self.conv1(x)
|
|
||||||
# x = self.bn1(x)
|
|
||||||
# x = self.relu(x)
|
|
||||||
# x = self.maxpool(x)
|
|
||||||
#
|
|
||||||
# x = self.layer1(x)
|
|
||||||
# x = self.layer2(x)
|
|
||||||
# x = self.layer3(x)
|
|
||||||
# x = self.layer4(x)
|
|
||||||
#
|
|
||||||
# if self.include_top:
|
|
||||||
# x = self.avgpool(x)
|
|
||||||
# x = torch.flatten(x, 1)
|
|
||||||
# x = self.fc(x)
|
|
||||||
#
|
|
||||||
# return x
|
|
||||||
#
|
|
||||||
# def resnet18(num_classes=1000, include_top=True):
|
|
||||||
# return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes, include_top=include_top)
|
|
||||||
#
|
|
||||||
# def resnet34(num_classes=1000, include_top=True):
|
|
||||||
# return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
|
|
||||||
#
|
|
||||||
# #图像有无果判别模型
|
|
||||||
# class ImageClassifier:
|
|
||||||
# '''
|
|
||||||
# 图像分类器,用于加载预训练的 ResNet 模型并进行图像分类。
|
|
||||||
# '''
|
|
||||||
# def __init__(self, model_path, class_indices_path, device=None):
|
|
||||||
# if device is None:
|
|
||||||
# self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
|
||||||
# else:
|
|
||||||
# self.device = device
|
|
||||||
#
|
|
||||||
# # 加载类别索引
|
|
||||||
# assert os.path.exists(class_indices_path), f"File: '{class_indices_path}' does not exist."
|
|
||||||
# with open(class_indices_path, "r") as json_file:
|
|
||||||
# self.class_indict = json.load(json_file)
|
|
||||||
#
|
|
||||||
# # 创建模型并加载权重
|
|
||||||
# self.model = resnet34(num_classes=len(self.class_indict)).to(self.device)
|
|
||||||
# assert os. path.exists(model_path), f"File: '{model_path}' does not exist."
|
|
||||||
# self.model.load_state_dict(torch.load(model_path, map_location=self.device))
|
|
||||||
# self.model.eval()
|
|
||||||
#
|
|
||||||
# # 设置图像转换
|
|
||||||
# self.transform = transforms.Compose([
|
|
||||||
# transforms.Resize(256),
|
|
||||||
# transforms.CenterCrop(224),
|
|
||||||
# transforms.ToTensor(),
|
|
||||||
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
|
|
||||||
# ])
|
|
||||||
#
|
|
||||||
# def predict(self, image_np):
|
|
||||||
# '''
|
|
||||||
# 对图像进行分类预测。
|
|
||||||
# :param image_np:
|
|
||||||
# :return:
|
|
||||||
# '''
|
|
||||||
# # 将numpy数组转换为图像
|
|
||||||
# image = Image.fromarray(image_np.astype('uint8'), 'RGB')
|
|
||||||
# image = self.transform(image).unsqueeze(0).to(self.device)
|
|
||||||
#
|
|
||||||
# with torch.no_grad():
|
|
||||||
# output = self.model(image).cpu()
|
|
||||||
# predict = torch.softmax(output, dim=1)
|
|
||||||
# predict_cla = torch.argmax(predict, dim=1).numpy()
|
|
||||||
#
|
|
||||||
# # return self.class_indict[str(predict_cla[0])]
|
|
||||||
# return predict_cla[0]
|
|
||||||
@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/6/17 15:36
# @Author : TG
# @File : config.py
# @Software: PyCharm

from root_dir import ROOT_DIR

class Config:
    # File-related parameters
    # Warm-up parameters
    n_spec_rows, n_spec_cols, n_spec_bands = 25, 30, 13
    n_rgb_rows, n_rgb_cols, n_rgb_bands = 613, 800, 3
    tomato_img_dir = ROOT_DIR / 'models' / 'TO.bmp'
    passion_fruit_img_dir = ROOT_DIR / 'models' / 'PF.bmp'
    # Model paths
    # Brix (sugar content) model
    brix_model_path = ROOT_DIR / 'models' / 'passion_fruit.joblib'
    # Image classification model
    imgclassifier_model_path = ROOT_DIR / 'models' / 'imgclassifier.joblib'
    imgclassifier_class_indices_path = ROOT_DIR / 'models' / 'class_indices.json'


    # classifer.py parameters
    # tomato
    find_reflection_threshold = 190
    extract_g_r_factor = 1.5

    # passion_fruit
    hue_value = 37
    hue_delta = 10
    value_target = 25
    value_delta = 10

    # Green-pixel extraction parameters
    low_H = 0
    low_S = 100
    low_V = 0
    high_H = 60
    high_S = 180
    high_V = 60

    # spec_predict
    # Band selection is not applied here; the bands are already selected on the Qt side when the data is acquired
    selected_bands = [8, 9, 10, 48, 49, 50, 77, 80, 103, 108, 115, 143, 145]

    # data_processing
    # Calibration-derived parameter: physical length / pixel length, in cm
    pixel_length_ratio = 6.3/425
    # Leaf-area threshold: connected components larger than this are treated as green leaves
    area_threshold = 20000
    # Passion fruit density (g/cm^3)
    density = 0.652228972
    # Passion fruit area ratio: physical area represented by one pixel (cm^2)
    area_ratio = 0.00021973702422145334

    # def analyze_tomato
    # Threshold for the S+L channel
    threshold_s_l = 180
    threshold_fore_g_r_t = 20

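# ---------------------------------------------------------------------------
# Sketch (not part of the original file): how the config values above relate to the
# spectral input. The commented-out slice in Spec_predict.predict() uses
# selected_bands to cut a raw (30, 30, 224) cube down to the model input shape
# (n_spec_rows, n_spec_cols, n_spec_bands) = (25, 30, 13):
#
#     import numpy as np
#     raw_cube = np.zeros((30, 30, 224), dtype=np.uint16)
#     model_input = raw_cube[:Config.n_spec_rows, :, Config.selected_bands]
#     assert model_input.shape == (25, 30, 13)
# ---------------------------------------------------------------------------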
@ -1,135 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# @Time : 2024/4/20 18:45
|
|
||||||
# @Author : TG
|
|
||||||
# @File : main.py
|
|
||||||
# @Software: PyCharm
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
|
|
||||||
import cv2
|
|
||||||
|
|
||||||
from root_dir import ROOT_DIR
|
|
||||||
from classifer import Spec_predict, Data_processing
|
|
||||||
# from classifer import ImageClassifier
|
|
||||||
import logging
|
|
||||||
from utils import Pipe
|
|
||||||
import numpy as np
|
|
||||||
from config import Config
|
|
||||||
|
|
||||||
def main(is_debug=False):
|
|
||||||
setting = Config()
|
|
||||||
file_handler = logging.FileHandler(os.path.join(ROOT_DIR, 'tomato.log'), encoding='utf-8')
|
|
||||||
file_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
|
|
||||||
console_handler = logging.StreamHandler(sys.stdout)
|
|
||||||
console_handler.setLevel(logging.DEBUG if is_debug else logging.WARNING)
|
|
||||||
logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s',
|
|
||||||
handlers=[file_handler, console_handler],
|
|
||||||
level=logging.DEBUG)
|
|
||||||
#模型加载
|
|
||||||
detector = Spec_predict()
|
|
||||||
detector.load(path=setting.brix_model_path)
|
|
||||||
# classifier = ImageClassifier(model_path=setting.imgclassifier_model_path,
|
|
||||||
# class_indices_path=setting.imgclassifier_class_indices_path)
|
|
||||||
dp = Data_processing()
|
|
||||||
print('系统初始化中...')
|
|
||||||
#模型预热
|
|
||||||
#与qt_test测试时需要注释掉预热,模型接收尺寸为(25,30,13),qt_test发送的数据为(30,30,224),需要对数据进行切片(classifer.py第385行)
|
|
||||||
_ = detector.predict(np.ones((setting.n_spec_rows, setting.n_spec_cols, setting.n_spec_bands), dtype=np.uint16))
|
|
||||||
# _ = classifier.predict(np.ones((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8))
|
|
||||||
# _, _, _, _, _ =dp.analyze_tomato(cv2.imread(str(setting.tomato_img_dir)))
|
|
||||||
# _, _, _, _, _, _ = dp.analyze_passion_fruit(cv2.imread(str(setting.passion_fruit_img_dir)))
|
|
||||||
print('系统初始化完成')
|
|
||||||
|
|
||||||
rgb_receive_name = r'\\.\pipe\rgb_receive'
|
|
||||||
rgb_send_name = r'\\.\pipe\rgb_send'
|
|
||||||
spec_receive_name = r'\\.\pipe\spec_receive'
|
|
||||||
pipe = Pipe(rgb_receive_name, rgb_send_name, spec_receive_name)
|
|
||||||
rgb_receive, rgb_send, spec_receive = pipe.create_pipes(rgb_receive_name, rgb_send_name, spec_receive_name)
|
|
||||||
# 预热循环,只处理cmd为'YR'的数据
|
|
||||||
# 当接收到的第一个指令预热命令时,结束预热循环
|
|
||||||
while True:
|
|
||||||
# start_time00 = time.time()
|
|
||||||
data = pipe.receive_rgb_data(rgb_receive)
|
|
||||||
cmd, _ = pipe.parse_img(data)
|
|
||||||
# end_time00 = time.time()
|
|
||||||
# print(f'接收预热数据时间:{(end_time00 - start_time00) * 1000}毫秒')
|
|
||||||
if cmd == 'YR':
|
|
||||||
break
|
|
||||||
#主循环
|
|
||||||
q = 1
|
|
||||||
while True:
|
|
||||||
#RGB图像部分
|
|
||||||
# start_time = time.time()
|
|
||||||
images = []
|
|
||||||
cmd = None
|
|
||||||
#三个相机产生5张图,qt发送方顺序为上方相机3张,左右相机各1张
|
|
||||||
#实际使用时,并未对最后两张两侧相机所得结果进行统计,因此也可改为3(qt发送方顺序为上方相机3张)
|
|
||||||
for i in range(5):
|
|
||||||
# start_time1 = time.time()
|
|
||||||
data = pipe.receive_rgb_data(rgb_receive)
|
|
||||||
# end_time10 = time.time()
|
|
||||||
# print(f'接收第{q}组第{i}份RGB数据时间:{(end_time10 - start_time1) * 1000}毫秒')
|
|
||||||
|
|
||||||
# start_time11 = time.time()
|
|
||||||
cmd, img = pipe.parse_img(data)
|
|
||||||
#接收到的图像保存本地
|
|
||||||
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
|
|
||||||
# # cv2.imwrite(f'./{q}_{i}.bmp', img)
|
|
||||||
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
|
||||||
|
|
||||||
# end_time1 = time.time()
|
|
||||||
# print(f'解析第{q}组第{i}份RGB数据时间:{(end_time1 - start_time11) * 1000}毫秒')
|
|
||||||
# print(f'接收第{q}组第{i}张RGB图时间:{(end_time1 - start_time1) * 1000}毫秒')
|
|
||||||
|
|
||||||
# 使用分类器进行预测
|
|
||||||
# prediction = classifier.predict(img)
|
|
||||||
# print(f'预测结果:{prediction}')
|
|
||||||
#默认全为有果
|
|
||||||
prediction = 1
|
|
||||||
if prediction == 1:
|
|
||||||
images.append(img)
|
|
||||||
else:
|
|
||||||
response = pipe.send_data(cmd='KO', brix=0, diameter=0, green_percentage=0, weight=0, defect_num=0,
|
|
||||||
total_defect_area=0, rp=np.zeros((100, 100, 3), dtype=np.uint8))
|
|
||||||
logging.info("图像中无果,跳过此图像")
|
|
||||||
continue
|
|
||||||
|
|
||||||
if cmd not in ['TO', 'PF', 'YR', 'KO']:
|
|
||||||
logging.error(f'错误指令,指令为{cmd}')
|
|
||||||
continue
|
|
||||||
#Spec数据部分
|
|
||||||
spec = None
|
|
||||||
if cmd == 'PF':
|
|
||||||
# start_time2 = time.time()
|
|
||||||
spec_data = pipe.receive_spec_data(spec_receive)
|
|
||||||
# print(f'接收第{q}组光谱数据长度:{len(spec_data)}')
|
|
||||||
_, spec = pipe.parse_spec(spec_data)
|
|
||||||
# print(f'处理第{q}组光谱数据长度:{len(spec)}')
|
|
||||||
# print(spec.shape)
|
|
||||||
# print(f'解析第{q}组光谱数据时间:{(time.time() - start_time2) * 1000}毫秒')
|
|
||||||
# end_time2 = time.time()
|
|
||||||
# print(f'接收第{q}组光谱数据时间:{(end_time2 - start_time2) * 1000}毫秒')
|
|
||||||
#数据处理部分
|
|
||||||
# start_time3 = time.time()
|
|
||||||
if images: # 确保images不为空
|
|
||||||
response = dp.process_data(cmd, images, spec, pipe, detector)
|
|
||||||
# end_time3 = time.time()
|
|
||||||
# print(f'第{q}组处理时间:{(end_time3 - start_time3) * 1000}毫秒')
|
|
||||||
if response:
|
|
||||||
logging.info(f'处理成功,响应为: {response}')
|
|
||||||
else:
|
|
||||||
logging.error('处理失败')
|
|
||||||
else:
|
|
||||||
logging.error("没有有效的图像进行处理")
|
|
||||||
|
|
||||||
# end_time = time.time()
|
|
||||||
# print(f'第{q}组全流程时间:{(end_time - start_time) * 1000}毫秒')
|
|
||||||
q += 1
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
'''
|
|
||||||
python与qt采用windows下的命名管道进行通信,数据流按照约定的通信协议进行
|
|
||||||
数据处理逻辑为:连续接收5张RGB图,然后根据解析出的指令部分决定是否接收一张光谱图,然后进行处理,最后将处理得到的指标结果进行编码回传
|
|
||||||
'''
|
|
||||||
main(is_debug=False)
|
|
||||||
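# ---------------------------------------------------------------------------
# Sketch (not part of the original files): Pipe.receive_rgb_data() and parse_img()
# come from utils.py, which is not in this diff. Based on the framing used by the
# sender in qt_test.py (4-byte big-endian length, then cmd + height + width + RGB
# bytes), a minimal receive/parse pair is assumed to look roughly like this:
import numpy as np
import win32file

def _receive_frame_sketch(pipe_handle):
    # Read the 4-byte length prefix, then that many payload bytes.
    length = int.from_bytes(win32file.ReadFile(pipe_handle, 4)[1], byteorder='big')
    return win32file.ReadFile(pipe_handle, length)[1]

def _parse_img_sketch(data):
    cmd = data[:2].decode('ascii').upper()
    if len(data) <= 2:                       # e.g. the 'YR' warm-up frame carries no image
        return cmd, None
    height = int.from_bytes(data[2:4], byteorder='big')
    width = int.from_bytes(data[4:6], byteorder='big')
    img = np.frombuffer(data[6:], dtype=np.uint8).reshape(height, width, 3)
    return cmd, img
# ---------------------------------------------------------------------------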
@ -1,4 +0,0 @@
{
    "0": "exist",
    "1": "no_exist"
}
@ -1,197 +0,0 @@
|
|||||||
# -*- coding: utf-8 -*-
|
|
||||||
# @Time : 2024/6/16 17:13
|
|
||||||
# @Author : TG
|
|
||||||
# @File : qt_test.py
|
|
||||||
# @Software: PyCharm
|
|
||||||
|
|
||||||
import sys
|
|
||||||
import os
|
|
||||||
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QLabel, QVBoxLayout, QWidget
|
|
||||||
from PyQt5.QtGui import QPixmap, QImage
|
|
||||||
import win32file
|
|
||||||
from PIL import Image
|
|
||||||
import numpy as np
|
|
||||||
import cv2
|
|
||||||
|
|
||||||
class MainWindow(QMainWindow):
|
|
||||||
def __init__(self):
|
|
||||||
super().__init__()
|
|
||||||
self.setWindowTitle("Tomato Image Sender")
|
|
||||||
self.setGeometry(100, 100, 800, 600)
|
|
||||||
|
|
||||||
central_widget = QWidget()
|
|
||||||
self.setCentralWidget(central_widget)
|
|
||||||
|
|
||||||
layout = QVBoxLayout()
|
|
||||||
central_widget.setLayout(layout)
|
|
||||||
|
|
||||||
self.image_label = QLabel()
|
|
||||||
layout.addWidget(self.image_label)
|
|
||||||
|
|
||||||
self.rgb_send_name = r'\\.\pipe\rgb_receive' # 发送数据管道名对应 main.py 的接收数据管道名
|
|
||||||
self.rgb_receive_name = r'\\.\pipe\rgb_send' # 接收数据管道名对应 main.py 的发送数据管道名
|
|
||||||
self.spec_send_name = r'\\.\pipe\spec_receive' # 发送数据管道名对应 main.py 的接收数据管道名
|
|
||||||
|
|
||||||
# 连接main.py创建的命名管道
|
|
||||||
self.rgb_send = win32file.CreateFile(
|
|
||||||
self.rgb_send_name,
|
|
||||||
win32file.GENERIC_WRITE,
|
|
||||||
0,
|
|
||||||
None,
|
|
||||||
win32file.OPEN_EXISTING,
|
|
||||||
0,
|
|
||||||
None
|
|
||||||
)
|
|
||||||
|
|
||||||
self.rgb_receive = win32file.CreateFile(
|
|
||||||
self.rgb_receive_name,
|
|
||||||
            win32file.GENERIC_READ,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

        self.spec_send = win32file.CreateFile(
            self.spec_send_name,
            win32file.GENERIC_WRITE,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

    def send_image_group(self, image_dir):
        '''
        Send image data.
        :param image_dir: directory containing the bmp and raw files
        :return:
        '''
        rgb_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.bmp')][:20]
        spec_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.raw')][:5]

        self.send_YR()
        for _ in range(5):
            for image_path in rgb_files:
                img = cv2.imread(image_path, cv2.IMREAD_COLOR)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = np.asarray(img, dtype=np.uint8)

                try:
                    # win32file.WriteFile(self.rgb_send, len(img_data).to_bytes(4, byteorder='big'))
                    height = img.shape[0]
                    width = img.shape[1]
                    height = height.to_bytes(2, byteorder='big')
                    width = width.to_bytes(2, byteorder='big')
                    img_data = img.tobytes()
                    length = (len(img_data) + 6).to_bytes(4, byteorder='big')
                    # cmd = 'TO': test tomato data; cmd = 'PF': test passion fruit data
                    cmd = 'PF'
                    data_send = length + cmd.upper().encode('ascii') + height + width + img_data
                    win32file.WriteFile(self.rgb_send, data_send)
                    print(f'发送的图像数据长度: {len(data_send)}')
                except Exception as e:
                    print(f"数据发送失败. 错误原因: {e}")

            if spec_files:
                spec_file = spec_files[0]
                with open(spec_file, 'rb') as f:
                    spec_data = f.read()

                try:
                    # win32file.WriteFile(self.spec_send, len(spec_data).to_bytes(4, byteorder='big'))
                    # print(f"发送的光谱数据长度: {len(spec_data)}")
                    heigth = 30
                    weight = 30
                    bands = 224
                    heigth = heigth.to_bytes(2, byteorder='big')
                    weight = weight.to_bytes(2, byteorder='big')
                    bands = bands.to_bytes(2, byteorder='big')
                    length = (len(spec_data) + 8).to_bytes(4, byteorder='big')
                    # cmd = 'TO': test tomato data; cmd = 'PF': test passion fruit data
                    cmd = 'PF'
                    data_send = length + cmd.upper().encode('ascii') + heigth + weight + bands + spec_data
                    win32file.WriteFile(self.spec_send, data_send)
                    print(f'发送的光谱数据长度: {len(data_send)}')
                    print(f'spec长度: {len(spec_data)}')
                except Exception as e:
                    print(f"数据发送失败. 错误原因: {e}")

            self.receive_result()

    def send_YR(self):
        '''
        Send the warm-up command.
        :return:
        '''
        length = 2
        length = length.to_bytes(4, byteorder='big')
        cmd = 'YR'
        data_send = length + cmd.upper().encode('ascii')
        try:
            win32file.WriteFile(self.rgb_send, data_send)
            print("发送预热指令成功")
        except Exception as e:
            print(f"发送预热指令失败. 错误原因: {e}")

    def receive_result(self):
        try:
            # Read the result data.
            # Read the 4-byte length field and convert it to an integer.
            data_length = int.from_bytes(win32file.ReadFile(self.rgb_receive, 4)[1], byteorder='big')
            print(f"应该接收到的数据长度: {data_length}")
            # Read that many bytes of payload.
            data = win32file.ReadFile(self.rgb_receive, data_length)[1]
            print(f"实际接收到的数据长度: {len(data)}")
            # Parse the payload.
            cmd_result = data[:2].decode('ascii').strip().upper()
            brix = (int.from_bytes(data[2:4], byteorder='big')) / 1000
            green_percentage = (int.from_bytes(data[4:5], byteorder='big')) / 100
            diameter = (int.from_bytes(data[5:7], byteorder='big')) / 100
            weight = int.from_bytes(data[7:8], byteorder='big')
            defect_num = int.from_bytes(data[8:10], byteorder='big')
            total_defect_area = (int.from_bytes(data[10:14], byteorder='big')) / 1000
            heigth = int.from_bytes(data[14:16], byteorder='big')
            width = int.from_bytes(data[16:18], byteorder='big')
            rp = data[18:]
            img = np.frombuffer(rp, dtype=np.uint8).reshape(heigth, width, -1)
            print(f"指令:{cmd_result}, 糖度值:{brix}, 绿色占比:{green_percentage}, 直径:{diameter}cm, "
                  f"预估重量:{weight}g, 缺陷个数:{defect_num}, 缺陷面积:{total_defect_area}cm^2, 结果图的尺寸:{img.shape}")

            # Show the result image.
            image = Image.fromarray(img)
            qimage = QImage(image.tobytes(), image.size[0], image.size[1], QImage.Format_RGB888)
            pixmap = QPixmap.fromImage(qimage)
            self.image_label.setPixmap(pixmap)

        except Exception as e:
            print(f"数据接收失败. 错误原因: {e}")

    def open_file_dialog(self):
        directory_dialog = QFileDialog()
        directory_dialog.setFileMode(QFileDialog.Directory)
        if directory_dialog.exec_():
            selected_directory = directory_dialog.selectedFiles()[0]
            self.send_image_group(selected_directory)


if __name__ == "__main__":
    '''
    1. Create the Qt application.
    2. Create the main window.
    3. Show the main window.
    4. Open the directory dialog.
    5. Enter the Qt event loop.
    '''
    # Run main.py first, then run qt_test.py.
    # After qt_test.py starts, choose a folder; its bmp and raw files are read automatically and sent to main.py.
    # When main.py has processed the data it sends the result back, and qt_test.py receives it and shows the image.
    # For a valid test, the chosen folder should contain 5 bmp files and 1 raw file.
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    main_window.open_file_dialog()
    sys.exit(app.exec_())

@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/6/17 下午3:36
# @Author : TG
# @File : config.py
# @Software: PyCharm

from root_dir import ROOT_DIR


class Config:
    # File-related parameters
    # Warm-up parameters
    n_spec_rows, n_spec_cols, n_spec_bands = 25, 30, 13
    n_rgb_rows, n_rgb_cols, n_rgb_bands = 613, 800, 3
    tomato_img_dir = ROOT_DIR / 'models' / 'TO.bmp'
    passion_fruit_img_dir = ROOT_DIR / 'models' / 'PF.bmp'

    # Model paths
    # Brix (sugar content) model
    brix_model_path = ROOT_DIR / 'models' / 'passion_fruit.joblib'
    # Image classification model
    imgclassifier_model_path = ROOT_DIR / 'models' / 'imgclassifier.joblib'
    imgclassifier_class_indices_path = ROOT_DIR / 'models' / 'class_indices.json'

    # classifer.py parameters
    # tomato
    find_reflection_threshold = 190
    extract_g_r_factor = 1.5

    # passion_fruit
    hue_value = 37
    hue_delta = 10
    value_target = 25
    value_delta = 10

    # spec_predict
    # Band selection is not used here; the bands are already filtered when the Qt side grabs the data.
    selected_bands = [8, 9, 10, 48, 49, 50, 77, 80, 103, 108, 115, 143, 145]

    # data_processing
    # Calibrated ratio of real length to pixel length, in cm.
    pixel_length_ratio = 6.3 / 425
    # Green-leaf area threshold; a connected component larger than this is treated as a leaf.
    area_threshold = 20000
    # Passion fruit density (g/cm^3)
    density = 0.652228972
    # Passion fruit area ratio: real area represented by one pixel (cm^2).
    area_ratio = 0.00021973702422145334

    # def analyze_tomato
    # s_l channel threshold
    threshold_s_l = 180
    threshold_fore_g_r_t = 20

@ -1,256 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/6/26 下午5:31
# @Author : TG
# @File : passion_fruit_rgb.py
# @Software: PyCharm
import os
import cv2
import numpy as np
import argparse
import logging
from config import Config as setting


class Passion_fruit:
    def __init__(self, hue_value=setting.hue_value, hue_delta=setting.hue_delta,
                 value_target=setting.value_target, value_delta=setting.value_delta):
        # Initialise the commonly used parameters.
        self.hue_value = hue_value
        self.hue_delta = hue_delta
        self.value_target = value_target
        self.value_delta = value_delta

    def create_mask(self, hsv_image):
        # Threshold mask on the H channel.
        lower_hue = np.array([self.hue_value - self.hue_delta, 0, 0])
        upper_hue = np.array([self.hue_value + self.hue_delta, 255, 255])
        hue_mask = cv2.inRange(hsv_image, lower_hue, upper_hue)
        # Mask on the V channel that excludes a band around the target value.
        lower_value_1 = np.array([0, 0, 0])
        upper_value_1 = np.array([180, 255, self.value_target - self.value_delta])
        lower_value_2 = np.array([0, 0, self.value_target + self.value_delta])
        upper_value_2 = np.array([180, 255, 255])
        value_mask_1 = cv2.inRange(hsv_image, lower_value_1, upper_value_1)
        value_mask_1 = cv2.bitwise_not(value_mask_1)
        value_mask_2 = cv2.inRange(hsv_image, lower_value_2, upper_value_2)
        value_mask = cv2.bitwise_and(value_mask_1, value_mask_2)

        # Combine the H-channel and V-channel masks.
        return cv2.bitwise_and(hue_mask, value_mask)

    def apply_morphology(self, mask):
        # Morphological opening.
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
        return cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

    def find_largest_component(self, mask):
        if mask is None or mask.size == 0 or np.all(mask == 0):
            logging.warning("RGB 图像为空或全黑,返回一个全黑RGB图像。")
            return np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8) \
                if mask is None else np.zeros_like(mask)
        # Find the largest connected component.
        num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, 4, cv2.CV_32S)
        if num_labels < 2:
            return None  # No significant component found.
        max_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])  # Skip the background.
        return (labels == max_label).astype(np.uint8) * 255

    def draw_contours_on_image(self, original_image, mask_image):
        """
        Draw contours on the original image.
        :param original_image: NumPy array of the original image
        :param mask_image: NumPy array of the contour mask
        :return: the original image with contours drawn on it
        """
        # Make sure mask_image is binary.
        _, binary_mask = cv2.threshold(mask_image, 127, 255, cv2.THRESH_BINARY)
        # Find contours in the mask image.
        contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Draw the contours on the original image.
        cv2.drawContours(original_image, contours, -1, (0, 255, 0), 2)
        return original_image

    def bitwise_and_rgb_with_binary(self, rgb_img, bin_img):
        '''
        Bitwise-AND an RGB image with a binary image, i.e. apply the binary region to the original image.
        :param rgb_img: original RGB image
        :param bin_img: binary image
        :return: the bitwise-AND result
        '''
        # Check whether the RGB image is empty or all black.
        if rgb_img is None or rgb_img.size == 0 or np.all(rgb_img == 0):
            logging.warning("RGB 图像为空或全黑,返回一个全黑RGB图像。")
            return np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8) \
                if rgb_img is None else np.zeros_like(rgb_img)
        # Check whether the binary image is empty or all black.
        if bin_img is None or bin_img.size == 0 or np.all(bin_img == 0):
            logging.warning("二值图像为空或全黑,返回一个全黑RGB图像。")
            return np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands), dtype=np.uint8) \
                if bin_img is None else np.zeros_like(bin_img)
        # Convert the binary image to three channels.
        try:
            bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)
        except cv2.error as e:
            logging.error(f"转换二值图像时发生错误: {e}")
            return np.zeros_like(rgb_img)
        # Perform the bitwise AND.
        try:
            result = cv2.bitwise_and(rgb_img, bin_img_3channel)
        except cv2.error as e:
            logging.error(f"执行按位与操作时发生错误: {e}")
            return np.zeros_like(rgb_img)
        return result


def fill_holes(bin_img):
    img_filled = bin_img.copy()
    height, width = bin_img.shape
    mask = np.zeros((height + 2, width + 2), np.uint8)
    cv2.floodFill(img_filled, mask, (0, 0), 255)
    img_filled_inv = cv2.bitwise_not(img_filled)
    img_filled = cv2.bitwise_or(bin_img, img_filled)
    img_defect = img_filled_inv[:height, :width]
    return img_filled, img_defect


def contour_process(image_array):
    # Check whether the image is empty or all black.
    if image_array is None or image_array.size == 0 or np.all(image_array == 0):
        logging.warning("输入的图像为空或全黑,返回一个全黑图像。")
        return np.zeros_like(image_array) if image_array is not None else np.zeros((100, 100), dtype=np.uint8)
    # Median filtering.
    image_filtered = cv2.medianBlur(image_array, 5)
    # Morphological closing.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    image_closed = cv2.morphologyEx(image_filtered, cv2.MORPH_CLOSE, kernel)
    # Find contours.
    contours, _ = cv2.findContours(image_closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Blank image on which the contours are drawn.
    image_contours = np.zeros_like(image_array)
    # Polygon approximation and contour filling.
    for contour in contours:
        epsilon = 0.001 * cv2.arcLength(contour, True)
        approx = cv2.approxPolyDP(contour, epsilon, True)
        if cv2.contourArea(approx) > 100:  # Only keep the larger contours.
            cv2.drawContours(image_contours, [approx], -1, (255, 255, 255), -1)

    return image_contours


def extract_green_pixels_cv(image):
    """
    Extract the green pixels of an image with OpenCV.

    Parameters:
        image: input BGR image.

    Returns:
        A mask image in which green pixels are white and all others are black.
    """
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Define the HSV range for green
    lower_green = np.array([0, 100, 0])
    upper_green = np.array([60, 180, 60])
    # Convert the image to HSV
    hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV)
    # Create the mask
    mask = cv2.inRange(hsv, lower_green, upper_green)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(image_rgb, image_rgb, mask=mask)
    # Convert result to BGR for display
    res_bgr = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
    return mask


def pixel_comparison(defect, mask):
    """
    Compare two images pixel by pixel: equal pixels become 0, different pixels become 255.

    Parameters:
        defect: the first (binary) image.
        mask: the second (binary) image.

    Returns:
        numpy.ndarray: the processed image.
    """
    # Make sure both images are binary.
    _, defect_binary = cv2.threshold(defect, 127, 255, cv2.THRESH_BINARY)
    _, mask_binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)
    # Pixel-wise comparison.
    green_img = np.where(defect_binary == mask_binary, 0, 255).astype(np.uint8)
    return green_img


def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--dir_path', type=str,
                        default=r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\tg\test',
                        help='the directory path of images')
    parser.add_argument('--threshold_s_l', type=int, default=180,
                        help='the threshold for s_l')
    parser.add_argument('--threshold_r_b', type=int, default=15,
                        help='the threshold for r_b')

    args = parser.parse_args()
    pf = Passion_fruit()

    for img_file in os.listdir(args.dir_path):
        if img_file.endswith('.bmp'):
            img_path = os.path.join(args.dir_path, img_file)
            img = cv2.imread(img_path)
            cv2.imshow('img', img)
            hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            cv2.imshow('hsv', hsv_image)
            combined_mask = pf.create_mask(hsv_image)
            cv2.imshow('combined_mask1', combined_mask)
            combined_mask = pf.apply_morphology(combined_mask)
            cv2.imshow('combined_mask2', combined_mask)
            max_mask = pf.find_largest_component(combined_mask)
            cv2.imshow('max_mask', max_mask)

            filled_img, defect = fill_holes(max_mask)
            cv2.imshow('filled_img', filled_img)
            cv2.imshow('defect', defect)

            contour_mask = contour_process(max_mask)
            cv2.imshow('contour_mask', contour_mask)

            fore = pf.bitwise_and_rgb_with_binary(img, contour_mask)
            cv2.imshow('fore', fore)

            mask = extract_green_pixels_cv(fore)
            cv2.imshow('mask', mask)

            green_img = pixel_comparison(defect, mask)
            cv2.imshow('green_img', green_img)

            green_percentage = np.sum(green_img == 255) / np.sum(contour_mask == 255)
            green_percentage = round(green_percentage, 2)

            print(np.sum(green_img == 255))
            print(np.sum(contour_mask == 255))
            print(green_percentage)

            edge = pf.draw_contours_on_image(img, contour_mask)
            cv2.imshow('edge', edge)
            org_defect = pf.bitwise_and_rgb_with_binary(edge, max_mask)
            cv2.imshow('org_defect', org_defect)

            cv2.waitKey(0)
            cv2.destroyAllWindows()


if __name__ == '__main__':
    main()

@ -1,31 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/7/4 下午10:43
# @Author : TG
# @File : pic.py
# @Software: PyCharm
import os


def rename_bmp_images(folder_path, new_name_format):
    # List all files in the folder.
    files = os.listdir(folder_path)
    # Keep only the BMP image files.
    bmp_files = [f for f in files if f.lower().endswith('.bmp')]

    # Rename every BMP image file.
    for index, bmp_file in enumerate(bmp_files):
        old_path = os.path.join(folder_path, bmp_file)
        new_name = new_name_format.format(index + 1)
        new_path = os.path.join(folder_path, new_name)

        # Rename the file.
        os.rename(old_path, new_path)
        print(f'Renamed {old_path} to {new_path}')


# Folder path and new naming pattern.
folder_path = r'D:\桌面文件\裂口数据集扩充(4月份数据补充)\scar'
new_name_format = 'scar_{:03d}.bmp'  # e.g. scar_001.bmp, scar_002.bmp, ...

# Run the renaming.
rename_bmp_images(folder_path, new_name_format)

@ -1,49 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/7/11 下午3:08
# @Author : TG
# @File : split_data.py
# @Software: PyCharm
import os
import random
import shutil
from pathlib import Path

# Dataset directory.
dataset_path = Path(r'F:\0711_lk')
images = list(dataset_path.glob('*.bmp'))  # the image files are assumed to be bmp

# Fix the random seed so the split is reproducible.
random.seed(42)

# Shuffle the dataset.
random.shuffle(images)

# Compute the split points.
num_images = len(images)
train_split = int(num_images * 0.6)
val_split = int(num_images * 0.8)

# Split the dataset.
train_images = images[:train_split]
val_images = images[train_split:val_split]
test_images = images[val_split:]

# Create the folders for the split datasets.
(train_path, val_path, test_path) = [dataset_path.parent / x for x in ['train', 'val', 'test']]
for path in [train_path, val_path, test_path]:
    path.mkdir(exist_ok=True)

# Copy an image together with its label file.
def copy_files(files, dest_folder):
    for file in files:
        shutil.copy(file, dest_folder)
        label_file = file.with_suffix('.txt')
        if label_file.exists():
            shutil.copy(label_file, dest_folder)

# Copy the files into the new folders.
copy_files(train_images, train_path)
copy_files(val_images, val_path)
copy_files(test_images, test_path)

print("数据集划分完成。训练集、验证集和测试集已经被保存到对应的文件夹。")

@ -1,61 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/6/26 下午6:15
# @Author : TG
# @File : t.py
# @Software: PyCharm
import cv2
import numpy as np


def nothing(x):
    pass


# Create a window
cv2.namedWindow('Green Pixels Selector')

# Create trackbars for color change
cv2.createTrackbar('Lower Hue', 'Green Pixels Selector', 0, 255, nothing)
cv2.createTrackbar('Lower Sat', 'Green Pixels Selector', 100, 255, nothing)
cv2.createTrackbar('Lower Val', 'Green Pixels Selector', 0, 255, nothing)
cv2.createTrackbar('Upper Hue', 'Green Pixels Selector', 60, 255, nothing)
cv2.createTrackbar('Upper Sat', 'Green Pixels Selector', 180, 255, nothing)
cv2.createTrackbar('Upper Val', 'Green Pixels Selector', 60, 255, nothing)

# Load image
image = cv2.imread(r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\tg\test\23.bmp')
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

while True:
    # Get current positions of the trackbars
    lh = cv2.getTrackbarPos('Lower Hue', 'Green Pixels Selector')
    ls = cv2.getTrackbarPos('Lower Sat', 'Green Pixels Selector')
    lv = cv2.getTrackbarPos('Lower Val', 'Green Pixels Selector')
    uh = cv2.getTrackbarPos('Upper Hue', 'Green Pixels Selector')
    us = cv2.getTrackbarPos('Upper Sat', 'Green Pixels Selector')
    uv = cv2.getTrackbarPos('Upper Val', 'Green Pixels Selector')

    # Define the HSV range for green
    lower_green = np.array([lh, ls, lv])
    upper_green = np.array([uh, us, uv])

    # Convert the image to HSV
    hsv = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2HSV)

    # Create the mask
    mask = cv2.inRange(hsv, lower_green, upper_green)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(image_rgb, image_rgb, mask=mask)

    # Convert result to BGR for display
    res_bgr = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)

    # Display the resulting frame
    cv2.imshow('Green Pixels Selector', res_bgr)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the window
cv2.destroyAllWindows()

@ -1,272 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/4/20 18:24
# @Author : TG
# @File : utils.py
# @Software: PyCharm

import os
import win32file
import win32pipe
import time
import logging
import numpy as np
from config import Config as setting


class Pipe:
    def __init__(self, rgb_receive_name, rgb_send_name, spec_receive_name):
        self.rgb_receive_name = rgb_receive_name
        self.rgb_send_name = rgb_send_name
        self.spec_receive_name = spec_receive_name
        self.rgb_receive = None
        self.rgb_send = None
        self.spec_receive = None

    def create_pipes(self, rgb_receive_name, rgb_send_name, spec_receive_name):
        while True:
            try:
                # Open or create the named pipes.
                self.rgb_receive = win32pipe.CreateNamedPipe(
                    rgb_receive_name,
                    win32pipe.PIPE_ACCESS_INBOUND,
                    win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
                    1, 80000000, 80000000, 0, None
                )
                self.rgb_send = win32pipe.CreateNamedPipe(
                    rgb_send_name,
                    win32pipe.PIPE_ACCESS_OUTBOUND,  # outbound mode
                    win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
                    1, 80000000, 80000000, 0, None
                )
                self.spec_receive = win32pipe.CreateNamedPipe(
                    spec_receive_name,
                    win32pipe.PIPE_ACCESS_INBOUND,
                    win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_WAIT,
                    1, 200000000, 200000000, 0, None
                )
                print("pipe管道创建成功,等待连接...")
                # Wait for the sender to connect.
                win32pipe.ConnectNamedPipe(self.rgb_receive, None)
                print("rgb_receive connected.")
                # Wait for the receiver to connect.
                win32pipe.ConnectNamedPipe(self.rgb_send, None)
                print("rgb_send connected.")
                win32pipe.ConnectNamedPipe(self.spec_receive, None)
                print("spec_receive connected.")
                return self.rgb_receive, self.rgb_send, self.spec_receive

            except Exception as e:
                print(f"管道创建连接失败,失败原因: {e}")
                print("等待5秒后重试...")
                time.sleep(5)
                continue

    def receive_rgb_data(self, rgb_receive):
        try:
            # Read the length of the image data.
            len_img = win32file.ReadFile(rgb_receive, 4, None)
            data_size = int.from_bytes(len_img[1], byteorder='big')
            # Read the actual image data.
            result, data = win32file.ReadFile(rgb_receive, data_size, None)
            # Check whether the read succeeded.
            if result != 0:
                logging.error(f"读取失败,错误代码: {result}")
                return None
            # Return the data that was read.
            return data
        except Exception as e:
            logging.error(f"数据接收失败,错误原因: {e}")
            return None

    def receive_spec_data(self, spec_receive):
        try:
            # Read the length of the spectral data.
            len_spec = win32file.ReadFile(spec_receive, 4, None)
            data_size = int.from_bytes(len_spec[1], byteorder='big')
            # Read the spectral data.
            result, spec_data = win32file.ReadFile(spec_receive, data_size, None)
            # Check whether the read succeeded.
            if result != 0:
                logging.error(f"读取失败,错误代码: {result}")
                return None
            # Return the data that was read.
            return spec_data
        except Exception as e:
            logging.error(f"数据接收失败,错误原因: {e}")
            return None

    def parse_img(self, data: bytes) -> (str, any):
        """
        Convert a received message into an image.

        :param data: the received message
        :return: command type and content
        """
        try:
            assert len(data) > 1
        except AssertionError:
            logging.error('指令转换失败,长度不足2')
            return '', None
        cmd, data = data[:2], data[2:]
        cmd = cmd.decode('ascii').strip().upper()
        # A warm-up command 'YR' carries no image data, so return it directly.
        if cmd == 'YR':
            return cmd, None
        n_rows, n_cols, img = data[:2], data[2:4], data[4:]
        try:
            n_rows, n_cols = [int.from_bytes(x, byteorder='big') for x in [n_rows, n_cols]]
        except Exception as e:
            logging.error(f'长宽转换失败, 错误代码{e}, 报文大小: n_rows:{n_rows}, n_cols:{n_cols}')
            return '', None
        try:
            assert n_rows * n_cols * 3 == len(img)
            # Multiply by 3 because the data is uint8 (it would be 12 for float32).
        except AssertionError:
            logging.error('图像指令转换失败,数据长度错误')
            return '', None
        img = np.frombuffer(img, dtype=np.uint8).reshape(n_rows, n_cols, -1)
        return cmd, img

    def parse_spec(self, data: bytes) -> (str, any):
        """
        Convert a received message into a spectral cube.

        :param data: the received message
        :return: command type and content
        """
        try:
            assert len(data) > 2
        except AssertionError:
            logging.error('指令转换失败,长度不足3')
            return '', None
        cmd, data = data[:2], data[2:]
        cmd = cmd.decode('ascii').strip().upper()

        n_rows, n_cols, n_bands, spec = data[:2], data[2:4], data[4:6], data[6:]
        try:
            n_rows, n_cols, n_bands = [int.from_bytes(x, byteorder='big') for x in [n_rows, n_cols, n_bands]]
        except Exception as e:
            logging.error(f'长宽转换失败, 错误代码{e}, 报文大小: n_rows:{n_rows}, n_cols:{n_cols}, n_bands:{n_bands}')
            return '', None
        try:
            assert n_rows * n_cols * n_bands * 2 == len(spec)
        except AssertionError:
            logging.error('图像指令转换失败,数据长度错误')
            return '', None
        spec = np.frombuffer(spec, dtype=np.uint16).reshape((n_rows, n_bands, -1)).transpose(0, 2, 1)
        return cmd, spec

    def send_data(self, cmd: str, brix, green_percentage, weight, diameter, defect_num, total_defect_area, rp):
        '''
        Send the result data.
        :param cmd:
        :param brix:
        :param green_percentage:
        :param weight:
        :param diameter:
        :param defect_num:
        :param total_defect_area:
        :param rp:
        :return:
        '''
        cmd = cmd.strip().upper()
        # if cmd == 'KO':
        #     cmd_ko = cmd.encode('ascii')
        #     length = (2).to_bytes(4, byteorder='big')  # only the two bytes of 'KO'
        #     send_message = length + cmd_ko
        #     try:
        #         win32file.WriteFile(self.rgb_send, send_message)
        #         print('KO消息发送成功')
        #     except Exception as e:
        #         logging.error(f'发送KO指令失败,错误类型:{e}')
        #         return False
        #     return True

        cmd_type = 'RE'
        cmd_re = cmd_type.upper().encode('ascii')
        img = np.asarray(rp, dtype=np.uint8)  # result image as a NumPy array
        height = img.shape[0]  # image height
        width = img.shape[1]  # image width
        height = height.to_bytes(2, byteorder='big')
        width = width.to_bytes(2, byteorder='big')
        img_bytes = img.tobytes()
        diameter = int(diameter * 100).to_bytes(2, byteorder='big')
        defect_num = defect_num.to_bytes(2, byteorder='big')
        total_defect_area = int(total_defect_area * 1000).to_bytes(4, byteorder='big')
        length = len(img_bytes) + 18
        length = length.to_bytes(4, byteorder='big')
        if cmd == 'TO':
            brix = 0
            brix = brix.to_bytes(2, byteorder='big')
            gp = int(green_percentage * 100).to_bytes(1, byteorder='big')
            weight = 0
            weight = weight.to_bytes(1, byteorder='big')
            send_message = (length + cmd_re + brix + gp + diameter + weight +
                            defect_num + total_defect_area + height + width + img_bytes)
        elif cmd == 'PF':
            brix = int(brix * 1000).to_bytes(2, byteorder='big')
            gp = int(green_percentage * 100).to_bytes(1, byteorder='big')
            weight = weight.to_bytes(1, byteorder='big')
            send_message = (length + cmd_re + brix + gp + diameter + weight +
                            defect_num + total_defect_area + height + width + img_bytes)
        elif cmd == 'KO':
            brix = 0
            brix = brix.to_bytes(2, byteorder='big')
            gp = 0
            gp = gp.to_bytes(1, byteorder='big')
            weight = 0
            weight = weight.to_bytes(1, byteorder='big')
            defect_num = 0
            defect_num = defect_num.to_bytes(2, byteorder='big')
            total_defect_area = 0
            total_defect_area = total_defect_area.to_bytes(4, byteorder='big')
            height = setting.n_rgb_rows
            height = height.to_bytes(2, byteorder='big')
            width = setting.n_rgb_cols
            width = width.to_bytes(2, byteorder='big')
            img_bytes = np.zeros((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands),
                                 dtype=np.uint8).tobytes()
            length = (18).to_bytes(4, byteorder='big')
            send_message = (length + cmd_re + brix + gp + diameter + weight +
                            defect_num + total_defect_area + height + width + img_bytes)
        try:
            win32file.WriteFile(self.rgb_send, send_message)
            # time.sleep(0.01)
            # print('发送成功')
        except Exception as e:
            logging.error(f'发送指令失败,错误类型:{e}')
            return False
        # end_time = time.time()
        # print(f'发送时间:{end_time - start_time}秒')
        return True


def create_file(file_name):
    """
    Create the file if it does not exist.
    :param file_name: file name
    :return: None
    """
    if os.path.exists(file_name):
        print("文件存在:%s" % file_name)
        return False
        # os.remove(file_name)  # delete the existing file
    if not os.path.exists(file_name):
        print("文件不存在,创建文件:%s" % file_name)
        open(file_name, 'a').close()
        return True


class Logger(object):
    def __init__(self, is_to_file=False, path=None):
        self.is_to_file = is_to_file
        if path is None:
            path = "tomato.log"
        self.path = path
        create_file(path)

    def log(self, content):
        if self.is_to_file:
            with open(self.path, "a") as f:
                print(time.strftime("[%Y-%m-%d_%H-%M-%S]:"), file=f)
                print(content, file=f)
        else:
            print(content)

@ -1,197 +0,0 @@
# -*- coding: utf-8 -*-
# @Time : 2024/6/15 15:40
# @Author : TG
# @File : 01.py
# @Software: PyCharm
import joblib
import numpy as np
import os
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error


def prepare_data(data):
    """Reshape data and select specified spectral bands."""
    reshaped_data = data.reshape(data.shape[0], -1)  # dynamic batch size
    selected_bands = [8, 9, 10, 48, 49, 50, 77, 80, 103, 108, 115, 143, 145]
    return reshaped_data[:, selected_bands]


class SpectralModelingAndPrediction:
    def __init__(self, model_paths=None):
        self.models = {
            "RandomForest": RandomForestRegressor(n_estimators=100),
            "GradientBoosting": GradientBoostingRegressor(n_estimators=100),
            "SVR": SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1),
        }
        self.model_paths = model_paths or {}

    def split_data(self, X, y, test_size=0.20, random_state=12):
        """Split data into training and test sets."""
        return train_test_split(X, y, test_size=test_size, random_state=random_state)

    def evaluate_model(self, model, X_test, y_test):
        """Evaluate the model and return MSE and predictions."""
        y_pred = model.predict(X_test)
        mse = mean_squared_error(y_test, y_pred)
        return mse, y_pred

    def print_predictions(self, y_test, y_pred, model_name):
        """Print actual and predicted values."""
        print(f"Test Set Predictions for {model_name}:")
        for i, (real, pred) in enumerate(zip(y_test, y_pred)):
            print(f"Sample {i + 1}: True Value = {real:.2f}, Predicted Value = {pred:.2f}")

    def fit_and_evaluate(self, X_train, y_train, X_test, y_test):
        for model_name, model in self.models.items():
            model.fit(X_train, y_train)
            if model_name in self.model_paths:
                joblib.dump(model, self.model_paths[model_name])

            mse, y_pred = self.evaluate_model(model, X_test, y_test)
            print(f"Model: {model_name}")
            print(f"Mean Squared Error on the test set: {mse}")
            self.print_predictions(y_test, y_pred, model_name)
            print("\n" + "-" * 50 + "\n")

    def load_model(self, model_path):
        """Load a saved model."""
        return joblib.load(model_path)

    def read_spectral_data(self, hdr_path, raw_path):
        """Read spectral data from an HDR/RAW file pair."""
        with open(hdr_path, 'r', encoding='latin1') as hdr_file:
            lines = hdr_file.readlines()
        height = width = bands = 0
        for line in lines:
            if line.startswith('lines'):
                height = int(line.split()[-1])
            elif line.startswith('samples'):
                width = int(line.split()[-1])
            elif line.startswith('bands'):
                bands = int(line.split()[-1])

        raw_image = np.fromfile(raw_path, dtype='uint16')
        formatImage = np.zeros((height, width, bands))

        for row in range(height):
            for dim in range(bands):
                formatImage[row, :, dim] = raw_image[(dim + row * bands) * width:(dim + 1 + row * bands) * width]

        target_height, target_width, target_bands = 30, 30, 224
        formatImage = self._crop_or_pad(formatImage, height, width, bands, target_height, target_width, target_bands)
        return formatImage

    def _crop_or_pad(self, formatImage, height, width, bands, target_height, target_width, target_bands):
        """Crop or zero-pad the cube to the target size."""
        if height > target_height:
            formatImage = formatImage[:target_height, :, :]
        elif height < target_height:
            pad_height = target_height - height
            formatImage = np.pad(formatImage, ((0, pad_height), (0, 0), (0, 0)), mode='constant', constant_values=0)

        if width > target_width:
            formatImage = formatImage[:, :target_width, :]
        elif width < target_width:
            pad_width = target_width - width
            formatImage = np.pad(formatImage, ((0, 0), (0, pad_width), (0, 0)), mode='constant', constant_values=0)

        if bands > target_bands:
            formatImage = formatImage[:, :, :target_bands]
        elif bands < target_bands:
            pad_bands = target_bands - bands
            formatImage = np.pad(formatImage, ((0, 0), (0, 0), (0, pad_bands)), mode='constant', constant_values=0)

        return formatImage

    def predict(self, data, model_name):
        """Predict with a saved model."""
        model = self.load_model(self.model_paths[model_name])
        return model.predict(data)

    def run_training_and_prediction(self, training_data, training_target, prediction_directory):
        """Run the training and prediction pipeline."""
        # Reshape the data to 2-D.
        training_data = training_data.reshape(training_data.shape[0], -1)

        # Training stage.
        X_train, X_test, y_train, y_test = self.split_data(training_data, training_target)
        self.fit_and_evaluate(X_train, y_train, X_test, y_test)

        # Prediction stage.
        all_spectral_data = []
        for i in range(1, 101):
            hdr_path = os.path.join(prediction_directory, f'{i}.HDR')
            raw_path = os.path.join(prediction_directory, f'{i}')
            if not os.path.exists(hdr_path) or not os.path.exists(raw_path):
                print(f"File {hdr_path} or {raw_path} does not exist.")
                continue
            spectral_data = self.read_spectral_data(hdr_path, raw_path)
            all_spectral_data.append(spectral_data)

        if not all_spectral_data:
            print("No spectral data was read. Please check the file paths and try again.")
            return

        all_spectral_data = np.stack(all_spectral_data)
        print(all_spectral_data.shape)  # This should print (100, 30, 30, 224) or fewer if some files are missing

        data_prepared = prepare_data(all_spectral_data)
        for model_name in self.models.keys():
            predictions = self.predict(data_prepared, model_name)
            print(f"Predictions for {model_name}:")
            print(predictions)
            print("\n" + "-" * 50 + "\n")


if __name__ == "__main__":
    model_paths = {
        "RandomForest": '../20240529RGBtest3/models/random_forest_model_3.joblib',
        "GradientBoosting": '../20240529RGBtest3/models/gradient_boosting_model_3.joblib',
        "SVR": '../20240529RGBtest3/models/svr_model_3.joblib',
    }

    sweetness_acidity = np.array([
        16.2, 16.1, 17, 16.9, 16.8, 17.8, 18.1, 17.2, 17, 17.2, 17.1, 17.2,
        17.2, 17.2, 18.1, 17, 17.6, 17.4, 17.1, 17.1, 16.9, 17.6, 17.3, 16.3,
        16.5, 18.7, 17.6, 16.2, 16.8, 17.2, 16.8, 17.3, 16, 16.6, 16.7, 16.7,
        17.3, 16.3, 16.8, 17.4, 17.3, 16.3, 16.1, 17.2, 18.6, 16.8, 16.1, 17.2,
        18.3, 16.5, 16.6, 17, 17, 17.8, 16.4, 18, 17.7, 17, 18.3, 16.8, 17.5,
        17.7, 18.5, 18, 17.7, 17, 18.3, 18.1, 17.4, 17.7, 17.8, 16.3, 17.1, 16.8,
        17.2, 17.5, 16.6, 17.7, 17.1, 17.7, 19.4, 20.3, 17.3, 15.8, 18, 17.7,
        17.2, 15.2, 18, 18.4, 18.3, 15.7, 17.2, 18.6, 15.6, 17, 16.9, 17.4, 17.8,
        16.5
    ])

    # Specify the directory containing the HDR and RAW files
    directory = r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\xs\光谱数据3030'

    modeling = SpectralModelingAndPrediction(model_paths)

    # Initialize a list to hold all the spectral data arrays
    all_spectral_data = []

    # Loop through each data set (assuming there are 100 datasets)
    for i in range(1, 101):
        hdr_path = os.path.join(directory, f'{i}.HDR')
        raw_path = os.path.join(directory, f'{i}')

        # Check if files exist
        if not os.path.exists(hdr_path) or not os.path.exists(raw_path):
            print(f"File {hdr_path} or {raw_path} does not exist.")
            continue

        # Read data
        spectral_data = modeling.read_spectral_data(hdr_path, raw_path)
        all_spectral_data.append(spectral_data)

    # Stack all data into a single numpy array if not empty
    if all_spectral_data:
        all_spectral_data = np.stack(all_spectral_data)
        print(all_spectral_data.shape)  # This should print (100, 30, 30, 224) or fewer if some files are missing

        # Run training and prediction
        modeling.run_training_and_prediction(all_spectral_data, sweetness_acidity, directory)
    else:
        print("No spectral data was read. Please check the file paths and try again.")

@ -1,87 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from spec_read import all_spectral_data
import joblib

# def prepare_data(data):
#     """Reshape data and select specified spectral bands."""
#     reshaped_data = data.reshape(100, -1)
#     selected_bands = [8, 9, 10, 48, 49, 50, 77, 80, 103, 108, 115, 143, 145]
#     return reshaped_data[:, selected_bands]

def prepare_data(data):
    """Reshape data and select specified spectral bands."""
    selected_bands = [8, 9, 10, 48, 49, 50, 77, 80, 103, 108, 115, 143, 145]
    # Keep only the selected bands.
    data_selected = data[:, :25, :, selected_bands]
    print(f'筛选后的数据尺寸:{data_selected.shape}')
    # Reshape the selected data to a 2-D array, one row per sample.
    reshaped_data = data_selected.reshape(-1, 25 * 30 * len(selected_bands))
    return reshaped_data


def split_data(X, y, test_size=0.20, random_state=12):
    """Split data into training and test sets."""
    return train_test_split(X, y, test_size=test_size, random_state=random_state)


def evaluate_model(model, X_test, y_test):
    """Evaluate the model and return multiple metrics and predictions."""
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    return mse, mae, r2, y_pred


def print_predictions(y_test, y_pred, model_name):
    """Print actual and predicted values."""
    print(f"Test Set Predictions for {model_name}:")
    for i, (real, pred) in enumerate(zip(y_test, y_pred)):
        print(f"Sample {i + 1}: True Value = {real:.2f}, Predicted Value = {pred:.2f}")


def main():
    sweetness_acidity = np.array([
        16.2, 16.1, 17, 16.9, 16.8, 17.8, 18.1, 17.2, 17, 17.2, 17.1, 17.2,
        17.2, 17.2, 18.1, 17, 17.6, 17.4, 17.1, 17.1, 16.9, 17.6, 17.3, 16.3,
        16.5, 18.7, 17.6, 16.2, 16.8, 17.2, 16.8, 17.3, 16, 16.6, 16.7, 16.7,
        17.3, 16.3, 16.8, 17.4, 17.3, 16.3, 16.1, 17.2, 18.6, 16.8, 16.1, 17.2,
        18.3, 16.5, 16.6, 17, 17, 17.8, 16.4, 18, 17.7, 17, 18.3, 16.8, 17.5,
        17.7, 18.5, 18, 17.7, 17, 18.3, 18.1, 17.4, 17.7, 17.8, 16.3, 17.1, 16.8,
        17.2, 17.5, 16.6, 17.7, 17.1, 17.7, 19.4, 20.3, 17.3, 15.8, 18, 17.7,
        17.2, 15.2, 18, 18.4, 18.3, 15.7, 17.2, 18.6, 15.6, 17, 16.9, 17.4, 17.8,
        16.5
    ])

    X = prepare_data(all_spectral_data)
    print(f'原数据尺寸:{all_spectral_data.shape};训练数据尺寸:{X.shape}')
    X_train, X_test, y_train, y_test = split_data(X, sweetness_acidity)

    models = {
        "RandomForest": RandomForestRegressor(n_estimators=100),
        "GradientBoosting": GradientBoostingRegressor(n_estimators=100),
        "SVR": SVR(kernel='rbf', C=100, gamma=0.1, epsilon=.1),
    }

    for model_name, model in models.items():
        model.fit(X_train, y_train)
        if model_name == "RandomForest":
            joblib.dump(model,
                        r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\models\passion_fruit.joblib')

        mse, mae, r2, y_pred = evaluate_model(model, X_test, y_test)
        print(f"Model: {model_name}")
        print(f"MSE on the test set: {mse}")
        print(f"MAE on the test set: {mae}")
        print(f"R² score on the test set: {r2}")
        print_predictions(y_test, y_pred, model_name)
        print("\n" + "-" * 50 + "\n")


if __name__ == "__main__":
    main()

@ -1,117 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from spec_read import all_spectral_data


def prepare_data(data):
    """Calculate the average spectral values and their gradients for each fruit across all pixels."""
    avg_spectra = np.mean(data, axis=(1, 2))
    gradients = np.gradient(avg_spectra, axis=1)
    second_gradients = np.gradient(gradients, axis=1)
    return avg_spectra, gradients, second_gradients


def train_model(X, y):
    """Train a RandomForest model."""
    rf = RandomForestRegressor(n_estimators=100)
    rf.fit(X, y)
    return rf


def split_data(X, y, test_size=0.20, random_state=2):
    """Split data into training and test sets."""
    return train_test_split(X, y, test_size=test_size, random_state=random_state)


def evaluate_model(model, X_test, y_test):
    """Evaluate the model and return MSE and predictions."""
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    return mse, y_pred


def print_predictions(y_test, y_pred):
    """Print actual and predicted values."""
    print("Test Set Predictions:")
    for i, (real, pred) in enumerate(zip(y_test, y_pred)):
        print(f"Sample {i + 1}: True Value = {real:.2f}, Predicted Value = {pred:.2f}")


def plot_spectra(X, y):
    """Plot the average spectra for all samples and annotate with sweetness_acidity values."""
    plt.figure(figsize=(10, 6))
    for i in range(X.shape[0]):
        plt.plot(X[i], label=f'Sample {i+1}')
        plt.annotate(f'{y[i]:.1f}', xy=(len(X[i])-1, X[i][-1]), xytext=(5, 0),
                     textcoords='offset points', ha='left', va='center')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Average Spectral Value')
    plt.title('Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def plot_gradients(gradients):
    """Plot the gradient of the average spectra for all samples."""
    plt.figure(figsize=(10, 6))
    for i in range(gradients.shape[0]):
        plt.plot(gradients[i], label=f'Sample {i+1}')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Gradient Value')
    plt.title('Gradient of Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def plot_second_gradients(second_gradients):
    """Plot the second gradient of the average spectra for all samples."""
    plt.figure(figsize=(10, 6))
    for i in range(second_gradients.shape[0]):
        plt.plot(second_gradients[i], label=f'Sample {i+1}')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Second Gradient Value')
    plt.title('Second Gradient of Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def main():
    sweetness_acidity = np.array([
        16.2, 16.1, 17, 16.9, 16.8, 17.8, 18.1, 17.2, 17, 17.2, 17.1, 17.2,
        17.2, 17.2, 18.1, 17, 17.6, 17.4, 17.1, 17.1, 16.9, 17.6, 17.3, 16.3,
        16.5, 18.7, 17.6, 16.2, 16.8, 17.2, 16.8, 17.3, 16, 16.6, 16.7, 16.7,
        17.3, 16.3, 16.8, 17.4, 17.3, 16.3, 16.1, 17.2, 18.6, 16.8, 16.1, 17.2,
        18.3, 16.5, 16.6, 17, 17, 17.8, 16.4, 18, 17.7, 17, 18.3, 16.8, 17.5,
        17.7, 18.5, 18, 17.7, 17, 18.3, 18.1, 17.4, 17.7, 17.8, 16.3, 17.1, 16.8,
        17.2, 17.5, 16.6, 17.7, 17.1, 17.7, 19.4, 20.3, 17.3, 15.8, 18, 17.7,
        17.2, 15.2, 18, 18.4, 18.3, 15.7, 17.2, 18.6, 15.6, 17, 16.9, 17.4, 17.8,
        16.5
    ])

    X_avg, X_grad, X_second_grad = prepare_data(all_spectral_data)

    plot_spectra(X_avg, sweetness_acidity)  # Plot average spectral curves
    plot_gradients(X_grad)  # Plot gradient curves
    plot_second_gradients(X_second_grad)  # Plot second gradient curves

    # Train and evaluate using average spectral values
    X_train_avg, X_test_avg, y_train_avg, y_test_avg = split_data(X_avg, sweetness_acidity)
    rf_model_avg = train_model(X_train_avg, y_train_avg)
    mse_avg, y_pred_avg = evaluate_model(rf_model_avg, X_test_avg, y_test_avg)
    print("Mean Squared Error using average spectral values:", mse_avg)

    # Train and evaluate using first gradients
    X_train_grad, X_test_grad, y_train_grad, y_test_grad = split_data(X_grad, sweetness_acidity)
    rf_model_grad = train_model(X_train_grad, y_train_grad)
    mse_grad, y_pred_grad = evaluate_model(rf_model_grad, X_test_grad, y_test_grad)
    print("Mean Squared Error using first gradients:", mse_grad)

    # Train and evaluate using second gradients
    X_train_second_grad, X_test_second_grad, y_train_second_grad, y_test_second_grad = split_data(X_second_grad, sweetness_acidity)
    rf_model_second_grad = train_model(X_train_second_grad, y_train_second_grad)
    mse_second_grad, y_pred_second_grad = evaluate_model(rf_model_second_grad, X_test_second_grad, y_test_second_grad)
    print("Mean Squared Error using second gradients:", mse_second_grad)

    print("Predictions using average spectral values:")
    print_predictions(y_test_avg, y_pred_avg)
    print("Predictions using first gradients:")
    print_predictions(y_test_grad, y_pred_grad)
    print("Predictions using second gradients:")
    print_predictions(y_test_second_grad, y_pred_second_grad)


if __name__ == "__main__":
    main()

@ -1,135 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
from spec_read import all_spectral_data


def prepare_data(data):
    """Calculate the average spectral values and their gradients for each fruit across all pixels, and normalize them."""
    avg_spectra = np.mean(data, axis=(1, 2))
    gradients = np.gradient(avg_spectra, axis=1)
    second_gradients = np.gradient(gradients, axis=1)

    scaler = MinMaxScaler()
    avg_spectra = scaler.fit_transform(avg_spectra)
    gradients = scaler.fit_transform(gradients)
    second_gradients = scaler.fit_transform(second_gradients)

    return avg_spectra, gradients, second_gradients


def train_model(X, y):
    """Train a RandomForest model."""
    rf = RandomForestRegressor(n_estimators=100)
    rf.fit(X, y)
    return rf


def split_data(X, y, test_size=0.20, random_state=2):
    """Split data into training and test sets."""
    return train_test_split(X, y, test_size=test_size, random_state=random_state)


def evaluate_model(model, X_test, y_test):
    """Evaluate the model and return MSE and predictions."""
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    return mse, y_pred


def print_predictions(y_test, y_pred):
    """Print actual and predicted values."""
    print("Test Set Predictions:")
    for i, (real, pred) in enumerate(zip(y_test, y_pred)):
        print(f"Sample {i + 1}: True Value = {real:.2f}, Predicted Value = {pred:.2f}")


def plot_spectra(X, y):
    """Plot the average spectra for all samples and annotate with sweetness_acidity values."""
    plt.figure(figsize=(10, 6))
    for i in range(X.shape[0]):
        plt.plot(X[i], label=f'Sample {i + 1}')
        plt.annotate(f'{y[i]:.1f}', xy=(len(X[i]) - 1, X[i][-1]), xytext=(5, 0),
                     textcoords='offset points', ha='left', va='center')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Average Spectral Value')
    plt.title('Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def plot_gradients(gradients):
    """Plot the gradient of the average spectra for all samples."""
    plt.figure(figsize=(10, 6))
    for i in range(gradients.shape[0]):
        plt.plot(gradients[i], label=f'Sample {i + 1}')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Gradient Value')
    plt.title('Gradient of Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def plot_second_gradients(second_gradients):
    """Plot the second gradient of the average spectra for all samples."""
    plt.figure(figsize=(10, 6))
    for i in range(second_gradients.shape[0]):
        plt.plot(second_gradients[i], label=f'Sample {i + 1}')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Second Gradient Value')
    plt.title('Second Gradient of Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def main():
    sweetness_acidity = np.array([
        16.2, 16.1, 17, 16.9, 16.8, 17.8, 18.1, 17.2, 17, 17.2, 17.1, 17.2,
        17.2, 17.2, 18.1, 17, 17.6, 17.4, 17.1, 17.1, 16.9, 17.6, 17.3, 16.3,
        16.5, 18.7, 17.6, 16.2, 16.8, 17.2, 16.8, 17.3, 16, 16.6, 16.7, 16.7,
        17.3, 16.3, 16.8, 17.4, 17.3, 16.3, 16.1, 17.2, 18.6, 16.8, 16.1, 17.2,
        18.3, 16.5, 16.6, 17, 17, 17.8, 16.4, 18, 17.7, 17, 18.3, 16.8, 17.5,
        17.7, 18.5, 18, 17.7, 17, 18.3, 18.1, 17.4, 17.7, 17.8, 16.3, 17.1, 16.8,
        17.2, 17.5, 16.6, 17.7, 17.1, 17.7, 19.4, 20.3, 17.3, 15.8, 18, 17.7,
        17.2, 15.2, 18, 18.4, 18.3, 15.7, 17.2, 18.6, 15.6, 17, 16.9, 17.4, 17.8,
        16.5
    ])

    X_avg, X_grad, X_second_grad = prepare_data(all_spectral_data)

    plot_spectra(X_avg, sweetness_acidity)  # Plot average spectral curves
    plot_gradients(X_grad)  # Plot gradient curves
    plot_second_gradients(X_second_grad)  # Plot second gradient curves

    # Train and evaluate using average spectral values
    X_train_avg, X_test_avg, y_train_avg, y_test_avg = split_data(X_avg, sweetness_acidity)
    rf_model_avg = train_model(X_train_avg, y_train_avg)
    mse_avg, y_pred_avg = evaluate_model(rf_model_avg, X_test_avg, y_test_avg)
    print("Mean Squared Error using average spectral values:", mse_avg)

    # Train and evaluate using first gradients
    X_train_grad, X_test_grad, y_train_grad, y_test_grad = split_data(X_grad, sweetness_acidity)
    rf_model_grad = train_model(X_train_grad, y_train_grad)
    mse_grad, y_pred_grad = evaluate_model(rf_model_grad, X_test_grad, y_test_grad)
    print("Mean Squared Error using first gradients:", mse_grad)

    # Train and evaluate using second gradients
    X_train_second_grad, X_test_second_grad, y_train_second_grad, y_test_second_grad = split_data(X_second_grad,
                                                                                                  sweetness_acidity)
    rf_model_second_grad = train_model(X_train_second_grad, y_train_second_grad)
    mse_second_grad, y_pred_second_grad = evaluate_model(rf_model_second_grad, X_test_second_grad, y_test_second_grad)
    print("Mean Squared Error using second gradients:", mse_second_grad)

    print("Predictions using average spectral values:")
    print_predictions(y_test_avg, y_pred_avg)
    print("Predictions using first gradients:")
    print_predictions(y_test_grad, y_pred_grad)
    print("Predictions using second gradients:")
    print_predictions(y_test_second_grad, y_pred_second_grad)


if __name__ == "__main__":
    main()

@@ -1,89 +0,0 @@
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os


def create_mask(hsv_image, hue_value, hue_delta, value_target, value_delta):
    # Build the H-channel threshold mask
    lower_hue = np.array([hue_value - hue_delta, 0, 0])
    upper_hue = np.array([hue_value + hue_delta, 255, 255])
    hue_mask = cv2.inRange(hsv_image, lower_hue, upper_hue)

    # Build the V-channel mask that excludes the center value
    lower_value_1 = np.array([0, 0, 0])
    upper_value_1 = np.array([180, 255, value_target - value_delta])
    lower_value_2 = np.array([0, 0, value_target + value_delta])
    upper_value_2 = np.array([180, 255, 255])

    value_mask_1 = cv2.inRange(hsv_image, lower_value_1, upper_value_1)
    value_mask_1 = cv2.bitwise_not(value_mask_1)
    cv2.imshow('value_mask_1', value_mask_1)
    value_mask_2 = cv2.inRange(hsv_image, lower_value_2, upper_value_2)
    cv2.imshow('value_mask_2', value_mask_2)
    value_mask = cv2.bitwise_and(value_mask_1, value_mask_2)
    cv2.imshow('value_mask', value_mask)
    # Wait for a key press
    cv2.waitKey(0)

    # Close all windows
    cv2.destroyAllWindows()

    # Combine the H-channel and V-channel masks
    return cv2.bitwise_and(hue_mask, value_mask)


def apply_morphology(mask):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    return cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)


def find_largest_component(mask):
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, 4, cv2.CV_32S)
    if num_labels < 2:
        return None  # No significant components found
    max_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])  # Skip background
    return (labels == max_label).astype(np.uint8) * 255


def process_image(image_path, hue_value=37, hue_delta=10, value_target=25, value_delta=10):
    image = cv2.imread(image_path)
    if image is None:
        print(f"Error: Image at {image_path} could not be read.")
        return None

    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    combined_mask = create_mask(hsv_image, hue_value, hue_delta, value_target, value_delta)
    combined_mask = apply_morphology(combined_mask)
    max_mask = find_largest_component(combined_mask)
    if max_mask is None:
        print(f"No significant components found in {image_path}.")
        return None

    cv2.imshow('max_mask', max_mask)
    # Wait for a key press
    cv2.waitKey(0)
    # Close all windows
    cv2.destroyAllWindows()

    result_image = cv2.bitwise_and(image, image, mask=max_mask)
    result_image[max_mask == 0] = [255, 255, 255]  # Set background to white
    return result_image


def save_image(image, output_path):
    cv2.imwrite(output_path, image)


def process_images_in_folder(input_folder, output_folder):
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    for filename in os.listdir(input_folder):
        if filename.lower().endswith(".bmp"):
            image_path = os.path.join(input_folder, filename)
            result_image = process_image(image_path)
            if result_image is not None:
                output_path = os.path.join(output_folder, filename)
                save_image(result_image, output_path)
                print(f"Processed and saved {filename} to {output_folder}.")


# Main entry point
input_folder = '/Users/xs/PycharmProjects/super-tomato/baixiangguo/rgb效果/test'  # replace with your input folder path
output_folder = '/Users/xs/PycharmProjects/super-tomato/baixiangguo/rgb效果/testfore'  # replace with your output folder path
process_images_in_folder(input_folder, output_folder)
@@ -1,309 +0,0 @@
import cv2
import numpy as np
import os
import argparse
# from svm import predict_image_array, load_model


# Extract the tomato using the S + L image
def extract_s_l(image_path):
    image = cv2.imread(image_path)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lab = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    s_channel = hsv[:, :, 1]
    l_channel = lab[:, :, 0]
    result = cv2.add(s_channel, l_channel)
    return result


def find_reflection(image_path, threshold=190):
    # Read the image as grayscale
    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # Apply threshold segmentation
    _, reflection = cv2.threshold(image, threshold, 255, cv2.THRESH_BINARY)

    return reflection


def otsu_threshold(image):
    # Convert the image to grayscale if needed
    # gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Otsu threshold segmentation
    _, binary = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    return binary


# Extract the calyx using the G - R image
def extract_g_r(image):
    # image = cv2.imread(image_path)
    g_channel = image[:, :, 1]
    r_channel = image[:, :, 2]
    result = cv2.subtract(cv2.multiply(g_channel, 1.5), r_channel)
    return result


# Extract the tomato using the R - B image
def extract_r_b(image_path):
    image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    b_channel = image[:, :, 0]
    result = cv2.subtract(r_channel, b_channel)
    return result


def extract_r_g(image_path):
    image = cv2.imread(image_path)
    r_channel = image[:, :, 2]
    g_channel = image[:, :, 1]
    result = cv2.subtract(r_channel, g_channel)
    return result


def threshold_segmentation(image, threshold, color=255):
    _, result = cv2.threshold(image, threshold, color, cv2.THRESH_BINARY)
    return result


def bitwise_operation(image1, image2, operation='and'):
    if operation == 'and':
        result = cv2.bitwise_and(image1, image2)
    elif operation == 'or':
        result = cv2.bitwise_or(image1, image2)
    else:
        raise ValueError("operation must be 'and' or 'or'")
    return result


def largest_connected_component(bin_img):
    # Find connected regions with connectedComponentsWithStats
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(bin_img, connectivity=8)

    # Find the largest connected region (excluding the background)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # Build a new binary image that keeps only the largest region
    new_bin_img = np.zeros_like(bin_img)
    new_bin_img[labels == largest_label] = 255

    return new_bin_img


def close_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    closed_img = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel)
    return closed_img


def open_operation(bin_img, kernel_size=(5, 5)):
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
    opened_img = cv2.morphologyEx(bin_img, cv2.MORPH_OPEN, kernel)
    return opened_img


def draw_tomato_edge(original_img, bin_img):
    bin_img_processed = close_operation(bin_img, kernel_size=(15, 15))
    # cv2.imshow('Close Operation', bin_img_processed)
    # bin_img_processed = open_operation(bin_img_processed, kernel_size=(19, 19))
    # cv2.imshow('Open Operation', bin_img_processed)
    # Find contours on the processed bin_img_processed
    contours, _ = cv2.findContours(bin_img_processed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # If no contour is found, return the original image and an empty mask
    if not contours:
        return original_img, np.zeros_like(bin_img)
    # Find the largest contour
    max_contour = max(contours, key=cv2.contourArea)
    # Accuracy of the polygon approximation
    epsilon = 0.0006 * cv2.arcLength(max_contour, True)  # this value can be tuned
    approx = cv2.approxPolyDP(max_contour, epsilon, True)
    # Draw the contour
    cv2.drawContours(original_img, [approx], -1, (0, 255, 0), 3)
    mask = np.zeros_like(bin_img)

    # Fill the largest contour with white
    cv2.drawContours(mask, [max_contour], -1, (255), thickness=cv2.FILLED)

    return original_img, mask


def draw_tomato_edge_convex_hull(original_img, bin_img):
    bin_img_blurred = cv2.GaussianBlur(bin_img, (5, 5), 0)
    contours, _ = cv2.findContours(bin_img_blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return original_img
    max_contour = max(contours, key=cv2.contourArea)
    hull = cv2.convexHull(max_contour)
    cv2.drawContours(original_img, [hull], -1, (0, 255, 0), 3)
    return original_img


# Get the complete tomato binary image, except for the green calyx
def fill_holes(bin_img):
    # Copy bin_img to img_filled
    img_filled = bin_img.copy()

    # Get the height and width of the image
    height, width = bin_img.shape

    # Create a mask two pixels larger than the input image
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # Flood-fill the black region
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # Invert the filled image
    img_filled_d = cv2.bitwise_not(img_filled)

    # Merge the original and filled images with bitwise_or
    img_filled = cv2.bitwise_or(bin_img, img_filled)
    # Crop img_filled and img_filled_d back to the size of bin_img
    # img_filled = img_filled[:height, :width]
    img_filled_d = img_filled_d[:height, :width]

    return img_filled, img_filled_d


def bitwise_and_rgb_with_binary(rgb_img, bin_img):
    # Convert the binary image to a 3-channel image
    bin_img_3channel = cv2.cvtColor(bin_img, cv2.COLOR_GRAY2BGR)

    # Combine the RGB image and the binary image with bitwise_and
    result = cv2.bitwise_and(rgb_img, bin_img_3channel)

    return result


def extract_max_connected_area(image_path, lower_hsv, upper_hsv):
    # Read the image
    image = cv2.imread(image_path)

    # Convert the image from BGR to HSV
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Threshold to get a binary image of the target region
    mask = cv2.inRange(hsv, lower_hsv, upper_hsv)

    # Find the connected regions of the binary image
    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(mask, connectivity=8)

    # Find the largest connected region (excluding the background)
    largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])

    # Build a new binary image that keeps only the largest region
    new_bin_img = np.zeros_like(mask)
    new_bin_img[labels == largest_label] = 255

    # Copy new_bin_img to img_filled
    img_filled = new_bin_img.copy()

    # Get the height and width of the image
    height, width = new_bin_img.shape

    # Create a mask two pixels larger than the input image
    mask = np.zeros((height + 2, width + 2), np.uint8)

    # Flood-fill the black region
    cv2.floodFill(img_filled, mask, (0, 0), 255)

    # Invert the filled image
    img_filled_inv = cv2.bitwise_not(img_filled)

    # Merge the original and filled images with bitwise_or
    img_filled = cv2.bitwise_or(new_bin_img, img_filled_inv)

    return img_filled


def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--dir_path', type=str,
                        default=r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\data\20270708\T2\middle',
                        help='the directory path of images')
    parser.add_argument('--threshold_s_l', type=int, default=180,
                        help='the threshold for s_l')
    parser.add_argument('--threshold_r_b', type=int, default=15,
                        help='the threshold for r_b')

    args = parser.parse_args()

    for img_file in os.listdir(args.dir_path):
        if img_file.endswith('.bmp'):
            img_path = os.path.join(args.dir_path, img_file)
            s_l = extract_s_l(img_path)
            otsu_thresholded = otsu_threshold(s_l)
            img_fore = bitwise_and_rgb_with_binary(cv2.imread(img_path), otsu_thresholded)
            img_fore_defect = extract_g_r(img_fore)
            cv2.imshow('img_fore_defect1', img_fore_defect)
            img_fore_defect = threshold_segmentation(img_fore_defect, args.threshold_r_b)
            cv2.imshow('img_fore_defect2', img_fore_defect)
            thresholded_s_l = threshold_segmentation(s_l, args.threshold_s_l)
            new_bin_img = largest_connected_component(thresholded_s_l)
            cv2.imshow('new_bin_img', new_bin_img)
            # zhongggggg = cv2.bitwise_or(new_bin_img, cv2.imread('defect_mask.bmp', cv2.IMREAD_GRAYSCALE))
            # cv2.imshow('zhongggggg', zhongggggg)
            new_otsu_bin_img = largest_connected_component(otsu_thresholded)
            filled_img, defect = fill_holes(new_bin_img)
            defect = bitwise_and_rgb_with_binary(cv2.imread(img_path), defect)
            cv2.imshow('defect', defect)
            edge, mask = draw_tomato_edge(cv2.imread(img_path), new_bin_img)
            org_defect = bitwise_and_rgb_with_binary(edge, new_bin_img)

            fore = bitwise_and_rgb_with_binary(cv2.imread(img_path), mask)

            fore_g_r_t = threshold_segmentation(extract_g_r(fore), 20)
            filled_img_nogreen = cv2.bitwise_or(filled_img, fore_g_r_t)
            cv2.imshow('filled_img_nogreen', filled_img_nogreen)
            # Count the white pixels
            print(np.sum(fore_g_r_t == 255))
            print(np.sum(mask == 255))
            print(np.sum(fore_g_r_t == 255) / np.sum(mask == 255))

            fore_g_r_t_ture = bitwise_and_rgb_with_binary(cv2.imread(img_path), fore_g_r_t)
            # cv2.imwrite('defect_big.bmp', fore_g_r_t_ture)
            org_defect_new = bitwise_and_rgb_with_binary(edge, new_bin_img)
            res = cv2.bitwise_or(new_bin_img, fore_g_r_t)
            nogreen = bitwise_and_rgb_with_binary(edge, res)
            cv2.imshow('nogreen', nogreen)
            white = find_reflection(img_path)

            # SVM prediction
            # Load the model
            # model, scaler = load_model('/Users/xs/PycharmProjects/super-tomato/svm_green.joblib')

            # Predict on the image
            # predicted_mask = predict_image_array(image, model, scaler)

            cv2.imshow('white', white)

            cv2.imshow('fore', fore)
            cv2.imshow('fore_g_r_t', fore_g_r_t)
            cv2.imshow('mask', mask)
            cv2.imshow('res', res)

            # lower_hsv = np.array([19, 108, 15])
            # upper_hsv = np.array([118, 198, 134])
            # max_connected_area = extract_max_connected_area(img_path, lower_hsv, upper_hsv)
            # cv2.imshow('Max Connected Area', max_connected_area)

            # Show the original image
            original_img = cv2.imread(img_path)
            cv2.imshow('Original', original_img)
            cv2.imshow('thresholded_s_l', thresholded_s_l)
            cv2.imshow('Largest Connected Component', new_bin_img)
            cv2.imshow('Filled', filled_img)
            cv2.imshow('Defect', defect)
            cv2.imshow('Org_defect', org_defect)
            cv2.imshow('new_otsu_bin_img', new_otsu_bin_img)
            cv2.imshow('otsu_thresholded', otsu_thresholded)

            # Show the contour
            cv2.imshow('Edge', edge)

            # Wait for a key press
            cv2.waitKey(0)

            # Close all windows
            cv2.destroyAllWindows()


if __name__ == '__main__':
    main()
@@ -1,71 +0,0 @@
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from spec_read import all_spectral_data


def prepare_data(data):
    """Calculate the average spectral values for each fruit across all pixels."""
    return np.mean(data, axis=(1, 2))


def train_model(X, y):
    """Train a RandomForest model."""
    rf = RandomForestRegressor(n_estimators=100)
    rf.fit(X, y)
    return rf


def split_data(X, y, test_size=0.20, random_state=4):
    """Split data into training and test sets."""
    return train_test_split(X, y, test_size=test_size, random_state=random_state)


def evaluate_model(model, X_test, y_test):
    """Evaluate the model and return MSE and predictions."""
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    return mse, y_pred


def print_predictions(y_test, y_pred):
    """Print actual and predicted values."""
    print("Test Set Predictions:")
    for i, (real, pred) in enumerate(zip(y_test, y_pred)):
        print(f"Sample {i + 1}: True Value = {real:.2f}, Predicted Value = {pred:.2f}")


def plot_spectra(X, y):
    """Plot the average spectra for all samples and annotate with sweetness_acidity values."""
    plt.figure(figsize=(10, 6))
    for i in range(X.shape[0]):
        plt.plot(X[i], label=f'Sample {i+1}')
        plt.annotate(f'{y[i]:.1f}', xy=(len(X[i])-1, X[i][-1]), xytext=(5, 0),
                     textcoords='offset points', ha='left', va='center')
    plt.xlabel('Wavelength Index')
    plt.ylabel('Average Spectral Value')
    plt.title('Average Spectral Curves for All Samples')
    plt.legend(loc='upper right', bbox_to_anchor=(1.1, 1.05))
    plt.show()


def main():
    sweetness_acidity = np.array([
        16.2, 16.1, 17, 16.9, 16.8, 17.8, 18.1, 17.2, 17, 17.2, 17.1, 17.2,
        17.2, 17.2, 18.1, 17, 17.6, 17.4, 17.1, 17.1, 16.9, 17.6, 17.3, 16.3,
        16.5, 18.7, 17.6, 16.2, 16.8, 17.2, 16.8, 17.3, 16, 16.6, 16.7, 16.7,
        17.3, 16.3, 16.8, 17.4, 17.3, 16.3, 16.1, 17.2, 18.6, 16.8, 16.1, 17.2,
        18.3, 16.5, 16.6, 17, 17, 17.8, 16.4, 18, 17.7, 17, 18.3, 16.8, 17.5,
        17.7, 18.5, 18, 17.7, 17, 18.3, 18.1, 17.4, 17.7, 17.8, 16.3, 17.1, 16.8,
        17.2, 17.5, 16.6, 17.7, 17.1, 17.7, 19.4, 20.3, 17.3, 15.8, 18, 17.7,
        17.2, 15.2, 18, 18.4, 18.3, 15.7, 17.2, 18.6, 15.6, 17, 16.9, 17.4, 17.8,
        16.5
    ])

    X = prepare_data(all_spectral_data)
    plot_spectra(X, sweetness_acidity)  # Plot the spectral curves with annotations
    X_train, X_test, y_train, y_test = split_data(X, sweetness_acidity)
    rf_model = train_model(X_train, y_train)
    mse, y_pred = evaluate_model(rf_model, X_test, y_test)

    print("Transformed data shape:", X_train.shape)
    print("Mean Squared Error on the test set:", mse)
    print_predictions(y_test, y_pred)


if __name__ == "__main__":
    main()
@@ -1,87 +0,0 @@
import joblib
import numpy as np
import os
from dimensionality_reduction import prepare_data


def read_spectral_data(hdr_path, raw_path):
    # Read HDR file for image dimensions information
    with open(hdr_path, 'r', encoding='latin1') as hdr_file:
        lines = hdr_file.readlines()
        height = width = bands = 0
        for line in lines:
            if line.startswith('lines'):
                height = int(line.split()[-1])
            elif line.startswith('samples'):
                width = int(line.split()[-1])
            elif line.startswith('bands'):
                bands = int(line.split()[-1])

    # Read spectral data from RAW file
    raw_image = np.fromfile(raw_path, dtype='uint16')
    # Initialize the image with the actual read dimensions
    formatImage = np.zeros((height, width, bands))

    for row in range(height):
        for dim in range(bands):
            formatImage[row, :, dim] = raw_image[(dim + row * bands) * width:(dim + 1 + row * bands) * width]

    # Ensure the image is 30x30x224 by cropping or padding
    target_height, target_width, target_bands = 30, 30, 224
    # Crop or pad height
    if height > target_height:
        formatImage = formatImage[:target_height, :, :]
    elif height < target_height:
        pad_height = target_height - height
        formatImage = np.pad(formatImage, ((0, pad_height), (0, 0), (0, 0)), mode='constant', constant_values=0)

    # Crop or pad width
    if width > target_width:
        formatImage = formatImage[:, :target_width, :]
    elif width < target_width:
        pad_width = target_width - width
        formatImage = np.pad(formatImage, ((0, 0), (0, pad_width), (0, 0)), mode='constant', constant_values=0)

    # Crop or pad bands if necessary (usually bands should not change)
    if bands > target_bands:
        formatImage = formatImage[:, :, :target_bands]
    elif bands < target_bands:
        pad_bands = target_bands - bands
        formatImage = np.pad(formatImage, ((0, 0), (0, 0), (0, pad_bands)), mode='constant', constant_values=0)

    return formatImage


def load_model(model_path):
    """Load the model."""
    return joblib.load(model_path)


def predict(model, data):
    """Predict on the data."""
    return model.predict(data)


def main():
    # Load the model
    model = load_model(r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\models\passion_fruit_3.joblib')

    # Read the data
    directory = r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\xs\光谱数据3030'
    all_spectral_data = []
    for i in range(1, 101):
        hdr_path = os.path.join(directory, f'{i}.HDR')
        raw_path = os.path.join(directory, f'{i}')
        spectral_data = read_spectral_data(hdr_path, raw_path)
        all_spectral_data.append(spectral_data)
    all_spectral_data = np.stack(all_spectral_data)
    print(all_spectral_data.shape)

    # Preprocess the data
    data_prepared = prepare_data(all_spectral_data)
    print(data_prepared.shape)

    # Predict
    predictions = predict(model, data_prepared)

    # Print the predictions
    print(predictions)


if __name__ == "__main__":
    main()
@@ -1,71 +0,0 @@
import cv2
import numpy as np
import matplotlib.pyplot as plt


def dual_threshold_and_max_component(image_path, hue_value=37, hue_delta=10, value_target=30, value_delta=10):
    # Read the image
    image = cv2.imread(image_path)

    # Check that the image was read successfully
    if image is None:
        print("Error: Image could not be read.")
        return

    # Convert the image from BGR to the HSV color space
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Build the H-channel threshold mask
    lower_hue = np.array([hue_value - hue_delta, 0, 0])
    upper_hue = np.array([hue_value + hue_delta, 255, 255])
    hue_mask = cv2.inRange(hsv_image, lower_hue, upper_hue)

    # Build the V-channel mask that excludes the center value
    lower_value_1 = np.array([0, 0, 0])
    upper_value_1 = np.array([180, 255, value_target - value_delta])
    lower_value_2 = np.array([0, 0, value_target + value_delta])
    upper_value_2 = np.array([180, 255, 255])

    value_mask_1 = cv2.inRange(hsv_image, lower_value_1, upper_value_1)
    value_mask_2 = cv2.inRange(hsv_image, lower_value_2, upper_value_2)
    value_mask = cv2.bitwise_or(value_mask_1, value_mask_2)

    # Combine the H-channel and V-channel masks
    combined_mask = cv2.bitwise_and(hue_mask, value_mask)

    # Morphological opening to remove small adhesions
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    combined_mask = cv2.morphologyEx(combined_mask, cv2.MORPH_OPEN, kernel)

    # Connected-component analysis
    num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(combined_mask, 4, cv2.CV_32S)

    # Find the largest connected component (excluding the background)
    max_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])  # skip the background
    max_mask = (labels == max_label).astype(np.uint8) * 255
    cv2.imshow('max_mask', max_mask)

    # Build the result image from the mask
    result_image = cv2.bitwise_and(image, image, mask=max_mask)
    # Set the background to white
    result_image[max_mask == 0] = [255, 255, 255]

    # Convert the result image from BGR to RGB so it displays correctly
    result_image_rgb = cv2.cvtColor(result_image, cv2.COLOR_BGR2RGB)

    # Show the original image and the result with matplotlib
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 2, 1)
    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    plt.title('Original Image')
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(result_image_rgb)
    plt.title('Largest Connected Component on White Background')
    plt.axis('off')

    plt.show()


# Run on a sample image
image_path = r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\data\passion_fruit_img/39.bmp'  # replace with your image path
dual_threshold_and_max_component(image_path)
@@ -1,70 +0,0 @@
import numpy as np
import os


def read_spectral_data(hdr_path, raw_path):
    # Read HDR file for image dimensions information
    with open(hdr_path, 'r', encoding='latin1') as hdr_file:
        lines = hdr_file.readlines()
        height = width = bands = 0
        for line in lines:
            if line.startswith('lines'):
                height = int(line.split()[-1])
            elif line.startswith('samples'):
                width = int(line.split()[-1])
            elif line.startswith('bands'):
                bands = int(line.split()[-1])

    # Read spectral data from RAW file
    raw_image = np.fromfile(raw_path, dtype='uint16')
    # Initialize the image with the actual read dimensions
    formatImage = np.zeros((height, width, bands))

    for row in range(height):
        for dim in range(bands):
            formatImage[row, :, dim] = raw_image[(dim + row * bands) * width:(dim + 1 + row * bands) * width]

    # Ensure the image is 30x30x224 by cropping or padding
    target_height, target_width, target_bands = 30, 30, 224
    # Crop or pad height
    if height > target_height:
        formatImage = formatImage[:target_height, :, :]
    elif height < target_height:
        pad_height = target_height - height
        formatImage = np.pad(formatImage, ((0, pad_height), (0, 0), (0, 0)), mode='constant', constant_values=0)

    # Crop or pad width
    if width > target_width:
        formatImage = formatImage[:, :target_width, :]
    elif width < target_width:
        pad_width = target_width - width
        formatImage = np.pad(formatImage, ((0, 0), (0, pad_width), (0, 0)), mode='constant', constant_values=0)

    # Crop or pad bands if necessary (usually bands should not change)
    if bands > target_bands:
        formatImage = formatImage[:, :, :target_bands]
    elif bands < target_bands:
        pad_bands = target_bands - bands
        formatImage = np.pad(formatImage, ((0, 0), (0, 0), (0, pad_bands)), mode='constant', constant_values=0)

    return formatImage


# Specify the directory containing the HDR and RAW files
directory = r'D:\project\supermachine--tomato-passion_fruit\20240529RGBtest3\xs\光谱数据3030'

# Initialize a list to hold all the spectral data arrays
all_spectral_data = []

# Loop through each data set (100 datasets here)
for i in range(1, 101):
    hdr_path = os.path.join(directory, f'{i}.HDR')
    raw_path = os.path.join(directory, f'{i}')

    # Read data
    spectral_data = read_spectral_data(hdr_path, raw_path)
    all_spectral_data.append(spectral_data)

# Stack all data into a single numpy array
all_spectral_data = np.stack(all_spectral_data)
print(all_spectral_data.shape)  # This should print (100, 30, 30, 224)
@@ -1,69 +0,0 @@
# Passion Fruit & Tomato Communication

Based on named pipes; each packet carries three fields (length, command, data):

| Length 1 | Length 2 | Length 3 | Length 4 | Cmd 1 | Cmd 2 | Data 1 | Data 2 | ... | Data i |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :--: | :---: |
| 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | 8'hzz | ... | 8'hzz |

## Length

A 32-bit unsigned integer `length`, where length = number of data bytes i + 2. <br>`Length 1` is length[31:24], `Length 2` is length[23:16], `Length 3` is length[15:8], `Length 4` is length[7:0].

## Command

Two ASCII characters (2 bytes, 16 bits); for example `Cmd 1` = 'T' and `Cmd 2` = 'O' denotes a tomato data packet.

Commands:

| Cmd 1 | Cmd 2 | Meaning |
| :---: | :---: | :----------: |
| Y | R | warm-up data |
| T | O | tomato data |
| P | F | passion fruit data |
| R | E | result data |
| K | O | empty-fruit result |

## **Data**

**Warm-up packet: 'Y''R'** carries no data field, only the **length field + command field**.


**RGB image packet: 'Cmd 1''Cmd 2'**: `Data 1`~`Data i` contain the image row count rows (height), column count cols (width), and the RGB pixel data, laid out as **height + width + RGB data**:
$$
i-4=rows \times cols \times 3
$$
`Data 1`~`Data i` are laid out as follows:

| Rows 1 | Rows 2 | Cols 1 | Cols 2 | Image data 1 | ... | Image data (i-4) |
| :--------: | :-------: | :--------: | :-------: | :-------: | :--: | :-----------: |
| rows[15:8] | rows[7:0] | cols[15:8] | cols[7:0] | | ... | |


**Spectral packet: 'Cmd 1''Cmd 2'**: `Data 1`~`Data i` contain the row count rows (height), column count cols (width), band count bands, and the spectral data, laid out as **height + width + bands + spectral data**:
$$
i-6=rows \times cols \times bands \times 2
$$
`Data 1`~`Data i` are laid out as follows:

| Rows 1 | Rows 2 | Cols 1 | Cols 2 | Bands 1 | Bands 2 | Image data 1 | ... | Image data (i-6) |
| :--------: | :-------: | :--------: | :-------: | :---------: | :--------: | :-------: | :--: | :-----------: |
| rows[15:8] | rows[7:0] | cols[15:8] | cols[7:0] | bands[15:8] | bands[7:0] | | ... | |


**Result packet: 'R''E'**: `Data 1`~`Data i` contain the sugar content Brix, color ratio color, diameter long, estimated weight weight, defect count num, defect area area, the result image row count rows (height) and column count cols (width), and the RGB data of the result image, laid out as **Brix + color ratio + diameter + weight + defect count + defect area + height + width + RGB data**:
$$
i-16=rows \times cols \times 3
$$
`Data 1`~`Data i` are laid out as follows:

| Brix 2 | Brix 1 | Color | Diameter 2 | Diameter 1 | Weight | Defect count 1 | Defect count 2 | Defect area 1 | Defect area 2 | Defect area 3 | Defect area 4 | Rows 1 | Rows 2 | Cols 1 | Cols 2 | Image data 1 | ... | Image data (i-16) |
| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: | :--: | :-: |
| Brix[15:8] | Brix[7:0] | color[7:0] | long[15:8] | long[7:0] | weight[7:0] | num[15:8] | num[7:0] | area[31:24] | area[23:16] | area[15:8] | area[7:0] | rows[15:8] | rows[7:0] | cols[15:8] | cols[7:0] | | ... | |


**Empty-fruit packet: 'K''O'** carries no data field, only the **length field + command field**.
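To make the byte layout above concrete, here is a minimal Python sketch of one sender-side and one receiver-side helper. The helper names (`build_rgb_packet`, `parse_result_payload`) and the choice to strip the length and command bytes before parsing are illustrative assumptions; the field offsets and the `length = i + 2` rule follow the tables above.

```python
import numpy as np

def build_rgb_packet(cmd: str, img: np.ndarray) -> bytes:
    """Pack an H x W x 3 uint8 image as length + command + (rows, cols, RGB bytes)."""
    rows, cols = img.shape[:2]
    data = rows.to_bytes(2, 'big') + cols.to_bytes(2, 'big') + img.tobytes()
    length = (len(data) + 2).to_bytes(4, 'big')            # length counts the 2 command bytes, not itself
    return length + cmd.upper().encode('ascii') + data     # Cmd 1, Cmd 2, Data 1..i

def parse_result_payload(payload: bytes) -> dict:
    """Unpack the data field of an 'RE' packet (length and command bytes already removed)."""
    return {
        'brix':   int.from_bytes(payload[0:2], 'big'),     # Brix[15:0]
        'color':  payload[2],                              # color[7:0]
        'long':   int.from_bytes(payload[3:5], 'big'),     # diameter
        'weight': payload[5],
        'num':    int.from_bytes(payload[6:8], 'big'),     # defect count
        'area':   int.from_bytes(payload[8:12], 'big'),    # defect area
        'image':  np.frombuffer(payload[16:], dtype=np.uint8).reshape(
            int.from_bytes(payload[12:14], 'big'),         # rows
            int.from_bytes(payload[14:16], 'big'), 3),     # cols
    }

# A warm-up packet 'YR' has no data field, so its length field is simply 2.
warmup_packet = (2).to_bytes(4, 'big') + b'YR'
rgb_packet = build_rgb_packet('TO', np.zeros((2, 2, 3), dtype=np.uint8))
```

These are the raw integer fields as they travel over the pipe; any scaling (for example dividing Brix by 1000 on the receiving side) is left to the application.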
@@ -1,71 +0,0 @@
import cv2
import numpy as np
import os

# Read all image files in a folder
def read_images_from_folder(folder):
    images = []
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, filename))
        if img is not None:
            images.append((filename, img))
    return images

# Threshold the a channel of the Lab color space, and also handle high-gray pixels
def threshold_lab_a_and_high_gray(image, lower_threshold=0, upper_threshold=20):
    lab_image = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
    _, a, _ = cv2.split(lab_image)

    # Create a black image of the same size as the a channel
    binary_image = np.zeros_like(a)

    # Set pixels whose a value lies within the given range to white (255)
    binary_image[(a >= lower_threshold) & (a <= upper_threshold)] = 255

    # Build a binary image for pixels with gray value greater than 170
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    high_gray_image = np.zeros_like(gray_image)
    high_gray_image[gray_image > 170] = 255

    # Remove the high-gray pixels from the a-channel threshold image
    final_image = cv2.bitwise_and(binary_image, binary_image, mask=np.bitwise_not(high_gray_image))

    return binary_image, high_gray_image, final_image

# Concatenate and display all images
def concatenate_images(original, images, filename, scale=0.5):
    # Convert every single-channel image to three channels
    resized_imgs = []
    for img in images:
        if len(img.shape) == 2:  # single-channel image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        # Resize the image
        img = cv2.resize(img, (int(img.shape[1] * scale), int(img.shape[0] * scale)))
        resized_imgs.append(img)

    # Resize the original image with the same scale
    resized_original = cv2.resize(original, (int(original.shape[1] * scale), int(original.shape[0] * scale)))

    # Horizontally concatenate the first and second rows
    top_row = cv2.hconcat([resized_original, resized_imgs[0]])
    bottom_row = cv2.hconcat([resized_imgs[1], resized_imgs[2]])

    # Vertically concatenate all rows
    final_image = cv2.vconcat([top_row, bottom_row])

    # Show the image
    cv2.imshow(f"Combined Images - {filename}", final_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def main():
    folder = r'F:\images'  # replace with your folder path
    images = read_images_from_folder(folder)

    for filename, image in images:
        lab_thresh, high_gray, final_image = threshold_lab_a_and_high_gray(image, lower_threshold=115, upper_threshold=135)
        concatenate_images(image, [lab_thresh, high_gray, final_image], filename, scale=0.5)  # pass a scale factor

if __name__ == "__main__":
    main()
@@ -1,27 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/7/14 17:11
# @Author  : TG
# @File    : imgcopy.py
# @Software: PyCharm
import os
import shutil

def copy_images_recursively(source_folder, target_folder):
    # Walk through everything in the source folder
    for item in os.listdir(source_folder):
        item_path = os.path.join(source_folder, item)
        if os.path.isdir(item_path):
            # If it is a folder, recurse into it
            copy_images_recursively(item_path, target_folder)
        elif item.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')):
            # If it is an image file, copy it to the target folder
            shutil.copy(item_path, target_folder)
            print(f"Copied {item_path} to {target_folder}")

# Source folder path
source_folder = r'D:\project\20240714Actual_deployed\20240718test\T'
# Target folder path
target_folder = r'D:\project\20240714Actual_deployed\20240718test\01img'

# Run the copy
copy_images_recursively(source_folder, target_folder)
@@ -1,34 +0,0 @@
import os
import re

def natural_sort_key(s):
    """Use the digits embedded in the text as the sort key."""
    return [int(text) if text.isdigit() else text.lower() for text in re.split(r'(\d+)', s)]

def rename_bmp_images(folder_path, prefix, suffix):
    # List every file in the folder
    files = os.listdir(folder_path)
    # Keep only BMP image files and sort them naturally
    bmp_files = sorted([f for f in files if f.lower().endswith('.bmp')], key=natural_sort_key)

    # Rename each BMP image file
    for index, bmp_file in enumerate(bmp_files):
        old_path = os.path.join(folder_path, bmp_file)
        # Format the new file name, e.g. 1-1-1.bmp, 1-2-1.bmp, ...
        new_name = f"{prefix}-{index + 1}-{suffix}.bmp"
        new_path = os.path.join(folder_path, new_name)

        # Rename the file
        os.rename(old_path, new_path)
        print(f'Renamed {old_path} to {new_path}')

# Folder paths
folder_path = r'D:\project\20240714Actual_deployed\20240718test\T\bottom'
folder_path1 = r'D:\project\20240714Actual_deployed\20240718test\T\middle'
folder_path2 = r'D:\project\20240714Actual_deployed\20240718test\T\top'

num = '1'
# Rename the files in the three folders
rename_bmp_images(folder_path, prefix=num, suffix='1')
rename_bmp_images(folder_path1, prefix=num, suffix='2')
rename_bmp_images(folder_path2, prefix=num, suffix='3')
@@ -1,88 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/7/16 20:58
# @Author  : GG
# @File    : pf_zz_test.py
# @Software: PyCharm
# -*- coding: utf-8 -*-
# @Time    : 2024/7/7 16:33
# @Author  : TG
# @File    : totest.py
# @Software: PyCharm
import time
import cv2
import numpy as np
import os
import pandas as pd
from PIL import Image
import re
from classifer import ImageClassifier
from config import Config as setting


def tryint(s):
    try:
        return int(s)
    except ValueError:
        return s


def natural_sort_key(s):
    return [tryint(c) for c in re.split(r'([0-9]+)', s)]


# "0": "De"  wrinkled
# "1": "N"   normal


def main():
    image_dir = r'D:\project\20240714Actual_deployed\zz_test\TEST'
    pf_zz = ImageClassifier(model_path=setting.imgclassifier_model_path,
                            class_indices_path=setting.imgclassifier_class_indices_path)  # ImageClassifier separates wrinkled from normal fruit
    # Collect all .bmp files and sort them naturally
    rgb_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.bmp')]
    rgb_files.sort(key=natural_sort_key)

    # Records to be written to Excel
    records = []

    for idx, image_path in enumerate(rgb_files):
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)

        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        t = time.time()
        result = pf_zz.predict(img)
        e = time.time()

        process_time = (e - t) * 1000
        print(f'Image {idx + 1} time: {process_time}')
        print(f'Result: {result}')

        ## Show the results in the console
        # records.append(result)
        # print(f'Number classified as normal (not wrinkled): {sum(records)}')


        ## Write the results and original file info to Excel
        # Original file name
        original_filename = os.path.splitext(os.path.basename(image_path))[0]

        # Append one record
        records.append({
            "Image index": idx + 1,
            "Image name": original_filename,
            "Result (0 = wrinkled, 1 = normal)": result,
            "Processing time (ms)": process_time
        })

    # Create a DataFrame and write it to an Excel file
    df = pd.DataFrame(records)
    df.to_excel(r'./zz_result.xlsx', index=False)


if __name__ == '__main__':
    main()
@@ -1,249 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/6/16 17:13
# @Author  : TG
# @File    : qt_test.py
# @Software: PyCharm

import sys
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QPixmap, QImage
import win32file
from PIL import Image
import numpy as np
import cv2
import pandas as pd
import re


class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Tomato Image Sender")
        self.setGeometry(100, 100, 800, 600)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)

        layout = QVBoxLayout()
        central_widget.setLayout(layout)

        self.image_label = QLabel()
        layout.addWidget(self.image_label)

        self.rgb_send_name = r'\\.\pipe\rgb_receive'  # send pipe name; matches the receive pipe name in main.py
        self.rgb_receive_name = r'\\.\pipe\rgb_send'  # receive pipe name; matches the send pipe name in main.py
        self.spec_send_name = r'\\.\pipe\spec_receive'  # send pipe name; matches the receive pipe name in main.py

        # Connect to the named pipes created by main.py
        self.rgb_send = win32file.CreateFile(
            self.rgb_send_name,
            win32file.GENERIC_WRITE,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

        self.rgb_receive = win32file.CreateFile(
            self.rgb_receive_name,
            win32file.GENERIC_READ,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

        self.spec_send = win32file.CreateFile(
            self.spec_send_name,
            win32file.GENERIC_WRITE,
            0,
            None,
            win32file.OPEN_EXISTING,
            0,
            None
        )

    def save_results_to_excel(self, results, file_path='./testimg_result/results.xlsx'):
        # Build a DataFrame (results is a dict)
        df = pd.DataFrame([results])

        # If the file already exists, load the old DataFrame and append the new data
        try:
            with pd.ExcelWriter(file_path, mode='a', engine='openpyxl', if_sheet_exists='overlay') as writer:
                old_df = pd.read_excel(file_path)
                new_df = pd.concat([old_df, df], ignore_index=True)
                new_df.to_excel(writer, index=False, sheet_name='Results')
            print("Results saved to Excel")
        except FileNotFoundError:
            # If the file does not exist, write a new DataFrame directly
            df.to_excel(file_path, index=False, sheet_name='Results')
            print("Results saved to Excel")
        except Exception as e:
            print(f"Saving to Excel failed, reason: {e}")


    def tryint(self, s):
        try:
            return int(s)
        except ValueError:
            return s

    def natural_sort_key(self, s):
        return [self.tryint(c) for c in re.split(r'([0-9]+)', s)]

    def send_image_group(self, image_dir):
        '''
        Send the image data.
        :param image_dir: folder containing the .bmp files
        :return:
        '''
        # Collect all .bmp files and sort them naturally
        rgb_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.bmp')]
        rgb_files.sort(key=self.natural_sort_key)

        self.send_YR()  # send the warm-up command

        total_files = len(rgb_files)
        for i in range(0, total_files, 3):
            current_batch = rgb_files[i:i + 3]  # process 3 images at a time

            for image_path in current_batch:
                img = cv2.imread(image_path, cv2.IMREAD_COLOR)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = np.asarray(img, dtype=np.uint8)

                try:
                    height = img.shape[0]
                    width = img.shape[1]
                    height = height.to_bytes(2, byteorder='big')
                    width = width.to_bytes(2, byteorder='big')
                    img_data = img.tobytes()
                    length = (len(img_data) + 6).to_bytes(4, byteorder='big')
                    cmd = 'TO'
                    data_send = length + cmd.upper().encode('ascii') + height + width + img_data
                    win32file.WriteFile(self.rgb_send, data_send)
                    print(f'Length of the image data sent: {len(data_send)}')
                except Exception as e:
                    print(f"Sending data failed. Reason: {e}")

                print(f"Currently processing image: {os.path.basename(image_path)}")  # print the name of the image being processed


            # if spec_files:
            #     spec_file = spec_files[0]
            #     with open(spec_file, 'rb') as f:
            #         spec_data = f.read()
            #
            #     try:
            #         # win32file.WriteFile(self.spec_send, len(spec_data).to_bytes(4, byteorder='big'))
            #         # print(f"Length of the spectral data sent: {len(spec_data)}")
            #         heigth = 30
            #         weight = 30
            #         bands = 224
            #         heigth = heigth.to_bytes(2, byteorder='big')
            #         weight = weight.to_bytes(2, byteorder='big')
            #         bands = bands.to_bytes(2, byteorder='big')
            #         length = (len(spec_data)+8).to_bytes(4, byteorder='big')
            #         # cmd = 'TO': test tomato data; cmd = 'PF': test passion fruit data
            #         cmd = 'TO'
            #         data_send = length + cmd.upper().encode('ascii') + heigth + weight + bands + spec_data
            #         win32file.WriteFile(self.spec_send, data_send)
            #         print(f'Length of the spectral data sent: {len(data_send)}')
            #         print(f'spec length: {len(spec_data)}')
            #     except Exception as e:
            #         print(f"Sending data failed. Reason: {e}")

            self.receive_result()

    def send_YR(self):
        '''
        Send the warm-up command.
        :return:
        '''
        length = 2
        length = length.to_bytes(4, byteorder='big')
        cmd = 'YR'
        data_send = length + cmd.upper().encode('ascii')
        try:
            win32file.WriteFile(self.rgb_send, data_send)
            print("Warm-up command sent")
        except Exception as e:
            print(f"Sending the warm-up command failed. Reason: {e}")

    def receive_result(self):
        try:
            # Read the result data
            # Read the 4-byte length field and convert it to an integer
            data_length = int.from_bytes(win32file.ReadFile(self.rgb_receive, 4)[1], byteorder='big')
            print(f"Expected data length: {data_length}")
            # Read that many bytes of data
            data = win32file.ReadFile(self.rgb_receive, data_length)[1]
            print(f"Actual data length received: {len(data)}")
            # Parse the data
            cmd_result = data[:2].decode('ascii').strip().upper()
            brix = (int.from_bytes(data[2:4], byteorder='big')) / 1000
            green_percentage = (int.from_bytes(data[4:5], byteorder='big')) / 100
            diameter = (int.from_bytes(data[5:7], byteorder='big')) / 100
            weight = int.from_bytes(data[7:8], byteorder='big')
            defect_num = int.from_bytes(data[8:10], byteorder='big')
            total_defect_area = (int.from_bytes(data[10:14], byteorder='big')) / 1000
            heigth = int.from_bytes(data[14:16], byteorder='big')
            width = int.from_bytes(data[16:18], byteorder='big')
            rp = data[18:]
            img = np.frombuffer(rp, dtype=np.uint8).reshape(heigth, width, -1)

            results = {
                'Cmd': data[:2].decode('ascii').strip().upper(),
                'Brix': (int.from_bytes(data[2:4], byteorder='big')) / 1000,
                'Green Percentage': (int.from_bytes(data[4:5], byteorder='big')) / 100,
                'Diameter': (int.from_bytes(data[5:7], byteorder='big')) / 100,
                'Weight': int.from_bytes(data[7:8], byteorder='big'),
                'Defect Number': int.from_bytes(data[8:10], byteorder='big'),
                'Total Defect Area': (int.from_bytes(data[10:14], byteorder='big')) / 1000
            }

            # Save the results to Excel
            self.save_results_to_excel(results)

            print(f"Command: {cmd_result}, Brix: {brix}, green percentage: {green_percentage}, diameter: {diameter}cm, "
                  f"estimated weight: {weight}g, defect count: {defect_num}, defect area: {total_defect_area}cm^2, result image shape: {img.shape}")


            # Show the result image
            image = Image.fromarray(img)
            qimage = QImage(image.tobytes(), image.size[0], image.size[1], QImage.Format_RGB888)
            pixmap = QPixmap.fromImage(qimage)
            self.image_label.setPixmap(pixmap)

        except Exception as e:
            print(f"Receiving data failed. Reason: {e}")

    def open_file_dialog(self):
        directory_dialog = QFileDialog()
        directory_dialog.setFileMode(QFileDialog.Directory)
        if directory_dialog.exec_():
            selected_directory = directory_dialog.selectedFiles()[0]
            self.send_image_group(selected_directory)

if __name__ == "__main__":
    '''
    1. Create the Qt application
    2. Create the main window
    3. Show the main window
    4. Open the file dialog
    5. Enter the Qt event loop
    '''
    # Run main.py first, then run qt_test.py
    # After qt_test.py starts, pick a folder; its .bmp and .raw files are read and sent to main.py
    # main.py processes the data and returns a result packet; qt_test.py receives it and shows the image
    # For a correct test, the folder should contain 5 .bmp files and 1 .raw file
    app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    main_window.open_file_dialog()
    sys.exit(app.exec_())
@@ -1,44 +0,0 @@
albumentations==1.4.11
comet_ml==3.44.1
coremltools==7.2
Flask==3.0.3
GitPython==3.1.43
ipython==8.12.3
joblib==1.2.0
matplotlib==3.7.1
mss==9.0.1
nncf==2.11.0
numpy==1.25.0
onnx==1.16.1
onnxruntime==1.18.1
onnxsim==0.4.36
opencv_contrib_python==4.10.0.84
opencv_python==4.10.0.84
opencv_python_headless==4.10.0.84
openvino==2024.2.0
pafy==0.5.5
pandas==1.5.3
Pillow==9.4.0
psutil==5.9.0
PyQt5==5.15.10
PyQt5_sip==12.11.0
pywin32==305
PyYAML==6.0
Requests==2.32.3
scikit_learn==1.2.2
scipy==1.10.1
seaborn==0.13.2
setuptools==60.2.0
tensorflow==2.17.0
tensorflowjs==4.20.0
tensorrt==10.2.0.post1
tflite_runtime==2.14.0
tflite_support==0.4.4
torch==2.0.1
torchvision==0.15.2
tqdm==4.65.2
train==0.0.5
tritonclient==2.47.0
ultralytics==8.2.48
ultralytics_thop==2.0.0
x2paddle==1.4.1
@@ -1,4 +0,0 @@
import pathlib

file_path = pathlib.Path(__file__)
ROOT_DIR = file_path.parent
@@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
# @Time    : 2024/7/7 16:33
# @Author  : TG
# @File    : totest.py
# @Software: PyCharm
import time
import cv2
import numpy as np
import os
import pandas as pd
from PIL import Image
import re
from to_seg import TOSEG


def tryint(s):
    try:
        return int(s)
    except ValueError:
        return s


def natural_sort_key(s):
    return [tryint(c) for c in re.split(r'([0-9]+)', s)]


def main():
    image_dir = r'D:\project\20240714Actual_deployed\testimg'
    to = TOSEG()  # TOSEG is a predefined class that performs image segmentation
    # Collect all .bmp files and sort them naturally
    rgb_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.endswith('.bmp')]
    rgb_files.sort(key=natural_sort_key)

    # Records to be written to Excel
    records = []

    for idx, image_path in enumerate(rgb_files):
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)

        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        t = time.time()
        result = to.toseg(img)  # the toseg method takes an image array and returns the processed image
        e = time.time()
        result = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)
        process_time = e - t
        print(f'Image {idx + 1} time: {process_time}')

        # Take the original file name and add a suffix
        original_filename = os.path.splitext(os.path.basename(image_path))[0]
        output_filename = f'{original_filename}_leaf.png'
        cv2.imwrite(os.path.join(r'D:\project\20240714Actual_deployed\leaf',
                                 output_filename), result)

        # Append one record
        records.append({
            "Image Index": idx + 1,
            "File Name": original_filename,
            "Processing Time (s)": process_time
        })

        # cv2.imshow('result', result)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

    # Create a DataFrame and write it to an Excel file
    df = pd.DataFrame(records)
    df.to_excel(r'D:\project\20240714Actual_deployed\leaf\leaf_processing_times.xlsx', index=False)


if __name__ == '__main__':
    main()
@ -699,15 +699,12 @@ class Data_processing:
|
|||||||
tuple: (长径, 短径, 缺陷区域个数, 缺陷区域总像素, 处理后的图像)
|
tuple: (长径, 短径, 缺陷区域个数, 缺陷区域总像素, 处理后的图像)
|
||||||
"""
|
"""
|
||||||
# tomato = Tomato() # 创建 Tomato 类的实例
|
# tomato = Tomato() # 创建 Tomato 类的实例
|
||||||
# img0 = img.copy()
|
|
||||||
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
|
|
||||||
img0 = img.copy()
|
img0 = img.copy()
|
||||||
|
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
|
||||||
s_l = self.tomato.extract_s_l(img)
|
s_l = self.tomato.extract_s_l(img)
|
||||||
thresholded_s_l = self.tomato.threshold_segmentation(s_l, setting.threshold_s_l)
|
thresholded_s_l = self.tomato.threshold_segmentation(s_l, setting.threshold_s_l)
|
||||||
new_bin_img = self.tomato.largest_connected_component(thresholded_s_l)
|
new_bin_img = self.tomato.largest_connected_component(thresholded_s_l)
|
||||||
#下面为20240714工控机实际运行代码,缺少两个参数传递
|
white_defect = self.tomato.threshold_lab_a_and_high_gray(img)
|
||||||
# white_defect = self.tomato.threshold_lab_a_and_high_gray(img)
|
|
||||||
white_defect = self.tomato.threshold_lab_a_and_high_gray(img, lower_threshold=115, upper_threshold=135)
|
|
||||||
new_bin_and_white_defect_img = cv2.bitwise_and(new_bin_img, white_defect)
|
new_bin_and_white_defect_img = cv2.bitwise_and(new_bin_img, white_defect)
|
||||||
filled_img, defect = self.fill_holes(new_bin_and_white_defect_img)
|
filled_img, defect = self.fill_holes(new_bin_and_white_defect_img)
|
||||||
# 绘制西红柿边缘并获取缺陷信息
|
# 绘制西红柿边缘并获取缺陷信息
|
||||||
@ -754,15 +751,7 @@ class Data_processing:
         # total_pixels = 0
         # rp = cv2.cvtColor(np.ones((setting.n_rgb_rows, setting.n_rgb_cols, setting.n_rgb_bands),
         #                           dtype=np.uint8), cv2.COLOR_BGR2RGB)
-        white_defect = cv2.cvtColor(white_defect, cv2.COLOR_GRAY2RGB)
-        new_bin_img = cv2.cvtColor(new_bin_img, cv2.COLOR_GRAY2RGB)
-        new_bin_and_white_defect_img = cv2.cvtColor(new_bin_and_white_defect_img, cv2.COLOR_GRAY2RGB)
-        filled_img = cv2.cvtColor(filled_img, cv2.COLOR_GRAY2RGB)
-        safe_area = cv2.cvtColor(safe_area, cv2.COLOR_GRAY2RGB)
-        filled_defect = cv2.cvtColor(filled_defect, cv2.COLOR_GRAY2RGB)
+        return diameter, green_percentage, total_pixels, rp

-        return diameter, green_percentage, total_pixels, rp, white_defect, new_bin_img, new_bin_and_white_defect_img, filled_img, safe_area, filled_defect

     def analyze_passion_fruit(self, img):
         if img is None:
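One way to avoid maintaining two return shapes (the ten-element debug tuple on the removed side versus the four-element production tuple that is kept) is to gate the extra masks behind an explicit flag. A minimal sketch of that pattern, not the project's current API; package_analysis and its parameters are hypothetical names.

from typing import Dict

import numpy as np


def package_analysis(diameter: float, green_percentage: float,
                     total_pixels: int, rp: np.ndarray,
                     debug_masks: Dict[str, np.ndarray] = None,
                     return_debug: bool = False):
    """Always return the production 4-tuple; append the intermediate masks
    (white_defect, filled_img, safe_area, ...) only when explicitly asked."""
    base = (diameter, green_percentage, total_pixels, rp)
    if return_debug and debug_masks:
        return base + (debug_masks,)
    return base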
@ -816,7 +805,7 @@ class Data_processing:
         return diameter, green_percentage, weight, total_pixels, rp

     def process_data(seif, cmd: str, images: list, spec: any, pipe: Pipe, detector: Spec_predict,
-                     to: Detector_to, impf: ImageClassifier, q) -> bool:
+                     to: Detector_to, impf: ImageClassifier) -> bool:
         """
         Process the incoming command

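The dropped q argument is only used to name the debug image dumps removed in the next hunk, so the production signature no longer needs it. Separately, two small readability issues here are unrelated to the change itself: the receiver is spelled seif rather than self, and spec: any annotates with the builtin any() function instead of typing.Any, so the hint is meaningless to type checkers. A cleanup sketch, purely a suggestion and not part of either side of the diff; the class name and Any placeholders are hypothetical stand-ins for the project types.

from typing import Any, List


class DataProcessingSketch:
    """Illustrative only; mirrors the shape of process_data above."""

    def process_data(self, cmd: str, images: List[Any], spec: Any,
                     pipe: Any, detector: Any, to: Any, impf: Any) -> bool:
        # self instead of seif, typing.Any instead of the builtin any
        return bool(cmd and images)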
@ -835,16 +824,8 @@ class Data_processing:
         for i, img in enumerate(images):
             if cmd == 'TO':
                 # tomato
-                diameter, green_percentage, total_pixels, rp, white_defect, new_bin_img, new_bin_and_white_defect_img, filled_img, safe_area, filled_defect = seif.analyze_tomato(img)
+                diameter, green_percentage, total_pixels, rp = seif.analyze_tomato(img)
                 posun_num = to.run(img)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_white_defect.png', white_defect)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_new_bin_img.png', new_bin_img)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_white_defect.png', new_bin_and_white_defect_img)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_filled_img.png', filled_img)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_filled_defect.png', filled_defect)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_safe_area.png', safe_area)
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_nogreen.png', cv2.cvtColor(rp, cv2.COLOR_RGB2BGR))
-                cv2.imwrite(f'./testimg_result/{q}_{i+1}_img.png', cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
                 # print(f'Damage check: {posun_num}')
                 if i <= 2:
                     diameter_axis_list.append(diameter)
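Worth noting on the removed side: two of the dumps write to the same {q}_{i+1}_white_defect.png path, so the white_defect image is silently overwritten by new_bin_and_white_defect_img. If the dumps are ever reinstated, a small helper keyed by variable name avoids both the filename collision and the need to thread q through the public signature. A minimal sketch under those assumptions; save_debug_masks, out_dir and the enabled flag are hypothetical.

import os

import cv2


def save_debug_masks(masks: dict, fruit_idx: int, image_idx: int,
                     out_dir: str = './testimg_result', enabled: bool = False):
    """Write each named mask to <out_dir>/<fruit>_<image>_<name>.png.
    Keying files by the dict key prevents two masks sharing one filename."""
    if not enabled:
        return
    os.makedirs(out_dir, exist_ok=True)
    for name, mask in masks.items():
        cv2.imwrite(os.path.join(out_dir, f'{fruit_idx}_{image_idx}_{name}.png'), mask)


# usage, mirroring the removed lines above:
# save_debug_masks({'white_defect': white_defect,
#                   'new_bin_and_white_defect_img': new_bin_and_white_defect_img,
#                   'filled_img': filled_img,
#                   'safe_area': safe_area},
#                  fruit_idx=q, image_idx=i + 1, enabled=True)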
@ -909,11 +890,3 @@ class Data_processing:
         return response


-# if __name__ == "__main__":
-# img = cv2.imread(r'D:\project\20240714Actual_deployed\testimg\4-10-3.bmp')
-# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-# data = Data_processing()
-# diameter, green_percentage, total_pixels, rp, white_defect, new_bin_img, new_bin_and_white_defect_img, filled_img, safe_area, filled_defect = data.analyze_tomato(img)
-# cv2.imshow("area", safe_area)
-# cv2.waitKey(0)
-# cv2.destroyAllWindows()
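The commented-out block removed here was the only ad-hoc way to eyeball analyze_tomato on a single image, and it still expected the ten-element debug tuple. An equivalent standalone check against the production 4-tuple return, as a sketch only; the module name data_processing is assumed, the image path is taken from the removed block, and a no-argument Data_processing() constructor is assumed because the removed block used one.

import cv2

from data_processing import Data_processing   # assumed module name


if __name__ == '__main__':
    img = cv2.imread(r'D:\project\20240714Actual_deployed\testimg\4-10-3.bmp')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    dp = Data_processing()
    diameter, green_percentage, total_pixels, rp = dp.analyze_tomato(img)
    print(f'diameter={diameter}, green%={green_percentage}, defect_pixels={total_pixels}')
    cv2.imshow('rp', cv2.cvtColor(rp, cv2.COLOR_RGB2BGR))
    cv2.waitKey(0)
    cv2.destroyAllWindows()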
@ -106,7 +106,7 @@ def main(is_debug=False):
             # print(f'Received spectral data for fruit {q}, shape: {spec.shape}')
             # data-processing section
             if images:  # make sure images is not empty
-                response = dp.process_data(cmd, images, spec, pipe, detector, to, impf, q)
+                response = dp.process_data(cmd, images, spec, pipe, detector, to, impf)
                 if response:
                     logging.info(f'Processing succeeded, response: {response}')
                 else:
@ -119,7 +119,6 @@ def main(is_debug=False):
             q += 1


-
 if __name__ == '__main__':
     '''
     Python and Qt communicate over a Windows named pipe; the data stream follows the agreed communication protocol
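The docstring above only states that the Python side talks to the Qt front end over a Windows named pipe. For reference, a Python client can attach to an existing pipe with nothing but the standard library; a minimal sketch, assuming the Qt side has already created a duplex pipe. The pipe name \\.\pipe\fruit_grader and the 4-byte length-prefixed framing are hypothetical, not the project's actual protocol.

import struct

PIPE_PATH = r'\\.\pipe\fruit_grader'   # hypothetical pipe name


def _read_exact(pipe, n: int) -> bytes:
    """Read exactly n bytes from the pipe (a raw read may return fewer)."""
    buf = b''
    while len(buf) < n:
        chunk = pipe.read(n - len(buf))
        if not chunk:
            raise ConnectionError('named pipe closed by the Qt side')
        buf += chunk
    return buf


def send_and_receive(payload: bytes) -> bytes:
    """Attach to an existing named pipe, write one length-prefixed message
    and read one length-prefixed reply (illustrative framing only)."""
    with open(PIPE_PATH, 'r+b', buffering=0) as pipe:
        pipe.write(struct.pack('<I', len(payload)) + payload)
        (length,) = struct.unpack('<I', _read_exact(pipe, 4))
        return _read_exact(pipe, length)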
Before Width: | Height: | Size: 1.5 MiB  After Width: | Height: | Size: 1.5 MiB