mingyang91 committed: add crack split.py

Changed files:
- .idea/workspace.xml      +77  -26
- demo.py                  +7   -5
- models/__init__.py       +1   -0
- models/tools/__init__.py +1   -0
- models/tools/draw.py     +65  -0
- models/tools/split.py    +175 -0
- models/yolo_crack.py     +295 -0
- yolo_model.py            +6   -47
.idea/workspace.xml
CHANGED
@@ -4,10 +4,12 @@
     <option name="autoReloadType" value="SELECTIVE" />
   </component>
   <component name="ChangeListManager">
-    <list default="true" id="d7806539-b6d6-42e7-bb45-1565f5d54891" name="Changes" comment="
+    <list default="true" id="d7806539-b6d6-42e7-bb45-1565f5d54891" name="Changes" comment="add crack split.py">
+      <change afterPath="$PROJECT_DIR$/models/tools/draw.py" afterDir="false" />
       <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
       <change beforePath="$PROJECT_DIR$/demo.py" beforeDir="false" afterPath="$PROJECT_DIR$/demo.py" afterDir="false" />
-      <change beforePath="$PROJECT_DIR$/
+      <change beforePath="$PROJECT_DIR$/models/yolo_crack.py" beforeDir="false" afterPath="$PROJECT_DIR$/models/yolo_crack.py" afterDir="false" />
+      <change beforePath="$PROJECT_DIR$/yolo_model.py" beforeDir="false" afterPath="$PROJECT_DIR$/yolo_model.py" afterDir="false" />
     </list>
     <option name="SHOW_DIALOG" value="false" />
     <option name="HIGHLIGHT_CONFLICTS" value="true" />
@@ -56,37 +58,41 @@
     <option name="hideEmptyMiddlePackages" value="true" />
     <option name="showLibraryContents" value="true" />
   </component>
-  <component name="PropertiesComponent"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  <component name="PropertiesComponent"><![CDATA[{
+  "keyToString": {
+    "Python.demo.executor": "Debug",
+    "Python.evaluator.executor": "Debug",
+    "Python.extract.executor": "Run",
+    "Python.streamlit.executor": "Run",
+    "Python.yolo_crack.executor": "Run",
+    "Python.yolo_dataset.executor": "Run",
+    "RunOnceActivity.OpenProjectViewOnStart": "true",
+    "RunOnceActivity.ShowReadmeOnStart": "true",
+    "git-widget-placeholder": "main",
+    "last_opened_file_path": "/Users/famer.me/PycharmProjects/detector/tools",
+    "node.js.detected.package.eslint": "true",
+    "node.js.detected.package.tslint": "true",
+    "node.js.selected.package.eslint": "(autodetect)",
+    "node.js.selected.package.tslint": "(autodetect)",
+    "nodejs_package_manager_path": "npm",
+    "settings.editor.selected.configurable": "settings.qodana",
+    "vue.rearranger.settings.migration": "true"
   }
-}
+}]]></component>
   <component name="RecentsManager">
     <key name="CopyFile.RECENT_KEYS">
+      <recent name="$PROJECT_DIR$/tools" />
+      <recent name="$PROJECT_DIR$/datasets" />
       <recent name="$PROJECT_DIR$/tests" />
       <recent name="$PROJECT_DIR$" />
     </key>
     <key name="MoveFile.RECENT_KEYS">
+      <recent name="$PROJECT_DIR$/models" />
       <recent name="$PROJECT_DIR$/tests" />
       <recent name="$PROJECT_DIR$" />
     </key>
   </component>
-  <component name="RunManager" selected="Python.
+  <component name="RunManager" selected="Python.yolo_crack">
     <configuration name="demo" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
       <module name="detector" />
       <option name="ENV_FILES" value="" />
@@ -157,6 +163,29 @@
       <option name="INPUT_FILE" value="" />
       <method v="2" />
     </configuration>
+    <configuration name="yolo_crack" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
+      <module name="detector" />
+      <option name="ENV_FILES" value="" />
+      <option name="INTERPRETER_OPTIONS" value="" />
+      <option name="PARENT_ENVS" value="true" />
+      <envs>
+        <env name="PYTHONUNBUFFERED" value="1" />
+      </envs>
+      <option name="SDK_HOME" value="" />
+      <option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
+      <option name="IS_MODULE_SDK" value="true" />
+      <option name="ADD_CONTENT_ROOTS" value="true" />
+      <option name="ADD_SOURCE_ROOTS" value="true" />
+      <EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
+      <option name="SCRIPT_NAME" value="$PROJECT_DIR$/models/yolo_crack.py" />
+      <option name="PARAMETERS" value="" />
+      <option name="SHOW_COMMAND_LINE" value="false" />
+      <option name="EMULATE_TERMINAL" value="false" />
+      <option name="MODULE_MODE" value="false" />
+      <option name="REDIRECT_INPUT" value="false" />
+      <option name="INPUT_FILE" value="" />
+      <method v="2" />
+    </configuration>
     <configuration name="yolo_dataset" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
       <module name="detector" />
       <option name="ENV_FILES" value="" />
@@ -199,12 +228,14 @@
       <list>
         <item itemvalue="FastAPI.detector" />
         <item itemvalue="Python.streamlit" />
+        <item itemvalue="Python.yolo_crack" />
         <item itemvalue="Python.demo" />
         <item itemvalue="Python.evaluator" />
         <item itemvalue="Python.yolo_dataset" />
       </list>
       <recent_temporary>
         <list>
+          <item itemvalue="Python.yolo_crack" />
           <item itemvalue="Python.demo" />
           <item itemvalue="Python.evaluator" />
           <item itemvalue="Python.yolo_dataset" />
@@ -239,7 +270,8 @@
       <workItem from="1706274709532" duration="27000" />
       <workItem from="1706445222599" duration="1814000" />
       <workItem from="1706449264770" duration="4974000" />
-      <workItem from="1706517295469" duration="
+      <workItem from="1706517295469" duration="2595000" />
+      <workItem from="1706855634459" duration="14109000" />
     </task>
     <task id="LOCAL-00001" summary="init commit">
       <option name="closed" value="true" />
@@ -473,7 +505,23 @@
       <option name="project" value="LOCAL" />
       <updated>1706517885858</updated>
     </task>
-    <
+    <task id="LOCAL-00030" summary="filter out macos cache files">
+      <option name="closed" value="true" />
+      <created>1706519049182</created>
+      <option name="number" value="00030" />
+      <option name="presentableId" value="LOCAL-00030" />
+      <option name="project" value="LOCAL" />
+      <updated>1706519049183</updated>
+    </task>
+    <task id="LOCAL-00031" summary="add crack split.py">
+      <option name="closed" value="true" />
+      <created>1706866629031</created>
+      <option name="number" value="00031" />
+      <option name="presentableId" value="LOCAL-00031" />
+      <option name="project" value="LOCAL" />
+      <updated>1706866629031</updated>
+    </task>
+    <option name="localTasksCounter" value="32" />
     <servers />
   </component>
   <component name="TypeScriptGeneratedFilesManager">
@@ -518,7 +566,9 @@
     <MESSAGE value="IoU evaluator" />
     <MESSAGE value="Update UI" />
    <MESSAGE value="fix runtime error in coco evaluator" />
-    <
+    <MESSAGE value="filter out macos cache files" />
+    <MESSAGE value="add crack split.py" />
+    <option name="LAST_COMMIT_MESSAGE" value="add crack split.py" />
   </component>
   <component name="XDebuggerManager">
     <breakpoint-manager>
@@ -545,7 +595,8 @@
     <SUITE FILE_PATH="coverage/detector$yolo_dataset.coverage" NAME="yolo_dataset Coverage Results" MODIFIED="1705852113469" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
     <SUITE FILE_PATH="coverage/detector$evaluator.coverage" NAME="evaluator Coverage Results" MODIFIED="1706107083258" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
     <SUITE FILE_PATH="coverage/detector$demo.coverage" NAME="demo Coverage Results" MODIFIED="1706108414052" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
-    <SUITE FILE_PATH="coverage/detector$streamlit.coverage" NAME="streamlit Coverage Results" MODIFIED="
+    <SUITE FILE_PATH="coverage/detector$streamlit.coverage" NAME="streamlit Coverage Results" MODIFIED="1706949799318" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="" />
+    <SUITE FILE_PATH="coverage/detector$yolo_crack.coverage" NAME="yolo_crack Coverage Results" MODIFIED="1706950034213" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
     <SUITE FILE_PATH="coverage/detector$extract.coverage" NAME="yolo_dataset Coverage Results" MODIFIED="1705764465837" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
   </component>
 </project>
demo.py
CHANGED
@@ -4,6 +4,7 @@ import time
 from functools import wraps
 from io import StringIO
 from zipfile import ZipFile
+from tempfile import mktemp
 
 import streamlit as st
 from PIL import Image
@@ -11,9 +12,10 @@ from PIL import Image
 import evaluator
 from yolo_dataset import YoloDataset
 from yolo_model import YoloModel
+from models.yolo_crack import YoloModel as CrackModel
 
 fire_and_smoke = YoloModel("SHOU-ISD/fire-and-smoke", "yolov8n.pt")
-crack =
+crack = CrackModel("SHOU-ISD/yolo-cracks", "last4.pt", "SHOU-ISD/yolo-cracks", "best.pt")
 coco = YoloModel("ultralyticsplus/yolov8s", "yolov8s.pt")
 
 
@@ -71,15 +73,15 @@ def detect(model: YoloModel):
 
     if buffer:
         # Object Detecting
-        with st.spinner('Wait for it...'):
+        with (st.spinner('Wait for it...')):
             # Slider for changing confidence
             confidence = st.slider('Confidence Threshold', 0, 100, 30)
 
             # Calculating time for detection
             t1 = time.time()
-
-
-            res_img = model.preview_detect(
+            filename = mktemp(suffix=buffer.name)
+            Image.open(buffer).save(filename)
+            res_img = model.preview_detect(filename, confidence / 100.0)
             t2 = time.time()
 
             # Displaying the image
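The change above writes the uploaded buffer to a temporary file before calling preview_detect because the crack pipeline reads its input from a path (its segmentation step uses cv2.imread). A minimal sketch of that hand-off outside Streamlit, assuming a hypothetical file-like upload object that has a name attribute:

from tempfile import mktemp
from PIL import Image

def buffer_to_path(upload) -> str:
    # Persist an in-memory upload to disk so path-based readers such as cv2.imread can open it.
    # mktemp only generates a name; tempfile.NamedTemporaryFile(delete=False) would avoid the race if that matters.
    filename = mktemp(suffix=upload.name)
    Image.open(upload).save(filename)
    return filename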
models/__init__.py
ADDED
@@ -0,0 +1 @@
from .yolo_crack import *
models/tools/__init__.py
ADDED
@@ -0,0 +1 @@
from .split import *
models/tools/draw.py
ADDED
@@ -0,0 +1,65 @@
import random
from PIL import ImageDraw


def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """
    Helper Functions for Plotting BBoxes
    :param x:
    :param img:
    :param color:
    :param label:
    :param line_thickness:
    :return:
    """
    width, height = img.size
    tl = line_thickness or round(0.002 * (width + height) / 2) + 1  # line/font thickness
    color = color or (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    img_draw = ImageDraw.Draw(img)
    img_draw.rectangle((c1[0], c1[1], c2[0], c2[1]), outline=color, width=tl)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        x1, y1, x2, y2 = img_draw.textbbox(c1, label, stroke_width=tf)
        img_draw.rectangle((x1, y1, x2, y2), fill=color)
        img_draw.text((x1, y1), label, fill=(255, 255, 255))


def add_bboxes(pil_img, result, confidence=0.6):
    """
    Plotting Bounding Box on img
    :param pil_img:
    :param result:
    :param confidence:
    :return:
    """
    for box in result.boxes:
        [cl] = box.cls.tolist()
        [conf] = box.conf.tolist()
        if conf < confidence:
            continue
        [rect] = box.xyxy.tolist()
        text = f'{result.names[cl]}: {conf: 0.2f}'
        plot_one_box(x=rect, img=pil_img, label=text)

    return pil_img


def add_bboxes2(pil_img, result, confidence=0.6):
    """
    Plotting Bounding Box on img
    :param pil_img:
    :param result:
    :param confidence:
    :return:
    """
    for box in result['boxes']:
        cl = box['cls']
        conf = box['conf']
        if conf < confidence:
            continue
        rect = box['xyxy']
        text = f'{cl}: {conf: 0.2f}'
        plot_one_box(x=rect, img=pil_img, label=text)

    return pil_img
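plot_one_box draws one rectangle plus its label onto a PIL image in place; add_bboxes consumes an ultralytics Results object, while add_bboxes2 consumes the plain dict format produced by models/yolo_crack.py. A minimal usage sketch, assuming a hypothetical local image test.jpg and any detection checkpoint:

from PIL import Image
from ultralytics import YOLO
from models.tools.draw import add_bboxes

model = YOLO("yolov8s.pt")          # any detection checkpoint
img = Image.open("test.jpg")        # hypothetical input image
for result in model(source=img):    # one Results object per input image
    img = add_bboxes(img, result, confidence=0.5)
img.show()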
models/tools/split.py
ADDED
@@ -0,0 +1,175 @@
import json
import copy
from typing import Optional
from PIL import Image

bbox = [float, float, float, float]

annotation = {
    "id": int,
    "image_id": int,
    "category_id": int,
    "bbox": bbox,
    "ignore": int,
    "iscrowd": int,
    "area": float,
}

small_image = {
    "image": Image,
    "area": bbox
}


def split_image(image: Image,
                hint_size_min: tuple[int, int],
                hint_size_max: tuple[int, int],
                overlap: float = 0.1) -> list[small_image]:
    """
    Given an image and a hint size, split the image into a list of images.
    New images are overlapped with other images by the overlap ratio.
    :param image: The image to split. typically a large image. 1kx1k ~ 10kx10k
    :param hint_size_min: The minimum size of the output image.
    :param hint_size_max: The maximum size of the output image.
    :param overlap: The overlap ratio of the output image.
    :return: A list of images.
    """
    Wi, Hi = image.size
    Wmin, Hmin = hint_size_min
    Wmax, Hmax = hint_size_max
    assert Wmin <= Wmax <= Wi
    assert Hmin <= Hmax <= Hi
    w_search = search(Wi, Wmin, Wmax, overlap)
    h_search = search(Hi, Hmin, Hmax, overlap)
    if w_search is None or h_search is None:
        raise ValueError('The image is too small to split.')
    w_count, output_width, last_output_width, width_overlap = w_search
    h_count, output_height, last_output_height, height_overlap = h_search
    images = []
    for h_index in range(h_count):
        h = h_index * (output_height - height_overlap)
        for w_index in range(w_count):
            w = w_index * (output_width - width_overlap)
            small = {
                "image": image.crop((w, h, w + output_width, h + output_height)),
                "area": (w, h, output_width, output_height)
            }
            images.append(small)
        if last_output_width > 0:
            w = Wi - output_width
            small = {
                "image": image.crop((w, h, w + output_width, h + output_height)),
                "area": (w, h, output_width, output_height)
            }
            images.append(small)
    return images


def search(input: int,
           output_min: int,
           output_max: int,
           overlap: float) -> Optional[tuple[int, int, int, int]]:
    """
    example 1:
        input: 8000, output: 1000, overlap: 0.1
        8000 // (1000 - 100) = 8
        8000 % (1000 - 100) = 800
        count = 8, output = 1000, last_output = 800, overlap_pixels = 100

    example 2:
        input: 7200, output: 800, overlap: 0.1
        7200 // (800 - 80) = 10
        7200 % (800 - 80) = 0
        count = 10, output = 800, last_output = 0, overlap_pixels = 80

    :param input: The length of the input image.
    :param output_min: The minimum length of the output image.
    :param output_max: The maximum length of the output image.
    :param overlap: The overlap ratio of the output image.
    :return: A tuple of (count, output, last_output, overlap_pixels).
    """

    for output in range(output_max, output_min - 1, -1):
        overlap_pixels = int(output * overlap)
        last_output = input % (output - overlap_pixels)
        if last_output == 0 or output_min <= last_output <= output_max:
            count = input // (output - overlap_pixels)
            return count, output, last_output, overlap_pixels

    return None


def box_intersected(box1: bbox, box2: bbox) -> bool:
    """
    Check if two boxes are intersected.
    :param box1: The first box.
    :param box2: The second box.
    :return: True if the two boxes are intersected.
    """
    x1, y1, w1, h1 = box1
    x2, y2, w2, h2 = box2
    return x1 < x2 + w2 and x2 < x1 + w1 and y1 < y2 + h2 and y2 < y1 + h1


def fit_in_area(annotations: list[annotation], in_area: bbox) -> list[annotation]:
    result = []
    for old in annotations:
        ann = copy.deepcopy(old)
        result.append(ann)
        x, y, w, h = ann["bbox"]
        if x < in_area[0]:
            ann["bbox"][0] = 0
        else:
            ann["bbox"][0] -= in_area[0]
        if y < in_area[1]:
            ann["bbox"][1] = 0
        else:
            ann["bbox"][1] -= in_area[1]
        if x + w > in_area[0] + in_area[2]:
            ann["bbox"][2] = in_area[2] - ann["bbox"][0]
        if y + h > in_area[1] + in_area[3]:
            ann["bbox"][3] = in_area[3] - ann["bbox"][1]
    return result


small_image_with_labels = {
    "image": Image,
    "area": bbox,
    "labels": list[annotation]
}


def split_image_with_labels(image: Image,
                            labels: list[annotation],
                            hint_size_min: tuple[int, int],
                            hint_size_max: tuple[int, int],
                            overlap: float = 0.1) -> list[small_image]:
    small_imgs = split_image(image, hint_size_min, hint_size_max, overlap)
    result = []
    for small_img in small_imgs:
        small_labels = [ann for ann in labels if box_intersected(ann["bbox"], small_img["area"])]
        small_labels = fit_in_area(small_labels, small_img["area"])
        result.append({
            "image": small_img["image"],
            "area": small_img["area"],
            "labels": small_labels
        })
    return result


def main():
    image = Image.open('../datasets/Das3300161.jpg')
    small_imgs = split_image(image, (800, 800), (1000, 1000), 0.1)
    labels = json.load(open('../datasets/result.json'))
    annotations = list(filter(lambda ann: ann["image_id"] == 28, labels["annotations"]))
    for small_img in small_imgs:
        small_labels = [ann for ann in annotations if box_intersected(ann["bbox"], small_img["area"])]
        small_labels = fit_in_area(small_labels, small_img["area"])
        # save small_labels to json
        json.dump(small_labels, open('datasets/' + str(small_img["area"]) + '.json', 'w'))
        # save small_image["image"] to file
        small_img["image"].save('datasets/' + str(small_img["area"]) + '.jpg')


if __name__ == '__main__':
    main()
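search walks candidate tile lengths down from output_max to output_min and keeps the first length whose stride (length minus the overlap in pixels) either divides the input exactly or leaves a remainder that is itself within the allowed range; split_image then crops on that grid and appends one extra right-edge crop per row whenever there is a width remainder. A minimal sketch of the arithmetic, matching example 1 in the docstring (the blank 8000 px image is a hypothetical stand-in for a real photo):

from PIL import Image
from models.tools.split import search, split_image

# 8000 px at 10% overlap: stride = 1000 - 100 = 900, 8000 // 900 = 8 tiles, remainder 800,
# and 800 is itself an acceptable tile length, so the search stops at output = 1000.
print(search(8000, 800, 1000, 0.1))   # -> (8, 1000, 800, 100)

big = Image.new("L", (8000, 8000))    # hypothetical stand-in for a large road image
tiles = split_image(big, (800, 800), (1000, 1000), overlap=0.1)
print(len(tiles), tiles[0]["area"])   # 8 rows x (8 crops + 1 edge crop) = 72 tiles, first area (0, 0, 1000, 1000)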
models/yolo_crack.py
ADDED
@@ -0,0 +1,295 @@
from models.tools import split
from PIL import Image
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

import torch
import cv2
import numpy as np
import math

from models.tools.draw import add_bboxes2


class YoloModel:
    def __init__(self, seg_repo_name: str, seg_file_name: str, det_repo_name: str, det_file_name: str):
        seg_weight_file = YoloModel.download_weight_file(seg_repo_name, seg_file_name)
        det_weight_file = YoloModel.download_weight_file(det_repo_name, det_file_name)
        self.seg_model = YOLO(seg_weight_file)
        self.det_model = YOLO(det_weight_file)

    @staticmethod
    def download_weight_file(repo_name: str, file_name: str):
        return hf_hub_download(repo_name, file_name)

    def preview_detect(self, im, confidence):
        results = self.detect(im)
        res_img = Image.open(im)
        res = {
            'boxes': [
                {
                    'xyxy': [x1, y1, x2, y2],
                    'cls': cls,
                    'conf': conf
                } for x1, y1, x2, y2, conf, cls in results
            ]
        }
        res_img = add_bboxes2(res_img, res, confidence)
        return res_img

    def detect(self, source):
        pred_bbox_list = []  # initialize the bbox list for this image
        threshold = 50  # tentative bbox merge threshold of 50; can be adapted to user needs later
        strategy = "distance"  # tentative bbox merge strategy: distance

        seg_img_list = self._seg_ori_img(source)  # segment the road surface in this image
        assert len(seg_img_list) == 1, "seg_img_list out of range"
        road_img = Image.fromarray(cv2.cvtColor(seg_img_list[0], cv2.COLOR_BGR2RGB))
        small_imgs = split.split_image(road_img, (640, 640), (1080, 1080), 0.1)  # split the road image into small tiles
        num = 0
        for small_img in small_imgs:
            num += 1
            results = self.det_model(source=small_img["image"])
            for result in results:
                temp_bbox_list = result.boxes.xyxy  # bbox coordinates of the detections (xyxy format here)
                w_bias = small_img["area"][0]
                h_bias = small_img["area"][1]
                temp_bbox_list = self._bbox_map(temp_bbox_list, w_bias, h_bias)  # map bbox coordinates back to the original full-image coordinate system
                temp_bbox_cls = result.boxes.cls  # classes of the detections
                temp_bbox_conf = result.boxes.conf  # confidences of the detections
                assert len(temp_bbox_list) == len(temp_bbox_cls) == len(
                    temp_bbox_conf), 'different number of matrix size'
                for i in range(len(temp_bbox_list)):  # pack bbox, conf and class into one array
                    temp_bbox_list[i].append(temp_bbox_conf[i])
                    temp_bbox_list[i].append(temp_bbox_cls[i])
                pred_bbox_list += temp_bbox_list  # collect the detections (bbox, conf, class) of all tiles of this large image into one list
        pred_bbox_list = self._merge_box(pred_bbox_list, threshold, strategy=strategy)  # analyse and merge the bboxes with the selected algorithm

        return pred_bbox_list

    def _seg_ori_img(self, source):
        """
        Segment the asphalt pavement region in the original image.
        :param source: image path
        :return: the segmented pavement images (same size as the original; non-pavement regions filled with white)
        """
        ori_img = cv2.imread(source)
        ori_size = ori_img.shape
        results = self.seg_model(source=source)
        seg_img_list = []

        for result in results:
            if result.masks is not None and len(result.masks) > 0:  # pavement detected
                masks_data = result.masks.data
                obj_masks = masks_data[:]
                road_mask = torch.any(obj_masks, dim=0).int() * 255
                mask = road_mask.cpu().numpy()
                Mask = mask.astype(np.uint8)
                mask_res = cv2.resize(Mask, (ori_size[1], ori_size[0]), interpolation=cv2.INTER_CUBIC)

            else:  # no pavement detected: fall back to an all-black mask
                mask_res = np.zeros((ori_size[0], ori_size[1], 3), dtype=np.uint8)

            mask_region = mask_res == 0
            ori_img[mask_region] = 255  # mask value 0 is background; paint it white (255)
            seg_img_list.append(ori_img)

        return seg_img_list

    def _bbox_map(self, bbox_list, w, h):
        """
        Map bbox coordinates from a tile back to the original image.
        :param bbox_list: bbox array from the tile
        :param w: tile offset w in the original image
        :param h: tile offset h in the original image
        :return: the bbox array in original-image coordinates
        """
        if isinstance(bbox_list, torch.Tensor):
            bbox_list = bbox_list.tolist()
        for bbox in bbox_list:
            bbox[0] += w
            bbox[1] += h
            bbox[2] += w
            bbox[3] += h

        return bbox_list

    def _xywh2xyxy(self, box_list):
        """
        YOLO labels: convert xywh to xyxy.
        :param box_list: bbox array (xywh)
        :return: bbox array (xyxy)
        """
        new_box_list = []
        for box in box_list:
            x1 = box[0] - box[2] / 2
            y1 = box[1] - box[3] / 2
            x2 = box[0] + box[2] / 2
            y2 = box[1] + box[3] / 2
            new_box_list.append([x1, y1, x2, y2])

        return new_box_list

    def _xyxy2xywh(self, box_list):
        """
        YOLO labels: convert xyxy to xywh.
        :param box_list: bbox array (xyxy)
        :return: bbox array (xywh)
        """
        new_box_list = []
        for box in box_list:
            x1 = (box[0] + box[2]) / 2
            y1 = (box[1] + box[3]) / 2
            w = (box[2] - box[0])
            h = (box[3] - box[1])
            new_box_list.append([x1, y1, w, h])

        return new_box_list

    def _nor2std(self, box_list, img_w, img_h):
        """
        YOLO labels: map normalized coordinates to the original image.
        :param box_list: bbox array (normalized)
        :param img_w: original image width
        :param img_h: original image height
        :return: bbox array (coordinates in the original image)
        """
        for box in box_list:
            box[0] *= img_w
            box[1] *= img_h
            box[2] *= img_w
            box[3] *= img_h

    def _std2nor(self, box_list, img_w, img_h):
        """
        YOLO labels: convert original-image coordinates to normalized coordinates.
        :param box_list: bbox array (std)
        :param img_w: original image width
        :param img_h: original image height
        :return: bbox array (normalized coordinates)
        """
        for box in box_list:
            box[0] /= img_w
            box[1] /= img_h
            box[2] /= img_w
            box[3] /= img_h

    def _judge_merge_by_center_distance(self, center_box1, center_box2, distance_threshold):
        """
        Decide whether to merge two bboxes based on the distance between their centers.
        :param center_box1: center coordinates of box1
        :param center_box2: center coordinates of box2
        :param distance_threshold: distance threshold
        :return: merge (True) if the distance is below the threshold; otherwise ignore (False)
        """
        x1 = center_box1[0]
        x2 = center_box2[0]
        y1 = center_box1[1]
        y2 = center_box2[1]
        distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        if distance < distance_threshold:
            return True
        else:
            return False

    def _judge_merge_by_overlap_area(self, std_box1, std_box2, overlap_threshold):
        """
        Decide whether to merge two bboxes based on their intersection area.
        :param std_box1: standard coordinates of box1
        :param std_box2: standard coordinates of box2
        :param overlap_threshold: intersection-area threshold
        :return: merge (True) if the intersection area exceeds the threshold; otherwise ignore (False)
        """
        x1 = max(std_box1[0], std_box2[0])
        y1 = max(std_box1[1], std_box2[1])
        x2 = min(std_box1[2], std_box2[2])
        y2 = min(std_box1[3], std_box2[3])
        width = max(0, x2 - x1)
        height = max(0, y2 - y1)
        area = width * height
        if area < overlap_threshold:
            return False
        else:
            return True

    def _basic_merge(self, box1, box2):
        """
        Merge two boxes and produce the new box coordinates.
        :param box1: box1 coordinates (std)
        :param box2: box2 coordinates (std)
        :return: new box coordinates (std)
        """
        x11 = box1[0]
        y11 = box1[1]
        x12 = box1[2]
        y12 = box1[3]
        x21 = box2[0]
        y21 = box2[1]
        x22 = box2[2]
        y22 = box2[3]
        new_x1 = min(x11, x12, x21, x22)
        new_y1 = min(y11, y12, y21, y22)
        new_x2 = max(x11, x12, x21, x22)
        new_y2 = max(y11, y12, y21, y22)
        assert len(box1) == len(box2), 'box1 and box2 has different size'
        if len(box1) == 6:  # the boxes carry conf and class: [x1, y1, x2, y2, conf, class]
            avg_conf = (box1[4] + box2[4]) / 2
            clas = box1[5]
            new_box = [new_x1, new_y1, new_x2, new_y2, avg_conf, clas]
        else:
            new_box = [new_x1, new_y1, new_x2, new_y2]

        return new_box

    def _update_list(self, bbox_list, del_index):
        """
        Update the bbox array by removing a specific element (a bbox already merged into another box).
        :param bbox_list: bbox array
        :param del_index: index of the bbox element to delete
        :return: updated bbox array
        """
        assert len(bbox_list) > del_index >= 0, 'del_index out of boundary'
        bbox_list[del_index] = bbox_list[-1:][0]
        bbox_list.pop()
        return bbox_list

    def _merge_box(self, std_bbox_list, threshold, strategy='overlap'):
        """
        Bbox merging: merge bboxes according to the selected strategy and threshold.
        :param std_bbox_list: accepts two layouts: (Array[N, 4] -> [x1, y1, x2, y2]; Array[N, 6] -> [x1, y1, x2, y2, conf, class])
        :param threshold: threshold
        :param strategy: merge strategy (distance/overlap)
        """
        if isinstance(std_bbox_list, torch.Tensor):
            std_bbox_list = std_bbox_list.tolist()
        center_bbox_list = self._xyxy2xywh(std_bbox_list)
        i = 0
        while i < len(std_bbox_list):
            j = i + 1
            while j < len(std_bbox_list):
                if strategy == 'overlap':
                    assert i < len(std_bbox_list) and j < len(std_bbox_list), f'len={len(std_bbox_list)}, j={j}, i={i}'
                    if self._judge_merge_by_overlap_area(std_bbox_list[i], std_bbox_list[j], threshold):
                        std_bbox_list[i] = self._basic_merge(std_bbox_list[i], std_bbox_list[j])
                        self._update_list(std_bbox_list, j)
                        self._update_list(center_bbox_list, j)
                        continue
                else:
                    if self._judge_merge_by_center_distance(center_bbox_list[i], center_bbox_list[j], threshold):
                        std_bbox_list[i] = self._basic_merge(std_bbox_list[i], std_bbox_list[j])
                        self._update_list(std_bbox_list, j)
                        self._update_list(center_bbox_list, j)
                        continue
                j += 1
            i += 1

        return std_bbox_list


def main():
    model = YoloModel("SHOU-ISD/yolo-cracks", "last4.pt", "SHOU-ISD/yolo-cracks", "best.pt")
    model.preview_detect('./datasets/Das1100209.jpg', 0.4).show()


if __name__ == '__main__':
    main()
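detect() is a two-stage pipeline: segment the pavement and white out everything else, split the pavement image into 640 to 1080 px tiles with 10% overlap, run the detector on each tile, shift each box by its tile offset, and finally merge nearby or overlapping boxes. The merge step can be exercised on its own; a minimal sketch with hypothetical toy boxes and a threshold of 20 px, bypassing __init__ so no checkpoints are fetched from the Hub:

from models.yolo_crack import YoloModel

# __init__ only downloads and loads the two YOLO checkpoints, so skip it here and
# use the pure box-merging helpers directly.
m = YoloModel.__new__(YoloModel)

boxes = [
    [0.0, 0.0, 10.0, 10.0, 0.9, 0.0],   # [x1, y1, x2, y2, conf, class]
    [5.0, 5.0, 15.0, 15.0, 0.8, 0.0],   # centers are about 7.07 px apart
]
merged = m._merge_box(boxes, threshold=20, strategy="distance")
print(merged)   # one box [0.0, 0.0, 15.0, 15.0, ~0.85, 0.0] with averaged confidence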
yolo_model.py
CHANGED
@@ -1,50 +1,8 @@
-import
-from PIL import ImageDraw, Image
+from PIL import Image
 from huggingface_hub import hf_hub_download
 from ultralytics import YOLO
 
-
-def plot_one_box(x, img, color=None, label=None, line_thickness=None):
-    """
-    Helper Functions for Plotting BBoxes
-    :param x:
-    :param img:
-    :param color:
-    :param label:
-    :param line_thickness:
-    :return:
-    """
-    width, height = img.size
-    tl = line_thickness or round(0.002 * (width + height) / 2) + 1  # line/font thickness
-    color = color or (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
-    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
-    img_draw = ImageDraw.Draw(img)
-    img_draw.rectangle((c1[0], c1[1], c2[0], c2[1]), outline=color, width=tl)
-    if label:
-        tf = max(tl - 1, 1)  # font thickness
-        x1, y1, x2, y2 = img_draw.textbbox(c1, label, stroke_width=tf)
-        img_draw.rectangle((x1, y1, x2, y2), fill=color)
-        img_draw.text((x1, y1), label, fill=(255, 255, 255))
-
-
-def add_bboxes(pil_img, result, confidence=0.6):
-    """
-    Plotting Bounding Box on img
-    :param pil_img:
-    :param result:
-    :param confidence:
-    :return:
-    """
-    for box in result.boxes:
-        [cl] = box.cls.tolist()
-        [conf] = box.conf.tolist()
-        if conf < confidence:
-            continue
-        [rect] = box.xyxy.tolist()
-        text = f'{result.names[cl]}: {conf: 0.2f}'
-        plot_one_box(x=rect, img=pil_img, label=text)
-
-    return pil_img
+from models.tools.draw import add_bboxes
 
 
 class YoloModel:
@@ -59,9 +17,10 @@ class YoloModel:
     def detect(self, im):
         return self.model(source=im)
 
-    def preview_detect(self,
-
-
+    def preview_detect(self, filename, confidence):
+        image = Image.open(filename)
+        results = self.model(source=image)
+        res_img = image
         for result in results:
             res_img = add_bboxes(res_img, result, confidence)
         return res_img
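With this change preview_detect takes a file path instead of an image object, opens it itself, and draws the boxes that pass the confidence threshold using models.tools.draw.add_bboxes. A short usage sketch, assuming a hypothetical local image:

from yolo_model import YoloModel

fire_and_smoke = YoloModel("SHOU-ISD/fire-and-smoke", "yolov8n.pt")      # same repo/weights as in demo.py
preview = fire_and_smoke.preview_detect("example.jpg", confidence=0.3)   # hypothetical local image
preview.save("example_annotated.jpg")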