Upload 4 files
- app.py +79 -0
- best-3.pt +3 -0
- requirements.txt +4 -0
- yolo11s.pt +3 -0
app.py
ADDED
@@ -0,0 +1,79 @@
import gradio as gr
from ultralytics import YOLO
import numpy as np
import cv2

# Load models
model = YOLO("best-3.pt")  # load a custom model for segmentation (protection zone)
model2 = YOLO('yolo11s.pt')  # load a second model for object detection

def process_image(image):
    # Gradio passes the image as RGB, so we work in RGB color space
    image_rgb = np.array(image)

    # Predict the protection zone with the first model
    segment_results = model(image_rgb)  # predict segments
    protection_mask = np.zeros(image_rgb.shape[:2], dtype=np.uint8)  # create an empty mask

    for result in segment_results:
        if result.masks is not None:
            for segment in result.masks.data:
                segment_array = segment.cpu().numpy().astype(np.uint8)
                segment_array = cv2.resize(segment_array, (image_rgb.shape[1], image_rgb.shape[0]))
                protection_mask = cv2.bitwise_or(protection_mask, segment_array * 255)

    # Create a copy of the original image to draw on
    output_image = image_rgb.copy()

    # Create a red overlay for the protection zone (using RGB)
    protection_overlay = np.zeros_like(output_image)
    protection_overlay[protection_mask > 0] = [255, 0, 0]  # red color in RGB

    # Overlay the protection zone on the output image
    output_image = cv2.addWeighted(output_image, 1, protection_overlay, 0.3, 0)

    # Predict objects with the second model
    object_results = model2(image_rgb)  # predict objects using model2

    for result in object_results:
        boxes = result.boxes.xyxy.cpu().numpy().astype(int)
        classes = result.boxes.cls.cpu().numpy()
        names = result.names

        for box, cls in zip(boxes, classes):
            x1, y1, x2, y2 = box
            # Check whether the object is within the protection zone
            object_mask = np.zeros(image_rgb.shape[:2], dtype=np.uint8)
            object_mask[y1:y2, x1:x2] = 1  # create a mask for the object

            # Check overlap
            overlap = cv2.bitwise_and(protection_mask, object_mask)
            is_inside = np.sum(overlap) > 0

            # Red if in zone, green if outside (in RGB)
            color = (255, 0, 0) if is_inside else (0, 255, 0)

            # Draw bounding box around the object
            cv2.rectangle(output_image, (x1, y1), (x2, y2), color, 2)

            # If inside the protection zone, display the class name
            if is_inside:
                class_name = names[int(cls)]
                label = f"{class_name}"
                (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
                cv2.rectangle(output_image, (x1, y1 - label_height - 5), (x1 + label_width, y1), color, -1)
                cv2.putText(output_image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

    return output_image

# Define Gradio interface
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(),
    outputs=gr.Image(label="Protection Zone and Detected Objects"),
    title="Protection Zone and Object Detection",
    description="Upload an image to detect protection zones (in red) and objects. Objects inside the protection zone are marked in red with their class name, while objects outside are marked in green."
)

# Launch the Gradio app
iface.launch()
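The in-zone check in process_image reduces to intersecting the segmentation mask with a box mask and testing for any nonzero overlap. Below is a minimal standalone sketch of that same test, using made-up mask and box values instead of model output:

# Standalone sketch of the overlap test used in process_image.
# The mask shape and box coordinates here are illustration values, not model output.
import numpy as np
import cv2

protection_mask = np.zeros((100, 100), dtype=np.uint8)
protection_mask[20:60, 20:60] = 255      # pretend segmentation result (zone)

x1, y1, x2, y2 = 50, 50, 90, 90          # pretend detection box
object_mask = np.zeros((100, 100), dtype=np.uint8)
object_mask[y1:y2, x1:x2] = 1            # box rendered as a mask

overlap = cv2.bitwise_and(protection_mask, object_mask)
print(np.sum(overlap) > 0)               # True: the box enters the zone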
best-3.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df8f4b98a7ee8cafb7f56513fa604f75a7f693efa4a11cc07d65f9234e52d666
size 61130925
requirements.txt
ADDED
@@ -0,0 +1,4 @@
streamlit
ultralytics
numpy
opencv-python
yolo11s.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85a76fe86dd8afe384648546b56a7a78580c7cb7b404fc595f97969322d502d5
size 19313732