Sompote committed on
Commit
9f59317
·
verified ·
1 Parent(s): 6bd61b4

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +60 -0
  2. best-3.pt +3 -0
  3. requirements.txt +0 -0
  4. yolo11s.pt +3 -0
app.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from ultralytics import YOLO
import numpy as np
import cv2

# Load models:
#   model  — custom segmentation model that outlines the protection zone
#   model2 — general-purpose object-detection model
model = YOLO("best-3.pt")  # load a custom model for segmentation (protection zone)
model2 = YOLO('yolo11s.pt')  # load a second model for object detection

# Streamlit app title
st.title("Protection Zone and Object Detection")

# Upload image
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    # Decode the uploaded bytes into a BGR OpenCV image.
    image = uploaded_file.read()
    image_np = np.frombuffer(image, np.uint8)
    image_cv = cv2.imdecode(image_np, cv2.IMREAD_COLOR)

    # imdecode returns None (not an exception) for undecodable data;
    # bail out early instead of crashing on image_cv.shape below.
    if image_cv is None:
        st.error("Could not decode the uploaded file as an image.")
        st.stop()

    # Predict the protection zone with the segmentation model and merge all
    # instance masks into a single binary (0/255) uint8 mask.
    segment_results = model(image_cv)
    protection_mask = np.zeros(image_cv.shape[:2], dtype=np.uint8)

    for result in segment_results:
        if result.masks is not None:
            for segment in result.masks.data:
                # Masks come back at model resolution; resize to the image size
                # before OR-ing into the combined mask.
                segment_array = segment.cpu().numpy().astype(np.uint8)
                segment_array = cv2.resize(segment_array, (image_cv.shape[1], image_cv.shape[0]))
                protection_mask = cv2.bitwise_or(protection_mask, segment_array * 255)

    # Create a copy of the original image to draw on
    output_image = image_cv.copy()

    # Colorize the zone and blend it in ONLY where the mask is set.
    # applyColorMap assigns a color to value 0 as well, so blending the
    # colormap over the whole frame would tint zone-free pixels too.
    protection_overlay = cv2.applyColorMap(protection_mask, cv2.COLORMAP_COOL)
    blended = cv2.addWeighted(output_image, 0.7, protection_overlay, 0.3, 0)
    in_zone = protection_mask > 0
    output_image[in_zone] = blended[in_zone]

    # Predict objects with the second model
    object_results = model2(image_cv)

    for result in object_results:
        boxes = result.boxes.xyxy.cpu().numpy().astype(int)
        for box in boxes:
            x1, y1, x2, y2 = box
            # Overlap test: any protection-zone pixel inside the box?
            # Slicing the mask is equivalent to the full-frame bitwise_and
            # but costs O(box area) instead of O(image area) per detection.
            overlaps = np.any(protection_mask[y1:y2, x1:x2])
            color = (0, 0, 255) if overlaps else (0, 255, 0)  # red if in zone, green if outside
            # Draw bounding box around the object
            cv2.rectangle(output_image, (x1, y1), (x2, y2), color, 2)

    # Display the final image (OpenCV arrays are BGR)
    st.image(output_image, caption="Protection Zone and Detected Objects", channels="BGR")
else:
    st.write("Please upload an image to process.")
best-3.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df8f4b98a7ee8cafb7f56513fa604f75a7f693efa4a11cc07d65f9234e52d666
3
+ size 61130925
requirements.txt ADDED
File without changes
yolo11s.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85a76fe86dd8afe384648546b56a7a78580c7cb7b404fc595f97969322d502d5
3
+ size 19313732