itsTomLie committed on
Commit 0db98b9 · verified · 1 Parent(s): ac0b1e3

Update app.py

Files changed (1)
  1. app.py +75 -15
app.py CHANGED
@@ -1,16 +1,76 @@
-# Import the InferencePipeline object
-from inference import InferencePipeline
-
-# Import the built in render_boxes sink for visualizing results
-from inference.core.interfaces.stream.sinks import render_boxes
-
-# initialize a pipeline object
-pipeline = InferencePipeline.init(
-    model_id="fall-detection-v2-ihywg/5", # Roboflow model to use
-    # Path to video, device id (int, usually 0 for built in webcams), or RTSP stream url
-    video_reference="./examples/fall_test.mp4",
-    on_prediction=render_boxes, # Function to run after each prediction
-    api_key="rOZmS39JRmLDRuDaF8KE"
+import os
+import numpy as np
+import gradio as gr
+import supervision as sv
+from ultralytics import YOLO
+
+# Define paths
+HOME = os.getcwd()
+MODEL_PATH = "./best.pt"
+
+# Load the YOLO model
+model = YOLO(MODEL_PATH)
+
+# Initialize annotators
+box_annotator = sv.BoxAnnotator()
+label_annotator = sv.LabelAnnotator()
+
+# Define the confidence threshold
+CONFIDENCE_THRESHOLD = 0.6
+
+# Define the callback function for processing each video frame
+def callback(frame: np.ndarray, _: int) -> np.ndarray:
+    # Perform detection on the frame
+    results = model(frame)[0]
+    detections = sv.Detections.from_ultralytics(results)
+
+    # Filter detections based on confidence threshold
+    detections_filtered = detections[detections.confidence >
+                                     CONFIDENCE_THRESHOLD]
+
+    # Create labels for filtered detections
+    labels = [
+        f"{model.model.names[class_id]} {confidence:.2f}"
+        for class_id, confidence in zip(
+            detections_filtered.class_id, detections_filtered.confidence
+        )
+    ]
+
+    # Annotate the frame with bounding boxes and labels
+    annotated_frame = box_annotator.annotate(
+        scene=frame.copy(),
+        detections=detections_filtered,
+    )
+    annotated_frame = label_annotator.annotate(
+        scene=annotated_frame,
+        detections=detections_filtered,
+        labels=labels
+    )
+
+    return annotated_frame
+
+# Function to process the video and generate the output
+def process_video_gradio(input_video):
+    SOURCE_VIDEO_PATH = input_video
+    TARGET_VIDEO_PATH = f"{HOME}/output_fall_detection.mp4"
+
+    sv.process_video(
+        source_path=SOURCE_VIDEO_PATH,
+        target_path=TARGET_VIDEO_PATH,
+        callback=callback
+    )
+
+    return TARGET_VIDEO_PATH
+
+# Define the Gradio interface
+interface = gr.Interface(
+    fn=process_video_gradio, # Function to process video
+    inputs=gr.Video(), # Upload video input
+    outputs=gr.Video(), # Return the annotated video output
+    title="Fall Detection Video Annotator",
+    description="Upload a video, and the model will annotate it with fall detection using Fine-Tuned YOLO model."
 )
-pipeline.start()
-pipeline.join()
+
+# Launch the interface
+if __name__ == "__main__":
+    interface.launch()
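
For a quick local check, the updated pipeline can be exercised without launching the Gradio UI by calling process_video_gradio directly. The snippet below is a minimal sketch, assuming the module is saved as app.py, that best.pt is present, and that the example clip referenced by the previous version (./examples/fall_test.mp4) is still available.

# Smoke test (sketch): run the annotation pipeline headlessly.
# Assumes app.py, best.pt, and the example clip exist in the working directory.
from app import process_video_gradio

output_path = process_video_gradio("./examples/fall_test.mp4")
print(f"Annotated video written to: {output_path}")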