Spaces: Runtime error
Commit 2a17b3c · Parent(s): f936f06
Update app.py

app.py CHANGED
@@ -13,98 +13,100 @@ URL = "https://www.youtube.com/watch?v=dQw4w9WgXcQ" #URL to parse
 #stream = cv2.VideoCapture(0) # 0 means read from local camera.
 #camera_ip = "rtsp://username:password@IP/port"
 #stream = cv2.VideoCapture(camera_ip)
+class Capvid:
+
+
+    # load model
+    model = YOLO('ultralyticsplus/yolov8s')
+
+    # set model parameters
+    model.overrides['conf'] = 0.25 # NMS confidence threshold
+    model.overrides['iou'] = 0.45 # NMS IoU threshold
+    model.overrides['agnostic_nms'] = False # NMS class-agnostic
+    model.overrides['max_det'] = 1000 # maximum number of detections per image
+
+    # set image
+    #image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'
+
+    # perform inference
+    #def show(feed)
+    # return model.predict(feed)
+
+    # observe results
+    #print(results[0].boxes)
+    #render = render_result(model=model, image=image, result=results[0])
+    #render.show()
+
+    """
+    The function below identifies the device which is available to make the prediction and uses it to load and infer the frame. Once it has results, it will extract the labels and coordinates (along with scores) for each object detected in the frame.
+    """
+    def score_frame(frame, model):
+        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        model.to(device)
+        frame = [torch.tensor(frame)]
+        results = self.model(frame)
+        labels = results.xyxyn[0][:, -1].numpy()
+        cord = results.xyxyn[0][:, :-1].numpy()
+        return labels, cord
+
+    """
+    The function below takes the results and the frame as input and plots boxes over all the objects which have a score higher than our threshold.
+    """
+    def plot_boxes(self, results, frame):
+        labels, cord = results
+        n = len(labels)
+        x_shape, y_shape = frame.shape[1], frame.shape[0]
+        for i in range(n):
+            row = cord[i]
+            # If score is less than 0.2 we avoid making a prediction.
+            if row[4] < 0.2:
+                continue
+            x1 = int(row[0]*x_shape)
+            y1 = int(row[1]*y_shape)
+            x2 = int(row[2]*x_shape)
+            y2 = int(row[3]*y_shape)
+            bgr = (0, 255, 0) # color of the box
+            classes = self.model.names # Get the name of label index
+            label_font = cv2.FONT_HERSHEY_SIMPLEX # Font for the label.
+            cv2.rectangle(frame, \
+                          (x1, y1), (x2, y2), \
+                          bgr, 2) # Plot the boxes
+            cv2.putText(frame, \
+                        classes[labels[i]], \
+                        (x1, y1), \
+                        label_font, 0.9, bgr, 2) # Put a label over box.
+        return frame
+
+    """
+    The function below orchestrates the entire operation and performs the real-time parsing of the video stream.
+    """
+    def __call__(self):
+        player = self.get_video_stream() # Get your video stream.
+        assert player.isOpened() # Make sure that there is a stream.
+        # Below code creates a new video writer object to write our
+        # output stream.
+        x_shape = int(player.get(cv2.CAP_PROP_FRAME_WIDTH))
+        y_shape = int(player.get(cv2.CAP_PROP_FRAME_HEIGHT))
+        four_cc = cv2.VideoWriter_fourcc(*"MJPG") # Using MJPEG codec
+        out = cv2.VideoWriter(out_file, four_cc, 20, \
+                              (x_shape, y_shape))
+        ret, frame = player.read() # Read the first frame.
+        while rect: # Run until stream is out of frames
+            start_time = time() # We would like to measure the FPS.
+            results = self.score_frame(frame) # Score the frame.
+            frame = self.plot_boxes(results, frame) # Plot the boxes.
+            end_time = time()
+            fps = 1/np.round(end_time - start_time, 3) # Measure the FPS.
+            print(f"Frames Per Second : {fps}")
+            out.write(frame) # Write the frame onto the output.
+            ret, frame = player.read() # Read next frame.
 def load():
     yt = YouTube(URL)
     vid_cap = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download(filename="tmp.mp4")
     stream = cv2.VideoCapture(vid)
+    Capvid()
     return vid_cap
 
-
-# load model
-model = YOLO('ultralyticsplus/yolov8s')
-
-# set model parameters
-model.overrides['conf'] = 0.25 # NMS confidence threshold
-model.overrides['iou'] = 0.45 # NMS IoU threshold
-model.overrides['agnostic_nms'] = False # NMS class-agnostic
-model.overrides['max_det'] = 1000 # maximum number of detections per image
-
-# set image
-#image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'
-
-# perform inference
-#def show(feed)
-# return model.predict(feed)
-
-# observe results
-#print(results[0].boxes)
-#render = render_result(model=model, image=image, result=results[0])
-#render.show()
-
-"""
-The function below identifies the device which is available to make the prediction and uses it to load and infer the frame. Once it has results, it will extract the labels and coordinates (along with scores) for each object detected in the frame.
-"""
-def score_frame(frame, model):
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    model.to(device)
-    frame = [torch.tensor(frame)]
-    results = self.model(frame)
-    labels = results.xyxyn[0][:, -1].numpy()
-    cord = results.xyxyn[0][:, :-1].numpy()
-    return labels, cord
-
-"""
-The function below takes the results and the frame as input and plots boxes over all the objects which have a score higher than our threshold.
-"""
-def plot_boxes(self, results, frame):
-    labels, cord = results
-    n = len(labels)
-    x_shape, y_shape = frame.shape[1], frame.shape[0]
-    for i in range(n):
-        row = cord[i]
-        # If score is less than 0.2 we avoid making a prediction.
-        if row[4] < 0.2:
-            continue
-        x1 = int(row[0]*x_shape)
-        y1 = int(row[1]*y_shape)
-        x2 = int(row[2]*x_shape)
-        y2 = int(row[3]*y_shape)
-        bgr = (0, 255, 0) # color of the box
-        classes = self.model.names # Get the name of label index
-        label_font = cv2.FONT_HERSHEY_SIMPLEX # Font for the label.
-        cv2.rectangle(frame, \
-                      (x1, y1), (x2, y2), \
-                      bgr, 2) # Plot the boxes
-        cv2.putText(frame, \
-                    classes[labels[i]], \
-                    (x1, y1), \
-                    label_font, 0.9, bgr, 2) # Put a label over box.
-    return frame
-
-"""
-The function below orchestrates the entire operation and performs the real-time parsing of the video stream.
-"""
-def __call__(self):
-    player = self.get_video_stream() # Get your video stream.
-    assert player.isOpened() # Make sure that there is a stream.
-    # Below code creates a new video writer object to write our
-    # output stream.
-    x_shape = int(player.get(cv2.CAP_PROP_FRAME_WIDTH))
-    y_shape = int(player.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    four_cc = cv2.VideoWriter_fourcc(*"MJPG") # Using MJPEG codec
-    out = cv2.VideoWriter(out_file, four_cc, 20, \
-                          (x_shape, y_shape))
-    ret, frame = player.read() # Read the first frame.
-    while rect: # Run until stream is out of frames
-        start_time = time() # We would like to measure the FPS.
-        results = self.score_frame(frame) # Score the frame.
-        frame = self.plot_boxes(results, frame) # Plot the boxes.
-        end_time = time()
-        fps = 1/np.round(end_time - start_time, 3) # Measure the FPS.
-        print(f"Frames Per Second : {fps}")
-        out.write(frame) # Write the frame onto the output.
-        ret, frame = player.read() # Read next frame.
-
 with gr.Blocks() as app:
     youtube_url = gr.Textbox(label="YouTube URL", value=f"{URL}")
     load_button = gr.Button("Load Video")
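Notes on this revision, in source order. The commit moves the module-level model setup and the three helpers into a Capvid class and has load() instantiate it. Because everything sits at class level, YOLO('ultralyticsplus/yolov8s') and the overrides run once, at class-definition time, and are shared by all instances. The commented-out lines preserve the stock ultralyticsplus single-image example; for reference, a self-contained version of that snippet (a sketch; it assumes only the ultralyticsplus package, which provides both YOLO and render_result):

    from ultralyticsplus import YOLO, render_result

    model = YOLO('ultralyticsplus/yolov8s')
    model.overrides['conf'] = 0.25           # NMS confidence threshold
    model.overrides['iou'] = 0.45            # NMS IoU threshold
    model.overrides['agnostic_nms'] = False  # class-agnostic NMS off
    model.overrides['max_det'] = 1000        # max detections per image

    image = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'
    results = model.predict(image)   # returns a list with one Results object
    print(results[0].boxes)          # boxes, confidences, class ids
    render = render_result(model=model, image=image, result=results[0])
    render.show()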
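score_frame takes model as an argument but then calls self.model, and results.xyxyn is the YOLOv5 torch-hub result layout; the YOLOv8 model used here returns a list of Results objects instead. A hedged sketch of what the function appears to intend, using the YOLOv8 Boxes attributes (xyxyn, conf, cls):

    import torch

    def score_frame(frame, model):
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model.to(device)
        results = model.predict(frame)    # frame: BGR numpy array from cv2
        boxes = results[0].boxes
        labels = boxes.cls.cpu().numpy()  # one class index per detection
        # One row per detection: [x1, y1, x2, y2, score], coordinates normalized.
        cord = torch.cat([boxes.xyxyn, boxes.conf.unsqueeze(1)], dim=1).cpu().numpy()
        return labels, cord

This keeps the (labels, cord) contract that plot_boxes expects: normalized corners in columns 0-3 and the confidence score in column 4.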
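plot_boxes is written as a method (it takes self and reads self.model.names) while score_frame is not, and classes[labels[i]] indexes the names mapping with a float label; casting to int makes the lookup robust whether names is a list or a dict. A standalone sketch with the cast added; passing model explicitly and naming the hard-coded 0.2 threshold are small generalizations:

    import cv2

    def plot_boxes(model, results, frame, threshold=0.2):
        labels, cord = results
        x_shape, y_shape = frame.shape[1], frame.shape[0]
        for label, row in zip(labels, cord):
            if row[4] < threshold:   # skip low-confidence detections
                continue
            # Coordinates are normalized, so scale them back to pixels.
            x1, y1 = int(row[0] * x_shape), int(row[1] * y_shape)
            x2, y2 = int(row[2] * x_shape), int(row[3] * y_shape)
            bgr = (0, 255, 0)        # box color
            cv2.rectangle(frame, (x1, y1), (x2, y2), bgr, 2)
            cv2.putText(frame, model.names[int(label)], (x1, y1),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.9, bgr, 2)
        return frame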
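__call__ cannot run as committed, which is consistent with the Space's Runtime error status: the loop tests rect where ret was read, out_file is never defined, and neither the class nor load() provides a get_video_stream() method. A sketch of the intended loop with those holes filled, written against the free-function sketches above; the run name and the out_file default are placeholders:

    from time import time
    import cv2

    def run(player, model, out_file="out.avi"):
        assert player.isOpened()      # make sure there is a stream
        x_shape = int(player.get(cv2.CAP_PROP_FRAME_WIDTH))
        y_shape = int(player.get(cv2.CAP_PROP_FRAME_HEIGHT))
        four_cc = cv2.VideoWriter_fourcc(*"MJPG")   # MJPEG codec
        out = cv2.VideoWriter(out_file, four_cc, 20, (x_shape, y_shape))
        ret, frame = player.read()    # read the first frame
        while ret:                    # run until the stream is out of frames
            start_time = time()
            results = score_frame(frame, model)
            frame = plot_boxes(model, results, frame)
            fps = 1 / max(time() - start_time, 1e-3)  # guard against division by zero
            print(f"Frames Per Second : {fps:.1f}")
            out.write(frame)          # write the annotated frame to the output file
            ret, frame = player.read()
        out.release()                 # flush the writer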
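In load(), cv2.VideoCapture(vid) references an undefined name (the downloaded path is vid_cap), the capture it builds is discarded, and Capvid() creates an instance without ever invoking its __call__ (that would take Capvid()()). A sketch of the download step alone, keeping pytube's download-then-return flow; the url parameter is a generalization of the module-level URL:

    from pytube import YouTube

    def load(url=URL):
        yt = YouTube(url)
        vid_cap = (yt.streams
                     .filter(progressive=True, file_extension='mp4')
                     .order_by('resolution').desc().first()
                     .download(filename="tmp.mp4"))   # returns the local file path
        return vid_cap                                # hand the path to the caller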
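The Blocks context creates the textbox and the button, but nothing in this hunk binds the button, so clicking Load Video does nothing yet. A minimal wiring sketch, assuming the load(url) variant above; the gr.Video output component is an assumption about the intended UI:

    import gradio as gr

    with gr.Blocks() as app:
        youtube_url = gr.Textbox(label="YouTube URL", value=f"{URL}")
        load_button = gr.Button("Load Video")
        video_out = gr.Video(label="Downloaded video")   # hypothetical output slot
        # Pass the textbox value to load() and show the downloaded file.
        load_button.click(fn=load, inputs=youtube_url, outputs=video_out)

    app.launch()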