improve the app layout a little
- app.py +33 -26
- streamlit_av_queue.py +9 -1
app.py
CHANGED
@@ -16,6 +16,8 @@ from sample_utils.turn import get_ice_servers
 import json
 from typing import List

+st.set_page_config(layout="wide")
+
 from vosk import SetLogLevel, Model, KaldiRecognizer
 SetLogLevel(-1)  # mutes vosk verbosity

@@ -40,15 +42,18 @@ async def main():

     system_one_audio_status = st.empty()

-    playing = st.checkbox("Playing", value=True)

     system_one_audio_status.write("Initializing streaming")
-    system_one_audio_output = st.empty()

-    [1 removed line; content not recovered]
+    # system_one_audio_output = st.empty()
+    # system_one_video_output = st.empty()
+    # system_one_audio_history = []
+
+    col1, col2 = st.columns(2)

-    [2 removed lines; content not recovered]
+    with col1:
+        listening = st.checkbox("Listen", value=True)
+        system_one_audio_history_output = st.empty()

     # Initialize resources if not already done
     system_one_audio_status.write("Initializing streaming")
@@ -58,28 +63,28 @@ async def main():

     system_one_audio_status.write("resources referecned")

-    [2 removed lines; content not recovered]
     system_one_audio_status.write("Initializing webrtc_streamer")
-    [19 removed lines; content not recovered]
+    with col2:
+        playing = st.checkbox("Playing", value=True)
+        webrtc_ctx = webrtc_streamer(
+            key="charles",
+            desired_playing_state=playing,
+            queued_audio_frames_callback=st.session_state.streamlit_av_queue.queued_audio_frames_callback,
+            queued_video_frames_callback=st.session_state.streamlit_av_queue.queued_video_frames_callback,
+            mode=WebRtcMode.SENDRECV,
+            media_stream_constraints={
+                "video": True,
+                "audio": {
+                    "sampleRate": 48000,
+                    "sampleSize": 16,
+                    "noiseSuppression": True,
+                    "echoCancellation": True,
+                    "channelCount": 1,
+                }
+            },
+            rtc_configuration={"iceServers": get_ice_servers()},
+            async_processing=True,
+        )

     if not webrtc_ctx.state.playing:
         exit
@@ -92,6 +97,8 @@ async def main():

     try:
         while True:
+            if "streamlit_av_queue" in st.session_state:
+                st.session_state.streamlit_av_queue.set_listening(listening)
             if not webrtc_ctx.state.playing:
                 system_one_audio_status.write("Stopped.")
                 await asyncio.sleep(0.1)
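For orientation, here is a minimal, self-contained sketch (not part of this commit) of the layout pattern the app.py change introduces: a wide page split into two st.columns, with the "Listen" toggle and a transcript placeholder on the left and the playback toggle on the right. It runs with Streamlit alone; the webrtc_streamer wiring and queue callbacks are omitted, and names like history_output are illustrative rather than taken from the repo.

# layout_sketch.py — illustrative only, not part of this commit
import streamlit as st

st.set_page_config(layout="wide")      # use the full browser width

status = st.empty()                    # stands in for system_one_audio_status
status.write("Initializing streaming")

col1, col2 = st.columns(2)             # two side-by-side panels

with col1:
    # left panel: listening toggle plus a placeholder the real app fills with recognized speech
    listening = st.checkbox("Listen", value=True)
    history_output = st.empty()

with col2:
    # right panel: playback toggle; the real app creates webrtc_streamer(...) here
    playing = st.checkbox("Playing", value=True)

status.write(f"listening={listening}, playing={playing}")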
streamlit_av_queue.py
CHANGED
@@ -14,10 +14,16 @@ class StreamlitAVQueue:
     def __init__(self, audio_bit_rate=16000):
         self._output_channels = 2
         self._audio_bit_rate = audio_bit_rate
+        self._listening = True
+        self._lock = threading.Lock()
         self.queue_actor = WebRtcAVQueueActor.options(
             name="WebRtcAVQueueActor",
             get_if_exists=True,
         ).remote()
+
+    def set_listening(self, listening: bool):
+        with self._lock:
+            self._listening = listening

     async def queued_video_frames_callback(
         self,
@@ -37,8 +43,10 @@ class StreamlitAVQueue:
         frames: List[av.AudioFrame],
     ) -> av.AudioFrame:
         try:
+            with self._lock:
+                should_listed = self._listening
             sound_chunk = pydub.AudioSegment.empty()
-            if len(frames) > 0:
+            if len(frames) > 0 and should_listed:
                 for frame in frames:
                     sound = pydub.AudioSegment(
                         data=frame.to_ndarray().tobytes(),
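The streamlit_av_queue.py change is a standard lock-guarded flag: the Streamlit script thread writes _listening via set_listening(), and the audio-frame callback snapshots it under the same lock before deciding whether to process the incoming frames. Below is a stripped-down sketch of just that pattern in plain Python; the ListenToggle class and the usage lines at the end are hypothetical, not code from this repo.

import threading

class ListenToggle:
    # Mirrors StreamlitAVQueue's _listening/_lock pair: one thread flips the
    # flag, another reads it, and a single Lock keeps the two in sync.
    def __init__(self):
        self._listening = True
        self._lock = threading.Lock()

    def set_listening(self, listening: bool) -> None:
        # writer side: called when the "Listen" checkbox changes
        with self._lock:
            self._listening = listening

    def is_listening(self) -> bool:
        # reader side: the frame callback snapshots the flag before queueing audio
        with self._lock:
            return self._listening

toggle = ListenToggle()
toggle.set_listening(False)
print(toggle.is_listening())  # False: incoming frames would be skipped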