Gopikanth123 committed on
Commit c50f268 · verified · 1 Parent(s): d7878e9

Update app.py

Files changed (1)
  1. app.py +11 -148
app.py CHANGED
@@ -1,158 +1,21 @@
-import streamlit as st
+import gradio as gr
 from transformers import pipeline
 import numpy as np
-import threading
-from gradio_client import Client
-from streamlit_audio_recorder import st_audiorec
 
-# Initialize session state for chat history
-if "messages" not in st.session_state:
-    st.session_state["messages"] = [] # Store chat history
-
-# Load the ASR model using the Hugging Face transformers pipeline
 transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
 
-# Function to generate a response using Gradio client
-def generate_response(query):
-    try:
-        client = Client("Gopikanth123/llama2")
-        result = client.predict(query=query, api_name="/predict")
-        return result
-    except Exception as e:
-        return f"Error communicating with the Gradio backend: {e}"
-
-# Function to handle user input and bot response
-def handle_user_input(user_input):
-    if user_input:
-        # Add user message to session state
-        st.session_state["messages"].append({"user": user_input})
-
-        # Generate bot response
-        response = generate_response(user_input)
-        st.session_state["messages"].append({"bot": response})
-
-        # Speak out bot response in a new thread to avoid blocking
-        threading.Thread(target=speak_text, args=(response,), daemon=True).start()
-
-# Function to speak text (Voice Output)
-def speak_text(text):
-    import pyttsx3
-    engine = pyttsx3.init()
-    engine.stop() # Ensure no previous loop is running
-    engine.say(text)
-    engine.runAndWait()
-
-# Function to update chat history dynamically
-def update_chat_history():
-    chat_history = st.session_state["messages"]
-    for msg in chat_history:
-        if "user" in msg:
-            st.markdown(f"<div class='chat-bubble user-message'><strong>You:</strong> {msg['user']}</div>", unsafe_allow_html=True)
-        if "bot" in msg:
-            st.markdown(f"<div class='chat-bubble bot-message'><strong>Bot:</strong> {msg['bot']}</div>", unsafe_allow_html=True)
-
-# Function to process and transcribe audio
-def transcribe_audio(audio_data, sr):
-    # Normalize audio to float32
-    audio_data = audio_data.astype(np.float32)
-    audio_data /= np.max(np.abs(audio_data))
+def transcribe(audio):
+    sr, y = audio
+    y = y.astype(np.float32)
+    y /= np.max(np.abs(y))
 
-    # Use the ASR model to transcribe the audio
-    transcription = transcriber({"sampling_rate": sr, "raw": audio_data})["text"]
-    return transcription
+    return transcriber({"sampling_rate": sr, "raw": y})["text"]
 
-# Main Streamlit app
-st.set_page_config(page_title="Llama2 Chatbot", page_icon="🤖", layout="wide")
-st.markdown(
-    """
-    <style>
-    .stButton>button {
-        background-color: #6C63FF;
-        color: white;
-        font-size: 16px;
-        border-radius: 10px;
-        padding: 10px 20px;
-    }
-    .stTextInput>div>input {
-        border: 2px solid #6C63FF;
-        border-radius: 10px;
-        padding: 10px;
-    }
-    .chat-container {
-        background-color: #F7F9FC;
-        padding: 20px;
-        border-radius: 15px;
-        max-height: 400px;
-        overflow-y: auto;
-    }
-    .chat-bubble {
-        padding: 10px 15px;
-        border-radius: 15px;
-        margin: 5px 0;
-        max-width: 80%;
-        display: inline-block;
-    }
-    .user-message {
-        background-color: #D1C4E9;
-        text-align: left;
-        margin-left: auto;
-    }
-    .bot-message {
-        background-color: #BBDEFB;
-        text-align: left;
-        margin-right: auto;
-    }
-    .input-container {
-        display: flex;
-        justify-content: space-between;
-        gap: 10px;
-        padding: 10px 0;
-    }
-    </style>
-    """,
-    unsafe_allow_html=True
-)
 
-st.title("🤖 Chat with Llama2 Bot")
-st.markdown(
-    """
-    Welcome to the *Llama2 Chatbot*!
-    - *Type* your message below, or
-    - *Use the microphone* to speak to the bot.
-    """
+demo = gr.Interface(
+    transcribe,
+    gr.Audio(sources=["microphone"]),
+    "text",
 )
 
-# Display chat history
-chat_history_container = st.container()
-with chat_history_container:
-    # Add input field within a form
-    with st.form(key='input_form', clear_on_submit=True):
-        user_input = st.text_input("Type your message here...", placeholder="Hello, how are you?")
-        submit_button = st.form_submit_button("Send")
-
-    # Handle form submission
-    if submit_button:
-        handle_user_input(user_input)
-
-# Separate button for speech recognition outside of the form
-if st.button("Speak"):
-    # Record and process the speech using Streamlit Audio Recorder
-    audio_data, sr = st_audiorec()
-
-    if audio_data is not None:
-        st.audio(audio_data, format="audio/wav")
-
-        # Convert to numpy array
-        audio_np = np.array(audio_data)
-
-        # Transcribe the audio
-        transcription = transcribe_audio(audio_np, sr)
-
-        # Display the recognized text
-        st.session_state["user_input"] = transcription
-        st.success(f"Recognized Text: {transcription}")
-        handle_user_input(transcription)
-
-st.markdown("### Chat History")
-# Update chat history on every interaction
-update_chat_history()
+demo.launch()
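
For reference, a minimal sketch of how the new transcribe() can be exercised without launching the Gradio UI. It is not part of the commit: the one-second 440 Hz sine clip and the 16 kHz rate are made-up stand-ins for microphone input, but the (sample_rate, samples) tuple matches what gr.Audio passes to the function by default.

import numpy as np
from transformers import pipeline

transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")

def transcribe(audio):
    # gr.Audio hands the function a (sample_rate, numpy samples) tuple
    sr, y = audio
    y = y.astype(np.float32)
    y /= np.max(np.abs(y))  # peak-normalize; assumes the clip is not pure silence
    return transcriber({"sampling_rate": sr, "raw": y})["text"]

sr = 16000
t = np.arange(sr) / sr  # one second of samples
clip = (0.5 * np.sin(2 * np.pi * 440.0 * t)).astype(np.float32)
print(transcribe((sr, clip)))  # Whisper's output for a pure tone is not meaningful speech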