import streamlit as st

from langchain.chains import RetrievalQA
from langchain.chains.question_answering import load_qa_chain
from langchain_community.llms import HuggingFaceHub
from langchain_community.document_loaders import AssemblyAIAudioTranscriptLoader
from langchain_community.embeddings import HuggingFaceHubEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from tempfile import NamedTemporaryFile
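
# Note: both external services read their credentials from the environment.
# AssemblyAIAudioTranscriptLoader expects ASSEMBLYAI_API_KEY, and the HuggingFaceHub
# LLM / HuggingFaceHubEmbeddings expect HUGGINGFACEHUB_API_TOKEN to be set before the app starts.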


def create_qa_prompt() -> PromptTemplate:
    template = """\n\nHuman: Use the following pieces of context to answer the question at the end. If the answer is not clear, say I DON'T KNOW

{context}

Question: {question}

\n\nAssistant:

Answer:"""

    return PromptTemplate(template=template, input_variables=["context", "question"])


def create_docs(urls_list):
    documents = []
    for url in urls_list:
        st.write(f'Transcribing {url}')
        # load() uploads the file to AssemblyAI and waits until the transcript is ready.
        documents.append(AssemblyAIAudioTranscriptLoader(file_path=url).load()[0])
    return documents


def make_embedder():
    # Embeddings are computed remotely via the Hugging Face Inference API,
    # so no local model/encode kwargs are needed here.
    model_name = "sentence-transformers/all-mpnet-base-v2"
    return HuggingFaceHubEmbeddings(
        repo_id=model_name,
        task="feature-extraction",
    )


def make_qa_chain():
    llm = HuggingFaceHub(
        repo_id="HuggingFaceH4/zephyr-7b-beta",
        model_kwargs={
            "max_new_tokens": 512,
            "top_k": 30,
            "temperature": 0.01,
            "repetition_penalty": 1.5,
        },
    )
    return llm
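

# The imports above also bring in RetrievalQA, RecursiveCharacterTextSplitter, FAISS,
# and the create_qa_prompt()/make_embedder() helpers, which main() below never calls.
# The sketch that follows is one optional way they could fit together: split the
# transcript, index the chunks in a FAISS store (requires the faiss-cpu package),
# and answer questions through a retriever instead of stuffing every document into
# the prompt. The function name and chunk sizes are illustrative, not part of the app.
def make_retrieval_qa_chain(docs, llm):
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(docs)
    db = FAISS.from_documents(chunks, make_embedder())
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(),
        chain_type_kwargs={"prompt": create_qa_prompt()},
    )
# Example usage inside main(): make_retrieval_qa_chain(docs, make_qa_chain()).run(user_input)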


def main():
    st.set_page_config(page_title="Audio Query Chatbot", page_icon=":microphone:", layout="wide")

    col1, col2 = st.columns([1, 2])

    with col1:
        st.header("Upload Audio File")
        uploaded_file = st.file_uploader("Choose a WAV or MP3 file", type=["wav", "mp3"], key="audio_uploader")

        if uploaded_file is not None:
            # Write the upload to a named temporary file so the AssemblyAI loader
            # can read it from a path on disk.
            with NamedTemporaryFile(suffix='.mp3') as temp:
                temp.write(uploaded_file.getvalue())
                temp.seek(0)
                docs = create_docs([temp.name])

            st.success('Audio file transcribed successfully!')

    with col2:
        st.header("Chatbot Interface")

        if uploaded_file is not None:
            with st.form(key="form"):
                user_input = st.text_input("Ask your question", key="user_input")

                st.markdown("<div><br></div>", unsafe_allow_html=True)
                st.markdown(
                    """<style>
                    #form input {margin-bottom: 15px;}
                    </style>""", unsafe_allow_html=True
                )

                submit = st.form_submit_button("Submit Question")

            if submit:
                llm = make_qa_chain()
                # "stuff" packs the whole transcript into a single prompt for the LLM.
                chain = load_qa_chain(llm, chain_type="stuff")

                result = chain.run(question=user_input, input_documents=docs)

                st.success("Query Result:")
                st.write(f"User: {user_input}")
                st.write(f"Assistant: {result}")


# Launch with: streamlit run <this file>
if __name__ == "__main__":
    main()