AlbertoFH98 committed · Commit cce1e9f · 1 Parent(s): 29eaa3f
Update app.py

app.py CHANGED
@@ -17,7 +17,6 @@ import os
 import re
 
 def main():
-    print("HOLA")
     st.set_page_config(layout="wide")
 
     # -- 1. Setup arguments
@@ -58,7 +57,6 @@ def main():
     )
     video_option_joined = '_'.join(video_option.replace(': Entrevista a ', ' ').lower().split(' ')).replace("\'", "")
     video_option_joined_path = "{}_transcription.txt".format(video_option_joined)
-    print("Filtering: {}".format(video_option_joined))
     youtube_video_url = list(podcast_url_video_df[podcast_url_video_df['podcast_name'].str.contains(video_option_joined)]['youtube_video_url'])[0].replace("\'", "")
 
     # -- 4. Setup request for system prompt
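For reference, the slug logic in the hunk above maps an episode title of the form "X: Entrevista a Y" to a lowercase, underscore-joined filename stem. A quick trace with a hypothetical title (not taken from the repo):

# Hypothetical episode title, purely to illustrate the transformation above.
video_option = "Lo mejor del podcast: Entrevista a Alberto"
video_option_joined = '_'.join(
    video_option.replace(': Entrevista a ', ' ').lower().split(' ')
).replace("\'", "")
print(video_option_joined)                    # lo_mejor_del_podcast_alberto
print("{}_transcription.txt".format(video_option_joined))
# lo_mejor_del_podcast_alberto_transcription.txt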
@@ -100,10 +98,9 @@ RESPUESTA: """
         with st.chat_message(message["role"]):
             st.markdown(message["content"])
     if prompt := st.chat_input("¡Pregunta lo que quieras!"):
-        st.session_state.messages.append({"role": "user", "content": prompt})
         with st.chat_message("user"):
             st.markdown(prompt)
-
+            st.session_state.messages.append({"role": "user", "content": prompt})
         with st.chat_message("assistant"):
             llm_response = qa_chain(prompt)
             llm_response = utils.process_llm_response(llm_response, nlp)
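Net effect of the last hunk: the user message is now appended to st.session_state.messages inside the with st.chat_message("user") block, right after it is rendered, rather than before the block (and a stray blank line goes away). A minimal runnable sketch of the resulting chat loop, with the retrieval chain stubbed out since qa_chain and utils are constructed outside this diff:

import streamlit as st

st.set_page_config(layout="wide")

# Chat history must live in session state to survive Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("¡Pregunta lo que quieras!"):
    with st.chat_message("user"):
        st.markdown(prompt)
        # As of this commit: append after rendering, inside the user block.
        st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("assistant"):
        answer = "..."  # stand-in for qa_chain(prompt) + utils.process_llm_response(...)
        st.markdown(answer)
        # Assumed: the real app stores the assistant turn similarly (not in this diff).
        st.session_state.messages.append({"role": "assistant", "content": answer})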