Spaces:
Runtime error
Runtime error
AlbertoFH98
committed on
Commit
·
999bfb6
1
Parent(s):
3a8d578
Update app.py
Browse files
app.py
CHANGED
@@ -131,7 +131,7 @@ def get_basics_comp(emb_model, model, default_system_prompt_link, _logger, podca
|
|
131 |
# -- 6. Setup model
|
132 |
together.api_key = os.environ["TOGETHER_API_KEY"]
|
133 |
#together.Models.start(model)
|
134 |
-
return together, translator, nlp, retriever, video_option, video_option_joined_path, default_system_prompt, youtube_video_url
|
135 |
|
136 |
def clean_chat():
|
137 |
st.session_state.conversation = None
|
@@ -155,9 +155,9 @@ def main():
|
|
155 |
|
156 |
podcast_url_video_df = get_podcast_data(PODCAST_URL_VIDEO_PATH)
|
157 |
|
158 |
-
together, translator, nlp, retriever, video_option,
|
159 |
-
|
160 |
-
|
161 |
|
162 |
# -- 6. Setup prompt template + llm chain
|
163 |
instruction = """CONTEXTO:/n/n {context}/n
|
@@ -191,24 +191,28 @@ RESPUESTA: """
|
|
191 |
st.markdown(prompt)
|
192 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
193 |
with st.chat_message("assistant"):
|
194 |
-
|
195 |
-
|
196 |
-
|
197 |
-
|
198 |
-
|
199 |
-
|
200 |
-
|
201 |
-
|
202 |
-
|
203 |
-
|
204 |
-
|
205 |
-
|
206 |
-
|
207 |
-
|
208 |
-
|
209 |
-
|
210 |
-
|
211 |
-
|
|
|
|
|
|
|
|
|
212 |
st.session_state.messages.append({"role": "assistant", "content": llm_response})
|
213 |
# -- Sample: streamlit run app.py -- --DEFAULT_SYSTEM_PROMPT_LINK=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/prompts/default_system_prompt.txt --PODCAST_URL_VIDEO_PATH=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/data/podcast_youtube_video.csv --TRANSCRIPTION=worldcast_roberto_vaquero --MODEL=togethercomputer/llama-2-7b-chat --EMB_MODEL=BAAI/bge-base-en-v1.5
|
214 |
if __name__ == '__main__':
|
|
|
131 |
# -- 6. Setup model
|
132 |
together.api_key = os.environ["TOGETHER_API_KEY"]
|
133 |
#together.Models.start(model)
|
134 |
+
return together, translator, nlp, retriever, video_option, video_option_joined_path, default_system_prompt, youtube_video_url, genre
|
135 |
|
136 |
def clean_chat():
|
137 |
st.session_state.conversation = None
|
|
|
155 |
|
156 |
podcast_url_video_df = get_podcast_data(PODCAST_URL_VIDEO_PATH)
|
157 |
|
158 |
+
together, translator, nlp, retriever, video_option,
|
159 |
+
video_option_joined_path, default_system_prompt, youtube_video_url, genre = get_basics_comp(EMB_MODEL, MODEL, DEFAULT_SYSTEM_PROMPT_LINK, logger,
|
160 |
+
podcast_url_video_df, img_size=100)
|
161 |
|
162 |
# -- 6. Setup prompt template + llm chain
|
163 |
instruction = """CONTEXTO:/n/n {context}/n
|
|
|
191 |
st.markdown(prompt)
|
192 |
st.session_state.messages.append({"role": "user", "content": prompt})
|
193 |
with st.chat_message("assistant"):
|
194 |
+
if not genre.contains('GPT'):
|
195 |
+
llm_response, cleaned_prompt = qa_chain(prompt)
|
196 |
+
llm_response = utils.process_llm_response(llm_response, nlp)
|
197 |
+
st.markdown(llm_response)
|
198 |
+
start_time_str_list = []; start_time_seconds_list = []; end_time_seconds_list = []
|
199 |
+
for response in llm_response.split('\n'):
|
200 |
+
if re.search(r'(\d{2}:\d{2}:\d{2}(.\d{6})?)', response) != None:
|
201 |
+
start_time_str, start_time_seconds, _, end_time_seconds = utils.add_hyperlink_and_convert_to_seconds(response, cleaned_prompt)
|
202 |
+
start_time_str_list.append(start_time_str)
|
203 |
+
start_time_seconds_list.append(start_time_seconds)
|
204 |
+
end_time_seconds_list.append(end_time_seconds)
|
205 |
+
|
206 |
+
if start_time_str_list:
|
207 |
+
for start_time_seconds, start_time_str, end_time_seconds in zip(start_time_seconds_list, start_time_str_list, end_time_seconds_list):
|
208 |
+
st.markdown("__Fragmento: " + start_time_str + "__")
|
209 |
+
_, container, _ = st.columns([SIDE, WIDTH, SIDE])
|
210 |
+
with container:
|
211 |
+
st_player(youtube_video_url.replace("?enablejsapi=1", "") + f'?start={start_time_seconds}&end={end_time_seconds}')
|
212 |
+
else:
|
213 |
+
llm_response = utils.get_gpt_response(prompt)
|
214 |
+
st.markdown(llm_response)
|
215 |
+
|
216 |
st.session_state.messages.append({"role": "assistant", "content": llm_response})
|
217 |
# -- Sample: streamlit run app.py -- --DEFAULT_SYSTEM_PROMPT_LINK=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/prompts/default_system_prompt.txt --PODCAST_URL_VIDEO_PATH=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/data/podcast_youtube_video.csv --TRANSCRIPTION=worldcast_roberto_vaquero --MODEL=togethercomputer/llama-2-7b-chat --EMB_MODEL=BAAI/bge-base-en-v1.5
|
218 |
if __name__ == '__main__':
|