Spaces:
Runtime error
Runtime error
AlbertoFH98
committed on
Commit
·
90e8e7a
1
Parent(s):
2bae7ed
Update app.py
Browse files
app.py
CHANGED
@@ -46,11 +46,72 @@ def get_basics_comp(emb_model, model, default_system_prompt_link, _logger, podca
|
|
46 |
r = requests.get("https://raw.githubusercontent.com/AlbertoUAH/Castena/main/media/castena-animated-icon.gif", stream=True)
|
47 |
icon = Image.open(r.raw)
|
48 |
icon = icon.resize((img_size, img_size))
|
49 |
-
st.sidebar.image(icon)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
video_option = st.sidebar.selectbox(
|
51 |
"Seleccione el podcast",
|
52 |
-
|
|
|
53 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
video_option_joined = '_'.join(video_option.replace(': Entrevista a ', ' ').lower().split(' ')).replace("\'", "")
|
55 |
video_option_joined_path = "{}_transcription.txt".format(video_option_joined)
|
56 |
youtube_video_url = list(podcast_url_video_df[podcast_url_video_df['podcast_name'].str.contains(video_option_joined)]['youtube_video_url'])[0].replace("\'", "")
|
@@ -67,7 +128,12 @@ def get_basics_comp(emb_model, model, default_system_prompt_link, _logger, podca
|
|
67 |
together.api_key = os.environ["TOGETHER_API_KEY"]
|
68 |
together.Models.start(model)
|
69 |
return together, translator, nlp, retriever, video_option, video_option_joined_path, default_system_prompt, youtube_video_url
|
70 |
-
|
|
|
|
|
|
|
|
|
|
|
71 |
def main():
|
72 |
args, logger = get_args()
|
73 |
B_INST, E_INST = "[INST]", "[/INST]"
|
@@ -82,13 +148,13 @@ def main():
|
|
82 |
WIDTH = 50
|
83 |
SIDE = (100 - WIDTH) / 2
|
84 |
|
|
|
85 |
podcast_url_video_df = get_podcast_data(PODCAST_URL_VIDEO_PATH)
|
86 |
|
87 |
together, translator, nlp, retriever, video_option, video_option_joined_path, default_system_prompt, youtube_video_url = get_basics_comp(EMB_MODEL, MODEL,
|
88 |
DEFAULT_SYSTEM_PROMPT_LINK, logger,
|
89 |
podcast_url_video_df, img_size=100)
|
90 |
|
91 |
-
|
92 |
# -- 6. Setup prompt template + llm chain
|
93 |
instruction = """CONTEXTO:/n/n {context}/n
|
94 |
|
@@ -111,9 +177,12 @@ RESPUESTA: """
|
|
111 |
|
112 |
if "messages" not in st.session_state:
|
113 |
st.session_state.messages = []
|
|
|
114 |
for message in st.session_state.messages:
|
115 |
with st.chat_message(message["role"]):
|
|
|
116 |
st.markdown(message["content"])
|
|
|
117 |
if prompt := st.chat_input("¡Pregunta lo que quieras!"):
|
118 |
with st.chat_message("user"):
|
119 |
st.markdown(prompt)
|
|
|
46 |
r = requests.get("https://raw.githubusercontent.com/AlbertoUAH/Castena/main/media/castena-animated-icon.gif", stream=True)
|
47 |
icon = Image.open(r.raw)
|
48 |
icon = icon.resize((img_size, img_size))
|
49 |
+
#st.sidebar.image(icon)
|
50 |
+
|
51 |
+
with st.sidebar.container():
|
52 |
+
st.markdown(
|
53 |
+
"""
|
54 |
+
<head>
|
55 |
+
<style>
|
56 |
+
.footer1 {
|
57 |
+
text-align: center;
|
58 |
+
}
|
59 |
+
</style>
|
60 |
+
</head>
|
61 |
+
<body>
|
62 |
+
<div class="footer1">
|
63 |
+
<img src=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/media/castena-animated-icon.gif width="150" height="150">
|
64 |
+
</div>
|
65 |
+
<br>
|
66 |
+
</body>
|
67 |
+
""",
|
68 |
+
unsafe_allow_html=True,
|
69 |
+
)
|
70 |
+
|
71 |
+
|
72 |
+
podcast_list = list(podcast_url_video_df['podcast_name_lit'].apply(lambda x: x.replace("'", "")))
|
73 |
video_option = st.sidebar.selectbox(
|
74 |
"Seleccione el podcast",
|
75 |
+
podcast_list,
|
76 |
+
on_change=clean_chat
|
77 |
)
|
78 |
+
|
79 |
+
# -- Add icons
|
80 |
+
with st.sidebar.container():
|
81 |
+
st.markdown(
|
82 |
+
"""
|
83 |
+
<head>
|
84 |
+
<style>
|
85 |
+
.footer2 {
|
86 |
+
position: fixed;
|
87 |
+
bottom: 2%;
|
88 |
+
left: 6.5%;
|
89 |
+
}
|
90 |
+
|
91 |
+
.footer2 a {
|
92 |
+
margin: 10px;
|
93 |
+
text-decoration: none;
|
94 |
+
}
|
95 |
+
</style>
|
96 |
+
</head>
|
97 |
+
<body>
|
98 |
+
<div class="footer2">
|
99 |
+
<a href="https://www.linkedin.com/in/alberto-fernandez-hernandez-3a3474136">
|
100 |
+
<img src="https://cdn-icons-png.flaticon.com/128/3536/3536505.png" width="32" height="32">
|
101 |
+
</a>
|
102 |
+
<a href="https://github.com/AlbertoUAH/Castena">
|
103 |
+
<img src="https://cdn-icons-png.flaticon.com/128/733/733553.png" width="32" height="32">
|
104 |
+
</a>
|
105 |
+
<a href="https://www.buymeacoffee.com/castena">
|
106 |
+
<img src="https://cdn-icons-png.flaticon.com/128/761/761767.png" width="32" height="32">
|
107 |
+
</a>
|
108 |
+
</div>
|
109 |
+
</body>
|
110 |
+
""",
|
111 |
+
unsafe_allow_html=True,
|
112 |
+
)
|
113 |
+
|
114 |
+
|
115 |
video_option_joined = '_'.join(video_option.replace(': Entrevista a ', ' ').lower().split(' ')).replace("\'", "")
|
116 |
video_option_joined_path = "{}_transcription.txt".format(video_option_joined)
|
117 |
youtube_video_url = list(podcast_url_video_df[podcast_url_video_df['podcast_name'].str.contains(video_option_joined)]['youtube_video_url'])[0].replace("\'", "")
|
|
|
128 |
together.api_key = os.environ["TOGETHER_API_KEY"]
|
129 |
together.Models.start(model)
|
130 |
return together, translator, nlp, retriever, video_option, video_option_joined_path, default_system_prompt, youtube_video_url
|
131 |
+
|
132 |
+
def clean_chat():
|
133 |
+
st.session_state.conversation = None
|
134 |
+
st.session_state.chat_history = None
|
135 |
+
st.session_state.messages = [{'role': 'assistant', 'content': 'Nuevo chat creado'}]
|
136 |
+
|
137 |
def main():
|
138 |
args, logger = get_args()
|
139 |
B_INST, E_INST = "[INST]", "[/INST]"
|
|
|
148 |
WIDTH = 50
|
149 |
SIDE = (100 - WIDTH) / 2
|
150 |
|
151 |
+
|
152 |
podcast_url_video_df = get_podcast_data(PODCAST_URL_VIDEO_PATH)
|
153 |
|
154 |
together, translator, nlp, retriever, video_option, video_option_joined_path, default_system_prompt, youtube_video_url = get_basics_comp(EMB_MODEL, MODEL,
|
155 |
DEFAULT_SYSTEM_PROMPT_LINK, logger,
|
156 |
podcast_url_video_df, img_size=100)
|
157 |
|
|
|
158 |
# -- 6. Setup prompt template + llm chain
|
159 |
instruction = """CONTEXTO:/n/n {context}/n
|
160 |
|
|
|
177 |
|
178 |
if "messages" not in st.session_state:
|
179 |
st.session_state.messages = []
|
180 |
+
|
181 |
for message in st.session_state.messages:
|
182 |
with st.chat_message(message["role"]):
|
183 |
+
print("MUESTRO! " + message["content"])
|
184 |
st.markdown(message["content"])
|
185 |
+
|
186 |
if prompt := st.chat_input("¡Pregunta lo que quieras!"):
|
187 |
with st.chat_message("user"):
|
188 |
st.markdown(prompt)
|