Update backup11.app.py
backup11.app.py CHANGED (+20 -280)
@@ -39,7 +39,6 @@ title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
 helpURL = 'https://huggingface.co/awacke1'
 bugURL = 'https://huggingface.co/spaces/awacke1'
 icons = '🚲🏆'
-
 st.set_page_config(
     page_title=title,
     page_icon=icons,
@@ -51,27 +50,29 @@ st.set_page_config(
         'About': title
     }
 )
-
-# 2. 🚲BikeAI🏆 Load environment variables and initialize clients
 load_dotenv()
-
-# OpenAI setup
 openai.api_key = os.getenv('OPENAI_API_KEY')
 if openai.api_key == None:
     openai.api_key = st.secrets['OPENAI_API_KEY']
-
 openai_client = OpenAI(
     api_key=os.getenv('OPENAI_API_KEY'),
    organization=os.getenv('OPENAI_ORG_ID')
 )
-
-# 3.🚲BikeAI🏆 Claude setup
 anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
 if anthropic_key == None:
     anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
 claude_client = anthropic.Anthropic(api_key=anthropic_key)
+API_URL = os.getenv('API_URL')
+HF_KEY = os.getenv('HF_KEY')
+MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
+MODEL2 = "openai/whisper-small.en"
+headers = {
+    "Authorization": f"Bearer {HF_KEY}",
+    "Content-Type": "application/json"
+}
 
-
+
+# 2.🚲BikeAI🏆 Initialize session states
 if 'transcript_history' not in st.session_state:
     st.session_state.transcript_history = []
 if "chat_history" not in st.session_state:
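Note: the relocated Hugging Face block above defines API_URL, HF_KEY, MODEL1/MODEL2, and the request headers, but no call site for them appears anywhere in this diff. For orientation only, a typical request against the HF Inference API using these values would look like the sketch below — query_hf, the payload shape, and the timeout are illustrative assumptions, not code from this commit:

import requests  # assumed available; not shown among this file's imports

def query_hf(prompt: str) -> str:
    """Hypothetical helper: POST a prompt to API_URL with the headers defined above."""
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 256}}
    resp = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    resp.raise_for_status()
    # Text-generation endpoints usually answer [{"generated_text": "..."}]
    return resp.json()[0].get("generated_text", "")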
@@ -83,17 +84,8 @@ if "messages" not in st.session_state:
 if 'last_voice_input' not in st.session_state:
     st.session_state.last_voice_input = ""
 
-# 5. 🚲BikeAI🏆 HuggingFace AI setup
-API_URL = os.getenv('API_URL')
-HF_KEY = os.getenv('HF_KEY')
-MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
-MODEL2 = "openai/whisper-small.en"
-headers = {
-    "Authorization": f"Bearer {HF_KEY}",
-    "Content-Type": "application/json"
-}
 
-#
+# 3. 🚲BikeAI🏆 Custom CSS
 st.markdown("""
 <style>
 .main {
@@ -134,7 +126,8 @@ st.markdown("""
 """, unsafe_allow_html=True)
 
 
-
+
+# create and save a file (and avoid the black hole of lost data 🕳)
 def generate_filename(prompt, file_type):
     """Generate a safe filename using the prompt and file type."""
     central = pytz.timezone('US/Central')
@@ -142,11 +135,6 @@ def generate_filename(prompt, file_type):
     replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
     safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
     return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
-
-
-
-# 8. Function to create and save a file (and avoid the black hole of lost data 🕳)
 def create_file(filename, prompt, response, should_save=True):
     if not should_save:
         return
@@ -163,8 +151,8 @@ def create_and_save_file(content, file_type="md", prompt=None, is_image=False, s
         else:
             f.write(prompt + "\n\n" + content if prompt else content)
     return filename
-
 
+# Load a file, base64 it, return as link
 def get_download_link(file_path):
     """Create download link for file."""
     with open(file_path, "rb") as file:
@@ -172,6 +160,7 @@ def get_download_link(file_path):
     b64 = base64.b64encode(contents).decode()
     return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'
 
+# Speech Synth Browser Style
 @st.cache_resource
 def SpeechSynthesis(result):
     """HTML5 Speech Synthesis."""
@@ -204,9 +193,7 @@ def process_image(image_input, user_prompt):
     if isinstance(image_input, str):
         with open(image_input, "rb") as image_file:
             image_input = image_file.read()
-
     base64_image = base64.b64encode(image_input).decode("utf-8")
-
     response = openai_client.chat.completions.create(
         model=st.session_state["openai_model"],
         messages=[
@@ -220,7 +207,6 @@ def process_image(image_input, user_prompt):
         ],
         temperature=0.0,
     )
-
     return response.choices[0].message.content
 
 def process_audio(audio_input, text_input=''):
@@ -228,18 +214,14 @@ def process_audio(audio_input, text_input=''):
     if isinstance(audio_input, str):
         with open(audio_input, "rb") as file:
             audio_input = file.read()
-
     transcription = openai_client.audio.transcriptions.create(
         model="whisper-1",
         file=audio_input,
     )
-
     st.session_state.messages.append({"role": "user", "content": transcription.text})
-
     with st.chat_message("assistant"):
         st.markdown(transcription.text)
     SpeechSynthesis(transcription.text)
-
     filename = generate_filename(transcription.text, "wav")
     create_and_save_file(audio_input, "wav", transcription.text, True)
 
@@ -259,14 +241,12 @@ def process_video(video_path, seconds_per_frame=1):
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
-
    video.release()
    return base64Frames, None
 
 def process_video_with_gpt(video_input, user_prompt):
     """Process video with GPT-4 vision."""
     base64Frames, _ = process_video(video_input)
-
     response = openai_client.chat.completions.create(
         model=st.session_state["openai_model"],
         messages=[
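Note: only the tail of process_video's frame-sampling loop is visible in the hunk above (the break / imencode / append lines). For context, the usual shape of this seconds_per_frame pattern with OpenCV is sketched here — everything outside those three visible lines is an illustrative reconstruction, not the committed code:

import base64
import cv2

def sample_frames(video_path, seconds_per_frame=1):
    """Hypothetical sketch: one base64 JPEG frame every seconds_per_frame seconds."""
    video = cv2.VideoCapture(video_path)
    fps = video.get(cv2.CAP_PROP_FPS) or 30  # fall back if FPS metadata is missing
    total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    step = max(1, int(fps * seconds_per_frame))
    base64Frames, curr = [], 0
    while curr < total:
        video.set(cv2.CAP_PROP_POS_FRAMES, curr)  # jump to the next sampled frame
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
        curr += step
    video.release()
    return base64Frames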
@@ -280,122 +260,6 @@ def process_video_with_gpt(video_input, user_prompt):
     )
     return response.choices[0].message.content
 
-
-
-def process_tts(text, voice="alloy"):
-    """
-    Process text-to-speech using OpenAI's TTS API
-    Voices available: alloy, echo, fable, onyx, nova, shimmer
-    """
-    try:
-        response = openai_client.audio.speech.create(
-            model="tts-1",
-            voice=voice,
-            input=text
-        )
-
-        # Generate a unique filename
-        filename = generate_filename("tts_output", "mp3")
-
-        # Save the audio file
-        response.stream_to_file(filename)
-
-        # Create audio player HTML
-        audio_html = f"""
-            <audio controls>
-                <source src="data:audio/mp3;base64,{base64.b64encode(open(filename, 'rb').read()).decode()}" type="audio/mp3">
-                Your browser does not support the audio element.
-            </audio>
-        """
-
-        return filename, audio_html
-    except Exception as e:
-        st.error(f"TTS Error: {str(e)}")
-        return None, None
-
-def update_chat_interface():
-    """Update the chat interface to include voice selection and TTS playback"""
-    # Add voice selection to sidebar
-    st.sidebar.markdown("### 🗣️ TTS Voice Settings")
-    selected_voice = st.sidebar.selectbox(
-        "Choose TTS Voice:",
-        ["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
-        help="Select the voice for text-to-speech playback"
-    )
-
-    # Store the selected voice in session state
-    if "selected_voice" not in st.session_state:
-        st.session_state.selected_voice = selected_voice
-
-# Modify the chat processing functions to include TTS
-def process_with_gpt(text_input):
-    """Process text with GPT-4o and add TTS."""
-    if text_input:
-        st.session_state.messages.append({"role": "user", "content": text_input})
-
-        with st.chat_message("user"):
-            st.markdown(text_input)
-
-        with st.chat_message("assistant"):
-            completion = openai_client.chat.completions.create(
-                model=st.session_state["openai_model"],
-                messages=[
-                    {"role": m["role"], "content": m["content"]}
-                    for m in st.session_state.messages
-                ],
-                stream=False
-            )
-            return_text = completion.choices[0].message.content
-            st.write("GPT-4o: " + return_text)
-
-            # Add TTS playback
-            filename, audio_html = process_tts(return_text, st.session_state.selected_voice)
-            if audio_html:
-                st.markdown(audio_html, unsafe_allow_html=True)
-
-            # Original file handling
-            filename = generate_filename("GPT-4o: " + return_text, "md")
-            create_file(filename, text_input, return_text)
-            st.session_state.messages.append({"role": "assistant", "content": return_text})
-            return return_text
-
-def process_with_claude(text_input):
-    """Process text with Claude and add TTS."""
-    if text_input:
-        with st.chat_message("user"):
-            st.markdown(text_input)
-
-        with st.chat_message("assistant"):
-            response = claude_client.messages.create(
-                model="claude-3-sonnet-20240229",
-                max_tokens=1000,
-                messages=[
-                    {"role": "user", "content": text_input}
-                ]
-            )
-            response_text = response.content[0].text
-            st.write("Claude: " + response_text)
-
-            # Add TTS playback
-            filename, audio_html = process_tts(response_text, st.session_state.selected_voice)
-            if audio_html:
-                st.markdown(audio_html, unsafe_allow_html=True)
-
-            # Original file handling
-            filename = generate_filename("Claude: " + response_text, "md")
-            create_file(filename, text_input, response_text)
-
-            st.session_state.chat_history.append({
-                "user": text_input,
-                "claude": response_text
-            })
-            return response_text
-
-
-
-
-
-
 
 def extract_urls(text):
     try:
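Note: the process_tts helper deleted above wraps OpenAI's text-to-speech endpoint (model tts-1; voices alloy, echo, fable, onyx, nova, shimmer). The underlying call pattern is standard in the openai 1.x client; a minimal standalone sketch for reference — tts_to_mp3 and out_path are illustrative names, not part of this commit:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def tts_to_mp3(text, voice="alloy", out_path="speech.mp3"):
    """Illustrative sketch of the call the removed helper made."""
    response = client.audio.speech.create(model="tts-1", voice=voice, input=text)
    response.stream_to_file(out_path)  # same save pattern as the removed code
    return out_path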
@@ -407,7 +271,6 @@ def extract_urls(text):
         abs_link_matches = abs_link_pattern.findall(text)
         pdf_link_matches = pdf_link_pattern.findall(text)
         title_matches = title_pattern.findall(text)
-
         # markdown with the extracted fields
         markdown_text = ""
         for i in range(len(date_matches)):
@@ -421,17 +284,14 @@ def extract_urls(text):
             markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
             markdown_text += "---\n\n"
         return markdown_text
-
     except:
         st.write('.')
         return ''
 
 
 def search_arxiv(query):
-
     st.write("Performing AI Lookup...")
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-
     result1 = client.predict(
         prompt=query,
         llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
@@ -440,7 +300,6 @@ def search_arxiv(query):
     )
     st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
     st.markdown(result1)
-
     result2 = client.predict(
         prompt=query,
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
@@ -451,7 +310,6 @@ def search_arxiv(query):
     st.markdown(result2)
     combined_result = f"{result1}\n\n{result2}"
     return combined_result
-
     #return responseall
 
 
@@ -481,7 +339,6 @@ def perform_ai_lookup(query):
     Question = '### 🔍 ' + query + '\r\n'  # Format for markdown display with links
     References = response1[0]
     ReferenceLinks = extract_urls(References)
-
     RunSecondQuery = True
     results=''
     if RunSecondQuery:
@@ -498,7 +355,6 @@ def perform_ai_lookup(query):
     # Restructure results to follow format of Question, Answer, References, ReferenceLinks
     results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
     st.markdown(results)
-
     st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
     end_time = time.strftime("%Y-%m-%d %H:%M:%S")
     start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
@@ -507,8 +363,6 @@ def perform_ai_lookup(query):
     st.write(f"Start time: {start_time}")
     st.write(f"Finish time: {end_time}")
     st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
-
-
     filename = generate_filename(query, "md")
     create_file(filename, query, results)
     return results
@@ -518,10 +372,8 @@ def process_with_gpt(text_input):
     """Process text with GPT-4o."""
     if text_input:
         st.session_state.messages.append({"role": "user", "content": text_input})
-
         with st.chat_message("user"):
             st.markdown(text_input)
-
         with st.chat_message("assistant"):
             completion = openai_client.chat.completions.create(
                 model=st.session_state["openai_model"],
@@ -533,7 +385,6 @@ def process_with_gpt(text_input):
             )
             return_text = completion.choices[0].message.content
             st.write("GPT-4o: " + return_text)
-
             #filename = generate_filename(text_input, "md")
             filename = generate_filename("GPT-4o: " + return_text, "md")
             create_file(filename, text_input, return_text)
@@ -543,10 +394,8 @@ def process_with_gpt(text_input):
 def process_with_claude(text_input):
     """Process text with Claude."""
     if text_input:
-
         with st.chat_message("user"):
             st.markdown(text_input)
-
         with st.chat_message("assistant"):
             response = claude_client.messages.create(
                 model="claude-3-sonnet-20240229",
@@ -557,11 +406,9 @@ def process_with_claude(text_input):
             )
             response_text = response.content[0].text
             st.write("Claude: " + response_text)
-
             #filename = generate_filename(text_input, "md")
             filename = generate_filename("Claude: " + response_text, "md")
             create_file(filename, text_input, response_text)
-
             st.session_state.chat_history.append({
                 "user": text_input,
                 "claude": response_text
@@ -583,8 +430,6 @@ def create_zip_of_files(files):
             zipf.write(file)
     return zip_name
 
-
-
 def get_media_html(media_path, media_type="video", width="100%"):
     """Generate HTML for media player."""
     media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
@@ -606,9 +451,7 @@ def get_media_html(media_path, media_type="video", width="100%"):
 def create_media_gallery():
     """Create the media gallery interface."""
     st.header("🎬 Media Gallery")
-
     tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
-
     with tabs[0]:
         image_files = glob.glob("*.png") + glob.glob("*.jpg")
         if image_files:
@@ -618,13 +461,11 @@ def create_media_gallery():
             with cols[idx % num_cols]:
                 img = Image.open(image_file)
                 st.image(img, use_container_width=True)
-
                 # Add GPT vision analysis option
                 if st.button(f"Analyze {os.path.basename(image_file)}"):
                     analysis = process_image(image_file,
                         "Describe this image in detail and identify key elements.")
                     st.markdown(analysis)
-
     with tabs[1]:
         audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
         for audio_file in audio_files:
@@ -634,7 +475,6 @@ def create_media_gallery():
             with open(audio_file, "rb") as f:
                 transcription = process_audio(f)
                 st.write(transcription)
-
     with tabs[2]:
         video_files = glob.glob("*.mp4")
         for video_file in video_files:
@@ -646,29 +486,23 @@ def create_media_gallery():
                 st.markdown(analysis)
 
 
-
 def display_file_manager():
     """Display file management sidebar with guaranteed unique button keys."""
     st.sidebar.title("📁 File Management")
-
     all_files = glob.glob("*.md")
     all_files.sort(reverse=True)
-
     if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
         for file in all_files:
             os.remove(file)
         st.rerun()
-
     if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
         zip_file = create_zip_of_files(all_files)
         st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
-
     # Create unique keys using file attributes
     for idx, file in enumerate(all_files):
         # Get file stats for unique identification
         file_stat = os.stat(file)
         unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
-
         col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
         with col1:
             if st.button("🌐", key=f"view_{unique_id}"):
@@ -686,8 +520,6 @@ def display_file_manager():
         st.rerun()
 
 
-
-
 # Speech Recognition HTML Component
 speech_recognition_html = """
 <!DOCTYPE html>
@@ -985,7 +817,7 @@ def main():
 
     # Main navigation
     tab_main = st.radio("Choose Action:",
-                        ["🎤 Voice Input", "
+                        ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
                         horizontal=True)
 
     if tab_main == "🎤 Voice Input":
@@ -1003,105 +835,13 @@ def main():
             st.session_state.voice_transcript = transcript
 
         # Display the transcript in a Streamlit text area
-        st.markdown("### Processed Voice Input:")
-        st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
+        # st.markdown("### Processed Voice Input:")
+        # st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)
+
-
-        # Add functionality to process the transcript
-        if st.button("Process Transcript"):
-            st.subheader("AI Response to Transcript")
-            gpt_response = process_with_gpt(st.session_state.voice_transcript)
-            st.markdown(gpt_response)
-
-        # Option to clear the transcript
-        if st.button("Clear Transcript"):
-            st.session_state.voice_transcript = ""
-            st.rerun()
-
-
-        # Buttons to process the transcript
-        if st.button("Search with GPT"):
-            st.subheader("GPT-4o Response")
-            gpt_response = process_with_gpt(st.session_state.voice_transcript)
-            st.markdown(gpt_response)
-
-        if st.button("Search with Claude"):
-            st.subheader("Claude Response")
-            claude_response = process_with_claude(st.session_state.voice_transcript)
-            st.markdown(claude_response)
-
-        if st.button("Search ArXiv"):
-            st.subheader("ArXiv Search Results")
-            arxiv_results = perform_ai_lookup(st.session_state.voice_transcript)
-            st.markdown(arxiv_results)
-
-
-        # Display last voice input
-        if st.session_state.last_voice_input:
-            st.text_area("Last Voice Input:", st.session_state.last_voice_input, height=100)
-
-
-        # Model Selection
-        model_choice = st.sidebar.radio(
-            "Choose AI Model:",
-            ["GPT-4o", "Claude-3", "GPT+Claude+Arxiv"]
-        )
-
-        # Chat Interface
-        user_input = st.text_area("Message:", height=100)
-
-        if st.button("Send 📨"):
-            if user_input:
-                if model_choice == "GPT-4o":
-                    gpt_response = process_with_gpt(user_input)
-                elif model_choice == "Claude-3":
-                    claude_response = process_with_claude(user_input)
-                else:  # Both
-                    col1, col2, col3 = st.columns(3)
-                    with col2:
-                        st.subheader("Claude-3.5 Sonnet:")
-                        try:
-                            claude_response = process_with_claude(user_input)
-                        except:
-                            st.write('Claude 3.5 Sonnet out of tokens.')
-                    with col1:
-                        st.subheader("GPT-4o Omni:")
-                        try:
-                            gpt_response = process_with_gpt(user_input)
-                        except:
-                            st.write('GPT 4o out of tokens')
-                    with col3:
-                        st.subheader("Arxiv and Mistral Research:")
-                        with st.spinner("Searching ArXiv..."):
-                            #results = search_arxiv(user_input)
-                            results = perform_ai_lookup(user_input)
-
-                            st.markdown(results)
-
-        # Display Chat History
-        st.subheader("Chat History 📜")
-        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])
-
-        with tab1:
-            for chat in st.session_state.chat_history:
-                st.text_area("You:", chat["user"], height=100)
-                st.text_area("Claude:", chat["claude"], height=200)
-                st.markdown(chat["claude"])
-
-        with tab2:
-            for message in st.session_state.messages:
-                with st.chat_message(message["role"]):
-                    st.markdown(message["content"])
-
-
-        # ------------------------------------------------------- ************************* --->
-
-
-
-    if tab_main == "💬 Chat":
     # Model Selection
     model_choice = st.sidebar.radio(
         "Choose AI Model:",
-        ["GPT
+        [ "GPT+Claude+Arxiv", "GPT-4o", "Claude-3"]
     )
 
     # Chat Interface