awacke1 committed on
Commit
a1ed500
•
1 Parent(s): 48c25ab

Create backup12.app.py

Files changed (1)
  1. backup12.app.py +968 -0
backup12.app.py ADDED
@@ -0,0 +1,968 @@
# The length of this program is 968 lines, I believe. Can you rewrite the code to reduce the number of lines while still making it better, using emojis and other things appropriate to a Unicode-compliant Streamlit Python program launching on Linux on Hugging Face?
import streamlit as st
import anthropic
import openai
import base64
from datetime import datetime
import plotly.graph_objects as go
import cv2
import glob
import json
import math
import os
import pytz
import random
import re
import requests
import streamlit.components.v1 as components
import textract
import time
import zipfile
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from dotenv import load_dotenv
from gradio_client import Client
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
import extra_streamlit_components as stx
from streamlit.runtime.scriptrunner import get_script_run_ctx

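# NOTE: several of these imports (plotly, json, math, random, requests, textract,
# audio_recorder, BeautifulSoup, deque, InferenceClient, BytesIO, PdfReader,
# quote, ElementTree, stx, get_script_run_ctx) appear to be unused in this file
# and are natural candidates to drop when shortening the program.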

# 1. 🚲BikeAI🏆 Configuration and Setup
Site_Name = '🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI'
title = "🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = '🚲🏆'
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:
    openai.api_key = st.secrets['OPENAI_API_KEY']
openai_client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY'),
    organization=os.getenv('OPENAI_ORG_ID')
)
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)
API_URL = os.getenv('API_URL')
HF_KEY = os.getenv('HF_KEY')
MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
MODEL2 = "openai/whisper-small.en"
headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "application/json"
}
# Markdown target for viewing files in markdown (the number one feature)
markdown_target = st.empty()

# 2. 🚲BikeAI🏆 Initialize session states
if 'transcript_history' not in st.session_state:
    st.session_state.transcript_history = []
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []
if 'last_voice_input' not in st.session_state:
    st.session_state.last_voice_input = ""

# 3. 🚲BikeAI🏆 Custom CSS
st.markdown("""
    <style>
    .main {
        background: linear-gradient(to right, #1a1a1a, #2d2d2d);
        color: #ffffff;
    }
    .stMarkdown {
        font-family: 'Helvetica Neue', sans-serif;
    }
    .category-header {
        background: linear-gradient(45deg, #2b5876, #4e4376);
        padding: 20px;
        border-radius: 10px;
        margin: 10px 0;
    }
    .scene-card {
        background: rgba(0,0,0,0.3);
        padding: 15px;
        border-radius: 8px;
        margin: 10px 0;
        border: 1px solid rgba(255,255,255,0.1);
    }
    .media-gallery {
        display: grid;
        gap: 1rem;
        padding: 1rem;
    }
    .bike-card {
        background: rgba(255,255,255,0.05);
        border-radius: 10px;
        padding: 15px;
        transition: transform 0.3s;
    }
    .bike-card:hover {
        transform: scale(1.02);
    }
    </style>
""", unsafe_allow_html=True)

# Create and save a file (and avoid the black hole of lost data 🕳)
def generate_filename(prompt, file_type):
    """Generate a safe filename using the prompt and file type."""
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    #safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:90]  # Long enough to be descriptive, but short enough that path length doesn't break unzipping
    return f"{safe_date_time}_{safe_prompt}.{file_type}"


def create_file(filename, prompt, response, should_save=True):
    if not should_save:
        return
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(prompt + "\n\n" + response)

def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Create and save a file with proper handling of different content types."""
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    if is_image:
        # Binary payloads (image or audio bytes) must be written in binary mode.
        with open(filename, "wb") as f:
            f.write(content)
    else:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename

# Load a file, base64 it, return as link
def get_download_link(file_path):
    """Create download link for file."""
    with open(file_path, "rb") as file:
        contents = file.read()
    b64 = base64.b64encode(contents).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file_path)}">Download {os.path.basename(file_path)}📂</a>'

# Speech synthesis, browser style
@st.cache_resource
def SpeechSynthesis(result):
    """HTML5 Speech Synthesis."""
    documentHTML5 = f'''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {{
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }}
        </script>
    </head>
    <body>
        <h1>🔊 Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">{result}</textarea>
        <br>
        <button onclick="readAloud()">🔊 Read Aloud</button>
    </body>
    </html>
    '''
    components.html(documentHTML5, width=1280, height=300)

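# NOTE: because SpeechSynthesis is wrapped in @st.cache_resource, its body (and
# therefore the components.html call) only runs on a cache miss, so repeated
# requests to read the same text aloud will not re-render the widget.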
# Media Processing Functions
def process_image(image_input, user_prompt):
    """Process image with GPT-4o vision."""
    if isinstance(image_input, str):
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()
    base64_image = base64.b64encode(image_input).decode("utf-8")
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                {"type": "image_url", "image_url": {
                    "url": f"data:image/png;base64,{base64_image}"
                }}
            ]}
        ],
        temperature=0.0,
    )
    return response.choices[0].message.content

def process_audio(audio_input, text_input=''):
    """Process audio with Whisper and GPT."""
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()
    # Note: passing raw bytes relies on the client inferring the audio format;
    # a named file-like object is more robust.
    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_input,
    )
    st.session_state.messages.append({"role": "user", "content": transcription.text})
    with st.chat_message("assistant"):
        st.markdown(transcription.text)
        SpeechSynthesis(transcription.text)
    filename = generate_filename(transcription.text, "wav")
    create_and_save_file(audio_input, "wav", transcription.text, True)
    return transcription.text  # return the transcript so callers (e.g. the media gallery) can display it

# Modified video processing function without the moviepy dependency
def process_video(video_path, seconds_per_frame=1):
    """Extract frames from a video file as base64-encoded JPEGs."""
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    frames_to_skip = int(fps * seconds_per_frame)

    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
    video.release()
    return base64Frames, None

def process_video_with_gpt(video_input, user_prompt):
    """Process video with GPT-4 vision."""
    base64Frames, _ = process_video(video_input)
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": [
                {"type": "text", "text": user_prompt},
                *[{"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
                  for frame in base64Frames]
            ]}
        ]
    )
    return response.choices[0].message.content

def extract_urls(text):
    try:
        date_pattern = re.compile(r'### (\d{2} \w{3} \d{4})')
        abs_link_pattern = re.compile(r'\[(.*?)\]\((https://arxiv\.org/abs/\d+\.\d+)\)')
        pdf_link_pattern = re.compile(r'\[⬇️\]\((https://arxiv\.org/pdf/\d+\.\d+)\)')
        title_pattern = re.compile(r'### \d{2} \w{3} \d{4} \| \[(.*?)\]')
        date_matches = date_pattern.findall(text)
        abs_link_matches = abs_link_pattern.findall(text)
        pdf_link_matches = pdf_link_pattern.findall(text)
        title_matches = title_pattern.findall(text)
        # Markdown built from the extracted fields
        markdown_text = ""
        for i in range(len(date_matches)):
            date = date_matches[i]
            title = title_matches[i]
            abs_link = abs_link_matches[i][1]
            pdf_link = pdf_link_matches[i]
            markdown_text += f"**Date:** {date}\n\n"
            markdown_text += f"**Title:** {title}\n\n"
            markdown_text += f"**Abstract Link:** [{abs_link}]({abs_link})\n\n"
            markdown_text += f"**PDF Link:** [{pdf_link}]({pdf_link})\n\n"
            markdown_text += "---\n\n"
        return markdown_text
    except Exception:
        st.write('.')
        return ''


def search_arxiv(query):
    st.write("Performing AI Lookup...")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    result1 = client.predict(
        prompt=query,
        llm_model_picked="mistralai/Mixtral-8x7B-Instruct-v0.1",
        stream_outputs=True,
        api_name="/ask_llm"
    )
    st.markdown("### Mixtral-8x7B-Instruct-v0.1 Result")
    st.markdown(result1)
    result2 = client.predict(
        prompt=query,
        llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
        stream_outputs=True,
        api_name="/ask_llm"
    )
    st.markdown("### Mistral-7B-Instruct-v0.2 Result")
    st.markdown(result2)
    combined_result = f"{result1}\n\n{result2}"
    return combined_result


# Function to generate a filename based on prompt and time (because names matter 🕒)
def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    safe_prompt = re.sub(r'\W+', '_', prompt)[:90]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

# Function to create and save a file (and avoid the black hole of lost data 🕳)
def create_file(filename, prompt, response):
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(prompt + "\n\n" + response)

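# NOTE: generate_filename and create_file are redefined above, shadowing the
# versions defined earlier in the file; since Python binds names at call time,
# the definitions that appear last in the file are the ones actually used.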
def perform_ai_lookup(query):
    start_time = time.strftime("%Y-%m-%d %H:%M:%S")
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    response1 = client.predict(
        query,
        20,
        "Semantic Search",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md"
    )
    Question = '### 🔎 ' + query + '\r\n'  # Format for markdown display with links
    References = response1[0]
    ReferenceLinks = extract_urls(References)
    RunSecondQuery = True
    results = ''
    if RunSecondQuery:
        # Search 2 - Retrieve the Summary with Papers Context and Original Query
        response2 = client.predict(
            query,
            "mistralai/Mixtral-8x7B-Instruct-v0.1",
            True,
            api_name="/ask_llm"
        )
        if len(response2) > 10:
            Answer = response2
            SpeechSynthesis(Answer)
            # Restructure results to follow the format: Question, Answer, References, ReferenceLinks
            results = Question + '\r\n' + Answer + '\r\n' + References + '\r\n' + ReferenceLinks
            st.markdown(results)
    st.write('🔍 Run of Multi-Agent System Paper Summary Spec is Complete')
    end_time = time.strftime("%Y-%m-%d %H:%M:%S")
    start_timestamp = time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S"))
    end_timestamp = time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S"))
    elapsed_seconds = end_timestamp - start_timestamp
    st.write(f"Start time: {start_time}")
    st.write(f"Finish time: {end_time}")
    st.write(f"Elapsed time: {elapsed_seconds:.2f} seconds")
    filename = generate_filename(query, "md")
    create_file(filename, query, results)
    return results

# Chat Processing Functions
def process_with_gpt(text_input):
    """Process text with GPT-4o."""
    if text_input:
        st.session_state.messages.append({"role": "user", "content": text_input})
        with st.chat_message("user"):
            st.markdown(text_input)
        with st.chat_message("assistant"):
            completion = openai_client.chat.completions.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=False
            )
            return_text = completion.choices[0].message.content
            st.write("GPT-4o: " + return_text)
            #filename = generate_filename(text_input, "md")
            filename = generate_filename("GPT-4o: " + return_text, "md")
            create_file(filename, text_input, return_text)
            st.session_state.messages.append({"role": "assistant", "content": return_text})
        return return_text

def process_with_claude(text_input):
    """Process text with Claude."""
    if text_input:
        with st.chat_message("user"):
            st.markdown(text_input)
        with st.chat_message("assistant"):
            response = claude_client.messages.create(
                model="claude-3-sonnet-20240229",
                max_tokens=1000,
                messages=[
                    {"role": "user", "content": text_input}
                ]
            )
            response_text = response.content[0].text
            st.write("Claude: " + response_text)
            #filename = generate_filename(text_input, "md")
            filename = generate_filename("Claude: " + response_text, "md")
            create_file(filename, text_input, response_text)
            st.session_state.chat_history.append({
                "user": text_input,
                "claude": response_text
            })
        return response_text

# File Management Functions
def load_file(file_name):
    """Load file content."""
    with open(file_name, "r", encoding='utf-8') as file:
        content = file.read()
    return content

def create_zip_of_files(files):
    """Create zip archive of files."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

def get_media_html(media_path, media_type="video", width="100%"):
    """Generate HTML for media player."""
    media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
    if media_type == "video":
        return f'''
        <video width="{width}" controls autoplay muted loop>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''

def create_media_gallery():
    """Create the media gallery interface."""
    st.header("🎬 Media Gallery")
    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])
    with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_container_width=True)
                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                                                 "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)
    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                        st.write(transcription)
    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                                                      "Describe what's happening in this video.")
                    st.markdown(analysis)


def display_file_manager():
    """Display file management sidebar with guaranteed unique button keys."""
    st.sidebar.title("📁 File Management")
    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)
    if st.sidebar.button("🗑 Delete All", key="delete_all_files_button"):
        for file in all_files:
            os.remove(file)
        st.rerun()
    if st.sidebar.button("⬇️ Download All", key="download_all_files_button"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)
    # Create unique keys using file attributes
    for idx, file in enumerate(all_files):
        # Get file stats for unique identification
        file_stat = os.stat(file)
        unique_id = f"{idx}_{file_stat.st_size}_{file_stat.st_mtime}"
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("🌐", key=f"view_{unique_id}"):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key=f"edit_{unique_id}"):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key=f"delete_{unique_id}"):
                os.remove(file)
                st.rerun()

# Speech Recognition HTML Component
speech_recognition_html = """
<!DOCTYPE html>
<html>
<head>
    <title>Continuous Speech Demo</title>
    <style>
        body {
            font-family: sans-serif;
            padding: 20px;
            max-width: 800px;
            margin: 0 auto;
        }
        button {
            padding: 10px 20px;
            margin: 10px 5px;
            font-size: 16px;
        }
        #status {
            margin: 10px 0;
            padding: 10px;
            background: #e8f5e9;
            border-radius: 4px;
        }
        #output {
            white-space: pre-wrap;
            padding: 15px;
            background: #f5f5f5;
            border-radius: 4px;
            margin: 10px 0;
            min-height: 100px;
            max-height: 400px;
            overflow-y: auto;
        }
        .controls {
            margin: 10px 0;
        }
    </style>
</head>
<body>
    <div class="controls">
        <button id="start">Start Listening</button>
        <button id="stop" disabled>Stop Listening</button>
        <button id="clear">Clear Text</button>
    </div>
    <div id="status">Ready</div>
    <div id="output"></div>

    <!-- Hidden input that mirrors the transcript for the host page -->
    <input type="hidden" id="streamlit-data" value="">

    <script>
        if (!('webkitSpeechRecognition' in window)) {
            alert('Speech recognition not supported');
        } else {
            const recognition = new webkitSpeechRecognition();
            const startButton = document.getElementById('start');
            const stopButton = document.getElementById('stop');
            const clearButton = document.getElementById('clear');
            const status = document.getElementById('status');
            const output = document.getElementById('output');
            let fullTranscript = '';
            let lastUpdateTime = Date.now();

            // Configure recognition
            recognition.continuous = true;
            recognition.interimResults = true;

            // Function to start recognition
            const startRecognition = () => {
                try {
                    recognition.start();
                    status.textContent = 'Listening...';
                    startButton.disabled = true;
                    stopButton.disabled = false;
                } catch (e) {
                    console.error(e);
                    status.textContent = 'Error: ' + e.message;
                }
            };

            // Auto-start on load
            window.addEventListener('load', () => {
                setTimeout(startRecognition, 1000);
            });

            startButton.onclick = startRecognition;

            stopButton.onclick = () => {
                recognition.stop();
                status.textContent = 'Stopped';
                startButton.disabled = false;
                stopButton.disabled = true;
            };

            clearButton.onclick = () => {
                fullTranscript = '';
                output.textContent = '';
                window.parent.postMessage({
                    type: 'clear_transcript',
                }, '*');
            };

            recognition.onresult = (event) => {
                let interimTranscript = '';
                let finalTranscript = '';

                for (let i = event.resultIndex; i < event.results.length; i++) {
                    const transcript = event.results[i][0].transcript;
                    if (event.results[i].isFinal) {
                        finalTranscript += transcript + '\\n';
                    } else {
                        interimTranscript += transcript;
                    }
                }

                if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                    if (finalTranscript) {
                        fullTranscript += finalTranscript;
                        // Update the hidden input value
                        document.getElementById('streamlit-data').value = fullTranscript;
                    }
                    lastUpdateTime = Date.now();
                }

                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                output.scrollTop = output.scrollHeight;

                document.getElementById('streamlit-data').value = fullTranscript;
            };

            recognition.onend = () => {
                if (!stopButton.disabled) {
                    try {
                        recognition.start();
                        console.log('Restarted recognition');
                    } catch (e) {
                        console.error('Failed to restart recognition:', e);
                        status.textContent = 'Error restarting: ' + e.message;
                        startButton.disabled = false;
                        stopButton.disabled = true;
                    }
                }
            };

            recognition.onerror = (event) => {
                console.error('Recognition error:', event.error);
                status.textContent = 'Error: ' + event.error;

                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
                    startButton.disabled = false;
                    stopButton.disabled = true;
                }
            };
        }
    </script>
</body>
</html>
"""

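# NOTE: speech_recognition_html is currently dead code; the only call that would
# render it (st.components.v1.html(speech_recognition_html, ...) in main()) is
# commented out, so this whole block is another candidate for removal when
# shortening the file.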
# Helper Functions
def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    safe_prompt = re.sub(r'\s+', ' ', replaced_prompt).strip()[:230]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
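# NOTE: this third definition of generate_filename is the one in effect at
# runtime, since it is executed last. It truncates to 230 characters, which
# reintroduces the long-path unzip problem the 90-character version above was
# written to avoid.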

# File Management Functions
def load_file(file_name):
    """Load file content."""
    with open(file_name, "r", encoding='utf-8') as file:
        content = file.read()
    return content

def create_zip_of_files(files):
    """Create zip archive of files."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name

def get_download_link(file):
    """Create download link for file."""
    with open(file, "rb") as f:
        contents = f.read()
    b64 = base64.b64encode(contents).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}📂</a>'

def display_file_manager():
    """Display file management sidebar."""
    st.sidebar.title("📁 File Management")

    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)

    if st.sidebar.button("🗑 Delete All"):
        for file in all_files:
            os.remove(file)
        st.rerun()

    if st.sidebar.button("⬇️ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)

    for file in all_files:
        col1, col2, col3, col4 = st.sidebar.columns([1, 3, 1, 1])
        with col1:
            if st.button("🌐", key="view_" + file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
                st.write(file)
                markdown_target.markdown(st.session_state.file_content)  # view 🌐
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("📂", key="edit_" + file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("🗑", key="delete_" + file):
                os.remove(file)
                st.rerun()

def create_media_gallery():
    """Create the media gallery interface."""
    st.header("🎬 Media Gallery")

    tabs = st.tabs(["🖼️ Images", "🎵 Audio", "🎥 Video"])

    with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_container_width=True)

                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                                                 "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)

    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎵 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                        st.write(transcription)

    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"🎥 {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                                                      "Describe what's happening in this video.")
                    st.markdown(analysis)


def get_media_html(media_path, media_type="video", width="100%"):
    """Generate HTML for media player."""
    media_data = base64.b64encode(open(media_path, 'rb').read()).decode()
    if media_type == "video":
        return f'''
        <video width="{width}" controls autoplay muted loop>
            <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
            Your browser does not support the video tag.
        </video>
        '''
    else:  # audio
        return f'''
        <audio controls style="width: {width};">
            <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
            Your browser does not support the audio element.
        </audio>
        '''

@st.cache_resource
def set_transcript(text):
    """Set transcript in session state."""
    st.session_state.voice_transcript = text

def main():
    st.sidebar.markdown("### 🚲BikeAI🏆 Claude and GPT Multi-Agent Research AI")

    # Main navigation
    tab_main = st.radio("Choose Action:",
                        ["🎤 Voice Input", "📸 Media Gallery", "🔍 Search ArXiv", "📝 File Editor"],
                        horizontal=True)
    # Model Selection
    model_choice = st.sidebar.radio(
        "Choose AI Model:",
        ["GPT+Claude+Arxiv", "GPT-4o", "Claude-3"]
    )

    # 🏆################ Component Magic ###############🏆
    mycomponent = components.declare_component("mycomponent", path="mycomponent")  # load from __init__.py and index.html in the mycomponent folder
    from mycomponent import mycomponent  # note: this import shadows the handle declared on the previous line
    value = mycomponent(my_input_value="hello there")
    st.write("Received", value)  # value is the full speech-recognition text result, with \n dividing entries
    if value is not None:
        user_input = value
        if model_choice == "GPT-4o":
            gpt_response = process_with_gpt(user_input)
        elif model_choice == "Claude-3":
            claude_response = process_with_claude(user_input)
        else:  # All three AIs!
            col1, col2, col3 = st.columns(3)
            with col2:
                st.subheader("Claude-3.5 Sonnet:")
                try:
                    claude_response = process_with_claude(user_input)
                except Exception:
                    st.write('Claude 3.5 Sonnet out of tokens.')
            with col1:
                st.subheader("GPT-4o Omni:")
                try:
                    gpt_response = process_with_gpt(user_input)
                except Exception:
                    st.write('GPT-4o out of tokens.')
            with col3:
                st.subheader("Arxiv and Mistral Research:")
                with st.spinner("Searching ArXiv..."):
                    try:
                        results = perform_ai_lookup(user_input)
                        st.markdown(results)
                    except Exception:
                        st.write("Arxiv Mistral too busy - try again.")
    # 🏆################ Component Magic ###############🏆

    if tab_main == "🎤 Voice Input":
        st.subheader("Voice Recognition")

        # Initialize session state for the transcript
        if 'voice_transcript' not in st.session_state:
            st.session_state.voice_transcript = ""

        # Display speech recognition component and capture returned value
        #transcript = st.components.v1.html(speech_recognition_html, height=400)

        # Update session state if there's new data
        #if transcript is not None and transcript != "":
        #    st.session_state.voice_transcript = transcript

        # Display the transcript in a Streamlit text area
        #st.markdown("### Processed Voice Input:")
        #st.text_area("Voice Transcript", st.session_state.voice_transcript, height=100)

        # Chat Interface
        user_input = st.text_area("Message:", height=100)

        if st.button("Send 📨"):
            if user_input:
                if model_choice == "GPT-4o":
                    gpt_response = process_with_gpt(user_input)
                elif model_choice == "Claude-3":
                    claude_response = process_with_claude(user_input)
                else:  # Both
                    col1, col2, col3 = st.columns(3)
                    with col2:
                        st.subheader("Claude-3.5 Sonnet:")
                        try:
                            claude_response = process_with_claude(user_input)
                        except Exception:
                            st.write('Claude 3.5 Sonnet out of tokens.')
                    with col1:
                        st.subheader("GPT-4o Omni:")
                        try:
                            gpt_response = process_with_gpt(user_input)
                        except Exception:
                            st.write('GPT-4o out of tokens.')
                    with col3:
                        st.subheader("Arxiv and Mistral Research:")
                        with st.spinner("Searching ArXiv..."):
                            #results = search_arxiv(user_input)
                            results = perform_ai_lookup(user_input)
                            st.markdown(results)

        # Display Chat History
        st.subheader("Chat History 📜")
        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])

        with tab1:
            for chat in st.session_state.chat_history:
                st.text_area("You:", chat["user"], height=100)
                st.text_area("Claude:", chat["claude"], height=200)
                st.markdown(chat["claude"])

        with tab2:
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

    elif tab_main == "📸 Media Gallery":
        create_media_gallery()

    elif tab_main == "🔍 Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "📝 File Editor":
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show the file manager in the sidebar
    display_file_manager()

if __name__ == "__main__":
    main()
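# To try this file locally (assuming the imports above are installed), rename or
# copy it to app.py and run it the way Hugging Face Spaces serves Streamlit apps
# on Linux:
#   streamlit run app.py --server.port 7860 --server.address 0.0.0.0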