import streamlit as st
import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, textract, time, zipfile
import plotly.graph_objects as go
import streamlit.components.v1 as components
from datetime import datetime
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import defaultdict, deque, Counter
from dotenv import load_dotenv
from gradio_client import Client
from huggingface_hub import InferenceClient
from io import BytesIO
from PIL import Image
from PyPDF2 import PdfReader
from urllib.parse import quote
from xml.etree import ElementTree as ET
from openai import OpenAI
import extra_streamlit_components as stx
from streamlit.runtime.scriptrunner import get_script_run_ctx
import asyncio
import edge_tts
from streamlit_marquee import streamlit_marquee
# ─────────────────────────────────────────────────────────
# 1. CORE CONFIGURATION & SETUP
# ─────────────────────────────────────────────────────────
st.set_page_config(
    page_title="🚲TalkingAIResearcher🏆",
    page_icon="🚲🏆",
layout="wide",
initial_sidebar_state="auto",
menu_items={
'Get Help': 'https://huggingface.co/awacke1',
'Report a bug': 'https://huggingface.co/spaces/awacke1',
        'About': "🚲TalkingAIResearcher🏆"
}
)
load_dotenv()
# Available English voices for Edge TTS
EDGE_TTS_VOICES = [
"en-US-AriaNeural",
"en-US-GuyNeural",
"en-US-JennyNeural",
"en-GB-SoniaNeural",
"en-GB-RyanNeural",
"en-AU-NatashaNeural",
"en-AU-WilliamNeural",
"en-CA-ClaraNeural",
"en-CA-LiamNeural"
]
# Session state variables
if 'marquee_settings' not in st.session_state:
st.session_state['marquee_settings'] = {
"background": "#1E1E1E",
"color": "#FFFFFF",
"font-size": "14px",
"animationDuration": "20s",
"width": "100%",
"lineHeight": "35px"
}
if 'tts_voice' not in st.session_state:
st.session_state['tts_voice'] = EDGE_TTS_VOICES[0]
if 'audio_format' not in st.session_state:
st.session_state['audio_format'] = 'mp3'
if 'transcript_history' not in st.session_state:
st.session_state['transcript_history'] = []
if 'chat_history' not in st.session_state:
st.session_state['chat_history'] = []
if 'openai_model' not in st.session_state:
st.session_state['openai_model'] = "gpt-4o-2024-05-13"
if 'messages' not in st.session_state:
st.session_state['messages'] = []
if 'last_voice_input' not in st.session_state:
st.session_state['last_voice_input'] = ""
if 'editing_file' not in st.session_state:
st.session_state['editing_file'] = None
if 'edit_new_name' not in st.session_state:
st.session_state['edit_new_name'] = ""
if 'edit_new_content' not in st.session_state:
st.session_state['edit_new_content'] = ""
if 'viewing_prefix' not in st.session_state:
st.session_state['viewing_prefix'] = None
if 'should_rerun' not in st.session_state:
st.session_state['should_rerun'] = False
if 'old_val' not in st.session_state:
st.session_state['old_val'] = None
if 'last_query' not in st.session_state:
st.session_state['last_query'] = ""
if 'marquee_content' not in st.session_state:
    st.session_state['marquee_content'] = "🚀 Welcome to TalkingAIResearcher | 🤖 Your Research Assistant"
# API Keys
openai_api_key = os.getenv('OPENAI_API_KEY', "")
anthropic_key = os.getenv('ANTHROPIC_API_KEY_3', "")
xai_key = os.getenv('xai',"")
if 'OPENAI_API_KEY' in st.secrets:
openai_api_key = st.secrets['OPENAI_API_KEY']
if 'ANTHROPIC_API_KEY' in st.secrets:
anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
openai.api_key = openai_api_key
openai_client = OpenAI(api_key=openai.api_key, organization=os.getenv('OPENAI_ORG_ID'))
HF_KEY = os.getenv('HF_KEY')
API_URL = os.getenv('API_URL')
# Helper constants
FILE_EMOJIS = {
"md": "πŸ“",
"mp3": "🎡",
"wav": "πŸ”Š"
}
# ─────────────────────────────────────────────────────────
# 2. HELPER FUNCTIONS
# ─────────────────────────────────────────────────────────
def get_central_time():
"""Get current time in US Central timezone."""
central = pytz.timezone('US/Central')
return datetime.now(central)
def format_timestamp_prefix():
"""Generate timestamp prefix in format MM_dd_yy_hh_mm_AM/PM."""
ct = get_central_time()
return ct.strftime("%m_%d_%y_%I_%M_%p")
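# Illustrative example (hypothetical clock time): for 2:05 PM US Central on Jan 2, 2025,
# format_timestamp_prefix() returns "01_02_25_02_05_PM".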
def initialize_marquee_settings():
if 'marquee_settings' not in st.session_state:
st.session_state['marquee_settings'] = {
"background": "#1E1E1E",
"color": "#FFFFFF",
"font-size": "14px",
"animationDuration": "20s",
"width": "100%",
"lineHeight": "35px"
}
def get_marquee_settings():
initialize_marquee_settings()
return st.session_state['marquee_settings']
def update_marquee_settings_ui():
"""Add color pickers & sliders for marquee config in sidebar."""
st.sidebar.markdown("### 🎯 Marquee Settings")
cols = st.sidebar.columns(2)
with cols[0]:
bg_color = st.color_picker("🎨 Background",
st.session_state['marquee_settings']["background"],
key="bg_color_picker")
text_color = st.color_picker("✍️ Text",
st.session_state['marquee_settings']["color"],
key="text_color_picker")
with cols[1]:
        font_size = st.slider("📏 Size", 10, 24, 14, key="font_size_slider")
duration = st.slider("⏱️ Speed", 1, 20, 20, key="duration_slider")
st.session_state['marquee_settings'].update({
"background": bg_color,
"color": text_color,
"font-size": f"{font_size}px",
"animationDuration": f"{duration}s"
})
def display_marquee(text, settings, key_suffix=""):
"""Show marquee text with style from settings."""
truncated_text = text[:280] + "..." if len(text) > 280 else text
streamlit_marquee(
content=truncated_text,
**settings,
key=f"marquee_{key_suffix}"
)
st.write("")
def get_high_info_terms(text: str, top_n=10) -> list:
"""Extract top_n freq words or bigrams (excluding stopwords)."""
stop_words = set(['the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with'])
words = re.findall(r'\b\w+(?:-\w+)*\b', text.lower())
bi_grams = [' '.join(pair) for pair in zip(words, words[1:])]
combined = words + bi_grams
filtered = [term for term in combined if term not in stop_words and len(term.split()) <= 2]
counter = Counter(filtered)
return [term for term, freq in counter.most_common(top_n)]
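# Illustrative example (hypothetical input; exact ranking depends on term frequencies):
# get_high_info_terms("graph neural networks for graph data", top_n=3)
# would return something like ["graph", "neural", "networks"].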
def clean_text_for_filename(text: str) -> str:
"""Remove special chars, short words, etc. for filenames."""
text = text.lower()
text = re.sub(r'[^\w\s-]', '', text)
words = text.split()
# remove short or unhelpful words
stop_short = set(['the', 'and', 'for', 'with', 'this', 'that', 'ai', 'library'])
filtered = [w for w in words if len(w) > 3 and w not in stop_short]
return '_'.join(filtered)[:200]
def generate_filename(prompt, response, file_type="md", max_length=200):
"""
Generate a shortened filename by:
1) extracting high-info terms,
2) snippet from prompt+response,
3) remove duplicates,
4) truncate if needed.
"""
prefix = format_timestamp_prefix() + "_"
combined_text = (prompt + " " + response)[:200]
info_terms = get_high_info_terms(combined_text, top_n=5)
snippet = (prompt[:40] + " " + response[:40]).strip()
snippet_cleaned = clean_text_for_filename(snippet)
# remove duplicates
name_parts = info_terms + [snippet_cleaned]
seen = set()
unique_parts = []
for part in name_parts:
if part not in seen:
seen.add(part)
unique_parts.append(part)
full_name = '_'.join(unique_parts).strip('_')
leftover_chars = max_length - len(prefix) - len(file_type) - 1
if len(full_name) > leftover_chars:
full_name = full_name[:leftover_chars]
return f"{prefix}{full_name}.{file_type}"
def create_file(prompt, response, file_type="md"):
"""Create a text file from prompt + response with sanitized filename."""
filename = generate_filename(prompt.strip(), response.strip(), file_type)
with open(filename, 'w', encoding='utf-8') as f:
f.write(prompt + "\n\n" + response)
return filename
def get_download_link(file, file_type="zip"):
"""
Convert a file to base64 and return an HTML link for download.
"""
with open(file, "rb") as f:
b64 = base64.b64encode(f.read()).decode()
if file_type == "zip":
return f'<a href="data:application/zip;base64,{b64}" download="{os.path.basename(file)}">πŸ“‚ Download {os.path.basename(file)}</a>'
elif file_type == "mp3":
return f'<a href="data:audio/mpeg;base64,{b64}" download="{os.path.basename(file)}">🎡 Download {os.path.basename(file)}</a>'
elif file_type == "wav":
return f'<a href="data:audio/wav;base64,{b64}" download="{os.path.basename(file)}">πŸ”Š Download {os.path.basename(file)}</a>'
elif file_type == "md":
return f'<a href="data:text/markdown;base64,{b64}" download="{os.path.basename(file)}">πŸ“ Download {os.path.basename(file)}</a>'
else:
return f'<a href="data:application/octet-stream;base64,{b64}" download="{os.path.basename(file)}">Download {os.path.basename(file)}</a>'
def clean_for_speech(text: str) -> str:
"""Clean up text for TTS output."""
text = text.replace("\n", " ")
text = text.replace("</s>", " ")
text = text.replace("#", "")
text = re.sub(r"\(https?:\/\/[^\)]+\)", "", text)
text = re.sub(r"\s+", " ", text).strip()
return text
async def edge_tts_generate_audio(text, voice="en-US-AriaNeural", rate=0, pitch=0, file_format="mp3"):
"""Async TTS generation with edge-tts library."""
text = clean_for_speech(text)
if not text.strip():
return None
rate_str = f"{rate:+d}%"
pitch_str = f"{pitch:+d}Hz"
communicate = edge_tts.Communicate(text, voice, rate=rate_str, pitch=pitch_str)
out_fn = generate_filename(text, text, file_type=file_format)
await communicate.save(out_fn)
return out_fn
def speak_with_edge_tts(text, voice="en-US-AriaNeural", rate=0, pitch=0, file_format="mp3"):
"""Wrapper for the async TTS generate call."""
return asyncio.run(edge_tts_generate_audio(text, voice, rate, pitch, file_format))
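# Note: asyncio.run() spins up a fresh event loop for each call, which assumes no other
# event loop is already running in the thread executing the Streamlit script.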
def play_and_download_audio(file_path, file_type="mp3"):
"""Streamlit audio + a quick download link."""
if file_path and os.path.exists(file_path):
st.audio(file_path)
dl_link = get_download_link(file_path, file_type=file_type)
st.markdown(dl_link, unsafe_allow_html=True)
def save_qa_with_audio(question, answer, voice=None):
"""Save Q&A to markdown and also generate audio."""
if not voice:
voice = st.session_state['tts_voice']
combined_text = f"# Question\n{question}\n\n# Answer\n{answer}"
md_file = create_file(question, answer, "md")
audio_text = f"{question}\n\nAnswer: {answer}"
audio_file = speak_with_edge_tts(
audio_text,
voice=voice,
file_format=st.session_state['audio_format']
)
return md_file, audio_file
# ─────────────────────────────────────────────────────────
# 3. PAPER PARSING & DISPLAY
# ─────────────────────────────────────────────────────────
def parse_arxiv_refs(ref_text: str):
"""
Given a multi-line markdown with arxiv references, parse them into
a list of dicts: {date, title, url, authors, summary, ...}.
"""
if not ref_text:
return []
results = []
current_paper = {}
lines = ref_text.split('\n')
for i, line in enumerate(lines):
if line.count('|') == 2:
# Found a new paper line
if current_paper:
results.append(current_paper)
if len(results) >= 20:
break
try:
header_parts = line.strip('* ').split('|')
date = header_parts[0].strip()
title = header_parts[1].strip()
url_match = re.search(r'(https://arxiv.org/\S+)', line)
url = url_match.group(1) if url_match else f"paper_{len(results)}"
current_paper = {
'date': date,
'title': title,
'url': url,
'authors': '',
'summary': '',
'full_audio': None,
'download_base64': '',
}
except Exception as e:
st.warning(f"Error parsing paper header: {str(e)}")
current_paper = {}
continue
elif current_paper:
# If authors not set, fill it; otherwise, fill summary
if not current_paper['authors']:
current_paper['authors'] = line.strip('* ')
else:
if current_paper['summary']:
current_paper['summary'] += ' ' + line.strip()
else:
current_paper['summary'] = line.strip()
if current_paper:
results.append(current_paper)
return results[:20]
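# The parser above expects each paper header line to contain exactly two '|' separators,
# roughly of this (hypothetical) shape:
#   * 2024-05-01 | Example Paper Title | https://arxiv.org/abs/2405.00001
# followed by an authors line and then one or more summary lines.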
def create_paper_links_md(papers):
"""Creates a minimal .md content linking to each paper's arxiv URL."""
lines = ["# Paper Links\n"]
for i, p in enumerate(papers, start=1):
lines.append(f"{i}. **{p['title']}** β€” [Arxiv]({p['url']})")
return "\n".join(lines)
def create_paper_audio_files(papers, input_question):
"""
For each paper, generate TTS audio summary, store the path in `paper['full_audio']`,
and also store a base64 link for stable downloading.
"""
for paper in papers:
try:
audio_text = f"{paper['title']} by {paper['authors']}. {paper['summary']}"
audio_text = clean_for_speech(audio_text)
file_format = st.session_state['audio_format']
audio_file = speak_with_edge_tts(
audio_text,
voice=st.session_state['tts_voice'],
file_format=file_format
)
paper['full_audio'] = audio_file
if audio_file:
with open(audio_file, "rb") as af:
b64_data = base64.b64encode(af.read()).decode()
download_filename = os.path.basename(audio_file)
mime_type = "mpeg" if file_format == "mp3" else "wav"
paper['download_base64'] = (
f'<a href="data:audio/{mime_type};base64,{b64_data}" '
f'download="{download_filename}">🎡 Download {download_filename}</a>'
)
except Exception as e:
st.warning(f"Error processing paper {paper['title']}: {str(e)}")
paper['full_audio'] = None
paper['download_base64'] = ''
def display_file_history_in_sidebar():
"""
Shows a history of files grouped by query, with lazy loading of audio and content.
"""
st.sidebar.markdown("---")
st.sidebar.markdown("### πŸ“‚ File History")
# Gather all files
md_files = glob.glob("*.md")
mp3_files = glob.glob("*.mp3")
wav_files = glob.glob("*.wav")
all_files = md_files + mp3_files + wav_files
if not all_files:
st.sidebar.write("No files found.")
return
# Group files by their query prefix (timestamp_query)
grouped_files = {}
for f in all_files:
fname = os.path.basename(f)
prefix = '_'.join(fname.split('_')[:6]) # Get timestamp part
if prefix not in grouped_files:
grouped_files[prefix] = {'md': [], 'audio': [], 'loaded': False}
ext = os.path.splitext(fname)[1].lower()
if ext == '.md':
grouped_files[prefix]['md'].append(f)
elif ext in ['.mp3', '.wav']:
grouped_files[prefix]['audio'].append(f)
# Sort groups by timestamp (newest first)
sorted_groups = sorted(grouped_files.items(), key=lambda x: x[0], reverse=True)
    # 🗑⬇️ Sidebar delete all and zip all download
col1, col4 = st.sidebar.columns(2)
with col1:
        if st.button("🗑 Delete All"):
for f in all_files:
os.remove(f)
st.session_state.should_rerun = True
with col4:
if st.button("⬇️ Zip All"):
zip_name = create_zip_of_files(md_files, mp3_files, wav_files,
st.session_state.get('last_query', ''))
if zip_name:
st.sidebar.markdown(get_download_link(zip_name, "zip"),
unsafe_allow_html=True)
# Display grouped files
for prefix, files in sorted_groups:
# Get a preview of content from first MD file
preview = ""
if files['md']:
with open(files['md'][0], "r", encoding="utf-8") as f:
preview = f.read(200).replace("\n", " ")
                if len(preview) >= 200:
                    preview += "..."
# Create unique key for this group
group_key = f"group_{prefix}"
if group_key not in st.session_state:
st.session_state[group_key] = False
# Display group expander
        with st.sidebar.expander(f"📑 Query Group: {prefix}"):
st.write("**Preview:**")
st.write(preview)
# Load full content button
            if st.button("📖 View Full Content", key=f"btn_{prefix}"):
st.session_state[group_key] = True
# Only show full content and audio if button was clicked
if st.session_state[group_key]:
# Display markdown files
for md_file in files['md']:
with open(md_file, "r", encoding="utf-8") as f:
content = f.read()
st.markdown("**Full Content:**")
st.markdown(content)
st.markdown(get_download_link(md_file, file_type="md"),
unsafe_allow_html=True)
# Display audio files
usePlaySidebar=False
if usePlaySidebar:
for audio_file in files['audio']:
ext = os.path.splitext(audio_file)[1].replace('.', '')
st.audio(audio_file)
st.markdown(get_download_link(audio_file, file_type=ext),
unsafe_allow_html=True)
def display_papers(papers, marquee_settings):
"""Display paper info with both abs and PDF links."""
st.write("## Research Papers")
for i, paper in enumerate(papers, start=1):
marquee_text = f"πŸ“„ {paper['title']} | πŸ‘€ {paper['authors'][:120]}"
display_marquee(marquee_text, marquee_settings, key_suffix=f"paper_{i}")
with st.expander(f"{i}. πŸ“„ {paper['title']}", expanded=True):
# Create PDF link by replacing 'abs' with 'pdf' in arxiv URL
pdf_url = paper['url'].replace('/abs/', '/pdf/')
st.markdown(f"""
**{paper['date']} | {paper['title']}**
📄 [Abstract]({paper['url']}) | 📑 [PDF]({pdf_url})
""")
st.markdown(f"*Authors:* {paper['authors']}")
st.markdown(paper['summary'])
if paper.get('full_audio'):
st.write("πŸ“š Paper Audio")
st.audio(paper['full_audio'])
if paper['download_base64']:
st.markdown(paper['download_base64'], unsafe_allow_html=True)
def display_papers_in_sidebar(papers):
"""Mirrors the paper listing in sidebar with lazy loading."""
st.sidebar.title("🎢 Papers & Audio")
for i, paper in enumerate(papers, start=1):
paper_key = f"paper_{paper['url']}"
if paper_key not in st.session_state:
st.session_state[paper_key] = False
with st.sidebar.expander(f"{i}. {paper['title']}"):
# Create PDF link
pdf_url = paper['url'].replace('/abs/', '/pdf/')
st.markdown(f"πŸ“„ [Abstract]({paper['url']}) | πŸ“‘ [PDF]({pdf_url})")
# Preview of authors and summary
st.markdown(f"**Authors:** {paper['authors'][:100]}...")
if paper['summary']:
st.markdown(f"**Summary:** {paper['summary'][:200]}...")
# Load audio button
            if paper['full_audio'] and st.button("🎵 Load Audio",
key=f"btn_{paper_key}"):
st.session_state[paper_key] = True
# Show audio player and download only if requested
if st.session_state[paper_key] and paper['full_audio']:
st.audio(paper['full_audio'])
if paper['download_base64']:
st.markdown(paper['download_base64'], unsafe_allow_html=True)
# ─────────────────────────────────────────────────────────
# 4. ZIP FUNCTION
# ─────────────────────────────────────────────────────────
def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
"""
Zip up all relevant files, limiting the final zip name to ~20 chars
to avoid overly long base64 strings.
"""
md_files = [f for f in md_files if os.path.basename(f).lower() != 'readme.md']
all_files = md_files + mp3_files + wav_files
if not all_files:
return None
all_content = []
for f in all_files:
if f.endswith('.md'):
with open(f, 'r', encoding='utf-8') as file:
all_content.append(file.read())
elif f.endswith('.mp3') or f.endswith('.wav'):
basename = os.path.splitext(os.path.basename(f))[0]
words = basename.replace('_', ' ')
all_content.append(words)
all_content.append(input_question)
combined_content = " ".join(all_content)
info_terms = get_high_info_terms(combined_content, top_n=10)
timestamp = format_timestamp_prefix()
name_text = '-'.join(term for term in info_terms[:5])
short_zip_name = (timestamp + "_" + name_text)[:20] + ".zip"
with zipfile.ZipFile(short_zip_name, 'w') as z:
for f in all_files:
z.write(f)
return short_zip_name
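# Illustrative example (hypothetical): with timestamp "01_02_25_02_05_PM" and top terms
# ["arxiv", "retrieval"], the name is cut to 20 characters, e.g. "01_02_25_02_05_PM_ar.zip".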
# ─────────────────────────────────────────────────────────
# 5. MAIN LOGIC: AI LOOKUP & VOICE INPUT
# ─────────────────────────────────────────────────────────
def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
titles_summary=True, full_audio=False):
"""Main routine that uses Anthropic (Claude) + Gradio ArXiv RAG pipeline."""
start = time.time()
ai_constitution = """
You are a talented AI coder and songwriter...
"""
# --- 1) Claude API
client = anthropic.Anthropic(api_key=anthropic_key)
user_input = q
response = client.messages.create(
model="claude-3-sonnet-20240229",
max_tokens=1000,
messages=[
{"role": "user", "content": user_input}
])
st.write("Claude's reply 🧠:")
st.markdown(response.content[0].text)
# Save & produce audio
result = response.content[0].text
create_file(q, result)
md_file, audio_file = save_qa_with_audio(q, result)
st.subheader("πŸ“ Main Response Audio")
play_and_download_audio(audio_file, st.session_state['audio_format'])
# --- 2) Arxiv RAG
#st.write("Arxiv's AI this Evening is Mixtral 8x7B...")
st.write('Running Arxiv RAG with Claude inputs.')
client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
refs = client.predict(
q,
10,
"Semantic Search",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
api_name="/update_with_rag_md"
)[0]
#r2 = client.predict(
# q,
# "mistralai/Mixtral-8x7B-Instruct-v0.1",
# True,
# api_name="/ask_llm"
#)
    # --- 3) Claude API call that uses the arxiv paper list to draft a streamlit app.py
client = anthropic.Anthropic(api_key=anthropic_key)
user_input = q + '\n\n' + 'Use the paper list below to answer the question thinking through step by step how to create a streamlit app.py and requirements.txt for the solution that answers the questions with a working app to demonstrate.'+ '\n\n'
response = client.messages.create(
model="claude-3-sonnet-20240229",
max_tokens=1000,
messages=[
{"role": "user", "content": user_input}
])
r2 = response.content[0].text
st.write("Claude's reply 🧠:")
st.markdown(r2)
#result = f"### πŸ”Ž {q}\n\n{r2}\n\n{refs}"
result = f"πŸ”Ž {r2}\n\n{refs}"
md_file, audio_file = save_qa_with_audio(q, result)
st.subheader("πŸ“ Main Response Audio")
play_and_download_audio(audio_file, st.session_state['audio_format'])
    # --- 4) Parse + handle papers
papers = parse_arxiv_refs(refs)
if papers:
# Create minimal links page first
paper_links = create_paper_links_md(papers)
links_file = create_file(q, paper_links, "md")
st.markdown(paper_links)
# Then create audio for each paper
create_paper_audio_files(papers, input_question=q)
display_papers(papers, get_marquee_settings())
display_papers_in_sidebar(papers)
else:
st.warning("No papers found in the response.")
elapsed = time.time() - start
st.write(f"**Total Elapsed:** {elapsed:.2f} s")
return result
def process_voice_input(text):
"""When user sends voice query, we run the AI lookup + Q&A with audio."""
if not text:
return
st.subheader("πŸ” Search Results")
result = perform_ai_lookup(
text,
vocal_summary=True,
extended_refs=False,
titles_summary=True,
full_audio=True
)
md_file, audio_file = save_qa_with_audio(text, result)
st.subheader("πŸ“ Generated Files")
st.write(f"Markdown: {md_file}")
st.write(f"Audio: {audio_file}")
play_and_download_audio(audio_file, st.session_state['audio_format'])
# ─────────────────────────────────────────────────────────
# 6. FILE HISTORY SIDEBAR
# ─────────────────────────────────────────────────────────
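# NOTE: this second definition of display_file_history_in_sidebar replaces the
# grouped-by-query version defined in section 3 above, since Python keeps the last def.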
def display_file_history_in_sidebar():
"""
Shows a history of each local .md, .mp3, .wav file in descending
order of modification time, with quick icons and optional download links.
"""
st.sidebar.markdown("---")
st.sidebar.markdown("### πŸ“‚ File History")
# Gather all files
md_files = glob.glob("*.md")
mp3_files = glob.glob("*.mp3")
wav_files = glob.glob("*.wav")
all_files = md_files + mp3_files + wav_files
if not all_files:
st.sidebar.write("No files found.")
return
    # 🗑⬇️ Sidebar delete all and zip all download
    col1, col4 = st.sidebar.columns(2)
    with col1:
        if st.button("🗑 Delete All"):
            for f in all_files:
                os.remove(f)
st.session_state.should_rerun = True
with col4:
if st.button("⬇️ Zip All"):
zip_name = create_zip_of_files(md_files, mp3_files, wav_files, st.session_state.get('last_query', ''))
if zip_name:
st.sidebar.markdown(get_download_link(zip_name, "zip"), unsafe_allow_html=True)
# Sort newest first
all_files = sorted(all_files, key=os.path.getmtime, reverse=True)
for f in all_files:
fname = os.path.basename(f)
ext = os.path.splitext(fname)[1].lower().strip('.')
        emoji = FILE_EMOJIS.get(ext, '📦')
time_str = datetime.fromtimestamp(os.path.getmtime(f)).strftime("%Y-%m-%d %H:%M:%S")
with st.sidebar.expander(f"{emoji} {fname}"):
st.write(f"**Modified:** {time_str}")
if ext == "md":
with open(f, "r", encoding="utf-8") as file_in:
snippet = file_in.read(200).replace("\n", " ")
if len(snippet) == 200:
snippet += "..."
st.write(snippet)
st.markdown(get_download_link(f, file_type="md"), unsafe_allow_html=True)
elif ext in ["mp3","wav"]:
st.audio(f)
st.markdown(get_download_link(f, file_type=ext), unsafe_allow_html=True)
else:
st.markdown(get_download_link(f), unsafe_allow_html=True)
# ─────────────────────────────────────────────────────────
# 7. MAIN APP
# ─────────────────────────────────────────────────────────
def main():
# 1) Setup marquee UI in the sidebar
update_marquee_settings_ui()
marquee_settings = get_marquee_settings()
# 2) Display the marquee welcome
display_marquee(st.session_state['marquee_content'],
{**marquee_settings, "font-size": "28px", "lineHeight": "50px"},
key_suffix="welcome")
# 3) Main action tabs
    tab_main = st.radio("Action:", ["🎤 Voice", "📸 Media", "🔍 ArXiv", "📝 Editor"],
horizontal=True)
# Example custom component usage
mycomponent = components.declare_component("mycomponent", path="mycomponent")
val = mycomponent(my_input_value="Hello")
if val:
val_stripped = val.replace('\\n', ' ')
edited_input = st.text_area("✏️ Edit Input:", value=val_stripped, height=100)
run_option = st.selectbox("Model:", ["Arxiv"])
col1, col2 = st.columns(2)
with col1:
            autorun = st.checkbox("⚙ AutoRun", value=True)
with col2:
            full_audio = st.checkbox("📚FullAudio", value=False)
input_changed = (val != st.session_state.old_val)
if autorun and input_changed:
st.session_state.old_val = val
st.session_state.last_query = edited_input
perform_ai_lookup(edited_input,
vocal_summary=True,
extended_refs=False,
titles_summary=True,
full_audio=full_audio)
else:
if st.button("β–Ά Run"):
st.session_state.old_val = val
st.session_state.last_query = edited_input
perform_ai_lookup(edited_input,
vocal_summary=True,
extended_refs=False,
titles_summary=True,
full_audio=full_audio)
# ─────────────────────────────────────────────────────────
# TAB: ArXiv
# ─────────────────────────────────────────────────────────
    if tab_main == "🔍 ArXiv":
        st.subheader("🔍 Query ArXiv")
        q = st.text_input("🔍 Query:", key="arxiv_query")
        st.markdown("### 🎛 Options")
        vocal_summary = st.checkbox("🎙ShortAudio", value=True, key="option_vocal_summary")
        extended_refs = st.checkbox("📜LongRefs", value=False, key="option_extended_refs")
        titles_summary = st.checkbox("🔖TitlesOnly", value=True, key="option_titles_summary")
        full_audio = st.checkbox("📚FullAudio", value=False, key="option_full_audio")
        full_transcript = st.checkbox("🧾FullTranscript", value=False, key="option_full_transcript")
        if q and st.button("🔍Run"):
st.session_state.last_query = q
result = perform_ai_lookup(q, vocal_summary=vocal_summary, extended_refs=extended_refs,
titles_summary=titles_summary, full_audio=full_audio)
if full_transcript:
create_file(q, result, "md")
# ─────────────────────────────────────────────────────────
# TAB: Voice
# ─────────────────────────────────────────────────────────
    elif tab_main == "🎤 Voice":
        st.subheader("🎤 Voice Input")
        st.markdown("### 🎤 Voice Settings")
selected_voice = st.selectbox(
"Select TTS Voice:",
options=EDGE_TTS_VOICES,
index=EDGE_TTS_VOICES.index(st.session_state['tts_voice'])
)
st.markdown("### πŸ”Š Audio Format")
selected_format = st.radio(
"Choose Audio Format:",
options=["MP3", "WAV"],
index=0
)
# Update session state if voice/format changes
if selected_voice != st.session_state['tts_voice']:
st.session_state['tts_voice'] = selected_voice
st.rerun()
if selected_format.lower() != st.session_state['audio_format']:
st.session_state['audio_format'] = selected_format.lower()
st.rerun()
# Input text
        user_text = st.text_area("💬 Message:", height=100)
user_text = user_text.strip().replace('\n', ' ')
if st.button("πŸ“¨ Send"):
process_voice_input(user_text)
st.subheader("πŸ“œ Chat History")
for c in st.session_state.chat_history:
st.write("**You:**", c["user"])
st.write("**Response:**", c["claude"])
# ─────────────────────────────────────────────────────────
# TAB: Media
# ─────────────────────────────────────────────────────────
    elif tab_main == "📸 Media":
        st.header("📸 Media Gallery")
# By default, show audio first
        tabs = st.tabs(["🎵 Audio", "🖼 Images", "🎥 Video"])
# AUDIO sub-tab
with tabs[0]:
st.subheader("🎡 Audio Files")
audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
if audio_files:
for a in audio_files:
with st.expander(os.path.basename(a)):
st.audio(a)
ext = os.path.splitext(a)[1].replace('.', '')
dl_link = get_download_link(a, file_type=ext)
st.markdown(dl_link, unsafe_allow_html=True)
else:
st.write("No audio files found.")
# IMAGES sub-tab
with tabs[1]:
st.subheader("πŸ–Ό Image Files")
imgs = glob.glob("*.png") + glob.glob("*.jpg") + glob.glob("*.jpeg")
if imgs:
c = st.slider("Cols", 1, 5, 3, key="cols_images")
cols = st.columns(c)
for i, f in enumerate(imgs):
with cols[i % c]:
st.image(Image.open(f), use_container_width=True)
else:
st.write("No images found.")
# VIDEO sub-tab
with tabs[2]:
st.subheader("πŸŽ₯ Video Files")
vids = glob.glob("*.mp4") + glob.glob("*.mov") + glob.glob("*.avi")
if vids:
for v in vids:
with st.expander(os.path.basename(v)):
st.video(v)
else:
st.write("No videos found.")
# ─────────────────────────────────────────────────────────
# TAB: Editor
# ─────────────────────────────────────────────────────────
    elif tab_main == "📝 Editor":
st.write("Select or create a file to edit. (Currently minimal demo)")
# ─────────────────────────────────────────────────────────
# SIDEBAR: FILE HISTORY
# ─────────────────────────────────────────────────────────
display_file_history_in_sidebar()
# Some light CSS styling
st.markdown("""
<style>
.main { background: linear-gradient(to right, #1a1a1a, #2d2d2d); color: #fff; }
.stMarkdown { font-family: 'Helvetica Neue', sans-serif; }
.stButton>button { margin-right: 0.5rem; }
</style>
""", unsafe_allow_html=True)
# Rerun if needed
if st.session_state.should_rerun:
st.session_state.should_rerun = False
st.rerun()
if __name__ == "__main__":
main()