Spaces: Running on CPU Upgrade
Update app.py
app.py CHANGED
@@ -1,213 +1,360 @@
 import streamlit as st
 import streamlit.components.v1 as components
 import asyncio
 import edge_tts
-import os
-import base64
-import json
-from datetime import datetime
-from typing import Optional, Dict, List
-import glob

 st.set_page_config(
-    page_title=
     page_icon="🔬",
     layout="wide",
-    initial_sidebar_state="
 )

-... [old lines 20-49 unrecoverable in this capture] ...
 }
-
-"""
-
-def
-    """Generate
-... [old lines 56-60 unrecoverable] ...
 </div>
-... [old lines 62-68 unrecoverable] ...
-    document.addEventListener('click', (e) => {{
-        if (e.target.tagName === 'g' && e.target.classList.contains('node')) {{
-            const nodeId = e.target.id;
-            window.parent.postMessage({{
-                type: 'node_clicked',
-                nodeId: nodeId,
-                isStreamlitMessage: true
-            }}, '*');
-        }}
-    }});
-    </script>
-    </div>
-    """
-
-async def generate_speech(text: str, voice: str = "en-US-AriaNeural") -> Optional[str]:
-    """Generate speech using Edge TTS."""
     if not text.strip():
         return None

     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    output_file = f"

-    communicate = edge_tts.Communicate(text, voice)
     await communicate.save(output_file)

     return output_file

-def
-    """
-... [old lines 98-104 unrecoverable] ...
     }

-def
-    """
-... [old lines 109-117 unrecoverable except the closing HTML below] ...
-        </audio>
-        <a href="data:audio/mp3;base64,{audio_b64}"
-           download="{os.path.basename(file_path)}"
-           style="margin-top: 5px; display: inline-block;">
-            Download Audio
-        </a>
-    </div>
-    """
-
-def handle_node_click(node_id: str):
-    """Handle Mermaid diagram node clicks."""
-    # Convert node ID to search query
-    query = node_id.replace('_', ' ')
-
-    # Perform search
-    results = process_arxiv_search(query)
-
-    # Generate speech from results
-    asyncio.run(generate_speech(results['abstract']))
-
-    # Update session state
-    st.session_state.current_query = query
-    st.session_state.last_response = results
-
-# Main Mermaid diagram definition
-RESEARCH_DIAGRAM = """
-graph TD
-    A[Literature Review] --> B[Data Analysis]
-    B --> C[Results]
-    C --> D[Conclusions]
-
-    click A callback "Research Methodology"
-    click B callback "Statistical Analysis"
-    click C callback "Research Findings"
-    click D callback "Research Impact"
-
-    style A fill:#f9f,stroke:#333,stroke-width:4px
-    style B fill:#bbf,stroke:#333,stroke-width:4px
-    style C fill:#bfb,stroke:#333,stroke-width:4px
-    style D fill:#fbb,stroke:#333,stroke-width:4px
-"""

-... [old lines 160-172: removed main()/layout setup, unrecoverable] ...
 with col1:
-    st.
-... [old lines 175-177 unrecoverable] ...
-        scrolling=True
-    )
-
-    st.markdown("### Recent Searches")
-    for query in st.session_state.mermaid_history[-5:]:
-        st.info(query)

 with col2:
-    st.
-... [old lines 188-190 unrecoverable] ...
-        handle_node_click(search_query)

-... [old lines 193-200 unrecoverable] ...
-    if audio_files:
-        latest_audio = max(audio_files, key=os.path.getctime)
-        st.markdown(create_audio_player(latest_audio), unsafe_allow_html=True)
-
-    # Cleanup old audio files
-    for file in glob.glob("speech_*.mp3")[:-5]:  # Keep only last 5 files
-        try:
-            os.remove(file)
-        except:
-            pass
-
-if __name__ == "__main__":
-    main()
 import streamlit as st
+import anthropic, openai, base64, cv2, glob, json, math, os, pytz, random, re, requests, time, zipfile
+import plotly.graph_objects as go
 import streamlit.components.v1 as components
+from datetime import datetime
+from audio_recorder_streamlit import audio_recorder
+from bs4 import BeautifulSoup
+from collections import defaultdict, deque
+from dotenv import load_dotenv
+from gradio_client import Client
+from huggingface_hub import InferenceClient
+from io import BytesIO
+from PIL import Image
+from PyPDF2 import PdfReader
+from urllib.parse import quote
+from xml.etree import ElementTree as ET
+from openai import OpenAI
+import extra_streamlit_components as stx
 import asyncio
 import edge_tts

+# 1. App Configuration
+Site_Name = '🔬 Research Assistant Pro'
 st.set_page_config(
+    page_title=Site_Name,
     page_icon="🔬",
     layout="wide",
+    initial_sidebar_state="auto",
+    menu_items={
+        'Get Help': 'https://huggingface.co/awacke1',
+        'Report a bug': 'https://huggingface.co/spaces/awacke1',
+        'About': Site_Name
+    }
 )
+load_dotenv()

+# 2. API and Client Setup
+openai_api_key = os.getenv('OPENAI_API_KEY', st.secrets.get('OPENAI_API_KEY', ''))
+anthropic_key = os.getenv('ANTHROPIC_API_KEY', st.secrets.get('ANTHROPIC_API_KEY', ''))
+hf_key = os.getenv('HF_KEY', st.secrets.get('HF_KEY', ''))
+
+openai_client = OpenAI(api_key=openai_api_key)
+claude_client = anthropic.Anthropic(api_key=anthropic_key)
+
+# 3. Session State Management
+if 'chat_history' not in st.session_state:
+    st.session_state.chat_history = []
+if 'current_audio' not in st.session_state:
+    st.session_state.current_audio = None
+if 'autoplay_audio' not in st.session_state:
+    st.session_state.autoplay_audio = True
+if 'last_search' not in st.session_state:
+    st.session_state.last_search = None
+if 'file_content' not in st.session_state:
+    st.session_state.file_content = None
+if 'current_file' not in st.session_state:
+    st.session_state.current_file = None
+
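The six guards above can be collapsed into one loop; a behavior-equivalent sketch (the `defaults` dict is mine, not in the commit):

defaults = {'chat_history': [], 'current_audio': None, 'autoplay_audio': True,
            'last_search': None, 'file_content': None, 'current_file': None}
for key, value in defaults.items():
    st.session_state.setdefault(key, value)  # only sets keys that are missing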
+# 4. Utility Functions
+def get_download_link(file_path):
+    """Generate download link for any file type"""
+    with open(file_path, "rb") as file:
+        contents = file.read()
+    b64 = base64.b64encode(contents).decode()
+    file_name = os.path.basename(file_path)
+    file_type = file_name.split('.')[-1]
+    mime_types = {
+        'md': 'text/markdown',
+        'mp3': 'audio/mpeg',
+        'mp4': 'video/mp4',
+        'pdf': 'application/pdf',
+        'txt': 'text/plain'
     }
+    mime_type = mime_types.get(file_type, 'application/octet-stream')
+    return f'<a href="data:{mime_type};base64,{b64}" download="{file_name}">⬇️ Download {file_name}</a>'
+
+def generate_filename(content, file_type="md"):
+    """Generate unique filename with timestamp"""
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    safe_content = re.sub(r'[^\w\s-]', '', content[:50])
+    return f"{timestamp}_{safe_content}.{file_type}"
+
+def get_autoplay_audio_html(audio_path, width="100%"):
+    """Create HTML for autoplaying audio with controls"""
+    try:
+        with open(audio_path, "rb") as audio_file:
+            audio_bytes = audio_file.read()
+        audio_b64 = base64.b64encode(audio_bytes).decode()
+        return f'''
+            <audio controls autoplay style="width: {width};">
+                <source src="data:audio/mpeg;base64,{audio_b64}" type="audio/mpeg">
+                Your browser does not support the audio element.
+            </audio>
+            <div style="margin-top: 5px;">
+                <a href="data:audio/mpeg;base64,{audio_b64}"
+                   download="{os.path.basename(audio_path)}"
+                   style="text-decoration: none;">
+                    ⬇️ Download Audio
+                </a>
+            </div>
+        '''
+    except Exception as e:
+        return f"Error loading audio: {str(e)}"
+
+def get_video_html(video_path, width="100%"):
+    """Create HTML for autoplaying video with controls"""
+    video_url = f"data:video/mp4;base64,{base64.b64encode(open(video_path, 'rb').read()).decode()}"
+    return f'''
+        <video width="{width}" controls autoplay muted loop>
+            <source src="{video_url}" type="video/mp4">
+            Your browser does not support the video tag.
+        </video>
+    '''
+
+# 5. Voice Recognition Component
+def create_voice_component():
+    """Create voice recognition component with visual feedback"""
+    return components.html(
+        """
+        <div style="padding: 20px; border-radius: 10px; background: #f0f2f6;">
+            <button id="startBtn" class="streamlit-button">Start Voice Search</button>
+            <p id="status">Click to start speaking</p>
+            <div id="result"></div>
+            <script>
+                if ('webkitSpeechRecognition' in window) {
+                    const recognition = new webkitSpeechRecognition();
+                    recognition.continuous = false;
+                    recognition.interimResults = true;
+
+                    const startBtn = document.getElementById('startBtn');
+                    const status = document.getElementById('status');
+                    const result = document.getElementById('result');
+
+                    startBtn.onclick = () => {
+                        recognition.start();
+                        status.textContent = 'Listening...';
+                    };
+
+                    recognition.onresult = (event) => {
+                        const transcript = Array.from(event.results)
+                            .map(result => result[0].transcript)
+                            .join('');
+                        result.textContent = transcript;
+
+                        if (event.results[0].isFinal) {
+                            window.parent.postMessage({
+                                type: 'voice_search',
+                                query: transcript
+                            }, '*');
+                        }
+                    };
+
+                    recognition.onend = () => {
+                        status.textContent = 'Click to start speaking';
+                    };
+                }
+            </script>
         </div>
+        """,
+        height=200
+    )
+
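Note that `components.html` embeds a display-only iframe, so the `window.parent.postMessage` call above is not delivered back to Python; the typed-query box in `render_search_interface` below is the working path. A minimal server-side alternative, a sketch assuming the `audio_recorder` component already imported at the top of the file:

audio_bytes = audio_recorder()  # returns raw WAV bytes after the user stops talking
if audio_bytes:
    st.audio(audio_bytes, format="audio/wav")
    # audio_bytes could then feed a speech-to-text step (e.g. via openai_client)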
+# 6. Audio Processing Functions
+async def generate_audio(text, voice="en-US-AriaNeural", rate="+0%", pitch="+0Hz"):
+    """Generate audio using Edge TTS with automatic playback"""
     if not text.strip():
         return None

     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    output_file = f"response_{timestamp}.mp3"

+    communicate = edge_tts.Communicate(text, voice, rate=rate, pitch=pitch)
     await communicate.save(output_file)

     return output_file

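For reference, a minimal call from Streamlit's synchronous script (the sample text is illustrative):

mp3_path = asyncio.run(generate_audio("Hello from Edge TTS", rate="+10%"))
if mp3_path:
    st.markdown(get_autoplay_audio_html(mp3_path), unsafe_allow_html=True)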
+def render_audio_result(audio_file, title="Generated Audio"):
+    """Render audio result with autoplay in Streamlit"""
+    if audio_file and os.path.exists(audio_file):
+        st.markdown(f"### {title}")
+        st.markdown(get_autoplay_audio_html(audio_file), unsafe_allow_html=True)
+
+# 7. Search and Process Functions
+def perform_arxiv_search(query, response_type="summary"):
+    """Perform Arxiv search with voice response"""
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+
+    # Get search results
+    refs = client.predict(
+        query,
+        20,
+        "Semantic Search",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        api_name="/update_with_rag_md"
+    )[0]
+
+    # Get AI interpretation
+    summary = client.predict(
+        query,
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        True,
+        api_name="/ask_llm"
+    )
+
+    response_text = summary if response_type == "summary" else refs
+    return response_text, refs
+
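Both `predict` calls assume the endpoint names that Space exposes (`/update_with_rag_md` and `/ask_llm`); a quick standalone sanity check of the first one, outside Streamlit, might look like:

from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
refs = client.predict("transformer attention", 20, "Semantic Search",
                      "mistralai/Mixtral-8x7B-Instruct-v0.1",
                      api_name="/update_with_rag_md")[0]
print(refs[:300])  # first few hundred characters of the returned markdown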
+async def process_voice_search_with_autoplay(query):
+    """Process voice search with automatic audio playback"""
+    summary, full_results = perform_arxiv_search(query)
+
+    audio_file = await generate_audio(summary)
+
+    st.session_state.current_audio = audio_file
+    st.session_state.last_search = {
+        'query': query,
+        'summary': summary,
+        'full_results': full_results,
+        'audio': audio_file,
+        'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     }
+
+    if audio_file:
+        render_audio_result(audio_file, "Search Results")
+
+    return audio_file

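In the code shown, this function fills `last_search` but nothing ever appends to `chat_history`, so the History tab stays empty; if persistence is intended (an assumption about intent), one line before the return would cover it:

st.session_state.chat_history.append(st.session_state.last_search)  # assumed glue, not in the diff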
+def display_search_results_with_audio():
+    """Display search results with autoplaying audio"""
+    if st.session_state.last_search:
+        st.subheader("Latest Results")
+        st.markdown(st.session_state.last_search['summary'])
+
+        with st.expander("View Full Results"):
+            st.markdown(st.session_state.last_search['full_results'])
+
+        if st.session_state.current_audio:
+            render_audio_result(st.session_state.current_audio, "Audio Summary")

+# 8. UI Components
+def render_search_interface():
+    """Render main search interface"""
+    st.header("🔍 Voice Search")

+    create_voice_component()
+
+    col1, col2 = st.columns([3, 1])
+    with col1:
+        query = st.text_input("Or type your query:")
+    with col2:
+        if st.button("🔍 Search"):
+            asyncio.run(process_voice_search_with_autoplay(query))
+
+    display_search_results_with_audio()
+
+def display_search_history():
+    """Display search history with audio playback"""
+    st.header("Search History")
+    if st.session_state.chat_history:
+        for idx, entry in enumerate(reversed(st.session_state.chat_history)):
+            with st.expander(
+                f"🔍 {entry['timestamp']} - {entry['query'][:50]}...",
+                expanded=False
+            ):
+                st.markdown(entry['summary'])
+                if 'audio' in entry and entry['audio']:
+                    render_audio_result(entry['audio'], "Recorded Response")
+
+def render_settings():
+    """Render settings interface"""
+    st.sidebar.title("⚙️ Settings")
+
+    voice_options = [
+        "en-US-AriaNeural",
+        "en-US-GuyNeural",
+        "en-GB-SoniaNeural",
+        "en-AU-NatashaNeural"
+    ]
+
+    settings = {
+        'voice': st.sidebar.selectbox("Select Voice", voice_options),
+        'autoplay': st.sidebar.checkbox("Autoplay Responses", value=True),
+        'rate': st.sidebar.slider("Speech Rate", -50, 50, 0, 5),
+        'pitch': st.sidebar.slider("Pitch", -50, 50, 0, 5)
+    }
+
+    return settings
+
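The sliders return plain integers, while `generate_audio` passes its `rate` and `pitch` straight to edge_tts, which expects signed strings like "+0%" and "+0Hz"; converting at the call site, a sketch of glue the diff does not show:

settings = render_settings()
rate_str = f"{settings['rate']:+d}%"     # 0 -> "+0%", -5 -> "-5%"
pitch_str = f"{settings['pitch']:+d}Hz"  # 10 -> "+10Hz"
audio = asyncio.run(generate_audio("test", voice=settings['voice'],
                                   rate=rate_str, pitch=pitch_str))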
+def display_file_manager():
+    """Display file manager in sidebar"""
+    st.sidebar.title("📁 File Manager")

+    all_files = []
+    for ext in ['.md', '.mp3', '.mp4']:
+        all_files.extend(glob.glob(f"*{ext}"))
+    all_files.sort(key=os.path.getmtime, reverse=True)

+    col1, col2 = st.sidebar.columns(2)
     with col1:
+        if st.button("🗑 Delete All"):
+            for file in all_files:
+                os.remove(file)
+            st.rerun()

     with col2:
+        if st.button("⬇️ Download All"):
+            zip_name = f"archive_{datetime.now().strftime('%Y%m%d_%H%M%S')}.zip"
+            with zipfile.ZipFile(zip_name, 'w') as zipf:
+                for file in all_files:
+                    zipf.write(file)
+            st.sidebar.markdown(get_download_link(zip_name), unsafe_allow_html=True)
+
+    for file in all_files:
+        with st.sidebar.expander(f"📄 {os.path.basename(file)}", expanded=False):
+            st.write(f"Last modified: {datetime.fromtimestamp(os.path.getmtime(file)).strftime('%Y-%m-%d %H:%M:%S')}")
+            col1, col2 = st.columns(2)
+            with col1:
+                st.markdown(get_download_link(file), unsafe_allow_html=True)
+            with col2:
+                if st.button("🗑 Delete", key=f"del_{file}"):
+                    os.remove(file)
+                    st.rerun()
+
+# 9. Main Application
+def main():
+    st.title("🔬 Research Assistant Pro")
+
+    settings = render_settings()
+    display_file_manager()
+
+    tabs = st.tabs(["🎤 Voice Search", "📚 History", "🎵 Media", "⚙️ Settings"])
+
+    with tabs[0]:
+        render_search_interface()
+
+    with tabs[1]:
+        display_search_history()

+    with tabs[2]:
+        st.header("Media Gallery")
+        media_tabs = st.tabs(["🎵 Audio", "🎥 Video", "📷 Images"])

+        with media_tabs[0]:
+            audio_files = glob.glob("*.mp3")
+            if audio_files:
+                for audio_file in audio_files:
+                    st.markdown(get_autoplay_audio_html(audio_file), unsafe_allow_html=True)
+            else:
+                st.write("No audio files found")
+
+        with media_tabs[1]:
+            video_files = glob.glob("*.mp4")
+            if video_files:
+                for video_file in video_files:
+                    st.markdown(get_video_html(video_file), unsafe_allow_html=True)
+            else:
+                st.write("No video files found")

+        with media_tabs[2]:
+            image_files = glob.glob("*.png") + glob.glob("*.jpg")
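The capture cuts off at new line 360 (the second glob pattern is inferred from the Images tab context); the rest of the file is not shown. A plausible completion, mirroring the audio and video tabs above and the old file's entry point (an assumption, not the committed code):

            if image_files:
                for image_file in image_files:
                    st.image(image_file)  # st.image accepts a file path directly
            else:
                st.write("No image files found")

if __name__ == "__main__":
    main()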