import gradio as gr
import ssl
from openai import OpenAI
import time
import os
import shutil
from datetime import datetime
import Arcana
# SSL configuration to avoid verification issues
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    pass
else:
    ssl._create_default_https_context = _create_unverified_https_context
# OpenAI client setup
client = OpenAI(
    base_url='https://api.openai-proxy.org/v1',
    api_key='sk-Nxf8HmLpfIMhCd83n3TOr00TR57uBZ0jMbAgGCOzppXvlsx1',
)
# Retry logic for OpenAI API call
def openai_api_call(messages, retries=3, delay=5):
    for attempt in range(retries):
        try:
            completion = client.chat.completions.create(
                model="gpt-4o",
                messages=messages,
                timeout=10
            )
            return completion.choices[0].message.content
        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            time.sleep(delay)
    return "Sorry, I am having trouble connecting to the server. Please try again later."
# Chatbot response function
def chatbot_response(message, history):
    messages = [{"role": "system", "content": '''You are Arcana, a dynamic study resource database designed to help students excel in their exams. Your responses should be accurate, informative, and evidence-based whenever possible. Follow these guidelines:
Your primary goal is to provide students with the most helpful and accurate study information, utilizing both your internal knowledge and the PDF resources at your disposal.'''}]
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    response = openai_api_call(messages)
    return response
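
# Filename currently selected in the uploaded-files table (None until a row is clicked)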
selected = None
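
# Copy an uploaded file into the local 'cache' folder and return the refreshed file list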
def upload_file(file):
    foldername = 'cache'
    if not os.path.exists(foldername):
        os.mkdir(foldername)
    file_path = os.path.join(foldername, os.path.basename(file.name))
    shutil.copy(file.name, file_path)
    return list_uploaded_files()
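
# List the cached files as single-column rows for the DataFrame component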
def list_uploaded_files():
    foldername = 'cache'
    if not os.path.exists(foldername):
        return []
    files = os.listdir(foldername)
    return [[file] for file in files]
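
# Record which row was clicked and return that file's path, status text, size, and creation time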
def on_select(evt: gr.SelectData):
    global selected
    selected_value = evt.value
    selected_index = evt.index
    selected = selected_value
    print(f"Selected value: {selected_value} at index: {selected_index}")
    file_path = os.path.join("cache", selected_value) if selected_value else None
    status_message = f"Selected: {selected_value}" if selected_value else "No file selected"
    file_size = get_file_size(file_path) if file_path else ""
    file_creation_time = get_file_creation_time(file_path) if file_path else ""
    return file_path, status_message, file_size, file_creation_time
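
# Format a file size as bytes, KB, or MB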
def get_file_size(file_path):
    if file_path and os.path.exists(file_path):
        size_bytes = os.path.getsize(file_path)
        if size_bytes < 1024:
            return f"{size_bytes} bytes"
        elif size_bytes < 1024 * 1024:
            return f"{size_bytes / 1024:.2f} KB"
        else:
            return f"{size_bytes / (1024 * 1024):.2f} MB"
    return ""
def get_file_creation_time(file_path):
    if file_path and os.path.exists(file_path):
        creation_time = os.path.getctime(file_path)
        return datetime.fromtimestamp(creation_time).strftime("%Y-%m-%d %H:%M:%S")
    return ""
def delete_file():
    global selected
    if selected:
        foldername = 'cache'
        file_path = os.path.join(foldername, selected)
        if os.path.exists(file_path):
            os.remove(file_path)
            return list_uploaded_files(), None, f"File {selected} deleted successfully", "", ""
        else:
            return list_uploaded_files(), None, f"File {selected} not found", "", ""
    else:
        return list_uploaded_files(), None, "No file selected for deletion", "", ""
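
# Re-read the cache folder so the file list reflects the current contents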
def refresh_files():
    return list_uploaded_files()
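
# Show the clicked file in the file viewer, and in the image viewer when it is an image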
def display_file(evt: gr.SelectData, df):
    file_path = os.path.join("cache", evt.value)
    image_path = file_path if file_path.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')) else None
    return file_path, image_path, f"Displaying: {evt.value}"
def render_to_database():
    # Hand the cached PDFs off to the Arcana module to build the database
    Arcana.main()
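
# Rename the currently selected file in the cache, keeping its original extension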
def rename_file(new_name):
    global selected
    if selected and new_name:
        old_path = os.path.join('cache', selected)
        new_path = os.path.join('cache', new_name + '.' + selected.split('.')[-1])
        if os.path.exists(old_path):
            os.rename(old_path, new_path)
            selected = os.path.basename(new_path)  # keep the extension so later operations find the file
            return list_uploaded_files(), f"File renamed to {new_name}", new_path, get_file_size(new_path), get_file_creation_time(new_path)
        else:
            return list_uploaded_files(), f"File {selected} not found", None, "", ""
    return list_uploaded_files(), "No file selected or new name not provided", None, "", ""
# Create the Gradio interface for the chatbot
chatbot_interface = gr.ChatInterface(
    chatbot_response,
    chatbot=gr.Chatbot(height=400),
    textbox=gr.Textbox(placeholder="Type your message here...", container=True, scale=100),
    title="Review With Arcana",
    description="ArcanaUI v0.7 - Chatbot",
    theme="soft",
    examples=[
        "What is Hydrogen Bonding?",
        "Tell me the difference between impulse and force.",
        "Tell me a joke that Calculus students will know.",
        "How should I review for the AP Biology Exam?",
        "What kind of resources are available in PA and Indexademics?",
        "What is the StandardCAS™ group?"
    ],
    cache_examples=False,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear"
)
# Combine the interfaces using Tabs
with gr.Blocks() as demo:
    gr.Markdown("# ArcanaUI v0.7")
    with gr.Tabs():
        with gr.TabItem("Welcome Page"):
            gr.Markdown("""
            hi
            """)

        with gr.TabItem("Chatbot"):
            chatbot_interface.render()

        # File uploading interface
        with gr.TabItem('Upload'):
            gr.Markdown('# Upload and View Files')
            with gr.Row():
                # Left column: File list and buttons
                with gr.Column(scale=1):
                    uploaded_files_list = gr.DataFrame(headers=["Uploaded Files"], datatype="str", interactive=False)
                    with gr.Row():
                        upload_button = gr.UploadButton('Upload File')
                        refresh_button = gr.Button('Refresh')
                    delete_button = gr.Button('Delete Selected File')

                # Right column: File viewer and Image viewer
                with gr.Column(scale=1):
                    with gr.Tab("File Viewer"):
                        file_viewer = gr.File(label="File Restore")
                        file_status = gr.Textbox(label="File Status", interactive=False)
                        file_size = gr.Textbox(label="File Size", interactive=False)
                        file_creation_time = gr.Textbox(label="File Creation Time", interactive=False)
                        with gr.Row():
                            new_file_name = gr.Textbox(label="New File Name", placeholder="Enter new file name")
                            rename_button = gr.Button("Rename File")
                    with gr.Tab("Image Viewer"):
                        image_viewer = gr.Image(label="Image Viewer", type="filepath")

            # Event handlers
            refresh_button.click(fn=refresh_files, outputs=uploaded_files_list)
            upload_button.upload(upload_file, inputs=upload_button, outputs=uploaded_files_list)
            delete_button.click(fn=delete_file, outputs=[uploaded_files_list, file_viewer, file_status, file_size, file_creation_time])
            uploaded_files_list.select(fn=display_file, inputs=uploaded_files_list, outputs=[file_viewer, image_viewer, file_status])
            uploaded_files_list.select(fn=on_select, outputs=[file_viewer, file_status, file_size, file_creation_time])
            rename_button.click(fn=rename_file,
                                inputs=new_file_name,
                                outputs=[uploaded_files_list, file_status, file_viewer, file_size, file_creation_time])

            render_button = gr.Button("Render all PDFs to Database")
            render_button.click(fn=render_to_database)
# Launch the interface
demo.launch(share=True)