import gradio as gr
from gradio_client import Client, handle_file
import os
# Hugging Face token for the upstream Space; keep it in an environment variable
# (e.g. a Space secret) rather than hardcoding it in source.
HF_TOKEN = os.getenv("HF_TOKEN")
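
# Optional guard (assumes the upstream Space may be private and therefore needs a token):
# warn early if HF_TOKEN is missing rather than failing opaquely inside the client calls below.
if not HF_TOKEN:
    print("Warning: HF_TOKEN is not set; requests to the upstream Space may be rejected.")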
# Initialize the Gradio Client for the specified API
client = Client("on1onmangoes/CNIHUB10724v9", hf_token=HF_TOKEN)
# Function to handle chat API call
def stream_chat_with_rag(
message: str,
history: list,
client_name: str,
system_prompt: str,
num_retrieved_docs: int,
num_docs_final: int,
temperature: float,
max_new_tokens: int,
top_p: float,
top_k: int,
penalty: float,
):
    # Forward the UI parameters to the upstream /chat endpoint. `history` is supplied by
    # gr.ChatInterface but is not forwarded to the endpoint.
response = client.predict(
message=message,
client_name=client_name,
system_prompt=system_prompt,
num_retrieved_docs=num_retrieved_docs,
num_docs_final=num_docs_final,
temperature=temperature,
max_new_tokens=max_new_tokens,
top_p=top_p,
top_k=top_k,
penalty=penalty,
api_name="/chat"
)
# Return the assistant's reply
return response
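
# Minimal smoke test for the /chat wrapper, gated behind an opt-in environment flag
# (RUN_CHAT_SMOKE_TEST is an arbitrary name chosen here) so importing this module stays
# side-effect free. The argument values are assumptions that mirror the UI defaults below.
if os.getenv("RUN_CHAT_SMOKE_TEST"):
    print(stream_chat_with_rag(
        message="Which documents are available for this client?",
        history=[],
        client_name="rosariarossi",
        system_prompt="You are an expert assistant",
        num_retrieved_docs=10,
        num_docs_final=9,
        temperature=0,
        max_new_tokens=1024,
        top_p=1.0,
        top_k=20,
        penalty=1.2,
    ))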
# # OG code in V9
# def stream_chat_with_rag(
# message: str,
# history: list,
# client_name: str,
# system_prompt: str,
# num_retrieved_docs: int = 10,
# num_docs_final: int = 9,
# temperature: float = 0,
# max_new_tokens: int = 1024,
# top_p: float = 1.0,
# top_k: int = 20,
# penalty: float = 1.2,
# ):
# # Function to handle chat API call
# # def stream_chat_with_rag(message, system_prompt, num_retrieved_docs, num_docs_final, temperature, max_new_tokens, top_p, top_k, penalty):
# # response = client.predict(
# # message=message,
# # client_name="rosariarossi", # Hardcoded client name
# # system_prompt=system_prompt,
# # num_retrieved_docs=num_retrieved_docs,
# # num_docs_final=num_docs_final,
# # temperature=temperature,
# # max_new_tokens=max_new_tokens,
# # top_p=top_p,
# # top_k=top_k,
# # penalty=penalty,
# # api_name="/chat"
# # )
# # return response
# result = client.predict(
# message=message,
# client_name="rosariarossi",
# system_prompt="You are an expert assistant",
# num_retrieved_docs=10,
# num_docs_final=9,
# temperature=0,
# max_new_tokens=1024,
# top_p=1,
# top_k=20,
# penalty=1.2,
# api_name="/chat"
# )
# return result
# Function to handle PDF processing API call
def process_pdf(pdf_file):
return client.predict(
pdf_file=handle_file(pdf_file),
client_name="rosariarossi", # Hardcoded client name
api_name="/process_pdf2"
)[1] # Return only the result string
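
# Example usage (illustrative; the path is a placeholder, not a file bundled with this
# Space). gradio_client.handle_file accepts a local path or a URL and uploads it for the
# remote call, which is why process_pdf can take a plain filepath string.
#   result_text = process_pdf("/path/to/sample.pdf")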
# Function to handle search API call
def search_api(query):
return client.predict(query=query, api_name="/search_with_confidence")
# Function to handle RAG API call
def rag_api(question):
return client.predict(question=question, api_name="/answer_with_rag")
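
# Example usage of the two single-argument wrappers (illustrative; the strings are
# placeholders and both endpoints are assumed to return plain text):
#   confidence = search_api("payment terms")
#   answer = rag_api("What is the notice period in the agreement?")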
# CSS for custom styling
CSS = """
# chat-container {
height: 100vh;
}
"""
# Title for the application
TITLE = "<h1 style='text-align:center;'>My Gradio Chat App</h1>"
# Create the Gradio Blocks interface
with gr.Blocks(css=CSS) as demo:
gr.HTML(TITLE)
## OG v9 comments
# gr.ChatInterface(
# fn=stream_chat_with_rag,
# chatbot=chatbot,
# fill_height=True,
# #gr.dropdown(['rosariarossi','bianchifiordaliso','lorenzoverdi'],label="Select Client"),
# additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
# additional_inputs=[
# gr.Dropdown(['rosariarossi','bianchifiordaliso','lorenzoverdi'],value="rosariarossi",label="Select Client", render=False,),
# gr.Textbox(
# # value="""Using the information contained in the context,
# # give a comprehensive answer to the question.
# # Respond only to the question asked, response should be concise and relevant to the question.
# # Provide the number of the source document when relevant.
# # If the answer cannot be deduced from the context, do not give an answer""",
# value ="""You are an expert assistant""",
# label="System Prompt",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=10,
# step=1,
# value=10,
# label="Number of Initial Documents to Retrieve",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=10,
# step=1,
# value=9,
# label="Number of Final Documents to Retrieve",
# render=False,
# ),
# gr.Slider(
# minimum=0.2,
# maximum=1,
# step=0.1,
# value=0,
# label="Temperature",
# render=False,
# ),
# gr.Slider(
# minimum=128,
# maximum=8192,
# step=1,
# value=1024,
# label="Max new tokens",
# render=False,
# ),
# gr.Slider(
# minimum=0.0,
# maximum=1.0,
# step=0.1,
# value=1.0,
# label="top_p",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=20,
# step=1,
# value=20,
# label="top_k",
# render=False,
# ),
# gr.Slider(
# minimum=0.0,
# maximum=2.0,
# step=0.1,
# value=1.2,
# label="Repetition penalty",
# render=False,
# ),
# ],
# )
with gr.Tab("Chat"):
        chatbot = gr.Chatbot(elem_id="chat-container")  # Chat display; elem_id ties it to the #chat-container CSS rule above
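        # gr.ChatInterface passes (message, history) first and then additional_inputs in
        # list order, so the list below must stay aligned with the remaining parameters of
        # stream_chat_with_rag (client_name, system_prompt, ..., penalty).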
chat_interface = gr.ChatInterface(
fn=stream_chat_with_rag,
chatbot=chatbot,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Dropdown(['rosariarossi','bianchifiordaliso','lorenzoverdi'],value="rosariarossi",label="Select Client", render=False,),
gr.Textbox(
value="You are an expert assistant",
label="System Prompt",
render=False,
),
gr.Slider(
minimum=1,
maximum=10,
step=1,
value=10,
label="Number of Initial Documents to Retrieve",
render=False,
),
gr.Slider(
minimum=1,
maximum=10,
step=1,
value=9,
label="Number of Final Documents to Retrieve",
render=False,
),
            gr.Slider(
                minimum=0.0,
                maximum=1,
                step=0.1,
                value=0,
                label="Temperature",
                render=False,
            ),
gr.Slider(
minimum=128,
maximum=8192,
step=1,
value=1024,
label="Max new tokens",
render=False,
),
gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.1,
value=1.0,
label="Top P",
render=False,
),
gr.Slider(
minimum=1,
maximum=20,
step=1,
value=20,
label="Top K",
render=False,
),
gr.Slider(
minimum=0.0,
maximum=2.0,
step=0.1,
value=1.2,
label="Repetition Penalty",
render=False,
),
],
)
with gr.Tab("Process PDF"):
pdf_input = gr.File(label="Upload PDF File")
pdf_output = gr.Textbox(label="PDF Result", interactive=False)
pdf_button = gr.Button("Process PDF")
pdf_button.click(
process_pdf,
inputs=[pdf_input],
outputs=pdf_output
)
with gr.Tab("Search"):
query_input = gr.Textbox(label="Enter Search Query")
search_output = gr.Textbox(label="Search Confidence Result", interactive=False)
search_button = gr.Button("Search")
search_button.click(
search_api,
inputs=query_input,
outputs=search_output
)
with gr.Tab("Answer with RAG"):
question_input = gr.Textbox(label="Enter Question for RAG")
rag_output = gr.Textbox(label="RAG Answer Result", interactive=False)
rag_button = gr.Button("Get Answer")
rag_button.click(
rag_api,
inputs=question_input,
outputs=rag_output
)
# Launch the app
if __name__ == "__main__":
demo.launch()
# import gradio as gr
# from gradio_client import Client, handle_file
# import os
# # Define your Hugging Face token (make sure to set it as an environment variable)
# HF_TOKEN = os.getenv("HF_TOKEN") # Replace with your actual token if not using an environment variable
# # Initialize the Gradio Client for the specified API
# client = Client("on1onmangoes/CNIHUB10724v9", hf_token=HF_TOKEN)
# # Function to handle chat API call
# def stream_chat_with_rag(message, system_prompt, num_retrieved_docs, num_docs_final, temperature, max_new_tokens, top_p, top_k, penalty):
# response = client.predict(
# message=message,
# client_name="rosariarossi", # Hardcoded client name
# system_prompt=system_prompt,
# num_retrieved_docs=num_retrieved_docs,
# num_docs_final=num_docs_final,
# temperature=temperature,
# max_new_tokens=max_new_tokens,
# top_p=top_p,
# top_k=top_k,
# penalty=penalty,
# api_name="/chat"
# )
# return response
# # Function to handle PDF processing API call
# def process_pdf(pdf_file):
# return client.predict(
# pdf_file=handle_file(pdf_file),
# client_name="rosariarossi", # Hardcoded client name
# api_name="/process_pdf2"
# )[1] # Return only the result string
# # Function to handle search API call
# def search_api(query):
# return client.predict(query=query, api_name="/search_with_confidence")
# # Function to handle RAG API call
# def rag_api(question):
# return client.predict(question=question, api_name="/answer_with_rag")
# # CSS for custom styling
# CSS = """
# # chat-container {
# height: 100vh;
# }
# """
# # Title for the application
# TITLE = "<h1 style='text-align:center;'>My Gradio Chat App</h1>"
# # Create the Gradio Blocks interface
# with gr.Blocks(css=CSS) as demo:
# gr.HTML(TITLE)
# with gr.Tab("Chat"):
# chatbot = gr.Chatbot() # Create a chatbot interface
# chat_interface = gr.ChatInterface(
# fn=stream_chat_with_rag,
# chatbot=chatbot,
# additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
# additional_inputs=[
# gr.Dropdown(
# ['rosariarossi', 'bianchifiordaliso', 'lorenzoverdi'],
# value="rosariarossi",
# label="Select Client",
# render=False,
# ),
# gr.Textbox(
# value="You are an expert assistant",
# label="System Prompt",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=10,
# step=1,
# value=10,
# label="Number of Initial Documents to Retrieve",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=10,
# step=1,
# value=9,
# label="Number of Final Documents to Retrieve",
# render=False,
# ),
# gr.Slider(
# minimum=0.2,
# maximum=1,
# step=0.1,
# value=0,
# label="Temperature",
# render=False,
# ),
# gr.Slider(
# minimum=128,
# maximum=8192,
# step=1,
# value=1024,
# label="Max new tokens",
# render=False,
# ),
# gr.Slider(
# minimum=0.0,
# maximum=1.0,
# step=0.1,
# value=1.0,
# label="Top P",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=20,
# step=1,
# value=20,
# label="Top K",
# render=False,
# ),
# gr.Slider(
# minimum=0.0,
# maximum=2.0,
# step=0.1,
# value=1.2,
# label="Repetition Penalty",
# render=False,
# ),
# ],
# )
# with gr.Tab("Process PDF"):
# pdf_input = gr.File(label="Upload PDF File")
# pdf_output = gr.Textbox(label="PDF Result", interactive=False)
# pdf_button = gr.Button("Process PDF")
# pdf_button.click(
# process_pdf,
# inputs=[pdf_input],
# outputs=pdf_output
# )
# with gr.Tab("Search"):
# query_input = gr.Textbox(label="Enter Search Query")
# search_output = gr.Textbox(label="Search Confidence Result", interactive=False)
# search_button = gr.Button("Search")
# search_button.click(
# search_api,
# inputs=query_input,
# outputs=search_output
# )
# with gr.Tab("Answer with RAG"):
# question_input = gr.Textbox(label="Enter Question for RAG")
# rag_output = gr.Textbox(label="RAG Answer Result", interactive=False)
# rag_button = gr.Button("Get Answer")
# rag_button.click(
# rag_api,
# inputs=question_input,
# outputs=rag_output
# )
# # Launch the app
# if __name__ == "__main__":
# demo.launch()
# import gradio as gr
# from gradio_client import Client, handle_file
# import os
# # Define your Hugging Face token (make sure to set it as an environment variable)
# HF_TOKEN = os.getenv("HF_TOKEN") # Replace with your actual token if not using an environment variable
# # Initialize the Gradio Client for the specified API
# client = Client("on1onmangoes/CNIHUB10724v9", hf_token=HF_TOKEN)
# # Function to handle chat API call
# def stream_chat_with_rag(message, client_name, system_prompt, num_retrieved_docs, num_docs_final, temperature, max_new_tokens, top_p, top_k, penalty):
# response = client.predict(
# message=message,
# client_name=client_name,
# system_prompt=system_prompt,
# num_retrieved_docs=num_retrieved_docs,
# num_docs_final=num_docs_final,
# temperature=temperature,
# max_new_tokens=max_new_tokens,
# top_p=top_p,
# top_k=top_k,
# penalty=penalty,
# api_name="/chat"
# )
# return response
# # Function to handle PDF processing API call
# def process_pdf(pdf_file, client_name):
# return client.predict(
# pdf_file=handle_file(pdf_file),
# client_name=client_name,
# api_name="/process_pdf2"
# )[1] # Return only the result string
# # Function to handle search API call
# def search_api(query):
# return client.predict(query=query, api_name="/search_with_confidence")
# # Function to handle RAG API call
# def rag_api(question):
# return client.predict(question=question, api_name="/answer_with_rag")
# # Create the Gradio Blocks interface
# with gr.Blocks() as app:
# gr.Markdown("### Login")
# with gr.Row():
# username_input = gr.Textbox(label="Username", placeholder="Enter your username")
# password_input = gr.Textbox(label="Password", placeholder="Enter your password", type="password")
# with gr.Tab("Chat"):
# chatbot = gr.Chatbot() # Create a chatbot interface
# chat_interface = gr.ChatInterface(
# fn=stream_chat_with_rag,
# chatbot=chatbot,
# fill_height=True,
# additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
# additional_inputs=[
# gr.Dropdown(
# ['rosariarossi', 'bianchifiordaliso', 'lorenzoverdi'],
# value="rosariarossi",
# label="Select Client",
# render=False,
# ),
# gr.Textbox(
# value="You are an expert assistant",
# label="System Prompt",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=10,
# step=1,
# value=10,
# label="Number of Initial Documents to Retrieve",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=10,
# step=1,
# value=9,
# label="Number of Final Documents to Retrieve",
# render=False,
# ),
# gr.Slider(
# minimum=0.2,
# maximum=1,
# step=0.1,
# value=0,
# label="Temperature",
# render=False,
# ),
# gr.Slider(
# minimum=128,
# maximum=8192,
# step=1,
# value=1024,
# label="Max new tokens",
# render=False,
# ),
# gr.Slider(
# minimum=0.0,
# maximum=1.0,
# step=0.1,
# value=1.0,
# label="Top P",
# render=False,
# ),
# gr.Slider(
# minimum=1,
# maximum=20,
# step=1,
# value=20,
# label="Top K",
# render=False,
# ),
# gr.Slider(
# minimum=0.0,
# maximum=2.0,
# step=0.1,
# value=1.2,
# label="Repetition Penalty",
# render=False,
# ),
# ],
# )
# with gr.Tab("Process PDF"):
# pdf_input = gr.File(label="Upload PDF File")
# pdf_output = gr.Textbox(label="PDF Result", interactive=False)
# pdf_button = gr.Button("Process PDF")
# pdf_button.click(
# process_pdf,
# inputs=[pdf_input, client_name_dropdown],
# outputs=pdf_output
# )
# with gr.Tab("Search"):
# query_input = gr.Textbox(label="Enter Search Query")
# search_output = gr.Textbox(label="Search Confidence Result", interactive=False)
# search_button = gr.Button("Search")
# search_button.click(
# search_api,
# inputs=query_input,
# outputs=search_output
# )
# with gr.Tab("Answer with RAG"):
# question_input = gr.Textbox(label="Enter Question for RAG")
# rag_output = gr.Textbox(label="RAG Answer Result", interactive=False)
# rag_button = gr.Button("Get Answer")
# rag_button.click(
# rag_api,
# inputs=question_input,
# outputs=rag_output
# )
# # Launch the app
# app.launch()
# import gradio as gr
# from gradio_client import Client, handle_file
# import os
# # Define your Hugging Face token (make sure to set it as an environment variable)
# HF_TOKEN = os.getenv("HF_TOKEN") # Replace with your actual token if not using env variable
# # Initialize the Gradio Client for the specified API
# client = Client("on1onmangoes/CNIHUB10724v9", hf_token=HF_TOKEN)
# # Authentication function
# def login(username, password):
# if username == "your_username" and password == "your_password": # Update with actual credentials
# return True
# else:
# return False
# # Function to handle different API calls based on user input
# def handle_api_call(username, password, message=None, client_name="rosariarossi",
# system_prompt="You are an expert assistant", num_retrieved_docs=10,
# num_docs_final=9, temperature=0, max_new_tokens=1024,
# top_p=1, top_k=20, penalty=1.2,
# pdf_file=None, query=None, question=None):
# if not login(username, password):
# return "Invalid credentials! Please try again."
# if message:
# # Handle chat message
# chat_result = client.predict(
# message=message,
# client_name=client_name,
# system_prompt=system_prompt,
# num_retrieved_docs=num_retrieved_docs,
# num_docs_final=num_docs_final,
# temperature=temperature,
# max_new_tokens=max_new_tokens,
# top_p=top_p,
# top_k=top_k,
# penalty=penalty,
# api_name="/chat"
# )
# return chat_result
# elif pdf_file:
# # Handle PDF file
# pdf_result = client.predict(
# pdf_file=handle_file(pdf_file),
# client_name=client_name,
# api_name="/process_pdf2"
# )
# return pdf_result[1] # Returning the string result from the PDF processing
# elif query:
# # Handle search query
# search_result = client.predict(query=query, api_name="/search_with_confidence")
# return search_result
# elif question:
# # Handle question for RAG
# rag_result = client.predict(question=question, api_name="/answer_with_rag")
# return rag_result
# else:
# return "No valid input provided!"
# # Create the Gradio Blocks interface
# with gr.Blocks() as app:
# gr.Markdown("### Login")
# with gr.Row():
# username_input = gr.Textbox(label="Username", placeholder="Enter your username")
# password_input = gr.Textbox(label="Password", placeholder="Enter your password", type="password")
# with gr.Tab("Chat"):
# message_input = gr.Textbox(label="Message", placeholder="Type your message here")
# gr.Markdown("### Client Options")
# client_name_dropdown = gr.Dropdown(
# label="Select Client",
# choices=["rosariarossi", "bianchifiordaliso", "lorenzoverdi"],
# value="rosariarossi"
# )
# system_prompt_input = gr.Textbox(
# label="System Prompt",
# placeholder="Enter system prompt here",
# value="You are an expert assistant"
# )
# num_retrieved_docs_slider = gr.Slider(
# label="Number of Initial Documents to Retrieve",
# minimum=1,
# maximum=100,
# step=1,
# value=10
# )
# num_docs_final_slider = gr.Slider(
# label="Number of Final Documents to Retrieve",
# minimum=1,
# maximum=100,
# step=1,
# value=9
# )
# temperature_slider = gr.Slider(
# label="Temperature",
# minimum=0,
# maximum=2,
# step=0.1,
# value=0
# )
# max_new_tokens_slider = gr.Slider(
# label="Max New Tokens",
# minimum=1,
# maximum=2048,
# step=1,
# value=1024
# )
# top_p_slider = gr.Slider(
# label="Top P",
# minimum=0,
# maximum=1,
# step=0.01,
# value=1
# )
# top_k_slider = gr.Slider(
# label="Top K",
# minimum=1,
# maximum=100,
# step=1,
# value=20
# )
# penalty_slider = gr.Slider(
# label="Repetition Penalty",
# minimum=1,
# maximum=5,
# step=0.1,
# value=1.2
# )
# chat_output = gr.Textbox(label="Chat Response", interactive=False)
# with gr.Tab("Process PDF"):
# pdf_input = gr.File(label="Upload PDF File")
# pdf_output = gr.Textbox(label="PDF Result", interactive=False)
# with gr.Tab("Search"):
# query_input = gr.Textbox(label="Enter Search Query")
# search_output = gr.Textbox(label="Search Confidence Result", interactive=False)
# with gr.Tab("Answer with RAG"):
# question_input = gr.Textbox(label="Enter Question for RAG")
# rag_output = gr.Textbox(label="RAG Answer Result", interactive=False)
# api_button = gr.Button("Submit")
# # Bind the button click to the handle_api_call function
# api_button.click(
# handle_api_call,
# inputs=[
# username_input, password_input,
# message_input, client_name_dropdown,
# system_prompt_input, num_retrieved_docs_slider,
# num_docs_final_slider, temperature_slider,
# max_new_tokens_slider, top_p_slider,
# top_k_slider, penalty_slider,
# pdf_input, query_input, question_input
# ],
# outputs=[
# chat_output, pdf_output, search_output, rag_output
# ]
# )
# # Launch the app
# app.launch()
# import gradio as gr
# from gradio_client import Client, handle_file
# import os
# # Define your Hugging Face token (make sure to set it as an environment variable)
# HF_TOKEN = os.getenv("HF_TOKEN") # Replace with your actual token if not using env variable
# # Initialize the Gradio Client for the specified API
# client = Client("on1onmangoes/CNIHUB10724v9", hf_token=HF_TOKEN)
# # Authentication function
# def login(username, password):
# if username == "your_username" and password == "your_password": # Update with actual credentials
# return True
# else:
# return False
# # Function to handle different API calls based on user input
# def handle_api_call(username, password, audio_file=None, pdf_file=None, message=None, query=None, question=None):
# if not login(username, password):
# return "Invalid credentials! Please try again."
# if audio_file:
# # Handle audio file using the appropriate API
# result = client.predict(audio=handle_file(audio_file), api_name="/process_audio") # Example endpoint for audio processing
# return result
# elif pdf_file:
# # Handle PDF file
# pdf_result = client.predict(pdf_file=handle_file(pdf_file), client_name="rosariarossi", api_name="/process_pdf2")
# return pdf_result[1] # Returning the string result from the PDF processing
# elif message:
# # Handle chat message
# chat_result = client.predict(
# message=message,
# client_name="rosariarossi",
# system_prompt="You are an expert assistant",
# num_retrieved_docs=10,
# num_docs_final=9,
# temperature=0,
# max_new_tokens=1024,
# top_p=1,
# top_k=20,
# penalty=1.2,
# api_name="/chat"
# )
# return chat_result
# elif query:
# # Handle search query
# search_result = client.predict(query=query, api_name="/search_with_confidence")
# return search_result
# elif question:
# # Handle question for RAG
# rag_result = client.predict(question=question, api_name="/answer_with_rag")
# return rag_result
# else:
# return "No valid input provided!"
# # Create the Gradio Blocks interface
# with gr.Blocks() as app:
# gr.Markdown("### Login")
# with gr.Row():
# username_input = gr.Textbox(label="Username", placeholder="Enter your username")
# password_input = gr.Textbox(label="Password", placeholder="Enter your password", type="password")
# audio_input = gr.Audio(label="Upload Audio File", type="filepath")
# pdf_input = gr.File(label="Upload PDF File")
# message_input = gr.Textbox(label="Enter Message for Chat")
# query_input = gr.Textbox(label="Enter Search Query")
# question_input = gr.Textbox(label="Enter Question for RAG")
# output_text = gr.Textbox(label="Output", interactive=False)
# # Bind the button click to the handle_api_call function
# api_button = gr.Button("Submit")
# api_button.click(
# handle_api_call,
# inputs=[username_input, password_input, audio_input, pdf_input, message_input, query_input, question_input],
# outputs=output_text
# )
# # Launch the app
# app.launch()
# import gradio as gr
# # Define a function for the main application
# def greet(name):
# return f"Hello {name}!"
# # Define a function for the authentication
# def login(username, password):
# if username == "your_username" and password == "your_password":
# return True
# else:
# return False
# # Create the Gradio Blocks interface
# with gr.Blocks() as app:
# gr.Markdown("### Login")
# with gr.Row():
# username_input = gr.Textbox(label="Username", placeholder="Enter your username")
# password_input = gr.Textbox(label="Password", placeholder="Enter your password", type="password")
# login_button = gr.Button("Login")
# output_text = gr.Textbox(label="Output", interactive=False)
# # Function to handle login and display greeting
# def handle_login(username, password):
# if login(username, password):
# # Clear the password field and display the greeting
# #password_input.clear()
# return greet(username)
# else:
# return "Invalid credentials! Please try again."
# # Bind the button click to the handle_login function
# login_button.click(handle_login, inputs=[username_input, password_input], outputs=output_text)
# # Launch the app
# app.launch()