# NOTE(review): the lines below were Hugging Face Spaces page residue
# (status badge, file size, commit hashes, line-number gutter) accidentally
# pasted into the source; commented out so the file parses as Python.
# Spaces: Sleeping — File size: 2,957 Bytes
import gradio as gr
from transformers import pipeline
# Initialize the conversational model pipeline.
# NOTE: this downloads/loads the model at import time (module-level side
# effect) — the app cannot start without network/disk access to the weights.
chatbot_pipeline = pipeline("text-generation", model="Aditya0619/Medbot")
# Chatbot response function
def respond(message, history, system_message, max_tokens, temperature, top_p):
    """Generate a chatbot reply for *message* given the running *history*.

    Args:
        message: Latest user message.
        history: List of (user, bot) message pairs; None is treated as empty.
        system_message: Optional instruction prepended to the prompt
            (empty string disables it, preserving the old prompt exactly).
        max_tokens: Maximum number of NEW tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Returns:
        (history, history): the updated history twice, matching the two
        Gradio outputs wired to this callback (Chatbot widget and State).
    """
    if history is None:
        history = []
    # Build conversation context. The system message was previously accepted
    # but silently ignored; prepend it when provided.
    chat_input = f"{system_message}\n" if system_message else ""
    for user_input, bot_response in history:
        chat_input += f"User: {user_input}\nBot: {bot_response}\n"
    chat_input += f"User: {message}\nBot:"
    # Generate response.
    # - max_new_tokens (not max_length): max_length counts the prompt too,
    #   so replies were silently truncated as the history grew.
    # - do_sample=True: without it, temperature/top_p are ignored by the
    #   pipeline's greedy decoding.
    response = chatbot_pipeline(
        chat_input,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        pad_token_id=50256  # Avoid padding issues with GPT-2 models
    )[0]["generated_text"].split("Bot:")[-1].strip()
    # Update history
    history.append((message, response))
    return history, history
# API function to expose chatbot responses programmatically
def api_chat(message, history=None):
    """Return the bot's reply to *message* plus the full updated history.

    Thin JSON-friendly wrapper around :func:`respond` using fixed default
    generation settings.
    """
    conversation = [] if history is None else history
    new_history, _ = respond(
        message, conversation, "", max_tokens=250, temperature=0.7, top_p=0.9
    )
    latest_reply = new_history[-1][1]
    return {"response": latest_reply, "history": new_history}
# Gradio UI layout
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 AI Chatbot with API Access\nChat with AI or use the API!")
    # Configurable parameters in an accordion menu
    with gr.Row():
        with gr.Accordion("⚙️ Configure Chatbot Settings", open=False):
            system_message = gr.Textbox(label="System Message (Optional)", placeholder="e.g., You are a helpful assistant.")
            max_tokens = gr.Slider(label="Max Tokens", minimum=50, maximum=500, value=250, step=10)
            temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, value=0.7, step=0.1)
            top_p = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=0.9, step=0.1)
    # Chatbot interface and user input field
    chatbot = gr.Chatbot(label="Chat with AI")
    user_input = gr.Textbox(label="Your Message", placeholder="Type a message...", lines=2)
    state = gr.State([])  # Store conversation history
    submit = gr.Button("Send")
    # Link input to chatbot response
    submit.click(
        respond,
        inputs=[user_input, state, system_message, max_tokens, temperature, top_p],
        outputs=[chatbot, state]
    )
    # Initial greeting message. Chatbot tuples are (user_msg, bot_msg); the
    # original put the greeting in the user slot, rendering it as if the
    # user had said it. None in the user slot shows a bot-only bubble.
    demo.load(lambda: [(None, "Hi! How can I assist you today?")], outputs=chatbot)
# Launch Gradio app and print the hosted link in terminal
print("Launching the Gradio app...")
# launch() returns an (app, local_url, share_url) tuple, not a URL string.
# prevent_thread_lock=True keeps this first launch from blocking the script
# so the API app below actually gets started (previously unreachable).
_, ui_local_url, ui_share_url = demo.launch(
    share=True, server_name="0.0.0.0", server_port=7860, prevent_thread_lock=True
)
print(f"App hosted at: {ui_share_url or ui_local_url}")
# API endpoint setup with Gradio
api = gr.Interface(fn=api_chat, inputs=[gr.Textbox(), gr.State([])], outputs="json")
# This second launch blocks, keeping both servers alive until shutdown.
_, api_local_url, api_share_url = api.launch(share=True, server_name="0.0.0.0", server_port=7861)
print(f"API hosted at: {api_share_url or api_local_url}")