# Iris / app.py
import gradio as gr
from huggingface_hub import InferenceClient
from transformers import AutoTokenizer

# Set the model name and initialize the InferenceClient and tokenizer
model_name = "gpt2"  # Replace with your model's name
client = InferenceClient(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
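
# For gated or private models, InferenceClient also accepts an auth token,
# e.g. InferenceClient(model_name, token=os.environ["HF_TOKEN"]); the env
# var name here is illustrative, not something the library requires.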

# Define a function to interact with the model
def chat_with_model(input_text):
    # Tokenize the input only to truncate overly long prompts, then decode
    # back to a string: the Inference API expects raw text, not token IDs
    input_ids = tokenizer(input_text, truncation=True, max_length=512)["input_ids"]
    prompt = tokenizer.decode(input_ids, skip_special_tokens=True)

    # Send the request to the Hugging Face Inference API; text_generation
    # takes the prompt string and returns the generated text directly
    response = client.text_generation(
        prompt,
        max_new_tokens=150,
        temperature=1.0,
    )
    return response
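
# A quick sanity check (hypothetical prompt) when debugging outside the UI:
#
#     print(chat_with_model("Once upon a time"))
#
# If the hosted Inference API is unavailable for this model, a local fallback
# using transformers' pipeline is one option (a sketch, assuming the model
# fits in local memory):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model=model_name)
#     print(generator("Once upon a time", max_new_tokens=150)[0]["generated_text"])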

# Set up the Gradio interface
interface = gr.Interface(
    fn=chat_with_model,
    inputs=[gr.Textbox(lines=5, placeholder="Enter your text here...", label="Input Text")],
    outputs=gr.Textbox(lines=5, label="Response"),
    title="Hugging Face Chatbot",
    description="A simple chatbot powered by Hugging Face and InferenceClient.",
)
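
# On Hugging Face Spaces, launch() with no arguments is enough; for a local
# run, interface.launch(share=True) can create a temporary public link.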

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch()