# meta-llama3 / app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
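# Note: meta-llama/Meta-Llama-3-8B is a gated model on the Hugging Face Hub;
# you must accept Meta's license and authenticate (e.g. `huggingface-cli login`
# or an HF_TOKEN secret) before the weights can be downloaded.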
# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
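# Memory note: the default float32 weights need roughly 32 GB (8B params x 4
# bytes); passing torch_dtype=torch.bfloat16 to from_pretrained roughly halves
# that on supported hardware.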

def generate_text(prompt):
    # Tokenize the prompt, returning an attention mask along with the input ids
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the continuation, so long prompts still work
    outputs = model.generate(**inputs, max_new_tokens=50, num_return_sequences=1)
    # Decode the generated ids back into text, dropping special tokens
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Expose the generator through a simple text-in/text-out Gradio interface
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
iface.launch()