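"""Gradio demo for JakeTurner616/Adonalsium-gpt2: a GPT-2 model fine-tuned on
Cosmere-series text, with sliders to control the generation parameters."""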
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline
# Load the model and tokenizer
model_name = "JakeTurner616/Adonalsium-gpt2"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)
# Create a pipeline for text generation
text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
# Define a function that uses the model to generate text based on the given prompt and parameters
def generate_text(prompt, max_length, temperature, top_p, repetition_penalty):
    return text_generator(
        prompt,
        max_length=max_length,
        do_sample=True,  # sampling must be enabled for temperature and top_p to take effect
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        num_return_sequences=1,
    )[0]['generated_text']
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="Input Prompt"),
        gr.Slider(minimum=10, maximum=300, step=10, value=100, label="Max Length"),
        # Temperature must be strictly positive when sampling, so the slider starts at 0.1
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.9, label="Top P"),
        gr.Slider(minimum=1.0, maximum=2.0, step=0.1, value=1.1, label="Repetition Penalty"),
    ],
    outputs="text",
    title="Cosmere Series Text Generator",
    description="Adjust the sliders to control text generation parameters.",
)
# Launch the interface
iface.launch()