import gradio as gr
from transformers import GPTNeoForCausalLM, GPT2TokenizerFast

MODEL_NAME = "JakeTurner616/Adonalsium-gpt-neo-1.3B"

# Load the tokenizer and model once at startup so each request only runs generation
tokenizer = GPT2TokenizerFast.from_pretrained(MODEL_NAME)
model = GPTNeoForCausalLM.from_pretrained(MODEL_NAME)

# GPT-Neo has no pad token by default, so reuse the EOS token for padding
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id


def generate_text(prompt, max_length, temperature, top_p, repetition_penalty):
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # pass the attention mask explicitly
        max_length=max_length,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=2,  # block verbatim repetition of any 2-gram
        do_sample=True,  # sampling must be enabled for temperature and top_p to take effect
        pad_token_id=tokenizer.pad_token_id
    )
    # Decode the single generated sequence and return it as plain text for the Textbox output
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Build the Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="Input Prompt"),
        gr.Slider(minimum=10, maximum=300, step=10, value=40, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.70, label="Temperature"),  # keep > 0: generate() rejects temperature=0 when do_sample=True
        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P"),
        gr.Slider(minimum=1.0, maximum=2.0, step=0.1, value=1.2, label="Repetition Penalty")
    ],
    outputs="text",
    title="Cosmere Series Text Generator",
    description="Adjust the sliders to control text generation parameters."
)
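
# Start the Gradio web UI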
iface.launch()