|
from functools import lru_cache

import gradio as gr
from transformers import GPT2TokenizerFast, GPTNeoForCausalLM
|
|
|
@lru_cache(maxsize=1)
def _load_model(model_name="JakeTurner616/Adonalsium-gpt-neo-1.3B"):
    """Load and cache the tokenizer/model pair.

    Loading a 1.3B-parameter checkpoint is expensive, so this runs only on
    the first call; subsequent calls return the cached pair via lru_cache.

    Returns:
        tuple: (tokenizer, model) ready for generation, with pad_token
        configured (GPT-2 style tokenizers ship without one).
    """
    tokenizer = GPT2TokenizerFast.from_pretrained(model_name)
    model = GPTNeoForCausalLM.from_pretrained(model_name)
    # GPT-2 tokenizers have no pad token by default; reuse EOS so that
    # padding/truncation and generate() work without warnings.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = tokenizer.pad_token_id
    return tokenizer, model


def generate_text(prompt, max_length, temperature, top_p, repetition_penalty):
    """Generate text from *prompt* with nucleus sampling.

    Args:
        prompt: Input text to continue.
        max_length: Total output length cap in tokens (prompt included).
        temperature: Softmax temperature; lower is more deterministic.
        top_p: Nucleus-sampling probability mass cutoff.
        repetition_penalty: >1.0 discourages repeating earlier tokens.

    Returns:
        str: The generated text (sequences joined with blank lines), suitable
        for a Gradio "text" output component.
    """
    tokenizer, model = _load_model()

    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)

    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=2,
        do_sample=True,
        pad_token_id=tokenizer.pad_token_id,
    )

    generated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
    # Join into one string: returning a list to a "text" output would render
    # the Python list repr in the UI instead of the generated text.
    return "\n\n".join(generated_texts)
|
|
|
|
|
# Web UI: a prompt box plus four sliders covering the sampling knobs,
# forwarded positionally to generate_text in declaration order.
prompt_input = gr.Textbox(lines=2, label="Input Prompt")
length_slider = gr.Slider(minimum=10, maximum=300, step=10, value=40, label="Max Length")
temperature_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.70, label="Temperature")
top_p_slider = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P")
penalty_slider = gr.Slider(minimum=1.0, maximum=2.0, step=0.1, value=1.2, label="Repetition Penalty")

iface = gr.Interface(
    fn=generate_text,
    inputs=[
        prompt_input,
        length_slider,
        temperature_slider,
        top_p_slider,
        penalty_slider,
    ],
    outputs="text",
    title="Cosmere Series Text Generator",
    description="Adjust the sliders to control text generation parameters.",
)

# Start the local Gradio server (blocks until interrupted).
iface.launch()