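"""Gradio demo app for text generation with the JakeTurner616/Adonalsium-gpt-neo-1.3B GPT-Neo model."""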
import gradio as gr
from transformers import GPTNeoForCausalLM, GPT2TokenizerFast
def generate_text(prompt, max_length, temperature, top_p, repetition_penalty):
    # Load the tokenizer and model (reloaded on every call; caching them at module level would be faster)
    tokenizer = GPT2TokenizerFast.from_pretrained("JakeTurner616/Adonalsium-gpt-neo-1.3B")
    model = GPTNeoForCausalLM.from_pretrained("JakeTurner616/Adonalsium-gpt-neo-1.3B")

    # If the tokenizer has no padding token, add one and resize the model embeddings to match
    if tokenizer.pad_token is None:
        tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        model.resize_token_embeddings(len(tokenizer))

    inputs = tokenizer(prompt, return_tensors="pt", padding=True)
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=int(max_length),  # slider values arrive as floats; generate expects an int
        do_sample=True,  # sampling must be enabled for temperature/top_p to take effect
        temperature=temperature,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.pad_token_id
    )
    generated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
    return generated_texts[0]  # a single sequence is generated, so return it as plain text

iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="Input Prompt"),
        gr.Slider(minimum=10, maximum=300, step=10, value=100, label="Max Length"),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.7, label="Temperature"),
        gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.9, label="Top P"),
        gr.Slider(minimum=1.0, maximum=2.0, step=0.1, value=1.1, label="Repetition Penalty")
    ],
    outputs="text",
)
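# Launch the Gradio UI; by default it serves locally at http://127.0.0.1:7860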
iface.launch()