import gradio as gr
from transformers import AutoTokenizer, pipeline
import torch

# Load the tokenizer and the text-generation pipeline for the
# "notexist/ttt" checkpoint hosted on the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("notexist/ttt")
tdk = pipeline(
    "text-generation",
    model="notexist/ttt",
    tokenizer=tokenizer,
)

def predict(name):
    """Generate a sampled text continuation for *name*.

    The input is wrapped in the model's prompt template
    ``<|endoftext|>{name}\\n\\n`` before generation, and that prefix is
    stripped from the result so only the newly generated text is returned.

    Parameters
    ----------
    name : str
        Seed text to condition the generation on.

    Returns
    -------
    str
        The generated continuation with the prompt prefix removed.
    """
    # Build the prompt once so the generation call and the prefix-strip
    # below can never drift out of sync (the original duplicated this
    # f-string in both places).
    prompt = f"<|endoftext|>{name}\n\n"
    generated = tdk(
        prompt,
        do_sample=True,
        max_length=64,
        top_k=75,
        top_p=0.95,
        num_return_sequences=1,
        repetition_penalty=1.3,
    )[0]["generated_text"]
    # The pipeline echoes the prompt at the start of its output; drop it.
    return generated[len(prompt):]



# Wire the predictor into a minimal text-in / text-out Gradio UI and
# start serving it.
iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
)
iface.launch()