Jaehan's picture
Update app.py
fdc396d
raw
history blame contribute delete
863 Bytes
from transformers import AutoModelWithLMHead, AutoTokenizer
import gradio as gr
# Question-generation checkpoint: a T5 model fine-tuned on SQuAD that, given an
# answer span and its surrounding context, generates a question answered by it.
# AutoModelWithLMHead is deprecated (and removed in recent transformers
# releases); AutoModelForSeq2SeqLM is the correct auto class for T5
# encoder-decoder checkpoints, so import and use it here.
from transformers import AutoModelForSeq2SeqLM

model_name = "mrm8488/t5-base-finetuned-question-generation-ap"
text2text_tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
def text2text(context: str, answer: str) -> str:
    """Generate a question whose answer is *answer* within *context*.

    Builds the prompt format this fine-tuned T5 checkpoint expects
    ("answer: ... context: <...>"), generates up to 100 tokens, and
    returns the decoded question text.
    """
    input_text = f"answer: {answer} context: <{context}>"
    features = text2text_tokenizer([input_text], return_tensors="pt")
    output = model.generate(
        input_ids=features["input_ids"],
        attention_mask=features["attention_mask"],
        max_length=100,
    )
    # skip_special_tokens=True drops the leading <pad> and trailing </s>
    # that the original bare decode leaked into the displayed text.
    response = text2text_tokenizer.decode(output[0], skip_special_tokens=True)
    # The model emits its output as "question: ..."; strip that tag so the
    # UI shows only the generated question itself.
    return response.removeprefix("question: ").strip()
# Gradio front-end: a context passage plus a target answer are fed to the
# question-generation function; the generated question is shown read-only.
context_box = gr.Textbox(lines=10, label="English", placeholder="Context")
answer_box = gr.Textbox(lines=1, label="Answer")
question_box = gr.Textbox(lines=1, label="Generated question")

demo = gr.Interface(
    fn=text2text,
    inputs=[context_box, answer_box],
    outputs=question_box,
)
demo.launch()