import gradio as gr
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

# Load model & tokenizer once at startup.
# model_name = "bert-large-uncased-whole-word-masking-finetuned-squad"
model_name = "deepset/roberta-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

# Build the QA pipeline ONCE, reusing the already-loaded model/tokenizer
# objects. The original code constructed a new pipeline inside get_answer
# on every request — and from the model-name strings, which triggered a
# second full model load that ignored the objects loaded above (slow per
# call and roughly doubled memory use).
nlp = pipeline('question-answering', model=model, tokenizer=tokenizer)

# Fixed context the QA model answers against.
fixed_context = """Ishaan is a 6-year-old kid. He is very good at football. He is a very good sportsperson. He is a smart kid. He can run very fast, as fast as 10 meters in 1 minute. He goes to Vidyani Ketan School. He goes to school from 8 am to 3:30 pm. Ishaan has many friends. Vineet is Ishaan's brother."""


def get_answer(question: str) -> str:
    """Answer *question* using extractive QA over the module-level fixed_context.

    Parameters
    ----------
    question : str
        A natural-language question about the fixed context.

    Returns
    -------
    str
        The answer span extracted from ``fixed_context`` by the QA pipeline.
    """
    QA_input = {
        'question': question,
        'context': fixed_context
    }
    res = nlp(QA_input)
    return res['answer']


# Create the Gradio interface
gradio_ui = gr.Interface(
    fn=get_answer,
    inputs=gr.Textbox(label="Question"),
    outputs=gr.Textbox(label="Answer"),
)

# Launch the Gradio interface. Kept at module level (no __main__ guard) to
# preserve the original behavior when the file is executed by a host platform.
gradio_ui.launch()