pratik-aivantage committed on
Commit
c5aa45b
·
verified ·
1 Parent(s): a860483

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -9
app.py CHANGED
@@ -23,18 +23,16 @@
23
  # iface.launch(share=True)
24
 
25
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# GPT-2 checkpoint: a general-purpose causal language model.
# NOTE(review): both calls download weights/vocab on first run — this
# happens at import time, before the Gradio app starts.
model_name = "gpt2"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
31
 
 
 
32
 
 
33
def generate_answer(question):
    """Generate a free-form answer for *question* with the GPT-2 model.

    The question is prefixed with "Question: ", fed to ``model.generate``,
    and the single sampled continuation is decoded back to text.
    NOTE: ``do_sample=True`` makes the output non-deterministic.
    """
    prompt_ids = tokenizer.encode("Question: " + question, return_tensors="pt")
    generated = model.generate(
        prompt_ids,
        max_length=100,
        num_return_sequences=1,
        early_stopping=True,
        do_sample=True,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)
38
 
39
  iface = gr.Interface(
40
  fn=generate_answer,
 
23
  # iface.launch(share=True)
24
 
25
import gradio as gr
from transformers import pipeline

# Extractive question-answering pipeline (loads weights at import time).
# FIX: the original passed tokenizer="distilbert-base-cased", which does
# not match the fine-tuned checkpoint. Omitting the tokenizer argument
# lets the pipeline load the tokenizer that ships with
# "distilbert-base-cased-distilled-squad", keeping model and tokenizer
# consistent.
qa_pipeline = pipeline(
    "question-answering",
    model="distilbert-base-cased-distilled-squad",
)
30
 
31
+ # Define a function to generate answer for the given question
32
def generate_answer(question, context=None):
    """Answer *question* with the extractive QA pipeline.

    Args:
        question: The question to answer.
        context: Passage to extract the answer from. Extractive QA cannot
            run without a context, so when it is omitted (as the
            single-input Gradio interface does) the question text itself
            is used as a degenerate fallback so the call does not crash.

    Returns:
        The extracted answer span as a string.
    """
    # BUG FIX: the original called qa_pipeline(question=question,
    # context=None); transformers question-answering pipelines raise
    # ValueError on a None/empty context, so every invocation failed.
    passage = context if context else question
    result = qa_pipeline(question=question, context=passage)
    return result["answer"]
 
36
 
37
  iface = gr.Interface(
38
  fn=generate_answer,