pratik-aivantage committed
Commit 756d547 · verified · 1 Parent(s): c28beda

Update app.py

Files changed (1):
  1. app.py +40 -14
app.py CHANGED
@@ -1,23 +1,49 @@
+ # import gradio as gr
+ # from transformers import pipeline
+
+ # # Load the pre-trained model
+ # generator = pipeline("question-answering", model="EleutherAI/gpt-neo-2.7B")
+
+ # # Define Gradio interface
+ # def generate_response(prompt):
+ #     # Generate response based on the prompt
+ #     response = generator(prompt, max_length=50, do_sample=True, temperature=0.9)
+ #     return response[0]['generated_text']
+
+ # # Create Gradio interface
+ # iface = gr.Interface(
+ #     fn=generate_response,
+ #     inputs="text",
+ #     outputs="text",
+ #     title="OpenAI Text Generation Model",
+ #     description="Enter a prompt and get a generated text response.",
+ # )
+
+ # # Deploy the Gradio interface
+ # iface.launch(share=True)
+
  import gradio as gr
- from transformers import pipeline
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model_name = "gpt2"
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)

- # Load the pre-trained model
- generator = pipeline("question-answering", model="EleutherAI/gpt-neo-2.7B")

- # Define Gradio interface
- def generate_response(prompt):
-     # Generate response based on the prompt
-     response = generator(prompt, max_length=50, do_sample=True, temperature=0.9)
-     return response[0]['generated_text']
+ def generate_answer(question):
+     inputs = tokenizer.encode("Question: " + question, return_tensors="pt")
+     outputs = model.generate(inputs, max_length=100, num_return_sequences=1, early_stopping=True)
+     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return answer

- # Create Gradio interface
  iface = gr.Interface(
-     fn=generate_response,
+     fn=generate_answer,
      inputs="text",
      outputs="text",
-     title="OpenAI Text Generation Model",
-     description="Enter a prompt and get a generated text response.",
+     title="Open-Domain Question Answering",
+     description="Enter your question to get an answer.",
+     theme="compact"
  )

- # Deploy the Gradio interface
- iface.launch(share=True)
+ iface.launch(share=True)  # Deploy the interface
+
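
Review note: the removed code asked the question-answering pipeline to wrap EleutherAI/gpt-neo-2.7B, a causal language model. That pipeline expects an extractive QA model plus separate question/context inputs and returns a dict with an "answer" key, so neither the generator(prompt) call nor response[0]['generated_text'] matches its interface. A minimal sketch of what the old setup likely intended, switched to the text-generation task that does fit a causal LM (an untested editor's sketch, not part of this commit):

import gradio as gr
from transformers import pipeline

# "text-generation" is the pipeline task for causal LMs such as GPT-Neo;
# "question-answering" expects an extractive QA model and (question, context) pairs.
generator = pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B")

def generate_response(prompt):
    # Sample a short continuation of the prompt.
    response = generator(prompt, max_length=50, do_sample=True, temperature=0.9)
    return response[0]["generated_text"]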
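
Review note: in the new version, generate_answer decodes the entire output sequence, so every answer echoes the "Question: ..." prefix back to the user, and model.generate will warn that early_stopping does nothing without beam search and that no pad_token_id is set (GPT-2 defines none). A possible refinement, again an editor's sketch rather than part of the commit (parameter values are illustrative):

def generate_answer(question):
    prompt = "Question: " + question + "\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=60,                    # bound the continuation, not prompt + answer
        num_beams=4,                          # early_stopping only applies to beam search
        early_stopping=True,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
    )
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer[len(prompt):].strip()  # drop the echoed prompt

Separately, depending on the Gradio version the Space installs, newer releases may not accept theme="compact", and share=True is ignored when the app already runs on Spaces.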