Jezia committed on
Commit 5826562 · 1 Parent(s): f2c0af4

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -4,13 +4,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from huggingface_hub import from_pretrained_keras
 
-title = "Miniature"
-description = "Gradio Demo for a miniature with GPT. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
-
-
-
 tokenizer = AutoTokenizer.from_pretrained("aditi2222/automatic_title_generation")
-
 model = from_pretrained_keras("keras-io/text-generation-miniature-gpt")
 
 def tokenize_data(text):
@@ -31,5 +25,9 @@ def generate_answers(text):
     answer = tokenizer.decode(results[0], skip_special_tokens=True)
     return answer
 
+
+title = "Miniature"
+description = "Gradio Demo for a miniature with GPT. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
+
 iface = gr.Interface(fn=generate_answers, inputs=['text'], outputs=["text"])
 iface.launch(inline=False, share=True)
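
Note: the commit moves title and description below the model setup, but the gr.Interface call visible in this diff does not pass them, so they would not be rendered in the demo UI. If the intent is to display them, Gradio's Interface accepts title and description keyword arguments. A minimal, self-contained sketch of that pattern (the generate_answers here is a placeholder echo function, not the repo's model code):

import gradio as gr

# Placeholder standing in for the repo's generate_answers; it simply echoes its input.
def generate_answers(text):
    return text

title = "Miniature"
description = "Gradio Demo for a miniature with GPT. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."

# Passing title/description to gr.Interface renders them above the input/output widgets.
iface = gr.Interface(fn=generate_answers, inputs=["text"], outputs=["text"],
                     title=title, description=description)
iface.launch(inline=False, share=True)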