Jezia committed on
Commit
b9158a9
·
1 Parent(s): 3947971

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -12,16 +12,16 @@ model = from_pretrained_keras("keras-io/text-generation-miniature-gpt")
12
  with open('tokenizer.pickle', 'rb') as handle:
13
  tokenizer = pickle.load(handle)
14
 
15
- def tokenize_data(text):
16
- # Tokenize the review body
17
- input_ = str(text) + ' </s>'
18
- max_len = 80
19
  # tokenize inputs
20
- tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')
21
 
22
- inputs={"input_ids": tokenized_inputs['input_ids'],
23
- "attention_mask": tokenized_inputs['attention_mask']}
24
- return inputs
25
 
26
  def generate_answers(text):
27
  sequence_test = tokenizer.texts_to_sequences([text])
 
12
  with open('tokenizer.pickle', 'rb') as handle:
13
  tokenizer = pickle.load(handle)
14
 
15
+ #def tokenize_data(text):
16
+ # Tokenize the review body
17
+ # input_ = str(text) + ' </s>'
18
+ # max_len = 80
19
  # tokenize inputs
20
+ # tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')
21
 
22
+ # inputs={"input_ids": tokenized_inputs['input_ids'],
23
+ # "attention_mask": tokenized_inputs['attention_mask']}
24
+ # return inputs
25
 
26
  def generate_answers(text):
27
  sequence_test = tokenizer.texts_to_sequences([text])