Jezia committed
Commit 1b0511d · 1 Parent(s): cedcd4a

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -41,23 +41,21 @@ def sample_from(logits):
     preds = np.asarray(preds).astype("float32")
     return np.random.choice(i, p=preds)
 
-def generate_answers(start_prompt):
+def generate_answers(text):
     num_tokens_generated = 0
-    sample_index = len(start_prompt) - 1
-    start_tokens = [word_to_index.get(_, 1) for _ in start_prompt]
+    sample_index = len([text]) - 1
     tokens_generated= []
 
-    text_out = text_process_pipeline(start_prompt)
+    text_out = text_process_pipeline([text])
     predictions,_ = model.predict(text_out)
     results = np.argmax(predictions, axis=1)[0]
 
     while num_tokens_generated <= 40:
         sample_token = sample_from(predictions[0][sample_index])
         tokens_generated.append(sample_token)
-        start_tokens.append(sample_token)
         num_tokens_generated = len(tokens_generated)
 
-    text_out = tokenizer.sequences_to_texts([tokens_generated])
+    text_out = tokenizer.sequences_to_texts([tokens_generated])
     return text_out[0]
 
 examples = [["The movie was nice, "], ["It was showing nothing special to "]]
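The hunk starts partway through sample_from, so only its last two lines are visible and the names i and preds are never introduced. For context, a minimal top-k sampling helper consistent with those two lines might look like the sketch below; the cutoff k, the use of tf.math.top_k, and the softmax normalisation are assumptions, not part of this commit.

    import numpy as np
    import tensorflow as tf

    def sample_from(logits):
        # Assumed top-k cutoff; the hunk does not show how many candidates are kept.
        k = 10
        # Keep the k most likely next-token logits and their vocabulary indices.
        logits, i = tf.math.top_k(logits, k=k, sorted=True)
        i = np.asarray(i).astype("int32")
        # Normalise the surviving logits into a probability distribution.
        preds = tf.keras.activations.softmax(tf.expand_dims(logits, 0))[0]
        # These last two lines match the context shown in the hunk above.
        preds = np.asarray(preds).astype("float32")
        return np.random.choice(i, p=preds)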
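Since this is a Space's app.py and the hunk ends with an examples list, the renamed generate_answers(text) is presumably exposed through a UI elsewhere in the file. A minimal sketch of that wiring, assuming a Gradio Interface with a single text input (none of this appears in the hunk; generate_answers and examples come from app.py itself), could be:

    import gradio as gr

    # Hypothetical wiring; the commit does not show how the function is exposed.
    demo = gr.Interface(
        fn=generate_answers,                       # now takes a single string `text`
        inputs=gr.Textbox(label="Prompt"),
        outputs=gr.Textbox(label="Generated continuation"),
        examples=examples,                         # the example prompts at the end of the hunk
    )

    if __name__ == "__main__":
        demo.launch()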