EITD committed on
Commit
b8e77bb
·
1 Parent(s): 84981a1
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -72,11 +72,12 @@ def respond(
72
  # model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = max_tokens,
73
  # use_cache = True, temperature = temperature, min_p = top_p)
74
  response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
 
75
 
76
  if "assistant" in response:
77
  response = response.split("assistant")[-1].strip()
78
 
79
- print(response)
80
 
81
 
82
  """
 
72
  # model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = max_tokens,
73
  # use_cache = True, temperature = temperature, min_p = top_p)
74
  response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
75
+ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
76
 
77
  if "assistant" in response:
78
  response = response.split("assistant")[-1].strip()
79
 
80
+ yield response
81
 
82
 
83
  """