EITD committed on
Commit
84981a1
·
1 Parent(s): bf0fcb3
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -71,8 +71,12 @@ def respond(
71
  # text_streamer = TextStreamer(tokenizer, skip_prompt = True)
72
  # model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = max_tokens,
73
  # use_cache = True, temperature = temperature, min_p = top_p)
 
 
 
 
74
 
75
- yield tokenizer.batch_decode(outputs, skip_special_tokens = True)
76
 
77
 
78
  """
 
71
  # text_streamer = TextStreamer(tokenizer, skip_prompt = True)
72
  # model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = max_tokens,
73
  # use_cache = True, temperature = temperature, min_p = top_p)
74
+ response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
75
+
76
+ if "assistant" in response:
77
+ response = response.split("assistant")[-1].strip()
78
 
79
+ print(response)
80
 
81
 
82
  """