han1997 committed (verified) · Commit a15ba40 · 1 Parent(s): 2ecd703

Update app.py

Files changed (1)
  1. app.py +16 -13
app.py CHANGED
@@ -75,20 +75,23 @@ def bot_streaming(message, history, temperature, top_k, max_new_tokens):
         "top_k": top_k,
     }
 
-    # Generate from the VLM
-    thread = Thread(target=vlm.generate, kwargs=generation_kwargs)
-    thread.start()
+    # Generate from the VLM
+    buffer = vlm.generate(
+        **generation_kwargs
+    )
+    # thread = Thread(target=vlm.generate, kwargs=generation_kwargs)
+    # thread.start()
 
-    buffer = ""
-    output_started = False
-    for new_text in streamer:
-        if not output_started:
-            if "<|assistant|>\n" in new_text:
-                output_started = True
-            continue
-        buffer += new_text
-        if len(buffer) > 1:
-            yield buffer
+    # buffer = ""
+    # output_started = False
+    # for new_text in streamer:
+    #     if not output_started:
+    #         if "<|assistant|>\n" in new_text:
+    #             output_started = True
+    #         continue
+    #     buffer += new_text
+    #     if len(buffer) > 1:
+    #         yield buffer
 
     prompt_builder.add_turn(role="gpt", message=buffer)
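For context, the code commented out by this commit follows the standard threaded token-streaming pattern: `generate()` runs in a background thread while the handler iterates over a streamer and yields partial text. The sketch below illustrates that pattern with the `transformers` library directly; the model ID, tokenizer, and prompt are placeholders, and the Space's own `vlm.generate` and `prompt_builder` wrappers may expose a different API.

```python
# Minimal sketch of the threaded streaming pattern removed in this commit.
# Assumes a plain transformers causal LM; "gpt2" is a placeholder model.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "gpt2"  # placeholder for illustration only
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Describe the image:", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

generation_kwargs = dict(
    **inputs,
    streamer=streamer,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_k=50,
)

# generate() blocks, so it runs in a worker thread while the main
# thread consumes decoded text chunks from the streamer.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

buffer = ""
for new_text in streamer:  # yields text incrementally as tokens are produced
    buffer += new_text
    print(buffer)  # a Gradio chat handler would `yield buffer` here instead
thread.join()
```

The commit replaces this pattern with a single blocking `buffer = vlm.generate(**generation_kwargs)` call, so the handler returns the complete response at once rather than streaming partial text to the chat UI.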