curry tang committed
Commit c62892c · 1 Parent(s): 8b4c265
Files changed (2)
  1. app.py +11 -7
  2. llm.py +1 -1
app.py CHANGED
@@ -47,8 +47,9 @@ def predict(message, history, chat):
     yield response_message
 
 
-def update_chat(_provider: str, _chat, _model: str, _temperature: float, _max_tokens: int):
-    print('?????', _provider, _chat, _model, _temperature, _max_tokens)
+def update_chat(_provider: str, _model: str, _temperature: float, _max_tokens: int):
+    print('?????', _provider, _model, _temperature, _max_tokens)
+    _chat = None
     if _provider == 'DeepSeek':
         _chat = deep_seek_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
     if _provider == 'OpenRouter':
@@ -59,6 +60,7 @@ def update_chat(_provider: str, _chat, _model: str, _temperature: float, _max_tokens: int):
 
 
 def explain_code(_code_type: str, _code: str, _chat):
+    print('>>>>>???', _code_type, _code, _chat)
     if _chat is None:
         _chat = init_chat()
     chat_messages = [
@@ -177,11 +179,13 @@ with gr.Blocks() as app:
 
     @gr.render(inputs=provider)
    def show_model_config_panel(_provider):
-        _support_llm = deep_seek_llm
+        _support_llm = None
         if _provider == 'OpenRouter':
             _support_llm = open_router_llm
         if _provider == 'Tongyi':
             _support_llm = tongyi_llm
+        if _provider == 'DeepSeek':
+            _support_llm = deep_seek_llm
         with gr.Column():
             model = gr.Dropdown(
                 label='模型',
@@ -197,7 +201,7 @@ with gr.Blocks() as app:
                 key="temperature",
             )
             max_tokens = gr.Slider(
-                minimum=1024,
+                minimum=512,
                 maximum=_support_llm.default_max_tokens,
                 step=128,
                 value=_support_llm.default_max_tokens,
@@ -206,17 +210,17 @@ with gr.Blocks() as app:
             )
             model.change(
                 fn=update_chat,
-                inputs=[provider, chat_engine, model, temperature, max_tokens],
+                inputs=[provider, model, temperature, max_tokens],
                 outputs=[chat_engine],
             )
             temperature.change(
                 fn=update_chat,
-                inputs=[provider, chat_engine, model, temperature, max_tokens],
+                inputs=[provider, model, temperature, max_tokens],
                 outputs=[chat_engine],
             )
             max_tokens.change(
                 fn=update_chat,
-                inputs=[provider, chat_engine, model, temperature, max_tokens],
+                inputs=[provider, model, temperature, max_tokens],
                 outputs=[chat_engine],
             )
 
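For context, a rough sketch of how update_chat and the config panel fit together after this commit. It is reconstructed from the hunks above, not the full app.py: the OpenRouter/Tongyi branches and the final return are assumptions (the hunk only shows the start of the function), and deep_seek_llm, open_router_llm, tongyi_llm, and the Gradio components come from the surrounding file.

```python
# Sketch (reconstructed from the diff, not the actual file). The provider helpers
# come from llm.py; the OpenRouter/Tongyi branches and the return value are
# assumptions, since the hunk cuts off after the DeepSeek branch.
def update_chat(_provider: str, _model: str, _temperature: float, _max_tokens: int):
    _chat = None  # rebuilt from scratch; the old chat_engine is no longer an input
    if _provider == 'DeepSeek':
        _chat = deep_seek_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
    if _provider == 'OpenRouter':
        _chat = open_router_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
    if _provider == 'Tongyi':
        _chat = tongyi_llm.get_chat_engine(model=_model, temperature=_temperature, max_tokens=_max_tokens)
    return _chat  # written back into the chat_engine state via outputs=[chat_engine]

# Inside show_model_config_panel, each control now passes only the raw values:
model.change(fn=update_chat, inputs=[provider, model, temperature, max_tokens], outputs=[chat_engine])
temperature.change(fn=update_chat, inputs=[provider, model, temperature, max_tokens], outputs=[chat_engine])
max_tokens.change(fn=update_chat, inputs=[provider, model, temperature, max_tokens], outputs=[chat_engine])
```

With this shape, changing the provider, model, temperature, or max tokens always produces a fresh engine rather than threading the stored chat_engine through as an input.
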
llm.py CHANGED
@@ -76,4 +76,4 @@ class TongYiLLM(BaseLLM):
     _support_models = ['qwen-turbo', 'qwen-plus', 'qwen-max', 'qwen-long']
     _default_model = 'qwen-turbo'
     _base_url = 'https://dashscope.aliyuncs.com/compatible-mode/v1'
-    _default_max_tokens: int = 32 * 1024
+    _default_max_tokens: int = 2000
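The lowered Tongyi default also feeds the model-config panel above, where the max_tokens slider takes both its maximum and initial value from _support_llm.default_max_tokens. A minimal, self-contained sketch of that relationship; the BaseLLM contents and the default_max_tokens accessor are assumptions, as neither appears in this diff.

```python
# Minimal sketch, not the real llm.py: BaseLLM's default and the
# default_max_tokens accessor are assumed; the diff only shows the
# TongYiLLM class attributes.
class BaseLLM:
    _default_max_tokens: int = 4096  # illustrative base default (assumption)

    @property
    def default_max_tokens(self) -> int:
        return self._default_max_tokens

class TongYiLLM(BaseLLM):
    _default_max_tokens: int = 2000  # lowered from 32 * 1024 in this commit

tongyi_llm = TongYiLLM()
print(tongyi_llm.default_max_tokens)  # 2000
# In app.py the Tongyi slider therefore becomes roughly:
#   gr.Slider(minimum=512, maximum=2000, step=128, value=2000, ...)
```
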