mariagrandury committed on
Commit
4c8df35
·
1 Parent(s): 8fe3ba2

reorganize configuration tab

Browse files
Files changed (1) hide show
  1. app.py +73 -67
app.py CHANGED
@@ -218,7 +218,7 @@ def demo():
218
  """
219
  )
220
 
221
- with gr.Tab("Configure the chatbot"):
222
  with gr.Row():
223
  document = gr.Files(
224
  height=100,
@@ -230,84 +230,90 @@ def demo():
230
  # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
231
 
232
  with gr.Row():
233
- db_btn = gr.Radio(
234
- ["ChromaDB"],
235
- label="Vector database type",
236
- value="ChromaDB",
237
- type="index",
238
- info="Choose your vector database",
239
- )
240
- with gr.Accordion("Advanced options - Document text splitter", open=False):
241
  with gr.Row():
242
- slider_chunk_size = gr.Slider(
243
- minimum=100,
244
- maximum=1000,
245
- value=600,
246
- step=20,
247
- label="Chunk size",
248
- info="Chunk size",
249
- interactive=True,
250
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  with gr.Row():
252
- slider_chunk_overlap = gr.Slider(
253
- minimum=10,
254
- maximum=200,
255
- value=40,
256
- step=10,
257
- label="Chunk overlap",
258
- info="Chunk overlap",
259
- interactive=True,
260
- )
261
  with gr.Row():
262
  db_progress = gr.Textbox(
263
- label="Vector database initialization", value="None"
264
  )
265
- with gr.Row():
266
- db_btn = gr.Button("Generate vector database")
267
 
268
  with gr.Row():
269
- llm_btn = gr.Radio(
270
- list_llm_simple,
271
- label="LLM models",
272
- value=list_llm_simple[0],
273
- type="index",
274
- info="Choose your LLM model",
275
- )
276
- with gr.Accordion("Advanced options - LLM model", open=False):
277
  with gr.Row():
278
- slider_temperature = gr.Slider(
279
- minimum=0.01,
280
- maximum=1.0,
281
- value=0.7,
282
- step=0.1,
283
- label="Temperature",
284
- info="Model temperature",
285
- interactive=True,
286
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  with gr.Row():
288
- slider_maxtokens = gr.Slider(
289
- minimum=224,
290
- maximum=4096,
291
- value=1024,
292
- step=32,
293
- label="Max Tokens",
294
- info="Model max tokens",
295
- interactive=True,
296
- )
297
- with gr.Row():
298
- slider_topk = gr.Slider(
299
- minimum=1,
300
- maximum=10,
301
- value=3,
302
- step=1,
303
- label="top-k samples",
304
- info="Model top-k samples",
305
- interactive=True,
306
- )
307
  with gr.Row():
308
- llm_progress = gr.Textbox(value="None", label="QA chain initialization")
309
- with gr.Row():
310
- qachain_btn = gr.Button("Initialize Question Answering chain")
311
 
312
  with gr.Tab("Chatbot"):
313
  chatbot = gr.Chatbot(height=300)
 
218
  """
219
  )
220
 
221
+ with gr.Tab("Chatbot configuration"):
222
  with gr.Row():
223
  document = gr.Files(
224
  height=100,
 
230
  # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
231
 
232
  with gr.Row():
 
 
 
 
 
 
 
 
233
  with gr.Row():
234
+ db_btn = gr.Radio(
235
+ ["ChromaDB"],
236
+ label="Vector database type",
237
+ value="ChromaDB",
238
+ type="index",
239
+ info="Choose your vector database",
 
 
240
  )
241
+ with gr.Accordion(
242
+ "Advanced options - Document text splitter", open=False
243
+ ):
244
+ with gr.Row():
245
+ slider_chunk_size = gr.Slider(
246
+ minimum=100,
247
+ maximum=1000,
248
+ value=600,
249
+ step=20,
250
+ label="Chunk size",
251
+ info="Chunk size",
252
+ interactive=True,
253
+ )
254
+ with gr.Row():
255
+ slider_chunk_overlap = gr.Slider(
256
+ minimum=10,
257
+ maximum=200,
258
+ value=40,
259
+ step=10,
260
+ label="Chunk overlap",
261
+ info="Chunk overlap",
262
+ interactive=True,
263
+ )
264
  with gr.Row():
265
+ db_btn = gr.Button("Generate vector database")
 
 
 
 
 
 
 
 
266
  with gr.Row():
267
  db_progress = gr.Textbox(
268
+ label="Vector database initialization", value="0% Configure the DB"
269
  )
 
 
270
 
271
  with gr.Row():
 
 
 
 
 
 
 
 
272
  with gr.Row():
273
+ llm_btn = gr.Radio(
274
+ list_llm_simple,
275
+ label="LLM models",
276
+ value=list_llm_simple[0],
277
+ type="index",
278
+ info="Choose your LLM model",
 
 
279
  )
280
+ with gr.Accordion("Advanced options - LLM model", open=False):
281
+ with gr.Row():
282
+ slider_temperature = gr.Slider(
283
+ minimum=0.01,
284
+ maximum=1.0,
285
+ value=0.7,
286
+ step=0.1,
287
+ label="Temperature",
288
+ info="Model temperature",
289
+ interactive=True,
290
+ )
291
+ with gr.Row():
292
+ slider_maxtokens = gr.Slider(
293
+ minimum=224,
294
+ maximum=4096,
295
+ value=1024,
296
+ step=32,
297
+ label="Max Tokens",
298
+ info="Model max tokens",
299
+ interactive=True,
300
+ )
301
+ with gr.Row():
302
+ slider_topk = gr.Slider(
303
+ minimum=1,
304
+ maximum=10,
305
+ value=3,
306
+ step=1,
307
+ label="top-k samples",
308
+ info="Model top-k samples",
309
+ interactive=True,
310
+ )
311
  with gr.Row():
312
+ qachain_btn = gr.Button("Initialize Question Answering chain")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
313
  with gr.Row():
314
+ llm_progress = gr.Textbox(
315
+ value="None", label="0% Configure the QA chain"
316
+ )
317
 
318
  with gr.Tab("Chatbot"):
319
  chatbot = gr.Chatbot(height=300)