mariagrandury committed
Commit 8fe3ba2 · Parent: 2614912

simplify tabs
Files changed (1): app.py (+8 -10)
app.py CHANGED
@@ -207,18 +207,18 @@ def demo():
         collection_name = gr.State()
 
         gr.Markdown(
-            """<center><h2>PDF-based chatbot</center></h2>
-            <h3>Ask any questions about your PDF documents</h3>"""
+            """
+            <center><h2>PDF-based chatbot</center></h2>
+            <h3>Ask any questions about your PDF documents</h3>
+            """
         )
         gr.Markdown(
             """<b>Note:</b> This AI assistant, using Langchain and open-source LLMs, performs retrieval-augmented generation (RAG) from your PDF documents. \
-            The user interface explicitely shows multiple steps to help understand the RAG workflow.
-            This chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity purposes.<br>
-            <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate a reply.
-            """
+            This chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity purposes.<br>
+            """
         )
 
-        with gr.Tab("Step 1 - Upload PDF"):
+        with gr.Tab("Configure the chatbot"):
             with gr.Row():
                 document = gr.Files(
                     height=100,
@@ -229,7 +229,6 @@ def demo():
                 )
             # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
 
-        with gr.Tab("Step 2 - Process document"):
             with gr.Row():
                 db_btn = gr.Radio(
                     ["ChromaDB"],
@@ -266,7 +265,6 @@ def demo():
             with gr.Row():
                 db_btn = gr.Button("Generate vector database")
 
-        with gr.Tab("Step 3 - Initialize QA chain"):
             with gr.Row():
                 llm_btn = gr.Radio(
                     list_llm_simple,
@@ -311,7 +309,7 @@ def demo():
             with gr.Row():
                 qachain_btn = gr.Button("Initialize Question Answering chain")
 
-        with gr.Tab("Step 4 - Chatbot"):
+        with gr.Tab("Chatbot"):
             chatbot = gr.Chatbot(height=300)
             with gr.Accordion("Advanced - Document references", open=False):
                 with gr.Row():
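For reference, here is a minimal, self-contained sketch of the layout this commit arrives at: the former "Step 1-4" tabs collapsed into one "Configure the chatbot" tab plus a "Chatbot" tab. It is an illustration under assumptions, not the full app.py: list_llm_simple is stubbed with placeholder names, the Radio components are given distinct names (db_choice, llm_choice) instead of the db_btn name the diff reuses, the reference Textbox inside the accordion is a stand-in, and all event wiring (document loading, vector-database generation, QA-chain initialization, chat handlers) is omitted.

# Minimal sketch of the simplified two-tab layout; not the full app.py.
import gradio as gr

# Placeholder values; the real list and all event handlers live elsewhere in app.py.
list_llm_simple = ["llm-model-1", "llm-model-2"]

def demo():
    with gr.Blocks() as demo:
        gr.Markdown(
            """
            <center><h2>PDF-based chatbot</h2></center>
            <h3>Ask any questions about your PDF documents</h3>
            """
        )

        # Former "Step 1 - Upload PDF", "Step 2 - Process document" and
        # "Step 3 - Initialize QA chain" now live in a single configuration tab.
        with gr.Tab("Configure the chatbot"):
            with gr.Row():
                document = gr.Files(
                    height=100,
                    file_count="multiple",
                    file_types=["pdf"],
                    label="Upload your PDF documents",
                )
            with gr.Row():
                db_choice = gr.Radio(
                    ["ChromaDB"], label="Vector database", value="ChromaDB"
                )
            with gr.Row():
                db_btn = gr.Button("Generate vector database")
            with gr.Row():
                llm_choice = gr.Radio(
                    list_llm_simple, label="LLM", value=list_llm_simple[0]
                )
            with gr.Row():
                qachain_btn = gr.Button("Initialize Question Answering chain")

        # Former "Step 4 - Chatbot" tab, renamed to just "Chatbot".
        with gr.Tab("Chatbot"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Advanced - Document references", open=False):
                with gr.Row():
                    # Stand-in for the document-reference outputs in app.py.
                    source_ref = gr.Textbox(label="Reference", lines=2)

    demo.launch()

if __name__ == "__main__":
    demo()

The configuration components keep the same order as before; only the surrounding step tabs are removed, so the nested gr.Row blocks fall under the single configuration tab without any other change to the layout code.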