Sarath0x8f committed
Commit 6847a85 · verified · 1 Parent(s): 8d9b617

Update app.py

Files changed (1):
  1. app.py +68 -51
app.py CHANGED
@@ -6,13 +6,17 @@ from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
 import os
 from dotenv import load_dotenv
 import gradio as gr
+import markdowm as md  # local module providing the intro text (md.description) and footer HTML (md.footer)
+import base64
 
 # Load environment variables
 load_dotenv()
 
-models = [
+llm_models = [
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "meta-llama/Meta-Llama-3-8B-Instruct",
+    "mistralai/Mistral-7B-Instruct-v0.2",
+    "tiiuae/falcon-7b-instruct",
     # "NousResearch/Yarn-Mistral-7b-64k",  ## 14GB>10GB
     # "impira/layoutlm-document-qa",  ## ERR
     # "Qwen/Qwen1.5-7B",  ## 15GB
@@ -20,7 +24,6 @@ models = [
     # "google/gemma-2-2b-jpn-it",  ## high response time
     # "impira/layoutlm-invoices",  ## bad req
     # "google/pix2struct-docvqa-large",  ## bad req
-    "mistralai/Mistral-7B-Instruct-v0.2",
     # "google/gemma-7b-it",  ## 17GB > 10GB
     # "google/gemma-2b-it",  ## high response time
     # "HuggingFaceH4/zephyr-7b-beta",  ## high response time
@@ -28,14 +31,23 @@
     # "microsoft/phi-2",  ## high response time
     # "TinyLlama/TinyLlama-1.1B-Chat-v1.0",  ## high response time
     # "mosaicml/mpt-7b-instruct",  ## 13GB>10GB
-    "tiiuae/falcon-7b-instruct",
     # "google/flan-t5-xxl",  ## high response time
     # "NousResearch/Yarn-Mistral-7b-128k",  ## 14GB>10GB
     # "Qwen/Qwen2.5-7B-Instruct",  ## 15GB>10GB
 ]
 
+embed_models = [
+    "BAAI/bge-small-en-v1.5",  # 33.4M params
+    "NeuML/pubmedbert-base-embeddings",
+    "sentence-transformers/all-mpnet-base-v2",  # 109M params
+    "BAAI/llm-embedder",  # 109M params
+    "BAAI/bge-large-en",  # 335M params
+]
+
 # Global variable for selected model
-selected_model_name = models[0]  # Default to the first model in the list
+selected_llm_model_name = llm_models[0]  # Default to the first model in the list
+selected_embed_model_name = embed_models[0]  # Default to the first model in the list
+vector_index = None
 
 # Initialize the parser
 parser = LlamaParse(api_key=os.getenv("LLAMA_INDEX_API"), result_type='markdown')
@@ -67,24 +79,12 @@ file_extractor = {
     '.svg': parser,  # SVG files (vector format, may contain embedded text)
 }
 
-# Embedding model and index initialization (to be populated by uploaded files)
-embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")  ## Works good and fast
-# embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en")  ## works good
-# embed_model2 = HuggingFaceEmbedding(model_name="NeuML/pubmedbert-base-embeddings")  ## works good
-
-# sentence-transformers/distilbert-base-nli-mean-tokens
-# BAAI/bge-large-en
-# embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
-
-# Global variable to store documents loaded from user-uploaded files
-vector_index = None
-
-
 # File processing function
-def load_files(file_path: str):
+def load_files(file_path: str, embed_model_name: str):
     try:
         global vector_index
         document = SimpleDirectoryReader(input_files=[file_path], file_extractor=file_extractor).load_data()
+        embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
         vector_index = VectorStoreIndex.from_documents(document, embed_model=embed_model)
         print(f"Parsing done for {file_path}")
         filename = os.path.basename(file_path)
@@ -94,9 +94,9 @@ def load_files(file_path: str):
 
 
 # Function to handle the selected model from dropdown
-def set_model(selected_model):
-    global selected_model_name
-    selected_model_name = selected_model  # Update the global variable
+def set_llm_model(selected_model):
+    global selected_llm_model_name
+    selected_llm_model_name = selected_model  # Update the global variable
     # print(f"Model selected: {selected_model_name}")
     # return f"Model set to: {selected_model_name}"
 
@@ -106,51 +106,68 @@ def respond(message, history):
     try:
         # Initialize the LLM with the selected model
         llm = HuggingFaceInferenceAPI(
-            model_name=selected_model_name,
-            contextWindow = 4096,
-            maxTokens = 4096,
-            temperature=0.7,
-            topP=0.95,
-            # token=os.getenv("TOKEN")
+            model_name=selected_llm_model_name,
+            contextWindow=8192,  # Context window size (typically the max length of the model)
+            maxTokens=1024,  # Tokens per response generation (512-1024 works well for detailed answers)
+            temperature=0.3,  # Lower temperature for more focused answers (0.2-0.4 for factual info)
+            topP=0.9,  # Top-p sampling to control diversity while retaining quality
+            frequencyPenalty=0.5,  # Slight penalty to avoid repetition
+            presencePenalty=0.5,  # Encourages exploration without digressing too much
+            token=os.getenv("TOKEN")
         )
 
-        # Check selected model
-        # print(f"Using model: {selected_model_name}")
-
         # Set up the query engine with the selected LLM
         query_engine = vector_index.as_query_engine(llm=llm)
        bot_message = query_engine.query(message)
 
-        print(f"\n{datetime.now()}:{selected_model_name}:: {message} --> {str(bot_message)}\n")
-        return f"{selected_model_name}:\n{str(bot_message)}"
+        print(f"\n{datetime.now()}:{selected_llm_model_name}:: {message} --> {str(bot_message)}\n")
+        return f"{selected_llm_model_name}:\n{str(bot_message)}"
     except Exception as e:
         if str(e) == "'NoneType' object has no attribute 'as_query_engine'":
             return "Please upload a file."
         return f"An error occurred: {e}"
 
+def encode_image(image_path):
+    with open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode('utf-8')
+
+# Encode the images
+github_logo_encoded = encode_image("Images/github-logo.png")
+linkedin_logo_encoded = encode_image("Images/linkedin-logo.png")
+website_logo_encoded = encode_image("Images/ai-logo.png")
 
 # UI Setup
-with gr.Blocks() as demo:
-    with gr.Row():
-        with gr.Column(scale=1):
-            file_input = gr.File(file_count="single", type='filepath', label="Step-1: Upload document")
-            with gr.Row():
-                clear = gr.ClearButton()
-                btn = gr.Button("Submit", variant='primary')
-            output = gr.Text(label='Vector Index')
-            model_dropdown = gr.Dropdown(models, label="Step-2: Select Model", interactive=True)
-
-        with gr.Column(scale=3):
-            gr.ChatInterface(
-                fn=respond,
-                chatbot=gr.Chatbot(height=500),
-                textbox=gr.Textbox(placeholder="Step-3: Ask me questions on the uploaded document!", container=False, scale=7)
-            )
+with gr.Blocks(theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Roboto Mono")]), css='footer {visibility: hidden}') as demo:
+    gr.Markdown("# DocBot📄🤖")
+    with gr.Tabs():
+        with gr.TabItem("Intro"):
+            gr.Markdown(md.description)
+
+        with gr.TabItem("DocBot"):
+            with gr.Row():
+                with gr.Column(scale=1):
+                    file_input = gr.File(file_count="single", type='filepath', label="Step-1: Upload document")
+                    gr.Markdown("Don't know what to select? Check out the Intro tab.")
+                    embed_model_dropdown = gr.Dropdown(embed_models, label="Step-2: Select Embedding", interactive=True)
+                    with gr.Row():
+                        clear = gr.ClearButton()
+                        btn = gr.Button("Submit", variant='primary')
+                    output = gr.Text(label='Vector Index')
+                    llm_model_dropdown = gr.Dropdown(llm_models, label="Step-3: Select LLM", interactive=True)
+                with gr.Column(scale=3):
+                    gr.ChatInterface(
+                        fn=respond,
+                        chatbot=gr.Chatbot(height=500),
+                        theme="soft",
+                        show_progress='full',
+                        # cache_mode='lazy',
+                        textbox=gr.Textbox(placeholder="Step-4: Ask me questions on the uploaded document!", container=False)
+                    )
+    gr.HTML(md.footer.format(github_logo_encoded, linkedin_logo_encoded, website_logo_encoded))
 # Set up Gradio interactions
-model_dropdown.change(fn=set_model, inputs=model_dropdown)
-btn.click(fn=load_files, inputs=file_input, outputs=output)
-clear.click(lambda: [None] * 2, outputs=[file_input, output])
+llm_model_dropdown.change(fn=set_llm_model, inputs=llm_model_dropdown)
+btn.click(fn=load_files, inputs=[file_input, embed_model_dropdown], outputs=output)
+clear.click(lambda: [None] * 3, outputs=[file_input, embed_model_dropdown, output])
 
 # Launch the demo with a public link option
 if __name__ == "__main__":
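
A note on the new footer wiring: gr.HTML(md.footer.format(...)) implies that the local markdowm module exposes a footer HTML template with three positional placeholders for the base64-encoded logos, plus the description text shown in the Intro tab. That module is not part of this commit, so the following is only a sketch of the shape it would need; the link targets are placeholders, not real URLs.

# Hypothetical markdowm.py (NOT included in this commit) -- app.py only
# requires md.description (Intro tab text) and md.footer (an HTML template
# whose three {} slots receive base64-encoded PNG data).
description = "DocBot: upload a document, pick an embedding model and an LLM, then ask questions about it."

footer = """
<div style="text-align: center;">
  <a href="#"><img src="data:image/png;base64,{}" alt="GitHub" height="32"></a>
  <a href="#"><img src="data:image/png;base64,{}" alt="LinkedIn" height="32"></a>
  <a href="#"><img src="data:image/png;base64,{}" alt="Website" height="32"></a>
</div>
"""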
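For a quick smoke test of the reworked two-step flow without the UI (assumptions: a local sample.pdf exists, and LLAMA_INDEX_API plus TOKEN are set in the environment; the file name is a stand-in, not a file from this repo):

# Index the document with the default embedding model, then query it
# through the default LLM; respond() prefixes the answer with the LLM name.
print(load_files("sample.pdf", embed_models[0]))
print(respond("Summarize the uploaded document.", history=[]))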