AbeerTrial committed
Commit 7f15565 · 1 Parent(s): 0a3a31f

Upload app.py

Files changed (1):
  1. app.py +366 -252
app.py CHANGED
@@ -1,91 +1,8 @@
- # # -*- coding: utf-8 -*-
- # """fiver-app.ipynb
-
- # Automatically generated by Colaboratory.
-
- # Original file is located at
- # https://colab.research.google.com/drive/1YQm_fGxa2nfiV8pTN4oBrlzzfefGadaP
- # """
-
- # !pip uninstall -y numpy
- # !pip install --ignore-installed numpy==1.22.0
-
- # !pip install langchain
- # !pip install PyPDF2
- # !pip install docx2txt
- # !pip install gradio
- # !pip install faiss-gpu
- # !pip install openai
- # !pip install tiktoken
- # !pip install python-docx
-
- # !pip install git+https://github.com/openai/whisper.git
- # !pip install sounddevice
-
- # import shutil
- # import os
-
- # def copy_files(source_folder, destination_folder):
- #     # Create the destination folder if it doesn't exist
- #     if not os.path.exists(destination_folder):
- #         os.makedirs(destination_folder)
-
- #     # Get a list of files in the source folder
-
- #     files_to_copy = os.listdir(source_folder)
- #     for file_name in files_to_copy:
- #         source_file_path = os.path.join(source_folder, file_name)
- #         destination_file_path = os.path.join(destination_folder, file_name)
-
- #         # Copy the file to the destination folder
- #         shutil.copy(source_file_path, destination_file_path)
-
- #         print(f"Copied {file_name} to {destination_folder}")
-
- # # Specify the source folder and destination folder paths
- # source_folder = "/kaggle/input/fiver-app5210"
- # destination_folder = "/home/user/app/local_db"
-
- # copy_files(source_folder, destination_folder)
-
- # import shutil
- # import os
-
- # def copy_files(source_folder, destination_folder):
- #     # Create the destination folder if it doesn't exist
- #     if not os.path.exists(destination_folder):
- #         os.makedirs(destination_folder)
-
- #     # Get a list of files in the source folder
- #     files_to_copy = os.listdir(source_folder)
-
- #     for file_name in files_to_copy:
- #         source_file_path = os.path.join(source_folder, file_name)
- #         destination_file_path = os.path.join(destination_folder, file_name)
-
- #         # Copy the file to the destination folder
- #         shutil.copy(source_file_path, destination_file_path)
-
- #         print(f"Copied {file_name} to {destination_folder}")
-
- # # Specify the source folder and destination folder paths
- # source_folder = "/kaggle/input/fiver-app-docs"
- # destination_folder = "/home/user/app/docs"
-
- # copy_files(source_folder, destination_folder)
-
-
- def api_key(key):
-     import os
-     import openai
-
-     os.environ["TOKENIZERS_PARALLELISM"] = "false"
-     os.environ["OPENAI_API_KEY"] = key
-     openai.api_key = key
-
-     return "Successful!"
-

  def save_file(input_file):
      import shutil
      import os
@@ -93,14 +10,13 @@ def save_file(input_file):
      destination_dir = "/home/user/app/file/"
      os.makedirs(destination_dir, exist_ok=True)

-     output_dir = "/home/user/app/file/"

      for file in input_file:
-         shutil.copy(file.name, output_dir)

      return "File(s) saved successfully!"

-
  def process_file():
      from langchain.document_loaders import PyPDFLoader
      from langchain.document_loaders import DirectoryLoader
@@ -108,30 +24,26 @@ def process_file():
      from langchain.document_loaders import Docx2txtLoader
      from langchain.vectorstores import FAISS
      from langchain.embeddings.openai import OpenAIEmbeddings
-     from langchain.text_splitter import CharacterTextSplitter
      import openai

-     loader1 = DirectoryLoader(
-         "/home/user/app/file/", glob="./*.pdf", loader_cls=PyPDFLoader
-     )
      document1 = loader1.load()

-     loader2 = DirectoryLoader(
-         "/home/user/app/file/", glob="./*.txt", loader_cls=TextLoader
-     )
      document2 = loader2.load()

-     loader3 = DirectoryLoader(
-         "/home/user/app/file/", glob="./*.docx", loader_cls=Docx2txtLoader
-     )
      document3 = loader3.load()

      document1.extend(document2)
      document1.extend(document3)

-     text_splitter = CharacterTextSplitter(
-         separator="\n", chunk_size=1000, chunk_overlap=200, length_function=len
-     )

      docs = text_splitter.split_documents(document1)
      embeddings = OpenAIEmbeddings()
@@ -141,16 +53,14 @@ def process_file():

      return "File(s) processed successfully!"

-
  def formatted_response(docs, response):
      formatted_output = response + "\n\nSources"

      for i, doc in enumerate(docs):
-         source_info = doc.metadata.get("source", "Unknown source")
-         page_info = doc.metadata.get("page", None)

-         # Get the file name without the directory path
-         file_name = source_info.split("/")[-1].strip()

          if page_info is not None:
              formatted_output += f"\n{file_name}\tpage no {page_info}"
@@ -159,7 +69,6 @@ def formatted_response(docs, response):

      return formatted_output

-
  def search_file(question):
      from langchain.embeddings.openai import OpenAIEmbeddings
      from langchain.vectorstores import FAISS
@@ -173,16 +82,16 @@ def search_file(question):
      file_db = FAISS.load_local("/home/user/app/file_db/", embeddings)
      docs = file_db.similarity_search(question)

-     llm = ChatOpenAI(model_name="gpt-3.5-turbo")
      chain = load_qa_chain(llm, chain_type="stuff")
      with get_openai_callback() as cb:
          response = chain.run(input_documents=docs, question=question)
          print(cb)

      return formatted_response(docs, response)

-
- def search_local(question):
      from langchain.embeddings.openai import OpenAIEmbeddings
      from langchain.vectorstores import FAISS
      from langchain.chains.question_answering import load_qa_chain
@@ -195,18 +104,17 @@ def search_local(question):
      file_db = FAISS.load_local("/home/user/app/local_db/", embeddings)
      docs = file_db.similarity_search(question)

-     print(docs)
-     type(docs)
-     llm = ChatOpenAI(model_name="gpt-3.5-turbo")
      chain = load_qa_chain(llm, chain_type="stuff")
      with get_openai_callback() as cb:
          response = chain.run(input_documents=docs, question=question)
          print(cb)

      return formatted_response(docs, response)

-
  def delete_file():
      import shutil

      path1 = "/home/user/app/file/"
@@ -220,34 +128,46 @@ def delete_file():
      except:
          return "Already Deleted"


-     import os
-     import gradio as gr
-

- def list_files():
-     directory = "/home/user/app/docs"
      file_list = []
      for root, dirs, files in os.walk(directory):
          for file in files:
              file_list.append(file)
      return gr.Dropdown.update(choices=file_list)


- file_list = list_files()

- print("List of file names in the directory:")
- for file_name in file_list:
-     print(file_name)


- def soap_report(doc_name, question):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain
      import openai
      import docx

-     docx_path = "/home/user/app/docs/" + doc_name

      doc = docx.Document(docx_path)
      extracted_text = "Extracted text:\n\n\n"
@@ -270,52 +190,88 @@ def soap_report(doc_name, question):
      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
-     llm = OpenAI()
      llm_chain = LLMChain(prompt=prompt, llm=llm)
      response = llm_chain.run(extracted_text)

      return response


  def search_gpt(question):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain

      template = """Question: {question}

      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
-     llm = OpenAI()
      llm_chain = LLMChain(prompt=prompt, llm=llm)
      response = llm_chain.run(question)

      return response

-
  def local_gpt(question):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain

      template = """Question: {question}

      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
-     llm = OpenAI()
      llm_chain = LLMChain(prompt=prompt, llm=llm)
      response = llm_chain.run(question)

      return response

-
  global output
- global response
-

  def audio_text(filepath):
      import openai
-
      global output

      audio = open(filepath, "rb")
@@ -324,12 +280,15 @@ def audio_text(filepath):

      return output


- def transcript(text):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain

-     global response

      question = (
          "Use the following context given below to generate a detailed SOAP Report:\n\n"
@@ -341,20 +300,60 @@ def transcript(text):

      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
-     llm = OpenAI()
      llm_chain = LLMChain(prompt=prompt, llm=llm)
-     response = llm_chain.run(question)

-     return response


  def text_soap():
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain

      global output
-     global response
      output = output

      question = (
@@ -367,31 +366,90 @@ def text_soap():

      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
-     llm = OpenAI()
      llm_chain = LLMChain(prompt=prompt, llm=llm)
-     response = llm_chain.run(question)

-     return response


- global path


- def docx(name):
-     global response
-     response = response
      import docx

-     global path
-     path = f"/home/user/app/docs/{name}.docx"

      doc = docx.Document()
-     doc.add_paragraph(response)
      doc.save(path)

      return "Successfully saved .docx File"


  import gradio as gr

@@ -407,111 +465,167 @@ css = """
  """

  with gr.Blocks(css=css) as demo:
-     gr.Markdown("File Chatting App")
-
-     with gr.Tab("Chat with Files"):
-         with gr.Column(elem_classes="col"):
-             with gr.Tab("Upload and Process Files"):
-                 with gr.Column():
-                     api_key_input = gr.Textbox(label="Enter API Key here")
-                     api_key_button = gr.Button("Submit")
-                     api_key_output = gr.Textbox(label="Output")
-
-                     file_input = gr.Files(label="Upload File(s) here")
-                     upload_button = gr.Button("Upload")
-                     file_output = gr.Textbox(label="Output")
-
-                     process_button = gr.Button("Process")
-                     process_output = gr.Textbox(label="Output")
-
-             with gr.Tab("Ask Questions to Files"):
-                 with gr.Column():
-                     search_input = gr.Textbox(label="Enter Question here")
-                     search_button = gr.Button("Search")
-                     search_output = gr.Textbox(label="Output")
-
-                     search_gpt_button = gr.Button("Ask ChatGPT")
-                     search_gpt_output = gr.Textbox(label="Output")
-
-                     delete_button = gr.Button("Delete")
-                     delete_output = gr.Textbox(label="Output")
-
-     with gr.Tab("Chat with Local Files"):
-         with gr.Column(elem_classes="col"):
-             local_search_input = gr.Textbox(label="Enter Question here")
-             local_search_button = gr.Button("Search")
-             local_search_output = gr.Textbox(label="Output")
-
-             local_gpt_button = gr.Button("Ask ChatGPT")
-             local_gpt_output = gr.Textbox(label="Output")
-
-     with gr.Tab("Ask Question to SOAP Report"):
-         with gr.Column(elem_classes="col"):
-             refresh_button = gr.Button("Refresh")
-             soap_input = gr.Dropdown(label="Choose File")
-             soap_question = gr.Textbox(label="Enter Question here")
-             soap_button = gr.Button("Submit")
-             soap_output = gr.Textbox(label="Output")
-
-     with gr.Tab("Convert Audio to SOAP Report"):
-         with gr.Column(elem_classes="col"):
-             mic_text_input = gr.Audio(
-                 source="microphone", type="filepath", label="Speak to the Microphone"
-             )
-             mic_text_button = gr.Button("Generate Transcript")
-             mic_text_output = gr.Textbox(label="Output")
-
-             upload_text_input = gr.Audio(
-                 source="upload", type="filepath", label="Upload Audio File here"
-             )
-             upload_text_button = gr.Button("Generate Transcript")
-             upload_text_output = gr.Textbox(label="Output")
-
-             transcript_input = gr.Textbox(label="Enter Transcript here")
-             transcript_button = gr.Button("Generate SOAP Report")
-             transcript_output = gr.Textbox(label="Output")
-
-             text_soap_button = gr.Button("Generate SOAP Report")
-             text_soap_output = gr.Textbox(label="Output")
-
-             docx_input = gr.Textbox(label="Enter the name of .docx File")
-             docx_button = gr.Button("Save .docx File")
-             docx_output = gr.Textbox(label="Output")
-
-     api_key_button.click(api_key, inputs=api_key_input, outputs=api_key_output)
-
-     upload_button.click(save_file, inputs=file_input, outputs=file_output)
-
-     process_button.click(process_file, inputs=None, outputs=process_output)
-
-     search_button.click(search_file, inputs=search_input, outputs=search_output)
-     search_gpt_button.click(search_gpt, inputs=search_input, outputs=search_gpt_output)
-
-     delete_button.click(delete_file, inputs=None, outputs=delete_output)
-
-     local_search_button.click(
-         search_local, inputs=local_search_input, outputs=local_search_output
-     )
-     local_gpt_button.click(
-         local_gpt, inputs=local_search_input, outputs=local_gpt_output
-     )

-     refresh_button.click(list_files, inputs=None, outputs=soap_input)
-     soap_button.click(
-         soap_report, inputs=[soap_input, soap_question], outputs=soap_output
-     )

-     mic_text_button.click(audio_text, inputs=mic_text_input, outputs=mic_text_output)
-     upload_text_button.click(
-         audio_text, inputs=upload_text_input, outputs=upload_text_output
-     )

-     transcript_button.click(
-         transcript, inputs=transcript_input, outputs=transcript_output
-     )
-     text_soap_button.click(text_soap, inputs=None, outputs=text_soap_output)
-     docx_button.click(docx, inputs=docx_input, outputs=docx_output)


  demo.queue()
  demo.launch()
+ import os
+ import openai

+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ os.environ["OPENAI_API_KEY"]
  def save_file(input_file):
      import shutil
      import os
      destination_dir = "/home/user/app/file/"
      os.makedirs(destination_dir, exist_ok=True)

+     output_dir = "/home/user/app/file/"

      for file in input_file:
+         shutil.copy(file.name, output_dir)

      return "File(s) saved successfully!"

  def process_file():
      from langchain.document_loaders import PyPDFLoader
      from langchain.document_loaders import DirectoryLoader
      from langchain.document_loaders import Docx2txtLoader
      from langchain.vectorstores import FAISS
      from langchain.embeddings.openai import OpenAIEmbeddings
+     from langchain.text_splitter import RecursiveCharacterTextSplitter
      import openai

+     loader1 = DirectoryLoader('/home/user/app/file/', glob="./*.pdf", loader_cls=PyPDFLoader)
      document1 = loader1.load()

+     loader2 = DirectoryLoader('/home/user/app/file/', glob="./*.txt", loader_cls=TextLoader)
      document2 = loader2.load()

+     loader3 = DirectoryLoader('/home/user/app/file/', glob="./*.docx", loader_cls=Docx2txtLoader)
      document3 = loader3.load()

      document1.extend(document2)
      document1.extend(document3)

+     text_splitter = RecursiveCharacterTextSplitter(
+         separators=["\n"],
+         chunk_size=1000,
+         chunk_overlap=200,
+         length_function=len)

      docs = text_splitter.split_documents(document1)
      embeddings = OpenAIEmbeddings()

      return "File(s) processed successfully!"

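Note: the hunk above shows the loaders, splitter, and embeddings, but the lines that actually build and persist the vector store fall in context this diff does not display, while search_file below loads an index from /home/user/app/file_db/. For reference only, a minimal sketch of the usual LangChain FAISS build/save/load pairing, assuming that same directory (the variable names are illustrative, not the committed code):

# Hypothetical sketch: persist and reload a FAISS index (not part of this commit).
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings

embeddings = OpenAIEmbeddings()
file_db = FAISS.from_documents(docs, embeddings)      # docs: the split Documents from the text splitter
file_db.save_local("/home/user/app/file_db/")         # write the index to disk
file_db = FAISS.load_local("/home/user/app/file_db/", embeddings)  # reload it at query time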
  def formatted_response(docs, response):
      formatted_output = response + "\n\nSources"

      for i, doc in enumerate(docs):
+         source_info = doc.metadata.get('source', 'Unknown source')
+         page_info = doc.metadata.get('page', None)

+         file_name = source_info.split('/')[-1].strip()

          if page_info is not None:
              formatted_output += f"\n{file_name}\tpage no {page_info}"

      return formatted_output

  def search_file(question):
      from langchain.embeddings.openai import OpenAIEmbeddings
      from langchain.vectorstores import FAISS
      file_db = FAISS.load_local("/home/user/app/file_db/", embeddings)
      docs = file_db.similarity_search(question)

+     llm = ChatOpenAI(model_name='gpt-3.5-turbo')
      chain = load_qa_chain(llm, chain_type="stuff")
+
      with get_openai_callback() as cb:
          response = chain.run(input_documents=docs, question=question)
          print(cb)

      return formatted_response(docs, response)

+ def local_search(question):
      from langchain.embeddings.openai import OpenAIEmbeddings
      from langchain.vectorstores import FAISS
      from langchain.chains.question_answering import load_qa_chain
      file_db = FAISS.load_local("/home/user/app/local_db/", embeddings)
      docs = file_db.similarity_search(question)

+     llm = ChatOpenAI(model_name='gpt-3.5-turbo')
      chain = load_qa_chain(llm, chain_type="stuff")
+
      with get_openai_callback() as cb:
          response = chain.run(input_documents=docs, question=question)
          print(cb)

      return formatted_response(docs, response)

  def delete_file():
+
      import shutil

      path1 = "/home/user/app/file/"
      except:
          return "Already Deleted"

+ def soap_refresh():
+     import os
+     import gradio as gr

+     destination_folder = "/home/user/app/soap_docs/"
+     if not os.path.exists(destination_folder):
+         os.makedirs(destination_folder)

+     directory = '/home/user/app/soap_docs/'
      file_list = []
+
      for root, dirs, files in os.walk(directory):
          for file in files:
              file_list.append(file)
      return gr.Dropdown.update(choices=file_list)

+ def sbar_refresh():
+     import os
+     import gradio as gr

+     destination_folder = "/home/user/app/sbar_docs/"
+     if not os.path.exists(destination_folder):
+         os.makedirs(destination_folder)

+     directory = '/home/user/app/sbar_docs/'
+     file_list = []

+     for root, dirs, files in os.walk(directory):
+         for file in files:
+             file_list.append(file)
+     return gr.Dropdown.update(choices=file_list)

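soap_refresh and sbar_refresh return gr.Dropdown.update(choices=...), which is how a Refresh button repopulates a file picker from a callback in Gradio 3.x. A minimal, self-contained sketch of that pattern, assuming Gradio 3.x and a hypothetical directory path (not the committed code):

# Hypothetical sketch: a Refresh button repopulating a Dropdown (Gradio 3.x API).
import os
import gradio as gr

def refresh_choices():
    files = os.listdir("/home/user/app/soap_docs/")   # assumed directory
    return gr.Dropdown.update(choices=files)          # update the dropdown's choices

with gr.Blocks() as refresh_sketch:
    refresh = gr.Button("Refresh")
    picker = gr.Dropdown(label="Choose File")
    refresh.click(refresh_choices, inputs=None, outputs=picker)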
+ def ask_soap(doc_name, question):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI
      import openai
      import docx

+     docx_path = "/home/user/app/soap_docs/" + doc_name

      doc = docx.Document(docx_path)
      extracted_text = "Extracted text:\n\n\n"
      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     llm = ChatOpenAI(model_name="gpt-3.5-turbo")
      llm_chain = LLMChain(prompt=prompt, llm=llm)
      response = llm_chain.run(extracted_text)

      return response

+ def ask_sbar(doc_name, question):
+     from langchain.llms import OpenAI
+     from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI
+     import openai
+     import docx
+
+     docx_path = "/home/user/app/sbar_docs/" + doc_name
+
+     doc = docx.Document(docx_path)
+     extracted_text = "Extracted text:\n\n\n"
+
+     for paragraph in doc.paragraphs:
+         extracted_text += paragraph.text + "\n"
+
+     question = (
+         "\n\nUse the 'Extracted text' to answer the following question:\n" + question
+     )
+     extracted_text += question
+
+     if extracted_text:
+         print(extracted_text)
+     else:
+         print("failed")
+
+     template = """Question: {question}
+
+     Answer: Let's think step by step."""
+
+     prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     llm = ChatOpenAI(model_name="gpt-3.5-turbo")
+     llm_chain = LLMChain(prompt=prompt, llm=llm)
+     response = llm_chain.run(extracted_text)
+
+     return response

  def search_gpt(question):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI

      template = """Question: {question}

      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     llm = ChatOpenAI(model_name="gpt-3.5-turbo")
      llm_chain = LLMChain(prompt=prompt, llm=llm)
      response = llm_chain.run(question)

      return response

  def local_gpt(question):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI

      template = """Question: {question}

      Answer: Let's think step by step."""

      prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     llm = ChatOpenAI(model_name="gpt-3.5-turbo")
      llm_chain = LLMChain(prompt=prompt, llm=llm)
      response = llm_chain.run(question)

      return response

  global output

  def audio_text(filepath):
      import openai
      global output

      audio = open(filepath, "rb")

      return output

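audio_text opens the audio file and returns the module-level `output`; the transcription call itself sits in context lines this diff does not show. For orientation only, a minimal transcription sketch using the pre-1.0 openai SDK (a hypothetical helper, not the committed code):

# Hypothetical sketch: transcribing an audio file with the pre-1.0 openai SDK.
import openai

def transcribe_sketch(filepath):
    with open(filepath, "rb") as audio:
        result = openai.Audio.transcribe("whisper-1", audio)  # Whisper API call
    return result["text"]                                      # plain-text transcript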
+ global soap_response
+ global sbar_response

+ def transcript_soap(text):
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI

+     global soap_response

      question = (
          "Use the following context given below to generate a detailed SOAP Report:\n\n"

      Answer: Let's think step by step."""

+     word_count = len(text.split())
      prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     if word_count < 2000:
+         llm = ChatOpenAI(model="gpt-3.5-turbo")
+     elif word_count < 5000:
+         llm = ChatOpenAI(model="gpt-4")
+     else:
+         llm = ChatOpenAI(model="gpt-4-32k")
+
      llm_chain = LLMChain(prompt=prompt, llm=llm)
+     soap_response = llm_chain.run(question)

+     return soap_response
+
+ def transcript_sbar(text):
+     from langchain.llms import OpenAI
+     from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI
+
+     global sbar_response

+     question = (
+         "Use the following context given below to generate a detailed SBAR Report:\n\n"
+     )
+     question += text
+     print(question)
+
+     template = """Question: {question}
+
+     Answer: Let's think step by step."""
+
+     word_count = len(text.split())
+     prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     if word_count < 2000:
+         llm = ChatOpenAI(model="gpt-3.5-turbo")
+     elif word_count < 5000:
+         llm = ChatOpenAI(model="gpt-4")
+     else:
+         llm = ChatOpenAI(model="gpt-4-32k")
+
+     llm_chain = LLMChain(prompt=prompt, llm=llm)
+     sbar_response = llm_chain.run(question)
+
+     return sbar_response

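transcript_soap, transcript_sbar, and the text_soap/text_sbar functions below each repeat the same word-count ladder for choosing a chat model. A hedged refactor sketch, assuming the same thresholds and the LangChain ChatOpenAI wrapper (pick_llm is a hypothetical helper, not in this commit):

# Hypothetical refactor sketch: one place to map transcript length to a chat model.
from langchain.chat_models import ChatOpenAI

def pick_llm(text):
    word_count = len(text.split())
    if word_count < 2000:
        model = "gpt-3.5-turbo"   # short transcripts
    elif word_count < 5000:
        model = "gpt-4"           # mid-length transcripts
    else:
        model = "gpt-4-32k"       # long transcripts needing a bigger context window
    return ChatOpenAI(model=model)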
  def text_soap():
      from langchain.llms import OpenAI
      from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI

      global output
+     global soap_response
      output = output

      question = (

      Answer: Let's think step by step."""

+     word_count = len(output.split())
      prompt = PromptTemplate(template=template, input_variables=["question"])
+
+     if word_count < 2000:
+         llm = ChatOpenAI(model="gpt-3.5-turbo")
+     elif word_count < 5000:
+         llm = ChatOpenAI(model="gpt-4")
+     else:
+         llm = ChatOpenAI(model="gpt-4-32k")
+
      llm_chain = LLMChain(prompt=prompt, llm=llm)
+     soap_response = llm_chain.run(question)

+     return soap_response
+
+ def text_sbar():
+     from langchain.llms import OpenAI
+     from langchain import PromptTemplate, LLMChain
+     from langchain.chat_models import ChatOpenAI
+
+     global output
+     global sbar_response
+     output = output
+
+     question = (
+         "Use the following context given below to generate a detailed SBAR Report:\n\n"
+     )
+     question += output
+     print(question)

+     template = """Question: {question}
+
+     Answer: Let's think step by step."""

+     word_count = len(output.split())
+     prompt = PromptTemplate(template=template, input_variables=["question"])

+     if word_count < 2000:
+         llm = ChatOpenAI(model="gpt-3.5-turbo")
+     elif word_count < 5000:
+         llm = ChatOpenAI(model="gpt-4")
+     else:
+         llm = ChatOpenAI(model="gpt-4-32k")
+
+     llm_chain = LLMChain(prompt=prompt, llm=llm)
+     sbar_response = llm_chain.run(question)

+     return sbar_response
+
+ def soap_docx(name):
+     global soap_response
+     soap_response = soap_response
      import docx
+     import os
+
+     destination_folder = "/home/user/app/soap_docs/"
+     if not os.path.exists(destination_folder):
+         os.makedirs(destination_folder)

+     path = f"/home/user/app/soap_docs/SOAP_{name}.docx"

      doc = docx.Document()
+     doc.add_paragraph(soap_response)
      doc.save(path)

      return "Successfully saved .docx File"

+ def sbar_docx(name):
+     global sbar_response
+     sbar_response = sbar_response
+     import docx
+     import os
+
+     destination_folder = "/home/user/app/sbar_docs/"
+     if not os.path.exists(destination_folder):
+         os.makedirs(destination_folder)
+
+     path = f"/home/user/app/sbar_docs/SBAR_{name}.docx"
+
+     doc = docx.Document()
+     doc.add_paragraph(sbar_response)
+     doc.save(path)
+
+     return "Successfully saved .docx File"

455
 
 
465
  """
466
 
467
  with gr.Blocks(css=css) as demo:
468
+ gr.Markdown('<div class="centered">## Medical App</div>')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
469
 
470
+ with gr.Tab("SOAP and SBAR Note Creation"):
471
+ with gr.Column(elem_classes="col"):
 
 
472
 
473
+ with gr.Tab("From Recorded Audio"):
474
+ with gr.Column():
 
 
475
 
476
+ mic_audio_input = gr.Audio(source="microphone", type="filepath", label="Speak to the Microphone")
477
+ mic_audio_button = gr.Button("Generate Transcript")
478
+ mic_audio_output = gr.Textbox(label="Output")
479
+
480
+ mic_text_soap_button = gr.Button("Generate SOAP Report")
481
+ mic_text_soap_output = gr.Textbox(label="Output")
482
+ mic_text_sbar_button = gr.Button("Generate SBAR Report")
483
+ mic_text_sbar_output = gr.Textbox(label="Output")
484
+
485
+ mic_docx_input = gr.Textbox(label="Enter the name of .docx File")
486
+ mic_soap_docx_button = gr.Button("Save SOAP .docx File")
487
+ mic_soap_docx_output = gr.Textbox(label="Output")
488
+ mic_sbar_docx_button = gr.Button("Save SBAR .docx File")
489
+ mic_sbar_docx_output = gr.Textbox(label="Output")
490
+
491
+ with gr.Tab("From Uploaded Audio"):
492
+ with gr.Column():
493
+
494
+ upload_audio_input = gr.Audio(source="upload", type="filepath", label="Upload Audio File here")
495
+ upload_audio_button = gr.Button("Generate Transcript")
496
+ upload_audio_output = gr.Textbox(label="Output")
497
+
498
+ upload_text_soap_button = gr.Button("Generate SOAP Report")
499
+ upload_text_soap_output = gr.Textbox(label="Output")
500
+ upload_text_sbar_button = gr.Button("Generate SBAR Report")
501
+ upload_text_sbar_output = gr.Textbox(label="Output")
502
+
503
+ upload_docx_input = gr.Textbox(label="Enter the name of .docx File")
504
+ upload_soap_docx_button = gr.Button("Save SOAP .docx File")
505
+ upload_soap_docx_output = gr.Textbox(label="Output")
506
+ upload_sbar_docx_button = gr.Button("Save SBAR .docx File")
507
+ upload_sbar_docx_output = gr.Textbox(label="Output")
508
+
509
+ with gr.Tab("From Text Transcript"):
510
+ with gr.Column():
511
+
512
+ text_transcript_input = gr.Textbox(label="Enter your Transcript here")
513
+
514
+ text_text_soap_button = gr.Button("Generate SOAP Report")
515
+ text_text_soap_output = gr.Textbox(label="Output")
516
+ text_text_sbar_button = gr.Button("Generate SBAR Report")
517
+ text_text_sbar_output = gr.Textbox(label="Output")
518
+
519
+ text_docx_input = gr.Textbox(label="Enter the name of .docx File")
520
+ text_soap_docx_button = gr.Button("Save SOAP .docx File")
521
+ text_soap_docx_output = gr.Textbox(label="Output")
522
+ text_sbar_docx_button = gr.Button("Save SBAR .docx File")
523
+ text_sbar_docx_output = gr.Textbox(label="Output")
524
+
525
+ with gr.Tab("SOAP and SBAR Queries"):
526
+ with gr.Column(elem_classes="col"):
527
+
528
+ with gr.Tab("Query SOAP Reports"):
529
+ with gr.Column():
530
+
531
+ soap_refresh_button = gr.Button("Refresh")
532
+ ask_soap_input = gr.Dropdown(label="Choose File")
533
+
534
+ ask_soap_question = gr.Textbox(label="Enter Question here")
535
+ ask_soap_button = gr.Button("Submit")
536
+ ask_soap_output = gr.Textbox(label="Output")
537
+
538
+ with gr.Tab("Query SBAR Reports"):
539
+ with gr.Column():
540
 
541
+ sbar_refresh_button = gr.Button("Refresh")
542
+ ask_sbar_input = gr.Dropdown(label="Choose File")
543
+
544
+ ask_sbar_question = gr.Textbox(label="Enter Question here")
545
+ ask_sbar_button = gr.Button("Submit")
546
+ ask_sbar_output = gr.Textbox(label="Output")
547
+
548
+ with gr.Tab("All Queries"):
549
+ with gr.Column(elem_classes="col"):
550
+
551
+ local_search_input = gr.Textbox(label="Enter Question here")
552
+ local_search_button = gr.Button("Search")
553
+ local_search_output = gr.Textbox(label="Output")
554
+
555
+ local_gpt_button = gr.Button("Ask ChatGPT")
556
+ local_gpt_output = gr.Textbox(label="Output")
557
+
558
+
559
+ with gr.Tab("Documents Queries"):
560
+ with gr.Column(elem_classes="col"):
561
+
562
+ with gr.Tab("Upload and Process Documents"):
563
+ with gr.Column():
564
+
565
+ file_upload_input = gr.Files(label="Upload File(s) here")
566
+ file_upload_button = gr.Button("Upload")
567
+ file_upload_output = gr.Textbox(label="Output")
568
+
569
+ file_process_button = gr.Button("Process")
570
+ file_process_output = gr.Textbox(label="Output")
571
+
572
+ with gr.Tab("Query Documents"):
573
+ with gr.Column():
574
+
575
+ file_search_input = gr.Textbox(label="Enter Question here")
576
+ file_search_button = gr.Button("Search")
577
+ file_search_output = gr.Textbox(label="Output")
578
+
579
+ search_gpt_button = gr.Button("Ask ChatGPT")
580
+ search_gpt_output = gr.Textbox(label="Output")
581
+
582
+ file_delete_button = gr.Button("Delete")
583
+ file_delete_output = gr.Textbox(label="Output")
584
+
585
+ ######################################################################################################
586
+ file_upload_button.click(save_file, inputs=file_upload_input, outputs=file_upload_output)
587
+ file_process_button.click(process_file, inputs=None, outputs=file_process_output)
588
+
589
+ file_search_button.click(search_file, inputs=file_search_input, outputs=file_search_output)
590
+ search_gpt_button.click(search_gpt, inputs=file_search_input, outputs=search_gpt_output)
591
+
592
+ file_delete_button.click(delete_file, inputs=None, outputs=file_delete_output)
593
+
594
+ ######################################################################################################
595
+ local_search_button.click(local_search, inputs=local_search_input, outputs=local_search_output)
596
+ local_gpt_button.click(local_gpt, inputs=local_search_input, outputs=local_gpt_output)
597
+
598
+ #######################################################################################################
599
+ soap_refresh_button.click(soap_refresh, inputs=None, outputs=ask_soap_input)
600
+ ask_soap_button.click(ask_soap, inputs=[ask_soap_input, ask_soap_question], outputs=ask_soap_output)
601
+
602
+ sbar_refresh_button.click(sbar_refresh, inputs=None, outputs=ask_sbar_input)
603
+ ask_sbar_button.click(ask_sbar, inputs=[ask_sbar_input, ask_sbar_question], outputs=ask_sbar_output)
604
+
605
+ ####################################################################################################
606
+ mic_audio_button.click(audio_text, inputs=mic_audio_input, outputs=mic_audio_output)
607
+
608
+ mic_text_soap_button.click(text_soap, inputs=None, outputs=mic_text_soap_output)
609
+ mic_text_sbar_button.click(text_sbar, inputs=None, outputs=mic_text_sbar_output)
610
+
611
+ mic_soap_docx_button.click(soap_docx, inputs=mic_docx_input, outputs=mic_soap_docx_output)
612
+ mic_sbar_docx_button.click(sbar_docx, inputs=mic_docx_input, outputs=mic_sbar_docx_output)
613
+ ####################################################################################################
614
+ upload_audio_button.click(audio_text, inputs=upload_audio_input, outputs=upload_audio_output)
615
+
616
+ upload_text_soap_button.click(text_soap, inputs=None, outputs=upload_text_soap_output)
617
+ upload_text_sbar_button.click(text_sbar, inputs=None, outputs=upload_text_sbar_output)
618
+
619
+ upload_soap_docx_button.click(soap_docx, inputs=upload_docx_input, outputs=upload_soap_docx_output)
620
+ upload_sbar_docx_button.click(sbar_docx, inputs=upload_docx_input, outputs=upload_sbar_docx_output)
621
+ ###########################################################################################################
622
+ text_text_soap_button.click(transcript_soap, inputs=text_transcript_input, outputs=text_text_soap_output)
623
+ text_text_sbar_button.click(transcript_sbar, inputs=text_transcript_input, outputs=text_text_sbar_output)
624
+
625
+ text_soap_docx_button.click(soap_docx, inputs=text_docx_input, outputs=text_soap_docx_output)
626
+ text_sbar_docx_button.click(sbar_docx, inputs=text_docx_input, outputs=text_sbar_docx_output)
627
+ #############################################################################################################
628
 
629
  demo.queue()
630
  demo.launch()
631
+