scholarly360 committed
Commit 56696a1 · verified · 1 Parent(s): 52ad9f6

Create app.py

Files changed (1)
app.py +296 -0
app.py ADDED
@@ -0,0 +1,296 @@
+ import streamlit as st
+ st.set_page_config(layout="wide")
+ from annotated_text import annotated_text, annotation
+ import fitz
+ import os
+ import chromadb
+ import uuid
+ from pathlib import Path
+ # copy the OPEN_API_KEY environment variable to OPENAI_API_KEY, the name the openai client reads
+ os.environ['OPENAI_API_KEY'] = os.environ['OPEN_API_KEY']
+ st.title("Contracts Multiple File Search")
+ import pandas as pd
+
+ from langchain.retrievers import BM25Retriever, EnsembleRetriever
+ from langchain.schema import Document
+ from langchain.vectorstores import Chroma
+ from langchain.embeddings import HuggingFaceEmbeddings
+ embedding = HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5')
+ from FlagEmbedding import FlagReranker
+ reranker = FlagReranker('BAAI/bge-reranker-base')
+ import spacy
+ # Load the English model from SpaCy
+ nlp = spacy.load("en_core_web_md")
+
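+ # --- helper: persist uploaded PDFs to the working directory and open each one with PyMuPDF ---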
+ def util_upload_file_and_return_list_docs(uploaded_files):
+     #util_del_cwd()
+     list_docs = []
+     list_save_path = []
+     for uploaded_file in uploaded_files:
+         save_path = Path(os.getcwd(), uploaded_file.name)
+         with open(save_path, mode='wb') as w:
+             w.write(uploaded_file.getvalue())
+         #print('save_path:', save_path)
+         docs = fitz.open(save_path)
+         list_docs.append(docs)
+         list_save_path.append(save_path)
+     return(list_docs, list_save_path)
+ #### Helper function to split text using a rolling window (recommendation: use a smaller rolling window)
+ def split_txt_file_synthetic_sentence_rolling(ctxt, sentence_size_in_chars, sliding_size_in_chars, debug=False):
+     sliding_size_in_chars = sentence_size_in_chars - sliding_size_in_chars
+     pos_start = 0
+     pos_end = len(ctxt)
+     final_return = []
+     if(debug):
+         print('pos_start : ', pos_start)
+         print('pos_end : ', pos_end)
+     if(pos_end < sentence_size_in_chars):
+         return([{'section_org_text': ctxt[pos_start:pos_end], 'section_char_start': pos_start, 'section_char_end': pos_end}])
+     if(sentence_size_in_chars < sliding_size_in_chars):
+         return(None)
+     stop_condition = False
+     start = pos_start
+     end = start + sentence_size_in_chars
+     mydict = {}
+     mydict['section_org_text'] = ctxt[start:end]
+     mydict['section_char_start'] = start
+     mydict['section_char_end'] = end
+     final_return.append(mydict)
+     #### First window ENDS
+     while(stop_condition == False):
+         start = end - sliding_size_in_chars
+         end = start + sentence_size_in_chars
+         if(end > pos_end):
+             if(start < pos_end):
+                 end = pos_end
+                 mydict = {}
+                 mydict['section_org_text'] = ctxt[start:end]
+                 mydict['section_char_start'] = start
+                 mydict['section_char_end'] = end
+                 final_return.append(mydict)
+                 stop_condition = True
+             else:
+                 stop_condition = True
+         else:
+             mydict = {}
+             mydict['section_org_text'] = ctxt[start:end]
+             mydict['section_char_start'] = start
+             mydict['section_char_end'] = end
+             final_return.append(mydict)
+         if(debug):
+             print('start : ', start)
+             print('end : ', end)
+     return(final_return)
+ ### helper to make string out of iw_status
+ # def util_get_list_page_and_passage(docs):
+ #     page_documents = []
+ #     passage_documents = []
+ #     for txt_index, txt_page in enumerate(docs):
+ #         page_document = txt_page.get_text()##.encode("utf8") # get plain text (is in UTF-8)
+ #         page_documents.append(page_document)
+ #         sections = split_txt_file_synthetic_sentence_rolling(page_document,700,200)
+ #         for sub_sub_index, sub_sub_item in enumerate(sections):
+ #             sub_text=sub_sub_item['section_org_text']
+ #             passage_document = Document(page_content=sub_text, metadata={"page_index": txt_index})
+ #             passage_documents.append(passage_document)
+ #     return(page_documents,passage_documents)
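+ # --- sentence-level chunking: spaCy splits each page's text into sentences with character offsets ---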
+
+ def split_into_sentences_with_offsets(text):
+     """
+     Splits a paragraph into sentences and returns them along with their start and end offsets.
+     :param text: The input text to be split into sentences.
+     :return: A list of tuples, each containing a sentence and its start and end offsets.
+     """
+     doc = nlp(text)
+     return [(sent.text, sent.start_char, sent.end_char) for sent in doc.sents]
+
+ def util_get_list_page_and_passage(list_docs, list_save_path):
+     #page_documents = []
+     passage_documents = []
+     for ind_doc, docs in enumerate(list_docs):
+         for txt_index, txt_page in enumerate(docs):
+             page_document = txt_page.get_text()  ##.encode("utf8") # get plain text (is in UTF-8)
+             #page_documents.append(page_document)
+             sections = split_into_sentences_with_offsets(page_document)
+             for sub_sub_index, sub_sub_item in enumerate(sections):
+                 sub_text = sub_sub_item[0]
+                 passage_document = Document(page_content=sub_text, metadata={"page_content": page_document, "page_index": txt_index, "file_name": str(list_save_path[ind_doc])})
+                 passage_documents.append(passage_document)
+     return(passage_documents)
+
+ # def util_index_chromadb_passages():
+ #     ##### PROCESSING
+ #     # create client and a new collection
+ #     collection_name = str(uuid.uuid4().hex)
+ #     chroma_client = chromadb.EphemeralClient()
+ #     chroma_collection = chroma_client.get_or_create_collection(collection_name)
+ #     # define embedding function
+ #     embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name="BAAI/bge-small-en"))
+ #     vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+ #     return(chroma_client,chroma_collection,collection_name,vector_store,embed_model)
+
+ def util_get_only_content_inside_loop(page_no, page_documents):
+     # note: only referenced from the commented-out helper below
+     for index, item in enumerate(page_documents):
+         if(page_documents[index].metadata['txt_page_index'] == page_no):
+             return(page_documents[index].get_content())
+     return(None)
+ # def util_get_list_pageno_and_contents(page_documents,passage_documents,passage_nodes):
+ #     ''' page no starts with index 1 '''
+ #     return_value = []
+ #     for index, item in enumerate(passage_nodes):
+ #         page_no = passage_nodes[index].metadata['txt_page_index']
+ #         page_content = util_get_only_content_inside_loop(page_no,page_documents)
+ #         return_value.append((page_no+1,page_content))
+ #     return(return_value)
+
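+ # --- rerank the retrieved passages with the bge cross-encoder, keep the best-scoring passage per file, and return a DataFrame ---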
+ def util_get_list_pageno_and_contents(some_query_passage, passage_documents, passage_nodes):
+     ''' page no starts with index 1 '''
+     rescore = reranker.compute_score([[some_query_passage, x.page_content] for x in passage_nodes])
+     if not isinstance(rescore, list):
+         # FlagReranker returns a single float when only one pair is scored
+         rescore = [rescore]
+     print('rescore :: ', rescore)
+     tmp_array = []
+     for i, x in enumerate(passage_nodes):
+         tmp_dict = {"passage_content": x.page_content,
+                     "page_no": int(x.metadata['page_index']) + 1,
+                     "page_content": str(x.metadata['page_content']),
+                     "file_name": str(x.metadata['file_name']),
+                     "score": float(rescore[i])}
+         tmp_array.append(tmp_dict)
+     df = pd.DataFrame(tmp_array)
+     df = df.sort_values(by='score', ascending=False)
+     df = df.drop_duplicates(subset=['file_name'], keep='first')
+     df = df[["passage_content", "file_name", "page_no", "page_content", "score"]]
+     return(df)
+
+ # # def util_openai_extract_entity(example_passage, example_entity, page_content):
+ # #     import openai
+ # #     openai.api_key = os.environ['OPENAI_API_KEY']
+
+ # #     content = """Find the Entity based on Text . Return empty string if Entity does not exists. Here is one example below
+ # #     Text: """ + example_passage + """
+ # #     Entity: """ + example_entity + """
+
+ # #     Text: """ + page_content + """
+ # #     Entity: """
+
+ # #     return_value = openai.ChatCompletion.create(model="gpt-4",temperature=0.0001,messages=[{"role": "user", "content": content},])
+ # #     return(str(return_value['choices'][0]['message']['content']))
+ def util_openai_extract_clause(example_prompt, page_content):
+     import openai
+     openai.api_key = os.environ['OPENAI_API_KEY']
+     content = example_prompt
+     content = content + "\n Answer precisely; do not add anything extra, and try to locate the answer in the below context \n context: "
+     return_value = openai.ChatCompletion.create(model="gpt-3.5-turbo", temperature=0.0001, messages=[{"role": "user", "content": content + "\n" + page_content}, ])
+     return(str(return_value['choices'][0]['message']['content']))
+
+
+ def util_openai_hyde(example_prompt):
+     import openai
+     openai.api_key = os.environ['OPENAI_API_KEY']
+     return_value = openai.ChatCompletion.create(model="gpt-3.5-turbo", temperature=0.0001, messages=[
+         {"role": "system", "content": "You are a legal contract lawyer. Generate a summary from the below text." + "\n"},
+         {"role": "user", "content": example_prompt + "\n"},
+     ])
+     return(str(return_value['choices'][0]['message']['content']))
+
+
+ def util_openai_format(example_passage, page_content):
+     '''
+     annotated_text(" ",annotation("ENTITY : ", str(page_no)),)
+     '''
+     if(True):
+         found_value = util_openai_extract_clause(example_passage, page_content)
+         if(len(found_value) > 0):
+             found_value = found_value.strip()
+             first_index = page_content.find(found_value)
+             if(first_index != -1):
+                 print('first_index : ', first_index)
+                 print('found_value : ', found_value)
+                 # slice up to first_index (not first_index - 1) so no character is dropped before the highlight
+                 return(annotated_text(page_content[0:first_index], annotation(found_value, " FOUND ENTITY "), page_content[first_index + len(found_value):]))
+     return(annotated_text(page_content))
+ def util_openai_modify_prompt(example_prompt, page_content):
+     import openai
+     openai.api_key = os.environ['OPENAI_API_KEY']
+     my_prompt = """Expand the original Query to show exact results for extraction\n
+     Query: """ + example_prompt  # + """\nDocument: """ + page_content + """ """
+     return_value = openai.ChatCompletion.create(model="gpt-4", temperature=0.0001, messages=[{"role": "user", "content": my_prompt}, ])
+     return(str(return_value['choices'][0]['message']['content']))
+
+ # def create_bm25_page_rank(page_list_retrieve, page_query):
+ #     """ page_corpus : array of page text , page_query is user query """
+ #     from operator import itemgetter
+ #     from rank_bm25 import BM25Okapi
+ #     tokenized_corpus = [doc.split(" ") for x, doc in page_list_retrieve]
+ #     tokenized_query = page_query.split(" ")
+ #     bm25 = BM25Okapi(tokenized_corpus)
+ #     doc_scores = bm25.get_scores(tokenized_query).tolist()
+ #     tmp_list = []
+ #     for index, item in enumerate(page_list_retrieve):
+ #         tmp_list.append((item[0], item[1], doc_scores[index]))
+ #     tmp_list = sorted(tmp_list, key=itemgetter(2), reverse=True)
+ #     return(tmp_list)
+
+
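+ # --- Streamlit UI: upload contracts, enter a query, then run hybrid BM25 + dense retrieval over the extracted sentences ---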
+ passage_documents = []
+
+ if(True):
+     with st.form("my_form"):
+         multi = '''1. Download and Upload Multiple contracts (PDF)
+
+ e.g. https://www.barc.gov.in/tenders/GCC-LPS.pdf
+
+ e.g. https://www.montrosecounty.net/DocumentCenter/View/823/Sample-Construction-Contract
+ '''
+         st.markdown(multi)
+         multi = '''2. Insert a query to search, or a passage to find similar language.'''
+         st.markdown(multi)
+         multi = '''3. Press Index and Answer.'''
+         st.markdown(multi)
+         multi = '''
+ **An attempt is made to retrieve the most appropriate page and passage.** \n
+ '''
+         st.markdown(multi)
+         #uploaded_file = st.file_uploader("Choose a file")
+
+         list_docs = []
+         list_save_path = []
+         uploaded_files = st.file_uploader("Choose file(s)", accept_multiple_files=True)
+         print('uploaded_files ', uploaded_files)
+
+         single_example_passage = st.text_area('Enter a query or a similar passage here and press Index and Answer', "What is Governing Law?")
+         submitted = st.form_submit_button("Index and Answer")
+
+         # with accept_multiple_files=True, uploaded_files is a list, so check that it is non-empty
+         if submitted and uploaded_files:
+             list_docs, list_save_path = util_upload_file_and_return_list_docs(uploaded_files)
+             passage_documents = util_get_list_page_and_passage(list_docs, list_save_path)
+
+             # st.button("Chat")
+             # if st.button('Chat'):
+             bm25_retriever = BM25Retriever.from_documents(passage_documents)
+             bm25_retriever.k = 2
+             chroma_vectorstore = Chroma.from_documents(passage_documents, embedding)
+             chroma_retriever = chroma_vectorstore.as_retriever(search_kwargs={"k": 2})
+             ensemble_retriever = EnsembleRetriever(retrievers=[bm25_retriever, chroma_retriever], weights=[0.25, 0.75])
+             passage_nodes = ensemble_retriever.get_relevant_documents(single_example_passage)
+             print('len(passage_nodes):', len(passage_nodes))
+             df = util_get_list_pageno_and_contents(single_example_passage, passage_documents, passage_nodes)
+             st.write(df)
+             # print('len(page_list_retrieve):', len(page_list_retrieve))
+             # if(len(page_list_retrieve)>0):
+             #     page_list_retrieve = list(set(page_list_retrieve))
+             #     for iindex in page_list_retrieve:
+             #         page_no = iindex[0]
+             #         page_content = iindex[1]
+             #         annotated_text(" ",annotation("RELEVANT PAGENO : ", str(page_no), font_family="Comic Sans MS", border="2px dashed red"),)
+             #         util_openai_format(single_example_passage, page_content)
+             #         annotated_text(" ",annotation("RELEVANT PASSAGE : ", "", font_family="Comic Sans MS", border="2px dashed red"),)
+             #         st.write(found_passage)
+ # pchroma_client = chromadb.Client()
+ # for citem in pchroma_client.list_collections():
+ #     print(citem.name)
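The file above pulls in a number of third-party packages. A minimal sketch of the dependencies and launch steps this Space appears to need, inferred from the imports (the package list and commands are assumptions, not part of the commit; the legacy openai<1.0 ChatCompletion API is used, and the OPEN_API_KEY secret must be set in the environment):

requirements.txt (sketch): streamlit, st-annotated-text, PyMuPDF, chromadb, langchain, rank_bm25, sentence-transformers, FlagEmbedding, spacy, pandas, openai<1.0

run (sketch):
python -m spacy download en_core_web_md
streamlit run app.py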