PyaeSoneK committed
Commit e31742a · 1 Parent(s): 6eb200e

Update app.py

Files changed (1)
  1. app.py +22 -73
app.py CHANGED
@@ -1,4 +1,4 @@
- from langchain.document_loaders import TextLoader
+
  import streamlit as st
  from streamlit_chat import message as st_message
  import pandas as pd
@@ -12,6 +12,9 @@ import json
  import torch
  from tqdm.auto import tqdm
  from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from dotenv import load_dotenv
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import CharacterTextSplitter


  from transformers import AutoModel
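
The three imports added here are the building blocks of a PDF upload flow: load_dotenv reads API keys from a .env file, PdfReader extracts text from uploaded PDFs, and CharacterTextSplitter chunks that text. A minimal sketch of how they are commonly combined (the file name and splitter settings are illustrative assumptions, not values from this commit):

# Sketch only: typical use of the newly imported modules.
# "example_legal_doc.pdf" and the chunking parameters are assumptions.
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter

load_dotenv()  # load environment variables (e.g. OPENAI_API_KEY) from a local .env file

reader = PdfReader("example_legal_doc.pdf")  # hypothetical input file
raw_text = "".join(page.extract_text() or "" for page in reader.pages)

text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=1000,     # illustrative values, not taken from app.py
    chunk_overlap=200,
    length_function=len,
)
text_chunks = text_splitter.split_text(raw_text)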
@@ -34,76 +37,33 @@ from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT



- prompt_template = """
-
- You are the chatbot and the advanced legal assitant that can give answers to all the legal questions a common citizen would have . Your job is to give answers when questions about General legal information, Family law, Employment law, Consumer rights, Housing and tenancy, Personal injury, Wills and estates, Criminal law are asked.
- Your job is to answer questions only and only related to Legal aspect. Anything unrelated should be responded with the fact that your main job is solely to provide assistance regarding Legality.
- MUST only use the following pieces of context to answer the question at the end. If the answers are not in the context or you are not sure of the answer, just say that you don't know, don't try to make up an answer.
- {context}
- Question: {question}
- When encountering abusive, offensive, or harmful language, such as fuck, bitch,etc, just politely ask the users to maintain appropriate behaviours.
- Always make sure to elaborate your response and use vibrant, positive tone to represent good branding of the school.
- Never answer with any unfinished response
- Answer:
- """
-
- PROMPT = PromptTemplate(
-     template=prompt_template, input_variables=["context", "question"]
- )
- chain_type_kwargs = {"prompt": PROMPT}
-
-
- #This version includes the memory and custom prompt, representing the final version
-
- import streamlit as st
- from streamlit_chat import message as st_message
- import pandas as pd
- import numpy as np
- import datetime
- import gspread
- import pickle
- import os
- import csv
- import json
- import torch
- from tqdm.auto import tqdm
- from langchain.text_splitter import RecursiveCharacterTextSplitter
-

  # from langchain.vectorstores import Chroma
  from langchain.vectorstores import FAISS
- from langchain.embeddings import HuggingFaceInstructEmbeddings
+ from langchain.embeddings import HuggingFaceInstructEmbeddings,OpenAIEmbeddings


- from langchain import HuggingFacePipeline
  from langchain.chains import RetrievalQA
- from langchain.prompts import PromptTemplate
- from langchain.memory import ConversationBufferWindowMemory


- from langchain.chains import LLMChain
- from langchain.chains import ConversationalRetrievalChain
- from langchain.chains.question_answering import load_qa_chain
- from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
+



  prompt_template = """

- You are the chatbot and the face of Asian Institute of Technology (AIT). Your job is to give answers to prospective and current students about the school.
- Your job is to answer questions only and only related to the AIT. Anything unrelated should be responded with the fact that your main job is solely to provide assistance regarding AIT.
+ You are the chatbot and the advanced legal assitant that can give answers to all the legal questions a common citizen would have . Your job is to give answers when questions about General legal information, Family law, Employment law, Consumer rights, Housing and tenancy, Personal injury, Wills and estates, Criminal law are asked.
+ Your job is to answer questions only and only related to Legal aspect. Anything unrelated should be responded with the fact that your main job is solely to provide assistance regarding Legality.
  MUST only use the following pieces of context to answer the question at the end. If the answers are not in the context or you are not sure of the answer, just say that you don't know, don't try to make up an answer.
-
-
  {context}
  Question: {question}
-
  When encountering abusive, offensive, or harmful language, such as fuck, bitch,etc, just politely ask the users to maintain appropriate behaviours.
  Always make sure to elaborate your response and use vibrant, positive tone to represent good branding of the school.
  Never answer with any unfinished response
-
  Answer:
  """
+
+
  PROMPT = PromptTemplate(
      template=prompt_template, input_variables=["context", "question"]
  )
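
The surviving prompt_template keeps the {context} and {question} placeholders that a retrieval chain expects, and the removed chain_type_kwargs line shows how such a PROMPT is normally injected into it. A minimal sketch under that assumption (llm and vector_database stand in for the objects built later in app.py by load_llm_model and get_vectorstore):

# Sketch only: wiring the custom PROMPT into a RetrievalQA chain via
# chain_type_kwargs, as the removed line suggests. llm and vector_database
# are placeholders for objects defined elsewhere in app.py.
from langchain.chains import RetrievalQA

chain_type_kwargs = {"prompt": PROMPT}

qa_chain = RetrievalQA.from_chain_type(
    llm=llm,                                    # HuggingFacePipeline from load_llm_model()
    chain_type="stuff",                         # retrieved chunks are stuffed into {context}
    retriever=vector_database.as_retriever(),   # FAISS store from get_vectorstore()
    chain_type_kwargs=chain_type_kwargs,
)
answer = qa_chain.run("What are my rights as a tenant?")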
@@ -115,33 +75,22 @@ st.set_page_config(
                     page_icon = '🕵')


-
-
- banksandbanking_web_documents = None
-
- @st.cache
- def load_scraped_web_info():
-     global banksandbanking_web_documents
-     text_splitter = RecursiveCharacterTextSplitter(...)
-     chunked_text = text_splitter.create_documents(banksandbanking_web_documents)
-     return chunked_text
-
- # Load pickle only once at startup
- with open("Banksandbanking.htm", "rb") as fp:
-     banksandbanking_web_documents = pickle.load(fp)
-
+ @st.cache_data
+ def get_pdf_text(pdf_docs):
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+     return text

  @st.cache_resource
- def load_embedding_model():
-     embedding_model = HuggingFaceInstructEmbeddings(model_name='hkunlp/instructor-base',
-                                                     model_kwargs = {'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu')})
-     return embedding_model
-
- @st.cache_data
- def load_faiss_index():
-     vector_database = FAISS.load_local("faiss_index_web_and_curri_new", embedding_model) #CHANGE THIS FAISS EMBEDDED KNOWLEDGE
+ def get_vectorstore(text_chunks):
+     embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
+     vector_database = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
      return vector_database

+
  @st.cache_resource
  def load_llm_model():
      llm = HuggingFacePipeline.from_model_id(model_id= 'PyaeSoneK/pythia_70m_legalQA',
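
The two new cached helpers replace the old pickle and prebuilt FAISS index workflow: text now comes from uploaded PDFs and the vector store is built in memory. A minimal sketch of how they would typically be chained behind a Streamlit upload widget (the widget label and chunk sizes are assumptions, not shown in this hunk):

# Sketch only: connecting get_pdf_text and get_vectorstore to a file uploader.
# Widget label and chunking parameters are illustrative assumptions.
import streamlit as st
from langchain.text_splitter import CharacterTextSplitter

pdf_docs = st.file_uploader("Upload your PDFs", type="pdf", accept_multiple_files=True)

if pdf_docs:
    raw_text = get_pdf_text(pdf_docs)                  # new helper from this commit
    splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    text_chunks = splitter.split_text(raw_text)
    vector_database = get_vectorstore(text_chunks)     # in-memory FAISS index
    retriever = vector_database.as_retriever(search_kwargs={"k": 3})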
 
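The diff is cut off inside load_llm_model, so the remaining arguments of HuggingFacePipeline.from_model_id are not shown. For orientation only, a generic call and its hookup to the retrieval chain usually look like this (the task, device, and chain settings are assumptions, not the commit's actual values):

# Sketch only: a typical completion of the truncated from_model_id call and
# how the resulting llm plugs into the retrieval chain. All settings shown
# here are assumptions; retriever and PROMPT refer to objects defined above.
from langchain.llms import HuggingFacePipeline
from langchain.chains import RetrievalQA

llm = HuggingFacePipeline.from_model_id(
    model_id='PyaeSoneK/pythia_70m_legalQA',
    task='text-generation',   # assumed task for a Pythia-style causal LM
    device=-1,                # CPU; set a GPU index if one is available
)

qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type='stuff',
    retriever=retriever,                    # placeholder: FAISS retriever from the sketch above
    chain_type_kwargs={'prompt': PROMPT},   # PROMPT as defined in the diff
)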