nadaaaita committed
Commit 5299cfa · 1 Parent(s): 1b8dab4

passage finder version
app.py CHANGED
@@ -1,167 +1,86 @@
  import gradio as gr
- import os
- from langchain_core.messages import HumanMessage
- import src.srf_bot as sb
- import prompts.system_prompts as sp


- # Initialize chatbot
- chatbot = sb.SRFChatbot()
-
- # Define the respond function
- def respond(query, history):
-     formatted_query = [HumanMessage(content=query)]
-     # Invoke the graph with properly formatted input
-     result = chatbot.graph.invoke({"messages": formatted_query}, chatbot.config)
-     # Get the passages from the graph and append to history if documents exist
-     state = chatbot.graph.get_state(config=chatbot.config).values
-     documents = state.get("documents")
-     passages = ''
-     if documents and len(documents) > 0:
-         for d in documents:
-             passages += f'<b>{d.metadata["publication_name"]} - {d.metadata["chapter_name"]}</b>\n{d.page_content}\n\n'
-         history.append((f'Passages: {query}', passages))
-     # Extract the assistant's response and append to history
-     response = result["messages"][-1].content
-     system_message_dropdown = state.get("system_message_dropdown")
-     history.append((query, f"<i>[{system_message_dropdown}]</i>\n" + response))
-     return history
-
- # Gradio interface with black and grey color scheme
  with gr.Blocks(css="""
- .gradio-container {
-     background-color: #F0F0F0;
-     font-family: 'Arial', sans-serif;
- }
- h1, h2, p {
-     color: black;
- }
- h1 {
-     font-size: 32px;
-     text-align: left;
- }
- h2 {
-     font-size: 24px;
- }
- p {
-     font-size: 18px;
-     margin-bottom: 15px;
- }
- .gr-button {
-     background-color: #333333;
-     color: white;
-     font-size: 18px;
-     padding: 10px;
- }
- .gr-textbox textarea {
-     font-size: 18px;
-     color: black;
- }
- .gr-dropdown {
-     font-size: 18px;
-     color: black;
- }
- .source-box {
-     background-color: white;
-     padding: 10px;
-     border-radius: 8px;
-     margin-top: 20px;
-     color: black;
-     border: 1px solid #D0D0D0;
- }
-
-
- /* Dark mode specific styles */
- @media (prefers-color-scheme: dark) {
-     .gradio-container {
-         background-color: #1e1e1e; /* Dark background */
-         color: white; /* Light text color for contrast */
-     }
-     h1, h2, p {
-         color: white; /* Light text for headings */
-     }
-     .gr-textbox textarea {
-         background-color: #333333; /* Dark background for text area */
-         color: white; /* Light text color */
-     }
-     .gr-button {
-         background-color: #555555; /* Darker button for dark mode */
-         color: white;
-     }
-     .gr-dropdown {
-         background-color: #333333; /* Dark dropdown background */
-         color: white;
-     }
-     .source-box {
-         background-color: #333333; /* Dark background for sources box */
-         color: white;
-         border: 1px solid #555555; /* Lighter border for visibility */
-     }
- }
-
- @media (max-width: 600px) {
-     .gr-row { flex-direction: column !important; }
-     .gr-column { width: 100% !important; }
- }
  """) as demo:

-     # Title
-     gr.Markdown("# SRF Chatbot")
-
-     with gr.Row(elem_classes="gr-row"):
-         with gr.Column(scale=4, elem_classes="gr-column"):
-             # Chatbot interface
-             chatbot_output = gr.Chatbot(height=600)  # Increased height for longer chat interface
-             user_input = gr.Textbox(placeholder="Type your question here...", label="Your Question", value="What is the meaning of life?")
-             submit_button = gr.Button("Submit")
-
-         with gr.Column(scale=1, elem_classes="gr-column"):
-             # Dropdown to select system prompts
-             system_prompt_dropdown = gr.Dropdown(
-                 choices=list(sp.system_prompt_templates.keys()),
-                 label="Select Chatbot Instructions",
-                 value=list(sp.system_prompt_templates.keys())[0],
-                 elem_classes="gr-dropdown"
-             )
-             # Display the selected system prompt
-             system_prompt_display = gr.Textbox(
-                 value=sp.system_prompt_templates[list(sp.system_prompt_templates.keys())[0]],
-                 label="Current Chatbot Instructions",
-                 lines=5,
-                 interactive=False
-             )
-
-             # Sources box (Now white, matching the other boxes)
-             gr.Markdown("""
-             <div class="source-box">
-             <strong>Available sources:</strong>
-             <ul>
-                 <li>Journey to Self-Realization</li>
-                 <li>The Second Coming of Christ</li>
-                 <li>Autobiography of a Yogi</li>
-             </ul>
-             </div>
-             """)
-
-     # Update system prompt display when a new prompt is selected
-     system_prompt_dropdown.change(
-         fn=chatbot.reset_system_prompt,
-         inputs=[system_prompt_dropdown],
-         outputs=[system_prompt_display]
-     )
-
-     # Submit button logic to handle chatbot conversation
-     submit_button.click(
-         fn=respond,
-         inputs=[user_input, chatbot_output],
-         outputs=[chatbot_output]
-     )
-
- # Access the secrets
- username = os.getenv("USERNAME")
- password = os.getenv("PASSWORD")
-
- # Launch the interface
- demo.launch(share=True, auth=(username, password), debug=True)
  import gradio as gr
+ from langchain_core.messages import HumanMessage
+ import src.passage_finder as pf

+ # Initialize PassageFinder
+ passage_finder = pf.PassageFinder()

+ def respond(message):
+     config = passage_finder.get_configurable()
+     results = passage_finder.graph.invoke({"messages": [HumanMessage(content=message)]}, config)
+
+     documents = results.get('documents', [])
+
+     output = []
+     for doc in documents:
+         quotes = doc.metadata.get('matched_quotes', [])
+         publication = doc.metadata.get('publication_name', 'Unknown Publication')
+         chapter = doc.metadata.get('chapter_name', 'Unknown Chapter')
+         full_passage = doc.metadata.get('highlighted_content', '')
+
+         quote_text = "\n".join([f"• \"{q.quote}\"" for q in quotes])
+         output.append({
+             "quotes": quote_text,
+             "reference": f"{publication}: {chapter}",
+             "full_passage": full_passage
+         })
+
+     return output

+ def process_input(message):
+     results = respond(message)
+     html_output = "<div class='response-container'>"
+     for result in results:
+         html_output += f"""
+         <div class='result-item'>
+             <h3 class='reference'>{result['reference']}</h3>
+             <div class='quotes'>{result['quotes'].replace("", "<br>• ")}</div>
+             <details>
+                 <summary>Show full passage</summary>
+                 <div class='full-passage'>{result['full_passage']}</div>
+             </details>
+         </div>
+         """
+     html_output += "</div>"
+     return html_output

  with gr.Blocks(css="""
+ body { background-color: #f0f0f0; }
+ .gradio-container { background-color: #ffffff; }
+ .response-container { border: 1px solid #e0e0e0; border-radius: 8px; padding: 20px; background-color: #f9f9f9; }
+ .result-item { margin-bottom: 20px; background-color: white; padding: 15px; border-radius: 5px; box-shadow: 0 2px 5px rgba(0,0,0,0.1); }
+ .reference { color: #2c3e50; margin-bottom: 10px; }
+ .quotes { font-style: italic; margin-bottom: 10px; }
+ .full-passage { margin-top: 10px; padding: 10px; background-color: #f0f0f0; border-radius: 5px; }
+ details summary { cursor: pointer; color: #3498db; font-weight: bold; }
+ details summary:hover { text-decoration: underline; }
  """) as demo:
+     gr.Markdown("# SRF Teachings Chatbot")
+     gr.Markdown("Ask questions about Self-Realization Fellowship teachings and receive responses with relevant quotes.")

+     with gr.Row():
+         input_text = gr.Textbox(
+             placeholder="Ask about the meaning of life, spirituality, or any other topic...",
+             label="Your Question"
+         )
+         submit_btn = gr.Button("Submit", variant="primary")

+     output_area = gr.HTML()

+     gr.Markdown("### Sources")
+     gr.Textbox(value="Journey to Self Realization, Second Coming of Christ, and Autobiography of a Yogi",
+                label="Available Sources", interactive=False)

+     submit_btn.click(process_input, inputs=input_text, outputs=output_area)

+     gr.Examples(
+         examples=[
+             "What is the meaning of life?",
+             "Importance of good posture",
+             "How can I find inner peace?",
+             "What does Paramahansa Yogananda say about meditation?",
+         ],
+         inputs=input_text,
+     )

+ demo.launch()
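
For orientation, here is a minimal, self-contained sketch of the per-document record that the new respond() builds and process_input() renders. The sample document below is hypothetical; only the metadata keys (matched_quotes, publication_name, chapter_name, highlighted_content) mirror what the code above reads.

from types import SimpleNamespace

# Hypothetical document shaped like the ones the PassageFinder graph is expected to return
doc = SimpleNamespace(metadata={
    "matched_quotes": [SimpleNamespace(quote="Be calmly active and actively calm.")],
    "publication_name": "Journey to Self-Realization",
    "chapter_name": "A sample chapter",
    "highlighted_content": "... <mark>Be calmly active and actively calm.</mark> ...",
})

quotes = doc.metadata.get("matched_quotes", [])
entry = {
    "quotes": "\n".join(f'• "{q.quote}"' for q in quotes),
    "reference": f"{doc.metadata['publication_name']}: {doc.metadata['chapter_name']}",
    "full_passage": doc.metadata["highlighted_content"],
}
print(entry["reference"])  # Journey to Self-Realization: A sample chapter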
 
app2.py → app_old.py RENAMED
@@ -4,22 +4,16 @@ from langchain_core.messages import HumanMessage
  import src.srf_bot as sb
  import prompts.system_prompts as sp

+
+
  # Initialize chatbot
  chatbot = sb.SRFChatbot()

  # Define the respond function
- def respond(query, history, custom_instructions, use_custom_instructions, selected_system_prompt):
+ def respond(query, history):
      formatted_query = [HumanMessage(content=query)]
-
-     # Set the system instructions based on user input or dropdown
-     if use_custom_instructions:
-         chatbot.config["system_prompt"] = custom_instructions  # Use custom instructions
-     else:
-         chatbot.config["system_prompt"] = sp.system_prompt_templates[selected_system_prompt]  # Use selected from dropdown
-
      # Invoke the graph with properly formatted input
      result = chatbot.graph.invoke({"messages": formatted_query}, chatbot.config)
-
      # Get the passages from the graph and append to history if documents exist
      state = chatbot.graph.get_state(config=chatbot.config).values
      documents = state.get("documents")
@@ -28,12 +22,10 @@ def respond(query, history, custom_instructions, use_custom_instructions, select
          for d in documents:
              passages += f'<b>{d.metadata["publication_name"]} - {d.metadata["chapter_name"]}</b>\n{d.page_content}\n\n'
          history.append((f'Passages: {query}', passages))
-
      # Extract the assistant's response and append to history
      response = result["messages"][-1].content
      system_message_dropdown = state.get("system_message_dropdown")
      history.append((query, f"<i>[{system_message_dropdown}]</i>\n" + response))
-
      return history


@@ -79,31 +71,33 @@ with gr.Blocks(css="""
      color: black;
      border: 1px solid #D0D0D0;
  }
+

+ /* Dark mode specific styles */
  @media (prefers-color-scheme: dark) {
      .gradio-container {
-         background-color: #1e1e1e;
-         color: white;
+         background-color: #1e1e1e; /* Dark background */
+         color: white; /* Light text color for contrast */
      }
      h1, h2, p {
-         color: white;
+         color: white; /* Light text for headings */
      }
      .gr-textbox textarea {
-         background-color: #333333;
-         color: white;
+         background-color: #333333; /* Dark background for text area */
+         color: white; /* Light text color */
      }
      .gr-button {
-         background-color: #555555;
+         background-color: #555555; /* Darker button for dark mode */
          color: white;
      }
      .gr-dropdown {
-         background-color: #333333;
+         background-color: #333333; /* Dark dropdown background */
          color: white;
      }
      .source-box {
-         background-color: #333333;
+         background-color: #333333; /* Dark background for sources box */
          color: white;
-         border: 1px solid #555555;
+         border: 1px solid #555555; /* Lighter border for visibility */
      }
  }

@@ -131,7 +125,6 @@ with gr.Blocks(css="""
                 value=list(sp.system_prompt_templates.keys())[0],
                 elem_classes="gr-dropdown"
             )
-
             # Display the selected system prompt
             system_prompt_display = gr.Textbox(
                 value=sp.system_prompt_templates[list(sp.system_prompt_templates.keys())[0]],
@@ -140,20 +133,7 @@ with gr.Blocks(css="""
                 interactive=False
             )

-             # Custom instructions input
-             custom_instructions = gr.Textbox(
-                 placeholder="Write your own instructions here...",
-                 label="Or Write Your Own Instructions",
-                 lines=5
-             )
-
-             # Toggle between using dropdown or custom instructions
-             use_custom_instructions = gr.Checkbox(
-                 label="Use Custom Instructions",
-                 value=False
-             )
-
-             # Sources box
+             # Sources box (Now white, matching the other boxes)
             gr.Markdown("""
             <div class="source-box">
             <strong>Available sources:</strong>
@@ -164,10 +144,10 @@ with gr.Blocks(css="""
             </ul>
             </div>
             """)
-
+
      # Update system prompt display when a new prompt is selected
      system_prompt_dropdown.change(
-         fn=lambda x: sp.system_prompt_templates[x],
+         fn=chatbot.reset_system_prompt,
          inputs=[system_prompt_dropdown],
          outputs=[system_prompt_display]
      )
@@ -175,7 +155,7 @@ with gr.Blocks(css="""
      # Submit button logic to handle chatbot conversation
      submit_button.click(
          fn=respond,
-         inputs=[user_input, chatbot_output, custom_instructions, use_custom_instructions, system_prompt_dropdown],
+         inputs=[user_input, chatbot_output],
          outputs=[chatbot_output]
      )

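
The dropdown-to-textbox wiring kept in this file follows Gradio's standard event pattern. Here is a minimal, self-contained sketch of that pattern with a stand-in template dict; the dict, labels, and lambda are hypothetical stand-ins for sp.system_prompt_templates and chatbot.reset_system_prompt.

import gradio as gr

# Hypothetical prompt templates, standing in for sp.system_prompt_templates
templates = {
    "Default": "Answer questions about the SRF teachings.",
    "Concise": "Answer in two or three sentences.",
}

with gr.Blocks() as demo:
    dropdown = gr.Dropdown(choices=list(templates.keys()), value="Default", label="Select Chatbot Instructions")
    display = gr.Textbox(value=templates["Default"], label="Current Chatbot Instructions", interactive=False)
    # The dropdown's current value is passed to fn; the return value populates the textbox
    dropdown.change(fn=lambda name: templates[name], inputs=[dropdown], outputs=[display])

demo.launch()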
 
prompts/quote_finder_prompts.py CHANGED
@@ -3,6 +3,7 @@ from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTempla
  system_template = '''The following are passages from the books and teachings of Paramhansa Yogananda, his disciples, or other
  sources related to the Self-Realization Fellowship / Yogoda Satsanga Society.
  Extract the most relevant quotes from the given passage that directly address the user's original query and are the most helpful and/or inspirational to the user.
+ Quotes can be up to three sentences long.
  Ensure that quotes are verbatim and provide specific information related to the query.
  '''

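
As a rough sketch of how a system template like this is typically assembled into a chat prompt with the classes named in this file's import (ChatPromptTemplate, SystemMessagePromptTemplate); the human-message template and its variable names below are assumptions for illustration, not part of this diff.

from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

system_template = '''...'''  # the quote-finder instructions shown above

# Hypothetical human-side template; the real one is defined elsewhere in quote_finder_prompts.py
human_template = "Query: {query}\n\nPassage:\n{passage}"

quote_finder_prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template(human_template),
])

messages = quote_finder_prompt.format_messages(query="How can I find inner peace?", passage="<retrieved passage text>")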
 
requirements.txt CHANGED
@@ -1,7 +1,8 @@
  aiofiles==23.2.1
- aiohappyeyeballs==2.4.0
- aiohttp==3.10.5
+ aiohappyeyeballs==2.4.3
+ aiohttp==3.10.8
  aiosignal==1.3.1
+ altair==5.4.1
  annotated-types==0.7.0
  anyio==4.6.0
  appnope==0.1.4
@@ -14,6 +15,8 @@ attrs==24.2.0
  babel==2.16.0
  beautifulsoup4==4.12.3
  bleach==6.1.0
+ blinker==1.8.2
+ cachetools==5.5.0
  certifi==2024.8.30
  cffi==1.17.1
  charset-normalizer==3.3.2
@@ -21,7 +24,7 @@ click==8.1.7
  comm==0.2.2
  contourpy==1.3.0
  cycler==0.12.1
- debugpy==1.8.5
+ debugpy==1.8.6
  decorator==5.1.1
  defusedxml==0.7.1
  distro==1.9.0
@@ -30,21 +33,24 @@ fastapi==0.115.0
  fastjsonschema==2.20.0
  ffmpy==0.4.0
  filelock==3.16.1
- fonttools==4.53.1
+ fonttools==4.54.1
  fqdn==1.5.1
  frozenlist==1.4.1
  fsspec==2024.9.0
- gradio==4.44.0
+ fuzzywuzzy==0.18.0
+ gitdb==4.0.11
+ GitPython==3.1.43
+ gradio==4.44.1
  gradio_client==1.3.0
  greenlet==3.1.1
- grpcio==1.66.1
- grpcio-tools==1.66.1
+ grpcio==1.66.2
+ grpcio-tools==1.66.2
  h11==0.14.0
  h2==4.1.0
  hpack==4.0.0
  httpcore==1.0.5
  httpx==0.27.2
- huggingface-hub==0.25.0
+ huggingface-hub==0.25.1
  hyperframe==6.0.1
  idna==3.10
  importlib_resources==6.4.5
@@ -73,14 +79,15 @@ jupyterlab_pygments==0.3.0
  jupyterlab_server==2.27.3
  jupyterlab_widgets==3.0.13
  kiwisolver==1.4.7
- langchain==0.3.0
- langchain-core==0.3.5
- langchain-openai==0.2.0
+ langchain==0.3.1
+ langchain-core==0.3.7
+ langchain-openai==0.2.1
  langchain-qdrant==0.1.4
  langchain-text-splitters==0.3.0
- langgraph==0.2.23
- langgraph-checkpoint==1.0.10
- langsmith==0.1.125
+ langgraph==0.2.31
+ langgraph-checkpoint==1.0.14
+ langsmith==0.1.129
+ Levenshtein==0.26.0
  markdown-it-py==3.0.0
  MarkupSafe==2.1.5
  matplotlib==3.9.2
@@ -89,6 +96,7 @@ mdurl==0.1.2
  mistune==3.0.2
  msgpack==1.1.0
  multidict==6.1.0
+ narwhals==1.9.0
  nbclient==0.10.0
  nbconvert==7.16.4
  nbformat==5.10.4
@@ -96,7 +104,7 @@ nest-asyncio==1.6.0
  notebook==7.2.2
  notebook_shim==0.2.4
  numpy==1.26.4
- openai==1.47.0
+ openai==1.50.2
  orjson==3.10.7
  overrides==7.7.0
  packaging==24.1
@@ -108,46 +116,53 @@ pillow==10.4.0
  platformdirs==4.3.6
  portalocker==2.10.1
  prometheus_client==0.21.0
- prompt_toolkit==3.0.47
+ prompt_toolkit==3.0.48
  protobuf==5.28.2
  psutil==6.0.0
  ptyprocess==0.7.0
  pure_eval==0.2.3
+ pyarrow==17.0.0
  pycparser==2.22
  pydantic==2.9.2
  pydantic_core==2.23.4
+ pydeck==0.9.1
  pydub==0.25.1
  Pygments==2.18.0
  pyparsing==3.1.4
  python-dateutil==2.9.0.post0
  python-dotenv==1.0.1
  python-json-logger==2.0.7
- python-multipart==0.0.10
+ python-Levenshtein==0.26.0
+ python-multipart==0.0.12
  pytz==2024.2
  PyYAML==6.0.2
  pyzmq==26.2.0
- qdrant-client==1.11.2
+ qdrant-client==1.11.3
+ RapidFuzz==3.10.0
  referencing==0.35.1
  regex==2024.9.11
  requests==2.32.3
  rfc3339-validator==0.1.4
  rfc3986-validator==0.1.1
- rich==13.8.1
+ rich==13.9.1
  rpds-py==0.20.0
- ruff==0.6.7
+ ruff==0.6.8
  semantic-version==2.10.0
  Send2Trash==1.8.3
  shellingham==1.5.4
  six==1.16.0
+ smmap==5.0.1
  sniffio==1.3.1
  soupsieve==2.6
  SQLAlchemy==2.0.35
  stack-data==0.6.3
- starlette==0.38.5
+ starlette==0.38.6
+ streamlit==1.38.0
  tenacity==8.5.0
  terminado==0.18.1
  tiktoken==0.7.0
  tinycss2==1.3.0
+ toml==0.10.2
  tomlkit==0.12.0
  tornado==6.4.1
  tqdm==4.66.5
@@ -155,14 +170,14 @@ traitlets==5.14.3
  typer==0.12.5
  types-python-dateutil==2.9.0.20240906
  typing_extensions==4.12.2
- tzdata==2024.1
+ tzdata==2024.2
  uri-template==1.3.0
  urllib3==2.2.3
- uvicorn==0.30.6
+ uvicorn==0.31.0
  wcwidth==0.2.13
  webcolors==24.8.0
  webencodings==0.5.1
  websocket-client==1.8.0
  websockets==12.0
  widgetsnbextension==4.0.13
- yarl==1.11.1
+ yarl==1.13.1
src/passage_finder.py CHANGED
@@ -51,7 +51,7 @@ class ToolManager:

      def add_tools(self):
          @tool
-         def vector_search(query: str, k: int = 5) -> list[Document]:
+         def vector_search(query: str, k: int = 10) -> list[Document]:
              """Useful for simple queries. This tool will search a vector database for passages from the teachings of Paramhansa Yogananda and other publications from the Self Realization Fellowship (SRF).
              The user has the option to specify the number of passages they want the search to return, otherwise the number of passages will be set to the default value."""
              retriever = self.vectorstore.as_retriever(search_kwargs={"k": k})
@@ -59,7 +59,7 @@ class ToolManager:
              return documents

          @tool
-         def multiple_query_vector_search(query: str, k: int = 5) -> list[Document]:
+         def multiple_query_vector_search(query: str, k: int = 10) -> list[Document]:
              """Useful when the user's query is vague, complex, or involves multiple concepts.
              This tool will write multiple versions of the user's query and search the vector database for relevant passages.
              Use this tool when the user asks for an in depth answer to their question."""
@@ -102,7 +102,7 @@ class BasicToolNode:

  # Create the Pydantic Model for the quote finder
  class Quote(BaseModel):
-     '''Most relevant quotes to the user's query strictly pulled verbatim from the context provided.'''
+     '''Most relevant quotes to the user's query strictly pulled verbatim from the context provided. Quotes can be up to three sentences long.'''
      quote: str

  class QuoteList(BaseModel):
@@ -147,7 +147,7 @@ class QuoteFinder:
          # Replace the matched text with highlighted version
          highlighted_content = re.sub(
              escaped_match,
-             f"<b>{best_match}</b>",
+             f"<mark>{best_match}</mark>",
              highlighted_content,
              flags=re.IGNORECASE
          )
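
To show the effect of this change in isolation, a small self-contained sketch of the highlighting step: the fuzzy matching that produces best_match in passage_finder.py is simplified to a hard-coded quote, and the passage text is made up for the example.

import re

passage = "Be calmly active and actively calm. Then you will be a master of yourself."
best_match = "be calmly active and actively calm."  # hypothetical matched quote, possibly differing in case

escaped_match = re.escape(best_match)  # escape any regex metacharacters in the quote
highlighted_content = re.sub(
    escaped_match,
    f"<mark>{best_match}</mark>",  # note: the replacement keeps best_match's own casing, as in the code above
    passage,
    flags=re.IGNORECASE,
)
print(highlighted_content)
# <mark>be calmly active and actively calm.</mark> Then you will be a master of yourself.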