nadaaaita committed on
Commit b72c161 · 1 Parent(s): dfbf21d

cleaned up repo

Files changed (4)
  1. app2.py +0 -120
  2. app_multi_tab.py +0 -107
  3. example_app.py +0 -63
  4. src/srf_bot.py +12 -8
app2.py DELETED
@@ -1,120 +0,0 @@
-import gradio as gr
-import src.srf_bot as sb
-import prompts.system_prompts as sp
-from langchain_core.messages import HumanMessage
-
-# Initialize the chatbot
-chatbot = sb.SRFChatbot()
-
-# Dictionary to store passages with identifiers
-retrieved_passages = {}
-
-# Define the respond function
-def respond(query, history):
-    formatted_query = [HumanMessage(content=query)]
-    # Invoke the chatbot
-    result = chatbot.graph.invoke({"messages": formatted_query}, chatbot.config)
-
-    # Extract the assistant's response
-    response = result["messages"][-1].content
-
-    # Retrieve passages from your vector database based on the query
-    # For the example, we'll use dummy passages
-    passages = [
-        "This is the full text of Passage 1.",
-        "This is the full text of Passage 2.",
-        "This is the full text of Passage 3."
-    ]
-
-    # Store passages with identifiers
-    passage_ids = []
-    for idx, passage in enumerate(passages):
-        identifier = f"Passage {idx+1}"
-        retrieved_passages[identifier] = passage
-        passage_ids.append(identifier)
-
-    # Reference passages in the response
-    linked_response = f"{response}\n\nReferences:"
-    for pid in passage_ids:
-        linked_response += f" [{pid}]"
-
-    # Append to history
-    history.append((query, linked_response))
-    return history, ""
-
-# Function to get passage content based on selection
-def get_passage_content(passage_id):
-    return retrieved_passages.get(passage_id, "Passage not found.")
-
-# Function to update the system prompt
-def update_system_prompt(selected_prompt):
-    # Update the chatbot's system prompt
-    chatbot.reset_system_prompt(selected_prompt)
-    # Update the displayed system prompt text
-    return sp.system_prompt_templates[selected_prompt]
-
-# Gradio interface
-with gr.Blocks() as demo:
-    gr.Markdown("# SRF Chatbot")
-
-    with gr.Row():
-        with gr.Column(scale=4):
-            # Chatbot interface
-            chatbot_output = gr.Chatbot()
-            user_input = gr.Textbox(placeholder="Type your question here...", label="Your Question")
-            submit_button = gr.Button("Submit")
-
-        with gr.Column(scale=1):
-            # Dropdown to select system prompts
-            system_prompt_dropdown = gr.Dropdown(
-                choices=list(sp.system_prompt_templates.keys()),
-                label="Select Chatbot Instructions",
-                value=list(sp.system_prompt_templates.keys())[0]
-            )
-            # Display the selected system prompt
-            system_prompt_display = gr.Textbox(
-                value=sp.system_prompt_templates[list(sp.system_prompt_templates.keys())[0]],
-                label="Current Chatbot Instructions",
-                lines=5,
-                interactive=False
-            )
-
-    # Update system prompt display when a new prompt is selected
-    system_prompt_dropdown.change(
-        fn=update_system_prompt,
-        inputs=[system_prompt_dropdown],
-        outputs=[system_prompt_display]
-    )
-
-    # Passage selection and display
-    gr.Markdown("### References")
-    passage_selector = gr.Dropdown(label="Select a passage to view", choices=[])
-    passage_display = gr.Markdown()
-
-    # Update the chatbot when the submit button is clicked
-    submit_button.click(
-        fn=respond,
-        inputs=[user_input, chatbot_output],
-        outputs=[chatbot_output, user_input]
-    )
-
-    # Update the passage selector options when the chatbot output changes
-    def update_passage_selector(chat_history):
-        # Get the latest passages
-        choices = list(retrieved_passages.keys())
-        return gr.update(choices=choices)
-
-    chatbot_output.change(
-        fn=update_passage_selector,
-        inputs=[chatbot_output],
-        outputs=[passage_selector]
-    )
-
-    # Display the selected passage
-    passage_selector.change(
-        fn=get_passage_content,
-        inputs=[passage_selector],
-        outputs=[passage_display]
-    )
-
-demo.launch()
 
app_multi_tab.py DELETED
@@ -1,107 +0,0 @@
-import gradio as gr
-from prompts.system_prompts import get_systemprompt, system_prompt_templates
-
-# Define some pre-written templates for Tab 1
-templates = {
-    "Friendly Chatbot": "You are a helpful, friendly chatbot that engages in light-hearted conversations.",
-    "Technical Assistant": "You are a technical assistant specialized in answering questions related to Python programming.",
-    "Nutrition Advisor": "You provide evidence-based advice on nutrition and healthy eating habits.",
-}
-
-# Define some agentic workflows for Tab 2
-agentic_workflows = {
-    "Blog Post Generator": "This agent is designed to help generate a blog post based on user input.",
-    "Document Summarizer": "This agent summarizes long documents by extracting key points.",
-    "Task Manager": "This agent helps you organize tasks and provides step-by-step guidance."
-}
-
-# Chatbot logic for custom instructions or pre-written templates
-def chatbot_response(system_instructions, user_query):
-    if "friendly" in system_instructions.lower():
-        return f"Friendly Chatbot says: Hi there! 😊 How can I assist you today?"
-    elif "technical" in system_instructions.lower():
-        return f"Technical Assistant says: Sure! Here's some information on Python: {user_query}"
-    elif "nutrition" in system_instructions.lower():
-        return f"Nutrition Advisor says: Here's some advice about healthy eating: {user_query}"
-    else:
-        return f"Custom Chatbot says: {user_query}"
-
-# Chatbot conversation function
-def chatbot_conversation(system_instructions, chat_history, user_query):
-    response = chatbot_response(system_instructions, user_query)
-    chat_history.append((user_query, response))
-    return chat_history, ""
-
-# Chatbot conversation for predefined agentic workflows
-def agentic_chatbot_conversation(workflow_instructions, chat_history, user_query):
-    response = f"Agent Workflow ({workflow_instructions}) says: {user_query}"
-    chat_history.append((user_query, response))
-    return chat_history, ""
-
-# Function to update the interface when a selection is made from the dropdown (Tab 1)
-def update_interface(template_name, custom_instructions):
-    if template_name == "Custom Instructions":
-        return gr.update(visible=True), gr.update(visible=False)
-    else:
-        template_content = templates.get(template_name, "")
-        return gr.update(visible=False), gr.update(visible=True, value=template_content)
-
-# Build the Gradio interface with Tabs
-with gr.Blocks(css=".gradio-container {background-color: #F0F0F0;} .gr-button {background-color: #1E3A8A; color: white;} .gr-textbox textarea {font-size: 16px;} .gr-markdown {font-size: 18px; color: #1E3A8A;}") as demo:
-
-    # Add Tabs
-    with gr.Tabs():
-
-        # Tab 1: Custom Instructions or Pre-Written Templates
-        with gr.Tab("Custom Instructions Chatbot"):
-            gr.Markdown("""
-            <div style='background-color:#E0E0E0; padding: 20px; border-radius: 10px;'>
-                <h1 style='text-align: center; color: #1E3A8A;'>SRF Innovation Labs - AI Chatbot Use Case Explorer</h1>
-                <p style='font-size: 18px; text-align: center; color: #1E3A8A;'>
-                    This tool allows you to experiment with different system prompts,
-                    giving you control over how the chatbot behaves. You can either use pre-defined templates
-                    or write your own custom instructions.
-                </p>
-            </div>
-            """)
-
-            # Section to select system instructions from dropdown
-            gr.Markdown("<h2 style='color: #1E3A8A;'>Chatbot Setup</h2>")
-            template_name = gr.Dropdown(choices=["Custom Instructions"] + list(templates.keys()), label="Choose Instructions", value="Friendly Chatbot")
-            custom_instructions = gr.Textbox(label="Custom Instructions", visible=False, placeholder="Write your own instructions here...")
-            template_display = gr.Textbox(label="Template Content", interactive=False, visible=True)
-
-            # Chatbot interface
-            gr.Markdown("<h2 style='color: #1E3A8A;'>Chatbot Interaction</h2>")
-            chatbot = gr.Chatbot(label="Chatbot Conversation", height=300, show_label=False)
-            user_query = gr.Textbox(label="Your Query", placeholder="Ask a question or say something to the chatbot...")
-            submit_button = gr.Button("Send", elem_classes=["gr-button"])
-
-            # Update logic for Tab 1
-            template_name.change(fn=update_interface, inputs=[template_name, custom_instructions], outputs=[custom_instructions, template_display])
-            submit_button.click(fn=chatbot_conversation, inputs=[custom_instructions if template_name == "Custom Instructions" else template_display, chatbot, user_query], outputs=[chatbot, user_query])
-
-        # Tab 2: Predefined Agentic Workflows
-        with gr.Tab("Agentic Workflow Chatbots"):
-            gr.Markdown("""
-            <div style='background-color:#E0E0E0; padding: 20px; border-radius: 10px;'>
-                <h1 style='text-align: center; color: #1E3A8A;'>Agentic Workflow Explorer</h1>
-                <p style='font-size: 18px; text-align: center; color: #1E3A8A;'>
-                    Explore predefined agentic workflows that execute specific tasks, such as generating blog posts,
-                    summarizing documents, or managing tasks.
-                </p>
-            </div>
-            """)
-
-            # Dropdown for selecting agentic workflows
-            workflow_name = gr.Dropdown(choices=list(agentic_workflows.keys()), label="Choose Agent Workflow", value="Blog Post Generator")
-            workflow_display = gr.Textbox(label="Workflow Description", interactive=False, visible=True)
-            workflow_chatbot = gr.Chatbot(label="Agent Workflow Conversation", height=300, show_label=False)
-            workflow_user_query = gr.Textbox(label="Your Query", placeholder="Ask the agent to perform a task...")
-            workflow_submit_button = gr.Button("Send", elem_classes=["gr-button"])
-
-            # Chatbot interaction for agentic workflows
-            workflow_submit_button.click(fn=agentic_chatbot_conversation, inputs=[workflow_name, workflow_chatbot, workflow_user_query], outputs=[workflow_chatbot, workflow_user_query])
-
-# Launch the app
-demo.launch()
 
example_app.py DELETED
@@ -1,63 +0,0 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
 
src/srf_bot.py CHANGED
@@ -102,9 +102,9 @@ class SRFChatbot:
     ):
         # Initialize the LLM and the system message
        ## THIS SHOULD BE REFACTORED AS THERE IS REPEITITION OF THE CODE IN RESET SYSTEM PROMPT TOO
-        self.chatbot_instructions_dropdown = chatbot_instructions_dropdown
-        self.chatbot_instructions = sp.system_prompt_templates[self.chatbot_instructions_dropdown]
-        self.system_message = SystemMessage(content=self.chatbot_instructions)
+        # self.chatbot_instructions_dropdown = chatbot_instructions_dropdown
+        # self.chatbot_instructions = sp.system_prompt_templates[self.chatbot_instructions_dropdown]
+        # self.system_message = SystemMessage(content=self.chatbot_instructions)
 
         self.llm = ChatOpenAI(model=model, temperature=temperature)
         self.tools = ToolManager().get_tools()
@@ -113,7 +113,7 @@
         # Build the graph
         self.graph = self.build_graph()
         # Get the configurable
-        self.config = self.get_configurable()
+        self.reset_system_prompt(chatbot_instructions_dropdown)
 
     def reset_system_prompt(self, chatbot_instructions_dropdown: str):
         # Update the dropdown
@@ -122,9 +122,13 @@
         self.chatbot_instructions = sp.system_prompt_templates[self.chatbot_instructions_dropdown]
         # Reset the system prompt
         self.system_message = SystemMessage(content=self.chatbot_instructions)
-        # Reset the configurable
+        # Get the configurable
         self.config = self.get_configurable()
-        return self.chatbot_instructions
+        # Reset the system prompt info in the graph
+        self.graph.update_state(values={"system_message": self.system_message, "system_message_dropdown": self.chatbot_instructions_dropdown},
+                                config=self.config)
+
+        # return self.chatbot_instructions
 
     def get_configurable(self):
         # This thread id is used to keep track of the chatbot's conversation
@@ -137,8 +141,8 @@
    # Add the system message onto the llm
    ## THIS SHOULD BE REFACTORED SO THAT THE STATE ALWAYS HAS THE DEFINITIVE SYSTEM MESSAGE THAT SHOULD BE IN USE
    def chatbot(self, state: AgentState):
-        messages = [self.system_message] + state["messages"]
-        return {"messages": [self.llm_with_tools.invoke(messages)], "system_message": self.system_message, "system_message_dropdown": self.chatbot_instructions_dropdown}
+        messages = [state['system_message']] + state["messages"]
+        return {"messages": [self.llm_with_tools.invoke(messages)]}
 
    def build_graph(self):
        # Add chatbot state
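
With this change, the system prompt lives in the graph state rather than only on the chatbot object: __init__ now delegates to reset_system_prompt(), which writes system_message and system_message_dropdown into the thread state via graph.update_state(), and the chatbot node reads state['system_message'] when assembling the prompt. A minimal usage sketch of the resulting flow (invocation pattern taken from the deleted app code above; the template key is simply the first one in prompts.system_prompts, not a specific recommendation):

# Minimal sketch of driving the refactored SRFChatbot (assumes the modules above).
import src.srf_bot as sb
import prompts.system_prompts as sp
from langchain_core.messages import HumanMessage

bot = sb.SRFChatbot()  # __init__ now calls reset_system_prompt() internally

# Switching instructions mid-session updates the object and the graph state,
# so the chatbot node picks the new prompt up from state["system_message"].
bot.reset_system_prompt(list(sp.system_prompt_templates.keys())[0])

result = bot.graph.invoke({"messages": [HumanMessage(content="Hello")]}, bot.config)
print(result["messages"][-1].content)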