import os
import re
import subprocess
import sys

import openai
import pandas as pd
import streamlit as st
from PIL import Image  # noqa: F401 -- kept from original file; not referenced directly

# Load the dataset once at startup and capture lightweight schema metadata;
# the metadata is injected into the system prompt so the model knows the
# columns/dtypes without seeing the data itself.
df = pd.read_csv("./Financial_Sample.csv")
metadata = {
    "columns": df.columns.tolist(),
    "dtypes": df.dtypes.apply(lambda x: x.name).to_dict(),
    "shape": df.shape,
}


def runcode(code: str) -> str:
    """Write *code* to ``code.py`` and execute it in a subprocess.

    Returns a human-readable string combining a success banner with stdout,
    or the captured stdout/stderr on failure, so the agent can reason over
    the observation.

    SECURITY NOTE: this executes LLM-generated Python with the privileges of
    the app process. Only run in a trusted/sandboxed environment.
    """
    with open("code.py", "w") as file:
        file.write(code)
    try:
        # sys.executable guarantees the same interpreter (and installed
        # packages) as the running app; a bare "python" could resolve to a
        # different interpreter on PATH.
        result = subprocess.run(
            [sys.executable, "code.py"],
            capture_output=True,
            text=True,
            check=True,
            timeout=120,  # keep a runaway generated script from hanging the UI
        )
        output = "Output Generated Successfully" + "\n" + result.stdout
    except subprocess.CalledProcessError as e:
        output = f"An error occurred while running the code:\nSTDOUT:\n{e.stdout}\nSTDERR:\n{e.stderr}"
    except subprocess.TimeoutExpired as e:
        output = f"An unexpected error occurred: {str(e)}"
    except Exception as e:
        output = f"An unexpected error occurred: {str(e)}"
    return output


class Agent:
    """Minimal ReAct-style agent: Thought -> Action -> PAUSE -> Observation loop.

    The model is stopped at the literal token "PAUSE"; the action line is then
    parsed out of the reply, executed via ``known_actions``, and the result is
    fed back as an Observation.
    """

    def __init__(self, system_prompt="", known_actions=None):
        self.system = system_prompt
        self.messages = []
        # Avoid a shared mutable default: each Agent gets its own dict.
        self.known_actions = known_actions if known_actions is not None else {}
        # Together exposes an OpenAI-compatible endpoint, so the openai client
        # works with a swapped base_url.
        self.client = openai.OpenAI(
            api_key=os.environ.get("TOGETHER_API_KEY"),
            base_url="https://api.together.xyz/v1",
        )
        self.messages.append({"role": "system", "content": self.system})

    def __call__(self, message):
        """Send one user turn, record both sides of the exchange, return the reply."""
        self.messages.append({"role": "user", "content": message})
        result = self.execute()
        self.messages.append({"role": "assistant", "content": result})
        return result

    def execute(self):
        """Run one chat completion over the accumulated messages.

        Returns the assistant text, or an error string on API failure so the
        loop in ``query`` degrades gracefully instead of crashing.
        """
        try:
            response = self.client.chat.completions.create(
                model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
                stop=["PAUSE"],  # halt generation right before the action runs
                messages=self.messages,
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Error executing model: {str(e)}"

    def query(self, question, max_turns=5):
        """Drive the Thought/Action/Observation loop for up to *max_turns* rounds.

        Appends a transcript to ``st.session_state.logs``. Raises on an action
        name the agent does not know (surfaced to the user by the caller).
        """
        i = 0
        next_prompt = question
        while i < max_turns:
            i += 1
            result = self(next_prompt)
            st.session_state.logs += f"Assistant: {result}\n"
            # DOTALL so multi-line code after "Action: runcode:" is captured.
            action_re = re.search(r"Action: (\w+): (.*)", result, re.DOTALL)
            if action_re:
                action = action_re.group(1)
                action_input = action_re.group(2).strip()
                st.session_state.logs += (
                    f"Action: {action}\nAction Input: {action_input}\n"
                )
                if action not in self.known_actions:
                    raise Exception(f"Unknown action: {action}: {action_input}")
                st.session_state.logs += (
                    f" ------------------------\n running {action} {action_input}\n"
                )
                observation = self.known_actions[action](action_input)
                st.session_state.logs += f"Observation: {observation}\n"
                next_prompt = f"Observation: {observation}"
            else:
                # No action requested: the model produced its final Answer.
                return


known_actions = {"runcode": runcode}

prompt = f"""
You are an expert in writing python code based on user request and you run in a loop of Thought, Action, PAUSE, Observation.
At the end of the loop you output an Answer
Use Thought to describe your thoughts about the question you have been asked.
Use Action to run one of the actions available to you - then return PAUSE.
Observation will be the result of running those actions.
for Graphing library use Plotly and save the plot as graph.html
Always save generated plot as 'graph.html'
Always return just code no need of ```
Your Task is help user get result of query about below dataset,Decide based on user query to make Plot or Just Textual Answer.
Here is the metadata of the dataset and name of dataset is Financial_Sample.csv:
Columns: {metadata['columns']}
Dtypes: {metadata['dtypes']}
Shape: {metadata['shape']}
You can use this metadata to generate results.
Your available actions are:
runcode
How to use actions
Action : action_name: input_to_action
if input_to_action is code then don't use ``` just write code
Example session:
Question: Get the average age of the customers
Thought: I need to run a query to get the average age of the customers
Action: runcode: import pandas as pd
df=pd.read_csv('./Financial_Sample.csv')
age = df['age'].mean()
print(age)
PAUSE
Observation : understand the output based its stdout and take necessary steps.
Answer: Final Answer for User Request if its graph send "Graph Generated and its Graph Interpretation" or Textual Answer "Your Interpretation of Answer"
""".strip()

bot = Agent(system_prompt=prompt, known_actions=known_actions)

st.set_page_config(layout="wide")
st.title("Customer Data Analysis")

query = st.text_area("Enter your query about the dataset:", height=100)

# Sidebar: static dataset overview so the user can phrase queries correctly.
st.sidebar.title("Dataset Metadata")
st.sidebar.write("Columns:", metadata["columns"])
st.sidebar.write("Dtypes:", metadata["dtypes"])
st.sidebar.write("Shape:", metadata["shape"])
st.sidebar.write("Sample Data:")
st.sidebar.write(df.head())

col1, col2 = st.columns(2)

# Logs persist across Streamlit reruns via session_state.
if "logs" not in st.session_state:
    st.session_state.logs = ""

with col1:
    if st.button("Submit"):
        if not query:
            st.error("Please enter a query.")
        else:
            # Drop any plot from a previous run so col2 doesn't show stale output.
            if os.path.exists("graph.html"):
                os.remove("graph.html")
            st.session_state.logs = ""
            with st.spinner("Generating response..."):
                try:
                    bot.query(query)
                except Exception as e:
                    # Surface agent failures (e.g. unknown action) in the UI
                    # instead of crashing the Streamlit run mid-spinner.
                    st.error(f"Agent error: {e}")
            # The final model turn is "Answer: ..."; show everything after it.
            answer_match = re.search(r"Answer:", st.session_state.logs)
            if answer_match:
                answer_end_index = answer_match.end()
                subsequent_text = st.session_state.logs[answer_end_index:].strip()
                st.success(subsequent_text)

with col2:
    st.header("Output")
    if os.path.exists("graph.html"):
        # Context manager closes the handle (the original leaked it).
        with open("graph.html", "r") as f:
            st.components.v1.html(f.read(), height=600)
    else:
        st.write("No plot generated yet.")

with st.expander("Logs"):
    st.code(st.session_state.logs)