samuelemarro committed
Commit c07f594 · 1 Parent(s): 7b2f713

Added cost tracking.
toolformers/camel.py CHANGED
@@ -12,6 +12,19 @@ from camel.agents import ChatAgent
 from camel.toolkits.function_tool import FunctionTool
 from camel.configs.openai_config import ChatGPTConfig
 
+from utils import register_cost
+
+COSTS = {
+    'gpt-4o': {
+        'prompt_tokens': 2.5e-6,
+        'completion_tokens': 10e-6
+    },
+    'gpt-4o-mini': {
+        'prompt_tokens': 0.15e-6,
+        'completion_tokens': 0.6e-6
+    }
+}
+
 class CamelConversation(Conversation):
     def __init__(self, toolformer, agent, category=None):
         self.toolformer = toolformer
@@ -32,6 +45,14 @@ class CamelConversation(Conversation):
 
         response = self.agent.step(formatted_message)
 
+        if response.info.get('usage', None) is not None:
+            usage_data = response.info['usage']
+
+            total = 0
+            for cost_name in ['prompt_tokens', 'completion_tokens']:
+                total += COSTS[str(self.toolformer.model_type)][cost_name] * usage_data[cost_name]
+            register_cost(self.category, total)
+
         reply = response.msg.content
 
         if print_output:
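The COSTS values are USD per token, so 2.5e-6 for gpt-4o prompt tokens corresponds to $2.50 per million tokens. A standalone sketch of the per-call computation above, outside the commit; the usage numbers are invented for illustration:

# Sketch of the cost computation in CamelConversation.chat (not part of the commit).
# Prices are USD per token: 2.5e-6 == $2.50 per million prompt tokens.
COSTS = {
    'gpt-4o': {'prompt_tokens': 2.5e-6, 'completion_tokens': 10e-6},
}

# Hypothetical usage payload, shaped like the 'usage' dict in response.info.
usage_data = {'prompt_tokens': 1200, 'completion_tokens': 350}

total = sum(COSTS['gpt-4o'][k] * usage_data[k] for k in ('prompt_tokens', 'completion_tokens'))
print(f"${total:.6f}")  # 1200 * 2.5e-6 + 350 * 10e-6 = $0.006500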
toolformers/gemini.py CHANGED
@@ -10,8 +10,17 @@ from toolformers.base import Conversation, Tool, Toolformer
 import google.generativeai as genai
 from google.generativeai.generative_models import ChatSession
 
+from utils import register_cost
+
 genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
 
+COSTS = {
+    'gemini-1.5-pro': {
+        'prompt_tokens': 1.25e-6,
+        'completion_tokens': 5e-6
+    }
+}
+
 class GeminiConversation(Conversation):
     def __init__(self, model_name, chat_agent : ChatSession, category=None):
         self.model_name = model_name
@@ -60,6 +69,12 @@ class GeminiConversation(Conversation):
             'completion_tokens': response.usage_metadata.candidates_token_count
         }
 
+        total_cost = 0
+        for cost_name in ['prompt_tokens', 'completion_tokens']:
+            total_cost += COSTS[self.model_name][cost_name] * usage_info[cost_name]
+
+        register_cost(self.category, total_cost)
+
         #send_usage_to_db(
         #    usage_info,
         #    time_start,
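The Gemini SDK reports token counts under different names than the COSTS keys: the diff maps candidates_token_count to completion_tokens (and, presumably, the prompt-side count just above the hunk to prompt_tokens) before pricing. A minimal sketch of that renaming plus pricing, with a plain dict standing in for response.usage_metadata and invented counts:

# Stand-in for response.usage_metadata; the real object exposes these as attributes.
usage_metadata = {'prompt_token_count': 2000, 'candidates_token_count': 500}

usage_info = {
    'prompt_tokens': usage_metadata['prompt_token_count'],
    'completion_tokens': usage_metadata['candidates_token_count'],
}

COSTS = {'gemini-1.5-pro': {'prompt_tokens': 1.25e-6, 'completion_tokens': 5e-6}}
total_cost = sum(COSTS['gemini-1.5-pro'][k] * usage_info[k] for k in usage_info)
print(round(total_cost, 6))  # 2000 * 1.25e-6 + 500 * 5e-6 = 0.005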
toolformers/langchain_agent.py CHANGED
@@ -15,10 +15,23 @@ from langchain_core.tools import tool as function_to_tool
 
 from toolformers.base import StringParameter, Toolformer, Conversation
 
+from utils import register_cost
 
 
+COSTS = {
+    'claude-3-5-sonnet-latest': {
+        'input_tokens': 3e-6,
+        'output_tokens': 15e-6
+    },
+    'claude-3-5-haiku-latest': {
+        'input_tokens': 1e-6,
+        'output_tokens': 5e-6
+    }
+}
+
 class LangChainConversation(Conversation):
-    def __init__(self, agent, messages, category=None):
+    def __init__(self, model_name, agent, messages, category=None):
+        self.model_name = model_name
         self.agent = agent
         self.messages = messages
         self.category = category
@@ -26,6 +39,9 @@ class LangChainConversation(Conversation):
     def chat(self, message, role='user', print_output=True) -> str:
         self.messages.append(HumanMessage(content=message))
         final_message = ''
+
+        aggregate = None
+
         for chunk in self.agent.stream({"messages": self.messages}, stream_mode="values"):
             print(chunk)
             print("----")
@@ -38,8 +54,18 @@ class LangChainConversation(Conversation):
             for content_chunk in content:
                 if isinstance(content_chunk, str):
                     final_message += content_chunk
+
+            aggregate = chunk if aggregate is None else (aggregate + chunk)
             #final_message += chunk['agent']['messages'].content
 
+        total_cost = 0
+        for message in aggregate['messages']:
+            if isinstance(message, AIMessage):
+                for cost_name in ['input_tokens', 'output_tokens']:
+                    total_cost += COSTS[self.model_name][cost_name] * message.usage_metadata[cost_name]
+
+        register_cost(self.category, total_cost)
+
         self.messages.append(AIMessage(content=final_message))
         #print(final_message)
 
@@ -55,7 +81,7 @@ class LangChainAnthropicToolformer(Toolformer):
         model = ChatAnthropic(model_name=self.model_name, api_key=self.api_key)
         agent_executor = create_react_agent(model, tools)
 
-        return LangChainConversation(agent_executor, [SystemMessage(prompt)], category)
+        return LangChainConversation(self.model_name, agent_executor, [SystemMessage(prompt)], category)
 
 
 #weather_tool = AgoraTool("WeatherForecastAPI", "A simple tool that returns the weather", [StringParameter(
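For the LangChain path the cost is derived only after streaming finishes: the chunks are aggregated and the usage_metadata of every AIMessage is summed against the per-token prices. A minimal sketch of that summation, using plain dicts as stand-ins for the AIMessage usage_metadata records (the token counts are invented):

# Sketch of the post-stream cost summation (stand-ins, not LangChain objects).
COSTS = {'claude-3-5-sonnet-latest': {'input_tokens': 3e-6, 'output_tokens': 15e-6}}

# Each entry mimics one AIMessage's usage_metadata.
ai_usage = [
    {'input_tokens': 900, 'output_tokens': 150},
    {'input_tokens': 1100, 'output_tokens': 400},
]

total_cost = 0
for usage in ai_usage:
    for cost_name in ('input_tokens', 'output_tokens'):
        total_cost += COSTS['claude-3-5-sonnet-latest'][cost_name] * usage[cost_name]

print(round(total_cost, 6))  # (900 + 1100) * 3e-6 + (150 + 400) * 15e-6 = 0.01425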
toolformers/sambanova/core.py CHANGED
@@ -5,6 +5,14 @@ from typing import List
 from toolformers.base import Conversation, Toolformer, Tool
 from toolformers.sambanova.function_calling import FunctionCallingLlm
 
+
+COSTS = {
+    'llama3-405b': {
+        'prompt_tokens': 5e-6,
+        'completion_tokens': 10e-6
+    }
+}
+
 class SambanovaConversation(Conversation):
     def __init__(self, model_name, function_calling_llm : FunctionCallingLlm, category=None):
         self.model_name = model_name
@@ -26,8 +34,11 @@ class SambanovaConversation(Conversation):
         print('Usage data:', usage_data)
         if print_output:
             print(response)
-
-        #send_usage_to_db(usage_data, start_time, end_time, agent_id, self.category, self.model_name)
+
+        cost = 0
+
+        for cost_name in ['prompt_tokens', 'completion_tokens']:
+            cost += COSTS[self.model_name][cost_name] * usage_data[cost_name]
 
         return response
 
utils.py CHANGED
@@ -1,6 +1,8 @@
 import base64
 import hashlib
 
+import contextlib
+
 from functools import wraps
 from typing import Callable, Any
 from inspect import signature, Parameter, Signature
@@ -77,4 +79,20 @@ def add_params_and_annotations(name: str, description: str, params: dict, return
         print(docstring)
 
         return wrapper
-    return decorator
+    return decorator
+
+_cost_tracker = {}
+
+@contextlib.contextmanager
+def use_cost_tracker():
+    _cost_tracker.clear()
+    _cost_tracker['conversation'] = 0
+    _cost_tracker['negotiation'] = 0
+    _cost_tracker['programming'] = 0
+    yield
+
+def register_cost(category, cost):
+    _cost_tracker[category] += cost
+
+def get_costs():
+    return _cost_tracker
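Taken together, the helpers added to utils.py are meant to wrap a run in use_cost_tracker(): the toolformers call register_cost under one of the three preset categories, and the accumulated totals can be read back with get_costs(). A minimal usage sketch, assuming the utils module from this commit is importable; the cost values are illustrative:

from utils import use_cost_tracker, register_cost, get_costs

with use_cost_tracker():
    # Toolformers call register_cost(category, cost) internally; values here are made up.
    register_cost('conversation', 0.0065)
    register_cost('programming', 0.0012)

    print(get_costs())  # {'conversation': 0.0065, 'negotiation': 0, 'programming': 0.0012}

Since the context manager only resets the dict on entry, get_costs() can also be read after the with block ends.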