# NOTE: file extracted from a Hugging Face Space file viewer
# (commit 3cad23b, 1,678 bytes); viewer chrome removed.
from huggingface_hub import InferenceClient
from transformers import ReactCodeAgent
from transformers import tool as function_to_tool
from toolformers.base import Conversation, Toolformer
class HuggingFaceConversation(Conversation):
    """A multi-turn conversation driven by a ``ReactCodeAgent``.

    The transcript is kept as a list of ``(role, text)`` pairs and is
    replayed in full as context on every call to :meth:`chat`.
    """

    def __init__(self, agent: ReactCodeAgent, prompt, category=None):
        """Create a conversation.

        Args:
            agent: The ReAct code agent that produces replies.
            prompt: System prompt; stored as the first transcript entry.
            category: Optional label for this conversation (opaque here).
        """
        self.agent = agent
        # Transcript of (role, text) pairs; the system prompt is entry 0.
        self.messages = [('system', prompt)]
        self.category = category

    def chat(self, message, role='user', print_output=True) -> str:
        """Append *message* to the transcript and return the agent's reply.

        Args:
            message: Text of the new message.
            role: Role to record for the message (default ``'user'``).
            print_output: If True, echo the agent's reply to stdout.

        Returns:
            The agent's response string.
        """
        self.messages.append((role, message))

        # Rebuild the entire conversation into one prompt each turn; the
        # loop variables are named so they do not shadow the parameters.
        final_prompt = 'For context, here are the previous messages in the conversation:\n\n'
        for entry_role, entry_text in self.messages:
            final_prompt += f'{entry_role.capitalize()}: {entry_text}\n'
        final_prompt += "Don't worry, you don't need to use the same format to reply. Stick with the Task:/Action:/etc. format.\n\n"

        response = self.agent.run(final_prompt)

        # NOTE(review): the agent's reply is NOT appended to self.messages,
        # so later turns will not see it — confirm whether that is intended.

        # Fix: honor print_output instead of printing unconditionally.
        if print_output:
            print(response)

        return response
class HuggingFaceToolformer(Toolformer):
    """Toolformer backed by the Hugging Face Inference API.

    Wraps an ``InferenceClient`` as the LLM engine for ``ReactCodeAgent``
    instances, one agent per conversation.
    """

    def __init__(self, model_name, max_tokens=2000):
        """Create a toolformer for the given hosted model.

        Args:
            model_name: Model id passed to ``InferenceClient``.
            max_tokens: Per-completion token cap (default 2000).
        """
        self.model = InferenceClient(model=model_name)
        self.max_tokens = max_tokens

    def llm_engine(self, messages, stop_sequences=("Task",)) -> str:
        """Run one chat completion and return the assistant's text.

        Args:
            messages: Chat messages in the HF chat-completion format.
            stop_sequences: Sequences at which generation stops. The
                default is an immutable tuple (fixes the mutable-default
                anti-pattern of the original ``["Task"]``).

        Returns:
            The content of the first completion choice.
        """
        # chat_completion expects a list for `stop`; convert so tuple
        # (or any iterable) defaults keep working.
        response = self.model.chat_completion(
            messages, stop=list(stop_sequences), max_tokens=self.max_tokens
        )
        return response.choices[0].message.content

    def new_conversation(self, prompt, tools, category=None):
        """Start a new conversation with a fresh agent bound to *tools*.

        Args:
            prompt: System prompt for the conversation.
            tools: Iterable of project tool objects; each is exposed to the
                agent via its ``as_annotated_function()`` wrapper.
            category: Optional label forwarded to the conversation.

        Returns:
            A :class:`HuggingFaceConversation` wrapping the new agent.
        """
        agent = ReactCodeAgent(
            tools=[function_to_tool(tool.as_annotated_function()) for tool in tools],
            llm_engine=self.llm_engine,
        )
        return HuggingFaceConversation(agent, prompt, category=category)
|