import json

import gradio as gr

from flow import full_flow
from utils import use_cost_tracker, get_costs, compute_hash

with open('schemas.json', 'r') as f:
    SCHEMAS = json.load(f)
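

# Convert raw Agora messages into two chat transcripts: a "clean" one showing only
# the message bodies (or a short error line), and a raw one showing each message's
# full JSON payload. Every raw message is assumed to carry at least 'role', 'body'
# and (optionally) 'status'/'message' fields, as inferred from the accesses below.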
def parse_raw_messages(messages_raw):
    messages_clean = []
    messages_agora = []

    for message in messages_raw:
        role = message['role']
        message_without_role = dict(message)
        del message_without_role['role']

        messages_agora.append({
            'role': role,
            'content': '```\n' + json.dumps(message_without_role, indent=2) + '\n```'
        })

        if message.get('status') == 'error':
            messages_clean.append({
                'role': role,
                'content': f"Error: {message['message']}"
            })
        else:
            messages_clean.append({
                'role': role,
                'content': message['body']
            })

    return messages_clean, messages_agora


def main():
    with gr.Blocks() as demo:
        gr.Markdown("# 🏛️ Agora Demo")
        gr.Markdown("[Agora](https://agoraprotocol.org/) is a protocol for efficient communication between heterogeneous agents.")
        gr.Markdown("In short, with Agora, very different agents can exchange messages efficiently, even if they've never interacted before.")
        gr.Markdown("This demo shows how Agora solves various tasks through a mix of natural language and structured communication.")

        chosen_task = gr.Dropdown(choices=[
            (v['display_name'], k) for k, v in SCHEMAS.items()
        ], label="Choose a Demo", value="weather_forecast")
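
        # Dynamic render: the description below is rebuilt whenever the selected demo
        # changes. (The @gr.render decorators here and below are assumed; without
        # them the nested component-building functions would never run.)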
        @gr.render(inputs=[chosen_task])
        def render2(chosen_task):
            gr.Markdown('**Description**: ' + SCHEMAS[chosen_task]["description"])

        # custom_task = gr.Checkbox(label="Override Demo Parameters")
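
        # Mutable store for the selected demo's schema fields (description, input,
        # output, tools, examples), kept as strings so they could be edited in the
        # (currently disabled) override UI below.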
        STATE_TRACKER = {}

        @gr.render(inputs=[chosen_task])
        def render(chosen_task):
            if STATE_TRACKER.get('chosen_task') != chosen_task:
                STATE_TRACKER['chosen_task'] = chosen_task
                for k, v in SCHEMAS[chosen_task]['schema'].items():
                    if isinstance(v, str):
                        STATE_TRACKER[k] = v
                    else:
                        STATE_TRACKER[k] = json.dumps(v, indent=2)

            if False:  # custom_task is disabled for now
                gr.Markdown('#### Custom Demo Parameters')
                gr.Markdown('You can override the default parameters for the demo. Note: recommended for advanced users only.')
                gr.Text(label="Description", value=STATE_TRACKER["description"], interactive=True).change(lambda x: STATE_TRACKER.update({'description': x}))
                with gr.Row(equal_height=True):
                    with gr.Column(scale=1):
                        gr.TextArea(label="Input Schema", value=STATE_TRACKER["input"], interactive=True).change(lambda x: STATE_TRACKER.update({'input': x}))
                    with gr.Column(scale=1):
                        gr.TextArea(label="Output Schema", value=STATE_TRACKER["output"], interactive=True).change(lambda x: STATE_TRACKER.update({'output': x}))
                with gr.Row(equal_height=True):
                    with gr.Column(scale=1):
                        gr.TextArea(label="Tools", value=STATE_TRACKER["tools"], interactive=True).change(lambda x: STATE_TRACKER.update({'tools': x}))
                    with gr.Column(scale=1):
                        gr.TextArea(label="Examples", value=STATE_TRACKER["examples"], interactive=True).change(lambda x: STATE_TRACKER.update({'examples': x}))

        model_options = [
            ('GPT-4o (Camel AI)', 'gpt-4o'),
            ('GPT-4o mini (Camel AI)', 'gpt-4o-mini'),
            ('Claude 3.5 Sonnet (LangChain)', 'claude-3-5-sonnet-latest'),
            ('Claude 3.5 Haiku (LangChain)', 'claude-3-5-haiku-latest'),
            ('Gemini 1.5 Pro (Google GenAI)', 'gemini-1.5-pro'),
            ('Llama 3 405B (Sambanova + LangChain)', 'llama3-405b')
        ]
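
        # Avatar icons for the chat panels, keyed by model id (empty string as fallback).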
        fallback_image = ''
        images = {
            'gpt-4o': 'https://uxwing.com/wp-content/themes/uxwing/download/brands-and-social-media/chatgpt-icon.png',
            'gpt-4o-mini': 'https://uxwing.com/wp-content/themes/uxwing/download/brands-and-social-media/chatgpt-icon.png',
            'claude-3-5-sonnet-latest': 'https://play-lh.googleusercontent.com/4S1nfdKsH_1tJodkHrBHimqlCTE6qx6z22zpMyPaMc_Rlr1EdSFDI1I6UEVMnokG5zI',
            'claude-3-5-haiku-latest': 'https://play-lh.googleusercontent.com/4S1nfdKsH_1tJodkHrBHimqlCTE6qx6z22zpMyPaMc_Rlr1EdSFDI1I6UEVMnokG5zI',
            'gemini-1.5-pro': 'https://uxwing.com/wp-content/themes/uxwing/download/brands-and-social-media/google-gemini-icon.png',
            'llama3-405b': 'https://www.designstub.com/png-resources/wp-content/uploads/2023/03/meta-icon-social-media-flat-graphic-vector-3-novem.png'
        }
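
        # Let the user choose which LLM powers each of the two agents.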
        with gr.Row(equal_height=True):
            with gr.Column(scale=1):
                alice_model_dd = gr.Dropdown(label="Alice Agent", choices=model_options, value="gpt-4o")
            with gr.Column(scale=1):
                bob_model_dd = gr.Dropdown(label="Bob Agent", choices=model_options, value="claude-3-5-sonnet-latest")
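
        # Everything below (chat panels, protocol views, cost report and the Start
        # handler) is rebuilt whenever either agent's model changes, so the chat
        # avatars always match the chosen models.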
        @gr.render(inputs=[alice_model_dd, bob_model_dd])
        def render_with_images(alice_model, bob_model):
            button = gr.Button('Start', elem_id='start_button')

            gr.Markdown('## Natural Language')
            gr.Markdown("When Agora operates without a protocol, it uses the LLM to send/receive messages.")
            gr.Markdown("This is particularly useful for rare communications, where establishing a protocol would be superfluous.")

            avatar_images = [images.get(bob_model, fallback_image), images.get(alice_model, fallback_image)]
            chatbot_nl = gr.Chatbot(type="messages", avatar_images=avatar_images)

            with gr.Accordion(label="Raw Messages", open=False):
                chatbot_nl_raw = gr.Chatbot(type="messages", avatar_images=avatar_images)

            gr.Markdown('## Negotiation')
            gr.Markdown("If the agents realize that they have been communicating frequently, they negotiate a protocol.")
            chatbot_negotiation = gr.Chatbot(type="messages", avatar_images=avatar_images)

            gr.Markdown('## Protocol')
            gr.Markdown("The agents now have an unambiguous protocol to follow. This reduces redundant communications and mistakes.")
            gr.Markdown("The protocol is stored in a Protocol Document and is uniquely identified by its SHA1 hash.")
            protocol_hash_result = gr.Text(interactive=False, label="Protocol Hash")
            protocol_result = gr.TextArea(interactive=False, label="Protocol")

            gr.Markdown('## Implementation')
            gr.Markdown("If they desire, Alice and Bob can independently implement their side of the protocol as routines (e.g. Python modules).")
            gr.Markdown("The routines handle the protocol communication without needing to invoke the LLM.")

            with gr.Row():
                with gr.Column(scale=1):
                    alice_implementation = gr.TextArea(interactive=False, label="Alice Implementation")
                with gr.Column(scale=1):
                    bob_implementation = gr.TextArea(interactive=False, label="Bob Implementation")

            gr.Markdown('## Structured Communication')
            gr.Markdown("The agents now communicate using the routines. This is faster, more reliable and cheaper than using the LLM.")
            structured_communication = gr.Chatbot(type="messages", avatar_images=avatar_images)

            with gr.Accordion(label="Raw Messages", open=False):
                structured_communication_raw = gr.Chatbot(type="messages", avatar_images=avatar_images)

            gr.Markdown('## Cost')
            gr.Markdown("Negotiation & implementation have a higher upfront cost, but once they're done, the cost of using a routine is negligible compared to invoking an LLM.")
            gr.Markdown("This means that, for moderate- to high-frequency communications, negotiation & implementation with Agora is far cheaper than using natural language alone.")
            gr.Markdown("Note: negotiated protocols can be reused for similar tasks and shared with other agents, which further reduces costs.")
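
            # The slider amortizes the one-off negotiation + implementation cost over
            # an expected number of queries, so the report can show which approach is
            # cheaper at that volume.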
            cost_info = gr.State(value=None)
            # cost_info = gr.TextArea(interactive=False, label="Cost")
            query_slider = gr.Slider(label="Expected number of queries", minimum=1, maximum=10_000, step=1, value=50, interactive=True)
            cost_display = gr.Markdown('')

            gr.Markdown('## Next Steps')
            gr.Markdown("This demo showcases a simple negotiation-and-implementation flow between two agents.")
            gr.Markdown("In practice, Agora can be used to build entire networks of agents handling far more complex tasks.")
            gr.Markdown("Check out our [paper](https://arxiv.org/pdf/2410.11905.pdf) for an example of a network of 100 agents, all interconnected!")
            gr.Markdown("Also, don't forget to follow us on [Twitter](https://twitter.com/Agora_Protocol) and join our [Discord](https://discord.gg/MXmfhwQ4FB)!")
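
            # Build the Markdown cost report: a per-operation cost table plus a
            # comparison of N natural-language queries against the one-off Agora
            # negotiation and implementation costs.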
            def render_info(query_count, cost_info):
                if not cost_info:
                    return ''

                natural_cost = cost_info['conversation'] * query_count
                agora_cost = cost_info['negotiation'] + cost_info['programming']

                cost_message = ''
                cost_message += f"""
|Operation|Cost (USD)|
|---|---|
|Natural language conversation|{cost_info["conversation"]:.4f}|
|Negotiation|{cost_info["negotiation"]:.4f}|
|Programming|{cost_info["programming"]:.4f}|

Cost of {query_count} queries:

- With natural language: {natural_cost:.4f} USD
- With Agora: {agora_cost:.4f} USD

"""

                if natural_cost < agora_cost:
                    factor = agora_cost / natural_cost
                    cost_message += f'#### Natural language is {factor:.2f}x cheaper than Agora.'
                else:
                    factor = natural_cost / agora_cost
                    cost_message += f'## Agora is {factor:.2f}x cheaper than natural language.'

                return cost_message

            cost_info.change(render_info, [query_slider, cost_info], [cost_display])
            query_slider.change(render_info, [query_slider, cost_info], [cost_display])
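
            # Generator driving the whole demo: it disables the controls, streams the
            # intermediate results of full_flow() into the UI as they arrive, then
            # re-enables the controls and reports the tracked costs.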
            def respond(chosen_task, alice_model, bob_model, query_count):
                with use_cost_tracker():
                    yield gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False), \
                        None, None, None, None, None, None, None, None, None, None, None

                    if False:  # custom_task is disabled for now
                        schema = dict(STATE_TRACKER)
                        for k, v in schema.items():
                            if isinstance(v, str):
                                try:
                                    schema[k] = json.loads(v)
                                except json.JSONDecodeError:
                                    pass
                    else:
                        schema = SCHEMAS[chosen_task]["schema"]

                    for nl_messages_raw, negotiation_messages, structured_messages_raw, protocol, alice_implementation, bob_implementation in full_flow(schema, alice_model, bob_model):
                        nl_messages_clean, nl_messages_agora = parse_raw_messages(nl_messages_raw)
                        structured_messages_clean, structured_messages_agora = parse_raw_messages(structured_messages_raw)

                        protocol_hash = compute_hash(protocol) if protocol else None

                        yield gr.update(), gr.update(), gr.update(), None, None, nl_messages_clean, nl_messages_agora, negotiation_messages, structured_messages_clean, structured_messages_agora, protocol, protocol_hash, alice_implementation, bob_implementation
                    # yield from full_flow(schema, alice_model, bob_model)

                    cost_data = get_costs()
                    cost_data_formatted = render_info(query_count, cost_data)

                    yield gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), cost_data, cost_data_formatted, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
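
            # The output list below must match the tuple order yielded by respond().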
            button.click(respond, [chosen_task, alice_model_dd, bob_model_dd, query_slider], [button, alice_model_dd, bob_model_dd, cost_info, cost_display, chatbot_nl, chatbot_nl_raw, chatbot_negotiation, structured_communication, structured_communication_raw, protocol_result, protocol_hash_result, alice_implementation, bob_implementation])

    demo.launch(share=True)


if __name__ == '__main__':
    main()