"""
Multi-model setup — two agents backed by different LLMs.

Agent 1 (Doctor) — a stronger model that suggests immunity-boosting foods.
Agent 2 (Organiser) — a lighter model that picks the single best option.

Demonstrates:
- Per-agent LLM configuration (different endpoints, models, temperatures)
- LLMCallerFactory dispatching callers by agent
- Sequential workflow: Doctor → Organiser

Configure your models via environment variables:
    DOCTOR_API_KEY / DOCTOR_BASE_URL / DOCTOR_MODEL
    ORGANIZER_API_KEY / ORGANIZER_BASE_URL / ORGANIZER_MODEL

Run:
    python -m examples.multi_model_example
"""
import os
from builder import GraphBuilder
from execution import LLMCallerFactory, MACPRunner
# ── Model configurations ─────────────────────────────────────────────────────
# Each agent is backed by its own endpoint/model; all values can be overridden
# via environment variables, with local-server defaults for quick experiments.

# Stronger model for the Doctor (generates the candidate foods).
DOCTOR_CONFIG = {
    "api_key": os.getenv("DOCTOR_API_KEY", "your-doctor-api-key"),
    "base_url": os.getenv("DOCTOR_BASE_URL", "http://localhost:8000/v1"),
    "model_name": os.getenv("DOCTOR_MODEL", "gpt-4o"),
}

# Lighter model for the Organiser (only has to pick one option).
ORGANIZER_CONFIG = {
    "api_key": os.getenv("ORGANIZER_API_KEY", "your-organizer-api-key"),
    "base_url": os.getenv("ORGANIZER_BASE_URL", "http://localhost:8001/v1"),
    "model_name": os.getenv("ORGANIZER_MODEL", "gpt-4o-mini"),
}
# ── Graph construction ───────────────────────────────────────────────────────
def _build_graph():
    """Assemble the two-agent graph.

    The Doctor (stronger model, higher temperature) proposes candidate foods;
    the Organiser (lighter model, near-deterministic) picks a single winner.

    Returns:
        The graph produced by ``GraphBuilder.build()``, with a
        doctor → organizer workflow edge and the task wired to both agents.
    """
    builder = GraphBuilder()
    builder.add_agent(
        "doctor",
        display_name="Nutrition Doctor",
        persona="You are an experienced nutritional doctor.",
        # NOTE: fixed mojibake in the prompt text ("3β5" → "3–5").
        description=(
            "Suggest 3–5 foods that boost immunity, "
            "with a brief explanation of each benefit."
        ),
        llm_backbone=DOCTOR_CONFIG["model_name"],
        base_url=DOCTOR_CONFIG["base_url"],
        api_key=DOCTOR_CONFIG["api_key"],
        temperature=0.7,  # some creativity for generating options
        max_tokens=1000,
    )
    builder.add_agent(
        "organizer",
        display_name="Organiser",
        persona="You are a practical organiser.",
        # NOTE: fixed mojibake in the prompt text (dashes were "β").
        description=(
            "Choose THE SINGLE best option from those proposed. "
            "Answer briefly: 'Best choice: [food] — [1–2 sentence justification]'"
        ),
        llm_backbone=ORGANIZER_CONFIG["model_name"],
        base_url=ORGANIZER_CONFIG["base_url"],
        api_key=ORGANIZER_CONFIG["api_key"],
        temperature=0.1,  # near-deterministic pick
        max_tokens=200,
    )
    # Doctor runs first; its output feeds the Organiser.
    builder.add_workflow_edge("doctor", "organizer")
    builder.add_task(query="What food is best for boosting immunity?")
    builder.connect_task_to_agents()
    return builder.build()
# ── Entry point ──────────────────────────────────────────────────────────────
def main():
    """Build the graph, run one consultation round, and print the results."""
    graph = _build_graph()

    print("Agents and their models:")
    for agent in graph.agents:
        # Not every node on the graph necessarily carries an llm_config; guard.
        if hasattr(agent, "llm_config") and agent.llm_config:
            model = agent.llm_config.get("model_name", "?")
            # NOTE: fixed mojibake in output ("β" → "→").
            print(f"  {agent.agent_id:<12} → {model}")

    factory = LLMCallerFactory.create_openai_factory()
    runner = MACPRunner(llm_factory=factory)

    print("\nRunning consultation…")  # fixed mojibake ellipsis
    result = runner.run_round(graph, final_agent_id="organizer")

    print("\n" + "=" * 50)
    print("RESULTS")
    print("=" * 50)
    if "doctor" in result.messages:
        print("\nDoctor's recommendations:")
        print(result.messages["doctor"])
    if "organizer" in result.messages:
        print("\nOrganiser's pick:")
        print(result.messages["organizer"])

    print(f"\nTotal tokens : {result.total_tokens}")
    print(f"Total time : {result.total_time:.2f}s")
    print(f"Final answer : {result.final_answer}")
# Allow running as a script: python -m examples.multi_model_example
if __name__ == "__main__":
    main()