| # app/orchestrator.py | |
| import json | |
| from backend.llm.client import call_llm, TOOLS | |
async def run_turn(history, max_tool_rounds=5):
    """Run one conversational turn, executing any tool calls the LLM requests.

    Args:
        history: Mutable list of OpenAI-style chat message dicts. The
            assistant's tool-call message and each tool result are appended
            in place so follow-up LLM calls see them.
        max_tool_rounds: Safety cap on consecutive tool-call rounds so a
            model that keeps requesting tools cannot loop forever.

    Returns:
        The assistant's final text content (may be None if the model
        returned an empty message).
    """
    resp = await call_llm(history)
    msg = resp.choices[0].message

    for _ in range(max_tool_rounds):
        tool_calls = getattr(msg, "tool_calls", None) or []
        if not tool_calls:
            break

        # The chat-completions API requires the assistant message that issued
        # the tool calls to precede the role="tool" result messages; without
        # it the finalize request below is rejected as malformed.
        history.append({
            "role": "assistant",
            "content": msg.content,
            "tool_calls": [
                {
                    "id": tc.id,
                    "type": "function",
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments,
                    },
                }
                for tc in tool_calls
            ],
        })

        for tc in tool_calls:
            fn = tc.function.name
            try:
                args = json.loads(tc.function.arguments or "{}")
                tool = TOOLS[fn]  # KeyError: model hallucinated a tool name
                out = await tool.run(**args)
            except (KeyError, json.JSONDecodeError, TypeError) as e:
                # Report the failure back to the model instead of crashing
                # the turn; it can retry with corrected arguments.
                out = {"error": f"{type(e).__name__}: {e}"}
            history.append({
                "role": "tool",
                "tool_call_id": tc.id,
                "name": fn,
                "content": json.dumps(out),
            })

        # Ask the LLM to continue/finalize with the tool results in context;
        # it may request another round of tools.
        resp = await call_llm(history)
        msg = resp.choices[0].message

    return msg.content