Spaces:
Sleeping
Sleeping
Update main.py
Browse files
main.py
CHANGED
|
@@ -56,10 +56,19 @@ NEXUS_MODEL = os.environ.get("NEXUS_MODEL", "nexus-auto")
|
|
| 56 |
REACT_MAX = int(os.environ.get("REACT_MAX_STEPS", "6"))
|
| 57 |
|
| 58 |
# ββ FORGE new infrastructure ββββββββββββββββββββββββββββββββββββββββ
|
| 59 |
-
PROMPTS_URL
|
| 60 |
-
TRACE_URL
|
| 61 |
-
LEARN_URL
|
| 62 |
-
LOOP_URL
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
|
| 64 |
# ββ Persona cache (fetched from agent-prompts, refreshed every 5min) β
|
| 65 |
_persona_cache: dict = {} # agent_name β {system_prompt, max_steps, ...}
|
|
@@ -321,285 +330,731 @@ async def space_post(space: str, path: str, data: dict) -> Optional[Any]:
|
|
| 321 |
log.warning(f"space_post {space}{path}: {e}")
|
| 322 |
return None
|
| 323 |
|
| 324 |
-
# ββ
|
| 325 |
-
TOOL_SPECS = [
|
| 326 |
-
{"name":"relay_send", "desc":"Send message to an agent or broadcast. Args: to, subject, body, priority(low/normal/high/urgent), channel(internal/telegram/browser)"},
|
| 327 |
-
{"name":"relay_inbox", "desc":"Read unread messages for an agent. Args: agent"},
|
| 328 |
-
{"name":"memory_search", "desc":"Search agent memory. Args: query, tier(all/episodic/semantic/procedural/working)"},
|
| 329 |
-
{"name":"memory_store", "desc":"Store a memory. Args: content, tier, tags(list), importance(0-10)"},
|
| 330 |
-
{"name":"kanban_list", "desc":"List tasks. Args: status(todo/doing/done/blocked/failed), agent(optional)"},
|
| 331 |
-
{"name":"kanban_move", "desc":"Move a task to new status. Args: id, status, slot_id(optional), llm_tokens(optional), react_steps(optional)"},
|
| 332 |
-
{"name":"kanban_create", "desc":"Create a task. Args: title, body, priority(low/medium/high/critical), agent, est_minutes(optional), deps(list of task ids, optional)"},
|
| 333 |
-
{"name":"vault_exec", "desc":"Execute code. Args: runtime(bash/python3/node/npm/pip/git), code, cwd(optional)"},
|
| 334 |
-
{"name":"vault_read", "desc":"Read a file. Args: path"},
|
| 335 |
-
{"name":"vault_write", "desc":"Write a file. Args: path, content"},
|
| 336 |
-
{"name":"forge_search", "desc":"Search for skills/tools in FORGE. Args: query"},
|
| 337 |
-
{"name":"slot_reserve", "desc":"Reserve the 35B GPU slot before a long task. Args: task_id, est_minutes(1-60), priority(1=critical). Returns slot_id or queue info. Use before any ki_fusion task >2 min."},
|
| 338 |
-
{"name":"slot_release", "desc":"Release the GPU slot when done. Args: slot_id. Always call this after finishing to unblock other agents."},
|
| 339 |
-
{"name":"slot_status", "desc":"Check who holds the GPU slot and queue. No args. Check before reserving to know wait time."},
|
| 340 |
-
{"name":"trigger_agent", "desc":"Wake another agent immediately (e.g. after delegation). Args: agent(name), content(task description). Use after delegate to ensure immediate pickup."},
|
| 341 |
-
{"name":"self_reflect", "desc":"Trigger your own self-reflection: reads your traces, proposes persona improvements. No args needed. Use weekly or after repeated failures."},
|
| 342 |
-
{"name":"delegate", "desc":"Delegate a task to another agent. Args: to_agent, task, priority. Always follow with trigger_agent to wake them immediately."},
|
| 343 |
-
{"name":"finish", "desc":"Complete the ReAct loop with a result. Args: result"},
|
| 344 |
-
]
|
| 345 |
|
| 346 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 347 |
|
| 348 |
-
async def
|
| 349 |
-
"""
|
|
|
|
|
|
|
| 350 |
try:
|
| 351 |
-
|
| 352 |
-
r = await
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
| 373 |
-
r = await
|
| 374 |
-
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
|
| 378 |
-
|
| 379 |
-
if tool == "kanban_list":
|
| 380 |
-
params = {}
|
| 381 |
-
if args.get("status"): params["status"] = args["status"]
|
| 382 |
-
if args.get("agent"): params["agent"] = args["agent"]
|
| 383 |
-
r = await space_get("kanban", "/api/tasks", params)
|
| 384 |
-
tasks = r if isinstance(r, list) else []
|
| 385 |
-
return json.dumps([{"id":t.get("id"),"title":t.get("title"),"status":t.get("status"),"priority":t.get("priority")} for t in tasks[:8]])
|
| 386 |
-
|
| 387 |
-
if tool == "kanban_move":
|
| 388 |
-
r = await space_post("kanban", "/api/move", {"id":args.get("id"),"status":args.get("status")})
|
| 389 |
-
return f"moved {args.get('id')} to {args.get('status')}" if r else "kanban_move failed"
|
| 390 |
-
|
| 391 |
-
if tool == "kanban_create":
|
| 392 |
-
r = await space_post("kanban", "/api/tasks", {
|
| 393 |
-
"title": args.get("title",""), "body": args.get("body",""),
|
| 394 |
-
"priority": args.get("priority","medium"), "agent": args.get("agent",agent_name),
|
| 395 |
-
"type": "ai"})
|
| 396 |
-
return f"created task id={r.get('id','?')}" if r else "kanban_create failed"
|
| 397 |
-
|
| 398 |
-
if tool == "vault_exec":
|
| 399 |
-
# Sanitize cwd: only allow actual workspace dirs, never template paths
|
| 400 |
-
_VALID_CWDS = {"code", "reports", "scratch", "shared", ""}
|
| 401 |
-
raw_cwd = str(args.get("cwd","scratch")).strip().strip("/")
|
| 402 |
-
safe_cwd = raw_cwd if raw_cwd in _VALID_CWDS else "scratch"
|
| 403 |
-
r = await space_post("vault", "/api/exec", {
|
| 404 |
-
"runtime": args.get("runtime","python3"),
|
| 405 |
-
"code": args.get("code",""), "cwd": safe_cwd,
|
| 406 |
-
"timeout": 30})
|
| 407 |
-
if not r: return "vault_exec failed"
|
| 408 |
-
return f"exit={r.get('exit_code')} ms={r.get('ms')}\n{r.get('output','')[:500]}"
|
| 409 |
-
|
| 410 |
-
if tool == "vault_read":
|
| 411 |
-
r = await space_get("vault", "/api/read", {"path": args.get("path","")})
|
| 412 |
-
return (r.get("content","")[:800] if r else "vault_read failed")
|
| 413 |
-
|
| 414 |
-
if tool == "vault_write":
|
| 415 |
-
r = await space_post("vault", "/api/write", {
|
| 416 |
-
"path": args.get("path",""), "content": args.get("content",""),
|
| 417 |
-
"agent": agent_name})
|
| 418 |
-
return f"written: {args.get('path')} snap={r.get('snapshot',{}).get('id','?')}" if r else "vault_write failed"
|
| 419 |
-
|
| 420 |
-
if tool == "forge_search":
|
| 421 |
-
r = await space_get("forge", "/api/v1/skills", {"q": args.get("query",""), "limit":5})
|
| 422 |
-
items = r if isinstance(r, list) else (r.get("skills",[]) if r else [])
|
| 423 |
-
return json.dumps([{"name":s.get("name"),"description":s.get("description","")[:100]} for s in items[:5]])
|
| 424 |
-
|
| 425 |
-
if tool == "slot_reserve":
|
| 426 |
-
r = await space_post("nexus", "/api/slot/reserve", {
|
| 427 |
-
"agent": agent_name,
|
| 428 |
-
"task_id": args.get("task_id",""),
|
| 429 |
-
"est_minutes": args.get("est_minutes", 5),
|
| 430 |
-
"priority": args.get("priority", 5),
|
| 431 |
-
})
|
| 432 |
-
if not r: return "slot_reserve failed"
|
| 433 |
-
status = r.get("status","unknown")
|
| 434 |
-
if status == "active":
|
| 435 |
-
return f"slot ACTIVE slot_id={r['slot_id']} expires in {args.get('est_minutes',5)} min"
|
| 436 |
-
elif status == "queued":
|
| 437 |
-
return f"slot QUEUED position={r['queue_position']} eta={r.get('eta_seconds',0)}s current_holder={r.get('current_holder','?')} β wait or use local_cpu"
|
| 438 |
-
else:
|
| 439 |
-
return f"slot status={status}: {r}"
|
| 440 |
-
|
| 441 |
-
if tool == "slot_release":
|
| 442 |
-
r = await space_post("nexus", "/api/slot/release", {"slot_id": args.get("slot_id","")})
|
| 443 |
-
return f"slot released (held {r.get('held_seconds',0)}s)" if r and r.get("released") else "slot_release failed or slot not found"
|
| 444 |
-
|
| 445 |
-
if tool == "slot_status":
|
| 446 |
-
r = await space_get("nexus", "/api/slot/status", {})
|
| 447 |
-
if not r: return "slot_status failed"
|
| 448 |
-
active = r.get("active")
|
| 449 |
-
queue = r.get("queue", [])
|
| 450 |
-
if active:
|
| 451 |
-
eta = int(active.get("expires_at",0) - __import__("time").time())
|
| 452 |
-
result = f"OCCUPIED by {active['agent']} task={active.get('task_id','')} expires_in={eta}s"
|
| 453 |
else:
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
|
| 461 |
-
|
| 462 |
-
|
| 463 |
-
|
| 464 |
-
|
| 465 |
-
|
| 466 |
-
|
| 467 |
-
|
| 468 |
-
|
| 469 |
-
content = args.get("content", f"Task delegated by {agent_name}")
|
| 470 |
-
try:
|
| 471 |
-
r = await space_post("pulse", f"/api/trigger/{target}",
|
| 472 |
-
{"from": agent_name, "content": content})
|
| 473 |
-
return f"triggered {target}: {r}" if r else f"trigger sent to {target}"
|
| 474 |
-
except Exception as e:
|
| 475 |
-
return f"trigger_agent failed: {e} β agent may still pick up via heartbeat"
|
| 476 |
|
| 477 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 478 |
try:
|
| 479 |
-
|
| 480 |
-
|
|
|
|
| 481 |
except Exception as e:
|
| 482 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 483 |
|
| 484 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 485 |
except Exception as e:
|
| 486 |
-
|
|
|
|
| 487 |
|
| 488 |
-
|
| 489 |
-
|
| 490 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 491 |
|
| 492 |
-
|
| 493 |
-
{tools}
|
| 494 |
|
| 495 |
-
|
| 496 |
-
|
|
|
|
| 497 |
|
| 498 |
-
|
| 499 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 500 |
|
| 501 |
-
|
| 502 |
-
1. Your CONTEXT already contains your current inbox messages and open tasks. Do NOT call relay_inbox or kanban_list as your first step - act on what is already in the context.
|
| 503 |
-
2. When you have an OPEN TASK: first call kanban_move(id="<id>", status="doing"), then do the work, then call kanban_move(id="<id>", status="done").
|
| 504 |
-
3. To write a file to vault: call vault_write(path="code/filename.py", content="...full file content...").
|
| 505 |
-
4. vault_exec cwd MUST be one of exactly: code, reports, scratch, shared. Never invent a path like path/to/vault.
|
| 506 |
-
5. Never call the same tool twice in a row with the same args. If it failed, try a different approach or finish.
|
| 507 |
-
6. Always finish within {max_steps} steps.
|
| 508 |
-
"""
|
| 509 |
|
| 510 |
async def react_loop(agent: dict, trigger_type: str, trigger_content: str) -> dict:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 511 |
name = agent["name"]
|
| 512 |
-
persona = agent.get("persona", "A helpful AI agent.")
|
| 513 |
cost_mode = agent.get("cost_mode", "balanced")
|
| 514 |
max_steps = agent.get("max_react_steps", REACT_MAX)
|
| 515 |
|
| 516 |
-
|
| 517 |
-
|
| 518 |
-
name=name, persona=persona, tools=tool_list, max_steps=max_steps)
|
| 519 |
|
| 520 |
-
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
|
| 525 |
-
|
| 526 |
-
|
|
|
|
|
|
|
| 527 |
|
| 528 |
-
|
| 529 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 530 |
|
| 531 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 532 |
|
| 533 |
-
|
| 534 |
-
# Call LLM with automatic fallback chain
|
| 535 |
-
try:
|
| 536 |
-
raw = await call_llm(messages, system_msg, max_tokens=900)
|
| 537 |
-
raw = raw.strip()
|
| 538 |
-
except Exception as e:
|
| 539 |
-
trace["result"] = f"all LLM providers failed: {e}"
|
| 540 |
-
trace["ok"] = False
|
| 541 |
-
push_live({"type":"error","agent":name,"message":f"LLM error: {str(e)[:80]}"})
|
| 542 |
-
break
|
| 543 |
|
| 544 |
-
|
| 545 |
-
|
| 546 |
-
|
| 547 |
-
|
| 548 |
-
|
| 549 |
-
|
| 550 |
-
|
| 551 |
-
except Exception:
|
| 552 |
-
step_json = {"thought": raw, "action": "finish", "args": {"result": raw}}
|
| 553 |
-
|
| 554 |
-
thought = step_json.get("thought","") or ""
|
| 555 |
-
action = step_json.get("action","finish") or "finish"
|
| 556 |
-
args = step_json.get("args",{}) or {} # guard against JSON null β None
|
| 557 |
-
|
| 558 |
-
step_record = {"n":step_n+1,"thought":thought,"action":action,"args":args,"observation":""}
|
| 559 |
-
push_live({"type":"react_step","agent":name,"step":step_n+1,
|
| 560 |
-
"thought":thought[:120],"action":action})
|
| 561 |
-
|
| 562 |
-
if action == "finish":
|
| 563 |
-
trace["result"] = args.get("result","done")
|
| 564 |
-
step_record["observation"] = "[FINISHED]"
|
| 565 |
-
trace["steps"].append(step_record)
|
| 566 |
-
break
|
| 567 |
-
|
| 568 |
-
if action not in TOOL_NAMES:
|
| 569 |
-
observation = f"unknown tool: {action}. Available: {sorted(TOOL_NAMES)}"
|
| 570 |
-
else:
|
| 571 |
-
observation = await exec_tool(name, action, args)
|
| 572 |
|
| 573 |
-
|
| 574 |
-
trace["steps"].append(step_record)
|
| 575 |
-
push_live({"type":"react_obs","agent":name,"step":step_n+1,
|
| 576 |
-
"observation":step_record["observation"][:100]})
|
| 577 |
|
| 578 |
-
|
| 579 |
-
|
|
|
|
|
|
|
|
|
|
| 580 |
|
| 581 |
-
|
| 582 |
-
asyncio.create_task(space_post("memory", "/api/memories", {
|
| 583 |
-
"content": f"[{name}] Step {step_n+1}: {action}({json.dumps(args)[:100]}) β {observation[:150]}",
|
| 584 |
-
"tier": "episodic", "tags": [name,"react","trace"], "importance": 3, "agent": name}))
|
| 585 |
|
| 586 |
-
|
| 587 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 588 |
|
| 589 |
-
|
| 590 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 591 |
|
| 592 |
-
|
| 593 |
-
|
| 594 |
-
|
|
|
|
| 595 |
|
| 596 |
-
|
| 597 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 598 |
|
| 599 |
return trace
|
| 600 |
|
| 601 |
# ββ Heartbeat engine βββββββββββββββββββββββββββββββββββββββββββββββ
|
| 602 |
scheduler = AsyncIOScheduler(timezone="UTC")
|
|
|
|
|
|
|
| 603 |
|
| 604 |
async def agent_tick(agent_name: str, trigger_type: str = "heartbeat", content: str = ""):
|
| 605 |
agents = load_json(AGENTS_FILE, [])
|
|
|
|
| 56 |
REACT_MAX = int(os.environ.get("REACT_MAX_STEPS", "6"))
|
| 57 |
|
| 58 |
# ββ FORGE new infrastructure ββββββββββββββββββββββββββββββββββββββββ
|
| 59 |
+
PROMPTS_URL = os.environ.get("PROMPTS_URL", "https://chris4k-agent-prompts.hf.space")
|
| 60 |
+
TRACE_URL = os.environ.get("TRACE_URL", "https://chris4k-agent-trace.hf.space")
|
| 61 |
+
LEARN_URL = os.environ.get("LEARN_URL", "https://chris4k-agent-learn.hf.space")
|
| 62 |
+
LOOP_URL = os.environ.get("LOOP_URL", "https://chris4k-agent-loop.hf.space")
|
| 63 |
+
HARNESS_URL = os.environ.get("HARNESS_URL", "https://chris4k-agent-harness.hf.space")
|
| 64 |
+
APPROVE_URL = os.environ.get("APPROVE_URL", "https://chris4k-agent-approve.hf.space")
|
| 65 |
+
COMPLIANCE_URL = os.environ.get("COMPLIANCE_URL", "https://chris4k-agent-compliance.hf.space")
|
| 66 |
+
BRAVE_API_KEY = os.environ.get("BRAVE_API_KEY", "")
|
| 67 |
+
|
| 68 |
+
# Risky tools that require approval gate
|
| 69 |
+
RISKY_TOOLS = {"vault_exec"}
|
| 70 |
+
RISKY_RUNTIMES = {"bash", "git"} # within vault_exec these trigger approve
|
| 71 |
+
RISKY_PATTERNS = {"rm ", "rmdir", "git push", "git force", "dd ", "chmod 777"}
|
| 72 |
|
| 73 |
# ββ Persona cache (fetched from agent-prompts, refreshed every 5min) β
|
| 74 |
_persona_cache: dict = {} # agent_name β {system_prompt, max_steps, ...}
|
|
|
|
| 330 |
log.warning(f"space_post {space}{path}: {e}")
|
| 331 |
return None
|
| 332 |
|
| 333 |
+
# ββ Sprint 5: Middleware helpers ββββββββββββββββββββββββββββββββββββ
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 334 |
|
| 335 |
+
async def harness_scan(agent: str, tool: str, content: str) -> tuple[bool, str]:
|
| 336 |
+
"""Scan tool output through agent-harness before LLM sees it.
|
| 337 |
+
Returns (safe, sanitised_content). On harness unavailable, pass-through."""
|
| 338 |
+
if not HARNESS_URL:
|
| 339 |
+
return True, content
|
| 340 |
+
try:
|
| 341 |
+
async with httpx.AsyncClient(timeout=4) as c:
|
| 342 |
+
r = await c.post(f"{HARNESS_URL}/api/scan/output",
|
| 343 |
+
json={"agent": agent, "tool": tool, "content": content})
|
| 344 |
+
if r.status_code == 200:
|
| 345 |
+
d = r.json()
|
| 346 |
+
return d.get("safe", True), d.get("sanitised", content)
|
| 347 |
+
except Exception as e:
|
| 348 |
+
log.debug(f"[HARNESS] scan failed (pass-through): {e}")
|
| 349 |
+
return True, content
|
| 350 |
+
|
| 351 |
+
async def request_approval(agent: str, tool: str, args: dict, risk: str = "high") -> tuple[bool, str]:
|
| 352 |
+
"""Gate risky tool calls through agent-approve.
|
| 353 |
+
Returns (approved, reason). Timeout = auto-reject."""
|
| 354 |
+
if not APPROVE_URL:
|
| 355 |
+
log.warning("[APPROVE] APPROVE_URL not set β auto-approving (unsafe!)")
|
| 356 |
+
return True, "approve_url_missing"
|
| 357 |
+
try:
|
| 358 |
+
async with httpx.AsyncClient(timeout=6) as c:
|
| 359 |
+
r = await c.post(f"{APPROVE_URL}/api/approval/request",
|
| 360 |
+
json={"agent": agent, "tool": tool, "args": args,
|
| 361 |
+
"risk": risk, "auto_timeout": 120})
|
| 362 |
+
if r.status_code == 200:
|
| 363 |
+
d = r.json()
|
| 364 |
+
approval_id = d.get("id")
|
| 365 |
+
# Poll for up to 90s (Telegram keyboard gives christof 2 min)
|
| 366 |
+
for _ in range(18):
|
| 367 |
+
await asyncio.sleep(5)
|
| 368 |
+
pr = await c.get(f"{APPROVE_URL}/api/approval/{approval_id}")
|
| 369 |
+
if pr.status_code == 200:
|
| 370 |
+
pd = pr.json()
|
| 371 |
+
status = pd.get("status")
|
| 372 |
+
if status == "approved":
|
| 373 |
+
return True, "human_approved"
|
| 374 |
+
if status in ("rejected", "expired"):
|
| 375 |
+
return False, status
|
| 376 |
+
return False, "timeout"
|
| 377 |
+
except Exception as e:
|
| 378 |
+
log.warning(f"[APPROVE] gate failed: {e} β blocking tool call")
|
| 379 |
+
return False, f"approve_error: {e}"
|
| 380 |
+
|
| 381 |
+
async def compliance_scan(agent: str, content: str) -> tuple[bool, str, list]:
|
| 382 |
+
"""Scan content for PII before writing to memory.
|
| 383 |
+
Returns (safe, redacted_content, pii_types_found)."""
|
| 384 |
+
if not COMPLIANCE_URL:
|
| 385 |
+
return True, content, []
|
| 386 |
+
try:
|
| 387 |
+
async with httpx.AsyncClient(timeout=4) as c:
|
| 388 |
+
r = await c.post(f"{COMPLIANCE_URL}/api/scan/pii",
|
| 389 |
+
json={"text": content, "agent": agent, "redact": True})
|
| 390 |
+
if r.status_code == 200:
|
| 391 |
+
d = r.json()
|
| 392 |
+
return (not d.get("pii_found", False),
|
| 393 |
+
d.get("redacted", content),
|
| 394 |
+
d.get("types_found", []))
|
| 395 |
+
except Exception as e:
|
| 396 |
+
log.debug(f"[COMPLIANCE] scan failed (pass-through): {e}")
|
| 397 |
+
return True, content, []
|
| 398 |
|
| 399 |
+
async def web_search_brave(query: str, count: int = 5) -> str:
|
| 400 |
+
"""Brave Search API call. Returns formatted results."""
|
| 401 |
+
if not BRAVE_API_KEY:
|
| 402 |
+
return "web_search unavailable: BRAVE_API_KEY not configured"
|
| 403 |
try:
|
| 404 |
+
async with httpx.AsyncClient(timeout=8) as c:
|
| 405 |
+
r = await c.get("https://api.search.brave.com/res/v1/web/search",
|
| 406 |
+
params={"q": query, "count": count, "text_decorations": False},
|
| 407 |
+
headers={"Accept": "application/json",
|
| 408 |
+
"Accept-Encoding": "gzip",
|
| 409 |
+
"X-Subscription-Token": BRAVE_API_KEY})
|
| 410 |
+
r.raise_for_status()
|
| 411 |
+
data = r.json()
|
| 412 |
+
results = data.get("web", {}).get("results", [])
|
| 413 |
+
if not results:
|
| 414 |
+
return "no results found"
|
| 415 |
+
lines = []
|
| 416 |
+
for i, res in enumerate(results[:count], 1):
|
| 417 |
+
lines.append(f"{i}. {res.get('title','?')} β {res.get('url','')}\n {res.get('description','')[:200]}")
|
| 418 |
+
return "\n\n".join(lines)
|
| 419 |
+
except Exception as e:
|
| 420 |
+
return f"web_search error: {e}"
|
| 421 |
+
|
| 422 |
+
async def fetch_url_content(url: str) -> str:
|
| 423 |
+
"""Fetch a URL and return stripped text (5000 char limit)."""
|
| 424 |
+
try:
|
| 425 |
+
async with httpx.AsyncClient(timeout=10, follow_redirects=True) as c:
|
| 426 |
+
r = await c.get(url, headers={"User-Agent": "FORGE-Agent/1.0"})
|
| 427 |
+
r.raise_for_status()
|
| 428 |
+
ct = r.headers.get("content-type", "")
|
| 429 |
+
if "html" in ct:
|
| 430 |
+
text = re.sub(r"<[^>]+>", " ", r.text)
|
| 431 |
+
text = re.sub(r"\s{2,}", " ", text).strip()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 432 |
else:
|
| 433 |
+
text = r.text.strip()
|
| 434 |
+
return text[:5000] + ("β¦[truncated]" if len(text) > 5000 else "")
|
| 435 |
+
except Exception as e:
|
| 436 |
+
return f"fetch_url error: {e}"
|
| 437 |
+
|
| 438 |
+
# ββ Sprint 5: Saga Orchestrator βββββββββββββββββββββββββββββββββββββ
|
| 439 |
+
|
| 440 |
+
class SagaStep:
|
| 441 |
+
def __init__(self, name: str, forward, compensate=None):
|
| 442 |
+
self.name = name
|
| 443 |
+
self.forward = forward # async callable β result str
|
| 444 |
+
self.compensate = compensate # async callable β None (undo)
|
| 445 |
+
|
| 446 |
+
class SagaOrchestrator:
|
| 447 |
+
"""Run a sequence of steps with automatic compensation on failure.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 448 |
|
| 449 |
+
Usage:
|
| 450 |
+
saga = SagaOrchestrator(agent_name, saga_id)
|
| 451 |
+
saga.add_step("reserve_slot", fwd=lambda: ..., comp=lambda: ...)
|
| 452 |
+
saga.add_step("vault_write", fwd=lambda: ..., comp=lambda: ...)
|
| 453 |
+
result = await saga.run()
|
| 454 |
+
"""
|
| 455 |
+
def __init__(self, agent: str, saga_id: str = ""):
|
| 456 |
+
self.agent = agent
|
| 457 |
+
self.saga_id = saga_id or str(uuid.uuid4())[:8]
|
| 458 |
+
self.steps: list[SagaStep] = []
|
| 459 |
+
self.completed: list[tuple[str, str]] = [] # (name, result)
|
| 460 |
+
|
| 461 |
+
def add_step(self, name: str, fwd, comp=None):
|
| 462 |
+
self.steps.append(SagaStep(name, fwd, comp))
|
| 463 |
+
|
| 464 |
+
async def run(self) -> dict:
|
| 465 |
+
emit_trace(self.agent, "saga_start",
|
| 466 |
+
{"saga_id": self.saga_id, "steps": [s.name for s in self.steps]})
|
| 467 |
+
for step in self.steps:
|
| 468 |
try:
|
| 469 |
+
result = await step.forward()
|
| 470 |
+
self.completed.append((step.name, str(result)))
|
| 471 |
+
log.info(f"[SAGA {self.saga_id}] {step.name} OK: {str(result)[:80]}")
|
| 472 |
except Exception as e:
|
| 473 |
+
log.error(f"[SAGA {self.saga_id}] {step.name} FAILED: {e} β compensating")
|
| 474 |
+
emit_trace(self.agent, "saga_failed",
|
| 475 |
+
{"saga_id": self.saga_id, "failed_step": step.name, "error": str(e)},
|
| 476 |
+
status="error")
|
| 477 |
+
# Compensate in reverse order
|
| 478 |
+
for name, _ in reversed(self.completed):
|
| 479 |
+
comp_step = next((s for s in self.steps if s.name == name), None)
|
| 480 |
+
if comp_step and comp_step.compensate:
|
| 481 |
+
try:
|
| 482 |
+
await comp_step.compensate()
|
| 483 |
+
log.info(f"[SAGA {self.saga_id}] compensated {name}")
|
| 484 |
+
except Exception as ce:
|
| 485 |
+
log.warning(f"[SAGA {self.saga_id}] compensate {name} failed: {ce}")
|
| 486 |
+
# Alert christof
|
| 487 |
+
try:
|
| 488 |
+
async with httpx.AsyncClient(timeout=4) as c:
|
| 489 |
+
await c.post(f"{SPACES['relay']}/api/notify", json={
|
| 490 |
+
"text": f"⚠️ SAGA {self.saga_id} failed at step <b>{step.name}</b>\nAgent: {self.agent}\nError: {str(e)[:200]}\nCompensations ran for: {[n for n,_ in self.completed]}",
|
| 491 |
+
"parse_mode": "HTML"})
|
| 492 |
+
except Exception:
|
| 493 |
+
pass
|
| 494 |
+
return {"ok": False, "saga_id": self.saga_id,
|
| 495 |
+
"failed_step": step.name, "error": str(e),
|
| 496 |
+
"saga_compensated": True}
|
| 497 |
+
emit_trace(self.agent, "saga_complete",
|
| 498 |
+
{"saga_id": self.saga_id, "steps_completed": len(self.completed)})
|
| 499 |
+
return {"ok": True, "saga_id": self.saga_id,
|
| 500 |
+
"steps": dict(self.completed)}
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
# ββ smolagents β Tool definitions ββββββββββββββββββββββββββββββββββ
|
| 504 |
+
# Each tool uses httpx synchronous client (tools run in a thread via asyncio.to_thread).
|
| 505 |
+
# CodeAgent writes Python code to call these tools, enabling loops, conditionals,
|
| 506 |
+
# and natural composition β far more powerful than JSON ReAct.
|
| 507 |
+
|
| 508 |
+
try:
|
| 509 |
+
from smolagents import CodeAgent, Tool, OpenAIServerModel, ToolCallingAgent
|
| 510 |
+
from smolagents.monitoring import LogLevel
|
| 511 |
+
SMOLAGENTS_OK = True
|
| 512 |
+
except ImportError:
|
| 513 |
+
SMOLAGENTS_OK = False
|
| 514 |
+
log.warning("[SMOLAGENTS] not installed β install 'smolagents[litellm]'")
|
| 515 |
+
|
| 516 |
+
def _sync_get(space: str, path: str, params: dict = {}) -> dict | None:
|
| 517 |
+
url = SPACES.get(space, space) + path
|
| 518 |
+
try:
|
| 519 |
+
r = httpx.get(url, params=params, timeout=HTTP_TIMEOUT)
|
| 520 |
+
r.raise_for_status()
|
| 521 |
+
return r.json()
|
| 522 |
+
except Exception as e:
|
| 523 |
+
log.warning(f"_sync_get {space}{path}: {e}")
|
| 524 |
+
return None
|
| 525 |
+
|
| 526 |
+
def _sync_post(space: str, path: str, data: dict) -> dict | None:
|
| 527 |
+
url = SPACES.get(space, space) + path
|
| 528 |
+
try:
|
| 529 |
+
r = httpx.post(url, json=data, timeout=HTTP_TIMEOUT)
|
| 530 |
+
r.raise_for_status()
|
| 531 |
+
return r.json()
|
| 532 |
+
except Exception as e:
|
| 533 |
+
log.warning(f"_sync_post {space}{path}: {e}")
|
| 534 |
+
return None
|
| 535 |
+
|
| 536 |
+
def _harness_scan_sync(agent: str, tool: str, content: str) -> str:
|
| 537 |
+
"""Synchronous harness scan β returns sanitised content."""
|
| 538 |
+
if not HARNESS_URL:
|
| 539 |
+
return content
|
| 540 |
+
try:
|
| 541 |
+
r = httpx.post(f"{HARNESS_URL}/api/scan/output",
|
| 542 |
+
json={"agent": agent, "tool": tool, "content": content}, timeout=4)
|
| 543 |
+
if r.status_code == 200:
|
| 544 |
+
d = r.json()
|
| 545 |
+
return d.get("sanitised", content)
|
| 546 |
+
except Exception:
|
| 547 |
+
pass
|
| 548 |
+
return content
|
| 549 |
+
|
| 550 |
+
def _approve_sync(agent: str, tool: str, args: dict, risk: str = "high") -> tuple[bool, str]:
|
| 551 |
+
"""Synchronous approval gate. Polls up to 90s."""
|
| 552 |
+
if not APPROVE_URL:
|
| 553 |
+
return True, "approve_url_missing"
|
| 554 |
+
try:
|
| 555 |
+
r = httpx.post(f"{APPROVE_URL}/api/approval/request",
|
| 556 |
+
json={"agent": agent, "tool": tool, "args": args,
|
| 557 |
+
"risk": risk, "auto_timeout": 120}, timeout=6)
|
| 558 |
+
if r.status_code == 200:
|
| 559 |
+
approval_id = r.json().get("id")
|
| 560 |
+
for _ in range(18):
|
| 561 |
+
time.sleep(5)
|
| 562 |
+
pr = httpx.get(f"{APPROVE_URL}/api/approval/{approval_id}", timeout=4)
|
| 563 |
+
if pr.status_code == 200:
|
| 564 |
+
status = pr.json().get("status")
|
| 565 |
+
if status == "approved": return True, "human_approved"
|
| 566 |
+
if status in ("rejected", "expired"): return False, status
|
| 567 |
+
return False, "timeout"
|
| 568 |
+
except Exception as e:
|
| 569 |
+
return False, f"approve_error: {e}"
|
| 570 |
+
|
| 571 |
+
def _compliance_scan_sync(agent: str, content: str) -> str:
|
| 572 |
+
"""Compliance PII scan β returns redacted content."""
|
| 573 |
+
if not COMPLIANCE_URL:
|
| 574 |
+
return content
|
| 575 |
+
try:
|
| 576 |
+
r = httpx.post(f"{COMPLIANCE_URL}/api/scan/pii",
|
| 577 |
+
json={"text": content, "agent": agent, "redact": True}, timeout=4)
|
| 578 |
+
if r.status_code == 200:
|
| 579 |
+
return r.json().get("redacted", content)
|
| 580 |
+
except Exception:
|
| 581 |
+
pass
|
| 582 |
+
return content
|
| 583 |
+
|
| 584 |
+
# ββ FORGE Tool classes ββββββββββββββββββββββββββββββββββββββββββββββ
|
| 585 |
+
|
| 586 |
+
class RelaySendTool(Tool):
|
| 587 |
+
name = "relay_send"
|
| 588 |
+
description = "Send a message to an agent or broadcast via RELAY. Use for notifications, delegations, status updates."
|
| 589 |
+
inputs = {
|
| 590 |
+
"to": {"type":"string","description":"Recipient agent name or 'broadcast'"},
|
| 591 |
+
"subject": {"type":"string","description":"Message subject (short)"},
|
| 592 |
+
"body": {"type":"string","description":"Full message body"},
|
| 593 |
+
"priority": {"type":"string","description":"low | normal | high | urgent", "nullable":True},
|
| 594 |
+
"channel": {"type":"string","description":"internal | telegram | browser", "nullable":True},
|
| 595 |
+
}
|
| 596 |
+
output_type = "string"
|
| 597 |
+
def __init__(self, agent_name): super().__init__(); self._agent = agent_name
|
| 598 |
+
def forward(self, to, subject, body, priority="normal", channel="internal"):
|
| 599 |
+
r = _sync_post("relay", "/api/messages", {
|
| 600 |
+
"from": self._agent, "to": to, "subject": subject, "body": body,
|
| 601 |
+
"priority": priority or "normal", "channel": channel or "internal"})
|
| 602 |
+
return f"sent id={r.get('id','?')}" if r else "relay_send failed"
|
| 603 |
+
|
| 604 |
+
class MemorySearchTool(Tool):
    """Query the MEMORY space and return a compact JSON digest of the hits."""
    name = "memory_search"
    description = "Search agent memory across tiers. Always search before answering questions β you may have relevant memories."
    inputs = {
        "query": {"type": "string", "description": "Search query"},
        "tier": {"type": "string", "description": "all | episodic | semantic | procedural | working", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, query, tier="all"):
        import json as _json
        resp = _sync_get("memory", "/api/memories/search",
                         {"q": query, "tier": tier or "all", "limit": 8})
        if not resp:
            return "no results"
        # The endpoint may return either a bare list or {"results": [...]}.
        hits = resp if isinstance(resp, list) else resp.get("results", [])
        digest = [
            {"content": m.get("content", "")[:200],
             "tier": m.get("tier"),
             "tags": m.get("tags")}
            for m in hits[:5]
        ]
        return _json.dumps(digest)
|
| 621 |
+
|
| 622 |
+
class MemoryStoreTool(Tool):
    """Persist a memory into the MEMORY space after a compliance PII scan."""
    name = "memory_store"
    description = "Store a memory in MEMORY space. Content is PII-scanned before writing."
    inputs = {
        "content": {"type": "string", "description": "Memory content to store"},
        "tier": {"type": "string", "description": "episodic | semantic | procedural | working"},
        "tags": {"type": "array", "description": "List of tag strings", "nullable": True},
        "importance": {"type": "integer", "description": "0-10 importance score", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, content, tier="episodic", tags=None, importance=6):
        # Compliance: redact PII before anything reaches storage.
        scrubbed = _compliance_scan_sync(self._agent, content)
        resp = _sync_post("memory", "/api/memories", {
            "content": scrubbed,
            "tier": tier,
            "tags": tags or [],
            "importance": importance or 6,
            "agent": self._agent,
        })
        if resp:
            return f"stored id={resp.get('id','?')}"
        return "memory_store failed"
|
| 640 |
+
|
| 641 |
+
class KanbanListTool(Tool):
    """Read tasks off the KANBAN board, optionally filtered, as a JSON digest."""
    name = "kanban_list"
    description = "List tasks from KANBAN board. Filter by status and/or agent."
    inputs = {
        "status": {"type": "string", "description": "todo | doing | done | blocked | failed", "nullable": True},
        "agent": {"type": "string", "description": "Filter by agent name", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, status=None, agent=None):
        import json as _json
        # Only send the filters that were actually supplied.
        query = {}
        if status:
            query["status"] = status
        if agent:
            query["agent"] = agent
        resp = _sync_get("kanban", "/api/tasks", query) or []
        tasks = resp if isinstance(resp, list) else []
        return _json.dumps([
            {"id": t.get("id"), "title": t.get("title"),
             "status": t.get("status"), "priority": t.get("priority")}
            for t in tasks[:8]
        ])
|
| 659 |
+
|
| 660 |
+
class KanbanMoveTool(Tool):
    """Move a KANBAN task to a new column, optionally attaching run metadata.

    Fix: the file-level TOOL_SPECS registry advertises an optional
    ``llm_tokens`` argument for kanban_move, but this tool did not accept
    it — added as a backward-compatible trailing keyword parameter.
    """
    name = "kanban_move"
    description = "Move a task to a new status on the KANBAN board."
    inputs = {
        "id": {"type": "string", "description": "Task ID"},
        "status": {"type": "string", "description": "todo | doing | done | blocked | failed"},
        "slot_id": {"type": "string", "description": "GPU slot ID if applicable", "nullable": True},
        "react_steps": {"type": "integer", "description": "Number of ReAct steps taken", "nullable": True},
        "llm_tokens": {"type": "integer", "description": "LLM tokens consumed on this task", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, id, status, slot_id=None, react_steps=None, llm_tokens=None):
        # Keep the payload minimal: attach optional metadata only when given.
        payload = {"id": id, "status": status}
        if slot_id:
            payload["slot_id"] = slot_id
        if react_steps:
            payload["react_steps"] = react_steps
        if llm_tokens:
            payload["llm_tokens"] = llm_tokens
        r = _sync_post("kanban", "/api/move", payload)
        return f"moved {id} → {status}" if r else "kanban_move failed"
|
| 677 |
+
|
| 678 |
+
class KanbanCreateTool(Tool):
    """Create a KANBAN task assigned to an agent (defaults to the caller).

    Fix: the file-level TOOL_SPECS registry advertises an optional
    ``deps`` (list of task ids) argument for kanban_create, but this tool
    did not accept it — added as a backward-compatible trailing parameter.
    """
    name = "kanban_create"
    description = "Create a new task on the KANBAN board and assign it to an agent."
    inputs = {
        "title": {"type": "string", "description": "Short task title"},
        "body": {"type": "string", "description": "Full task description with context"},
        "priority": {"type": "string", "description": "low | medium | high | critical"},
        "agent": {"type": "string", "description": "Agent to assign task to"},
        "est_minutes": {"type": "integer", "description": "Estimated completion minutes", "nullable": True},
        "deps": {"type": "array", "description": "List of task ids this task depends on", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, title, body, priority="medium", agent=None, est_minutes=None, deps=None):
        # Unassigned tasks default to the creating agent; type "ai" marks origin.
        payload = {"title": title, "body": body, "priority": priority,
                   "agent": agent or self._agent, "type": "ai"}
        if est_minutes:
            payload["est_minutes"] = est_minutes
        if deps:
            payload["deps"] = deps
        r = _sync_post("kanban", "/api/tasks", payload)
        return f"created task id={r.get('id','?')}" if r else "kanban_create failed"
|
| 696 |
+
|
| 697 |
+
class VaultExecTool(Tool):
    """Execute code inside the VAULT workspace, with an approval gate for
    risky runtimes/patterns and a harness scan on the output.

    Fix: the cwd sanitisation expression ``(cwd or "scratch").strip("/")``
    was evaluated twice in a single line; hoisted into one computation
    (behavior identical).
    """
    name = "vault_exec"
    description = (
        "Execute code in VAULT workspace. Runtimes: python3, bash, node, npm, pip, git. "
        "IMPORTANT: cwd must be one of: code, reports, scratch, shared. "
        "Bash and git commands that are destructive require human approval."
    )
    inputs = {
        "runtime": {"type": "string", "description": "python3 | bash | node | npm | pip | git"},
        "code": {"type": "string", "description": "Code or command to execute"},
        "cwd": {"type": "string", "description": "Working directory: code | reports | scratch | shared", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, runtime, code, cwd="scratch"):
        # "" is accepted to mean the workspace root; anything else unexpected
        # falls back to the sandboxed scratch directory.
        _VALID_CWDS = {"code", "reports", "scratch", "shared", ""}
        candidate = (cwd or "scratch").strip("/")
        safe_cwd = candidate if candidate in _VALID_CWDS else "scratch"
        # Approval gate for risky bash/git (only a code prefix is sent for review).
        if runtime in RISKY_RUNTIMES or any(p in code for p in RISKY_PATTERNS):
            approved, reason = _approve_sync(self._agent, "vault_exec",
                                             {"runtime": runtime, "code": code[:200], "cwd": safe_cwd},
                                             risk="high")
            if not approved:
                return f"vault_exec BLOCKED by approval gate: {reason}"
        r = _sync_post("vault", "/api/exec", {
            "runtime": runtime, "code": code, "cwd": safe_cwd, "timeout": 30})
        if not r:
            return "vault_exec failed"
        # Harness: sanitise the execution output before the LLM re-ingests it.
        out = _harness_scan_sync(self._agent, "vault_exec",
                                 f"exit={r.get('exit_code')} ms={r.get('ms')}\n{r.get('output','')[:500]}")
        return out
|
| 727 |
+
|
| 728 |
+
class VaultReadTool(Tool):
    """Fetch a file's content (truncated to 800 chars) from the VAULT."""
    name = "vault_read"
    description = "Read a file from the VAULT workspace."
    inputs = {"path": {"type": "string", "description": "File path relative to workspace"}}
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, path):
        resp = _sync_get("vault", "/api/read", {"path": path})
        if not resp:
            return "vault_read failed"
        return resp.get("content", "")[:800]
|
| 737 |
+
|
| 738 |
+
class VaultWriteTool(Tool):
    """Write a full file into the VAULT; reports the resulting snapshot id."""
    name = "vault_write"
    description = "Write a file to the VAULT workspace. Always write complete file content."
    inputs = {
        "path": {"type": "string", "description": "File path, e.g. code/script.py"},
        "content": {"type": "string", "description": "Complete file content"},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, path, content):
        resp = _sync_post("vault", "/api/write",
                          {"path": path, "content": content, "agent": self._agent})
        if not resp:
            return "vault_write failed"
        snap_id = resp.get("snapshot", {}).get("id", "?")
        return f"written: {path} snap={snap_id}"
|
| 750 |
+
|
| 751 |
+
class ForgeSearchTool(Tool):
    """Look up skills/tools in the FORGE registry; returns a JSON digest."""
    name = "forge_search"
    description = "Search for skills and tools in the FORGE skill registry."
    inputs = {"query": {"type": "string", "description": "Search query"}}
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, query):
        import json as _json
        resp = _sync_get("forge", "/api/v1/skills", {"q": query, "limit": 5})
        # Endpoint may answer with a bare list, {"skills": [...]}, or nothing.
        if isinstance(resp, list):
            found = resp
        elif resp:
            found = resp.get("skills", [])
        else:
            found = []
        return _json.dumps([
            {"name": s.get("name"), "description": s.get("description", "")[:100]}
            for s in found[:5]
        ])
|
| 762 |
+
|
| 763 |
+
class SlotReserveTool(Tool):
    """Reserve the single RTX 5090 GPU slot via NEXUS.

    Fix: the "active" branch indexed ``r['slot_id']`` directly, which would
    raise KeyError on a malformed reply while every other field in this
    block is read with ``.get`` — made consistent and non-raising.
    """
    name = "slot_reserve"
    description = "Reserve the RTX 5090 GPU slot before a long task. Returns slot_id or queue position."
    inputs = {
        "task_id": {"type": "string", "description": "Task identifier"},
        "est_minutes": {"type": "integer", "description": "Estimated minutes needed (1-60)"},
        "priority": {"type": "integer", "description": "Priority 1=critical 5=normal 10=low", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, task_id, est_minutes=5, priority=5):
        r = _sync_post("nexus", "/api/slot/reserve", {
            "agent": self._agent, "task_id": task_id,
            "est_minutes": est_minutes, "priority": priority or 5})
        if not r:
            return "slot_reserve failed"
        status = r.get("status", "unknown")
        if status == "active":
            # .get so a malformed reply cannot raise mid-tool-call
            return f"slot ACTIVE slot_id={r.get('slot_id','?')} expires_in={est_minutes}min"
        elif status == "queued":
            return f"slot QUEUED position={r.get('queue_position')} eta={r.get('eta_seconds',0)}s holder={r.get('current_holder','?')} β wait or use local_cpu"
        return f"slot status={status}"
|
| 784 |
+
|
| 785 |
+
class SlotReleaseTool(Tool):
    """Give the GPU slot back to NEXUS so queued agents can proceed."""
    name = "slot_release"
    description = "Release the GPU slot when done. Always call after finishing to unblock other agents."
    inputs = {"slot_id": {"type": "string", "description": "Slot ID from slot_reserve"}}
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, slot_id):
        resp = _sync_post("nexus", "/api/slot/release", {"slot_id": slot_id})
        if resp and resp.get("released"):
            return f"slot released (held {resp.get('held_seconds',0)}s)"
        return "slot_release failed"
|
| 794 |
+
|
| 795 |
+
class SlotStatusTool(Tool):
    """Report the current GPU slot holder and queue.

    Fix: ``active['agent']`` and ``q['agent']`` were direct indexes that
    would raise KeyError on a malformed reply; switched to ``.get`` for
    consistency with the rest of the response handling.
    """
    name = "slot_status"
    description = "Check who holds the GPU slot and current queue. Use before slot_reserve."
    inputs = {}
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self):
        r = _sync_get("nexus", "/api/slot/status", {})
        if not r:
            return "slot_status failed"
        active = r.get("active")
        queue = r.get("queue", [])
        if active:
            # Remaining time derived from the holder's absolute expiry timestamp.
            remaining = int(active.get("expires_at", 0) - time.time())
            result = f"OCCUPIED by {active.get('agent','?')} expires_in={remaining}s"
        else:
            result = "FREE"
        if queue:
            result += f" | Queue: {[q.get('agent') for q in queue]}"
        return result
|
| 809 |
+
|
| 810 |
+
class TriggerAgentTool(Tool):
    """Ping PULSE to wake a specific agent with a task right away."""
    name = "trigger_agent"
    description = "Wake another agent immediately with a task. Always call after delegate to ensure pickup."
    inputs = {
        "agent": {"type": "string", "description": "Agent name to wake"},
        "content": {"type": "string", "description": "Task content or context for the agent"},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, agent, content=""):
        note = content or f"Task delegated by {self._agent}"
        resp = _sync_post("pulse", f"/api/trigger/{agent}",
                          {"from": self._agent, "content": note})
        if resp:
            return f"triggered {agent}"
        # A failed direct trigger still gets picked up on the next heartbeat.
        return f"trigger queued for {agent} (heartbeat pickup)"
|
| 823 |
+
|
| 824 |
+
class WebSearchTool(Tool):
    """Query the Brave Search API and format the top hits for the LLM."""
    name = "web_search"
    description = "Search the web via Brave Search API. Returns titles, URLs and snippets."
    inputs = {
        "query": {"type": "string", "description": "Search query"},
        "count": {"type": "integer", "description": "Number of results 1-10, default 5", "nullable": True},
    }
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, query, count=5):
        if not BRAVE_API_KEY:
            return "web_search unavailable: BRAVE_API_KEY not set"
        try:
            resp = httpx.get(
                "https://api.search.brave.com/res/v1/web/search",
                params={"q": query, "count": min(count or 5, 10), "text_decorations": False},
                headers={"Accept": "application/json",
                         "X-Subscription-Token": BRAVE_API_KEY},
                timeout=8)
            resp.raise_for_status()
            hits = resp.json().get("web", {}).get("results", [])
            if not hits:
                return "no results"
            formatted = []
            for idx, hit in enumerate(hits[:count or 5], 1):
                formatted.append(
                    f"{idx}. {hit.get('title','?')} β {hit.get('url','')}\n   {hit.get('description','')[:200]}")
            return "\n\n".join(formatted)
        except Exception as e:
            return f"web_search error: {e}"
|
| 849 |
+
|
| 850 |
+
class FetchUrlTool(Tool):
    """Download a page and return whitespace-collapsed text, capped at 5000 chars."""
    name = "fetch_url"
    description = "Fetch a URL and return stripped text (5000 char limit). Use after web_search."
    inputs = {"url": {"type": "string", "description": "Full URL to fetch"}}
    output_type = "string"

    def __init__(self, agent_name):
        super().__init__()
        self._agent = agent_name

    def forward(self, url):
        try:
            resp = httpx.get(url, headers={"User-Agent": "FORGE-Agent/1.0"},
                             timeout=10, follow_redirects=True)
            resp.raise_for_status()
            body = resp.text
            # Strip markup only for HTML payloads; other content types pass through.
            if "html" in resp.headers.get("content-type", ""):
                body = re.sub(r"<[^>]+>", " ", body)
            body = re.sub(r"\s{2,}", " ", body).strip()
            if len(body) > 5000:
                return body[:5000] + "…[truncated]"
            return body[:5000]
        except Exception as e:
            return f"fetch_url error: {e}"
|
| 867 |
+
|
| 868 |
+
# ββ FORGE OpenAI-compatible model (NEXUS backend) βββββββββββββββββββ
|
| 869 |
|
| 870 |
+
def build_forge_model(cost_mode: str = "balanced") -> object | None:
    """Build smolagents model pointing at NEXUS (OpenAI-compatible).

    cost_mode selects the backing model tier; unknown values fall back to
    the balanced auto-router. Returns None if smolagents is unavailable or
    the model cannot be constructed.
    """
    if not SMOLAGENTS_OK:
        return None
    tier_map = {"cheap": "nexus-fast", "balanced": "nexus-auto", "best": "nexus-best"}
    chosen = tier_map.get(cost_mode, "nexus-auto")
    base_url = SPACES.get("nexus", "")
    try:
        from smolagents import OpenAIServerModel
        return OpenAIServerModel(
            model_id=chosen,
            api_base=base_url + "/v1",
            api_key=os.environ.get("NEXUS_API_KEY", "forge-internal"),
        )
    except Exception as e:
        log.warning(f"[SMOLAGENTS] OpenAIServerModel failed: {e}")
        return None
|
| 890 |
|
| 891 |
+
def build_agent_tools(agent_name: str) -> list:
    """Instantiate all FORGE tools for the given agent."""
    tool_classes = (
        RelaySendTool,
        MemorySearchTool,
        MemoryStoreTool,
        KanbanListTool,
        KanbanMoveTool,
        KanbanCreateTool,
        VaultExecTool,
        VaultReadTool,
        VaultWriteTool,
        ForgeSearchTool,
        SlotReserveTool,
        SlotReleaseTool,
        SlotStatusTool,
        TriggerAgentTool,
        WebSearchTool,
        FetchUrlTool,
    )
    return [cls(agent_name) for cls in tool_classes]
|
| 911 |
|
| 912 |
+
# ββ Step callback β trace + harness ββββββββββββββββββββββββββββββββ
|
|
|
|
| 913 |
|
| 914 |
+
def make_step_callback(agent_name: str, trace: dict):
    """Returns a step callback that emits trace events and scans tool outputs.

    The callback (a) runs the HARNESS scan on each tool observation before
    the LLM re-ingests it, and (b) appends a compact step record to
    ``trace["steps"]`` and pushes it to the live feed and TRACE.

    Fix: the harness branch accessed ``step_log.tool_calls`` directly while
    the trace branch used ``getattr(..., None)`` defensively — the direct
    access could raise AttributeError on steps without tool calls. The
    attribute is now fetched defensively once and reused.
    """
    from smolagents.memory import ActionStep

    def _callback(step_log, agent=None):
        if not isinstance(step_log, ActionStep):
            return
        # tool_calls may be absent on some steps; fetch defensively once.
        tool_calls = getattr(step_log, "tool_calls", None)
        # Harness: scan tool output before LLM re-ingests
        obs = getattr(step_log, "observations", None) or ""
        if obs and HARNESS_URL:
            tool_name = ""
            if tool_calls:
                tool_name = tool_calls[0].name if hasattr(tool_calls[0], "name") else ""
            sanitised = _harness_scan_sync(agent_name, tool_name, str(obs))
            if sanitised != str(obs):
                # Replace in place so the model only ever sees the sanitised text.
                step_log.observations = sanitised
        # Trace (observations re-read so the sanitised value is recorded)
        step_info = {
            "step": getattr(step_log, "step_number", len(trace["steps"])),
            "thought": str(getattr(step_log, "model_output_message", ""))[:200],
            "tool": tool_calls[0].name if tool_calls else "",
            "obs": str(getattr(step_log, "observations", ""))[:200],
            "error": str(step_log.error) if getattr(step_log, "error", None) else "",
        }
        trace["steps"].append(step_info)
        push_live({"type": "step", "agent": agent_name, **step_info})
        emit_trace(agent_name, "react_step", step_info,
                   status="error" if step_info["error"] else "ok")
    return _callback
|
| 943 |
|
| 944 |
+
# ββ smolagents CodeAgent react_loop ββββββββββββββββββββββββββββββββ
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 945 |
|
| 946 |
def _vault_snippet(path: str, limit: int) -> str:
    """Best-effort read of a VAULT file, truncated to *limit* chars; '' on any failure."""
    try:
        resp = _sync_get("vault", "/api/read", {"path": path})
        if resp:
            return resp.get("content", "")[:limit]
    except Exception:
        pass
    return ""


def _forge_skills_context(agent_name: str) -> str:
    """Fetch up to five FORGE skills for *agent_name*, formatted as prompt context ('' on failure)."""
    try:
        skills = _sync_get("forge", "/api/v1/skills", {"agent": agent_name, "limit": 5})
        if skills:
            items = skills if isinstance(skills, list) else skills.get("skills", [])
            return "AVAILABLE SKILLS:\n" + "\n".join(
                f" - {s.get('name')}: {s.get('description','')[:80]}" for s in items[:5])
    except Exception:
        pass
    return ""


async def react_loop(agent: dict, trigger_type: str, trigger_content: str) -> dict:
    """
    Run a smolagents agent for this agent tick.

    The agent composes FORGE tool calls over multiple ReAct steps; the run
    itself is synchronous and is pushed onto a worker thread. The duplicated
    best-effort VAULT reads (soul.md / user.md) and the skills fetch were
    factored into the private helpers above — behavior unchanged.

    Returns the trace dict: agent, trigger, started, steps, result, ok, ms.
    """
    name = agent["name"]
    cost_mode = agent.get("cost_mode", "balanced")
    max_steps = agent.get("max_react_steps", REACT_MAX)

    trace = {"agent": name, "trigger": trigger_type,
             "started": int(time.time()), "steps": [], "result": "", "ok": True}

    if not SMOLAGENTS_OK:
        trace["result"] = "smolagents not installed"
        trace["ok"] = False
        return trace

    # Fetch persona from agent-prompts (cached); persona may override max_steps.
    persona_data = get_agent_persona(name)
    system_prompt = persona_data.get("system_prompt", agent.get("persona", "You are a helpful AI agent."))
    max_steps = persona_data.get("max_steps", max_steps)

    # Load soul.md and user.md for context injection (best-effort, truncated).
    soul_ctx = _vault_snippet("soul.md", 500)
    user_ctx = _vault_snippet("user.md", 300)

    # Auto-load skills from FORGE at ReAct start.
    skills_ctx = _forge_skills_context(name)

    full_system = "\n\n".join(filter(None, [system_prompt, soul_ctx, skills_ctx]))

    task = (
        f"TRIGGER: {trigger_type}\n"
        f"CONTEXT: {trigger_content}\n"
        + (f"OPERATOR: {user_ctx}\n" if user_ctx else "")
        + f"UTC: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M')}\n"
        "Execute your assigned task using the available tools."
    )

    push_live({"type": "react_start", "agent": name, "trigger": trigger_type})

    def _run_agent():
        """Sync function β runs in thread pool via asyncio.to_thread."""
        model = build_forge_model(cost_mode)
        if model is None:
            return {"ok": False, "result": "NEXUS model unavailable", "steps": []}

        tools = build_agent_tools(name)

        try:
            from smolagents import ToolCallingAgent
            agent_obj = ToolCallingAgent(
                tools=tools,
                model=model,
                max_steps=max_steps,
                instructions=full_system,
                verbosity_level=LogLevel.WARNING,
                step_callbacks=[make_step_callback(name, trace)],
                name=name,
            )
        except Exception as e:
            log.error(f"[SMOLAGENTS] agent init failed: {e}")
            return {"ok": False, "result": str(e), "steps": []}

        try:
            result = agent_obj.run(task, reset=True)
            return {"ok": True, "result": str(result)[:500], "steps": trace["steps"]}
        except Exception as e:
            log.error(f"[SMOLAGENTS] agent.run failed: {e}")
            return {"ok": False, "result": str(e), "steps": trace["steps"]}

    try:
        outcome = await asyncio.to_thread(_run_agent)
    except Exception as e:
        outcome = {"ok": False, "result": str(e), "steps": []}

    trace["ok"] = outcome["ok"]
    trace["result"] = outcome["result"]
    trace["ms"] = int((time.time() - trace["started"]) * 1000)

    # Emit final trace to TRACE + LEARN
    emit_trace(name, "react_complete",
               {"result": trace["result"], "steps": len(trace["steps"]),
                "trigger": trigger_type, "ms": trace["ms"]},
               status="ok" if trace["ok"] else "error")

    push_live({"type": "react_done", "agent": name,
               "ok": trace["ok"], "ms": trace["ms"], "steps": len(trace["steps"])})

    return trace
|
| 1053 |
|
| 1054 |
# ββ Heartbeat engine βββββββββββββββββββββββββββββββββββββββββββββββ
# NOTE(review): the previous revision left a duplicated section header and a
# second AsyncIOScheduler instantiation (a merge artifact); the duplicate
# rebound `scheduler`, discarding the first instance. Collapsed to one.
scheduler = AsyncIOScheduler(timezone="UTC")
|
| 1058 |
|
| 1059 |
async def agent_tick(agent_name: str, trigger_type: str = "heartbeat", content: str = ""):
|
| 1060 |
agents = load_json(AGENTS_FILE, [])
|