sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/agno/approval/decorator.py | """The @approval decorator for marking tools as requiring approval."""
from __future__ import annotations
from typing import Any, Callable, Optional, Union, overload
from agno.approval.types import ApprovalType
# Sentinel attribute stamped on raw callables when @approval is below @tool
_APPROVAL_ATTR = "_agno_approval_type"
@overload
def approval(func_or_type: Callable) -> Any: ...
@overload
def approval(*, type: Union[str, ApprovalType] = ApprovalType.required) -> Callable: ...
def approval(
    func_or_type: Optional[Callable] = None,
    *,
    type: Union[str, ApprovalType] = ApprovalType.required,
) -> Any:
    """Mark a tool as requiring approval.

    Usable as ``@approval``, ``@approval()``, or ``@approval(type="audit")``,
    and composes with ``@tool()`` in either order:

    * on top of ``@tool`` (target is already a ``Function``): sets
      ``approval_type`` directly on the ``Function``;
    * below ``@tool`` (target is a raw callable): stamps a sentinel attribute
      that ``@tool`` detects during its own processing.

    Args:
        type: ``"required"`` (default) creates a blocking approval that must be
            resolved before the run continues; ``"audit"`` creates a
            non-blocking audit record after the HITL interaction resolves.
    """
    from agno.tools.function import Function

    approval_type_str = type.value if isinstance(type, ApprovalType) else type
    if approval_type_str not in ("required", "audit"):
        raise ValueError(f"Invalid approval type: {approval_type_str!r}. Must be 'required' or 'audit'.")

    def _decorate(target: Any) -> Any:
        # Case 1: @approval sits above @tool -- the Function object already exists.
        if isinstance(target, Function):
            target.approval_type = approval_type_str
            has_hitl_flag = any(
                [target.requires_confirmation, target.requires_user_input, target.external_execution]
            )
            if approval_type_str == "audit" and not has_hitl_flag:
                raise ValueError(
                    "@approval(type='audit') requires at least one HITL flag "
                    "('requires_confirmation', 'requires_user_input', or 'external_execution') "
                    "to be set on @tool()."
                )
            if approval_type_str == "required" and not has_hitl_flag:
                # Blocking approvals need some pause mechanism; default to confirmation.
                target.requires_confirmation = True
            return target
        # Case 2: @approval sits below @tool (or is standalone) -- stamp the
        # sentinel attribute for @tool to pick up later.
        if callable(target):
            setattr(target, _APPROVAL_ATTR, approval_type_str)
            return target
        raise TypeError(f"@approval must be applied to a callable or Function, got {target.__class__.__name__}")

    # Bare usage (@approval with no parens): decorate immediately.
    # Function instances may not be plain callables, hence the explicit isinstance check.
    if func_or_type is not None and (callable(func_or_type) or isinstance(func_or_type, Function)):
        return _decorate(func_or_type)
    # Parenthesized usage (@approval() / @approval(type=...)): return the decorator.
    return _decorate
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/approval/decorator.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/approval/types.py | from enum import Enum
class ApprovalType(str, Enum):
    """Approval types for the @approval decorator.

    Members are str-valued (``str`` mixin), so they compare equal to and
    serialize as plain strings.

    required: Blocking approval. The run cannot continue until the approval
        is resolved via the approvals API.
    audit: Non-blocking audit trail. An approval record is created after
        the HITL interaction resolves, for compliance/logging purposes.
    """
    # Blocking: the run pauses until this approval is resolved.
    required = "required"
    # Non-blocking: recorded after the HITL interaction resolves.
    audit = "audit"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/approval/types.py",
"license": "Apache License 2.0",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/db/schemas/approval.py | from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from agno.utils.dttm import now_epoch_s, to_epoch_s
@dataclass
class Approval:
    """Model for a human approval request created when a tool with approval_type set pauses a run.

    Timestamps are normalized to epoch seconds in ``__post_init__``.
    ``to_dict`` / ``from_dict`` are driven by ``dataclasses.fields`` so the
    serialized key set always matches the declared fields (the original
    hand-maintained both lists, which could silently drift).
    """

    id: str
    run_id: str
    session_id: str
    status: str = "pending"  # pending | approved | rejected | expired | cancelled
    source_type: str = "agent"  # agent | team | workflow
    approval_type: Optional[str] = None  # required | audit
    pause_type: str = "confirmation"  # confirmation | user_input | external_execution
    tool_name: Optional[str] = None
    tool_args: Optional[Dict[str, Any]] = None
    expires_at: Optional[int] = None
    agent_id: Optional[str] = None
    team_id: Optional[str] = None
    workflow_id: Optional[str] = None
    user_id: Optional[str] = None
    schedule_id: Optional[str] = None
    schedule_run_id: Optional[str] = None
    source_name: Optional[str] = None
    requirements: Optional[List[Dict[str, Any]]] = None
    context: Optional[Dict[str, Any]] = None
    resolution_data: Optional[Dict[str, Any]] = None
    resolved_by: Optional[str] = None
    resolved_at: Optional[int] = None
    created_at: Optional[int] = None
    updated_at: Optional[int] = None
    # Run status from the associated run. Updated when run completes/errors/cancels.
    # Values: "PAUSED", "COMPLETED", "RUNNING", "ERROR", "CANCELLED", or None.
    run_status: Optional[str] = None

    def __post_init__(self) -> None:
        # Normalize all timestamps to integer epoch seconds; created_at
        # defaults to "now" when not supplied.
        self.created_at = now_epoch_s() if self.created_at is None else to_epoch_s(self.created_at)
        if self.updated_at is not None:
            self.updated_at = to_epoch_s(self.updated_at)
        if self.resolved_at is not None:
            self.resolved_at = to_epoch_s(self.resolved_at)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to dict. Preserves None values (important for DB updates)."""
        from dataclasses import fields

        # Shallow, declaration-ordered copy. dataclasses.asdict is avoided on
        # purpose: it deep-copies nested containers, which callers don't expect.
        return {f.name: getattr(self, f.name) for f in fields(self)}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Approval":
        """Build an Approval from a dict, silently dropping unknown keys."""
        from dataclasses import fields

        valid_keys = {f.name for f in fields(cls)}
        return cls(**{k: v for k, v in data.items() if k in valid_keys})
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/db/schemas/approval.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/routers/approvals/router.py | """Approval API router -- list, resolve, and delete human approvals."""
import asyncio
import time
from typing import Any, Dict, Literal, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from agno.os.routers.approvals.schema import (
ApprovalCountResponse,
ApprovalResolve,
ApprovalResponse,
ApprovalStatusResponse,
)
from agno.os.schema import PaginatedResponse, PaginationInfo
def get_approval_router(os_db: Any, settings: Any) -> APIRouter:
    """Factory that creates and returns the approval router.

    Args:
        os_db: The AgentOS-level DB adapter (must support approval methods).
        settings: AgnoAPISettings instance.

    Returns:
        An APIRouter with all approval endpoints attached.
    """
    from agno.os.auth import get_authentication_dependency
    router = APIRouter(tags=["Approvals"])
    auth_dependency = get_authentication_dependency(settings)
    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------
    # Dispatch a DB-adapter method by name, awaiting coroutines transparently.
    # A missing method or a NotImplementedError both mean the configured DB
    # has no approval support -> surfaced uniformly as HTTP 503.
    async def _db_call(method_name: str, *args: Any, **kwargs: Any) -> Any:
        fn = getattr(os_db, method_name, None)
        if fn is None:
            raise HTTPException(status_code=503, detail="Approvals not supported by the configured database")
        try:
            if asyncio.iscoroutinefunction(fn):
                return await fn(*args, **kwargs)
            return fn(*args, **kwargs)
        except NotImplementedError:
            raise HTTPException(status_code=503, detail="Approvals not supported by the configured database")
    # ------------------------------------------------------------------
    # Endpoints
    # ------------------------------------------------------------------
    # NOTE: registration order matters for FastAPI path matching -- the literal
    # paths /approvals/count and /approvals/{approval_id}/status are registered
    # before /approvals/{approval_id} so they are not captured by the path param.
    # GET /approvals -- paginated listing with optional filters; all filters are
    # forwarded verbatim to the DB adapter.
    @router.get("/approvals", response_model=PaginatedResponse[ApprovalResponse])
    async def list_approvals(
        status: Optional[Literal["pending", "approved", "rejected", "expired", "cancelled"]] = Query(None),
        source_type: Optional[str] = Query(None),
        approval_type: Optional[Literal["required", "audit"]] = Query(None),
        pause_type: Optional[str] = Query(None),
        agent_id: Optional[str] = Query(None),
        team_id: Optional[str] = Query(None),
        workflow_id: Optional[str] = Query(None),
        user_id: Optional[str] = Query(None),
        schedule_id: Optional[str] = Query(None),
        run_id: Optional[str] = Query(None),
        limit: int = Query(100, ge=1, le=1000),
        page: int = Query(1, ge=1),
        _: bool = Depends(auth_dependency),
    ) -> PaginatedResponse[ApprovalResponse]:
        approvals, total_count = await _db_call(
            "get_approvals",
            status=status,
            source_type=source_type,
            approval_type=approval_type,
            pause_type=pause_type,
            agent_id=agent_id,
            team_id=team_id,
            workflow_id=workflow_id,
            user_id=user_id,
            schedule_id=schedule_id,
            run_id=run_id,
            limit=limit,
            page=page,
        )
        # Ceiling division; 0 pages when there are no records at all.
        total_pages = (total_count + limit - 1) // limit if total_count > 0 else 0
        return PaginatedResponse(
            data=approvals,
            meta=PaginationInfo(
                page=page,
                limit=limit,
                total_pages=total_pages,
                total_count=total_count,
            ),
        )
    # GET /approvals/count -- lightweight pending-count, optionally per user.
    @router.get("/approvals/count", response_model=ApprovalCountResponse)
    async def get_approval_count(
        user_id: Optional[str] = Query(None),
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, int]:
        count = await _db_call("get_pending_approval_count", user_id=user_id)
        return {"count": count}
    # GET /approvals/{id}/status -- slim polling endpoint (status + resolution only).
    @router.get("/approvals/{approval_id}/status", response_model=ApprovalStatusResponse)
    async def get_approval_status(
        approval_id: str,
        _: bool = Depends(auth_dependency),
    ) -> ApprovalStatusResponse:
        approval = await _db_call("get_approval", approval_id)
        if approval is None:
            raise HTTPException(status_code=404, detail="Approval not found")
        return ApprovalStatusResponse(
            approval_id=approval_id,
            status=approval.get("status", "unknown"),
            run_id=approval.get("run_id", ""),
            resolved_at=approval.get("resolved_at"),
            resolved_by=approval.get("resolved_by"),
        )
    # GET /approvals/{id} -- full approval record.
    @router.get("/approvals/{approval_id}", response_model=ApprovalResponse)
    async def get_approval(
        approval_id: str,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        approval = await _db_call("get_approval", approval_id)
        if approval is None:
            raise HTTPException(status_code=404, detail="Approval not found")
        return approval
    # POST /approvals/{id}/resolve -- approve/reject a pending approval.
    # Uses expected_status="pending" as a compare-and-set so concurrent
    # resolutions cannot both win.
    @router.post("/approvals/{approval_id}/resolve", response_model=ApprovalResponse)
    async def resolve_approval(
        approval_id: str,
        body: ApprovalResolve,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        now = int(time.time())
        update_kwargs: Dict[str, Any] = {
            "status": body.status,
            "resolved_by": body.resolved_by,
            "resolved_at": now,
        }
        if body.resolution_data is not None:
            update_kwargs["resolution_data"] = body.resolution_data
        result = await _db_call(
            "update_approval",
            approval_id,
            expected_status="pending",
            **update_kwargs,
        )
        if result is None:
            # Either the approval doesn't exist or it was already resolved;
            # re-read to distinguish 404 from 409.
            existing = await _db_call("get_approval", approval_id)
            if existing is None:
                raise HTTPException(status_code=404, detail="Approval not found")
            raise HTTPException(
                status_code=409,
                detail=f"Approval is already '{existing.get('status')}' and cannot be resolved",
            )
        return result
    # DELETE /approvals/{id} -- 404 if absent, 204 on success.
    @router.delete("/approvals/{approval_id}", status_code=204)
    async def delete_approval(
        approval_id: str,
        _: bool = Depends(auth_dependency),
    ) -> None:
        existing = await _db_call("get_approval", approval_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Approval not found")
        deleted = await _db_call("delete_approval", approval_id)
        if not deleted:
            raise HTTPException(status_code=500, detail="Failed to delete approval")
    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/approvals/router.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/approvals/schema.py | """Pydantic request/response models for the approvals API."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from agno.run.base import RunStatus
class ApprovalResolve(BaseModel):
    """Request body for resolving (approve/reject) an approval.

    Only the two caller-settable terminal states are accepted; the regex
    pattern on ``status`` rejects anything else at validation time.
    """
    status: str = Field(..., pattern="^(approved|rejected)$")
    # Optional identifier of who resolved the approval (free-form, capped at 255 chars).
    resolved_by: Optional[str] = Field(default=None, max_length=255)
    # Optional payload carried along with the resolution (e.g. user-input values).
    resolution_data: Optional[Dict[str, Any]] = Field(default=None)
class ApprovalResponse(BaseModel):
    """Response model for a single approval.

    Mirrors the persisted approval record; all optional fields default to
    None so partial records from the DB validate cleanly.
    """
    # Core identity of the approval and the run it belongs to.
    id: str
    run_id: str
    session_id: str
    status: str
    source_type: str
    # Classification of the approval and the kind of pause that produced it.
    approval_type: Optional[str] = None
    pause_type: Optional[str] = None
    # Tool that triggered the approval, and its call arguments.
    tool_name: Optional[str] = None
    tool_args: Optional[Dict[str, Any]] = None
    expires_at: Optional[int] = None
    # Attribution: which agent/team/workflow/user/schedule the run belongs to.
    agent_id: Optional[str] = None
    team_id: Optional[str] = None
    workflow_id: Optional[str] = None
    user_id: Optional[str] = None
    schedule_id: Optional[str] = None
    schedule_run_id: Optional[str] = None
    source_name: Optional[str] = None
    # Serialized HITL requirements and free-form UI context.
    requirements: Optional[List[Dict[str, Any]]] = None
    context: Optional[Dict[str, Any]] = None
    # Resolution details (who/when/with-what) and record timestamps (epoch seconds).
    resolution_data: Optional[Dict[str, Any]] = None
    resolved_by: Optional[str] = None
    resolved_at: Optional[int] = None
    created_at: Optional[int] = None
    updated_at: Optional[int] = None
    # Run status from the associated run, fetched at response time (not stored in DB).
    # Used by the UI to determine if the run has already been continued.
    run_status: Optional[RunStatus] = None
class ApprovalListResponse(BaseModel):
    """Response model for listing approvals with pagination."""
    approvals: List[ApprovalResponse]
    # Total matching records (across all pages), plus the requested page window.
    total: int
    limit: int
    page: int
class ApprovalCountResponse(BaseModel):
    """Response model for pending approval count."""
    # Number of approvals currently in 'pending' status.
    count: int
class ApprovalStatusResponse(BaseModel):
    """Lightweight response model for polling approval status.

    Intentionally omits the heavy fields (requirements, context, tool_args)
    so clients can poll cheaply.
    """
    approval_id: str
    status: str
    run_id: str
    # Set once the approval leaves 'pending'; epoch seconds / resolver id.
    resolved_at: Optional[int] = None
    resolved_by: Optional[str] = None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/approvals/schema.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/run/approval.py | """Approval record creation and resolution gating for HITL tool runs."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from uuid import uuid4
from agno.run.base import RunStatus
from agno.utils.dttm import now_epoch_s
from agno.utils.log import log_debug, log_warning
def _get_pause_type(tool_execution: Any) -> str:
"""Determine the pause type from a tool execution's HITL flags."""
if getattr(tool_execution, "requires_user_input", False):
return "user_input"
if getattr(tool_execution, "external_execution_required", False):
return "external_execution"
return "confirmation"
def _get_first_approval_tool(tools: Optional[List[Any]], requirements: Optional[List[Any]] = None) -> Any:
"""Return the first tool execution that has approval_type set."""
if tools:
for tool in tools:
if getattr(tool, "approval_type", None) is not None:
return tool
if requirements:
for req in requirements:
te = getattr(req, "tool_execution", None)
if te and getattr(te, "approval_type", None) is not None:
return te
return None
def _has_approval_requirement(tools: Optional[List[Any]], requirements: Optional[List[Any]] = None) -> bool:
"""Check if any paused tool execution has approval_type set.
Checks both run_response.tools (agent-level) and run_response.requirements
(team-level, where member tools are propagated via requirements).
"""
tool = _get_first_approval_tool(tools, requirements)
return tool is not None and getattr(tool, "approval_type", None) == "required"
def _stamp_approval_id_on_tools(
tools: Optional[List[Any]], requirements: Optional[List[Any]], approval_id: str
) -> None:
"""Stamp approval_id on every tool that has approval_type set."""
if tools:
for tool in tools:
if getattr(tool, "approval_type", None) is not None:
tool.approval_id = approval_id
if requirements:
for req in requirements:
te = getattr(req, "tool_execution", None)
if te and getattr(te, "approval_type", None) is not None:
te.approval_id = approval_id
def _build_approval_dict(
    run_response: Any,
    agent_id: Optional[str] = None,
    agent_name: Optional[str] = None,
    team_id: Optional[str] = None,
    team_name: Optional[str] = None,
    workflow_id: Optional[str] = None,
    workflow_name: Optional[str] = None,
    user_id: Optional[str] = None,
    schedule_id: Optional[str] = None,
    schedule_run_id: Optional[str] = None,
) -> Dict[str, Any]:
    """Build the approval record dict from run response and context.

    Source attribution precedence: team > workflow > agent. The FIRST tool
    carrying approval_type supplies pause_type / tool_name / tool_args for
    the record; the names of all approval-flagged tools go into context for
    UI display. Always produces a 'pending', approval_type='required' record.
    """
    # Determine source type (team wins over workflow, both over the default agent)
    source_type = "agent"
    source_name = agent_name
    if team_id:
        source_type = "team"
        source_name = team_name
    elif workflow_id:
        source_type = "workflow"
        source_name = workflow_name
    # Serialize requirements (objects with to_dict, or already-plain dicts;
    # anything else is silently dropped)
    requirements_data: Optional[List[Dict[str, Any]]] = None
    if hasattr(run_response, "requirements") and run_response.requirements:
        requirements_data = []
        for req in run_response.requirements:
            if hasattr(req, "to_dict"):
                requirements_data.append(req.to_dict())
            elif isinstance(req, dict):
                requirements_data.append(req)
    # Find the first approval tool to extract pause_type, tool_name, tool_args
    tools = getattr(run_response, "tools", None)
    requirements = getattr(run_response, "requirements", None)
    first_tool = _get_first_approval_tool(tools, requirements)
    pause_type = _get_pause_type(first_tool) if first_tool else "confirmation"
    tool_name = getattr(first_tool, "tool_name", None) if first_tool else None
    tool_args = getattr(first_tool, "tool_args", None) if first_tool else None
    # Build context with tool names for UI display.
    tool_names: List[str] = []
    if hasattr(run_response, "requirements") and run_response.requirements:
        for req in run_response.requirements:
            te = getattr(req, "tool_execution", None)
            if te and getattr(te, "approval_type", None) is not None:
                name = getattr(te, "tool_name", None)
                if name:
                    tool_names.append(name)
    # Fallback: extract from run_response.tools
    # NOTE(review): unlike the requirements branch, this collects ALL tool
    # names, not only approval-flagged ones -- confirm that is intended.
    if not tool_names and tools:
        for t in tools:
            if hasattr(t, "tool_name") and t.tool_name:
                tool_names.append(t.tool_name)
    context: Dict[str, Any] = {}
    if tool_names:
        context["tool_names"] = tool_names
    if source_name:
        context["source_name"] = source_name
    return {
        "id": str(uuid4()),
        # Defensive fallbacks so the record is always keyed/queryable even if
        # the run response is missing identifiers.
        "run_id": getattr(run_response, "run_id", None) or str(uuid4()),
        "session_id": getattr(run_response, "session_id", None) or "",
        "status": "pending",
        "approval_type": "required",
        "pause_type": pause_type,
        "tool_name": tool_name,
        "tool_args": tool_args,
        "source_type": source_type,
        "agent_id": agent_id,
        "team_id": team_id,
        "workflow_id": workflow_id,
        "user_id": user_id,
        "schedule_id": schedule_id,
        "schedule_run_id": schedule_run_id,
        "source_name": source_name,
        "requirements": requirements_data,
        "context": context if context else None,
        "resolved_by": None,
        "resolved_at": None,
        "created_at": now_epoch_s(),
        "updated_at": None,
        # Run status is PAUSED when the approval is created (run is paused waiting for approval)
        "run_status": RunStatus.paused.value,
    }
def create_approval_from_pause(
    db: Any,
    run_response: Any,
    agent_id: Optional[str] = None,
    agent_name: Optional[str] = None,
    team_id: Optional[str] = None,
    team_name: Optional[str] = None,
    workflow_id: Optional[str] = None,
    workflow_name: Optional[str] = None,
    user_id: Optional[str] = None,
    schedule_id: Optional[str] = None,
    schedule_run_id: Optional[str] = None,
) -> Optional[str]:
    """Create an approval record when a run pauses for a tool with approval_type set.

    Returns the approval_id if a record was created, None otherwise. Silently
    returns None when there is no DB, no blocking ('required') approval on the
    paused tools, or the DB has no approval support; other DB errors are
    logged, never raised.
    """
    if db is None:
        return None
    tools = getattr(run_response, "tools", None)
    requirements = getattr(run_response, "requirements", None)
    if not _has_approval_requirement(tools, requirements):
        return None
    try:
        record = _build_approval_dict(
            run_response,
            agent_id=agent_id,
            agent_name=agent_name,
            team_id=team_id,
            team_name=team_name,
            workflow_id=workflow_id,
            workflow_name=workflow_name,
            user_id=user_id,
            schedule_id=schedule_id,
            schedule_run_id=schedule_run_id,
        )
        db.create_approval(record)
        approval_id: str = record["id"]
        # Make the approval discoverable from each paused tool execution.
        _stamp_approval_id_on_tools(tools, requirements, approval_id)
        log_debug(f"Created approval {approval_id} for run {record['run_id']}")
        return approval_id
    except NotImplementedError:
        # Adapter has no approval support -- treat as "nothing created".
        pass
    except Exception as e:
        log_warning(f"Error creating approval record (sync): {e}")
    return None
async def acreate_approval_from_pause(
    db: Any,
    run_response: Any,
    agent_id: Optional[str] = None,
    agent_name: Optional[str] = None,
    team_id: Optional[str] = None,
    team_name: Optional[str] = None,
    workflow_id: Optional[str] = None,
    workflow_name: Optional[str] = None,
    user_id: Optional[str] = None,
    schedule_id: Optional[str] = None,
    schedule_run_id: Optional[str] = None,
) -> Optional[str]:
    """Async variant of create_approval_from_pause.

    Works against both sync and async DB adapters. Returns the approval_id
    if a record was created, None otherwise; DB errors are logged, never raised.
    """
    if db is None:
        return None
    tools = getattr(run_response, "tools", None)
    requirements = getattr(run_response, "requirements", None)
    if not _has_approval_requirement(tools, requirements):
        return None
    try:
        create_fn = getattr(db, "create_approval", None)
        if create_fn is None:
            return None
        record = _build_approval_dict(
            run_response,
            agent_id=agent_id,
            agent_name=agent_name,
            team_id=team_id,
            team_name=team_name,
            workflow_id=workflow_id,
            workflow_name=workflow_name,
            user_id=user_id,
            schedule_id=schedule_id,
            schedule_run_id=schedule_run_id,
        )
        from inspect import iscoroutinefunction

        # Await only when the adapter exposes a coroutine; call directly otherwise.
        if iscoroutinefunction(create_fn):
            await create_fn(record)
        else:
            create_fn(record)
        approval_id: str = record["id"]
        # Make the approval discoverable from each paused tool execution.
        _stamp_approval_id_on_tools(tools, requirements, approval_id)
        log_debug(f"Created approval {approval_id} for run {record['run_id']}")
        return approval_id
    except NotImplementedError:
        # Adapter has no approval support -- treat as "nothing created".
        pass
    except Exception as e:
        log_warning(f"Error creating approval record (async): {e}")
    return None
def create_audit_approval(
    db: Any,
    tool_execution: Any,
    run_response: Any,
    status: str,  # "approved" or "rejected"
    agent_id: Optional[str] = None,
    agent_name: Optional[str] = None,
    team_id: Optional[str] = None,
    team_name: Optional[str] = None,
    user_id: Optional[str] = None,
) -> None:
    """Create an audit approval record AFTER a HITL interaction resolves.

    Unlike create_approval_from_pause (which creates a 'pending' record before
    resolution), this creates a completed record (status='approved'/'rejected')
    for audit logging. Only called for tools with approval_type='audit'.
    Best-effort: DB errors are logged and never raised.
    """
    if db is None:
        return
    try:
        # Attribution: team wins over the default agent (no workflow variant here).
        source_type = "agent"
        source_name = agent_name
        if team_id:
            source_type = "team"
            source_name = team_name
        tool_name = getattr(tool_execution, "tool_name", None)
        tool_args = getattr(tool_execution, "tool_args", None)
        pause_type = _get_pause_type(tool_execution)
        # Mirror the UI context shape used by the pending-approval path.
        context: Dict[str, Any] = {}
        if tool_name:
            context["tool_names"] = [tool_name]
        if source_name:
            context["source_name"] = source_name
        approval_data = {
            "id": str(uuid4()),
            # Defensive fallbacks so the record is always keyed/queryable.
            "run_id": getattr(run_response, "run_id", None) or str(uuid4()),
            "session_id": getattr(run_response, "session_id", None) or "",
            "status": status,
            "approval_type": "audit",
            "pause_type": pause_type,
            "tool_name": tool_name,
            "tool_args": tool_args,
            "source_type": source_type,
            "agent_id": agent_id,
            "team_id": team_id,
            "user_id": user_id,
            "source_name": source_name,
            "context": context if context else None,
            # Created and resolved at the same moment: the record is born resolved.
            "resolved_at": now_epoch_s(),
            "created_at": now_epoch_s(),
            "updated_at": None,
        }
        db.create_approval(approval_data)
        log_debug(f"Audit approval {approval_data['id']} for tool {tool_name}")
    except NotImplementedError:
        # Adapter has no approval support -- audit logging is best-effort.
        pass
    except Exception as e:
        log_warning(f"Error creating audit approval record (sync): {e}")
# ---------------------------------------------------------------------------
# Approval gate: enforce external resolution before continue
# ---------------------------------------------------------------------------
def _apply_approval_to_tools(tools: List[Any], approval_status: str, resolution_data: Optional[Dict[str, Any]]) -> None:
"""Apply approval resolution status to tools that require approval.
For 'approved': sets confirmed=True, applies resolution_data to user_input/external_execution fields.
For 'rejected': sets confirmed=False.
"""
for tool in tools:
if getattr(tool, "approval_type", None) != "required":
continue
if approval_status == "approved":
# Confirmation tools
if getattr(tool, "requires_confirmation", False):
tool.confirmed = True
# User input tools: apply resolution_data values to user_input_schema
if getattr(tool, "requires_user_input", False) and resolution_data:
values = resolution_data.get("values", resolution_data)
for ufield in tool.user_input_schema or []:
if ufield.name in values:
ufield.value = values[ufield.name]
# External execution tools: apply resolution_data result
if getattr(tool, "external_execution_required", False) and resolution_data:
if "result" in resolution_data:
tool.result = resolution_data["result"]
elif approval_status == "rejected":
if getattr(tool, "requires_confirmation", False):
tool.confirmed = False
if getattr(tool, "requires_user_input", False):
tool.confirmed = False
if getattr(tool, "external_execution_required", False):
tool.confirmed = False
def _get_approval_for_run(db: Any, run_id: str) -> Optional[Dict[str, Any]]:
"""Look up the most recent 'required' approval for a run_id (sync)."""
try:
approvals, _ = db.get_approvals(run_id=run_id, approval_type="required", limit=1)
return approvals[0] if approvals else None
except (NotImplementedError, Exception):
return None
async def _aget_approval_for_run(db: Any, run_id: str) -> Optional[Dict[str, Any]]:
"""Look up the most recent 'required' approval for a run_id (async)."""
try:
get_fn = getattr(db, "get_approvals", None)
if get_fn is None:
return None
from inspect import iscoroutinefunction
if iscoroutinefunction(get_fn):
approvals, _ = await get_fn(run_id=run_id, approval_type="required", limit=1)
else:
approvals, _ = get_fn(run_id=run_id, approval_type="required", limit=1)
return approvals[0] if approvals else None
except (NotImplementedError, Exception):
return None
def check_and_apply_approval_resolution(db: Any, run_id: str, run_response: Any) -> None:
    """Gate a continued run on its externally-managed approval being resolved.

    No-op when db is None or no tool on the run carries approval_type='required'.
    Otherwise looks up the approval record and raises RuntimeError when it is
    missing or still pending; a resolved approval is applied to the paused tools.
    """
    if db is None:
        return
    tools = getattr(run_response, "tools", None)
    gated = bool(tools) and any(getattr(t, "approval_type", None) == "required" for t in tools)
    if not gated:
        return
    approval = _get_approval_for_run(db, run_id)
    if approval is None:
        raise RuntimeError(
            "No approval record found for this run. Cannot continue a run that requires external approval."
        )
    status = approval.get("status", "pending")
    if status == "pending":
        raise RuntimeError("Approval is still pending. Resolve the approval before continuing this run.")
    # Resolved (approved/rejected): propagate the decision onto the tools.
    _apply_approval_to_tools(tools, status, approval.get("resolution_data"))
async def acheck_and_apply_approval_resolution(db: Any, run_id: str, run_response: Any) -> None:
    """Async variant of check_and_apply_approval_resolution.

    Same contract: raises RuntimeError when the required approval is missing
    or still pending; otherwise applies the resolution to the paused tools.
    """
    if db is None:
        return
    tools = getattr(run_response, "tools", None)
    gated = bool(tools) and any(getattr(t, "approval_type", None) == "required" for t in tools)
    if not gated:
        return
    approval = await _aget_approval_for_run(db, run_id)
    if approval is None:
        raise RuntimeError(
            "No approval record found for this run. Cannot continue a run that requires external approval."
        )
    status = approval.get("status", "pending")
    if status == "pending":
        raise RuntimeError("Approval is still pending. Resolve the approval before continuing this run.")
    # Resolved (approved/rejected): propagate the decision onto the tools.
    _apply_approval_to_tools(tools, status, approval.get("resolution_data"))
async def acreate_audit_approval(
    db: Any,
    tool_execution: Any,
    run_response: Any,
    status: str,  # "approved" or "rejected"
    agent_id: Optional[str] = None,
    agent_name: Optional[str] = None,
    team_id: Optional[str] = None,
    team_name: Optional[str] = None,
    user_id: Optional[str] = None,
) -> None:
    """Async variant of create_audit_approval.

    Creates a completed audit record (status='approved'/'rejected') after a
    HITL interaction resolves, working against both sync and async DB adapters.
    Best-effort: DB errors are logged and never raised.
    """
    if db is None:
        return
    try:
        # Attribution: team wins over the default agent (no workflow variant here).
        source_type = "agent"
        source_name = agent_name
        if team_id:
            source_type = "team"
            source_name = team_name
        tool_name = getattr(tool_execution, "tool_name", None)
        tool_args = getattr(tool_execution, "tool_args", None)
        pause_type = _get_pause_type(tool_execution)
        # Mirror the UI context shape used by the pending-approval path.
        context: Dict[str, Any] = {}
        if tool_name:
            context["tool_names"] = [tool_name]
        if source_name:
            context["source_name"] = source_name
        approval_data = {
            "id": str(uuid4()),
            # Defensive fallbacks so the record is always keyed/queryable.
            "run_id": getattr(run_response, "run_id", None) or str(uuid4()),
            "session_id": getattr(run_response, "session_id", None) or "",
            "status": status,
            "approval_type": "audit",
            "pause_type": pause_type,
            "tool_name": tool_name,
            "tool_args": tool_args,
            "source_type": source_type,
            "agent_id": agent_id,
            "team_id": team_id,
            "user_id": user_id,
            "source_name": source_name,
            "context": context if context else None,
            # Created and resolved at the same moment: the record is born resolved.
            "resolved_at": now_epoch_s(),
            "created_at": now_epoch_s(),
            "updated_at": None,
        }
        create_fn = getattr(db, "create_approval", None)
        if create_fn is None:
            return
        from inspect import iscoroutinefunction
        # Await only when the adapter exposes a coroutine; call directly otherwise.
        if iscoroutinefunction(create_fn):
            await create_fn(approval_data)
        else:
            create_fn(approval_data)
        log_debug(f"Audit approval {approval_data['id']} for tool {tool_name}")
    except NotImplementedError:
        # Adapter has no approval support -- audit logging is best-effort.
        pass
    except Exception as e:
        log_warning(f"Error creating audit approval record (async): {e}")
# ---------------------------------------------------------------------------
# Update approval run_status when run completes
# ---------------------------------------------------------------------------
def update_approval_run_status(db: Any, run_id: str, run_status: RunStatus) -> None:
    """Update run_status on all approvals for a given run_id.

    Called when a run completes, errors, or is cancelled after being paused,
    so the UI can tell that the run has already been continued. Best-effort:
    missing adapter support and DB errors never raise.

    Args:
        db: Database adapter instance.
        run_id: The run ID to match.
        run_status: The new run status.
    """
    if db is None:
        return
    update_fn = getattr(db, "update_approval_run_status", None)
    if update_fn is None:
        return
    try:
        updated = update_fn(run_id, run_status)
        if updated > 0:
            log_debug(f"Updated run_status to {run_status} for {updated} approval(s) on run {run_id}")
    except NotImplementedError:
        # Adapter has no approval support -- nothing to record.
        pass
    except Exception as e:
        log_warning(f"Error updating approval run_status (sync): {e}")
async def aupdate_approval_run_status(db: Any, run_id: str, run_status: RunStatus) -> None:
    """Async variant of update_approval_run_status.

    Called when a run completes, errors, or is cancelled after being paused.
    This allows the UI to know if the run has already been continued.

    Args:
        db: Database adapter instance.
        run_id: The run ID to match.
        run_status: The new run status.
    """
    if db is None:
        return
    try:
        # Adapters that do not support approvals simply lack the method.
        updater = getattr(db, "update_approval_run_status", None)
        if updater is None:
            return
        import inspect

        # The adapter may expose either a coroutine function or a plain one.
        if inspect.iscoroutinefunction(updater):
            count = await updater(run_id, run_status)
        else:
            count = updater(run_id, run_status)
        if count > 0:
            log_debug(f"Updated run_status to {run_status} for {count} approval(s) on run {run_id}")
    except NotImplementedError:
        # Adapter explicitly opted out of approval support.
        pass
    except Exception as e:
        # Best-effort bookkeeping: never let approval updates break the run.
        log_warning(f"Error updating approval run_status (async): {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/approval.py",
"license": "Apache License 2.0",
"lines": 486,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/db/test_approval_schema.py | """Unit tests for agno.db.schemas.approval — Approval dataclass and serialization."""
import time
import pytest
from agno.db.schemas.approval import Approval
def _has_sqlalchemy() -> bool:
    """Whether the optional ``sqlalchemy`` dependency is importable.

    Used to skip the DB-schema alignment tests in environments where the
    optional dependency is not installed.
    """
    try:
        import sqlalchemy  # noqa: F401
    except ImportError:
        return False
    return True
# =============================================================================
# Construction and defaults
# =============================================================================
class TestApprovalConstruction:
    """Construction-time behavior: required fields, defaults, timestamps."""

    def test_required_fields(self):
        record = Approval(id="a1", run_id="r1", session_id="s1")
        assert record.id == "a1"
        assert record.run_id == "r1"
        assert record.session_id == "s1"

    def test_default_values(self):
        record = Approval(id="a1", run_id="r1", session_id="s1")
        # Non-None defaults.
        assert record.status == "pending"
        assert record.source_type == "agent"
        assert record.pause_type == "confirmation"
        # Everything else defaults to None.
        for field_name in (
            "approval_type",
            "tool_name",
            "tool_args",
            "expires_at",
            "agent_id",
            "team_id",
            "workflow_id",
            "user_id",
            "schedule_id",
            "schedule_run_id",
            "source_name",
            "requirements",
            "context",
            "resolution_data",
            "resolved_by",
            "resolved_at",
            "updated_at",
        ):
            assert getattr(record, field_name) is None

    def test_created_at_auto_set(self):
        lower = int(time.time())
        record = Approval(id="a1", run_id="r1", session_id="s1")
        upper = int(time.time())
        assert lower <= record.created_at <= upper

    def test_created_at_preserved_when_provided(self):
        record = Approval(id="a1", run_id="r1", session_id="s1", created_at=1000)
        assert record.created_at == 1000

    def test_resolved_at_converted_via_to_epoch_s(self):
        now = int(time.time())
        record = Approval(id="a1", run_id="r1", session_id="s1", resolved_at=now)
        assert record.resolved_at == now

    def test_updated_at_converted_via_to_epoch_s(self):
        now = int(time.time())
        record = Approval(id="a1", run_id="r1", session_id="s1", updated_at=now)
        assert record.updated_at == now
# =============================================================================
# to_dict
# =============================================================================
class TestApprovalToDict:
    """Serialization via ``to_dict``."""

    def test_all_keys_present(self):
        serialized = Approval(id="a1", run_id="r1", session_id="s1").to_dict()
        expected_keys = {
            "id",
            "run_id",
            "session_id",
            "status",
            "source_type",
            "approval_type",
            "pause_type",
            "tool_name",
            "tool_args",
            "expires_at",
            "agent_id",
            "team_id",
            "workflow_id",
            "user_id",
            "schedule_id",
            "schedule_run_id",
            "source_name",
            "requirements",
            "context",
            "resolution_data",
            "resolved_by",
            "resolved_at",
            "created_at",
            "updated_at",
            "run_status",
        }
        assert set(serialized.keys()) == expected_keys

    def test_preserves_none_values(self):
        """to_dict should preserve None values, not strip them."""
        serialized = Approval(id="a1", run_id="r1", session_id="s1").to_dict()
        for key in ("tool_name", "agent_id", "resolved_by"):
            assert serialized[key] is None

    def test_round_trip_values(self):
        record = Approval(
            id="a1",
            run_id="r1",
            session_id="s1",
            status="approved",
            source_type="team",
            approval_type="required",
            pause_type="user_input",
            tool_name="delete_file",
            tool_args={"path": "/tmp/x"},
            agent_id="agent-1",
            team_id="team-1",
            user_id="user-1",
            source_name="MyTeam",
            context={"tool_names": ["delete_file"]},
            resolution_data={"values": {"reason": "ok"}},
            resolved_by="admin",
        )
        serialized = record.to_dict()
        assert serialized["status"] == "approved"
        assert serialized["tool_args"] == {"path": "/tmp/x"}
        assert serialized["context"]["tool_names"] == ["delete_file"]
        assert serialized["resolution_data"]["values"]["reason"] == "ok"
# =============================================================================
# from_dict
# =============================================================================
class TestApprovalFromDict:
    """Deserialization via ``from_dict``."""

    def test_basic_round_trip(self):
        source = Approval(
            id="a1",
            run_id="r1",
            session_id="s1",
            status="approved",
            approval_type="required",
            tool_name="my_tool",
        )
        rebuilt = Approval.from_dict(source.to_dict())
        for attr in ("id", "run_id", "status", "approval_type", "tool_name"):
            assert getattr(rebuilt, attr) == getattr(source, attr)

    def test_ignores_unknown_keys(self):
        payload = {
            "id": "a1",
            "run_id": "r1",
            "session_id": "s1",
            "unknown_field": "should_be_ignored",
            "another_extra": 42,
        }
        rebuilt = Approval.from_dict(payload)
        assert rebuilt.id == "a1"
        assert not hasattr(rebuilt, "unknown_field")

    def test_from_dict_does_not_mutate_input(self):
        payload = {
            "id": "a1",
            "run_id": "r1",
            "session_id": "s1",
            "extra_key": "should_stay",
        }
        snapshot = dict(payload)
        Approval.from_dict(payload)
        assert payload == snapshot

    def test_full_round_trip(self):
        """Create -> to_dict -> from_dict -> to_dict should produce same dict."""
        source = Approval(
            id="a1",
            run_id="r1",
            session_id="s1",
            status="rejected",
            source_type="team",
            approval_type="audit",
            pause_type="external_execution",
            tool_name="run_cmd",
            tool_args={"cmd": "ls"},
            agent_id="ag1",
            team_id="t1",
            workflow_id="w1",
            user_id="u1",
            schedule_id="sch1",
            schedule_run_id="sr1",
            source_name="MyTeam",
            requirements=[{"tool_execution": "run_cmd"}],
            context={"tool_names": ["run_cmd"]},
            resolution_data={"result": "ok"},
            resolved_by="admin",
            resolved_at=1700000000,
            created_at=1700000000,
            updated_at=1700000001,
        )
        first = source.to_dict()
        second = Approval.from_dict(first).to_dict()
        assert first == second
# =============================================================================
# DB schema alignment
# =============================================================================
class TestApprovalSchemaAlignment:
    """Verify that the Approval dataclass fields match the DB schema columns."""

    # Schema dicts carry bookkeeping entries that are not real columns.
    _NON_COLUMN_KEYS = {"_unique_constraints", "_indexes"}

    def _get_approval_dataclass_fields(self):
        # Iterating __dataclass_fields__ yields the field names directly.
        return set(Approval.__dataclass_fields__)

    @pytest.mark.skipif(not _has_sqlalchemy(), reason="sqlalchemy not installed")
    def test_postgres_schema_columns_match(self):
        from agno.db.postgres.schemas import APPROVAL_TABLE_SCHEMA

        columns = set(APPROVAL_TABLE_SCHEMA) - self._NON_COLUMN_KEYS
        assert columns == self._get_approval_dataclass_fields()

    @pytest.mark.skipif(not _has_sqlalchemy(), reason="sqlalchemy not installed")
    def test_sqlite_schema_columns_match(self):
        from agno.db.sqlite.schemas import APPROVAL_TABLE_SCHEMA

        columns = set(APPROVAL_TABLE_SCHEMA) - self._NON_COLUMN_KEYS
        assert columns == self._get_approval_dataclass_fields()

    @pytest.mark.skipif(not _has_sqlalchemy(), reason="sqlalchemy not installed")
    def test_postgres_and_sqlite_schemas_have_same_columns(self):
        from agno.db.postgres.schemas import APPROVAL_TABLE_SCHEMA as PG_SCHEMA
        from agno.db.sqlite.schemas import APPROVAL_TABLE_SCHEMA as SQLITE_SCHEMA

        pg_cols = set(PG_SCHEMA) - self._NON_COLUMN_KEYS
        sqlite_cols = set(SQLITE_SCHEMA) - self._NON_COLUMN_KEYS
        assert pg_cols == sqlite_cols
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_approval_schema.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/run/test_approval.py | """Unit tests for agno.run.approval — approval record creation and resolution gating."""
from dataclasses import dataclass
from typing import Any, Dict, Optional
from unittest.mock import AsyncMock, MagicMock
import pytest
from agno.run.approval import (
_apply_approval_to_tools,
_build_approval_dict,
_get_first_approval_tool,
_get_pause_type,
_has_approval_requirement,
acheck_and_apply_approval_resolution,
acreate_approval_from_pause,
acreate_audit_approval,
check_and_apply_approval_resolution,
create_approval_from_pause,
create_audit_approval,
)
# =============================================================================
# Helpers: lightweight stand-ins for ToolExecution / RunResponse / UserInputField
# =============================================================================
@dataclass
class FakeToolExecution:
    """Lightweight stand-in for agno's ToolExecution used by these unit tests."""

    tool_name: Optional[str] = None
    tool_args: Optional[Dict[str, Any]] = None
    approval_type: Optional[str] = None  # "required" or "audit" in these tests
    approval_id: Optional[str] = None  # stamped by create_approval_from_pause
    requires_confirmation: Optional[bool] = None
    requires_user_input: Optional[bool] = None
    external_execution_required: Optional[bool] = None
    user_input_schema: Optional[list] = None  # list of FakeUserInputField
    confirmed: Optional[bool] = None  # set by _apply_approval_to_tools
    result: Optional[str] = None  # set for external-execution resolutions
@dataclass
class FakeRequirement:
    """Lightweight stand-in for a run requirement wrapping one tool execution."""

    tool_execution: Optional[FakeToolExecution] = None

    def to_dict(self) -> Dict[str, Any]:
        tool_name = None
        if self.tool_execution:
            tool_name = self.tool_execution.tool_name
        return {"tool_execution": tool_name}
@dataclass
class FakeRunResponse:
    """Lightweight stand-in for agno's RunResponse used by these unit tests."""

    run_id: Optional[str] = "run-123"
    session_id: Optional[str] = "sess-456"
    tools: Optional[list] = None  # list of FakeToolExecution
    requirements: Optional[list] = None  # list of FakeRequirement
@dataclass
class FakeUserInputField:
    """Lightweight stand-in for agno's UserInputField used by these unit tests."""

    name: str = ""
    value: Optional[str] = None  # populated when approved user input is applied
# =============================================================================
# _get_pause_type
# =============================================================================
class TestGetPauseType:
    """Pause-type classification for a single tool execution."""

    def test_user_input(self):
        tool = FakeToolExecution(requires_user_input=True)
        assert _get_pause_type(tool) == "user_input"

    def test_external_execution(self):
        tool = FakeToolExecution(external_execution_required=True)
        assert _get_pause_type(tool) == "external_execution"

    def test_confirmation_default(self):
        assert _get_pause_type(FakeToolExecution()) == "confirmation"

    def test_user_input_takes_precedence(self):
        """user_input is checked before external_execution."""
        tool = FakeToolExecution(requires_user_input=True, external_execution_required=True)
        assert _get_pause_type(tool) == "user_input"
# =============================================================================
# _get_first_approval_tool
# =============================================================================
class TestGetFirstApprovalTool:
    """Selection of the first tool execution carrying an approval_type."""

    def test_returns_none_when_empty(self):
        assert _get_first_approval_tool(None) is None
        assert _get_first_approval_tool([]) is None

    def test_finds_tool_in_tools_list(self):
        plain = FakeToolExecution(tool_name="t1", approval_type=None)
        flagged = FakeToolExecution(tool_name="t2", approval_type="required")
        assert _get_first_approval_tool([plain, flagged]) is flagged

    def test_finds_tool_in_requirements(self):
        flagged = FakeToolExecution(tool_name="req_tool", approval_type="audit")
        requirement = FakeRequirement(tool_execution=flagged)
        assert _get_first_approval_tool(None, requirements=[requirement]) is flagged

    def test_tools_list_takes_precedence(self):
        from_tools = FakeToolExecution(tool_name="from_tools", approval_type="required")
        from_reqs = FakeToolExecution(tool_name="from_reqs", approval_type="required")
        requirement = FakeRequirement(tool_execution=from_reqs)
        assert _get_first_approval_tool([from_tools], requirements=[requirement]) is from_tools
# =============================================================================
# _has_approval_requirement
# =============================================================================
class TestHasApprovalRequirement:
    """Detection of blocking ("required") approvals on a run."""

    def test_false_when_no_tools(self):
        assert _has_approval_requirement(None) is False

    def test_false_when_approval_type_is_audit(self):
        # Audit approvals are non-blocking and must not count.
        assert _has_approval_requirement([FakeToolExecution(approval_type="audit")]) is False

    def test_true_when_approval_type_is_required(self):
        assert _has_approval_requirement([FakeToolExecution(approval_type="required")]) is True

    def test_true_via_requirements(self):
        requirement = FakeRequirement(tool_execution=FakeToolExecution(approval_type="required"))
        assert _has_approval_requirement(None, requirements=[requirement]) is True
# =============================================================================
# _build_approval_dict
# =============================================================================
class TestBuildApprovalDict:
    """Shape and contents of the dict persisted for a pending approval."""

    def test_basic_agent_source(self):
        response = FakeRunResponse(
            tools=[FakeToolExecution(tool_name="delete_file", approval_type="required", requires_confirmation=True)]
        )
        built = _build_approval_dict(response, agent_id="a1", agent_name="MyAgent")
        assert built["source_type"] == "agent"
        assert built["source_name"] == "MyAgent"
        assert built["agent_id"] == "a1"
        assert built["tool_name"] == "delete_file"
        assert built["approval_type"] == "required"
        assert built["status"] == "pending"
        assert built["run_id"] == "run-123"
        assert built["session_id"] == "sess-456"
        assert isinstance(built["id"], str)
        assert isinstance(built["created_at"], int)

    def test_team_source_overrides_agent(self):
        response = FakeRunResponse(tools=[FakeToolExecution(tool_name="t", approval_type="required")])
        built = _build_approval_dict(response, agent_id="a1", agent_name="A", team_id="t1", team_name="MyTeam")
        assert (built["source_type"], built["source_name"]) == ("team", "MyTeam")

    def test_workflow_source(self):
        response = FakeRunResponse(tools=[FakeToolExecution(tool_name="t", approval_type="required")])
        built = _build_approval_dict(response, workflow_id="w1", workflow_name="MyWorkflow")
        assert (built["source_type"], built["source_name"]) == ("workflow", "MyWorkflow")

    def test_session_id_falls_back_to_empty_string(self):
        response = FakeRunResponse(session_id=None, tools=[FakeToolExecution(approval_type="required")])
        assert _build_approval_dict(response)["session_id"] == ""

    def test_run_id_falls_back_to_uuid(self):
        response = FakeRunResponse(run_id=None, tools=[FakeToolExecution(approval_type="required")])
        generated = _build_approval_dict(response)["run_id"]
        assert isinstance(generated, str)
        assert len(generated) > 0

    def test_context_includes_tool_names_from_requirements(self):
        first = FakeToolExecution(tool_name="tool_a", approval_type="required")
        second = FakeToolExecution(tool_name="tool_b", approval_type="required")
        response = FakeRunResponse(
            requirements=[FakeRequirement(tool_execution=first), FakeRequirement(tool_execution=second)]
        )
        assert _build_approval_dict(response)["context"]["tool_names"] == ["tool_a", "tool_b"]

    def test_context_falls_back_to_tools_list(self):
        response = FakeRunResponse(tools=[FakeToolExecution(tool_name="my_tool", approval_type="required")])
        assert _build_approval_dict(response)["context"]["tool_names"] == ["my_tool"]

    def test_pause_type_from_user_input_tool(self):
        tool = FakeToolExecution(tool_name="ask", approval_type="required", requires_user_input=True)
        assert _build_approval_dict(FakeRunResponse(tools=[tool]))["pause_type"] == "user_input"

    def test_pause_type_from_external_execution_tool(self):
        tool = FakeToolExecution(tool_name="ext", approval_type="required", external_execution_required=True)
        assert _build_approval_dict(FakeRunResponse(tools=[tool]))["pause_type"] == "external_execution"

    def test_schedule_fields_passed_through(self):
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        built = _build_approval_dict(response, schedule_id="sched-1", schedule_run_id="sr-1")
        assert built["schedule_id"] == "sched-1"
        assert built["schedule_run_id"] == "sr-1"
# =============================================================================
# create_approval_from_pause (sync)
# =============================================================================
class TestCreateApprovalFromPause:
    """Persistence of a pending approval when a run pauses (sync path)."""

    def test_noop_when_db_is_none(self):
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        create_approval_from_pause(db=None, run_response=response)  # should not raise

    def test_noop_when_no_approval_requirement(self):
        store = MagicMock()
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type=None)])
        create_approval_from_pause(db=store, run_response=response)
        store.create_approval.assert_not_called()

    def test_creates_approval_record(self):
        store = MagicMock()
        response = FakeRunResponse(tools=[FakeToolExecution(tool_name="delete", approval_type="required")])
        create_approval_from_pause(db=store, run_response=response, agent_id="a1", agent_name="Agent")
        store.create_approval.assert_called_once()
        (payload,), _ = store.create_approval.call_args
        assert payload["status"] == "pending"
        assert payload["agent_id"] == "a1"

    def test_silently_handles_not_implemented(self):
        store = MagicMock()
        store.create_approval.side_effect = NotImplementedError
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        create_approval_from_pause(db=store, run_response=response)  # should not raise

    def test_silently_handles_generic_exception(self):
        store = MagicMock()
        store.create_approval.side_effect = RuntimeError("db down")
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        create_approval_from_pause(db=store, run_response=response)  # should not raise

    def test_passes_user_id(self):
        store = MagicMock()
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        create_approval_from_pause(db=store, run_response=response, user_id="user-1")
        (payload,), _ = store.create_approval.call_args
        assert payload["user_id"] == "user-1"

    def test_passes_team_context(self):
        store = MagicMock()
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        create_approval_from_pause(db=store, run_response=response, team_id="t1", team_name="Team", user_id="u1")
        (payload,), _ = store.create_approval.call_args
        assert payload["team_id"] == "t1"
        assert payload["source_type"] == "team"
        assert payload["source_name"] == "Team"
        assert payload["user_id"] == "u1"

    def test_returns_approval_id_on_success(self):
        store = MagicMock()
        tool = FakeToolExecution(tool_name="delete", approval_type="required")
        response = FakeRunResponse(tools=[tool])
        approval_id = create_approval_from_pause(db=store, run_response=response, agent_id="a1", agent_name="Agent")
        assert approval_id is not None
        assert isinstance(approval_id, str)
        assert len(approval_id) > 0
        # The returned ID must match what was passed to db.create_approval
        (payload,), _ = store.create_approval.call_args
        assert approval_id == payload["id"]
        # approval_id must also be stamped on the tool itself
        assert tool.approval_id == approval_id
# =============================================================================
# acreate_approval_from_pause (async)
# =============================================================================
class TestAsyncCreateApprovalFromPause:
    """Persistence of a pending approval when a run pauses (async path)."""

    @pytest.mark.asyncio
    async def test_noop_when_db_is_none(self):
        await acreate_approval_from_pause(db=None, run_response=FakeRunResponse())

    @pytest.mark.asyncio
    async def test_calls_async_create_approval(self):
        store = MagicMock()
        store.create_approval = AsyncMock()
        response = FakeRunResponse(tools=[FakeToolExecution(tool_name="t", approval_type="required")])
        await acreate_approval_from_pause(db=store, run_response=response)
        store.create_approval.assert_awaited_once()

    @pytest.mark.asyncio
    async def test_falls_back_to_sync_create_approval(self):
        store = MagicMock()
        store.create_approval = MagicMock()  # sync
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        await acreate_approval_from_pause(db=store, run_response=response)
        store.create_approval.assert_called_once()

    @pytest.mark.asyncio
    async def test_noop_when_create_approval_missing(self):
        bare_store = MagicMock(spec=[])  # no create_approval attribute
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        await acreate_approval_from_pause(db=bare_store, run_response=response)  # should not raise

    @pytest.mark.asyncio
    async def test_returns_approval_id_on_success(self):
        store = MagicMock()
        store.create_approval = AsyncMock()
        tool = FakeToolExecution(tool_name="delete", approval_type="required")
        response = FakeRunResponse(tools=[tool])
        approval_id = await acreate_approval_from_pause(
            db=store, run_response=response, agent_id="a1", agent_name="Agent"
        )
        assert approval_id is not None
        assert isinstance(approval_id, str)
        assert len(approval_id) > 0
        (payload,), _ = store.create_approval.call_args
        assert approval_id == payload["id"]
        # approval_id must also be stamped on the tool itself
        assert tool.approval_id == approval_id
# =============================================================================
# create_audit_approval (sync)
# =============================================================================
class TestCreateAuditApproval:
    """Non-blocking audit records written after a HITL interaction (sync path)."""

    def test_noop_when_db_is_none(self):
        tool = FakeToolExecution(tool_name="t")
        create_audit_approval(db=None, tool_execution=tool, run_response=FakeRunResponse(), status="approved")

    def test_creates_audit_record(self):
        store = MagicMock()
        tool = FakeToolExecution(tool_name="send_email", tool_args={"to": "a@b.com"}, requires_confirmation=True)
        create_audit_approval(
            db=store, tool_execution=tool, run_response=FakeRunResponse(), status="approved", agent_id="a1", agent_name="Bot"
        )
        store.create_approval.assert_called_once()
        (payload,), _ = store.create_approval.call_args
        assert payload["approval_type"] == "audit"
        assert payload["status"] == "approved"
        assert payload["tool_name"] == "send_email"
        assert payload["source_type"] == "agent"
        assert payload["source_name"] == "Bot"

    def test_team_source_name_set(self):
        """Verify the fix: source_name is set to team_name when team_id is present."""
        store = MagicMock()
        tool = FakeToolExecution(tool_name="t")
        create_audit_approval(
            db=store, tool_execution=tool, run_response=FakeRunResponse(), status="rejected", team_id="t1", team_name="TheTeam"
        )
        (payload,), _ = store.create_approval.call_args
        assert (payload["source_type"], payload["source_name"]) == ("team", "TheTeam")

    def test_rejected_status(self):
        store = MagicMock()
        tool = FakeToolExecution(tool_name="t")
        create_audit_approval(db=store, tool_execution=tool, run_response=FakeRunResponse(), status="rejected")
        (payload,), _ = store.create_approval.call_args
        assert payload["status"] == "rejected"

    def test_silently_handles_not_implemented(self):
        store = MagicMock()
        store.create_approval.side_effect = NotImplementedError
        tool = FakeToolExecution(tool_name="t")
        create_audit_approval(db=store, tool_execution=tool, run_response=FakeRunResponse(), status="approved")
# =============================================================================
# acreate_audit_approval (async)
# =============================================================================
class TestAsyncCreateAuditApproval:
    """Non-blocking audit records written after a HITL interaction (async path)."""

    @pytest.mark.asyncio
    async def test_creates_audit_record_async(self):
        store = MagicMock()
        store.create_approval = AsyncMock()
        tool = FakeToolExecution(tool_name="send_email")
        await acreate_audit_approval(
            db=store, tool_execution=tool, run_response=FakeRunResponse(), status="approved", agent_id="a1", agent_name="Bot"
        )
        store.create_approval.assert_awaited_once()
        (payload,), _ = store.create_approval.call_args
        assert (payload["approval_type"], payload["status"]) == ("audit", "approved")

    @pytest.mark.asyncio
    async def test_team_source_name_set(self):
        """Verify the fix: source_name is set to team_name when team_id is present."""
        store = MagicMock()
        store.create_approval = AsyncMock()
        tool = FakeToolExecution(tool_name="t")
        await acreate_audit_approval(
            db=store, tool_execution=tool, run_response=FakeRunResponse(), status="approved", team_id="t1", team_name="TheTeam"
        )
        (payload,), _ = store.create_approval.call_args
        assert (payload["source_type"], payload["source_name"]) == ("team", "TheTeam")

    @pytest.mark.asyncio
    async def test_falls_back_to_sync(self):
        store = MagicMock()
        store.create_approval = MagicMock()  # sync
        tool = FakeToolExecution(tool_name="t")
        await acreate_audit_approval(db=store, tool_execution=tool, run_response=FakeRunResponse(), status="approved")
        store.create_approval.assert_called_once()
# =============================================================================
# _apply_approval_to_tools
# =============================================================================
class TestApplyApprovalToTools:
    """Translation of an approval decision onto the paused tool executions."""

    def test_approved_sets_confirmed_true(self):
        tool = FakeToolExecution(approval_type="required", requires_confirmation=True)
        _apply_approval_to_tools([tool], "approved", None)
        assert tool.confirmed is True

    def test_rejected_sets_confirmed_false(self):
        tool = FakeToolExecution(approval_type="required", requires_confirmation=True)
        _apply_approval_to_tools([tool], "rejected", None)
        assert tool.confirmed is False

    def test_skips_tools_without_approval_type_required(self):
        tool = FakeToolExecution(approval_type="audit", requires_confirmation=True)
        _apply_approval_to_tools([tool], "approved", None)
        assert tool.confirmed is None  # untouched

    def test_approved_applies_user_input_values(self):
        field = FakeUserInputField(name="reason")
        tool = FakeToolExecution(
            approval_type="required",
            requires_user_input=True,
            user_input_schema=[field],
        )
        _apply_approval_to_tools([tool], "approved", {"values": {"reason": "looks good"}})
        assert field.value == "looks good"

    def test_approved_applies_external_execution_result(self):
        tool = FakeToolExecution(approval_type="required", external_execution_required=True)
        _apply_approval_to_tools([tool], "approved", {"result": "done"})
        assert tool.result == "done"

    def test_rejected_user_input_sets_confirmed_false(self):
        tool = FakeToolExecution(approval_type="required", requires_user_input=True)
        _apply_approval_to_tools([tool], "rejected", None)
        assert tool.confirmed is False

    def test_rejected_external_execution_sets_confirmed_false(self):
        tool = FakeToolExecution(approval_type="required", external_execution_required=True)
        _apply_approval_to_tools([tool], "rejected", None)
        assert tool.confirmed is False
# =============================================================================
# check_and_apply_approval_resolution (sync)
# =============================================================================
class TestCheckAndApplyApprovalResolution:
    """Gating continue-run on a resolved approval record (sync path)."""

    def test_noop_when_db_is_none(self):
        check_and_apply_approval_resolution(db=None, run_id="r1", run_response=FakeRunResponse())

    def test_noop_when_no_tools_require_approval(self):
        store = MagicMock()
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type=None)])
        check_and_apply_approval_resolution(db=store, run_id="r1", run_response=response)
        store.get_approvals.assert_not_called()

    def test_raises_when_no_approval_record_found(self):
        store = MagicMock()
        store.get_approvals.return_value = ([], 0)
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        with pytest.raises(RuntimeError, match="No approval record found"):
            check_and_apply_approval_resolution(db=store, run_id="r1", run_response=response)

    def test_raises_when_approval_still_pending(self):
        store = MagicMock()
        store.get_approvals.return_value = ([{"status": "pending"}], 1)
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        with pytest.raises(RuntimeError, match="still pending"):
            check_and_apply_approval_resolution(db=store, run_id="r1", run_response=response)

    def test_applies_approved_status(self):
        store = MagicMock()
        store.get_approvals.return_value = ([{"status": "approved", "resolution_data": None}], 1)
        tool = FakeToolExecution(approval_type="required", requires_confirmation=True)
        check_and_apply_approval_resolution(db=store, run_id="r1", run_response=FakeRunResponse(tools=[tool]))
        assert tool.confirmed is True

    def test_applies_rejected_status(self):
        store = MagicMock()
        store.get_approvals.return_value = ([{"status": "rejected", "resolution_data": None}], 1)
        tool = FakeToolExecution(approval_type="required", requires_confirmation=True)
        check_and_apply_approval_resolution(db=store, run_id="r1", run_response=FakeRunResponse(tools=[tool]))
        assert tool.confirmed is False
# =============================================================================
# acheck_and_apply_approval_resolution (async)
# =============================================================================
class TestAsyncCheckAndApplyApprovalResolution:
    """Gating continue-run on a resolved approval record (async path)."""

    @pytest.mark.asyncio
    async def test_noop_when_db_is_none(self):
        await acheck_and_apply_approval_resolution(db=None, run_id="r1", run_response=FakeRunResponse())

    @pytest.mark.asyncio
    async def test_raises_when_no_approval_record_found(self):
        store = MagicMock()
        store.get_approvals = AsyncMock(return_value=([], 0))
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        with pytest.raises(RuntimeError, match="No approval record found"):
            await acheck_and_apply_approval_resolution(db=store, run_id="r1", run_response=response)

    @pytest.mark.asyncio
    async def test_raises_when_approval_still_pending(self):
        store = MagicMock()
        store.get_approvals = AsyncMock(return_value=([{"status": "pending"}], 1))
        response = FakeRunResponse(tools=[FakeToolExecution(approval_type="required")])
        with pytest.raises(RuntimeError, match="still pending"):
            await acheck_and_apply_approval_resolution(db=store, run_id="r1", run_response=response)

    @pytest.mark.asyncio
    async def test_applies_approved_status_async(self):
        store = MagicMock()
        store.get_approvals = AsyncMock(return_value=([{"status": "approved", "resolution_data": None}], 1))
        tool = FakeToolExecution(approval_type="required", requires_confirmation=True)
        await acheck_and_apply_approval_resolution(db=store, run_id="r1", run_response=FakeRunResponse(tools=[tool]))
        assert tool.confirmed is True

    @pytest.mark.asyncio
    async def test_falls_back_to_sync_get_approvals(self):
        store = MagicMock()
        store.get_approvals = MagicMock(return_value=([{"status": "approved", "resolution_data": None}], 1))
        tool = FakeToolExecution(approval_type="required", requires_confirmation=True)
        await acheck_and_apply_approval_resolution(db=store, run_id="r1", run_response=FakeRunResponse(tools=[tool]))
        assert tool.confirmed is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/run/test_approval.py",
"license": "Apache License 2.0",
"lines": 461,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_approval_decorator.py | """Unit tests for the @approval decorator and its interaction with @tool."""
import pytest
from agno.approval import ApprovalType, approval
from agno.tools import Toolkit, tool
from agno.tools.function import Function
# =============================================================================
# Test 1: @approval on top of @tool sets approval_type and requires_confirmation
# =============================================================================
def test_approval_on_top_of_tool():
    """When @approval is stacked on top of @tool(), it receives the Function
    object and sets approval_type='required' with requires_confirmation=True."""

    @approval
    @tool()
    def delete_file(path: str) -> str:
        """Delete a file at the given path."""
        return f"deleted {path}"

    assert isinstance(delete_file, Function)
    assert (delete_file.approval_type, delete_file.requires_confirmation) == ("required", True)
# =============================================================================
# Test 2: @approval below @tool (sentinel path) produces same result
# =============================================================================
def test_approval_below_tool():
    """When @approval is below @tool(), the sentinel attribute is detected
    by @tool and the resulting Function has approval_type='required' and
    requires_confirmation=True."""

    @tool()
    @approval
    def delete_file(path: str) -> str:
        """Delete a file at the given path."""
        return f"deleted {path}"

    assert isinstance(delete_file, Function)
    assert (delete_file.approval_type, delete_file.requires_confirmation) == ("required", True)
# =============================================================================
# Test 3: @approval() with parens works in both orderings
# =============================================================================
def test_approval_with_parens():
    """@approval() with empty parentheses behaves exactly like bare @approval,
    regardless of which decorator sits on top."""
    @approval()
    @tool()
    def func_a(x: int) -> int:
        """Function A."""
        return x
    @tool()
    @approval()
    def func_b(x: int) -> int:
        """Function B."""
        return x
    # Both orderings must yield an identical approval configuration.
    for fn in (func_a, func_b):
        assert isinstance(fn, Function)
        assert fn.approval_type == "required"
        assert fn.requires_confirmation is True
# =============================================================================
# Test 4: @approval(type="audit") on @tool(requires_confirmation=True)
# =============================================================================
def test_approval_audit_with_confirmation():
    """An 'audit' approval is legal whenever some HITL flag is present; here
    @tool(requires_confirmation=True) provides it, in either decorator order."""
    @approval(type="audit")
    @tool(requires_confirmation=True)
    def sensitive_action(data: str) -> str:
        """Perform a sensitive action."""
        return data
    @tool(requires_confirmation=True)
    @approval(type="audit")
    def sensitive_action_2(data: str) -> str:
        """Perform a sensitive action."""
        return data
    # Ordering must not matter: both end up as non-blocking audits that keep
    # the explicitly-requested confirmation flag.
    for fn in (sensitive_action, sensitive_action_2):
        assert isinstance(fn, Function)
        assert fn.approval_type == "audit"
        assert fn.requires_confirmation is True
# =============================================================================
# Test 5: @approval(type=ApprovalType.audit) with enum works
# =============================================================================
def test_approval_enum_type():
    """An ApprovalType enum member is accepted wherever a string type is."""
    @approval(type=ApprovalType.audit)
    @tool(requires_confirmation=True)
    def audited_func(x: int) -> int:
        """Audited function."""
        return x
    assert isinstance(audited_func, Function)
    assert audited_func.requires_confirmation is True
    # The enum is normalized to its string value on the Function.
    assert audited_func.approval_type == ApprovalType.audit.value
# =============================================================================
# Test 6: @approval + @tool(requires_user_input=True) does NOT auto-set
# requires_confirmation
# =============================================================================
def test_approval_with_user_input():
    """requires_user_input=True already counts as a HITL flag, so @approval
    must leave requires_confirmation untouched in both decorator orders."""
    @approval
    @tool(requires_user_input=True)
    def ask_user(question: str) -> str:
        """Ask the user a question."""
        return question
    @tool(requires_user_input=True)
    @approval
    def ask_user_2(question: str) -> str:
        """Ask the user a question."""
        return question
    for fn in (ask_user, ask_user_2):
        assert isinstance(fn, Function)
        assert fn.approval_type == "required"
        assert fn.requires_user_input is True
        # Confirmation must NOT have been force-enabled on top of user input.
        assert fn.requires_confirmation is not True
# =============================================================================
# Test 7: @approval + @tool(external_execution=True) does NOT auto-set
# requires_confirmation
# =============================================================================
def test_approval_with_external_execution():
    """external_execution=True is itself a HITL flag, so @approval must not
    additionally force requires_confirmation, in either decorator order."""
    @approval
    @tool(external_execution=True)
    def run_external(cmd: str) -> str:
        """Run an external command."""
        return cmd
    @tool(external_execution=True)
    @approval
    def run_external_2(cmd: str) -> str:
        """Run an external command."""
        return cmd
    for fn in (run_external, run_external_2):
        assert isinstance(fn, Function)
        assert fn.approval_type == "required"
        assert fn.external_execution is True
        # No auto-set confirmation when another HITL flag is present.
        assert fn.requires_confirmation is not True
# =============================================================================
# Test 8: @tool(requires_approval=True) raises ValueError
# =============================================================================
def test_old_requires_approval_raises():
    """The old requires_approval kwarg has been removed from VALID_KWARGS,
    so passing it should raise a ValueError."""
    # @tool validates its kwargs eagerly, so the error surfaces at
    # decoration time -- inside this `with` block, not when the tool runs.
    with pytest.raises(ValueError, match="Invalid tool configuration arguments"):
        @tool(requires_approval=True)
        def old_style(x: int) -> int:
            """Old style approval."""
            return x
# =============================================================================
# Test 9: @approval(type="audit") + @tool() (no HITL flag) raises ValueError
# =============================================================================
def test_audit_without_hitl_raises():
    """@approval(type='audit') requires at least one HITL flag to be set.
    Without any, it should raise a ValueError."""
    # An audit approval is non-blocking: it can only record the outcome of an
    # existing HITL interaction. With no HITL flag configured on the tool,
    # decoration itself must fail -- in both decorator orderings.
    # approval on top of tool (no HITL flags)
    with pytest.raises(ValueError, match="requires at least one HITL flag"):
        @approval(type="audit")
        @tool()
        def bare_audit(x: int) -> int:
            """Audit without HITL."""
            return x
    # approval below tool (no HITL flags)
    with pytest.raises(ValueError, match="requires at least one HITL flag"):
        @tool()
        @approval(type="audit")
        def bare_audit_2(x: int) -> int:
            """Audit without HITL."""
            return x
# =============================================================================
# Test 10: @approval @tool(...) inside a Toolkit preserves approval_type
# =============================================================================
def test_toolkit_propagation():
    """Registering an @approval + @tool decorated method inside a Toolkit
    must keep the approval metadata on the Function the toolkit stores."""
    class MyToolkit(Toolkit):
        def __init__(self):
            super().__init__(name="approval_toolkit", tools=[self.dangerous_action])
        @approval
        @tool()
        def dangerous_action(self, target: str) -> str:
            """Perform a dangerous action."""
            return f"done: {target}"
    registered = MyToolkit().functions
    # Exactly the one decorated method was registered, under its own name.
    assert "dangerous_action" in registered
    assert len(registered) == 1
    fn = registered["dangerous_action"]
    assert isinstance(fn, Function)
    assert fn.requires_confirmation is True
    assert fn.approval_type == "required"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_approval_decorator.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/05_agent_os/scheduler/async_schedule.py | """Async schedule management using the async ScheduleManager API.
This example demonstrates:
- Using acreate(), alist(), aget(), aupdate(), adelete() for async CRUD
- Using aenable() and adisable() to toggle schedules
- Using aget_runs() to list run history
- Rich-formatted display with SchedulerConsole
"""
import asyncio
from agno.db.sqlite import SqliteDb
from agno.scheduler import ScheduleManager
from agno.scheduler.cli import SchedulerConsole
async def main():
    """Exercise the async ScheduleManager CRUD API end to end.

    Creates two schedules, then demonstrates alist/aget/aupdate,
    adisable/aenable, aget_runs, Rich display, and adelete cleanup.
    """
    # --- Setup ---
    db = SqliteDb(id="async-scheduler-demo", db_file="tmp/async_scheduler_demo.db")
    mgr = ScheduleManager(db)
    console = SchedulerConsole(mgr)
    # --- Create schedules asynchronously ---
    s1 = await mgr.acreate(
        name="async-morning-report",
        cron="0 8 * * *",  # daily at 08:00 (in the schedule's timezone)
        endpoint="/agents/async-agent/runs",
        description="Morning report via async API",
        payload={"message": "Generate the morning report"},
    )
    print(f"Created: {s1.name} (id={s1.id})")
    s2 = await mgr.acreate(
        name="async-evening-summary",
        cron="0 18 * * *",  # daily at 18:00
        endpoint="/agents/async-agent/runs",
        description="Evening summary via async API",
        payload={"message": "Summarize the day"},
    )
    print(f"Created: {s2.name} (id={s2.id})")
    # --- List all schedules ---
    all_schedules = await mgr.alist()
    print(f"\nTotal schedules: {len(all_schedules)}")
    # --- Get by ID ---
    fetched = await mgr.aget(s1.id)
    print(f"Fetched: {fetched.name}")
    # --- Update ---
    updated = await mgr.aupdate(s1.id, description="Updated morning report description")
    print(f"Updated description: {updated.description}")
    # --- Disable and re-enable ---
    await mgr.adisable(s2.id)
    disabled = await mgr.aget(s2.id)
    print(f"\n{disabled.name} enabled={disabled.enabled}")
    await mgr.aenable(s2.id)
    enabled = await mgr.aget(s2.id)
    print(f"{enabled.name} enabled={enabled.enabled}")
    # --- Check runs (none yet, since we haven't executed) ---
    runs = await mgr.aget_runs(s1.id)
    print(f"\nRuns for {s1.name}: {len(runs)}")
    # --- Display with Rich ---
    print()
    console.show_schedules()
    # --- Cleanup ---
    await mgr.adelete(s1.id)
    await mgr.adelete(s2.id)
    print("\nAll schedules deleted.")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/async_schedule.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/basic_schedule.py | """Basic scheduled agent run.
Starts an AgentOS with the scheduler enabled. After the server is running,
use the REST API to create a schedule that triggers an agent every 5 minutes.
Prerequisites:
pip install agno[scheduler]
# Start postgres: ./cookbook/scripts/run_pgvector.sh
Usage:
python cookbook/05_agent_os/scheduler/basic_schedule.py
Then, in another terminal, create a schedule:
curl -X POST http://localhost:7777/schedules \
-H "Content-Type: application/json" \
-d '{
"name": "greeting-every-5m",
"cron_expr": "*/5 * * * *",
"endpoint": "/agents/greeter/runs",
"payload": {"message": "Say hello!"}
}'
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
# Shared Postgres database for the agent and the scheduler tables.
db = PostgresDb(
    id="scheduler-demo-db",
    # Matches the defaults of cookbook/scripts/run_pgvector.sh
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
)
# The agent the schedule will trigger via /agents/greeter/runs.
greeter = Agent(
    id="greeter",
    name="Greeter Agent",
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions=[
        "You are a friendly greeter. Say hello and include the current time."
    ],
    db=db,
    markdown=True,
)
app = AgentOS(
    agents=[greeter],
    db=db,
    scheduler=True,  # enable the cron poller on startup
    scheduler_poll_interval=15,  # poll frequency (presumably seconds) -- see AgentOS docs
).get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7777)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/basic_schedule.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/demo.py | """Running the scheduler inside AgentOS with programmatic schedule creation.
This example demonstrates:
- Setting scheduler=True on AgentOS to enable cron polling
- Using ScheduleManager to create schedules directly (no curl needed)
- The poller starts automatically on app startup and executes due schedules
Run with:
.venvs/demo/bin/python cookbook/05_agent_os/scheduler/demo.py
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.scheduler import ScheduleManager
# --- Setup ---
# Shared SQLite database for both agents and the scheduler tables.
db = SqliteDb(id="scheduler-os-demo", db_file="tmp/scheduler_os_demo.db")
greeter = Agent(
    name="Greeter",
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions=["You are a friendly greeter."],
    db=db,
)
reporter = Agent(
    name="Reporter",
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions=["You summarize news headlines in 2-3 sentences."],
    db=db,
)
# --- Create schedules programmatically ---
mgr = ScheduleManager(db)
# Greeter schedule.
# NOTE(review): cron "* * * * *" fires every minute, while the name and
# description say "every 5 minutes" -- presumably tightened so the demo
# triggers quickly; confirm which is intended.
greet_schedule = mgr.create(
    name="greet-every-5-min",
    cron="* * * * *",
    endpoint="/agents/greeter/runs",
    payload={"message": "Say hello!"},
    description="Greet every 5 minutes",
    if_exists="update",  # idempotent across restarts: update if name exists
)
print(f"Schedule ready: {greet_schedule.name} (next run: {greet_schedule.next_run_at})")
# Reporter schedule.
# NOTE(review): cron "* * * * *" is every minute, but the description says
# "Daily ... at 9 AM UTC" -- same demo shortcut as above; confirm.
report_schedule = mgr.create(
    name="daily-news-report",
    cron="* * * * *",
    endpoint="/agents/reporter/runs",
    payload={"message": "Summarize today's top headlines."},
    description="Daily news summary at 9 AM UTC",
    if_exists="update",
)
print(
    f"Schedule ready: {report_schedule.name} (next run: {report_schedule.next_run_at})"
)
# --- Create AgentOS with scheduler enabled ---
agent_os = AgentOS(
    name="Scheduled OS",
    agents=[greeter, reporter],
    db=db,
    scheduler=True,  # start the cron poller on app startup
    scheduler_poll_interval=15,  # poll frequency (presumably seconds) -- TODO confirm
)
# --- Run the server ---
# The poller will automatically pick up the schedules created above.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(agent_os.get_app(), host="0.0.0.0", port=7777)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/demo.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/multi_agent_schedules.py | """Multi-agent scheduling with different cron patterns and payloads.
This example demonstrates:
- Multiple agents with different roles
- Each agent gets a schedule with different cron, timezone, payload
- Retry configuration for reliability
- Rich table showing all schedules
- Filtered views (enabled only, disabled only)
"""
from agno.db.sqlite import SqliteDb
from agno.scheduler import ScheduleManager
from agno.scheduler.cli import SchedulerConsole
# --- Setup ---
# Shared SQLite database backing the scheduler tables.
db = SqliteDb(id="multi-agent-demo", db_file="tmp/multi_agent_demo.db")
mgr = ScheduleManager(db)
console = SchedulerConsole(mgr)
# =============================================================================
# Create schedules with different configurations
# =============================================================================
print("Creating schedules for 3 agents...\n")
# Research agent: daily at 7 AM Eastern (timezone-aware cron) with custom payload
s_research = mgr.create(
    name="daily-research",
    cron="0 7 * * *",
    endpoint="/agents/research-agent/runs",
    description="Gather daily research insights",
    timezone="America/New_York",
    payload={
        "message": "Research the latest AI developments",
        "stream": False,
    },
)
# Writer agent: weekdays (Mon-Fri) at 10 AM UTC
s_writer = mgr.create(
    name="weekday-report",
    cron="0 10 * * 1-5",
    endpoint="/agents/writer-agent/runs",
    description="Generate weekday summary report",
    payload={
        "message": "Write a summary of yesterday's research",
    },
)
# Monitor agent: every 15 minutes with retry configuration
s_monitor = mgr.create(
    name="health-monitor",
    cron="*/15 * * * *",
    endpoint="/agents/monitor-agent/runs",
    description="System health check every 15 minutes",
    payload={
        "message": "Check system health and report anomalies",
    },
    max_retries=3,  # retry failed runs up to 3 times
    retry_delay_seconds=30,
    timeout_seconds=120,
)
print("All schedules created.")
# =============================================================================
# Display all schedules
# =============================================================================
print("\n--- All Schedules ---")
console.show_schedules()
# =============================================================================
# Show individual schedule details
# =============================================================================
print("\n--- Monitor Schedule Details ---")
console.show_schedule(s_monitor.id)
# =============================================================================
# Disable one schedule and show filtered views
# =============================================================================
mgr.disable(s_writer.id)
print("\nDisabled 'weekday-report' schedule.")
print("\n--- Enabled Schedules Only ---")
# show_schedules appears to return the filtered list (used for counting
# below) in addition to rendering it -- TODO confirm against SchedulerConsole.
enabled = console.show_schedules(enabled=True)
print(f"({len(enabled)} enabled)")
print("\n--- Disabled Schedules Only ---")
disabled = console.show_schedules(enabled=False)
print(f"({len(disabled)} disabled)")
# =============================================================================
# Re-enable and verify
# =============================================================================
mgr.enable(s_writer.id)
print("\nRe-enabled 'weekday-report' schedule.")
all_schedules = mgr.list()
print(f"Total schedules: {len(all_schedules)}")
# =============================================================================
# Cleanup
# =============================================================================
# Uncomment to clean up schedules from the DB:
# for s in [s_research, s_writer, s_monitor]:
#     mgr.delete(s.id)
# print("\nAll schedules cleaned up.")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/multi_agent_schedules.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/rest_api_schedules.py | """Using the scheduler REST API endpoints directly.
This example demonstrates:
- Creating schedules via POST /schedules
- Listing schedules via GET /schedules
- Updating via PATCH /schedules/{id}
- Enable/disable via POST /schedules/{id}/enable and /disable
- Manual trigger via POST /schedules/{id}/trigger
- Viewing run history via GET /schedules/{id}/runs
- Deleting via DELETE /schedules/{id}
Requires: a running AgentOS server with scheduler=True
.venvs/demo/bin/python cookbook/05_agent_os/scheduler/scheduler_with_agentos.py
Then in another terminal:
.venvs/demo/bin/python cookbook/05_agent_os/scheduler/rest_api_schedules.py
"""
import httpx
BASE_URL = "http://127.0.0.1:7777"
client = httpx.Client(base_url=BASE_URL, timeout=30)
def main():
    """Drive every scheduler REST endpoint once against a local AgentOS.

    Steps: create, list, get, update, disable/enable, manual trigger,
    run history, delete. Requires a server started with scheduler=True.
    """
    # =========================================================================
    # 1. Create a schedule
    # =========================================================================
    print("=== Create Schedule ===\n")
    resp = client.post(
        "/schedules",
        json={
            "name": "api-demo-schedule",
            "cron_expr": "*/5 * * * *",
            "endpoint": "/agents/greeter/runs",
            "description": "Created via REST API",
            "payload": {"message": "Hello from the REST API!"},
            "timezone": "UTC",
            "max_retries": 1,
            "retry_delay_seconds": 30,
        },
    )
    resp.raise_for_status()
    schedule = resp.json()
    schedule_id = schedule["id"]
    print(f"Created: {schedule['name']} (id={schedule_id})")
    print(f" Cron: {schedule['cron_expr']}")
    print(f" Next run: {schedule['next_run_at']}")
    # =========================================================================
    # 2. List all schedules
    # =========================================================================
    print("\n=== List Schedules ===\n")
    resp = client.get("/schedules")
    resp.raise_for_status()
    # List responses are paginated: payload under "data", page info under "meta".
    result = resp.json()
    schedules = result["data"]
    meta = result["meta"]
    print(
        f"Page {meta['page']} of {meta['total_pages']} (total: {meta['total_count']})\n"
    )
    for s in schedules:
        status = "enabled" if s["enabled"] else "disabled"
        print(f" {s['name']} [{status}] -> {s['endpoint']}")
    # =========================================================================
    # 3. Get a single schedule
    # =========================================================================
    print("\n=== Get Schedule ===\n")
    resp = client.get(f"/schedules/{schedule_id}")
    resp.raise_for_status()
    detail = resp.json()
    print(f" Name: {detail['name']}")
    print(f" Cron: {detail['cron_expr']}")
    print(f" Timezone: {detail['timezone']}")
    print(f" Max retries: {detail['max_retries']}")
    # =========================================================================
    # 4. Update the schedule
    # =========================================================================
    print("\n=== Update Schedule ===\n")
    resp = client.patch(
        f"/schedules/{schedule_id}",
        json={
            "description": "Updated description via REST API",
            "cron_expr": "0 * * * *",
        },
    )
    resp.raise_for_status()
    updated = resp.json()
    print(f" Description: {updated['description']}")
    print(f" Cron: {updated['cron_expr']}")
    # =========================================================================
    # 5. Disable and re-enable
    # =========================================================================
    print("\n=== Disable/Enable ===\n")
    resp = client.post(f"/schedules/{schedule_id}/disable")
    resp.raise_for_status()
    print(f" Disabled: enabled={resp.json()['enabled']}")
    resp = client.post(f"/schedules/{schedule_id}/enable")
    resp.raise_for_status()
    print(f" Re-enabled: enabled={resp.json()['enabled']}")
    # =========================================================================
    # 6. Manual trigger
    # =========================================================================
    print("\n=== Manual Trigger ===\n")
    # Best-effort: the trigger can fail if the executor hasn't started yet,
    # so handle 503 and client-side errors explicitly instead of raising.
    try:
        resp = client.post(f"/schedules/{schedule_id}/trigger")
        if resp.status_code == 200:
            trigger_result = resp.json()
            print(f" Trigger result: status={trigger_result.get('status')}")
            print(f" Run ID: {trigger_result.get('run_id')}")
        elif resp.status_code == 503:
            print(" Trigger returned 503 (scheduler executor not running yet)")
        else:
            print(f" Trigger response: {resp.status_code} {resp.text}")
    except Exception as e:
        print(f" Trigger timed out or failed: {type(e).__name__}")
    # =========================================================================
    # 7. View run history
    # =========================================================================
    print("\n=== Run History ===\n")
    resp = client.get(f"/schedules/{schedule_id}/runs", params={"limit": 5, "page": 1})
    resp.raise_for_status()
    result = resp.json()
    runs = result["data"]
    meta = result["meta"]
    if runs:
        print(f"Showing {len(runs)} of {meta['total_count']} total runs\n")
        for run in runs:
            print(
                f" Run {run['id'][:8]}... status={run['status']} attempt={run['attempt']}"
            )
    else:
        print(" No runs yet (schedule hasn't been polled)")
    # =========================================================================
    # 8. Delete the schedule
    # =========================================================================
    print("\n=== Delete ===\n")
    resp = client.delete(f"/schedules/{schedule_id}")
    resp.raise_for_status()
    # Delete may return an empty body, in which case .json() raises.
    try:
        result = resp.json()
        print(f" Deleted: {result}")
    except Exception:
        print(f" Deleted successfully (status {resp.status_code})")
    print("\nDone.")
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/rest_api_schedules.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/run_history.py | """Viewing and analyzing schedule run history.
This example demonstrates:
- Creating schedules and simulating run records
- Using SchedulerConsole.show_runs() for Rich-formatted run history
- Querying run history with pagination
- Understanding run statuses (success, failed, running, paused)
"""
import time
from uuid import uuid4
from agno.db.sqlite import SqliteDb
from agno.scheduler import ScheduleManager
from agno.scheduler.cli import SchedulerConsole
# --- Setup ---
# --- Setup ---
db = SqliteDb(id="run-history-demo", db_file="tmp/run_history_demo.db")
mgr = ScheduleManager(db)
console = SchedulerConsole(mgr)
# --- Create a schedule ---
schedule = mgr.create(
    name="monitored-task",
    cron="*/5 * * * *",
    endpoint="/agents/monitor/runs",
    description="A schedule with run history to inspect",
    payload={"message": "Run health check"},
    max_retries=2,
    retry_delay_seconds=30,
)
print(f"Created schedule: {schedule.name} (id={schedule.id})")
# --- Simulate some run records by inserting directly ---
# In production, the ScheduleExecutor creates these automatically.
# Here we insert them manually to demonstrate the history display.
# Timestamps are Unix epoch seconds.
now = int(time.time())
# Simulate 3 runs with different statuses: a success, a failed attempt,
# and a successful retry (attempt=2) of that failure.
run_records = [
    {
        "id": str(uuid4()),
        "schedule_id": schedule.id,
        "attempt": 1,
        "triggered_at": now - 600,  # 10 minutes ago
        "completed_at": now - 590,
        "status": "success",
        "status_code": 200,
        "run_id": str(uuid4()),
        "session_id": str(uuid4()),
        "error": None,
        "created_at": now - 600,
    },
    {
        "id": str(uuid4()),
        "schedule_id": schedule.id,
        "attempt": 1,
        "triggered_at": now - 300,  # 5 minutes ago
        "completed_at": now - 280,
        "status": "failed",
        "status_code": 500,
        "run_id": str(uuid4()),
        "session_id": None,  # failed before a session was created
        "error": "Internal server error",
        "created_at": now - 300,
    },
    {
        "id": str(uuid4()),
        "schedule_id": schedule.id,
        "attempt": 2,  # retry of the failed run above
        "triggered_at": now - 240,
        "completed_at": now - 230,
        "status": "success",
        "status_code": 200,
        "run_id": str(uuid4()),
        "session_id": str(uuid4()),
        "error": None,
        "created_at": now - 240,
    },
]
for record in run_records:
    db.create_schedule_run(record)
# --- Display run history with Rich ---
print("\n=== Run History (Rich Table) ===\n")
console.show_runs(schedule.id)
# --- Query runs programmatically ---
print("\n=== Run History (Programmatic) ===\n")
runs = mgr.get_runs(schedule.id, limit=10)
print(f"Total runs: {len(runs)}")
for run in runs:
    status = run.status
    attempt = run.attempt
    error = run.error or "-"
    print(f" Attempt {attempt}: {status} (error={error})")
# --- Pagination ---
print("\n=== Paginated (limit=2, offset=0) ===\n")
page1 = mgr.get_runs(schedule.id, limit=2, offset=0)
print(f"Page 1: {len(page1)} runs")
page2 = mgr.get_runs(schedule.id, limit=2, offset=2)
print(f"Page 2: {len(page2)} runs")
# --- Cleanup ---
# Deleting the schedule; run records are removed as well (presumably via
# cascade -- confirm against the DB layer).
mgr.delete(schedule.id)
print("\nSchedule and runs deleted.")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/run_history.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/schedule_management.py | """Schedule management via REST API.
Demonstrates creating, listing, updating, enabling/disabling,
manually triggering, and deleting schedules.
Prerequisites:
pip install agno[scheduler] httpx
Usage:
# First, start the server:
python cookbook/05_agent_os/scheduler/basic_schedule.py
# Then run this script:
python cookbook/05_agent_os/scheduler/schedule_management.py
"""
import httpx
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
BASE_URL = "http://localhost:7777"
def main():
    """Exercise the schedule REST API end to end.

    Creates a schedule against a running AgentOS server, then lists,
    updates, disables/enables, manually triggers, reads run history,
    and deletes it.

    Raises:
        httpx.HTTPStatusError: if any CRUD step returns an error status.
            (Previously a server error surfaced later as a confusing
            KeyError/JSON error on the response body.)
    """
    client = httpx.Client(base_url=BASE_URL, timeout=30)
    # 1. Create a schedule
    print("--- Creating schedule ---")
    resp = client.post(
        "/schedules",
        json={
            "name": "hourly-greeting",
            "cron_expr": "0 * * * *",
            "endpoint": "/agents/greeter/runs",
            "payload": {"message": "Hourly check-in"},
            "timezone": "UTC",
            "max_retries": 2,
            "retry_delay_seconds": 30,
        },
    )
    resp.raise_for_status()  # fail fast instead of KeyError-ing on an error body
    print(f" Status: {resp.status_code}")
    schedule = resp.json()
    schedule_id = schedule["id"]
    print(f" ID: {schedule_id}")
    print(f" Next run at: {schedule['next_run_at']}")
    print()
    # 2. List all schedules
    print("--- Listing schedules ---")
    resp = client.get("/schedules")
    resp.raise_for_status()
    # NOTE(review): rest_api_schedules.py reads resp.json()["data"] for this
    # endpoint -- confirm whether the list response is paginated or a bare list.
    schedules = resp.json()
    for s in schedules:
        print(f" {s['name']} (enabled={s['enabled']}, next_run={s['next_run_at']})")
    print()
    # 3. Update the schedule
    print("--- Updating schedule ---")
    resp = client.patch(
        f"/schedules/{schedule_id}",
        json={"description": "Runs every hour on the hour", "max_retries": 3},
    )
    resp.raise_for_status()
    print(f" Updated description: {resp.json()['description']}")
    print()
    # 4. Disable the schedule
    print("--- Disabling schedule ---")
    resp = client.post(f"/schedules/{schedule_id}/disable")
    resp.raise_for_status()
    print(f" Enabled: {resp.json()['enabled']}")
    print()
    # 5. Re-enable the schedule
    print("--- Enabling schedule ---")
    resp = client.post(f"/schedules/{schedule_id}/enable")
    resp.raise_for_status()
    print(f" Enabled: {resp.json()['enabled']}")
    print(f" Next run at: {resp.json()['next_run_at']}")
    print()
    # 6. Manually trigger
    # No raise_for_status here: the trigger endpoint may legitimately return a
    # non-2xx status (e.g. executor not running yet), which we want to display.
    print("--- Triggering schedule ---")
    resp = client.post(f"/schedules/{schedule_id}/trigger")
    print(f" Trigger status: {resp.status_code}")
    print(f" Run: {resp.json()}")
    print()
    # 7. View run history
    print("--- Run history ---")
    resp = client.get(f"/schedules/{schedule_id}/runs")
    resp.raise_for_status()
    runs = resp.json()
    print(f" Total runs: {len(runs)}")
    for run in runs:
        print(
            f" attempt={run['attempt']} status={run['status']} triggered_at={run['triggered_at']}"
        )
    print()
    # 8. Delete the schedule
    print("--- Deleting schedule ---")
    resp = client.delete(f"/schedules/{schedule_id}")
    print(f" Delete status: {resp.status_code}")
    # Verify deletion: this GET intentionally expects a 404, so no
    # raise_for_status on it.
    resp = client.get(f"/schedules/{schedule_id}")
    print(f" Get after delete: {resp.status_code} (expected 404)")
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/schedule_management.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/schedule_validation.py | """Schedule validation and error handling.
This example demonstrates:
- Invalid cron expression handling
- Invalid timezone handling
- Duplicate schedule name handling
- Complex cron patterns (ranges, steps, lists)
- Method auto-uppercasing
"""
from agno.db.sqlite import SqliteDb
from agno.scheduler import ScheduleManager
from agno.scheduler.cli import SchedulerConsole
# --- Setup ---
# NOTE(review): assumes a writable ./tmp directory exists — confirm before running.
db = SqliteDb(id="validation-demo", db_file="tmp/validation_demo.db")
mgr = ScheduleManager(db)

# =============================================================================
# 1. Invalid cron expression
# =============================================================================
# create() is expected to reject a malformed cron string with ValueError.
print("1. Invalid cron expression:")
try:
    mgr.create(name="bad-cron", cron="not valid", endpoint="/test")
except ValueError as e:
    print(f" Caught ValueError: {e}")

# =============================================================================
# 2. Invalid timezone
# =============================================================================
# Unknown timezone names are rejected the same way.
print("\n2. Invalid timezone:")
try:
    mgr.create(name="bad-tz", cron="0 9 * * *", endpoint="/test", timezone="Fake/Zone")
except ValueError as e:
    print(f" Caught ValueError: {e}")

# =============================================================================
# 3. Duplicate schedule name
# =============================================================================
# Schedule names are unique: a second create() with the same name raises.
print("\n3. Duplicate schedule name:")
s = mgr.create(name="unique-schedule", cron="0 9 * * *", endpoint="/test")
try:
    mgr.create(name="unique-schedule", cron="0 10 * * *", endpoint="/test")
except ValueError as e:
    print(f" Caught ValueError: {e}")
mgr.delete(s.id)

# =============================================================================
# 4. Complex cron patterns
# =============================================================================
# Steps, ranges, and day-of-week ranges are all valid 5-field cron syntax.
print("\n4. Complex cron patterns:")
# Every 5 minutes
s1 = mgr.create(name="every-5-min", cron="*/5 * * * *", endpoint="/test")
print(f" */5 * * * * -> Created: {s1.name}")
# Weekdays 9-17
s2 = mgr.create(name="business-hours", cron="0 9-17 * * 1-5", endpoint="/test")
print(f" 0 9-17 * * 1-5 -> Created: {s2.name}")
# First day of month at midnight
s3 = mgr.create(name="monthly-report", cron="0 0 1 * *", endpoint="/test")
print(f" 0 0 1 * * -> Created: {s3.name}")

# =============================================================================
# 5. Method auto-uppercasing
# =============================================================================
# HTTP method strings are normalized to uppercase on create.
print("\n5. Method auto-uppercasing:")
s4 = mgr.create(
    name="lowercase-method", cron="0 9 * * *", endpoint="/test", method="get"
)
print(f" Input: 'get' -> Stored: '{s4.method}'")

# =============================================================================
# 6. Display all valid schedules
# =============================================================================
print()
console = SchedulerConsole(mgr)
console.show_schedules()

# =============================================================================
# Cleanup
# =============================================================================
# Delete everything created above so repeated runs start clean.
for s in [s1, s2, s3, s4]:
    mgr.delete(s.id)
print("All schedules cleaned up.")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/schedule_validation.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/scheduler/team_workflow_schedules.py | """Scheduling teams and workflows (not just agents).
This example demonstrates:
- Creating schedules that target team endpoints (/teams/*/runs)
- Creating schedules that target workflow endpoints (/workflows/*/runs)
- Different payload configurations for teams vs workflows
- Using the ScheduleManager directly for setup
"""
from agno.db.sqlite import SqliteDb
from agno.scheduler import ScheduleManager
from agno.scheduler.cli import SchedulerConsole
# --- Setup ---
# NOTE(review): assumes a writable ./tmp directory exists — confirm before running.
db = SqliteDb(id="team-wf-demo", db_file="tmp/team_wf_demo.db")
mgr = ScheduleManager(db)
console = SchedulerConsole(mgr)

# =============================================================================
# 1. Schedule a team run
# =============================================================================
# Schedules are plain HTTP calls, so targeting a team is just a matter of
# pointing the endpoint at /teams/<id>/runs.
print("=== Team Schedules ===\n")
team_schedule = mgr.create(
    name="daily-research-team",
    cron="0 9 * * 1-5",
    endpoint="/teams/research-team/runs",
    description="Run the research team every weekday at 9 AM",
    payload={
        "message": "Research the latest developments in AI safety",
        "stream": False,
    },
    timeout_seconds=1800,
    max_retries=2,
    retry_delay_seconds=60,
)
print(f"Created team schedule: {team_schedule.name}")
console.show_schedule(team_schedule.id)

# =============================================================================
# 2. Schedule a workflow run
# =============================================================================
print("\n=== Workflow Schedules ===\n")
wf_schedule = mgr.create(
    name="nightly-data-pipeline",
    cron="0 2 * * *",
    endpoint="/workflows/data-pipeline/runs",
    description="Run the data pipeline workflow every night at 2 AM",
    payload={
        "message": "Process and aggregate daily data",
    },
    timeout_seconds=3600,
)
print(f"Created workflow schedule: {wf_schedule.name}")
console.show_schedule(wf_schedule.id)

# =============================================================================
# 3. Mix of agent, team, and workflow schedules
# =============================================================================
print("\n=== Mixed Schedules ===\n")
agent_sched = mgr.create(
    name="hourly-monitor",
    cron="0 * * * *",
    endpoint="/agents/monitor-agent/runs",
    description="Run monitor agent every hour",
    payload={"message": "Check system health"},
)
# Show all schedules together
console.show_schedules()

# =============================================================================
# 4. Different HTTP methods for non-run endpoints
# =============================================================================
print("\n=== Non-run endpoint schedules ===\n")
# Schedule a GET request (e.g., health check)
health_sched = mgr.create(
    name="health-ping",
    cron="*/10 * * * *",
    endpoint="/health",
    method="GET",
    description="Ping health endpoint every 10 minutes",
)
print(
    f"Created GET schedule: {health_sched.name} -> {health_sched.method} {health_sched.endpoint}"
)

# =============================================================================
# Cleanup
# =============================================================================
# Delete everything created above so repeated runs start clean.
print("\n=== Cleanup ===\n")
for s in mgr.list():
    mgr.delete(s.id)
print("All schedules cleaned up.")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/scheduler/team_workflow_schedules.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/db/schemas/scheduler.py | from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from agno.utils.dttm import now_epoch_s, to_epoch_s
@dataclass
class Schedule:
    """Model for a scheduled job.

    Timestamp-like fields are normalized to plain epoch-second ints in
    ``__post_init__`` so callers can pass in whatever the DB hands back.
    """

    id: str
    name: str
    cron_expr: str
    endpoint: str
    description: Optional[str] = None
    method: str = "POST"
    payload: Optional[Dict[str, Any]] = None
    timezone: str = "UTC"
    timeout_seconds: int = 3600
    max_retries: int = 0
    retry_delay_seconds: int = 60
    enabled: bool = True
    next_run_at: Optional[int] = None
    locked_by: Optional[str] = None
    locked_at: Optional[int] = None
    created_at: Optional[int] = None
    updated_at: Optional[int] = None

    def __post_init__(self) -> None:
        # created_at defaults to "now"; the other timestamps are only
        # coerced when they were actually provided.
        if self.created_at is None:
            self.created_at = now_epoch_s()
        else:
            self.created_at = to_epoch_s(self.created_at)
        if self.updated_at is not None:
            self.updated_at = to_epoch_s(self.updated_at)
        for attr in ("next_run_at", "locked_at"):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, int(value))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to dict. Preserves None values (important for DB updates)."""
        keys = (
            "id",
            "name",
            "description",
            "method",
            "endpoint",
            "payload",
            "cron_expr",
            "timezone",
            "timeout_seconds",
            "max_retries",
            "retry_delay_seconds",
            "enabled",
            "next_run_at",
            "locked_by",
            "locked_at",
            "created_at",
            "updated_at",
        )
        return {key: getattr(self, key) for key in keys}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Schedule":
        """Build a Schedule from *data*, silently dropping unknown keys."""
        known = {
            "id",
            "name",
            "description",
            "method",
            "endpoint",
            "payload",
            "cron_expr",
            "timezone",
            "timeout_seconds",
            "max_retries",
            "retry_delay_seconds",
            "enabled",
            "next_run_at",
            "locked_by",
            "locked_at",
            "created_at",
            "updated_at",
        }
        return cls(**{key: value for key, value in data.items() if key in known})
@dataclass
class ScheduleRun:
    """Model for a single execution attempt of a schedule."""

    id: str
    schedule_id: str
    attempt: int = 1
    triggered_at: Optional[int] = None
    completed_at: Optional[int] = None
    status: str = "running"  # running | success | failed | paused | timeout
    status_code: Optional[int] = None
    run_id: Optional[str] = None
    session_id: Optional[str] = None
    error: Optional[str] = None
    input: Optional[Dict[str, Any]] = None
    output: Optional[Dict[str, Any]] = None
    requirements: Optional[List[Dict[str, Any]]] = None
    created_at: Optional[int] = None

    def __post_init__(self) -> None:
        # created_at defaults to "now"; trigger/completion times are only
        # coerced to ints when they were actually provided.
        if self.created_at is None:
            self.created_at = now_epoch_s()
        else:
            self.created_at = to_epoch_s(self.created_at)
        for attr in ("triggered_at", "completed_at"):
            value = getattr(self, attr)
            if value is not None:
                setattr(self, attr, int(value))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to dict. Preserves None values."""
        keys = (
            "id",
            "schedule_id",
            "attempt",
            "triggered_at",
            "completed_at",
            "status",
            "status_code",
            "run_id",
            "session_id",
            "error",
            "input",
            "output",
            "requirements",
            "created_at",
        )
        return {key: getattr(self, key) for key in keys}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ScheduleRun":
        """Build a ScheduleRun from *data*, silently dropping unknown keys."""
        known = {
            "id",
            "schedule_id",
            "attempt",
            "triggered_at",
            "completed_at",
            "status",
            "status_code",
            "run_id",
            "session_id",
            "error",
            "input",
            "output",
            "requirements",
            "created_at",
        }
        return cls(**{key: value for key, value in data.items() if key in known})
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/db/schemas/scheduler.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/schedules/router.py | """Schedule API router -- CRUD + trigger for cron schedules."""
import asyncio
import time
from typing import Any, Dict, Literal, Optional
from uuid import uuid4
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from agno.os.routers.schedules.schema import (
ScheduleCreate,
ScheduleResponse,
ScheduleRunResponse,
ScheduleStateResponse,
ScheduleUpdate,
)
from agno.os.schema import PaginatedResponse, PaginationInfo
from agno.utils.log import log_info
# Valid DB method names that _db_call can invoke.
# Keeping this as a Literal lets static type checkers catch typos in the
# method-name strings at the _db_call call sites below.
_SchedulerDbMethod = Literal[
    "get_schedule",
    "get_schedule_by_name",
    "get_schedules",
    "create_schedule",
    "update_schedule",
    "delete_schedule",
    "get_schedule_run",
    "get_schedule_runs",
]
def get_schedule_router(os_db: Any, settings: Any) -> APIRouter:
    """Factory that creates and returns the schedule router.

    Args:
        os_db: The AgentOS-level DB adapter (must support scheduler methods).
        settings: AgnoAPISettings instance.

    Returns:
        An APIRouter with all schedule endpoints attached.
    """
    from agno.os.auth import get_authentication_dependency

    router = APIRouter(tags=["Schedules"])
    auth_dependency = get_authentication_dependency(settings)

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------
    def _check_scheduler_deps() -> None:
        """Raise 503 if croniter/pytz are not installed."""
        # Imported lazily so the router can be constructed even when the
        # optional scheduler dependencies are missing.
        try:
            from agno.scheduler.cron import _require_croniter, _require_pytz

            _require_croniter()
            _require_pytz()
        except ImportError as exc:
            raise HTTPException(status_code=503, detail=str(exc))

    async def _db_call(method_name: _SchedulerDbMethod, *args: Any, **kwargs: Any) -> Any:
        # Dispatch a scheduler method on the DB adapter by name. Adapters may
        # be sync or async; both are supported transparently. A missing or
        # unimplemented method maps to HTTP 503.
        fn = getattr(os_db, method_name, None)
        if fn is None:
            raise HTTPException(status_code=503, detail="Scheduler not supported by the configured database")
        try:
            if asyncio.iscoroutinefunction(fn):
                return await fn(*args, **kwargs)
            return fn(*args, **kwargs)
        except NotImplementedError:
            raise HTTPException(status_code=503, detail="Scheduler not supported by the configured database")

    # ------------------------------------------------------------------
    # Endpoints
    # ------------------------------------------------------------------
    # GET /schedules -- paginated listing, optionally filtered by enabled flag.
    @router.get("/schedules", response_model=PaginatedResponse[ScheduleResponse])
    async def list_schedules(
        enabled: Optional[bool] = Query(None),
        limit: int = Query(100, ge=1, le=1000),
        page: int = Query(1, ge=1),
        _: bool = Depends(auth_dependency),
    ) -> PaginatedResponse[ScheduleResponse]:
        schedules, total_count = await _db_call("get_schedules", enabled=enabled, limit=limit, page=page)
        # Ceiling division; zero pages when there are no rows at all.
        total_pages = (total_count + limit - 1) // limit if total_count > 0 else 0
        return PaginatedResponse(
            data=schedules,
            meta=PaginationInfo(
                page=page,
                limit=limit,
                total_pages=total_pages,
                total_count=total_count,
            ),
        )

    # POST /schedules -- validate cron/timezone/name, then persist.
    @router.post("/schedules", response_model=ScheduleResponse, status_code=201)
    async def create_schedule(
        body: ScheduleCreate,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        _check_scheduler_deps()
        from agno.scheduler.cron import compute_next_run, validate_cron_expr, validate_timezone

        if not validate_cron_expr(body.cron_expr):
            raise HTTPException(status_code=422, detail=f"Invalid cron expression: {body.cron_expr}")
        if not validate_timezone(body.timezone):
            raise HTTPException(status_code=422, detail=f"Invalid timezone: {body.timezone}")
        # Check name uniqueness
        existing = await _db_call("get_schedule_by_name", body.name)
        if existing is not None:
            raise HTTPException(status_code=409, detail=f"Schedule with name '{body.name}' already exists")
        next_run_at = compute_next_run(body.cron_expr, body.timezone)
        now = int(time.time())
        # New schedules are always created enabled and unlocked.
        schedule_dict: Dict[str, Any] = {
            "id": str(uuid4()),
            "name": body.name,
            "description": body.description,
            "method": body.method,
            "endpoint": body.endpoint,
            "payload": body.payload,
            "cron_expr": body.cron_expr,
            "timezone": body.timezone,
            "timeout_seconds": body.timeout_seconds,
            "max_retries": body.max_retries,
            "retry_delay_seconds": body.retry_delay_seconds,
            "enabled": True,
            "next_run_at": next_run_at,
            "locked_by": None,
            "locked_at": None,
            "created_at": now,
            "updated_at": None,
        }
        result = await _db_call("create_schedule", schedule_dict)
        if result is None:
            raise HTTPException(status_code=500, detail="Failed to create schedule")
        return result

    # GET /schedules/{schedule_id}
    @router.get("/schedules/{schedule_id}", response_model=ScheduleResponse)
    async def get_schedule(
        schedule_id: str,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        schedule = await _db_call("get_schedule", schedule_id)
        if schedule is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        return schedule

    # PATCH /schedules/{schedule_id} -- partial update; re-validates cron,
    # timezone, and name only when those fields change.
    @router.patch("/schedules/{schedule_id}", response_model=ScheduleResponse)
    async def update_schedule(
        schedule_id: str,
        body: ScheduleUpdate,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        existing = await _db_call("get_schedule", schedule_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        updates = body.model_dump(exclude_unset=True)
        if not updates:
            # Empty PATCH body: no-op, echo back the current row.
            return existing
        # Validate cron/timezone if changing
        cron_changed = "cron_expr" in updates or "timezone" in updates
        if cron_changed:
            _check_scheduler_deps()
            from agno.scheduler.cron import compute_next_run, validate_cron_expr, validate_timezone

            new_cron = updates.get("cron_expr", existing["cron_expr"])
            new_tz = updates.get("timezone", existing["timezone"])
            if not validate_cron_expr(new_cron):
                raise HTTPException(status_code=422, detail=f"Invalid cron expression: {new_cron}")
            if not validate_timezone(new_tz):
                raise HTTPException(status_code=422, detail=f"Invalid timezone: {new_tz}")
            # Only reschedule immediately when the schedule is active; the
            # enable endpoint recomputes next_run_at for disabled schedules.
            if existing.get("enabled", True):
                updates["next_run_at"] = compute_next_run(new_cron, new_tz)
        # Validate name uniqueness if changing
        if "name" in updates and updates["name"] != existing["name"]:
            dup = await _db_call("get_schedule_by_name", updates["name"])
            if dup is not None:
                raise HTTPException(status_code=409, detail=f"Schedule with name '{updates['name']}' already exists")
        result = await _db_call("update_schedule", schedule_id, **updates)
        if result is None:
            raise HTTPException(status_code=500, detail="Failed to update schedule")
        return result

    # DELETE /schedules/{schedule_id}
    @router.delete("/schedules/{schedule_id}", status_code=204)
    async def delete_schedule(
        schedule_id: str,
        _: bool = Depends(auth_dependency),
    ) -> None:
        existing = await _db_call("get_schedule", schedule_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        deleted = await _db_call("delete_schedule", schedule_id)
        if not deleted:
            raise HTTPException(status_code=500, detail="Failed to delete schedule")

    # POST /schedules/{schedule_id}/enable -- also recomputes next_run_at so
    # the schedule doesn't fire immediately for slots missed while disabled.
    @router.post("/schedules/{schedule_id}/enable", response_model=ScheduleStateResponse)
    async def enable_schedule(
        schedule_id: str,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        existing = await _db_call("get_schedule", schedule_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        _check_scheduler_deps()
        from agno.scheduler.cron import compute_next_run

        next_run_at = compute_next_run(existing["cron_expr"], existing.get("timezone", "UTC"))
        result = await _db_call("update_schedule", schedule_id, enabled=True, next_run_at=next_run_at)
        if result is None:
            raise HTTPException(status_code=500, detail="Failed to enable schedule")
        log_info(f"Schedule '{existing.get('name', schedule_id)}' enabled (next_run_at={next_run_at})")
        return result

    # POST /schedules/{schedule_id}/disable
    @router.post("/schedules/{schedule_id}/disable", response_model=ScheduleStateResponse)
    async def disable_schedule(
        schedule_id: str,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        existing = await _db_call("get_schedule", schedule_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        result = await _db_call("update_schedule", schedule_id, enabled=False)
        if result is None:
            raise HTTPException(status_code=500, detail="Failed to disable schedule")
        log_info(f"Schedule '{existing.get('name', schedule_id)}' disabled")
        return result

    # POST /schedules/{schedule_id}/trigger -- run immediately via the
    # in-process executor (set on app.state elsewhere, presumably at startup).
    @router.post("/schedules/{schedule_id}/trigger", response_model=ScheduleRunResponse)
    async def trigger_schedule(
        schedule_id: str,
        request: Request,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        existing = await _db_call("get_schedule", schedule_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        if not existing.get("enabled", True):
            raise HTTPException(status_code=409, detail="Schedule is disabled")
        executor = getattr(request.app.state, "scheduler_executor", None)
        if executor is not None:
            # NOTE(review): release_schedule=False presumably keeps a manual
            # trigger from touching the schedule's lock/next_run bookkeeping
            # -- confirm against the executor implementation.
            run = await executor.execute(existing, os_db, release_schedule=False)
            return run
        raise HTTPException(status_code=503, detail="Scheduler is not running")

    # GET /schedules/{schedule_id}/runs -- paginated run history.
    @router.get("/schedules/{schedule_id}/runs", response_model=PaginatedResponse[ScheduleRunResponse])
    async def list_schedule_runs(
        schedule_id: str,
        limit: int = Query(100, ge=1, le=1000),
        page: int = Query(1, ge=1),
        _: bool = Depends(auth_dependency),
    ) -> PaginatedResponse[ScheduleRunResponse]:
        existing = await _db_call("get_schedule", schedule_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Schedule not found")
        runs, total_count = await _db_call("get_schedule_runs", schedule_id, limit=limit, page=page)
        total_pages = (total_count + limit - 1) // limit if total_count > 0 else 0
        return PaginatedResponse(
            data=runs,
            meta=PaginationInfo(
                page=page,
                limit=limit,
                total_pages=total_pages,
                total_count=total_count,
            ),
        )

    # GET /schedules/{schedule_id}/runs/{run_id}
    @router.get("/schedules/{schedule_id}/runs/{run_id}", response_model=ScheduleRunResponse)
    async def get_schedule_run(
        schedule_id: str,
        run_id: str,
        _: bool = Depends(auth_dependency),
    ) -> Dict[str, Any]:
        run = await _db_call("get_schedule_run", run_id)
        # 404 both for unknown run IDs and for runs owned by another schedule.
        if run is None or run.get("schedule_id") != schedule_id:
            raise HTTPException(status_code=404, detail="Schedule run not found")
        return run

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/schedules/router.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/schedules/schema.py | """Pydantic request/response models for the schedule API."""
import re
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field, field_validator, model_validator
# Schedule names: must start with an alphanumeric character, followed by any
# mix of alphanumerics, spaces, '.', '_', and '-'.
_NAME_PATTERN = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]*$")
class ScheduleCreate(BaseModel):
    # Request body for POST /schedules. (Comments only -- no docstring, to
    # avoid altering the generated OpenAPI schema description.)
    name: str = Field(..., max_length=255)
    cron_expr: str = Field(..., max_length=128)
    endpoint: str = Field(..., max_length=512)
    method: str = Field(default="POST", max_length=10)
    description: Optional[str] = Field(default=None, max_length=1024)
    payload: Optional[Dict[str, Any]] = None
    timezone: str = Field(default="UTC", max_length=64)
    timeout_seconds: int = Field(default=3600, ge=1, le=86400)  # 1s .. 24h
    max_retries: int = Field(default=0, ge=0, le=10)
    retry_delay_seconds: int = Field(default=60, ge=1, le=3600)

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: str) -> str:
        # Enforce the shared name pattern (leading alphanumeric, limited charset).
        if not _NAME_PATTERN.match(v):
            raise ValueError("Name must start with alphanumeric and contain only alphanumeric, spaces, '.', '_', '-'")
        return v

    @field_validator("method")
    @classmethod
    def validate_method(cls, v: str) -> str:
        # Normalize to uppercase before checking against the allowed verbs.
        v = v.upper()
        if v not in ("GET", "POST", "PUT", "PATCH", "DELETE"):
            raise ValueError("Method must be GET, POST, PUT, PATCH, or DELETE")
        return v

    @field_validator("endpoint")
    @classmethod
    def validate_endpoint(cls, v: str) -> str:
        # Only relative paths are accepted; a '://' would indicate a full URL.
        if not v.startswith("/"):
            raise ValueError("Endpoint must start with '/'")
        if "://" in v:
            raise ValueError("Endpoint must be a path, not a full URL")
        return v
class ScheduleUpdate(BaseModel):
    # Partial-update body for PATCH /schedules/{id}: every field is optional
    # so callers can send only what changed (the handler reads the body with
    # exclude_unset to tell "omitted" apart from "explicitly null").
    name: Optional[str] = Field(default=None, max_length=255)
    cron_expr: Optional[str] = Field(default=None, max_length=128)
    endpoint: Optional[str] = Field(default=None, max_length=512)
    method: Optional[str] = Field(default=None, max_length=10)
    description: Optional[str] = Field(default=None, max_length=1024)
    payload: Optional[Dict[str, Any]] = None
    timezone: Optional[str] = Field(default=None, max_length=64)
    timeout_seconds: Optional[int] = Field(default=None, ge=1, le=86400)
    max_retries: Optional[int] = Field(default=None, ge=0, le=10)
    retry_delay_seconds: Optional[int] = Field(default=None, ge=1, le=3600)

    @field_validator("name")
    @classmethod
    def validate_name(cls, v: Optional[str]) -> Optional[str]:
        # Same name rule as ScheduleCreate, skipped when the field is omitted.
        if v is not None and not _NAME_PATTERN.match(v):
            raise ValueError("Name must start with alphanumeric and contain only alphanumeric, spaces, '.', '_', '-'")
        return v

    @field_validator("method")
    @classmethod
    def validate_method(cls, v: Optional[str]) -> Optional[str]:
        # Normalize to uppercase before checking against the allowed verbs.
        if v is not None:
            v = v.upper()
            if v not in ("GET", "POST", "PUT", "PATCH", "DELETE"):
                raise ValueError("Method must be GET, POST, PUT, PATCH, or DELETE")
        return v

    @field_validator("endpoint")
    @classmethod
    def validate_endpoint(cls, v: Optional[str]) -> Optional[str]:
        # Only relative paths are accepted; a '://' would indicate a full URL.
        if v is not None:
            if not v.startswith("/"):
                raise ValueError("Endpoint must start with '/'")
            if "://" in v:
                raise ValueError("Endpoint must be a path, not a full URL")
        return v

    @model_validator(mode="after")
    def reject_null_required_fields(self) -> "ScheduleUpdate":
        # These fields may be omitted from a PATCH but never explicitly set
        # to null, since they are required on the stored schedule.
        non_nullable = (
            "name",
            "cron_expr",
            "endpoint",
            "method",
            "timezone",
            "timeout_seconds",
            "max_retries",
            "retry_delay_seconds",
        )
        data = self.model_dump(exclude_unset=True)
        for field_name in non_nullable:
            if field_name in data and data[field_name] is None:
                raise ValueError(f"'{field_name}' cannot be set to null")
        return self
class ScheduleResponse(BaseModel):
    # Full schedule representation returned by the CRUD endpoints.
    # Note: the lock columns (locked_by/locked_at) are intentionally absent.
    id: str
    name: str
    description: Optional[str] = None
    method: str
    endpoint: str
    payload: Optional[Dict[str, Any]] = None
    cron_expr: str
    timezone: str
    timeout_seconds: int
    max_retries: int
    retry_delay_seconds: int
    enabled: bool
    next_run_at: Optional[int] = None  # epoch seconds
    created_at: Optional[int] = None  # epoch seconds
    updated_at: Optional[int] = None  # epoch seconds
class ScheduleStateResponse(BaseModel):
    """Trimmed response for state-changing operations (enable/disable)."""

    id: str
    name: str
    enabled: bool
    next_run_at: Optional[int] = None  # epoch seconds
    updated_at: Optional[int] = None  # epoch seconds
class ScheduleRunResponse(BaseModel):
    # One execution attempt of a schedule, as returned by the runs endpoints.
    id: str
    schedule_id: str
    attempt: int
    triggered_at: Optional[int] = None  # epoch seconds
    completed_at: Optional[int] = None  # epoch seconds
    status: str
    status_code: Optional[int] = None  # HTTP status of the scheduled call
    run_id: Optional[str] = None
    session_id: Optional[str] = None
    error: Optional[str] = None
    # 'input'/'output' mirror the stored ScheduleRun column names (the field
    # name 'input' shadows the builtin only within this model's namespace).
    input: Optional[Dict[str, Any]] = None
    output: Optional[Dict[str, Any]] = None
    requirements: Optional[List[Dict[str, Any]]] = None
    created_at: Optional[int] = None  # epoch seconds
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/schedules/schema.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/scheduler/cli.py | """Rich CLI console for scheduler -- pretty output for cookbooks."""
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from agno.db.schemas.scheduler import Schedule, ScheduleRun
from agno.scheduler.manager import ScheduleManager
def _ts(epoch: Optional[int]) -> str:
"""Format an epoch timestamp for display."""
if epoch is None:
return "-"
return datetime.fromtimestamp(epoch, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
def _status_style(status: str) -> str:
"""Return Rich style for a run status."""
status_upper = status.upper()
styles = {
"COMPLETED": "bold green",
"RUNNING": "bold blue",
"PENDING": "bold yellow",
"ERROR": "bold red",
"CANCELLED": "bold magenta",
"PAUSED": "bold cyan",
}
return styles.get(status_upper, "white")
class SchedulerConsole:
    """Rich-powered display wrapper for ScheduleManager.

    Provides pretty terminal output for schedule CRUD operations,
    designed for use in cookbooks and interactive sessions. All display
    methods also return the underlying objects so they can be chained
    with further programmatic use.
    """

    def __init__(self, manager: ScheduleManager) -> None:
        self.manager = manager

    @classmethod
    def from_db(cls, db: Any) -> "SchedulerConsole":
        """Create a SchedulerConsole from a database instance."""
        return cls(ScheduleManager(db))

    def show_schedules(self, enabled: Optional[bool] = None) -> List[Schedule]:
        """Display all schedules in a Rich table and return them.

        Args:
            enabled: Optional filter passed through to ``manager.list``;
                None lists everything.
        """
        # rich is imported lazily so it is only required when display is used.
        from rich.console import Console
        from rich.table import Table

        console = Console()
        schedules = self.manager.list(enabled=enabled)
        table = Table(title="Schedules", show_lines=True)
        table.add_column("Name", style="bold cyan")
        table.add_column("Cron", style="white")
        table.add_column("Endpoint", style="white")
        table.add_column("Enabled", justify="center")
        table.add_column("Next Run", style="dim")
        table.add_column("ID", style="dim")
        for s in schedules:
            enabled_str = "[green]Yes[/green]" if s.enabled else "[red]No[/red]"
            table.add_row(
                s.name,
                s.cron_expr,
                f"{s.method} {s.endpoint}",
                enabled_str,
                _ts(s.next_run_at),
                s.id[:8] + "...",  # truncated ID keeps the table narrow
            )
        console.print(table)
        return schedules

    def show_schedule(self, schedule_id: str) -> Optional[Schedule]:
        """Display a single schedule in a Rich panel.

        Returns the Schedule, or None (after printing an error) when the
        id is unknown.
        """
        from rich.console import Console
        from rich.panel import Panel
        from rich.table import Table

        console = Console()
        schedule = self.manager.get(schedule_id)
        if schedule is None:
            console.print(f"[red]Schedule not found: {schedule_id}[/red]")
            return None
        # Two-column key/value grid rendered inside a titled panel.
        info = Table.grid(padding=(0, 2))
        info.add_column(style="bold")
        info.add_column()
        info.add_row("ID:", schedule.id)
        info.add_row("Name:", schedule.name)
        info.add_row("Description:", schedule.description or "-")
        info.add_row("Cron:", schedule.cron_expr)
        info.add_row("Timezone:", schedule.timezone or "UTC")
        info.add_row("Endpoint:", f"{schedule.method} {schedule.endpoint}")
        info.add_row("Enabled:", "[green]Yes[/green]" if schedule.enabled else "[red]No[/red]")
        info.add_row("Next Run:", _ts(schedule.next_run_at))
        info.add_row("Timeout:", f"{schedule.timeout_seconds or 3600}s")
        info.add_row("Max Retries:", str(schedule.max_retries or 0))
        info.add_row("Created:", _ts(schedule.created_at))
        info.add_row("Updated:", _ts(schedule.updated_at))
        console.print(Panel(info, title=f"Schedule: {schedule.name}", border_style="cyan"))
        return schedule

    def show_runs(self, schedule_id: str, limit: int = 20) -> List[ScheduleRun]:
        """Display run history for a schedule in a Rich table.

        Args:
            schedule_id: Schedule whose runs to display.
            limit: Maximum number of runs to fetch.
        """
        from rich.console import Console
        from rich.table import Table

        console = Console()
        runs = self.manager.get_runs(schedule_id, limit=limit)
        table = Table(title="Schedule Runs", show_lines=True)
        table.add_column("Run ID", style="dim")
        table.add_column("Attempt", justify="center")
        table.add_column("Status")
        table.add_column("Status Code", justify="center")
        table.add_column("Triggered At", style="dim")
        table.add_column("Completed At", style="dim")
        table.add_column("Error", style="red")
        for r in runs:
            status = r.status or "UNKNOWN"
            style = _status_style(status)
            table.add_row(
                r.id[:8] + "...",
                str(r.attempt or 0),
                f"[{style}]{status}[/{style}]",
                str(r.status_code or "-"),
                _ts(r.triggered_at),
                _ts(r.completed_at),
                (r.error or "-")[:60],  # clamp long errors to one readable cell
            )
        console.print(table)
        return runs

    def create_and_show(
        self,
        name: str,
        cron: str,
        endpoint: str,
        method: str = "POST",
        description: Optional[str] = None,
        payload: Optional[Dict[str, Any]] = None,
        timezone: str = "UTC",
        timeout_seconds: int = 3600,
        max_retries: int = 0,
        retry_delay_seconds: int = 60,
        if_exists: str = "raise",
    ) -> Schedule:
        """Create a schedule and display it in a Rich panel.

        All arguments are forwarded verbatim to ``ScheduleManager.create``.
        """
        schedule = self.manager.create(
            name=name,
            cron=cron,
            endpoint=endpoint,
            method=method,
            description=description,
            payload=payload,
            timezone=timezone,
            timeout_seconds=timeout_seconds,
            max_retries=max_retries,
            retry_delay_seconds=retry_delay_seconds,
            if_exists=if_exists,
        )
        self.show_schedule(schedule.id)
        return schedule
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/scheduler/cli.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/scheduler/cron.py | """Cron expression utilities for the scheduler."""
import time
from datetime import datetime
from typing import Optional
try:
from croniter import croniter # type: ignore[import-untyped]
except ImportError:
croniter = None # type: ignore[assignment, misc]
try:
import pytz
except ImportError:
pytz = None # type: ignore[assignment]
def _require_croniter() -> None:
    """Fail fast with an actionable message when `croniter` is unavailable."""
    if croniter is not None:
        return
    raise ImportError("`croniter` not installed. Please install it using `pip install agno[scheduler]`")
def _require_pytz() -> None:
    """Fail fast with an actionable message when `pytz` is unavailable."""
    if pytz is not None:
        return
    raise ImportError("`pytz` not installed. Please install it using `pip install agno[scheduler]`")
def validate_cron_expr(cron_expr: str) -> bool:
    """Check whether *cron_expr* is a syntactically valid cron expression.

    Args:
        cron_expr: Cron expression string (5-field).

    Returns:
        True if valid, False otherwise.
    """
    _require_croniter()
    is_valid = croniter.is_valid(cron_expr)
    return is_valid
def validate_timezone(tz: str) -> bool:
    """Check whether *tz* names a known timezone.

    Args:
        tz: Timezone string (e.g. 'UTC', 'America/New_York').

    Returns:
        True if valid, False otherwise.
    """
    _require_pytz()
    try:
        pytz.timezone(tz)
    except pytz.exceptions.UnknownTimeZoneError:
        return False
    return True
def compute_next_run(
    cron_expr: str,
    timezone_str: str = "UTC",
    after_epoch: Optional[int] = None,
) -> int:
    """Compute the next run time as epoch seconds.

    A monotonicity guard keeps the result at or above
    ``int(time.time()) + 1`` so a run is never scheduled in the past.

    Args:
        cron_expr: Cron expression string (5-field).
        timezone_str: Timezone the expression is evaluated in (default: UTC).
        after_epoch: Epoch seconds to compute the next run after; the
            current time is used when None.

    Returns:
        Next run time as epoch seconds.
    """
    _require_croniter()
    _require_pytz()
    zone = pytz.timezone(timezone_str)
    # Evaluate the cron expression relative to a timezone-aware reference point.
    if after_epoch is None:
        reference = datetime.now(tz=zone)
    else:
        reference = datetime.fromtimestamp(after_epoch, tz=zone)
    candidate = int(croniter(cron_expr, reference).get_next(datetime).timestamp())
    # Monotonicity guard: never return a time at or before "now".
    floor = int(time.time()) + 1
    return candidate if candidate >= floor else floor
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/scheduler/cron.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/scheduler/executor.py | """Schedule executor -- fires HTTP requests for due schedules."""
import asyncio
import json
import re
import time
from typing import Any, Dict, List, Optional, Union
from uuid import uuid4
from agno.db.schemas.scheduler import Schedule
from agno.utils.log import log_error, log_info, log_warning
try:
import httpx
except ImportError:
httpx = None # type: ignore[assignment]
# Matches AgentOS run endpoints, e.g. "/agents/<id>/runs" (optional trailing
# slash); group(1) is the resource type (agents|teams|workflows), group(2)
# the resource ID.
_RUN_ENDPOINT_RE = re.compile(r"^/(agents|teams|workflows)/([^/]+)/runs/?$")
# Terminal run statuses (RunStatus enum values from agno.run.base); polling
# stops once a run reports one of these.
_TERMINAL_STATUSES = {"COMPLETED", "CANCELLED", "ERROR", "PAUSED"}
# Default polling interval in seconds for background run status checks
_DEFAULT_POLL_INTERVAL = 30
def _to_form_value(v: Any) -> str:
"""Convert a payload value to a JSON-safe form string."""
if isinstance(v, bool):
return "true" if v else "false"
if isinstance(v, (dict, list)):
return json.dumps(v)
return str(v)
class ScheduleExecutor:
    """Execute a schedule by calling its endpoint on the AgentOS server.

    For run endpoints (``/agents/*/runs``, ``/teams/*/runs``, etc.) the executor
    submits a background run (``background=true``), then polls the run status
    endpoint until it reaches a terminal state (COMPLETED, ERROR, CANCELLED, PAUSED).

    For all other endpoints a simple request/response cycle is used.
    """

    def __init__(
        self,
        base_url: str,
        internal_service_token: str,
        timeout: int = 3600,
        poll_interval: int = _DEFAULT_POLL_INTERVAL,
    ) -> None:
        """Initialize the executor.

        Args:
            base_url: Base URL of the AgentOS server; a trailing slash is stripped.
            internal_service_token: Bearer token for authenticating internal requests.
            timeout: Default request timeout and polling deadline in seconds.
            poll_interval: Seconds to wait between run-status polls.

        Raises:
            ImportError: If ``httpx`` is not installed.
        """
        if httpx is None:
            raise ImportError("`httpx` not installed. Please install it using `pip install httpx`")
        self.base_url = base_url.rstrip("/")
        self.internal_service_token = internal_service_token
        self.timeout = timeout
        self.poll_interval = poll_interval
        # Shared async client, created lazily on first use.
        self._client: Optional[httpx.AsyncClient] = None

    async def _get_client(self) -> httpx.AsyncClient:
        """Get or create the shared httpx.AsyncClient."""
        if self._client is None or self._client.is_closed:
            self._client = httpx.AsyncClient(timeout=httpx.Timeout(self.timeout))
        return self._client

    async def close(self) -> None:
        """Close the shared httpx client."""
        if self._client is not None and not self._client.is_closed:
            await self._client.aclose()
        self._client = None

    # ------------------------------------------------------------------
    async def execute(
        self,
        schedule: Union[Schedule, Dict[str, Any]],
        db: Any,
        release_schedule: bool = True,
    ) -> Dict[str, Any]:
        """Execute *schedule* and persist run records.

        Retries up to ``max_retries`` times on failure, persisting one
        ScheduleRun record per attempt. The schedule lock is always released
        in the ``finally`` block so a crashed execution cannot leave the
        schedule stuck.

        Args:
            schedule: Schedule object or dict (from DB).
            db: The DB adapter instance (must have scheduler methods).
            release_schedule: Whether to release the lock after execution.

        Returns:
            The ScheduleRun dict (snapshot of the final attempt).
        """
        from agno.scheduler.cron import compute_next_run

        # Normalize to Schedule dataclass for typed access
        sched = Schedule.from_dict(schedule) if isinstance(schedule, dict) else schedule

        schedule_id: Optional[str] = None
        run_id_value: Optional[str] = None
        session_id_value: Optional[str] = None
        last_status = "failed"
        last_status_code: Optional[int] = None
        last_error: Optional[str] = None
        last_input: Optional[Dict[str, Any]] = None
        last_output: Optional[Dict[str, Any]] = None
        last_requirements: Optional[List[Dict[str, Any]]] = None
        run_record_id: Optional[str] = None
        run_dict: Dict[str, Any] = {}
        try:
            schedule_id = sched.id
            max_attempts = max(1, (sched.max_retries or 0) + 1)
            retry_delay = sched.retry_delay_seconds or 60
            for attempt in range(1, max_attempts + 1):
                # One ScheduleRun record per attempt, created in "running" state.
                run_record_id = str(uuid4())
                now = int(time.time())
                run_dict = {
                    "id": run_record_id,
                    "schedule_id": schedule_id,
                    "attempt": attempt,
                    "triggered_at": now,
                    "completed_at": None,
                    "status": "running",
                    "status_code": None,
                    "run_id": None,
                    "session_id": None,
                    "error": None,
                    "input": None,
                    "output": None,
                    "requirements": None,
                    "created_at": now,
                }
                if asyncio.iscoroutinefunction(getattr(db, "create_schedule_run", None)):
                    await db.create_schedule_run(run_dict)
                else:
                    db.create_schedule_run(run_dict)
                try:
                    result = await self._call_endpoint(sched)
                    last_status = result.get("status", "success")
                    last_status_code = result.get("status_code")
                    last_error = result.get("error")
                    # Keep the first non-empty run/session IDs across attempts.
                    run_id_value = result.get("run_id") or run_id_value
                    session_id_value = result.get("session_id") or session_id_value
                    last_input = result.get("input")
                    last_output = result.get("output")
                    last_requirements = result.get("requirements")
                    updates: Dict[str, Any] = {
                        "completed_at": int(time.time()),
                        "status": last_status,
                        "status_code": last_status_code,
                        "run_id": run_id_value,
                        "session_id": session_id_value,
                        "error": last_error,
                        "input": last_input,
                        "output": last_output,
                        "requirements": last_requirements,
                    }
                    if asyncio.iscoroutinefunction(getattr(db, "update_schedule_run", None)):
                        await db.update_schedule_run(run_record_id, **updates)
                    else:
                        db.update_schedule_run(run_record_id, **updates)
                    # "paused" (HITL) is not a failure, so don't burn a retry on it.
                    if last_status in ("success", "paused"):
                        break
                except Exception as exc:
                    last_status = "failed"
                    last_error = str(exc)
                    log_error(f"Schedule {schedule_id} attempt {attempt} failed: {exc}")
                    updates = {
                        "completed_at": int(time.time()),
                        "status": "failed",
                        "error": last_error,
                    }
                    if asyncio.iscoroutinefunction(getattr(db, "update_schedule_run", None)):
                        await db.update_schedule_run(run_record_id, **updates)
                    else:
                        db.update_schedule_run(run_record_id, **updates)
                if attempt < max_attempts:
                    log_info(f"Schedule {schedule_id}: retrying in {retry_delay}s (attempt {attempt}/{max_attempts})")
                    await asyncio.sleep(retry_delay)
            # Build final snapshot for the caller
            final_run = dict(run_dict)
            final_run["status"] = last_status
            final_run["status_code"] = last_status_code
            final_run["error"] = last_error
            final_run["run_id"] = run_id_value
            final_run["session_id"] = session_id_value
            final_run["input"] = last_input
            final_run["output"] = last_output
            final_run["requirements"] = last_requirements
            final_run["completed_at"] = int(time.time())
            return final_run
        except asyncio.CancelledError:
            # Shutdown path: record the cancellation (best effort) and re-raise.
            log_warning(f"Schedule {schedule_id} execution cancelled")
            if run_record_id is not None:
                cancel_updates: Dict[str, Any] = {
                    "completed_at": int(time.time()),
                    "status": "cancelled",
                    "error": "Execution cancelled during shutdown",
                }
                try:
                    if asyncio.iscoroutinefunction(getattr(db, "update_schedule_run", None)):
                        await db.update_schedule_run(run_record_id, **cancel_updates)
                    else:
                        db.update_schedule_run(run_record_id, **cancel_updates)
                except Exception:
                    pass
            raise
        finally:
            # Always release the schedule lock so it doesn't stay stuck
            if release_schedule and schedule_id is not None:
                try:
                    next_run_at = compute_next_run(
                        sched.cron_expr,
                        sched.timezone or "UTC",
                    )
                except Exception:
                    log_warning(
                        f"Failed to compute next_run_at for schedule {schedule_id}; "
                        "disabling schedule to prevent it from becoming stuck"
                    )
                    next_run_at = None
                    # A schedule with an uncomputable cron would be claimed forever;
                    # disable it instead of letting it spin.
                    try:
                        if asyncio.iscoroutinefunction(getattr(db, "update_schedule", None)):
                            await db.update_schedule(schedule_id, enabled=False)
                        else:
                            db.update_schedule(schedule_id, enabled=False)
                    except Exception as exc:
                        log_error(f"Failed to disable schedule {schedule_id} after cron failure: {exc}")
                try:
                    if asyncio.iscoroutinefunction(getattr(db, "release_schedule", None)):
                        await db.release_schedule(schedule_id, next_run_at=next_run_at)
                    else:
                        db.release_schedule(schedule_id, next_run_at=next_run_at)
                except Exception as exc:
                    log_error(f"Failed to release schedule {schedule_id}: {exc}")

    # ------------------------------------------------------------------
    async def _call_endpoint(self, schedule: Schedule) -> Dict[str, Any]:
        """Make the HTTP call to the schedule's endpoint.

        Run endpoints are dispatched to the background-run/poll flow; anything
        else gets a plain JSON request.
        """
        method = (schedule.method or "POST").upper()
        endpoint = schedule.endpoint
        payload = schedule.payload or {}
        timeout_seconds = schedule.timeout_seconds or self.timeout
        url = f"{self.base_url}{endpoint}"
        match = _RUN_ENDPOINT_RE.match(endpoint)
        is_run_endpoint = match is not None and method == "POST"
        headers: Dict[str, str] = {
            "Authorization": f"Bearer {self.internal_service_token}",
        }
        client = await self._get_client()
        if is_run_endpoint and match is not None:
            # Run endpoints take form data; force non-streaming background mode.
            form_payload = {k: _to_form_value(v) for k, v in payload.items() if k not in ("stream", "background")}
            form_payload["stream"] = "false"
            form_payload["background"] = "true"
            resource_type = match.group(1)
            resource_id = match.group(2)
            return await self._background_run(
                client,
                url,
                headers,
                form_payload,
                resource_type,
                resource_id,
                timeout_seconds,
            )
        else:
            headers["Content-Type"] = "application/json"
            return await self._simple_request(client, method, url, headers, payload if payload else None)

    async def _simple_request(
        self,
        client: Any,
        method: str,
        url: str,
        headers: Dict[str, str],
        payload: Optional[Dict[str, Any]],
    ) -> Dict[str, Any]:
        """Non-streaming request/response; any 2xx counts as success."""
        kwargs: Dict[str, Any] = {"headers": headers}
        if payload is not None:
            kwargs["json"] = payload
        resp = await client.request(method, url, **kwargs)
        status = "success" if 200 <= resp.status_code < 300 else "failed"
        error = resp.text if status == "failed" else None
        return {
            "status": status,
            "status_code": resp.status_code,
            "error": error,
            "run_id": None,
            "session_id": None,
            "input": None,
            "output": None,
            "requirements": None,
        }

    async def _background_run(
        self,
        client: Any,
        url: str,
        headers: Dict[str, str],
        payload: Dict[str, str],
        resource_type: str,
        resource_id: str,
        timeout_seconds: int,
    ) -> Dict[str, Any]:
        """Submit a background run and poll until completion.

        Returns a failed result dict immediately if the submission errors,
        returns invalid JSON, or omits the run/session identifiers needed
        for polling.
        """
        kwargs: Dict[str, Any] = {"headers": headers}
        if payload is not None:
            kwargs["data"] = payload
        resp = await client.request("POST", url, **kwargs)
        if resp.status_code >= 400:
            return {
                "status": "failed",
                "status_code": resp.status_code,
                "error": resp.text,
                "run_id": None,
                "session_id": None,
                "input": None,
                "output": None,
                "requirements": None,
            }
        try:
            body = resp.json()
        except (json.JSONDecodeError, ValueError):
            return {
                "status": "failed",
                "status_code": resp.status_code,
                "error": f"Invalid JSON in background run response: {resp.text[:500]}",
                "run_id": None,
                "session_id": None,
                "input": None,
                "output": None,
                "requirements": None,
            }
        run_id = body.get("run_id")
        session_id = body.get("session_id")
        if not run_id or not session_id:
            return {
                "status": "failed",
                "status_code": resp.status_code,
                "error": f"Missing run_id or session_id in background run response: {body}",
                "run_id": run_id,
                "session_id": session_id,
                "input": None,
                "output": None,
                "requirements": None,
            }
        return await self._poll_run(
            client,
            headers,
            resource_type,
            resource_id,
            run_id,
            session_id,
            timeout_seconds,
        )

    async def _poll_run(
        self,
        client: Any,
        headers: Dict[str, str],
        resource_type: str,
        resource_id: str,
        run_id: str,
        session_id: str,
        timeout_seconds: int,
    ) -> Dict[str, Any]:
        """Poll a run status endpoint until the run reaches a terminal state.

        Transient conditions (request errors, 404 before the run record is
        visible, malformed JSON) back off by ``poll_interval`` before
        retrying; a bare ``continue`` here would busy-loop and hammer the
        server until the deadline.
        """
        poll_url = f"{self.base_url}/{resource_type}/{resource_id}/runs/{run_id}"
        deadline = time.monotonic() + timeout_seconds
        while True:
            if time.monotonic() >= deadline:
                return {
                    "status": "failed",
                    "status_code": None,
                    "error": f"Polling timed out after {timeout_seconds}s for run {run_id}",
                    "run_id": run_id,
                    "session_id": session_id,
                    "input": None,
                    "output": None,
                    "requirements": None,
                }
            try:
                resp = await client.request(
                    "GET",
                    poll_url,
                    headers=headers,
                    params={"session_id": session_id},
                )
            except Exception as exc:
                log_warning(f"Poll request failed for run {run_id}: {exc}")
                # Back off before retrying instead of busy-looping.
                await asyncio.sleep(self.poll_interval)
                continue
            if resp.status_code == 404:
                # Run record may not be queryable yet; wait before polling again.
                await asyncio.sleep(self.poll_interval)
                continue
            if resp.status_code >= 400:
                return {
                    "status": "failed",
                    "status_code": resp.status_code,
                    "error": resp.text,
                    "run_id": run_id,
                    "session_id": session_id,
                    "input": None,
                    "output": None,
                    "requirements": None,
                }
            try:
                data = resp.json()
            except (json.JSONDecodeError, ValueError):
                log_warning(f"Invalid JSON in poll response for run {run_id}")
                # Back off before retrying instead of busy-looping.
                await asyncio.sleep(self.poll_interval)
                continue
            run_status = data.get("status")
            if run_status in _TERMINAL_STATUSES:
                if run_status == "COMPLETED":
                    status = "success"
                    error = None
                elif run_status == "PAUSED":
                    status = "paused"
                    error = None
                elif run_status == "CANCELLED":
                    status = "failed"
                    error = data.get("error") or "Run was cancelled"
                else:
                    status = "failed"
                    error = data.get("error") or f"Run failed with status {run_status}"
                # Extract input, output, and requirements from RunOutput
                run_input = data.get("input") if isinstance(data.get("input"), dict) else None
                run_output = self._extract_output(data)
                run_requirements = self._extract_requirements(data) if run_status == "PAUSED" else None
                return {
                    "status": status,
                    "status_code": resp.status_code,
                    "error": error,
                    "run_id": run_id,
                    "session_id": session_id,
                    "input": run_input,
                    "output": run_output,
                    "requirements": run_requirements,
                }
            await asyncio.sleep(self.poll_interval)

    # ------------------------------------------------------------------
    @staticmethod
    def _extract_output(data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Build a structured output dict from RunOutput data; None if no content."""
        content = data.get("content")
        if content is None:
            return None
        return {
            "content": content,
            "content_type": data.get("content_type"),
        }

    @staticmethod
    def _extract_requirements(data: Dict[str, Any]) -> Optional[List[Dict[str, Any]]]:
        """Extract HITL requirements from RunOutput data; None when absent or not a list."""
        raw = data.get("requirements")
        if raw and isinstance(raw, list):
            return raw
        return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/scheduler/executor.py",
"license": "Apache License 2.0",
"lines": 430,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/scheduler/manager.py | """Pythonic API for managing schedules -- direct DB access, no HTTP."""
import asyncio
import concurrent.futures
import time
from typing import Any, Dict, List, Literal, Optional
from uuid import uuid4
from agno.db.schemas.scheduler import Schedule, ScheduleRun
from agno.utils.log import log_debug, log_warning
# Names of the scheduler-specific methods a DB adapter may implement.
# ScheduleManager._call/_acall only dispatch to method names in this set and
# raise NotImplementedError when the adapter lacks the requested one.
SchedulerDbMethod = Literal[
    "get_schedule",
    "get_schedule_by_name",
    "get_schedules",
    "create_schedule",
    "update_schedule",
    "delete_schedule",
    "release_schedule",
    "claim_due_schedule",
    "create_schedule_run",
    "update_schedule_run",
    "get_schedule_run",
    "get_schedule_runs",
]
class ScheduleManager:
    """Direct DB-backed schedule management API.

    Provides a Pythonic interface for creating, listing, updating, and
    managing schedules without going through HTTP. Used by cookbooks
    and the Rich CLI console. Sync methods transparently bridge to async
    DB adapters; the ``a``-prefixed methods form the native async API.
    """

    def __init__(self, db: Any) -> None:
        """Wrap *db*, which must expose the scheduler DB methods."""
        self.db = db
        # True when the adapter's scheduler methods are coroutines.
        self._is_async = asyncio.iscoroutinefunction(getattr(db, "get_schedule", None))
        # Lazily-created single-thread pool, used to run async DB calls from
        # sync code that is itself executing inside a running event loop.
        self._pool: Optional[concurrent.futures.ThreadPoolExecutor] = None

    def close(self) -> None:
        """Shut down the internal thread pool (if created)."""
        if self._pool is not None:
            self._pool.shutdown(wait=False)
            self._pool = None

    def __del__(self) -> None:
        self.close()

    def _call(self, method_name: SchedulerDbMethod, *args: Any, **kwargs: Any) -> Any:
        """Call a DB method, handling sync/async transparently.

        Raises:
            NotImplementedError: If the DB adapter lacks *method_name*.
        """
        fn = getattr(self.db, method_name, None)
        if fn is None:
            raise NotImplementedError(f"Database does not support {method_name}")
        if asyncio.iscoroutinefunction(fn):
            # Keep the try body minimal: only get_running_loop() may raise the
            # RuntimeError we want to handle here. Previously the whole bridge
            # was inside the try, so a RuntimeError raised by the DB call
            # itself was misread as "no running loop" and the call re-executed.
            try:
                asyncio.get_running_loop()
            except RuntimeError:
                # No running loop — safe to use asyncio.run directly
                return asyncio.run(fn(*args, **kwargs))
            # Running inside an async context — bridge via a worker thread
            if self._pool is None:
                self._pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
            return self._pool.submit(asyncio.run, fn(*args, **kwargs)).result()
        return fn(*args, **kwargs)

    async def _acall(self, method_name: SchedulerDbMethod, *args: Any, **kwargs: Any) -> Any:
        """Async call a DB method (awaits coroutines, calls sync methods directly).

        Raises:
            NotImplementedError: If the DB adapter lacks *method_name*.
        """
        fn = getattr(self.db, method_name, None)
        if fn is None:
            raise NotImplementedError(f"Database does not support {method_name}")
        if asyncio.iscoroutinefunction(fn):
            return await fn(*args, **kwargs)
        return fn(*args, **kwargs)

    @staticmethod
    def _to_schedule(data: Any) -> Optional[Schedule]:
        """Convert a DB result (dict, Schedule, or None) to a Schedule object."""
        if data is None:
            return None
        if isinstance(data, Schedule):
            return data
        return Schedule.from_dict(data)

    @staticmethod
    def _to_schedule_list(data: Any) -> List[Schedule]:
        """Convert a list of DB results to Schedule objects."""
        if not data:
            return []
        return [Schedule.from_dict(d) if isinstance(d, dict) else d for d in data]

    @staticmethod
    def _to_run_list(data: Any) -> List[ScheduleRun]:
        """Convert a list of DB results to ScheduleRun objects."""
        if not data:
            return []
        return [ScheduleRun.from_dict(d) if isinstance(d, dict) else d for d in data]

    # --- Shared create() helpers -------------------------------------

    @staticmethod
    def _validate_create_args(cron: str, timezone: str, if_exists: str) -> None:
        """Validate create()/acreate() arguments, raising ValueError on bad input."""
        from agno.scheduler.cron import validate_cron_expr, validate_timezone

        if if_exists not in ("raise", "skip", "update"):
            raise ValueError(f"if_exists must be 'raise', 'skip', or 'update', got '{if_exists}'")
        if not validate_cron_expr(cron):
            raise ValueError(f"Invalid cron expression: {cron}")
        if not validate_timezone(timezone):
            raise ValueError(f"Invalid timezone: {timezone}")

    @staticmethod
    def _update_fields(
        cron: str,
        endpoint: str,
        method: str,
        description: Optional[str],
        payload: Optional[Dict[str, Any]],
        timezone: str,
        timeout_seconds: int,
        max_retries: int,
        retry_delay_seconds: int,
        next_run_at: int,
    ) -> Dict[str, Any]:
        """Build the kwargs for ``update_schedule`` when overwriting an existing schedule."""
        return {
            "cron_expr": cron,
            "endpoint": endpoint,
            "method": method.upper(),
            "description": description,
            "payload": payload,
            "timezone": timezone,
            "timeout_seconds": timeout_seconds,
            "max_retries": max_retries,
            "retry_delay_seconds": retry_delay_seconds,
            "next_run_at": next_run_at,
        }

    @staticmethod
    def _build_schedule(
        name: str,
        cron: str,
        endpoint: str,
        method: str,
        description: Optional[str],
        payload: Optional[Dict[str, Any]],
        timezone: str,
        timeout_seconds: int,
        max_retries: int,
        retry_delay_seconds: int,
        next_run_at: int,
    ) -> Schedule:
        """Construct a new enabled Schedule with a fresh UUID and current timestamp."""
        return Schedule(
            id=str(uuid4()),
            name=name,
            description=description,
            method=method.upper(),
            endpoint=endpoint,
            payload=payload,
            cron_expr=cron,
            timezone=timezone,
            timeout_seconds=timeout_seconds,
            max_retries=max_retries,
            retry_delay_seconds=retry_delay_seconds,
            enabled=True,
            next_run_at=next_run_at,
            locked_by=None,
            locked_at=None,
            created_at=int(time.time()),
            updated_at=None,
        )

    # --- Sync API ---

    def create(
        self,
        name: str,
        cron: str,
        endpoint: str,
        method: str = "POST",
        description: Optional[str] = None,
        payload: Optional[Dict[str, Any]] = None,
        timezone: str = "UTC",
        timeout_seconds: int = 3600,
        max_retries: int = 0,
        retry_delay_seconds: int = 60,
        if_exists: str = "raise",
    ) -> Schedule:
        """Create a new schedule.

        Args:
            if_exists: Behaviour when a schedule with the same name already
                exists. ``"raise"`` (default) raises ``ValueError``,
                ``"skip"`` returns the existing schedule unchanged,
                ``"update"`` overwrites the existing schedule with the
                supplied values.

        Raises:
            ValueError: On invalid arguments, or a name collision with
                ``if_exists="raise"``.
            RuntimeError: If the DB fails to return the created schedule.
        """
        from agno.scheduler.cron import compute_next_run

        self._validate_create_args(cron, timezone, if_exists)
        existing = self._to_schedule(self._call("get_schedule_by_name", name))
        if existing is not None:
            if if_exists == "skip":
                log_debug(f"Schedule '{name}' already exists, skipping")
                return existing
            if if_exists == "update":
                log_debug(f"Schedule '{name}' already exists, updating")
                updates = self._update_fields(
                    cron=cron,
                    endpoint=endpoint,
                    method=method,
                    description=description,
                    payload=payload,
                    timezone=timezone,
                    timeout_seconds=timeout_seconds,
                    max_retries=max_retries,
                    retry_delay_seconds=retry_delay_seconds,
                    next_run_at=compute_next_run(cron, timezone),
                )
                updated = self._to_schedule(self._call("update_schedule", existing.id, **updates))
                return updated or existing
            raise ValueError(f"Schedule with name '{name}' already exists")
        schedule = self._build_schedule(
            name=name,
            cron=cron,
            endpoint=endpoint,
            method=method,
            description=description,
            payload=payload,
            timezone=timezone,
            timeout_seconds=timeout_seconds,
            max_retries=max_retries,
            retry_delay_seconds=retry_delay_seconds,
            next_run_at=compute_next_run(cron, timezone),
        )
        result = self._to_schedule(self._call("create_schedule", schedule.to_dict()))
        if result is None:
            raise RuntimeError("Failed to create schedule")
        log_debug(f"Schedule '{name}' created (id={result.id}, cron={cron})")
        return result

    def list(self, enabled: Optional[bool] = None, limit: int = 100, page: int = 1) -> List[Schedule]:
        """List all schedules."""
        result = self._call("get_schedules", enabled=enabled, limit=limit, page=page)
        # get_schedules returns (schedules_list, total_count) tuple
        schedules_data = result[0] if isinstance(result, tuple) else result
        return self._to_schedule_list(schedules_data)

    def get(self, schedule_id: str) -> Optional[Schedule]:
        """Get a schedule by ID."""
        return self._to_schedule(self._call("get_schedule", schedule_id))

    def update(self, schedule_id: str, **kwargs: Any) -> Optional[Schedule]:
        """Update a schedule."""
        return self._to_schedule(self._call("update_schedule", schedule_id, **kwargs))

    def delete(self, schedule_id: str) -> bool:
        """Delete a schedule."""
        return self._call("delete_schedule", schedule_id)

    def enable(self, schedule_id: str) -> Optional[Schedule]:
        """Enable a schedule and compute next run."""
        schedule = self._to_schedule(self._call("get_schedule", schedule_id))
        if schedule is None:
            return None
        from agno.scheduler.cron import compute_next_run

        next_run_at = compute_next_run(schedule.cron_expr, schedule.timezone)
        return self._to_schedule(self._call("update_schedule", schedule_id, enabled=True, next_run_at=next_run_at))

    def disable(self, schedule_id: str) -> Optional[Schedule]:
        """Disable a schedule."""
        return self._to_schedule(self._call("update_schedule", schedule_id, enabled=False))

    def trigger(self, schedule_id: str) -> None:
        """Manually trigger a schedule.

        Note: Direct triggering is not supported through the manager.
        Use the REST API ``POST /schedules/{id}/trigger`` endpoint,
        or the ``SchedulePoller.trigger()`` method with a running executor.
        """
        log_warning(
            "ScheduleManager.trigger() is not supported for direct DB access. "
            "Use the REST API POST /schedules/{id}/trigger endpoint, or "
            "SchedulePoller.trigger() with a running executor."
        )

    def get_runs(self, schedule_id: str, limit: int = 20, page: int = 1) -> List[ScheduleRun]:
        """Get run history for a schedule."""
        result = self._call("get_schedule_runs", schedule_id, limit=limit, page=page)
        # get_schedule_runs returns (runs_list, total_count) tuple
        runs_data = result[0] if isinstance(result, tuple) else result
        return self._to_run_list(runs_data)

    # --- Async API ---

    async def acreate(
        self,
        name: str,
        cron: str,
        endpoint: str,
        method: str = "POST",
        description: Optional[str] = None,
        payload: Optional[Dict[str, Any]] = None,
        timezone: str = "UTC",
        timeout_seconds: int = 3600,
        max_retries: int = 0,
        retry_delay_seconds: int = 60,
        if_exists: str = "raise",
    ) -> Schedule:
        """Async create a new schedule.

        Args:
            if_exists: Behaviour when a schedule with the same name already
                exists. ``"raise"`` (default) raises ``ValueError``,
                ``"skip"`` returns the existing schedule unchanged,
                ``"update"`` overwrites the existing schedule with the
                supplied values.

        Raises:
            ValueError: On invalid arguments, or a name collision with
                ``if_exists="raise"``.
            RuntimeError: If the DB fails to return the created schedule.
        """
        from agno.scheduler.cron import compute_next_run

        self._validate_create_args(cron, timezone, if_exists)
        existing = self._to_schedule(await self._acall("get_schedule_by_name", name))
        if existing is not None:
            if if_exists == "skip":
                log_debug(f"Schedule '{name}' already exists, skipping")
                return existing
            if if_exists == "update":
                log_debug(f"Schedule '{name}' already exists, updating")
                updates = self._update_fields(
                    cron=cron,
                    endpoint=endpoint,
                    method=method,
                    description=description,
                    payload=payload,
                    timezone=timezone,
                    timeout_seconds=timeout_seconds,
                    max_retries=max_retries,
                    retry_delay_seconds=retry_delay_seconds,
                    next_run_at=compute_next_run(cron, timezone),
                )
                updated = self._to_schedule(await self._acall("update_schedule", existing.id, **updates))
                return updated or existing
            raise ValueError(f"Schedule with name '{name}' already exists")
        schedule = self._build_schedule(
            name=name,
            cron=cron,
            endpoint=endpoint,
            method=method,
            description=description,
            payload=payload,
            timezone=timezone,
            timeout_seconds=timeout_seconds,
            max_retries=max_retries,
            retry_delay_seconds=retry_delay_seconds,
            next_run_at=compute_next_run(cron, timezone),
        )
        result = self._to_schedule(await self._acall("create_schedule", schedule.to_dict()))
        if result is None:
            raise RuntimeError("Failed to create schedule")
        log_debug(f"Schedule '{name}' created (id={result.id}, cron={cron})")
        return result

    async def alist(self, enabled: Optional[bool] = None, limit: int = 100, page: int = 1) -> List[Schedule]:
        """Async list all schedules."""
        result = await self._acall("get_schedules", enabled=enabled, limit=limit, page=page)
        # get_schedules returns (schedules_list, total_count) tuple
        schedules_data = result[0] if isinstance(result, tuple) else result
        return self._to_schedule_list(schedules_data)

    async def aget(self, schedule_id: str) -> Optional[Schedule]:
        """Async get a schedule by ID."""
        return self._to_schedule(await self._acall("get_schedule", schedule_id))

    async def aupdate(self, schedule_id: str, **kwargs: Any) -> Optional[Schedule]:
        """Async update a schedule."""
        return self._to_schedule(await self._acall("update_schedule", schedule_id, **kwargs))

    async def adelete(self, schedule_id: str) -> bool:
        """Async delete a schedule."""
        return await self._acall("delete_schedule", schedule_id)

    async def aenable(self, schedule_id: str) -> Optional[Schedule]:
        """Async enable a schedule."""
        schedule = self._to_schedule(await self._acall("get_schedule", schedule_id))
        if schedule is None:
            return None
        from agno.scheduler.cron import compute_next_run

        next_run_at = compute_next_run(schedule.cron_expr, schedule.timezone)
        return self._to_schedule(
            await self._acall("update_schedule", schedule_id, enabled=True, next_run_at=next_run_at)
        )

    async def adisable(self, schedule_id: str) -> Optional[Schedule]:
        """Async disable a schedule."""
        return self._to_schedule(await self._acall("update_schedule", schedule_id, enabled=False))

    async def aget_runs(self, schedule_id: str, limit: int = 20, page: int = 1) -> List[ScheduleRun]:
        """Async get run history for a schedule."""
        result = await self._acall("get_schedule_runs", schedule_id, limit=limit, page=page)
        # get_schedule_runs returns (runs_list, total_count) tuple
        runs_data = result[0] if isinstance(result, tuple) else result
        return self._to_run_list(runs_data)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/scheduler/manager.py",
"license": "Apache License 2.0",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/scheduler/poller.py | """Schedule poller -- periodically claims and executes due schedules."""
import asyncio
from typing import Any, Dict, Optional, Set, Union
from uuid import uuid4
from agno.db.schemas.scheduler import Schedule
from agno.utils.log import log_error, log_info, log_warning
# Default number of seconds stop() waits for the poll loop, and again for
# in-flight execution tasks, to finish during shutdown.
_DEFAULT_STOP_TIMEOUT = 30
class SchedulePoller:
    """Periodically poll the DB for due schedules and execute them.

    Each poll tick repeatedly calls ``db.claim_due_schedule()`` until no more
    schedules are due, spawning an ``asyncio.create_task`` for each claimed
    schedule so they run concurrently.
    """

    def __init__(
        self,
        db: Any,
        executor: Any,
        poll_interval: int = 15,
        worker_id: Optional[str] = None,
        max_concurrent: int = 10,
        stop_timeout: int = _DEFAULT_STOP_TIMEOUT,
    ) -> None:
        self.db = db
        self.executor = executor
        self.poll_interval = poll_interval
        # Identifier recorded on schedules claimed by this worker.
        self.worker_id = worker_id or f"worker-{uuid4().hex[:8]}"
        self.max_concurrent = max_concurrent
        self.stop_timeout = stop_timeout
        self._task: Optional[asyncio.Task] = None  # type: ignore[type-arg]
        self._running = False
        # Currently-running execution tasks. NOTE: mutated in place only —
        # done-callbacks hold references to this exact set object.
        self._in_flight: Set[asyncio.Task] = set()  # type: ignore[type-arg]

    async def start(self) -> None:
        """Start the polling loop as a background task (no-op if already running)."""
        if self._running:
            return
        self._running = True
        self._task = asyncio.create_task(self._poll_loop())
        log_info(f"Scheduler poller started (worker={self.worker_id}, interval={self.poll_interval}s)")

    async def stop(self) -> None:
        """Stop the polling loop gracefully and cancel in-flight tasks."""
        self._running = False
        poll_task = self._task
        if poll_task is not None:
            poll_task.cancel()
            try:
                await asyncio.wait_for(poll_task, timeout=self.stop_timeout)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                pass
            self._task = None
        # Cancel every in-flight execution task, then wait for them to settle.
        for job in list(self._in_flight):
            job.cancel()
        if self._in_flight:
            try:
                await asyncio.wait_for(
                    asyncio.gather(*self._in_flight, return_exceptions=True),
                    timeout=self.stop_timeout,
                )
            except asyncio.TimeoutError:
                log_warning(f"Timed out waiting for {len(self._in_flight)} in-flight tasks during shutdown")
        self._in_flight.clear()
        # Release the executor's HTTP resources, if it exposes close().
        if hasattr(self.executor, "close"):
            await self.executor.close()
        log_info("Scheduler poller stopped")

    async def _poll_loop(self) -> None:
        """Main loop: poll first, then sleep between ticks."""
        while self._running:
            try:
                await self._poll_once()
                if not self._running:
                    break
                await asyncio.sleep(self.poll_interval)
            except asyncio.CancelledError:
                break
            except Exception as exc:
                log_error(f"Scheduler poll error: {exc}")
                await asyncio.sleep(self.poll_interval)

    async def _poll_once(self) -> None:
        """Claim all due schedules in a tight loop and fire them off."""
        while self._running:
            # Prune finished tasks in place, then enforce the concurrency budget.
            finished = [t for t in self._in_flight if t.done()]
            self._in_flight.difference_update(finished)
            if len(self._in_flight) >= self.max_concurrent:
                log_warning(f"Max concurrent executions reached ({self.max_concurrent}), waiting")
                break
            try:
                claim = getattr(self.db, "claim_due_schedule", None)
                if asyncio.iscoroutinefunction(claim):
                    claimed = await self.db.claim_due_schedule(self.worker_id)
                else:
                    claimed = self.db.claim_due_schedule(self.worker_id)
                if claimed is None:
                    break
                sched = Schedule.from_dict(claimed) if isinstance(claimed, dict) else claimed
                log_info(f"Claimed schedule: {sched.name or sched.id}")
                job = asyncio.create_task(self._execute_safe(sched))
                self._in_flight.add(job)
                job.add_done_callback(lambda t: self._in_flight.discard(t))
            except Exception as exc:
                log_error(f"Error claiming schedule: {exc}")
                break

    async def _execute_safe(self, schedule: Union[Schedule, Dict[str, Any]]) -> None:
        """Run one schedule through the executor, logging (never raising) errors."""
        try:
            await self.executor.execute(schedule, self.db)
        except Exception as exc:
            sched_id = schedule.id if isinstance(schedule, Schedule) else schedule.get("id")
            log_error(f"Error executing schedule {sched_id}: {exc}")

    async def trigger(self, schedule_id: str) -> None:
        """Manually trigger a schedule by ID (immediate execution)."""
        try:
            getter = getattr(self.db, "get_schedule", None)
            if asyncio.iscoroutinefunction(getter):
                record = await self.db.get_schedule(schedule_id)
            else:
                record = self.db.get_schedule(schedule_id)
            if record is None:
                log_error(f"Schedule not found: {schedule_id}")
                return
            sched = Schedule.from_dict(record) if isinstance(record, dict) else record
            if not sched.enabled:
                log_warning(f"Schedule {schedule_id} is disabled, skipping trigger")
                return
            log_info(f"Manually triggering schedule: {sched.name or schedule_id}")
            job = asyncio.create_task(self.executor.execute(sched, self.db, release_schedule=False))
            self._in_flight.add(job)
            job.add_done_callback(self._in_flight.discard)
        except Exception as exc:
            log_error(f"Error triggering schedule {schedule_id}: {exc}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/scheduler/poller.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/db/sqlite/test_scheduler.py | """Integration tests for scheduler DB operations on real SQLite."""
import os
import tempfile
import time
import uuid
import pytest
from agno.db.sqlite import SqliteDb
@pytest.fixture
def db():
    """Yield a SqliteDb backed by a real temp file; remove the file after the test."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        path = handle.name
        database = SqliteDb(
            session_table="test_sessions",
            db_file=path,
        )
        yield database
    if os.path.exists(path):
        os.unlink(path)
def _make_schedule(**overrides):
now = int(time.time())
d = {
"id": str(uuid.uuid4()),
"name": f"test-schedule-{uuid.uuid4().hex[:6]}",
"description": "Integration test schedule",
"method": "POST",
"endpoint": "/agents/a1/runs",
"payload": None,
"cron_expr": "0 9 * * *",
"timezone": "UTC",
"timeout_seconds": 3600,
"max_retries": 0,
"retry_delay_seconds": 60,
"enabled": True,
"next_run_at": now + 3600,
"locked_by": None,
"locked_at": None,
"created_at": now,
"updated_at": None,
}
d.update(overrides)
return d
def _make_run(schedule_id, **overrides):
now = int(time.time())
d = {
"id": str(uuid.uuid4()),
"schedule_id": schedule_id,
"attempt": 1,
"triggered_at": now,
"completed_at": now + 5,
"status": "success",
"status_code": 200,
"run_id": None,
"session_id": None,
"error": None,
"created_at": now,
}
d.update(overrides)
return d
# =============================================================================
# Schedule CRUD
# =============================================================================
class TestScheduleCRUD:
    """CRUD round-trips for schedules against a real SQLite file."""

    def test_create_and_get(self, db):
        payload = _make_schedule()
        assert db.create_schedule(payload)["id"] == payload["id"]
        stored = db.get_schedule(payload["id"])
        assert stored is not None
        assert stored["id"] == payload["id"]
        assert stored["name"] == payload["name"]
        assert stored["cron_expr"] == "0 9 * * *"

    def test_get_by_name(self, db):
        payload = _make_schedule(name="unique-name-test")
        db.create_schedule(payload)
        match = db.get_schedule_by_name("unique-name-test")
        assert match is not None
        assert match["id"] == payload["id"]

    def test_get_by_name_not_found(self, db):
        assert db.get_schedule_by_name("nonexistent") is None

    def test_get_not_found(self, db):
        assert db.get_schedule("nonexistent-id") is None

    def test_list_schedules(self, db):
        first, second = _make_schedule(), _make_schedule()
        db.create_schedule(first)
        db.create_schedule(second)
        listed = db.get_schedules()
        assert len(listed) >= 2
        listed_ids = {row["id"] for row in listed}
        assert first["id"] in listed_ids
        assert second["id"] in listed_ids

    def test_update_schedule(self, db):
        payload = _make_schedule()
        db.create_schedule(payload)
        result = db.update_schedule(payload["id"], description="Updated description")
        assert result is not None
        assert result["description"] == "Updated description"
        assert result["updated_at"] is not None

    def test_delete_schedule(self, db):
        payload = _make_schedule()
        db.create_schedule(payload)
        assert db.delete_schedule(payload["id"]) is True
        assert db.get_schedule(payload["id"]) is None

    def test_delete_nonexistent(self, db):
        assert db.delete_schedule("nonexistent") is False
# =============================================================================
# Enabled filter
# =============================================================================
class TestEnabledFilter:
    """The enabled= filter on get_schedules must partition schedules cleanly."""

    def test_filter_enabled(self, db):
        on = _make_schedule(enabled=True)
        off = _make_schedule(enabled=False)
        db.create_schedule(on)
        db.create_schedule(off)
        on_ids = {row["id"] for row in db.get_schedules(enabled=True)}
        off_ids = {row["id"] for row in db.get_schedules(enabled=False)}
        # Each schedule appears in exactly one partition.
        assert on["id"] in on_ids
        assert off["id"] not in on_ids
        assert off["id"] in off_ids
        assert on["id"] not in off_ids
# =============================================================================
# Claiming and releasing
# =============================================================================
class TestClaimAndRelease:
    """Atomic claim/release semantics, including stale-lock takeover."""

    def test_claim_due_schedule(self, db):
        due = _make_schedule(next_run_at=int(time.time()) - 10, enabled=True)
        db.create_schedule(due)
        claimed = db.claim_due_schedule("worker-1")
        assert claimed is not None
        assert claimed["id"] == due["id"]
        assert claimed["locked_by"] == "worker-1"
        assert claimed["locked_at"] is not None

    def test_claim_returns_none_when_nothing_due(self, db):
        db.create_schedule(_make_schedule(next_run_at=int(time.time()) + 9999, enabled=True))
        assert db.claim_due_schedule("worker-1") is None

    def test_claim_skips_disabled(self, db):
        db.create_schedule(_make_schedule(next_run_at=int(time.time()) - 10, enabled=False))
        assert db.claim_due_schedule("worker-1") is None

    def test_claim_skips_locked(self, db):
        moment = int(time.time())
        locked = _make_schedule(
            next_run_at=moment - 10,
            enabled=True,
            locked_by="other-worker",
            locked_at=moment,
        )
        db.create_schedule(locked)
        assert db.claim_due_schedule("worker-2") is None

    def test_claim_stale_lock(self, db):
        moment = int(time.time())
        # Lock is 10 minutes old: past the default 300s grace period, so it may be stolen.
        stale = _make_schedule(
            next_run_at=moment - 10,
            enabled=True,
            locked_by="stale-worker",
            locked_at=moment - 600,
        )
        db.create_schedule(stale)
        stolen = db.claim_due_schedule("worker-2")
        assert stolen is not None
        assert stolen["locked_by"] == "worker-2"

    def test_release_schedule(self, db):
        moment = int(time.time())
        due = _make_schedule(next_run_at=moment - 10, enabled=True)
        db.create_schedule(due)
        assert db.claim_due_schedule("worker-1") is not None
        upcoming = moment + 3600
        assert db.release_schedule(due["id"], next_run_at=upcoming) is True
        after = db.get_schedule(due["id"])
        assert after["locked_by"] is None
        assert after["locked_at"] is None
        assert after["next_run_at"] == upcoming
# =============================================================================
# Schedule runs
# =============================================================================
class TestScheduleRuns:
    """Persistence of per-execution run records, including cascade deletes."""

    def test_create_and_get_run(self, db):
        parent = _make_schedule()
        db.create_schedule(parent)
        record = _make_run(parent["id"])
        assert db.create_schedule_run(record)["id"] == record["id"]
        stored = db.get_schedule_run(record["id"])
        assert stored is not None
        assert stored["schedule_id"] == parent["id"]
        assert stored["status"] == "success"

    def test_update_run(self, db):
        parent = _make_schedule()
        db.create_schedule(parent)
        record = _make_run(parent["id"], status="running")
        db.create_schedule_run(record)
        result = db.update_schedule_run(record["id"], status="success", status_code=200)
        assert result is not None
        assert result["status"] == "success"
        assert result["status_code"] == 200

    def test_get_runs_for_schedule(self, db):
        parent = _make_schedule()
        db.create_schedule(parent)
        first = _make_run(parent["id"])
        second = _make_run(parent["id"], attempt=2)
        db.create_schedule_run(first)
        db.create_schedule_run(second)
        fetched = db.get_schedule_runs(parent["id"])
        assert len(fetched) == 2
        fetched_ids = {row["id"] for row in fetched}
        assert first["id"] in fetched_ids
        assert second["id"] in fetched_ids

    def test_get_runs_empty(self, db):
        parent = _make_schedule()
        db.create_schedule(parent)
        assert db.get_schedule_runs(parent["id"]) == []

    def test_get_runs_with_limit(self, db):
        parent = _make_schedule()
        db.create_schedule(parent)
        for attempt in range(1, 6):
            db.create_schedule_run(_make_run(parent["id"], attempt=attempt))
        assert len(db.get_schedule_runs(parent["id"], limit=3)) == 3

    def test_get_run_not_found(self, db):
        assert db.get_schedule_run("nonexistent-run-id") is None

    def test_delete_schedule_cascades_to_runs(self, db):
        """Deleting a schedule should also delete its associated runs."""
        parent = _make_schedule()
        db.create_schedule(parent)
        first = _make_run(parent["id"])
        second = _make_run(parent["id"], attempt=2)
        db.create_schedule_run(first)
        db.create_schedule_run(second)
        # Sanity check: both runs exist before the delete.
        assert len(db.get_schedule_runs(parent["id"])) == 2
        assert db.delete_schedule(parent["id"]) is True
        assert db.get_schedule(parent["id"]) is None
        # The cascade removed every associated run.
        assert db.get_schedule_runs(parent["id"]) == []
        assert db.get_schedule_run(first["id"]) is None
        assert db.get_schedule_run(second["id"]) is None
# =============================================================================
# Full lifecycle
# =============================================================================
class TestFullLifecycle:
    """End-to-end walk through every schedule operation in sequence."""

    def test_schedule_lifecycle(self, db):
        """Create -> get -> update -> enable/disable -> claim -> release -> runs -> delete."""
        start = int(time.time())
        subject = _make_schedule(next_run_at=start - 10, enabled=True)
        db.create_schedule(subject)
        # Read back the freshly created row.
        assert db.get_schedule(subject["id"]) is not None
        # Update a field and verify it stuck.
        db.update_schedule(subject["id"], description="Updated")
        assert db.get_schedule(subject["id"])["description"] == "Updated"
        # Disable, verify, then re-enable with a next_run_at already in the past.
        db.update_schedule(subject["id"], enabled=False)
        assert db.get_schedule(subject["id"])["enabled"] is False
        db.update_schedule(subject["id"], enabled=True, next_run_at=start - 5)
        # Claim the now-due schedule.
        claimed = db.claim_due_schedule("lifecycle-worker")
        assert claimed is not None
        assert claimed["id"] == subject["id"]
        # Record a run and mark it finished.
        record = _make_run(subject["id"])
        db.create_schedule_run(record)
        db.update_schedule_run(record["id"], status="success", completed_at=int(time.time()))
        # Release the lock and push next_run_at forward.
        db.release_schedule(subject["id"], next_run_at=start + 7200)
        released = db.get_schedule(subject["id"])
        assert released["locked_by"] is None
        assert released["next_run_at"] == start + 7200
        # The run record is visible in history.
        history = db.get_schedule_runs(subject["id"])
        assert len(history) == 1
        assert history[0]["status"] == "success"
        # Finally delete everything.
        assert db.delete_schedule(subject["id"]) is True
        assert db.get_schedule(subject["id"]) is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/sqlite/test_scheduler.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_background_execution.py | """Unit tests for background execution feature."""
import asyncio
import inspect
from typing import Any, Optional
from unittest.mock import MagicMock
import pytest
from agno.agent import _init, _response, _run, _storage
from agno.agent.agent import Agent
from agno.run import RunContext
from agno.run.agent import RunOutput
from agno.run.base import RunStatus
from agno.run.cancel import (
cancel_run,
cleanup_run,
get_active_runs,
get_cancellation_manager,
is_cancelled,
register_run,
set_cancellation_manager,
)
from agno.run.cancellation_management.in_memory_cancellation_manager import InMemoryRunCancellationManager
from agno.session import AgentSession
@pytest.fixture(autouse=True)
def reset_cancellation_manager():
    """Give every test a fresh in-memory cancellation manager, restoring the original after."""
    previous = get_cancellation_manager()
    set_cancellation_manager(InMemoryRunCancellationManager())
    try:
        yield
    finally:
        # Always restore, even if the test body raised.
        set_cancellation_manager(previous)
def _patch_sync_dispatch_dependencies(
    agent: Agent,
    monkeypatch: pytest.MonkeyPatch,
    runs: Optional[list[Any]] = None,
) -> None:
    """Stub out the agent's DB/session/dependency hooks so sync dispatch runs without a backend.

    Args:
        agent: The agent under test (passed through to the patched lambdas).
        monkeypatch: pytest's monkeypatch fixture; patches are auto-reverted per test.
        runs: Pre-existing runs to seed the in-memory session with.
    """
    # Force the synchronous code path: report no async-capable DB.
    monkeypatch.setattr(_init, "has_async_db", lambda agent: False)
    # Turn metadata/state/dependency hooks into no-ops.
    monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
    monkeypatch.setattr(_storage, "load_session_state", lambda agent, session=None, session_state=None: session_state)
    monkeypatch.setattr(_run, "resolve_run_dependencies", lambda agent, run_context: None)
    monkeypatch.setattr(_response, "get_response_format", lambda agent, run_context=None: None)
    # Session lookups return an in-memory AgentSession seeded with the caller-provided runs.
    monkeypatch.setattr(
        _storage,
        "read_or_create_session",
        lambda agent, session_id=None, user_id=None: AgentSession(session_id=session_id, user_id=user_id, runs=runs),
    )
# ============= Cancel-before-start semantics =============
class TestCancelBeforeStart:
    """A cancel intent recorded before a run registers must survive registration."""

    def test_cancel_before_register_preserves_intent(self):
        """Cancelling a run before it's registered stores the intent."""
        target = "future-run"
        # Cancel before the run exists: nothing was registered yet.
        assert cancel_run(target) is False
        # Registration must not wipe the stored cancel intent.
        register_run(target)
        assert is_cancelled(target) is True

    def test_cancel_after_register_marks_cancelled(self):
        """Cancelling a run after registration works normally."""
        target = "registered-run"
        register_run(target)
        assert is_cancelled(target) is False
        assert cancel_run(target) is True
        assert is_cancelled(target) is True

    def test_register_does_not_overwrite_cancel(self):
        """Calling register_run on an already-cancelled run preserves the cancel state."""
        target = "cancel-then-register"
        cancel_run(target)
        # Repeated registration must still leave the run cancelled.
        register_run(target)
        register_run(target)
        assert is_cancelled(target) is True

    def test_cleanup_removes_cancel_intent(self):
        """Cleanup removes the run from tracking entirely."""
        target = "cleanup-test"
        cancel_run(target)
        assert target in get_active_runs()
        cleanup_run(target)
        assert target not in get_active_runs()
# ============= Background execution validation =============
class TestBackgroundValidation:
    """Argument validation performed by background dispatch."""

    def test_background_with_stream_raises_value_error(self, monkeypatch: pytest.MonkeyPatch):
        """Background execution cannot be combined with streaming."""
        subject = Agent(name="test-agent")
        _patch_sync_dispatch_dependencies(subject, monkeypatch, runs=[])
        with pytest.raises(ValueError, match="Background execution cannot be combined with streaming"):
            _run.arun_dispatch(agent=subject, input="hello", stream=True, background=True)

    def test_background_without_db_raises_value_error(self, monkeypatch: pytest.MonkeyPatch):
        """Background execution requires a database."""
        subject = Agent(name="test-agent")
        subject.db = None
        _patch_sync_dispatch_dependencies(subject, monkeypatch, runs=[])
        with pytest.raises(ValueError, match="Background execution requires a database"):
            _run.arun_dispatch(agent=subject, input="hello", stream=False, background=True)

    def test_background_dispatch_returns_coroutine(self, monkeypatch: pytest.MonkeyPatch):
        """arun_dispatch with background=True returns a coroutine (not async def itself)."""
        subject = Agent(name="test-agent")
        subject.db = MagicMock()
        _patch_sync_dispatch_dependencies(subject, monkeypatch, runs=[])
        coro = _run.arun_dispatch(agent=subject, input="hello", stream=False, background=True)
        assert inspect.iscoroutine(coro)
        # Close without awaiting to avoid "coroutine was never awaited" warnings.
        coro.close()
# ============= Background execution lifecycle =============
class TestBackgroundLifecycle:
    """_arun_background: immediate PENDING return plus async persistence of later states."""

    @pytest.mark.asyncio
    async def test_arun_background_returns_pending_status(self, monkeypatch: pytest.MonkeyPatch):
        """_arun_background returns immediately with PENDING status."""
        subject = Agent(name="test-agent")
        captured: list[AgentSession] = []

        async def stub_read_session(agent, session_id=None, user_id=None):
            return AgentSession(session_id=session_id or "test-session", user_id=user_id, runs=[])

        async def stub_save_session(agent, session=None):
            captured.append(session)

        async def stub_arun(agent, run_response, run_context, **kwargs):
            # Pretend the model run finished successfully.
            run_response.status = RunStatus.completed
            run_response.content = "done"
            return run_response

        monkeypatch.setattr(_storage, "aread_or_create_session", stub_read_session)
        monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
        monkeypatch.setattr("agno.agent._session.asave_session", stub_save_session)
        monkeypatch.setattr(_run, "_arun", stub_arun)

        response = RunOutput(run_id="bg-run-1", session_id="test-session")
        context = RunContext(run_id="bg-run-1", session_id="test-session")
        outcome = await _run._arun_background(
            subject,
            run_response=response,
            run_context=context,
            session_id="test-session",
        )
        # Caller gets PENDING back right away; the real work happens in a task.
        assert outcome.status == RunStatus.pending
        assert outcome.run_id == "bg-run-1"
        # Give the spawned task a moment to finish.
        await asyncio.sleep(0.1)

    @pytest.mark.asyncio
    async def test_arun_background_persists_pending_before_returning(self, monkeypatch: pytest.MonkeyPatch):
        """Background run persists PENDING status to DB before returning."""
        subject = Agent(name="test-agent")
        seen_statuses: list[RunStatus] = []

        async def stub_read_session(agent, session_id=None, user_id=None):
            return AgentSession(session_id=session_id or "test-session", user_id=user_id, runs=[])

        async def stub_save_session(agent, session=None):
            if session and session.runs:
                seen_statuses.extend(run.status for run in session.runs)

        async def stub_arun(agent, run_response, run_context, **kwargs):
            run_response.status = RunStatus.completed
            return run_response

        monkeypatch.setattr(_storage, "aread_or_create_session", stub_read_session)
        monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
        monkeypatch.setattr("agno.agent._session.asave_session", stub_save_session)
        monkeypatch.setattr(_run, "_arun", stub_arun)

        await _run._arun_background(
            subject,
            run_response=RunOutput(run_id="bg-run-2", session_id="test-session"),
            run_context=RunContext(run_id="bg-run-2", session_id="test-session"),
            session_id="test-session",
        )
        # PENDING must have been saved before _arun_background returned.
        assert RunStatus.pending in seen_statuses
        await asyncio.sleep(0.1)
        # The spawned task then saves RUNNING followed by the final state.
        assert RunStatus.running in seen_statuses

    @pytest.mark.asyncio
    async def test_arun_background_error_persists_error_status(self, monkeypatch: pytest.MonkeyPatch):
        """If the background run fails, ERROR status is persisted."""
        subject = Agent(name="test-agent")
        seen_statuses: list[RunStatus] = []

        async def stub_read_session(agent, session_id=None, user_id=None):
            return AgentSession(session_id=session_id or "test-session", user_id=user_id, runs=[])

        async def stub_save_session(agent, session=None):
            if session and session.runs:
                seen_statuses.extend(run.status for run in session.runs)

        async def stub_arun_boom(agent, run_response, run_context, **kwargs):
            raise RuntimeError("model call failed")

        monkeypatch.setattr(_storage, "aread_or_create_session", stub_read_session)
        monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
        monkeypatch.setattr("agno.agent._session.asave_session", stub_save_session)
        monkeypatch.setattr(_run, "_arun", stub_arun_boom)

        outcome = await _run._arun_background(
            subject,
            run_response=RunOutput(run_id="bg-run-err", session_id="test-session"),
            run_context=RunContext(run_id="bg-run-err", session_id="test-session"),
            session_id="test-session",
        )
        assert outcome.status == RunStatus.pending
        # Give the failing task time to run and persist its error state.
        await asyncio.sleep(0.2)
        assert RunStatus.error in seen_statuses
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_background_execution.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_schedules_router.py | """Tests for the schedule REST API router."""
import time
from unittest.mock import MagicMock, patch
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from agno.os.routers.schedules import get_schedule_router
from agno.os.settings import AgnoAPISettings
# =============================================================================
# Fixtures
# =============================================================================
def _make_schedule_dict(**overrides):
"""Create a schedule dict with sensible defaults."""
now = int(time.time())
d = {
"id": "sched-1",
"name": "daily-check",
"description": None,
"method": "POST",
"endpoint": "/agents/my-agent/runs",
"payload": None,
"cron_expr": "0 9 * * *",
"timezone": "UTC",
"timeout_seconds": 3600,
"max_retries": 0,
"retry_delay_seconds": 60,
"enabled": True,
"next_run_at": now + 3600,
"locked_by": None,
"locked_at": None,
"created_at": now,
"updated_at": None,
}
d.update(overrides)
return d
@pytest.fixture
def mock_db():
    """A MagicMock standing in for the OS database, with every schedule method stubbed."""
    stub = MagicMock()
    stub.get_schedules = MagicMock(return_value=[])
    stub.get_schedule = MagicMock(return_value=None)
    stub.get_schedule_by_name = MagicMock(return_value=None)
    stub.create_schedule = MagicMock(return_value=_make_schedule_dict())
    stub.update_schedule = MagicMock(return_value=_make_schedule_dict())
    stub.delete_schedule = MagicMock(return_value=True)
    stub.get_schedule_runs = MagicMock(return_value=[])
    stub.get_schedule_run = MagicMock(return_value=None)
    return stub
@pytest.fixture
def settings():
    """Create test settings with auth disabled (no security key = auth disabled)."""
    # Default AgnoAPISettings carries no security key, so endpoints are unauthenticated.
    return AgnoAPISettings()
@pytest.fixture
def client(mock_db, settings):
    """FastAPI TestClient wired to a schedule router backed by the mock DB."""
    application = FastAPI()
    application.include_router(get_schedule_router(os_db=mock_db, settings=settings))
    return TestClient(application)
# =============================================================================
# Tests: GET /schedules
# =============================================================================
class TestListSchedules:
    """GET /schedules: listing and the enabled filter."""

    def test_empty_list(self, client, mock_db):
        mock_db.get_schedules = MagicMock(return_value=([], 0))
        response = client.get("/schedules")
        assert response.status_code == 200
        assert response.json()["data"] == []

    def test_returns_schedules(self, client, mock_db):
        rows = [_make_schedule_dict(id="s1"), _make_schedule_dict(id="s2", name="second")]
        mock_db.get_schedules = MagicMock(return_value=(rows, 2))
        response = client.get("/schedules")
        assert response.status_code == 200
        body = response.json()["data"]
        assert len(body) == 2
        assert body[0]["id"] == "s1"

    def test_filter_enabled(self, client, mock_db):
        mock_db.get_schedules = MagicMock(return_value=([], 0))
        client.get("/schedules?enabled=true")
        # The query parameter must be forwarded to the DB layer as a bool.
        mock_db.get_schedules.assert_called_once()
        assert mock_db.get_schedules.call_args[1]["enabled"] is True
# =============================================================================
# Tests: POST /schedules
# =============================================================================
class TestCreateSchedule:
    """POST /schedules: successful creation, cron validation, duplicate names."""

    # NOTE: stacked @patch decorators inject mocks bottom-up, so the parameter
    # order below (mock_compute first) matches the innermost decorator first.
    @patch("agno.scheduler.cron._require_pytz")
    @patch("agno.scheduler.cron._require_croniter")
    @patch("agno.scheduler.cron.validate_cron_expr", return_value=True)
    @patch("agno.scheduler.cron.validate_timezone", return_value=True)
    @patch("agno.scheduler.cron.compute_next_run", return_value=int(time.time()) + 60)
    def test_create_success(self, mock_compute, mock_tz, mock_cron, mock_req_cron, mock_req_pytz, client, mock_db):
        # No schedule with this name exists yet, so creation proceeds.
        mock_db.get_schedule_by_name = MagicMock(return_value=None)
        created = _make_schedule_dict(name="new-sched")
        mock_db.create_schedule = MagicMock(return_value=created)
        resp = client.post(
            "/schedules",
            json={
                "name": "new-sched",
                "cron_expr": "0 9 * * *",
                "endpoint": "/agents/a1/runs",
            },
        )
        assert resp.status_code == 201
        assert resp.json()["name"] == "new-sched"
        mock_db.create_schedule.assert_called_once()

    @patch("agno.scheduler.cron._require_pytz")
    @patch("agno.scheduler.cron._require_croniter")
    @patch("agno.scheduler.cron.validate_cron_expr", return_value=False)
    def test_create_invalid_cron(self, mock_cron, mock_req_cron, mock_req_pytz, client, mock_db):
        # A failing cron validation is reported as an unprocessable entity.
        resp = client.post(
            "/schedules",
            json={
                "name": "bad-cron",
                "cron_expr": "not valid",
                "endpoint": "/test",
            },
        )
        assert resp.status_code == 422

    @patch("agno.scheduler.cron._require_pytz")
    @patch("agno.scheduler.cron._require_croniter")
    @patch("agno.scheduler.cron.validate_cron_expr", return_value=True)
    @patch("agno.scheduler.cron.validate_timezone", return_value=True)
    @patch("agno.scheduler.cron.compute_next_run", return_value=int(time.time()) + 60)
    def test_create_duplicate_name(
        self, mock_compute, mock_tz, mock_cron, mock_req_cron, mock_req_pytz, client, mock_db
    ):
        # An existing schedule with the same name triggers a 409 conflict.
        mock_db.get_schedule_by_name = MagicMock(return_value=_make_schedule_dict())
        resp = client.post(
            "/schedules",
            json={
                "name": "daily-check",
                "cron_expr": "0 9 * * *",
                "endpoint": "/test",
            },
        )
        assert resp.status_code == 409
        assert "already exists" in resp.json()["detail"]
# =============================================================================
# Tests: GET /schedules/{schedule_id}
# =============================================================================
class TestGetSchedule:
    """GET /schedules/{schedule_id}."""

    def test_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        response = client.get("/schedules/sched-1")
        assert response.status_code == 200
        assert response.json()["id"] == "sched-1"

    def test_not_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        response = client.get("/schedules/missing")
        assert response.status_code == 404
# =============================================================================
# Tests: PATCH /schedules/{schedule_id}
# =============================================================================
class TestUpdateSchedule:
    """PATCH /schedules/{schedule_id}."""

    def test_update_description(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        mock_db.update_schedule = MagicMock(return_value=_make_schedule_dict(description="Updated desc"))
        response = client.patch("/schedules/sched-1", json={"description": "Updated desc"})
        assert response.status_code == 200
        mock_db.update_schedule.assert_called_once()

    def test_update_not_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        response = client.patch("/schedules/missing", json={"description": "x"})
        assert response.status_code == 404

    def test_update_empty_body(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        response = client.patch("/schedules/sched-1", json={})
        # An empty patch succeeds but must not hit the DB update path.
        assert response.status_code == 200
        mock_db.update_schedule.assert_not_called()
# =============================================================================
# Tests: DELETE /schedules/{schedule_id}
# =============================================================================
class TestDeleteSchedule:
    """DELETE /schedules/{schedule_id}."""

    def test_delete_success(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        mock_db.delete_schedule = MagicMock(return_value=True)
        response = client.delete("/schedules/sched-1")
        assert response.status_code == 204
        mock_db.delete_schedule.assert_called_once_with("sched-1")

    def test_delete_not_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        response = client.delete("/schedules/missing")
        assert response.status_code == 404
# =============================================================================
# Tests: POST /schedules/{schedule_id}/enable
# =============================================================================
class TestEnableSchedule:
    """POST /schedules/{schedule_id}/enable."""

    # NOTE: stacked @patch decorators inject mocks bottom-up; mock_compute
    # corresponds to the innermost (lowest) decorator.
    @patch("agno.scheduler.cron._require_pytz")
    @patch("agno.scheduler.cron._require_croniter")
    @patch("agno.scheduler.cron.compute_next_run", return_value=int(time.time()) + 60)
    def test_enable_success(self, mock_compute, mock_req_cron, mock_req_pytz, client, mock_db):
        existing = _make_schedule_dict(enabled=False)
        enabled = _make_schedule_dict(enabled=True)
        mock_db.get_schedule = MagicMock(return_value=existing)
        mock_db.update_schedule = MagicMock(return_value=enabled)
        resp = client.post("/schedules/sched-1/enable")
        assert resp.status_code == 200
        assert resp.json()["enabled"] is True

    def test_enable_not_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        resp = client.post("/schedules/missing/enable")
        assert resp.status_code == 404
# =============================================================================
# Tests: POST /schedules/{schedule_id}/disable
# =============================================================================
class TestDisableSchedule:
    """POST /schedules/{schedule_id}/disable."""

    def test_disable_success(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict(enabled=True))
        mock_db.update_schedule = MagicMock(return_value=_make_schedule_dict(enabled=False))
        response = client.post("/schedules/sched-1/disable")
        assert response.status_code == 200
        assert response.json()["enabled"] is False

    def test_disable_not_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        response = client.post("/schedules/missing/disable")
        assert response.status_code == 404
# =============================================================================
# Tests: POST /schedules/{schedule_id}/trigger
# =============================================================================
class TestTriggerSchedule:
    """POST /schedules/{schedule_id}/trigger."""

    def test_trigger_no_executor(self, client, mock_db):
        """Without a scheduler_executor on app.state, trigger returns 503."""
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        response = client.post("/schedules/sched-1/trigger")
        assert response.status_code == 503

    def test_trigger_disabled_schedule(self, client, mock_db):
        """A disabled schedule cannot be triggered manually."""
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict(enabled=False))
        response = client.post("/schedules/sched-1/trigger")
        assert response.status_code == 409
        assert "disabled" in response.json()["detail"].lower()
# =============================================================================
# Tests: GET /schedules/{schedule_id}/runs
# =============================================================================
class TestListScheduleRuns:
    """GET /schedules/{schedule_id}/runs."""

    def test_list_runs(self, client, mock_db):
        ts = int(time.time())
        run_rows = [
            {
                "id": "r1",
                "schedule_id": "sched-1",
                "attempt": 1,
                "triggered_at": ts,
                "completed_at": ts + 10,
                "status": "success",
                "status_code": 200,
                "run_id": None,
                "session_id": None,
                "error": None,
                "created_at": ts,
            }
        ]
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        mock_db.get_schedule_runs = MagicMock(return_value=(run_rows, 1))
        response = client.get("/schedules/sched-1/runs")
        assert response.status_code == 200
        assert len(response.json()["data"]) == 1

    def test_list_runs_schedule_not_found(self, client, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        response = client.get("/schedules/missing/runs")
        assert response.status_code == 404
# =============================================================================
# Tests: GET /schedules/{schedule_id}/runs/{run_id}
# =============================================================================
class TestGetScheduleRun:
    """GET /schedules/{schedule_id}/runs/{run_id}."""

    def test_get_run_found(self, client, mock_db):
        ts = int(time.time())
        run_row = {
            "id": "r1",
            "schedule_id": "sched-1",
            "attempt": 1,
            "triggered_at": ts,
            "completed_at": ts + 10,
            "status": "success",
            "status_code": 200,
            "run_id": None,
            "session_id": None,
            "error": None,
            "created_at": ts,
        }
        mock_db.get_schedule_run = MagicMock(return_value=run_row)
        response = client.get("/schedules/sched-1/runs/r1")
        assert response.status_code == 200
        assert response.json()["id"] == "r1"

    def test_get_run_not_found(self, client, mock_db):
        mock_db.get_schedule_run = MagicMock(return_value=None)
        response = client.get("/schedules/sched-1/runs/missing")
        assert response.status_code == 404

    def test_get_run_wrong_schedule(self, client, mock_db):
        # A run that belongs to a different schedule must look like a 404.
        run_row = {
            "id": "r1",
            "schedule_id": "other-sched",
            "attempt": 1,
            "status": "success",
            "created_at": int(time.time()),
        }
        mock_db.get_schedule_run = MagicMock(return_value=run_row)
        response = client.get("/schedules/sched-1/runs/r1")
        assert response.status_code == 404
# =============================================================================
# Tests: Pydantic schema validation
# =============================================================================
class TestScheduleCreateValidation:
    """Pydantic-level request validation for POST /schedules."""

    @staticmethod
    def _post(client, **fields):
        # Start from a valid payload and override the field under test.
        payload = {"name": "test", "cron_expr": "0 9 * * *", "endpoint": "/test"}
        payload.update(fields)
        return client.post("/schedules", json=payload)

    def test_invalid_name(self, client, mock_db):
        assert self._post(client, name="!invalid name!").status_code == 422

    def test_invalid_endpoint_no_slash(self, client, mock_db):
        assert self._post(client, endpoint="no-leading-slash").status_code == 422

    def test_invalid_endpoint_full_url(self, client, mock_db):
        assert self._post(client, endpoint="http://example.com/test").status_code == 422

    def test_invalid_method(self, client, mock_db):
        assert self._post(client, method="INVALID").status_code == 422
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_schedules_router.py",
"license": "Apache License 2.0",
"lines": 349,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_cli.py | """Tests for the SchedulerConsole Rich CLI."""
import time
from unittest.mock import MagicMock, patch
import pytest
from agno.db.schemas.scheduler import Schedule, ScheduleRun
from agno.scheduler.cli import SchedulerConsole, _status_style, _ts
# =============================================================================
# Helper function tests
# =============================================================================
class TestTsFormatter:
    """_ts renders epoch seconds as a UTC timestamp string, or '-' for None."""

    def test_none_returns_dash(self):
        assert _ts(None) == "-"

    def test_epoch_formats_correctly(self):
        # 1704067200 == 2024-01-01 00:00:00 UTC
        rendered = _ts(1704067200)
        assert "2024-01-01" in rendered
        assert "UTC" in rendered

    def test_returns_string(self):
        rendered = _ts(int(time.time()))
        assert isinstance(rendered, str)
        assert "UTC" in rendered
class TestStatusStyle:
    """_status_style maps status strings to Rich style names."""

    @staticmethod
    def _check(status, expected):
        assert _status_style(status) == expected

    def test_completed(self):
        self._check("COMPLETED", "bold green")

    def test_running(self):
        self._check("RUNNING", "bold blue")

    def test_error(self):
        self._check("ERROR", "bold red")

    def test_cancelled(self):
        self._check("CANCELLED", "bold magenta")

    def test_paused(self):
        self._check("PAUSED", "bold cyan")

    def test_pending(self):
        self._check("PENDING", "bold yellow")

    def test_unknown_returns_white(self):
        self._check("SOMETHING_ELSE", "white")

    def test_case_insensitive(self):
        # Lower-case input is styled the same as upper-case.
        self._check("completed", "bold green")
        self._check("error", "bold red")
# =============================================================================
# SchedulerConsole tests
# =============================================================================
def _make_schedule(**overrides):
    """Build a Schedule domain object with sensible defaults; overrides win."""
    created = int(time.time())
    defaults = {
        "id": "sched-1",
        "name": "test",
        "description": "A test schedule",
        "method": "POST",
        "endpoint": "/test",
        "payload": None,
        "cron_expr": "0 9 * * *",
        "timezone": "UTC",
        "timeout_seconds": 3600,
        "max_retries": 0,
        "retry_delay_seconds": 60,
        "enabled": True,
        "next_run_at": created + 3600,
        "created_at": created,
        "updated_at": None,
    }
    return Schedule.from_dict({**defaults, **overrides})
def _make_run(**overrides):
    """Build a successful ScheduleRun domain object; overrides win."""
    triggered = int(time.time())
    defaults = {
        "id": "run-1",
        "schedule_id": "sched-1",
        "attempt": 1,
        "triggered_at": triggered,
        "completed_at": triggered + 5,
        "status": "success",
        "status_code": 200,
        "run_id": None,
        "session_id": None,
        "error": None,
        "created_at": triggered,
    }
    return ScheduleRun.from_dict({**defaults, **overrides})
@pytest.fixture
def mock_manager():
    """A ScheduleManager stand-in whose methods return canned domain objects."""
    manager = MagicMock()
    manager.list = MagicMock(return_value=[_make_schedule()])
    manager.get = MagicMock(return_value=_make_schedule())
    manager.get_runs = MagicMock(return_value=[_make_run()])
    manager.create = MagicMock(return_value=_make_schedule(name="created"))
    return manager
@pytest.fixture
def console(mock_manager):
    """A SchedulerConsole wired to the mocked manager."""
    return SchedulerConsole(mock_manager)
class TestShowSchedules:
    """SchedulerConsole.show_schedules returns what the manager lists."""

    @patch("rich.console.Console")
    def test_returns_schedule_list(self, mock_console_cls, console, mock_manager):
        schedules = console.show_schedules()
        assert len(schedules) == 1
        assert schedules[0].id == "sched-1"
        mock_manager.list.assert_called_once_with(enabled=None)

    @patch("rich.console.Console")
    def test_passes_enabled_filter(self, mock_console_cls, console, mock_manager):
        console.show_schedules(enabled=True)
        mock_manager.list.assert_called_once_with(enabled=True)

    @patch("rich.console.Console")
    def test_empty_list(self, mock_console_cls, console, mock_manager):
        mock_manager.list = MagicMock(return_value=[])
        assert console.show_schedules() == []
class TestShowSchedule:
    """SchedulerConsole.show_schedule delegates to manager.get."""

    @patch("rich.console.Console")
    def test_found(self, mock_console_cls, console, mock_manager):
        schedule = console.show_schedule("sched-1")
        assert schedule is not None
        assert schedule.id == "sched-1"
        mock_manager.get.assert_called_once_with("sched-1")

    @patch("rich.console.Console")
    def test_not_found(self, mock_console_cls, console, mock_manager):
        mock_manager.get = MagicMock(return_value=None)
        assert console.show_schedule("missing") is None
class TestShowRuns:
    """SchedulerConsole.show_runs delegates to manager.get_runs."""

    @patch("rich.console.Console")
    def test_returns_runs(self, mock_console_cls, console, mock_manager):
        runs = console.show_runs("sched-1")
        assert len(runs) == 1
        assert runs[0].id == "run-1"
        mock_manager.get_runs.assert_called_once_with("sched-1", limit=20)

    @patch("rich.console.Console")
    def test_custom_limit(self, mock_console_cls, console, mock_manager):
        console.show_runs("sched-1", limit=5)
        mock_manager.get_runs.assert_called_once_with("sched-1", limit=5)

    @patch("rich.console.Console")
    def test_empty_runs(self, mock_console_cls, console, mock_manager):
        mock_manager.get_runs = MagicMock(return_value=[])
        assert console.show_runs("sched-1") == []
class TestCreateAndShow:
    """create_and_show creates via the manager, then renders the result."""

    @patch("rich.console.Console")
    def test_creates_and_shows(self, mock_console_cls, console, mock_manager):
        created = console.create_and_show(
            name="new",
            cron="0 9 * * *",
            endpoint="/test",
        )
        assert created.name == "created"
        mock_manager.create.assert_called_once()
        # show_schedule is invoked internally, which hits manager.get.
        mock_manager.get.assert_called_once()
class TestFromDb:
    """SchedulerConsole.from_db builds a console whose manager wraps the db."""

    def test_creates_from_db(self):
        db_stub = MagicMock()
        built = SchedulerConsole.from_db(db_stub)
        assert isinstance(built, SchedulerConsole)
        assert built.manager.db is db_stub
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_cli.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_cron.py | """Tests for cron expression utilities."""
import time
import pytest
pytest.importorskip("croniter", reason="croniter not installed")
pytest.importorskip("pytz", reason="pytz not installed")
from agno.scheduler.cron import compute_next_run, validate_cron_expr, validate_timezone # noqa: E402
class TestValidateCronExpr:
    """validate_cron_expr accepts well-formed 5-field crons, rejects bad ones."""

    def test_valid_cron_every_minute(self):
        every_minute = "* * * * *"
        assert validate_cron_expr(every_minute) is True

    def test_valid_cron_daily(self):
        daily_9am = "0 9 * * *"
        assert validate_cron_expr(daily_9am) is True

    def test_valid_cron_weekly(self):
        mondays = "0 0 * * 1"
        assert validate_cron_expr(mondays) is True

    def test_valid_cron_monthly(self):
        first_of_month = "0 0 1 * *"
        assert validate_cron_expr(first_of_month) is True

    def test_valid_cron_with_ranges(self):
        business_hours = "0 9-17 * * 1-5"
        assert validate_cron_expr(business_hours) is True

    def test_valid_cron_with_step(self):
        every_5_min = "*/5 * * * *"
        assert validate_cron_expr(every_5_min) is True

    def test_invalid_cron_empty(self):
        assert validate_cron_expr("") is False

    def test_invalid_cron_too_few_fields(self):
        assert validate_cron_expr("* *") is False

    def test_invalid_cron_bad_minute(self):
        # Minutes only go up to 59.
        assert validate_cron_expr("61 * * * *") is False

    def test_invalid_cron_bad_hour(self):
        # Hours only go up to 23.
        assert validate_cron_expr("0 25 * * *") is False
class TestValidateTimezone:
    """validate_timezone accepts IANA zone names and rejects anything else."""

    def test_valid_utc(self):
        assert validate_timezone("UTC") is True

    def test_valid_new_york(self):
        assert validate_timezone("America/New_York") is True

    def test_valid_tokyo(self):
        assert validate_timezone("Asia/Tokyo") is True

    def test_invalid_timezone(self):
        assert validate_timezone("Not/A/Timezone") is False

    def test_invalid_empty(self):
        assert validate_timezone("") is False
class TestComputeNextRun:
    """compute_next_run yields strictly-future epoch seconds."""

    def test_returns_future_time(self):
        next_fire = compute_next_run("* * * * *")
        assert next_fire > int(time.time())

    def test_monotonicity_guard(self):
        # A distant-past after_epoch must not produce a past timestamp:
        # the result is clamped to at least now + 1.
        next_fire = compute_next_run("* * * * *", after_epoch=0)
        floor = int(time.time()) + 1
        assert next_fire >= floor

    def test_respects_timezone(self):
        utc_fire = compute_next_run("0 12 * * *", "UTC")
        ny_fire = compute_next_run("0 12 * * *", "America/New_York")
        # Both are valid future epoch ints; exact offsets have DST edge
        # cases, so only basic validity is asserted here.
        assert utc_fire > int(time.time())
        assert ny_fire > int(time.time())
        assert isinstance(utc_fire, int)
        assert isinstance(ny_fire, int)

    def test_after_epoch(self):
        anchor = int(time.time())
        assert compute_next_run("* * * * *", after_epoch=anchor) > anchor
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_cron.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_executor.py | """Tests for the ScheduleExecutor."""
import asyncio
import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.scheduler.executor import ScheduleExecutor, _to_form_value
class TestToFormValue:
    """_to_form_value serializes Python values into form-field strings."""

    def test_bool_true(self):
        assert _to_form_value(True) == "true"

    def test_bool_false(self):
        assert _to_form_value(False) == "false"

    def test_dict(self):
        encoded = _to_form_value({"key": "value"})
        assert json.loads(encoded) == {"key": "value"}

    def test_list(self):
        encoded = _to_form_value([1, 2, 3])
        assert json.loads(encoded) == [1, 2, 3]

    def test_string(self):
        assert _to_form_value("hello") == "hello"

    def test_int(self):
        assert _to_form_value(42) == "42"
class TestExecutorInit:
    """ScheduleExecutor constructor behavior."""

    def test_requires_httpx(self):
        # With httpx absent the constructor must refuse to build.
        with patch("agno.scheduler.executor.httpx", None):
            with pytest.raises(ImportError, match="httpx"):
                ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok")

    def test_strips_trailing_slash(self):
        instance = ScheduleExecutor(base_url="http://localhost:8000/", internal_service_token="tok")
        assert instance.base_url == "http://localhost:8000"

    def test_default_timeout(self):
        instance = ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok")
        assert instance.timeout == 3600

    def test_custom_poll_interval(self):
        instance = ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok", poll_interval=10)
        assert instance.poll_interval == 10
class TestExecutorSimpleRequest:
    """_simple_request: fire-and-record requests for non-run endpoints."""

    @pytest.fixture
    def executor(self):
        return ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok")

    @staticmethod
    def _client_returning(status_code, text):
        # Async client whose every request yields a fixed response.
        response = MagicMock()
        response.status_code = status_code
        response.text = text
        client = AsyncMock()
        client.request = AsyncMock(return_value=response)
        return client

    @pytest.mark.asyncio
    async def test_simple_get_success(self, executor):
        client = self._client_returning(200, "OK")
        outcome = await executor._simple_request(client, "GET", "http://localhost:8000/config", {}, None)
        assert outcome["status"] == "success"
        assert outcome["status_code"] == 200
        assert outcome["error"] is None

    @pytest.mark.asyncio
    async def test_simple_request_failure(self, executor):
        client = self._client_returning(500, "Internal Server Error")
        outcome = await executor._simple_request(
            client, "POST", "http://localhost:8000/test", {"Content-Type": "application/json"}, {"key": "value"}
        )
        assert outcome["status"] == "failed"
        assert outcome["status_code"] == 500
        assert outcome["error"] == "Internal Server Error"
class TestExecutorBackgroundRun:
    """_background_run: submitting a run and validating the submission response."""

    @pytest.fixture
    def executor(self):
        return ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok", poll_interval=0)

    @staticmethod
    def _client_for(response):
        client = AsyncMock()
        client.request = AsyncMock(return_value=response)
        return client

    @pytest.mark.asyncio
    async def test_background_run_submit_failure(self, executor):
        response = MagicMock()
        response.status_code = 422
        response.text = "Unprocessable"
        outcome = await executor._background_run(
            self._client_for(response),
            "http://localhost:8000/agents/a1/runs",
            {},
            {"message": "hi"},
            "agents",
            "a1",
            60,
        )
        assert outcome["status"] == "failed"
        assert outcome["status_code"] == 422

    @pytest.mark.asyncio
    async def test_background_run_invalid_json(self, executor):
        # A 200 whose body is not JSON must be reported as a failure.
        response = MagicMock()
        response.status_code = 200
        response.text = "not json"
        response.json = MagicMock(side_effect=json.JSONDecodeError("", "", 0))
        outcome = await executor._background_run(
            self._client_for(response),
            "http://localhost:8000/agents/a1/runs",
            {},
            {"message": "hi"},
            "agents",
            "a1",
            60,
        )
        assert outcome["status"] == "failed"
        assert "Invalid JSON" in outcome["error"]

    @pytest.mark.asyncio
    async def test_background_run_missing_run_id(self, executor):
        # A submission response without a run_id cannot be polled.
        response = MagicMock()
        response.status_code = 200
        response.json = MagicMock(return_value={"session_id": "s1"})
        outcome = await executor._background_run(
            self._client_for(response),
            "http://localhost:8000/agents/a1/runs",
            {},
            {},
            "agents",
            "a1",
            60,
        )
        assert outcome["status"] == "failed"
        assert "Missing run_id" in outcome["error"]
class TestExecutorPollRun:
    """_poll_run: mapping polled run statuses onto executor outcomes."""

    @pytest.fixture
    def executor(self):
        return ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok", poll_interval=0)

    @staticmethod
    def _client_returning(payload):
        # Async client whose every request yields the same 200/JSON payload.
        response = MagicMock()
        response.status_code = 200
        response.json = MagicMock(return_value=payload)
        client = AsyncMock()
        client.request = AsyncMock(return_value=response)
        return client

    @pytest.mark.asyncio
    async def test_poll_completed(self, executor):
        client = self._client_returning({"status": "COMPLETED"})
        outcome = await executor._poll_run(client, {}, "agents", "a1", "run-1", "sess-1", 60)
        assert outcome["status"] == "success"
        assert outcome["run_id"] == "run-1"
        assert outcome["session_id"] == "sess-1"

    @pytest.mark.asyncio
    async def test_poll_error(self, executor):
        client = self._client_returning({"status": "ERROR", "error": "OOM"})
        outcome = await executor._poll_run(client, {}, "agents", "a1", "run-1", "sess-1", 60)
        assert outcome["status"] == "failed"
        assert outcome["error"] == "OOM"

    @pytest.mark.asyncio
    async def test_poll_cancelled(self, executor):
        client = self._client_returning({"status": "CANCELLED"})
        outcome = await executor._poll_run(client, {}, "agents", "a1", "run-1", "sess-1", 60)
        assert outcome["status"] == "failed"
        assert "cancelled" in outcome["error"].lower()

    @pytest.mark.asyncio
    async def test_poll_paused(self, executor):
        client = self._client_returning({"status": "PAUSED"})
        outcome = await executor._poll_run(client, {}, "agents", "a1", "run-1", "sess-1", 60)
        assert outcome["status"] == "paused"
        assert outcome["error"] is None

    @pytest.mark.asyncio
    async def test_poll_timeout(self, executor):
        """An already-expired timeout on a non-terminal status fails the run."""
        client = self._client_returning({"status": "RUNNING"})
        outcome = await executor._poll_run(client, {}, "agents", "a1", "run-1", "sess-1", timeout_seconds=0)
        assert outcome["status"] == "failed"
        assert "timed out" in outcome["error"].lower()

    @pytest.mark.asyncio
    async def test_poll_skips_404(self, executor):
        """404s are treated as 'run not visible yet' and retried."""
        attempts = 0

        async def flaky_request(*args, **kwargs):
            nonlocal attempts
            attempts += 1
            response = MagicMock()
            if attempts < 3:
                response.status_code = 404
            else:
                response.status_code = 200
                response.json = MagicMock(return_value={"status": "COMPLETED"})
            return response

        client = AsyncMock()
        client.request = flaky_request
        outcome = await executor._poll_run(client, {}, "agents", "a1", "run-1", "sess-1", 60)
        assert outcome["status"] == "success"
        assert attempts == 3
class TestExecutorExecute:
    """execute(): the full flow with a mocked DB adapter and HTTP layer."""

    @pytest.fixture
    def executor(self):
        return ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok", poll_interval=0)

    @pytest.fixture
    def mock_db(self):
        adapter = MagicMock()
        adapter.create_schedule_run = MagicMock()
        adapter.update_schedule_run = MagicMock()
        adapter.release_schedule = MagicMock()
        adapter.update_schedule = MagicMock()
        return adapter

    @pytest.fixture
    def simple_schedule(self):
        return {
            "id": "sched-1",
            "name": "test-schedule",
            "cron_expr": "* * * * *",
            "timezone": "UTC",
            "endpoint": "/config",
            "method": "GET",
            "payload": None,
            "max_retries": 0,
            "retry_delay_seconds": 60,
        }

    @pytest.mark.asyncio
    async def test_execute_simple_success(self, executor, mock_db, simple_schedule):
        ok_response = MagicMock()
        ok_response.status_code = 200
        ok_response.text = "OK"
        with patch("agno.scheduler.executor.httpx") as httpx_mod:
            client = AsyncMock()
            client.request = AsyncMock(return_value=ok_response)
            client.__aenter__ = AsyncMock(return_value=client)
            client.__aexit__ = AsyncMock(return_value=None)
            httpx_mod.AsyncClient.return_value = client
            httpx_mod.Timeout = MagicMock()
            outcome = await executor.execute(simple_schedule, mock_db)
        assert outcome["status"] == "success"
        mock_db.create_schedule_run.assert_called_once()
        mock_db.update_schedule_run.assert_called_once()
        mock_db.release_schedule.assert_called_once()

    @pytest.mark.asyncio
    async def test_execute_cancellation(self, executor, mock_db, simple_schedule):
        """CancelledError marks the run as cancelled and propagates."""

        async def cancel_endpoint(*args, **kwargs):
            raise asyncio.CancelledError()

        with patch.object(executor, "_call_endpoint", side_effect=cancel_endpoint):
            with pytest.raises(asyncio.CancelledError):
                await executor.execute(simple_schedule, mock_db)
        # The cancellation must still be persisted on the run record.
        mock_db.update_schedule_run.assert_called()
        last_update = mock_db.update_schedule_run.call_args
        assert last_update[1]["status"] == "cancelled"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_executor.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_executor_retry.py | """Tests for the ScheduleExecutor retry flow."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.scheduler.executor import ScheduleExecutor
@pytest.fixture
def executor():
    """An executor with poll_interval=0 so polling loops don't sleep."""
    return ScheduleExecutor(base_url="http://localhost:8000", internal_service_token="tok", poll_interval=0)
@pytest.fixture
def mock_db():
    """A DB adapter stub recording the scheduler's persistence calls."""
    adapter = MagicMock()
    adapter.create_schedule_run = MagicMock()
    adapter.update_schedule_run = MagicMock()
    adapter.release_schedule = MagicMock()
    adapter.update_schedule = MagicMock()
    return adapter
@pytest.fixture
def schedule():
    """A schedule dict allowing two retries with zero retry delay."""
    return dict(
        id="sched-1",
        name="test-schedule",
        cron_expr="* * * * *",
        timezone="UTC",
        endpoint="/config",
        method="GET",
        payload=None,
        max_retries=2,
        retry_delay_seconds=0,
    )
class TestRetrySucceedsOnSecondAttempt:
    """A transient failure on attempt one is retried and succeeds on two."""

    @pytest.mark.asyncio
    @patch("agno.scheduler.executor.asyncio.sleep", new_callable=AsyncMock)
    async def test_retries_until_success(self, mock_sleep, executor, mock_db, schedule):
        attempts = 0

        async def flaky_endpoint(sched):
            nonlocal attempts
            attempts += 1
            if attempts == 1:
                raise RuntimeError("Transient failure")
            return {"status": "success", "status_code": 200, "error": None, "run_id": None, "session_id": None}

        with patch.object(executor, "_call_endpoint", side_effect=flaky_endpoint):
            outcome = await executor.execute(schedule, mock_db)
        assert outcome["status"] == "success"
        assert attempts == 2
        # One run record created and updated per attempt; lock released once.
        assert mock_db.create_schedule_run.call_count == 2
        assert mock_db.update_schedule_run.call_count == 2
        mock_db.release_schedule.assert_called_once()
class TestRetryAllFail:
    """When every attempt fails, the final outcome is 'failed'."""

    @pytest.mark.asyncio
    @patch("agno.scheduler.executor.asyncio.sleep", new_callable=AsyncMock)
    async def test_all_retries_fail(self, mock_sleep, executor, mock_db, schedule):
        async def always_failing(sched):
            raise RuntimeError("Persistent failure")

        with patch.object(executor, "_call_endpoint", side_effect=always_failing):
            outcome = await executor.execute(schedule, mock_db)
        assert outcome["status"] == "failed"
        assert "Persistent failure" in outcome["error"]
        # max_retries=2 -> three total attempts (the first plus two retries).
        assert mock_db.create_schedule_run.call_count == 3
        assert mock_db.update_schedule_run.call_count == 3
        mock_db.release_schedule.assert_called_once()
class TestNoRetries:
    """max_retries=0 means exactly one attempt with no retry loop."""

    @pytest.mark.asyncio
    async def test_no_retries_single_attempt(self, executor, mock_db):
        single_shot = dict(
            id="sched-1",
            name="test",
            cron_expr="* * * * *",
            timezone="UTC",
            endpoint="/config",
            method="GET",
            payload=None,
            max_retries=0,
            retry_delay_seconds=0,
        )

        async def failing_endpoint(sched):
            raise RuntimeError("boom")

        with patch.object(executor, "_call_endpoint", side_effect=failing_endpoint):
            outcome = await executor.execute(single_shot, mock_db)
        assert outcome["status"] == "failed"
        assert mock_db.create_schedule_run.call_count == 1
        assert mock_db.update_schedule_run.call_count == 1
class TestReleaseAlwaysCalled:
    """release_schedule fires on success and failure, unless opted out."""

    @staticmethod
    def _ok_response():
        response = MagicMock()
        response.status_code = 200
        response.text = "OK"
        return response

    @pytest.mark.asyncio
    async def test_release_on_success(self, executor, mock_db, schedule):
        with patch("agno.scheduler.executor.httpx") as httpx_mod:
            client = AsyncMock()
            client.request = AsyncMock(return_value=self._ok_response())
            client.__aenter__ = AsyncMock(return_value=client)
            client.__aexit__ = AsyncMock(return_value=None)
            httpx_mod.AsyncClient.return_value = client
            httpx_mod.Timeout = MagicMock()
            await executor.execute(schedule, mock_db)
        mock_db.release_schedule.assert_called_once()

    @pytest.mark.asyncio
    @patch("agno.scheduler.executor.asyncio.sleep", new_callable=AsyncMock)
    async def test_release_on_failure(self, mock_sleep, executor, mock_db, schedule):
        async def failing_endpoint(sched):
            raise RuntimeError("fail")

        with patch.object(executor, "_call_endpoint", side_effect=failing_endpoint):
            await executor.execute(schedule, mock_db)
        mock_db.release_schedule.assert_called_once()

    @pytest.mark.asyncio
    async def test_no_release_when_flag_false(self, executor, mock_db, schedule):
        """Passing release_schedule=False suppresses the release call."""
        with patch("agno.scheduler.executor.httpx") as httpx_mod:
            client = AsyncMock()
            client.request = AsyncMock(return_value=self._ok_response())
            client.__aenter__ = AsyncMock(return_value=client)
            client.__aexit__ = AsyncMock(return_value=None)
            httpx_mod.AsyncClient.return_value = client
            httpx_mod.Timeout = MagicMock()
            await executor.execute(schedule, mock_db, release_schedule=False)
        mock_db.release_schedule.assert_not_called()
class TestComputeNextRunFailure:
    """An invalid cron expression after a run disables the schedule."""

    @pytest.mark.asyncio
    async def test_cron_failure_disables_schedule(self, executor, mock_db):
        broken_schedule = dict(
            id="sched-1",
            name="test",
            cron_expr="INVALID",
            timezone="UTC",
            endpoint="/config",
            method="GET",
            payload=None,
            max_retries=0,
            retry_delay_seconds=0,
        )
        ok_response = MagicMock()
        ok_response.status_code = 200
        ok_response.text = "OK"
        with patch("agno.scheduler.executor.httpx") as httpx_mod:
            client = AsyncMock()
            client.request = AsyncMock(return_value=ok_response)
            client.__aenter__ = AsyncMock(return_value=client)
            client.__aexit__ = AsyncMock(return_value=None)
            httpx_mod.AsyncClient.return_value = client
            httpx_mod.Timeout = MagicMock()
            await executor.execute(broken_schedule, mock_db)
        # compute_next_run cannot parse "INVALID" -> schedule gets disabled.
        mock_db.update_schedule.assert_called_once_with("sched-1", enabled=False)
class TestAsyncDbSupport:
    """The executor awaits coroutine-based DB adapter methods."""

    @pytest.mark.asyncio
    async def test_async_db_methods(self, executor):
        async_adapter = MagicMock()
        async_adapter.create_schedule_run = AsyncMock()
        async_adapter.update_schedule_run = AsyncMock()
        async_adapter.release_schedule = AsyncMock()
        async_adapter.update_schedule = AsyncMock()
        simple_schedule = dict(
            id="sched-1",
            name="test",
            cron_expr="* * * * *",
            timezone="UTC",
            endpoint="/config",
            method="GET",
            payload=None,
            max_retries=0,
            retry_delay_seconds=0,
        )
        ok_response = MagicMock()
        ok_response.status_code = 200
        ok_response.text = "OK"
        with patch("agno.scheduler.executor.httpx") as httpx_mod:
            client = AsyncMock()
            client.request = AsyncMock(return_value=ok_response)
            client.__aenter__ = AsyncMock(return_value=client)
            client.__aexit__ = AsyncMock(return_value=None)
            httpx_mod.AsyncClient.return_value = client
            httpx_mod.Timeout = MagicMock()
            outcome = await executor.execute(simple_schedule, async_adapter)
        assert outcome["status"] == "success"
        async_adapter.create_schedule_run.assert_called_once()
        async_adapter.update_schedule_run.assert_called_once()
        async_adapter.release_schedule.assert_called_once()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_executor_retry.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_manager.py | """Tests for the ScheduleManager Pythonic API."""
import time
from unittest.mock import AsyncMock, MagicMock
import pytest
pytest.importorskip("croniter", reason="croniter not installed")
pytest.importorskip("pytz", reason="pytz not installed")
from agno.scheduler.manager import ScheduleManager # noqa: E402
# =============================================================================
# Fixtures
# =============================================================================
def _make_schedule(**overrides):
now = int(time.time())
d = {
"id": "sched-1",
"name": "test-schedule",
"description": None,
"method": "POST",
"endpoint": "/agents/a1/runs",
"payload": None,
"cron_expr": "0 9 * * *",
"timezone": "UTC",
"timeout_seconds": 3600,
"max_retries": 0,
"retry_delay_seconds": 60,
"enabled": True,
"next_run_at": now + 3600,
"locked_by": None,
"locked_at": None,
"created_at": now,
"updated_at": None,
}
d.update(overrides)
return d
@pytest.fixture
def mock_db():
    """A synchronous DB adapter stub returning canned schedule dicts."""
    adapter = MagicMock()
    adapter.get_schedule = MagicMock(return_value=_make_schedule())
    adapter.get_schedule_by_name = MagicMock(return_value=None)
    adapter.get_schedules = MagicMock(return_value=[_make_schedule()])
    # create_schedule echoes back whatever dict it is given.
    adapter.create_schedule = MagicMock(side_effect=lambda d: d)
    adapter.update_schedule = MagicMock(return_value=_make_schedule())
    adapter.delete_schedule = MagicMock(return_value=True)
    adapter.get_schedule_runs = MagicMock(return_value=[])
    return adapter
@pytest.fixture
def mgr(mock_db):
    """A ScheduleManager backed by the synchronous mock adapter."""
    return ScheduleManager(mock_db)
# =============================================================================
# Sync API Tests
# =============================================================================
class TestManagerCreate:
    """ScheduleManager.create: validation and persistence."""

    def test_create_success(self, mgr, mock_db):
        created = mgr.create(name="new-sched", cron="0 9 * * *", endpoint="/test")
        assert created.name == "new-sched"
        assert created.cron_expr == "0 9 * * *"
        assert created.enabled is True
        mock_db.create_schedule.assert_called_once()

    def test_create_invalid_cron(self, mgr):
        with pytest.raises(ValueError, match="Invalid cron"):
            mgr.create(name="bad", cron="not valid", endpoint="/test")

    def test_create_invalid_timezone(self, mgr):
        with pytest.raises(ValueError, match="Invalid timezone"):
            mgr.create(name="bad", cron="0 9 * * *", endpoint="/test", timezone="Fake/Zone")

    def test_create_duplicate_name(self, mgr, mock_db):
        mock_db.get_schedule_by_name = MagicMock(return_value=_make_schedule())
        with pytest.raises(ValueError, match="already exists"):
            mgr.create(name="test-schedule", cron="0 9 * * *", endpoint="/test")

    def test_create_sets_method_uppercase(self, mgr, mock_db):
        created = mgr.create(name="new-sched", cron="0 9 * * *", endpoint="/test", method="get")
        assert created.method == "GET"

    def test_create_with_payload(self, mgr, mock_db):
        created = mgr.create(
            name="with-payload",
            cron="0 9 * * *",
            endpoint="/test",
            payload={"key": "value"},
        )
        assert created.payload == {"key": "value"}

    def test_create_db_returns_none(self, mgr, mock_db):
        mock_db.create_schedule = MagicMock(return_value=None)
        with pytest.raises(RuntimeError, match="Failed to create"):
            mgr.create(name="fail", cron="0 9 * * *", endpoint="/test")
class TestManagerList:
    """ScheduleManager.list passes paging/filter args through to the DB."""

    def test_list_all(self, mgr, mock_db):
        listed = mgr.list()
        assert len(listed) == 1
        mock_db.get_schedules.assert_called_once_with(enabled=None, limit=100, page=1)

    def test_list_with_filters(self, mgr, mock_db):
        mgr.list(enabled=True, limit=10, page=2)
        mock_db.get_schedules.assert_called_once_with(enabled=True, limit=10, page=2)
class TestManagerGet:
    """ScheduleManager.get returns the schedule or None."""

    def test_get_found(self, mgr, mock_db):
        found = mgr.get("sched-1")
        assert found.id == "sched-1"
        mock_db.get_schedule.assert_called_once_with("sched-1")

    def test_get_not_found(self, mgr, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        assert mgr.get("missing") is None
class TestManagerUpdate:
    """ScheduleManager.update forwards field changes to the DB."""

    def test_update(self, mgr, mock_db):
        updated = mgr.update("sched-1", description="Updated")
        assert updated is not None
        mock_db.update_schedule.assert_called_once_with("sched-1", description="Updated")
class TestManagerDelete:
    """ScheduleManager.delete forwards to the DB and returns its verdict."""

    def test_delete(self, mgr, mock_db):
        assert mgr.delete("sched-1") is True
        mock_db.delete_schedule.assert_called_once_with("sched-1")
class TestManagerEnable:
    """ScheduleManager.enable flips the flag and recomputes next_run_at."""

    def test_enable_found(self, mgr, mock_db):
        enabled = mgr.enable("sched-1")
        assert enabled is not None
        # The DB update must carry enabled=True plus a fresh next_run_at.
        update_call = mock_db.update_schedule.call_args
        assert update_call[1]["enabled"] is True
        assert "next_run_at" in update_call[1]

    def test_enable_not_found(self, mgr, mock_db):
        mock_db.get_schedule = MagicMock(return_value=None)
        assert mgr.enable("missing") is None
        mock_db.update_schedule.assert_not_called()
class TestManagerDisable:
    """ScheduleManager.disable writes enabled=False."""

    def test_disable(self, mgr, mock_db):
        disabled = mgr.disable("sched-1")
        assert disabled is not None
        mock_db.update_schedule.assert_called_once_with("sched-1", enabled=False)
class TestManagerTrigger:
    """ScheduleManager.trigger currently yields no result."""

    def test_trigger_returns_none(self, mgr, mock_db):
        assert mgr.trigger("sched-1") is None
class TestManagerGetRuns:
    """ScheduleManager.get_runs passes paging args through to the DB."""

    def test_get_runs(self, mgr, mock_db):
        runs = mgr.get_runs("sched-1", limit=5, page=2)
        assert runs == []
        mock_db.get_schedule_runs.assert_called_once_with("sched-1", limit=5, page=2)
class TestManagerCallMissingMethod:
    """_call raises NotImplementedError when the DB adapter lacks the method."""

    def test_missing_method(self, mgr, mock_db):
        # NOTE: the original test first deleted/None'd mock_db.get_schedule,
        # but that mutation was dead code -- the assertion only exercises a
        # fresh spec=[] mock (no attributes at all), so getattr on it yields
        # nothing callable and _call must refuse.
        mock_db_no_method = MagicMock(spec=[])
        mgr2 = ScheduleManager(mock_db_no_method)
        with pytest.raises(NotImplementedError, match="does not support"):
            mgr2._call("nonexistent_method")
# =============================================================================
# Async API Tests
# =============================================================================
@pytest.fixture
def mock_async_db():
    """DB adapter double whose methods are all coroutines (async adapter path)."""
    adapter = MagicMock()
    async_returns = {
        "get_schedule": _make_schedule(),
        "get_schedule_by_name": None,
        "get_schedules": [_make_schedule()],
        "update_schedule": _make_schedule(),
        "delete_schedule": True,
        "get_schedule_runs": [],
    }
    for method, value in async_returns.items():
        setattr(adapter, method, AsyncMock(return_value=value))
    # create_schedule echoes its input back, mirroring a real insert
    adapter.create_schedule = AsyncMock(side_effect=lambda d: d)
    return adapter
@pytest.fixture
def async_mgr(mock_async_db):
    """Manager wired to the async DB double."""
    return ScheduleManager(mock_async_db)
class TestAsyncCreate:
    """acreate: validation and persistence through the async adapter."""

    @pytest.mark.asyncio
    async def test_acreate_success(self, async_mgr, mock_async_db):
        created = await async_mgr.acreate(name="async-sched", cron="0 9 * * *", endpoint="/test")
        mock_async_db.create_schedule.assert_called_once()
        assert created.name == "async-sched"

    @pytest.mark.asyncio
    async def test_acreate_invalid_cron(self, async_mgr):
        # Cron validation happens before any DB interaction
        with pytest.raises(ValueError, match="Invalid cron"):
            await async_mgr.acreate(name="bad", cron="invalid", endpoint="/test")

    @pytest.mark.asyncio
    async def test_acreate_duplicate_name(self, async_mgr, mock_async_db):
        # Name collision: lookup-by-name now yields an existing schedule
        mock_async_db.get_schedule_by_name = AsyncMock(return_value=_make_schedule())
        with pytest.raises(ValueError, match="already exists"):
            await async_mgr.acreate(name="test-schedule", cron="0 9 * * *", endpoint="/test")
class TestAsyncList:
    @pytest.mark.asyncio
    async def test_alist(self, async_mgr, mock_async_db):
        """alist returns every schedule the adapter yields (one here)."""
        schedules = await async_mgr.alist()
        assert len(schedules) == 1
class TestAsyncGet:
    @pytest.mark.asyncio
    async def test_aget(self, async_mgr, mock_async_db):
        """aget resolves a schedule by id via the async adapter."""
        fetched = await async_mgr.aget("sched-1")
        assert fetched.id == "sched-1"
class TestAsyncUpdate:
    @pytest.mark.asyncio
    async def test_aupdate(self, async_mgr, mock_async_db):
        """aupdate forwards kwargs and returns the updated schedule."""
        updated = await async_mgr.aupdate("sched-1", description="Async updated")
        assert updated is not None
class TestAsyncDelete:
    @pytest.mark.asyncio
    async def test_adelete(self, async_mgr, mock_async_db):
        """adelete reports the adapter's boolean outcome."""
        assert await async_mgr.adelete("sched-1") is True
class TestAsyncEnable:
    @pytest.mark.asyncio
    async def test_aenable_found(self, async_mgr, mock_async_db):
        """Enabling flips the flag via update_schedule."""
        assert await async_mgr.aenable("sched-1") is not None
        _, kwargs = mock_async_db.update_schedule.call_args
        assert kwargs["enabled"] is True

    @pytest.mark.asyncio
    async def test_aenable_not_found(self, async_mgr, mock_async_db):
        # Unknown id: nothing to enable, so None comes back
        mock_async_db.get_schedule = AsyncMock(return_value=None)
        assert await async_mgr.aenable("missing") is None
class TestAsyncDisable:
    @pytest.mark.asyncio
    async def test_adisable(self, async_mgr, mock_async_db):
        """adisable returns the (mocked) updated schedule."""
        disabled = await async_mgr.adisable("sched-1")
        assert disabled is not None
class TestAsyncGetRuns:
    @pytest.mark.asyncio
    async def test_aget_runs(self, async_mgr, mock_async_db):
        """aget_runs passes through the adapter's (empty) run history."""
        assert await async_mgr.aget_runs("sched-1") == []
class TestAsyncCallMissingMethod:
    @pytest.mark.asyncio
    async def test_acall_missing(self, async_mgr):
        """_acall raises NotImplementedError for adapters lacking the method."""
        bare_db = MagicMock(spec=[])  # exposes no attributes at all
        bare_mgr = ScheduleManager(bare_db)
        with pytest.raises(NotImplementedError, match="does not support"):
            await bare_mgr._acall("nonexistent_method")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_manager.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_poller.py | """Tests for the SchedulePoller."""
import asyncio
from unittest.mock import AsyncMock, MagicMock
import pytest
from agno.db.schemas.scheduler import Schedule
from agno.scheduler.poller import SchedulePoller
def _make_schedule_dict(**overrides):
"""Create a schedule dict with all required fields."""
d = {
"id": "s1",
"name": "test",
"cron_expr": "0 9 * * *",
"endpoint": "/test",
"enabled": True,
}
d.update(overrides)
return d
@pytest.fixture
def mock_db():
    """Sync DB double: nothing claimable, nothing findable."""
    adapter = MagicMock()
    for method in ("claim_due_schedule", "get_schedule"):
        setattr(adapter, method, MagicMock(return_value=None))
    return adapter
@pytest.fixture
def mock_executor():
    """Executor double: every execution succeeds; close() is awaitable."""
    ex = MagicMock()
    ex.execute = AsyncMock(return_value={"status": "success"})
    ex.close = AsyncMock()
    return ex
class TestPollerInit:
    """Constructor defaults and explicit overrides."""

    def test_defaults(self, mock_db, mock_executor):
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        assert poller.poll_interval == 15
        assert poller.max_concurrent == 10
        assert poller._running is False
        # worker id is auto-generated with a recognizable prefix
        assert poller.worker_id.startswith("worker-")

    def test_custom_params(self, mock_db, mock_executor):
        overrides = {"poll_interval": 5, "worker_id": "my-worker", "max_concurrent": 3}
        poller = SchedulePoller(db=mock_db, executor=mock_executor, **overrides)
        for attr, expected in overrides.items():
            assert getattr(poller, attr) == expected
class TestPollerStartStop:
    """Lifecycle: start spawns the loop task, stop tears everything down."""

    @pytest.mark.asyncio
    async def test_start_creates_task(self, mock_db, mock_executor):
        poller = SchedulePoller(db=mock_db, executor=mock_executor, poll_interval=100)
        await poller.start()
        assert poller._running is True
        assert poller._task is not None
        await poller.stop()
        assert poller._running is False
        assert poller._task is None

    @pytest.mark.asyncio
    async def test_start_is_idempotent(self, mock_db, mock_executor):
        poller = SchedulePoller(db=mock_db, executor=mock_executor, poll_interval=100)
        await poller.start()
        original_task = poller._task
        # A second start must not replace the running loop task
        await poller.start()
        assert poller._task is original_task
        await poller.stop()

    @pytest.mark.asyncio
    async def test_stop_cancels_in_flight(self, mock_db, mock_executor):
        poller = SchedulePoller(db=mock_db, executor=mock_executor)

        # A task that would never finish on its own stands in for in-flight work
        async def never_finishes():
            await asyncio.sleep(1000)

        pending = asyncio.create_task(never_finishes())
        poller._in_flight.add(pending)
        await poller.stop()
        assert pending.cancelled()
        assert len(poller._in_flight) == 0
class TestPollerPollOnce:
    """A single poll pass: claiming, dispatch, concurrency limits, error handling."""

    @pytest.mark.asyncio
    async def test_no_due_schedules(self, mock_db, mock_executor):
        mock_db.claim_due_schedule = MagicMock(return_value=None)
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        poller._running = True
        await poller._poll_once()
        mock_db.claim_due_schedule.assert_called_once()
        assert len(poller._in_flight) == 0

    @pytest.mark.asyncio
    async def test_claims_and_dispatches(self, mock_db, mock_executor):
        schedule = _make_schedule_dict()
        counter = {"calls": 0}

        # First claim yields the schedule, every later claim yields None
        def claim_side_effect(worker_id):
            counter["calls"] += 1
            return schedule if counter["calls"] == 1 else None

        mock_db.claim_due_schedule = MagicMock(side_effect=claim_side_effect)
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        poller._running = True
        await poller._poll_once()
        await asyncio.sleep(0.05)  # let the spawned execution task run
        assert counter["calls"] == 2  # poller claims until None comes back
        mock_executor.execute.assert_called_once()
        # The raw dict must reach the executor converted to a Schedule object
        dispatched = mock_executor.execute.call_args[0][0]
        assert isinstance(dispatched, Schedule)
        assert dispatched.id == "s1"

    @pytest.mark.asyncio
    async def test_respects_concurrency_limit(self, mock_db, mock_executor):
        poller = SchedulePoller(db=mock_db, executor=mock_executor, max_concurrent=2)
        poller._running = True

        async def never_finishes():
            await asyncio.sleep(1000)

        blockers = [asyncio.create_task(never_finishes()) for _ in range(2)]
        poller._in_flight = set(blockers)
        await poller._poll_once()
        # Already saturated: the poller must not even ask the DB
        mock_db.claim_due_schedule.assert_not_called()
        for blocker in blockers:
            blocker.cancel()
        await asyncio.gather(*blockers, return_exceptions=True)

    @pytest.mark.asyncio
    async def test_claim_error_breaks_loop(self, mock_db, mock_executor):
        mock_db.claim_due_schedule = MagicMock(side_effect=Exception("DB error"))
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        poller._running = True
        await poller._poll_once()  # error is swallowed; must not propagate
        mock_db.claim_due_schedule.assert_called_once()

    @pytest.mark.asyncio
    async def test_async_db_claim(self, mock_executor):
        """Poller should support async DB adapters (coroutine claim methods)."""
        schedule = _make_schedule_dict(name="async-test")
        counter = {"calls": 0}

        async def async_claim(worker_id):
            counter["calls"] += 1
            return schedule if counter["calls"] == 1 else None

        async_db = MagicMock()
        async_db.claim_due_schedule = async_claim
        poller = SchedulePoller(db=async_db, executor=mock_executor)
        poller._running = True
        await poller._poll_once()
        await asyncio.sleep(0.05)
        assert counter["calls"] == 2
        mock_executor.execute.assert_called_once()
        # Same conversion guarantee on the async path
        dispatched = mock_executor.execute.call_args[0][0]
        assert isinstance(dispatched, Schedule)
        assert dispatched.id == "s1"
class TestPollerTrigger:
    """Manual trigger: fires only for existing, enabled schedules."""

    @pytest.mark.asyncio
    async def test_trigger_found(self, mock_db, mock_executor):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict())
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        await poller.trigger("s1")
        await asyncio.sleep(0.05)  # allow the spawned execution task to run
        mock_executor.execute.assert_called_once()
        args, kwargs = mock_executor.execute.call_args
        # The dict row is converted to a Schedule before execution
        assert isinstance(args[0], Schedule)
        assert args[0].id == "s1"
        # Manual triggers must not release the schedule afterwards
        assert kwargs["release_schedule"] is False

    @pytest.mark.asyncio
    async def test_trigger_not_found(self, mock_db, mock_executor):
        mock_db.get_schedule = MagicMock(return_value=None)
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        await poller.trigger("missing")
        mock_executor.execute.assert_not_called()

    @pytest.mark.asyncio
    async def test_trigger_disabled(self, mock_db, mock_executor):
        mock_db.get_schedule = MagicMock(return_value=_make_schedule_dict(enabled=False))
        poller = SchedulePoller(db=mock_db, executor=mock_executor)
        await poller.trigger("s1")
        mock_executor.execute.assert_not_called()
class TestPollerExecuteSafe:
    """_execute_safe swallows executor failures instead of crashing the loop."""

    @pytest.mark.asyncio
    async def test_catches_exceptions(self, mock_db):
        failing_executor = MagicMock()
        failing_executor.execute = AsyncMock(side_effect=RuntimeError("boom"))
        poller = SchedulePoller(db=mock_db, executor=failing_executor)
        await poller._execute_safe({"id": "s1"})  # must not raise
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_poller.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/scheduler/test_schedule_model.py | """Tests for Schedule and ScheduleRun data models."""
import time
from agno.db.schemas.scheduler import Schedule, ScheduleRun
class TestSchedule:
    """Schedule model: construction defaults, serialization, round-trips."""

    def test_create_basic(self):
        s = Schedule(id="test-id", name="daily-check", cron_expr="0 9 * * *", endpoint="/agents/my-agent/runs")
        # Explicitly supplied fields
        assert s.id == "test-id"
        assert s.name == "daily-check"
        assert s.cron_expr == "0 9 * * *"
        assert s.endpoint == "/agents/my-agent/runs"
        # Defaults
        assert s.method == "POST"
        assert s.timezone == "UTC"
        assert s.enabled is True
        assert s.created_at is not None

    def test_to_dict(self):
        s = Schedule(id="test-id", name="test", cron_expr="* * * * *", endpoint="/test")
        d = s.to_dict()
        assert (d["id"], d["name"]) == ("test-id", "test")
        assert (d["cron_expr"], d["endpoint"], d["method"]) == ("* * * * *", "/test", "POST")
        assert d["enabled"] is True
        assert "created_at" in d

    def test_from_dict(self):
        s = Schedule.from_dict(
            {
                "id": "abc",
                "name": "my-schedule",
                "cron_expr": "0 9 * * *",
                "endpoint": "/agents/x/runs",
                "method": "POST",
                "timezone": "America/New_York",
                "enabled": False,
                "created_at": int(time.time()),
            }
        )
        assert s.id == "abc"
        assert s.name == "my-schedule"
        assert s.timezone == "America/New_York"
        assert s.enabled is False

    def test_from_dict_ignores_extra_keys(self):
        payload = {
            "id": "abc",
            "name": "test",
            "cron_expr": "* * * * *",
            "endpoint": "/test",
            "extra_field": "should_be_ignored",
        }
        s = Schedule.from_dict(payload)
        assert s.id == "abc"
        # Unknown keys are dropped, not attached as attributes
        assert not hasattr(s, "extra_field")

    def test_roundtrip(self):
        original = Schedule(
            id="rt-test",
            name="roundtrip",
            cron_expr="0 12 * * *",
            endpoint="/test",
            description="A test schedule",
            payload={"key": "value"},
        )
        restored = Schedule.from_dict(original.to_dict())
        for attr in ("id", "name", "payload", "description"):
            assert getattr(restored, attr) == getattr(original, attr)
class TestScheduleRun:
    """ScheduleRun model: defaults, serialization, round-trips."""

    def test_create_basic(self):
        run = ScheduleRun(id="run-1", schedule_id="sched-1")
        assert run.id == "run-1"
        assert run.schedule_id == "sched-1"
        # A fresh run starts as attempt 1 in the running state, timestamped
        assert run.attempt == 1
        assert run.status == "running"
        assert run.created_at is not None

    def test_to_dict(self):
        run = ScheduleRun(id="run-1", schedule_id="sched-1", status="success", status_code=200)
        d = run.to_dict()
        assert (d["id"], d["status"], d["status_code"]) == ("run-1", "success", 200)

    def test_from_dict(self):
        run = ScheduleRun.from_dict(
            {
                "id": "r-1",
                "schedule_id": "s-1",
                "attempt": 2,
                "status": "failed",
                "error": "Connection refused",
                "created_at": int(time.time()),
            }
        )
        assert run.attempt == 2
        assert run.status == "failed"
        assert run.error == "Connection refused"

    def test_roundtrip(self):
        original = ScheduleRun(
            id="rt-run",
            schedule_id="rt-sched",
            attempt=3,
            status="success",
            status_code=200,
            run_id="run-xyz",
        )
        restored = ScheduleRun.from_dict(original.to_dict())
        for attr in ("id", "attempt", "run_id"):
            assert getattr(restored, attr) == getattr(original, attr)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/scheduler/test_schedule_model.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/team/_task_tools.py | """Task management tools for autonomous team execution (mode=tasks)."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agno.team.team import Team
from copy import deepcopy
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
Union,
)
from agno.agent import Agent
from agno.exceptions import RunCancelledException
from agno.media import Audio, File, Image, Video
from agno.run import RunContext
from agno.run.agent import RunOutput, RunOutputEvent
from agno.run.base import RunStatus
from agno.run.team import (
TaskCreatedEvent,
TaskUpdatedEvent,
TeamRunOutput,
TeamRunOutputEvent,
)
from agno.session import TeamSession
from agno.team.task import TaskList, TaskStatus, save_task_list
from agno.tools.function import Function
from agno.utils.events import (
create_team_task_created_event,
create_team_task_updated_event,
handle_event,
)
from agno.utils.log import (
log_debug,
use_agent_logger,
use_team_logger,
)
from agno.utils.merge_dict import merge_dictionaries, merge_parallel_session_states
from agno.utils.response import check_if_run_cancelled
from agno.utils.team import (
add_interaction_to_team_run_context,
format_member_agent_task,
)
def _get_task_management_tools(
team: "Team",
task_list: TaskList,
run_response: TeamRunOutput,
run_context: RunContext,
session: TeamSession,
team_run_context: Dict[str, Any],
user_id: Optional[str] = None,
stream: bool = False,
stream_events: bool = False,
async_mode: bool = False,
images: Optional[Sequence[Image]] = None,
videos: Optional[Sequence[Video]] = None,
audio: Optional[Sequence[Audio]] = None,
files: Optional[Sequence[File]] = None,
add_history_to_context: Optional[bool] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
debug_mode: Optional[bool] = None,
) -> List[Function]:
"""Build task management tools that close over team state.
Returns a list of Function objects the leader model can call as tools.
"""
_images: List[Image] = list(images) if images else []
_videos: List[Video] = list(videos) if videos else []
_audio: List[Audio] = list(audio) if audio else []
_files: List[File] = list(files) if files else []
from agno.team._init import _initialize_member
from agno.team._run import _update_team_media
from agno.team._tools import (
_determine_team_member_interactions,
_find_member_by_id,
_get_history_for_member_agent,
_propagate_member_pause,
)
# ------------------------------------------------------------------
# Helpers: emit task events through the standard event pipeline
# ------------------------------------------------------------------
def _emit_task_created(task):
"""Create a TaskCreatedEvent routed through handle_event."""
return handle_event(
create_team_task_created_event(
from_run_response=run_response,
task_id=task.id,
title=task.title,
description=task.description,
assignee=task.assignee,
status=task.status.value,
dependencies=task.dependencies,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
def _emit_task_updated(task, previous_status, result: Optional[str] = None):
"""Create a TaskUpdatedEvent routed through handle_event.
Args:
task: The task being updated.
previous_status: The status before this update.
result: The result to include in the event. Pass explicitly for
completed/failed transitions; omit for in_progress transitions
to avoid stale data.
"""
return handle_event(
create_team_task_updated_event(
from_run_response=run_response,
task_id=task.id,
title=task.title,
status=task.status.value,
previous_status=previous_status,
result=result,
assignee=task.assignee,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# ------------------------------------------------------------------
# Tool: create_task
# ------------------------------------------------------------------
def create_task(
title: str,
description: str = "",
assignee: str = "",
depends_on: Optional[List[str]] = None,
) -> Iterator[Union[TaskCreatedEvent, str]]:
"""Create a new task for the team to work on.
Args:
title (str): A short, actionable title for the task.
description (str): Detailed description of what needs to be done.
assignee (str): The member_id to assign this task to. Must be a valid member_id.
depends_on (list, optional): List of task IDs that must complete before this task can start.
Returns:
str: Confirmation with the new task ID.
"""
# Check for duplicate tasks with the same title (case-insensitive)
title_lower = title.lower().strip()
for existing_task in task_list.tasks:
if existing_task.title.lower().strip() == title_lower:
log_debug(f"Task with title '{title}' already exists: [{existing_task.id}]")
yield f"Task already exists: [{existing_task.id}] {existing_task.title} (status: {existing_task.status.value}). Use this task instead of creating a duplicate."
return
task = task_list.create_task(
title=title,
description=description,
assignee=assignee,
dependencies=depends_on or [],
)
save_task_list(run_context.session_state, task_list)
log_debug(f"Task created: [{task.id}] {task.title}")
if stream_events:
yield _emit_task_created(task)
yield f"Task created: [{task.id}] {task.title} (status: {task.status.value})"
# ------------------------------------------------------------------
# Tool: update_task_status
# ------------------------------------------------------------------
def update_task_status(
task_id: str,
status: str,
result: Optional[str] = None,
) -> Iterator[Union[TaskUpdatedEvent, str]]:
"""Update the status of a task. Use this to mark tasks you handle yourself as completed.
Args:
task_id (str): The ID of the task to update.
status (str): New status. One of: pending, in_progress, completed, failed.
result (str, optional): The result or outcome of the task (when completing).
Returns:
str: Confirmation of the update.
"""
try:
new_status = TaskStatus(status)
except ValueError:
yield f"Invalid status '{status}'. Must be one of: pending, in_progress, completed, failed."
return
if new_status == TaskStatus.blocked:
yield "Cannot manually set status to 'blocked'. Blocked status is managed automatically based on task dependencies."
return
# Get the task to capture previous status
task = task_list.get_task(task_id)
if task is None:
yield f"Task with ID '{task_id}' not found."
return
previous_status = task.status.value
task_title = task.title
updates: Dict[str, Any] = {"status": new_status}
if result is not None:
updates["result"] = result
updated_task = task_list.update_task(task_id, **updates)
save_task_list(run_context.session_state, task_list)
if stream_events and updated_task:
# Only include result for terminal states (completed/failed)
event_result = updated_task.result if new_status in (TaskStatus.completed, TaskStatus.failed) else None
yield _emit_task_updated(updated_task, previous_status, result=event_result)
if updated_task:
yield f"Task [{updated_task.id}] '{updated_task.title}' updated to {updated_task.status.value}."
else:
yield f"Task [{task_id}] '{task_title}' updated to {new_status.value}."
# ------------------------------------------------------------------
# Tool: list_tasks
# ------------------------------------------------------------------
def list_tasks() -> str:
"""List all tasks with their current status, assignees, and dependencies.
Returns:
str: Formatted task list.
"""
return task_list.get_summary_string()
# ------------------------------------------------------------------
# Tool: add_task_note
# ------------------------------------------------------------------
def add_task_note(task_id: str, note: str) -> str:
"""Add a note to a task for tracking progress or communicating context.
Args:
task_id (str): The ID of the task.
note (str): The note to add.
Returns:
str: Confirmation.
"""
task = task_list.get_task(task_id)
if task is None:
return f"Task with ID '{task_id}' not found."
task.notes.append(note)
save_task_list(run_context.session_state, task_list)
return f"Note added to task [{task.id}]."
# ------------------------------------------------------------------
# Tool: mark_all_complete
# ------------------------------------------------------------------
def mark_all_complete(summary: str) -> str:
"""Signal that the overall goal has been achieved. Call this when all tasks are done.
Args:
summary (str): A summary of the work done and the final outcome.
Returns:
str: Confirmation.
"""
task_list.goal_complete = True
task_list.completion_summary = summary
save_task_list(run_context.session_state, task_list)
return f"Goal marked as complete. Summary: {summary}"
# ------------------------------------------------------------------
# Shared: member setup and post-processing
# ------------------------------------------------------------------
def _setup_member_for_task(member_agent: Union[Agent, "Team"], task_description: str):
"""Initialize member and prepare task input. Returns (member_agent_task, history)."""
_initialize_member(team, member_agent)
if not team.send_media_to_model:
member_agent.send_media_to_model = False
team_member_interactions_str = _determine_team_member_interactions(
team, team_run_context, images=_images, videos=_videos, audio=_audio, files=_files
)
team_history_str = None
if team.add_team_history_to_members and session:
team_history_str = session.get_team_history_context(num_runs=team.num_team_history_runs)
member_agent_task: Any = task_description
if team_history_str or team_member_interactions_str:
member_agent_task = format_member_agent_task(
task_description=member_agent_task,
team_member_interactions_str=team_member_interactions_str,
team_history_str=team_history_str,
)
history = None
if hasattr(member_agent, "add_history_to_context") and member_agent.add_history_to_context:
history = _get_history_for_member_agent(team, session, member_agent)
if history and isinstance(member_agent_task, str):
from agno.models.message import Message
history.append(Message(role="user", content=member_agent_task))
return member_agent_task, history
def _post_process_member_run(
member_run_response: Optional[Union[TeamRunOutput, RunOutput]],
member_agent: Union[Agent, "Team"],
member_agent_task: Any,
member_session_state_copy: Optional[Dict[str, Any]],
tool_name: str = "execute_task",
skip_session_merge: bool = False,
) -> None:
"""Post-process a member run: update parent IDs, interactions, session state."""
if member_run_response is not None:
member_run_response.parent_run_id = run_response.run_id
# Update tool child_run_id
if run_response.tools is not None and member_run_response is not None:
for tool in run_response.tools:
if tool.tool_name and tool.tool_name.lower() == tool_name and tool.child_run_id is None:
tool.child_run_id = member_run_response.run_id
break
member_name = member_agent.name or (member_agent.id if member_agent.id else "Unknown")
normalized_task = (
str(member_agent_task)
if not hasattr(member_agent_task, "content")
else str(member_agent_task.content or "")
)
add_interaction_to_team_run_context(
team_run_context=team_run_context,
member_name=member_name,
task=normalized_task,
run_response=member_run_response,
)
if run_response and member_run_response:
run_response.add_member_run(member_run_response)
if member_run_response:
if (
not member_agent.store_media
or not member_agent.store_tool_messages
or not member_agent.store_history_messages
):
from agno.agent._run import scrub_run_output_for_storage
scrub_run_output_for_storage(member_agent, run_response=member_run_response) # type: ignore[arg-type]
session.upsert_run(member_run_response)
if run_context.session_state is not None and member_session_state_copy is not None and not skip_session_merge:
merge_dictionaries(run_context.session_state, member_session_state_copy)
if member_run_response is not None:
_update_team_media(team, member_run_response)
# ------------------------------------------------------------------
# Tool: execute_task (sync)
# ------------------------------------------------------------------
def execute_task(task_id: str, member_id: str) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
"""Execute a task by delegating it to a team member. The member will receive the task
description and return a result.
Args:
task_id (str): The ID of the task to execute.
member_id (str): The ID of the member to execute the task.
Returns:
str: The result of the task execution.
"""
task = task_list.get_task(task_id)
if task is None:
yield f"Task with ID '{task_id}' not found."
return
if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
yield f"Task [{task_id}] is {task.status.value} and cannot be executed."
return
result = _find_member_by_id(team, member_id, run_context=run_context)
if result is None:
yield f"Member with ID {member_id} not found. Available members:\n{team.get_members_system_message_content(indent=0, run_context=run_context)}"
return
_, member_agent = result
previous_status = task.status.value
task.status = TaskStatus.in_progress
task.assignee = member_id
save_task_list(run_context.session_state, task_list)
if stream_events:
yield _emit_task_updated(task, previous_status)
use_agent_logger()
member_session_state_copy = deepcopy(run_context.session_state)
member_run_response: Optional[Union[TeamRunOutput, RunOutput]] = None
try:
member_task_description = task.description or task.title
member_agent_task, history = _setup_member_for_task(member_agent, member_task_description)
if stream:
member_stream = member_agent.run(
input=member_agent_task if not history else history,
user_id=user_id,
session_id=session.session_id,
session_state=member_session_state_copy,
images=_images,
videos=_videos,
audio=_audio,
files=_files,
stream=True,
stream_events=stream_events or team.stream_member_events,
debug_mode=debug_mode,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
metadata=run_context.metadata,
add_session_state_to_context=add_session_state_to_context,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
yield_run_output=True,
)
for event in member_stream:
if isinstance(event, (TeamRunOutput, RunOutput)):
member_run_response = event
continue
check_if_run_cancelled(event)
event.parent_run_id = event.parent_run_id or run_response.run_id
yield event
else:
member_run_response = member_agent.run(
input=member_agent_task if not history else history,
user_id=user_id,
session_id=session.session_id,
session_state=member_session_state_copy,
images=_images,
videos=_videos,
audio=_audio,
files=_files,
stream=False,
debug_mode=debug_mode,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
)
check_if_run_cancelled(member_run_response)
except RunCancelledException:
raise
except Exception as e:
task.status = TaskStatus.failed
task.result = f"Member execution error: {e}"
save_task_list(run_context.session_state, task_list)
use_team_logger()
yield f"Task [{task.id}] failed due to member execution error: {e}"
return
# Check HITL pause
if member_run_response is not None and member_run_response.is_paused:
_propagate_member_pause(run_response, member_agent, member_run_response)
task.status = TaskStatus.pending # Reset to pending so it can be retried after HITL
save_task_list(run_context.session_state, task_list)
use_team_logger()
_post_process_member_run(member_run_response, member_agent, member_agent_task, member_session_state_copy)
yield f"Member '{member_agent.name}' requires human input before continuing. Task [{task.id}] paused."
return
# Process result
use_team_logger()
_post_process_member_run(member_run_response, member_agent, member_agent_task, member_session_state_copy)
if member_run_response is not None and member_run_response.status == RunStatus.error:
task.status = TaskStatus.failed
task.result = str(member_run_response.content) if member_run_response.content else "Task failed"
save_task_list(run_context.session_state, task_list)
if stream_events:
yield _emit_task_updated(task, "in_progress", result=task.result)
yield f"Task [{task.id}] failed: {task.result}"
elif member_run_response is not None and member_run_response.content:
content = str(member_run_response.content)
task.status = TaskStatus.completed
task.result = content
save_task_list(run_context.session_state, task_list)
if stream_events:
yield _emit_task_updated(task, "in_progress", result=task.result)
yield f"Task [{task.id}] completed. Result: {content}"
else:
task.status = TaskStatus.completed
task.result = "No content returned"
save_task_list(run_context.session_state, task_list)
if stream_events:
yield _emit_task_updated(task, "in_progress", result=task.result)
yield f"Task [{task.id}] completed with no content."
# ------------------------------------------------------------------
# Tool: execute_task (async)
# ------------------------------------------------------------------
    async def aexecute_task(
        task_id: str, member_id: str
    ) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
        """Execute a task by delegating it to a team member. The member will receive the task
        description and return a result.
        Args:
            task_id (str): The ID of the task to execute.
            member_id (str): The ID of the member to execute the task.
        Returns:
            str: The result of the task execution.
        """
        # --- Validate the task and the target member ---
        task = task_list.get_task(task_id)
        if task is None:
            yield f"Task with ID '{task_id}' not found."
            return
        # Only pending/in_progress tasks are runnable; completed/failed/blocked are not.
        if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
            yield f"Task [{task_id}] is {task.status.value} and cannot be executed."
            return
        result = _find_member_by_id(team, member_id, run_context=run_context)
        if result is None:
            yield f"Member with ID {member_id} not found. Available members:\n{team.get_members_system_message_content(indent=0, run_context=run_context)}"
            return
        _, member_agent = result
        # --- Mark the task in progress and persist before delegating ---
        previous_status = task.status.value
        task.status = TaskStatus.in_progress
        task.assignee = member_id
        save_task_list(run_context.session_state, task_list)
        if stream_events:
            yield _emit_task_updated(task, previous_status)
        # Switch logging to the member while it runs; restored to the team logger below.
        use_agent_logger()
        # Member runs on its own session_state copy; merged back via _post_process_member_run.
        member_session_state_copy = deepcopy(run_context.session_state)
        member_run_response: Optional[Union[TeamRunOutput, RunOutput]] = None
        try:
            member_task_description = task.description or task.title
            member_agent_task, history = _setup_member_for_task(member_agent, member_task_description)
            if stream:
                member_stream = member_agent.arun(
                    input=member_agent_task if not history else history,
                    user_id=user_id,
                    session_id=session.session_id,
                    session_state=member_session_state_copy,
                    images=_images,
                    videos=_videos,
                    audio=_audio,
                    files=_files,
                    stream=True,
                    stream_events=stream_events or team.stream_member_events,
                    debug_mode=debug_mode,
                    dependencies=run_context.dependencies,
                    add_dependencies_to_context=add_dependencies_to_context,
                    metadata=run_context.metadata,
                    add_session_state_to_context=add_session_state_to_context,
                    knowledge_filters=run_context.knowledge_filters
                    if not member_agent.knowledge_filters and member_agent.knowledge
                    else None,
                    yield_run_output=True,
                )
                async for event in member_stream:
                    # The terminal RunOutput (enabled by yield_run_output=True) is
                    # captured for post-processing rather than forwarded to the caller.
                    if isinstance(event, (TeamRunOutput, RunOutput)):
                        member_run_response = event
                        continue
                    check_if_run_cancelled(event)
                    event.parent_run_id = event.parent_run_id or run_response.run_id
                    yield event
            else:
                member_run_response = await member_agent.arun(  # type: ignore[misc]
                    input=member_agent_task if not history else history,
                    user_id=user_id,
                    session_id=session.session_id,
                    session_state=member_session_state_copy,
                    images=_images,
                    videos=_videos,
                    audio=_audio,
                    files=_files,
                    stream=False,
                    debug_mode=debug_mode,
                    dependencies=run_context.dependencies,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    metadata=run_context.metadata,
                    knowledge_filters=run_context.knowledge_filters
                    if not member_agent.knowledge_filters and member_agent.knowledge
                    else None,
                )
                check_if_run_cancelled(member_run_response)
        except RunCancelledException:
            # Cancellation is control flow, not a task failure -- propagate it.
            raise
        except Exception as e:
            # Any other member error fails this task but keeps the team loop alive.
            task.status = TaskStatus.failed
            task.result = f"Member execution error: {e}"
            save_task_list(run_context.session_state, task_list)
            use_team_logger()
            yield f"Task [{task.id}] failed due to member execution error: {e}"
            return
        # HITL pause: reset the task to pending so it can be retried after input resolves.
        if member_run_response is not None and member_run_response.is_paused:
            _propagate_member_pause(run_response, member_agent, member_run_response)
            task.status = TaskStatus.pending
            save_task_list(run_context.session_state, task_list)
            use_team_logger()
            _post_process_member_run(member_run_response, member_agent, member_agent_task, member_session_state_copy)
            yield f"Member '{member_agent.name}' requires human input before continuing. Task [{task.id}] paused."
            return
        # --- Record the outcome on the task and persist ---
        use_team_logger()
        _post_process_member_run(member_run_response, member_agent, member_agent_task, member_session_state_copy)
        if member_run_response is not None and member_run_response.status == RunStatus.error:
            task.status = TaskStatus.failed
            task.result = str(member_run_response.content) if member_run_response.content else "Task failed"
            save_task_list(run_context.session_state, task_list)
            if stream_events:
                yield _emit_task_updated(task, "in_progress", result=task.result)
            yield f"Task [{task.id}] failed: {task.result}"
        elif member_run_response is not None and member_run_response.content:
            content = str(member_run_response.content)
            task.status = TaskStatus.completed
            task.result = content
            save_task_list(run_context.session_state, task_list)
            if stream_events:
                yield _emit_task_updated(task, "in_progress", result=task.result)
            yield f"Task [{task.id}] completed. Result: {content}"
        else:
            # No content and no explicit error status: treat the task as completed.
            task.status = TaskStatus.completed
            task.result = "No content returned"
            save_task_list(run_context.session_state, task_list)
            if stream_events:
                yield _emit_task_updated(task, "in_progress", result=task.result)
            yield f"Task [{task.id}] completed with no content."
# ------------------------------------------------------------------
# Tool: execute_tasks_parallel (sync)
# ------------------------------------------------------------------
def execute_tasks_parallel(task_ids: List[str]) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
"""Execute multiple independent tasks in parallel by delegating each to its assigned member.
All tasks must be pending with no unresolved dependencies.
Args:
task_ids (list): List of task IDs to execute concurrently.
Returns:
str: Aggregated results from all task executions.
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
# Validate all tasks
tasks_to_run = []
for tid in task_ids:
task = task_list.get_task(tid)
if task is None:
yield f"Task '{tid}' not found."
return
if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
yield f"Task [{tid}] is {task.status.value}, cannot execute."
return
if not task.assignee:
yield f"Task [{tid}] has no assignee. Assign a member_id first."
return
member_result = _find_member_by_id(team, task.assignee)
if member_result is None:
yield f"Member '{task.assignee}' not found for task [{tid}]."
return
tasks_to_run.append((task, member_result[1]))
if not tasks_to_run:
yield "No valid tasks to execute."
return
# Mark all in_progress and emit events
for task_obj, _ in tasks_to_run:
previous_status = task_obj.status.value
task_obj.status = TaskStatus.in_progress
if stream_events:
yield _emit_task_updated(task_obj, previous_status)
save_task_list(run_context.session_state, task_list)
def _run_single_task(task_obj, member_agent):
"""Run a single task in a thread. Returns (task_id, member_run_response, session_state_copy, error)."""
member_task_description = task_obj.description or task_obj.title
member_agent_task, history = _setup_member_for_task(member_agent, member_task_description)
use_agent_logger()
member_session_state_copy = deepcopy(run_context.session_state)
# Copy media lists per-thread to avoid concurrent mutation
thread_images = list(_images)
thread_videos = list(_videos)
thread_audio = list(_audio)
thread_files = list(_files)
try:
member_run_response = member_agent.run(
input=member_agent_task if not history else history,
user_id=user_id,
session_id=session.session_id,
session_state=member_session_state_copy,
images=thread_images,
videos=thread_videos,
audio=thread_audio,
files=thread_files,
stream=False,
debug_mode=debug_mode,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
)
return (task_obj.id, member_run_response, member_session_state_copy, member_agent_task, None)
except RunCancelledException:
raise
except Exception as e:
return (task_obj.id, None, member_session_state_copy, member_agent_task, e)
results_text: List[str] = []
modified_states: List[Dict[str, Any]] = []
completion_events: List[TaskUpdatedEvent] = []
with ThreadPoolExecutor(max_workers=len(tasks_to_run)) as executor:
futures = {
executor.submit(_run_single_task, task_obj, member_agent): (task_obj, member_agent)
for task_obj, member_agent in tasks_to_run
}
for future in as_completed(futures):
task_obj, member_agent = futures[future]
try:
tid, member_run, state_copy, member_task, error = future.result()
if state_copy is not None:
modified_states.append(state_copy)
if error is not None:
task_obj.status = TaskStatus.failed
task_obj.result = f"Member execution error: {error}"
if stream_events:
completion_events.append(
_emit_task_updated(task_obj, "in_progress", result=task_obj.result)
)
results_text.append(f"Task [{tid}] failed: {error}")
continue
use_team_logger()
# Check HITL pause
if member_run is not None and member_run.is_paused:
_propagate_member_pause(run_response, member_agent, member_run)
task_obj.status = TaskStatus.pending
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
results_text.append(
f"Task [{tid}]: Member '{member_agent.name}' requires human input. Task paused."
)
elif member_run is not None and member_run.status == RunStatus.error:
task_obj.status = TaskStatus.failed
task_obj.result = str(member_run.content) if member_run.content else "Task failed"
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
if stream_events:
completion_events.append(
_emit_task_updated(task_obj, "in_progress", result=task_obj.result)
)
results_text.append(f"Task [{tid}] failed: {task_obj.result}")
elif member_run is not None and member_run.content:
content = str(member_run.content)
task_obj.status = TaskStatus.completed
task_obj.result = content
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
if stream_events:
completion_events.append(
_emit_task_updated(task_obj, "in_progress", result=task_obj.result)
)
results_text.append(f"Task [{tid}] completed. Result: {content}")
else:
task_obj.status = TaskStatus.completed
task_obj.result = "No content returned"
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
if stream_events:
completion_events.append(
_emit_task_updated(task_obj, "in_progress", result=task_obj.result)
)
results_text.append(f"Task [{tid}] completed with no content.")
except Exception as e:
task_obj.status = TaskStatus.failed
task_obj.result = f"Unexpected error: {e}"
if stream_events:
completion_events.append(_emit_task_updated(task_obj, "in_progress", result=task_obj.result))
results_text.append(f"Task [{task_obj.id}] failed unexpectedly: {e}")
# Merge all modified session states
if modified_states:
merge_parallel_session_states(run_context.session_state, modified_states) # type: ignore
save_task_list(run_context.session_state, task_list)
use_team_logger()
# Yield all completion events
for event in completion_events:
yield event
yield "\n".join(results_text)
# ------------------------------------------------------------------
# Tool: execute_tasks_parallel (async)
# ------------------------------------------------------------------
async def aexecute_tasks_parallel(
task_ids: List[str],
) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
"""Execute multiple independent tasks in parallel by delegating each to its assigned member.
All tasks must be pending with no unresolved dependencies.
Args:
task_ids (list): List of task IDs to execute concurrently.
Returns:
str: Aggregated results from all task executions.
"""
import asyncio
# Validate all tasks
tasks_to_run = []
for tid in task_ids:
task = task_list.get_task(tid)
if task is None:
yield f"Task '{tid}' not found."
return
if task.status not in (TaskStatus.pending, TaskStatus.in_progress):
yield f"Task [{tid}] is {task.status.value}, cannot execute."
return
if not task.assignee:
yield f"Task [{tid}] has no assignee. Assign a member_id first."
return
member_result = _find_member_by_id(team, task.assignee)
if member_result is None:
yield f"Member '{task.assignee}' not found for task [{tid}]."
return
tasks_to_run.append((task, member_result[1]))
if not tasks_to_run:
yield "No valid tasks to execute."
return
# Mark all in_progress and emit events
for task_obj, _ in tasks_to_run:
previous_status = task_obj.status.value
task_obj.status = TaskStatus.in_progress
if stream_events:
yield _emit_task_updated(task_obj, previous_status)
save_task_list(run_context.session_state, task_list)
async def _run_single_task_async(task_obj, member_agent):
"""Run a single task asynchronously."""
member_task_description = task_obj.description or task_obj.title
member_agent_task, history = _setup_member_for_task(member_agent, member_task_description)
use_agent_logger()
member_session_state_copy = deepcopy(run_context.session_state)
# Copy media lists to avoid concurrent mutation across coroutines
task_images = list(_images)
task_videos = list(_videos)
task_audio = list(_audio)
task_files = list(_files)
try:
member_run_response = await member_agent.arun(
input=member_agent_task if not history else history,
user_id=user_id,
session_id=session.session_id,
session_state=member_session_state_copy,
images=task_images,
videos=task_videos,
audio=task_audio,
files=task_files,
stream=False,
debug_mode=debug_mode,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
)
return (task_obj.id, member_run_response, member_session_state_copy, member_agent_task, None)
except RunCancelledException:
raise
except Exception as e:
return (task_obj.id, None, member_session_state_copy, member_agent_task, e)
# Run all tasks concurrently
gather_results = await asyncio.gather(
*[_run_single_task_async(task_obj, member_agent) for task_obj, member_agent in tasks_to_run],
return_exceptions=True,
)
results_text: List[str] = []
modified_states: List[Dict[str, Any]] = []
completion_events: List[TaskUpdatedEvent] = []
for i, gather_result in enumerate(gather_results):
task_obj, member_agent = tasks_to_run[i]
if isinstance(gather_result, BaseException):
task_obj.status = TaskStatus.failed
task_obj.result = f"Unexpected error: {gather_result}"
if stream_events:
completion_events.append(_emit_task_updated(task_obj, "in_progress", result=task_obj.result))
results_text.append(f"Task [{task_obj.id}] failed unexpectedly: {gather_result}")
continue
tid, member_run, state_copy, member_task, error = gather_result
if state_copy is not None:
modified_states.append(state_copy)
if error is not None:
task_obj.status = TaskStatus.failed
task_obj.result = f"Member execution error: {error}"
if stream_events:
completion_events.append(_emit_task_updated(task_obj, "in_progress", result=task_obj.result))
results_text.append(f"Task [{tid}] failed: {error}")
continue
use_team_logger()
if member_run is not None and member_run.is_paused:
_propagate_member_pause(run_response, member_agent, member_run)
task_obj.status = TaskStatus.pending
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
results_text.append(f"Task [{tid}]: Member '{member_agent.name}' requires human input. Task paused.")
elif member_run is not None and member_run.status == RunStatus.error:
task_obj.status = TaskStatus.failed
task_obj.result = str(member_run.content) if member_run.content else "Task failed"
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
if stream_events:
completion_events.append(_emit_task_updated(task_obj, "in_progress", result=task_obj.result))
results_text.append(f"Task [{tid}] failed: {task_obj.result}")
elif member_run is not None and member_run.content:
content = str(member_run.content)
task_obj.status = TaskStatus.completed
task_obj.result = content
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
if stream_events:
completion_events.append(_emit_task_updated(task_obj, "in_progress", result=task_obj.result))
results_text.append(f"Task [{tid}] completed. Result: {content}")
else:
task_obj.status = TaskStatus.completed
task_obj.result = "No content returned"
_post_process_member_run(
member_run,
member_agent,
member_task,
state_copy,
tool_name="execute_tasks_parallel",
skip_session_merge=True,
)
if stream_events:
completion_events.append(_emit_task_updated(task_obj, "in_progress", result=task_obj.result))
results_text.append(f"Task [{tid}] completed with no content.")
# Merge all modified session states
if modified_states:
merge_parallel_session_states(run_context.session_state, modified_states) # type: ignore
save_task_list(run_context.session_state, task_list)
use_team_logger()
# Yield all completion events
for event in completion_events:
yield event
yield "\n".join(results_text)
    # ------------------------------------------------------------------
    # Build and return Function list
    # ------------------------------------------------------------------
    # Task CRUD tools are mode-agnostic and shared between sync and async runs.
    tools: List[Function] = [
        Function.from_callable(create_task, name="create_task"),
        Function.from_callable(update_task_status, name="update_task_status"),
        Function.from_callable(list_tasks, name="list_tasks"),
        Function.from_callable(add_task_note, name="add_task_note"),
        Function.from_callable(mark_all_complete, name="mark_all_complete"),
    ]
    # Add the correct execute_task variant
    # Both variants register under the same tool names, so the model-facing
    # interface is identical regardless of execution mode.
    if async_mode:
        tools.append(Function.from_callable(aexecute_task, name="execute_task"))
        tools.append(Function.from_callable(aexecute_tasks_parallel, name="execute_tasks_parallel"))
    else:
        tools.append(Function.from_callable(execute_task, name="execute_task"))
        tools.append(Function.from_callable(execute_tasks_parallel, name="execute_tasks_parallel"))
    return tools
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_task_tools.py",
"license": "Apache License 2.0",
"lines": 941,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/mode.py | """Team execution modes."""
from enum import Enum
class TeamMode(str, Enum):
    """How a Team's leader coordinates work across its members."""
    # Supervisor pattern (default): the leader picks members, crafts their
    # tasks, and synthesizes the final response.
    coordinate = "coordinate"
    # Router pattern: the leader forwards to one specialist and returns that
    # member's response directly.
    route = "route"
    # Fan-out pattern: every member receives the same task simultaneously.
    broadcast = "broadcast"
    # Autonomous task-based execution: the leader decomposes goals into a
    # shared task list, delegates tasks, and loops until all work is done.
    tasks = "tasks"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/mode.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/team/task.py | """Task model and TaskList for autonomous team execution."""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from time import time
from typing import Any, Dict, List, Optional
from uuid import uuid4
class TaskStatus(str, Enum):
    """Lifecycle state of a task on the team's shared task list."""
    pending = "pending"  # ready to run (unless dependencies block it)
    in_progress = "in_progress"  # currently being executed by a member
    completed = "completed"  # finished successfully (terminal)
    failed = "failed"  # finished unsuccessfully (terminal)
    blocked = "blocked"  # waiting on unfinished dependencies
@dataclass
class Task:
    """One task on the team's shared list; serializable to/from session_state."""
    id: str = ""
    title: str = ""
    description: str = ""
    status: TaskStatus = TaskStatus.pending
    assignee: Optional[str] = None
    parent_id: Optional[str] = None
    dependencies: List[str] = field(default_factory=list)
    result: Optional[str] = None
    notes: List[str] = field(default_factory=list)
    created_at: float = 0.0
    def __post_init__(self) -> None:
        # Backfill identity and creation timestamp when the caller omitted them.
        self.id = self.id or str(uuid4())[:8]
        self.created_at = self.created_at or time()
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (status stored as its string value)."""
        payload = {
            name: getattr(self, name)
            for name in (
                "id",
                "title",
                "description",
                "status",
                "assignee",
                "parent_id",
                "dependencies",
                "result",
                "notes",
                "created_at",
            )
        }
        payload["status"] = self.status.value
        return payload
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Task":
        """Inverse of ``to_dict``; missing keys fall back to field defaults."""
        return cls(
            id=data.get("id", ""),
            title=data.get("title", ""),
            description=data.get("description", ""),
            status=TaskStatus(data.get("status", "pending")),
            assignee=data.get("assignee"),
            parent_id=data.get("parent_id"),
            dependencies=data.get("dependencies", []),
            result=data.get("result"),
            notes=data.get("notes", []),
            created_at=data.get("created_at", 0.0),
        )
# Statuses that end a task's lifecycle; consulted by ``TaskList.all_terminal``.
TERMINAL_STATUSES = {TaskStatus.completed, TaskStatus.failed}
# Only a completed dependency unblocks its dependents; a failed one fails them.
DEPENDENCY_SATISFIED_STATUSES = {TaskStatus.completed}
@dataclass
class TaskList:
    """A shared task list for autonomous team execution.
    Provides CRUD, dependency management, and serialization for tasks
    stored in session_state.
    """
    tasks: List[Task] = field(default_factory=list)
    goal_complete: bool = False
    completion_summary: Optional[str] = None
    # --- CRUD ---
    def create_task(
        self,
        title: str,
        description: str = "",
        assignee: Optional[str] = None,
        parent_id: Optional[str] = None,
        dependencies: Optional[List[str]] = None,
    ) -> Task:
        """Create a task, append it, and recompute blocked statuses."""
        task = Task(
            title=title,
            description=description,
            assignee=assignee,
            parent_id=parent_id,
            dependencies=dependencies or [],
        )
        self.tasks.append(task)
        self._update_blocked_statuses()
        return task
    def get_task(self, task_id: str) -> Optional[Task]:
        """Return the task with *task_id*, or None when unknown."""
        for task in self.tasks:
            if task.id == task_id:
                return task
        return None
    def update_task(self, task_id: str, **updates: Any) -> Optional[Task]:
        """Apply attribute updates to a task; returns None when the id is unknown.
        String ``status`` values are coerced to ``TaskStatus``; unknown
        attribute names are silently ignored.
        """
        task = self.get_task(task_id)
        if task is None:
            return None
        for key, value in updates.items():
            if key == "status" and isinstance(value, str):
                value = TaskStatus(value)
            if hasattr(task, key):
                setattr(task, key, value)
        self._update_blocked_statuses()
        return task
    # --- Queries ---
    def get_available_tasks(self, for_assignee: Optional[str] = None) -> List[Task]:
        """Return tasks that are pending and have all dependencies satisfied."""
        available = []
        for task in self.tasks:
            if task.status != TaskStatus.pending:
                continue
            if self._is_blocked(task):
                continue
            # Unassigned tasks are available to anyone.
            if for_assignee is not None and task.assignee is not None and task.assignee != for_assignee:
                continue
            available.append(task)
        return available
    def all_terminal(self) -> bool:
        """Return True when every task is in a terminal state (completed or failed)."""
        if not self.tasks:
            return False
        return all(t.status in TERMINAL_STATUSES for t in self.tasks)
    def get_summary_string(self) -> str:
        """Render the task list as a formatted string for the system message."""
        if not self.tasks:
            return "No tasks created yet."
        counts: Dict[str, int] = {}
        for t in self.tasks:
            counts[t.status.value] = counts.get(t.status.value, 0) + 1
        parts = [f"{v} {k}" for k, v in counts.items()]
        header = f"Tasks ({len(self.tasks)} total: {', '.join(parts)}):"
        lines = [header]
        for t in self.tasks:
            status_str = t.status.value.upper()
            assignee_str = f" (assigned: {t.assignee})" if t.assignee else " (unassigned)"
            lines.append(f"  [{t.id}] {t.title} - {status_str}{assignee_str}")
            if t.dependencies:
                lines.append(f"      Depends on: {t.dependencies}")
            if t.result:
                # Truncate long results
                result_preview = t.result[:200] + "..." if len(t.result) > 200 else t.result
                lines.append(f"      Result: {result_preview}")
            if t.notes:
                for note in t.notes[-3:]:  # Show last 3 notes
                    lines.append(f"      Note: {note}")
        if self.goal_complete and self.completion_summary:
            lines.append(f"\nGoal marked complete: {self.completion_summary}")
        return "\n".join(lines)
    # --- Dependency management ---
    def _is_blocked(self, task: Task) -> bool:
        """Check if a task has unfinished or failed dependencies."""
        if not task.dependencies:
            return False
        for dep_id in task.dependencies:
            dep = self.get_task(dep_id)
            if dep is None:
                return True  # Unknown dependency ID -- treat as blocked (fail-closed)
            if dep.status not in DEPENDENCY_SATISFIED_STATUSES:
                return True
        return False
    def _has_failed_dependency(self, task: "Task") -> bool:
        """Return True if any dependency of *task* has failed."""
        if not task.dependencies:
            return False
        for dep_id in task.dependencies:
            dep = self.get_task(dep_id)
            if dep is not None and dep.status == TaskStatus.failed:
                return True
        return False
    def _update_blocked_statuses(self) -> None:
        """Recompute blocked status for all pending/blocked tasks.
        If a dependency has failed, the dependent task is marked failed in the
        same pass (previously a *pending* task needed two passes: pending ->
        blocked -> failed) so that ``all_terminal()`` can detect completion
        and the loop does not deadlock.
        """
        for task in self.tasks:
            # Terminal and in-progress tasks never change state here.
            if task.status not in (TaskStatus.pending, TaskStatus.blocked):
                continue
            if self._has_failed_dependency(task):
                # Fail fast: a failed dependency can never become satisfied.
                task.status = TaskStatus.failed
                task.result = "Automatically failed: a dependency failed."
            elif self._is_blocked(task):
                task.status = TaskStatus.blocked
            else:
                task.status = TaskStatus.pending
    # --- Serialization ---
    def to_dict(self) -> Dict[str, Any]:
        return {
            "tasks": [t.to_dict() for t in self.tasks],
            "goal_complete": self.goal_complete,
            "completion_summary": self.completion_summary,
        }
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "TaskList":
        tasks = [Task.from_dict(t) for t in data.get("tasks", [])]
        task_list = cls(
            tasks=tasks,
            goal_complete=data.get("goal_complete", False),
            completion_summary=data.get("completion_summary"),
        )
        task_list._update_blocked_statuses()
        return task_list
# --- session_state helpers ---
TASK_LIST_KEY = "_team_tasks"
def load_task_list(session_state: Optional[Dict[str, Any]]) -> TaskList:
    """Read the persisted task list out of *session_state*; empty list when absent."""
    if not session_state or TASK_LIST_KEY not in session_state:
        return TaskList()
    return TaskList.from_dict(session_state[TASK_LIST_KEY])
def save_task_list(session_state: Optional[Dict[str, Any]], task_list: TaskList) -> None:
    """Write *task_list* into *session_state* under ``TASK_LIST_KEY`` (no-op when None)."""
    if session_state is None:
        return
    session_state[TASK_LIST_KEY] = task_list.to_dict()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/task.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/callables.py | """Callable factory resolution utilities for Agent and Team.
Provides shared logic for resolving callable factories for tools, knowledge,
and members at runtime, with caching support.
"""
from __future__ import annotations
import asyncio
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Literal,
Optional,
Tuple,
)
if TYPE_CHECKING:
from agno.run import RunContext
from agno.utils.log import log_debug, log_warning
def _get_or_create_cache(entity: Any, attr: str) -> Dict[str, Any]:
"""Get or create a cache dict on the entity, ensuring it persists across calls."""
cache = getattr(entity, attr, None)
if cache is None:
cache = {}
try:
object.__setattr__(entity, attr, cache)
except (AttributeError, TypeError):
pass # Entity doesn't support attribute setting; cache will be per-call
return cache
def is_callable_factory(value: Any, excluded_types: Tuple[type, ...] = ()) -> bool:
    """Check if a value is a callable factory (not a tool/knowledge instance).
    Args:
        value: The value to check.
        excluded_types: Types that are callable but should NOT be treated as factories.
    Returns:
        True if value is a callable factory.
    """
    # Classes themselves are callable but shouldn't be treated as factories,
    # and instances of excluded tool/knowledge types are skipped too.
    return (
        callable(value)
        and not isinstance(value, excluded_types)
        and not isinstance(value, type)
    )
def invoke_callable_factory(
    factory: Callable,
    entity: Any,
    run_context: "RunContext",
) -> Any:
    """Invoke a callable factory with signature-based parameter injection (sync).
    Inspects the factory's signature and injects matching parameters:
    - agent/team: the entity (Agent or Team)
    - run_context: the current RunContext
    - session_state: the current session state dict
    Raises RuntimeError if the factory is async (use ainvoke_callable_factory instead).
    """
    if asyncio.iscoroutinefunction(factory):
        raise RuntimeError(
            f"Async callable factory {factory!r} cannot be used in sync mode. Use arun() or aprint_response() instead."
        )
    params = inspect.signature(factory).parameters
    injectable = {
        "agent": entity,
        "team": entity,
        "run_context": run_context,
        "session_state": run_context.session_state if run_context.session_state is not None else {},
    }
    result = factory(**{name: value for name, value in injectable.items() if name in params})
    if asyncio.isfuture(result) or asyncio.iscoroutine(result):
        # Cleanup the coroutine to prevent "never awaited" warnings.
        if asyncio.iscoroutine(result):
            result.close()
        raise RuntimeError(
            f"Callable factory {factory!r} returned an awaitable in sync mode. Use arun() or aprint_response() instead."
        )
    return result
async def ainvoke_callable_factory(
    factory: Callable,
    entity: Any,
    run_context: "RunContext",
) -> Any:
    """Invoke a callable factory with signature-based parameter injection (async).
    Supports both sync and async factories. Async results are awaited automatically.
    """
    params = inspect.signature(factory).parameters
    injectable = {
        "agent": entity,
        "team": entity,
        "run_context": run_context,
        "session_state": run_context.session_state if run_context.session_state is not None else {},
    }
    result = factory(**{name: value for name, value in injectable.items() if name in params})
    return await result if asyncio.iscoroutine(result) else result
def _compute_cache_key(
entity: Any,
run_context: "RunContext",
custom_key_fn: Optional[Callable] = None,
) -> Optional[str]:
"""Compute cache key for a callable factory (sync).
Priority: custom_key_fn > user_id > session_id > None (skip caching).
Raises RuntimeError if custom_key_fn is async.
"""
if custom_key_fn is not None:
if asyncio.iscoroutinefunction(custom_key_fn):
raise RuntimeError(
f"Async cache key function {custom_key_fn!r} cannot be used in sync mode. "
"Use arun() or aprint_response() instead."
)
sig = inspect.signature(custom_key_fn)
kwargs: Dict[str, Any] = {}
if "run_context" in sig.parameters:
kwargs["run_context"] = run_context
if "agent" in sig.parameters:
kwargs["agent"] = entity
if "team" in sig.parameters:
kwargs["team"] = entity
result = custom_key_fn(**kwargs)
if asyncio.iscoroutine(result):
result.close()
raise RuntimeError(
f"Cache key function {custom_key_fn!r} returned an awaitable in sync mode. "
"Use arun() or aprint_response() instead."
)
return result
if run_context.user_id is not None:
return run_context.user_id
if run_context.session_id is not None:
return run_context.session_id
return None
async def _acompute_cache_key(
entity: Any,
run_context: "RunContext",
custom_key_fn: Optional[Callable] = None,
) -> Optional[str]:
"""Compute cache key for a callable factory (async).
Supports both sync and async custom key functions.
Priority: custom_key_fn > user_id > session_id > None (skip caching).
"""
if custom_key_fn is not None:
sig = inspect.signature(custom_key_fn)
kwargs: Dict[str, Any] = {}
if "run_context" in sig.parameters:
kwargs["run_context"] = run_context
if "agent" in sig.parameters:
kwargs["agent"] = entity
if "team" in sig.parameters:
kwargs["team"] = entity
result = custom_key_fn(**kwargs)
if asyncio.iscoroutine(result):
result = await result
return result
if run_context.user_id is not None:
return run_context.user_id
if run_context.session_id is not None:
return run_context.session_id
return None
# ---------------------------------------------------------------------------
# Tools resolution
# ---------------------------------------------------------------------------
def resolve_callable_tools(entity: Any, run_context: "RunContext") -> None:
    """Resolve a callable tools factory and store the result on run_context.tools (sync)."""
    from agno.tools import Toolkit
    from agno.tools.function import Function

    if not is_callable_factory(entity.tools, excluded_types=(Toolkit, Function)):
        return
    key_fn = getattr(entity, "callable_tools_cache_key", None)
    caching = getattr(entity, "cache_callables", True)
    cache = _get_or_create_cache(entity, "_callable_tools_cache")
    key = _compute_cache_key(entity, run_context, key_fn)
    # Serve from cache when possible.
    if caching and key is not None and key in cache:
        log_debug(f"Using cached tools for key: {key}")
        run_context.tools = cache[key]
        return
    # Cache miss: invoke the factory and normalize its output to a list.
    produced = invoke_callable_factory(entity.tools, entity, run_context)
    if produced is None:
        tools_list = []
    elif isinstance(produced, (list, tuple)):
        tools_list = list(produced)
    else:
        raise TypeError(f"Callable tools factory must return a list or tuple, got {type(produced).__name__}")
    if caching and key is not None:
        cache[key] = tools_list
        log_debug(f"Cached tools for key: {key}")
    run_context.tools = tools_list
async def aresolve_callable_tools(entity: Any, run_context: "RunContext") -> None:
    """Resolve a callable tools factory and store the result on run_context.tools (async)."""
    from agno.tools import Toolkit
    from agno.tools.function import Function

    if not is_callable_factory(entity.tools, excluded_types=(Toolkit, Function)):
        return
    key_fn = getattr(entity, "callable_tools_cache_key", None)
    caching = getattr(entity, "cache_callables", True)
    cache = _get_or_create_cache(entity, "_callable_tools_cache")
    key = await _acompute_cache_key(entity, run_context, key_fn)
    # Serve from cache when possible.
    if caching and key is not None and key in cache:
        log_debug(f"Using cached tools for key: {key}")
        run_context.tools = cache[key]
        return
    # Cache miss: invoke the factory and normalize its output to a list.
    produced = await ainvoke_callable_factory(entity.tools, entity, run_context)
    if produced is None:
        tools_list = []
    elif isinstance(produced, (list, tuple)):
        tools_list = list(produced)
    else:
        raise TypeError(f"Callable tools factory must return a list or tuple, got {type(produced).__name__}")
    if caching and key is not None:
        cache[key] = tools_list
        log_debug(f"Cached tools for key: {key}")
    run_context.tools = tools_list
# ---------------------------------------------------------------------------
# Knowledge resolution
# ---------------------------------------------------------------------------
def resolve_callable_knowledge(entity: Any, run_context: "RunContext") -> None:
    """Resolve a callable knowledge factory and store the result on run_context.knowledge (sync)."""
    from agno.knowledge.protocol import KnowledgeProtocol

    candidate = entity.knowledge
    if not is_callable_factory(candidate, excluded_types=(KnowledgeProtocol,)):
        return
    key_fn = getattr(entity, "callable_knowledge_cache_key", None)
    caching = getattr(entity, "cache_callables", True)
    cache = _get_or_create_cache(entity, "_callable_knowledge_cache")
    key = _compute_cache_key(entity, run_context, key_fn)
    # Serve from cache when possible.
    if caching and key is not None and key in cache:
        log_debug(f"Using cached knowledge for key: {key}")
        run_context.knowledge = cache[key]
        return
    produced = invoke_callable_factory(candidate, entity, run_context)
    # None is allowed (no knowledge); anything else must satisfy the protocol.
    if produced is not None and not isinstance(produced, KnowledgeProtocol):
        raise TypeError(
            f"Callable knowledge factory must return a KnowledgeProtocol instance, got {type(produced).__name__}"
        )
    if caching and key is not None and produced is not None:
        cache[key] = produced
        log_debug(f"Cached knowledge for key: {key}")
    run_context.knowledge = produced
async def aresolve_callable_knowledge(entity: Any, run_context: "RunContext") -> None:
    """Resolve a callable knowledge factory and store the result on run_context.knowledge (async)."""
    from agno.knowledge.protocol import KnowledgeProtocol

    candidate = entity.knowledge
    if not is_callable_factory(candidate, excluded_types=(KnowledgeProtocol,)):
        return
    key_fn = getattr(entity, "callable_knowledge_cache_key", None)
    caching = getattr(entity, "cache_callables", True)
    cache = _get_or_create_cache(entity, "_callable_knowledge_cache")
    key = await _acompute_cache_key(entity, run_context, key_fn)
    # Serve from cache when possible.
    if caching and key is not None and key in cache:
        log_debug(f"Using cached knowledge for key: {key}")
        run_context.knowledge = cache[key]
        return
    produced = await ainvoke_callable_factory(candidate, entity, run_context)
    # None is allowed (no knowledge); anything else must satisfy the protocol.
    if produced is not None and not isinstance(produced, KnowledgeProtocol):
        raise TypeError(
            f"Callable knowledge factory must return a KnowledgeProtocol instance, got {type(produced).__name__}"
        )
    if caching and key is not None and produced is not None:
        cache[key] = produced
        log_debug(f"Cached knowledge for key: {key}")
    run_context.knowledge = produced
# ---------------------------------------------------------------------------
# Members resolution (Team only)
# ---------------------------------------------------------------------------
def resolve_callable_members(entity: Any, run_context: "RunContext") -> None:
    """Resolve a callable members factory and store the result on run_context.members (sync)."""
    factory = getattr(entity, "members", None)
    if not is_callable_factory(factory):
        return
    assert callable(factory)
    key_fn = getattr(entity, "callable_members_cache_key", None)
    caching = getattr(entity, "cache_callables", True)
    cache = _get_or_create_cache(entity, "_callable_members_cache")
    key = _compute_cache_key(entity, run_context, key_fn)
    # Serve from cache when possible.
    if caching and key is not None and key in cache:
        log_debug(f"Using cached members for key: {key}")
        run_context.members = cache[key]
        return
    # Cache miss: invoke the factory and normalize its output to a list.
    produced = invoke_callable_factory(factory, entity, run_context)
    if produced is None:
        members_list = []
    elif isinstance(produced, (list, tuple)):
        members_list = list(produced)
    else:
        raise TypeError(f"Callable members factory must return a list or tuple, got {type(produced).__name__}")
    if caching and key is not None:
        cache[key] = members_list
        log_debug(f"Cached members for key: {key}")
    run_context.members = members_list
async def aresolve_callable_members(entity: Any, run_context: "RunContext") -> None:
    """Resolve a callable members factory and store the result on run_context.members (async)."""
    factory = getattr(entity, "members", None)
    if not is_callable_factory(factory):
        return
    assert callable(factory)
    key_fn = getattr(entity, "callable_members_cache_key", None)
    caching = getattr(entity, "cache_callables", True)
    cache = _get_or_create_cache(entity, "_callable_members_cache")
    key = await _acompute_cache_key(entity, run_context, key_fn)
    # Serve from cache when possible.
    if caching and key is not None and key in cache:
        log_debug(f"Using cached members for key: {key}")
        run_context.members = cache[key]
        return
    # Cache miss: invoke the factory and normalize its output to a list.
    produced = await ainvoke_callable_factory(factory, entity, run_context)
    if produced is None:
        members_list = []
    elif isinstance(produced, (list, tuple)):
        members_list = list(produced)
    else:
        raise TypeError(f"Callable members factory must return a list or tuple, got {type(produced).__name__}")
    if caching and key is not None:
        cache[key] = members_list
        log_debug(f"Cached members for key: {key}")
    run_context.members = members_list
# ---------------------------------------------------------------------------
# Cache management
# ---------------------------------------------------------------------------
def clear_callable_cache(
    entity: Any,
    kind: Optional[Literal["tools", "knowledge", "members"]] = None,
    close: bool = False,
) -> None:
    """Clear callable factory caches (sync).

    Args:
        entity: The Agent or Team whose caches to clear.
        kind: Which cache to clear. None clears all.
        close: If True, call .close() on cached tools before clearing.
    """
    by_kind = {
        "tools": ["_callable_tools_cache"],
        "knowledge": ["_callable_knowledge_cache"],
        "members": ["_callable_members_cache"],
    }
    if kind is None:
        targets = ["_callable_tools_cache", "_callable_knowledge_cache", "_callable_members_cache"]
    elif kind in by_kind:
        targets = by_kind[kind]
    else:
        raise ValueError(f"Invalid kind: {kind!r}. Expected 'tools', 'knowledge', 'members', or None.")
    if close:
        _close_cached_resources_sync(entity, targets)
    for attr in targets:
        store = getattr(entity, attr, None)
        if store is not None:
            store.clear()
async def aclear_callable_cache(
    entity: Any,
    kind: Optional[Literal["tools", "knowledge", "members"]] = None,
    close: bool = False,
) -> None:
    """Clear callable factory caches (async).

    Args:
        entity: The Agent or Team whose caches to clear.
        kind: Which cache to clear. None clears all.
        close: If True, call .aclose()/.close() on cached tools before clearing.
    """
    by_kind = {
        "tools": ["_callable_tools_cache"],
        "knowledge": ["_callable_knowledge_cache"],
        "members": ["_callable_members_cache"],
    }
    if kind is None:
        targets = ["_callable_tools_cache", "_callable_knowledge_cache", "_callable_members_cache"]
    elif kind in by_kind:
        targets = by_kind[kind]
    else:
        raise ValueError(f"Invalid kind: {kind!r}. Expected 'tools', 'knowledge', 'members', or None.")
    if close:
        await _aclose_cached_resources(entity, targets)
    for attr in targets:
        store = getattr(entity, attr, None)
        if store is not None:
            store.clear()
def _close_cached_resources_sync(entity: Any, cache_names: List[str]) -> None:
"""Close cached resources, deduplicating by identity."""
seen_ids: set = set()
for cache_name in cache_names:
cache = getattr(entity, cache_name, None)
if not cache:
continue
for cached_value in cache.values():
items = cached_value if isinstance(cached_value, (list, tuple)) else [cached_value]
for item in items:
item_id = id(item)
if item_id in seen_ids:
continue
seen_ids.add(item_id)
close_fn = getattr(item, "close", None)
if close_fn is not None and callable(close_fn):
result = close_fn()
if asyncio.iscoroutine(result):
result.close() # Prevent RuntimeWarning
log_warning(
f"Sync close() on {item!r} returned a coroutine. "
"Use aclear_callable_cache() for async cleanup."
)
async def _aclose_cached_resources(entity: Any, cache_names: List[str]) -> None:
"""Close cached resources async, deduplicating by identity. Prefers aclose() over close()."""
seen_ids: set = set()
for cache_name in cache_names:
cache = getattr(entity, cache_name, None)
if not cache:
continue
for cached_value in cache.values():
items = cached_value if isinstance(cached_value, (list, tuple)) else [cached_value]
for item in items:
item_id = id(item)
if item_id in seen_ids:
continue
seen_ids.add(item_id)
aclose_fn = getattr(item, "aclose", None)
if aclose_fn is not None and callable(aclose_fn):
result = aclose_fn()
if asyncio.iscoroutine(result):
await result
continue
close_fn = getattr(item, "close", None)
if close_fn is not None and callable(close_fn):
result = close_fn()
if asyncio.iscoroutine(result):
await result
# ---------------------------------------------------------------------------
# Helper to get resolved resource, falling back to static
# ---------------------------------------------------------------------------
def get_resolved_knowledge(entity: Any, run_context: Optional["RunContext"] = None) -> Any:
    """Get the resolved knowledge: run_context.knowledge > entity.knowledge (if static)."""
    from agno.knowledge.protocol import KnowledgeProtocol

    if run_context is not None and run_context.knowledge is not None:
        return run_context.knowledge
    static = getattr(entity, "knowledge", None)
    # Only a non-factory (already-constructed) knowledge object counts as static.
    if static is None or is_callable_factory(static, excluded_types=(KnowledgeProtocol,)):
        return None
    return static
def get_resolved_tools(entity: Any, run_context: Optional["RunContext"] = None) -> Optional[list]:
    """Get the resolved tools: run_context.tools > entity.tools (if a static list).

    Args:
        entity: Agent or Team that may carry a static ``tools`` list.
        run_context: Optional run context whose resolved ``tools`` take precedence.

    Returns:
        The resolved tools list, or None when neither source provides a list.
    """
    if run_context is not None and run_context.tools is not None:
        return run_context.tools
    tools = getattr(entity, "tools", None)
    # isinstance() already rejects None, so a separate None check is redundant.
    if isinstance(tools, list):
        return tools
    return None
def get_resolved_members(entity: Any, run_context: Optional["RunContext"] = None) -> Optional[list]:
    """Get the resolved members: run_context.members > entity.members (if a static list).

    Args:
        entity: Team that may carry a static ``members`` list.
        run_context: Optional run context whose resolved ``members`` take precedence.

    Returns:
        The resolved members list, or None when neither source provides a list.
    """
    if run_context is not None and run_context.members is not None:
        return run_context.members
    members = getattr(entity, "members", None)
    # isinstance() already rejects None, so a separate None check is redundant.
    if isinstance(members, list):
        return members
    return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/callables.py",
"license": "Apache License 2.0",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/teams/human_in_the_loop/test_team_confirmation_flows.py | """Integration tests for team HITL confirmation flows.
Tests sync/async/streaming confirmation and rejection of member agent tools.
"""
import os
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.team import RunPausedEvent as TeamRunPausedEvent
from agno.team.team import Team
from agno.tools.decorator import tool
pytestmark = pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY not set")
@tool(requires_confirmation=True)
def get_the_weather(city: str) -> str:
    """Get the current weather for a city.

    Args:
        city: The city to get weather for.
    """
    # Canned response: these tests exercise the confirmation pause, not weather.
    # NOTE: the docstring above feeds the tool schema shown to the model.
    return f"It is currently 70 degrees and cloudy in {city}"
def _make_agent(db=None):
    """Build the weather member agent whose only tool requires confirmation."""
    return Agent(
        name="Weather Agent",
        role="Provides weather information. Use the get_the_weather tool to get weather data.",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=db,
        telemetry=False,
    )
def _make_team(agent, db=None):
    """Wrap the member agent in a team that must delegate weather questions to it."""
    return Team(
        name="Weather Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent],
        db=db,
        telemetry=False,
        instructions=[
            "You MUST delegate all weather-related tasks to the Weather Agent.",
            "Do NOT try to answer weather questions yourself - always use the Weather Agent member.",
        ],
    )
def test_member_confirmation_pause(shared_db):
    """Team pauses when member agent tool requires confirmation."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    run_output = weather_team.run("What is the weather in Tokyo?", session_id="test_confirm_pause")
    # The run must pause with at least one confirmation requirement.
    assert run_output.is_paused
    assert len(run_output.active_requirements) >= 1
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    # The requirement should carry member-agent and tool-call context.
    assert requirement.member_agent_name is not None
    assert requirement.tool_execution is not None
    assert requirement.tool_execution.tool_name == "get_the_weather"
def test_member_confirmation_continue(shared_db):
    """Pause -> confirm -> continue_run completes successfully."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    run_output = weather_team.run("What is the weather in Tokyo?", session_id="test_confirm_continue")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    # Approve the pending tool call and resume the run.
    requirement.confirm()
    resumed = weather_team.continue_run(run_output)
    assert not resumed.is_paused
    assert resumed.content is not None
def test_member_rejection_flow(shared_db):
    """Pause -> reject with note -> continue_run processes the rejection.

    Note: After rejection, the model may either:
    1. Complete with a message acknowledging the rejection, OR
    2. Retry by calling the member agent again (which triggers another pause)
    This test verifies that the original rejection is processed correctly.
    """
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    run_output = weather_team.run("What is the weather in Tokyo?", session_id="test_reject_flow")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    first_tool_call_id = requirement.tool_execution.tool_call_id
    requirement.reject(note="User does not want weather data")
    resumed = weather_team.continue_run(run_output)
    # The original requirement should now be resolved as a rejection.
    assert requirement.is_resolved()
    assert requirement.confirmation is False  # Was rejected, not confirmed
    assert resumed.content is not None
    # If the model retried and paused again, it must be a NEW tool call.
    if resumed.is_paused and resumed.active_requirements:
        retry_requirement = resumed.active_requirements[0]
        assert retry_requirement.tool_execution.tool_call_id != first_tool_call_id
def test_member_confirmation_streaming(shared_db):
    """Streaming run pauses, then continues after confirmation."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    pause_event = None
    event_stream = weather_team.run(
        "What is the weather in Tokyo?",
        session_id="test_confirm_stream",
        stream=True,
        stream_events=True,
    )
    for event in event_stream:
        # isinstance distinguishes the team's pause event from the member agent's.
        if isinstance(event, TeamRunPausedEvent):
            pause_event = event
            break
    assert pause_event is not None
    assert pause_event.is_paused
    assert len(pause_event.requirements) >= 1
    pause_event.requirements[0].confirm()
    resumed = weather_team.continue_run(
        run_id=pause_event.run_id,
        session_id=pause_event.session_id,
        requirements=pause_event.requirements,
    )
    assert not resumed.is_paused
    assert resumed.content is not None
@pytest.mark.asyncio
async def test_member_confirmation_async(shared_db):
    """Async run pauses and continues after confirmation."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    run_output = await weather_team.arun("What is the weather in Tokyo?", session_id="test_confirm_async")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    requirement.confirm()
    resumed = await weather_team.acontinue_run(run_output)
    assert not resumed.is_paused
    assert resumed.content is not None
@pytest.mark.asyncio
async def test_member_confirmation_async_streaming(shared_db):
    """Async streaming run pauses and continues after confirmation."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    pause_event = None
    async for event in weather_team.arun(
        "What is the weather in Tokyo?",
        session_id="test_confirm_async_stream",
        stream=True,
        stream_events=True,
    ):
        # isinstance distinguishes the team's pause event from the member agent's.
        if isinstance(event, TeamRunPausedEvent):
            pause_event = event
            break
    assert pause_event is not None
    assert pause_event.is_paused
    pause_event.requirements[0].confirm()
    resumed = await weather_team.acontinue_run(
        run_id=pause_event.run_id,
        session_id=pause_event.session_id,
        requirements=pause_event.requirements,
    )
    assert not resumed.is_paused
    assert resumed.content is not None
def test_paused_event_in_stream(shared_db):
    """Streaming with events emits a TeamRunPaused event."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    # any() short-circuits on the first pause event, like the original break.
    saw_pause = any(
        isinstance(event, TeamRunPausedEvent)
        for event in weather_team.run(
            "What is the weather in Tokyo?", session_id="test_paused_event", stream=True, stream_events=True
        )
    )
    assert saw_pause, "TeamRunPaused event should appear in stream"
def test_unresolved_stays_paused(shared_db):
    """Calling continue_run without resolving requirements keeps team paused."""
    member = _make_agent(db=shared_db)
    weather_team = _make_team(member, db=shared_db)
    run_output = weather_team.run("What is the weather in Tokyo?", session_id="test_unresolved")
    assert run_output.is_paused
    # Deliberately skip confirming the requirement before continuing.
    resumed = weather_team.continue_run(run_output)
    assert resumed.is_paused
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/human_in_the_loop/test_team_confirmation_flows.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/human_in_the_loop/test_team_external_execution_flows.py | """Integration tests for team HITL external execution flows.
Tests sync/async/streaming flows where member agent tools are executed externally.
"""
import os
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.team import RunPausedEvent as TeamRunPausedEvent
from agno.team.team import Team
from agno.tools.decorator import tool
pytestmark = pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY not set")
@tool(external_execution=True)
def send_email(to: str, subject: str, body: str) -> str:
    """Send an email to a recipient.

    Args:
        to: The recipient email address.
        subject: The email subject.
        body: The email body.
    """
    # The tests below resolve the pause with set_external_execution_result()
    # instead of invoking this body; the return is a placeholder.
    return f"Email sent to {to}"
def _make_agent(db=None):
    """Build the email member agent whose tool pauses for external execution."""
    return Agent(
        name="Email Agent",
        role="Handles email operations. Use the send_email tool to send emails.",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[send_email],
        db=db,
        telemetry=False,
    )
def _make_team(agent, db=None):
    """Wrap the member agent in a team that must delegate email tasks to it."""
    return Team(
        name="Comms Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent],
        db=db,
        telemetry=False,
        instructions=[
            "You MUST delegate all email-related tasks to the Email Agent.",
            "Do NOT try to handle email tasks yourself - always use the Email Agent member.",
        ],
    )
# def test_member_external_execution_pause(shared_db):
# """Team pauses when member agent tool requires external execution."""
# agent = _make_agent(db=shared_db)
# team = _make_team(agent, db=shared_db)
# response = team.run(
# "Send an email to john@example.com with subject 'Hello' and body 'Hi there'",
# session_id="test_ext_exec_pause",
# )
# assert response.is_paused
# assert len(response.active_requirements) >= 1
# req = response.active_requirements[0]
# assert req.needs_external_execution
# assert req.tool_execution is not None
# assert req.tool_execution.tool_name == "send_email"
# def test_member_external_execution_continue(shared_db):
# """Pause -> provide external result -> continue_run completes."""
# agent = _make_agent(db=shared_db)
# team = _make_team(agent, db=shared_db)
# response = team.run(
# "Send an email to john@example.com with subject 'Hello' and body 'Hi there'",
# session_id="test_ext_exec_continue",
# )
# assert response.is_paused
# req = response.active_requirements[0]
# assert req.needs_external_execution
# req.set_external_execution_result("Email sent successfully to john@example.com")
# result = team.continue_run(response)
# assert not result.is_paused
# assert result.content is not None
# @pytest.mark.asyncio
# async def test_member_external_execution_async(shared_db):
# """Async external execution flow."""
# agent = _make_agent(db=shared_db)
# team = _make_team(agent, db=shared_db)
# response = await team.arun(
# "Send an email to john@example.com with subject 'Hello' and body 'Hi there'",
# session_id="test_ext_exec_async",
# )
# assert response.is_paused
# req = response.active_requirements[0]
# assert req.needs_external_execution
# req.set_external_execution_result("Email sent successfully to john@example.com")
# result = await team.acontinue_run(response)
# assert not result.is_paused
# assert result.content is not None
@pytest.mark.asyncio
async def test_member_external_execution_async_streaming(shared_db):
    """Async streaming external execution flow."""
    member = _make_agent(db=shared_db)
    comms_team = _make_team(member, db=shared_db)
    pause_event = None
    async for event in comms_team.arun(
        "Send an email to john@example.com with subject 'Hello' and body 'Hi there'",
        session_id="test_ext_exec_async_stream",
        stream=True,
        stream_events=True,
    ):
        # isinstance distinguishes the team's pause event from the member agent's.
        if isinstance(event, TeamRunPausedEvent):
            pause_event = event
            break
    assert pause_event is not None
    assert pause_event.is_paused
    requirement = pause_event.requirements[0]
    assert requirement.needs_external_execution
    # Simulate the externally-executed tool result before resuming.
    requirement.set_external_execution_result("Email sent successfully to john@example.com")
    resumed = await comms_team.acontinue_run(
        run_id=pause_event.run_id,
        session_id=pause_event.session_id,
        requirements=pause_event.requirements,
    )
    assert not resumed.is_paused
    assert resumed.content is not None
def test_member_external_execution_streaming(shared_db):
    """Streaming external execution flow."""
    member = _make_agent(db=shared_db)
    comms_team = _make_team(member, db=shared_db)
    pause_event = None
    for event in comms_team.run(
        "Send an email to john@example.com with subject 'Hello' and body 'Hi there'",
        session_id="test_ext_exec_stream",
        stream=True,
        stream_events=True,
    ):
        # isinstance distinguishes the team's pause event from the member agent's.
        if isinstance(event, TeamRunPausedEvent):
            pause_event = event
            break
    assert pause_event is not None
    assert pause_event.is_paused
    requirement = pause_event.requirements[0]
    assert requirement.needs_external_execution
    # Simulate the externally-executed tool result before resuming.
    requirement.set_external_execution_result("Email sent successfully to john@example.com")
    resumed = comms_team.continue_run(
        run_id=pause_event.run_id,
        session_id=pause_event.session_id,
        requirements=pause_event.requirements,
    )
    assert not resumed.is_paused
    assert resumed.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/human_in_the_loop/test_team_external_execution_flows.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/human_in_the_loop/test_team_tool_hitl.py | """Integration tests for team-level tool HITL.
Tests HITL for tools provided directly to the Team (vs. member agent tools).
"""
import os
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.team import RunPausedEvent as TeamRunPausedEvent
from agno.team.team import Team
from agno.tools.decorator import tool
pytestmark = pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY not set")
@tool(requires_confirmation=True)
def approve_deployment(app_name: str, environment: str) -> str:
    """Approve a deployment to the specified environment.

    Args:
        app_name: Name of the application to deploy.
        environment: Target environment (staging, production).
    """
    # Canned success message; the tests only exercise the confirmation pause.
    return f"Deployed {app_name} to {environment} successfully"
def _make_team(db=None):
    """Create a team with the HITL tool on the team itself (no member agents with tools)."""
    # Tool-less member so the team has a member to coordinate; the HITL tool
    # lives on the team, which is what these tests target.
    helper = Agent(
        name="Helper Agent",
        role="Assists with general questions",
        model=OpenAIChat(id="gpt-4o-mini"),
        telemetry=False,
    )
    return Team(
        name="Deploy Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[helper],
        tools=[approve_deployment],
        db=db,
        telemetry=False,
        instructions=[
            "You MUST use the approve_deployment tool when asked to deploy an application.",
            "Do NOT respond without using the tool - always call approve_deployment first.",
        ],
    )
def test_team_tool_confirmation_pause(shared_db):
    """Team pauses when a team-level tool requires confirmation."""
    deploy_team = _make_team(db=shared_db)
    run_output = deploy_team.run("Deploy myapp to production", session_id="test_team_tool_pause")
    assert run_output.is_paused
    assert len(run_output.active_requirements) >= 1
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    assert requirement.tool_execution is not None
    assert requirement.tool_execution.tool_name == "approve_deployment"
    # Team-level tools should NOT carry member-agent context.
    assert requirement.member_agent_id is None
def test_team_tool_confirmation_continue(shared_db):
    """Team-level tool: pause -> confirm -> continue completes."""
    deploy_team = _make_team(db=shared_db)
    run_output = deploy_team.run("Deploy myapp to production", session_id="test_team_tool_continue")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    # Approve the pending tool call and resume the run.
    requirement.confirm()
    resumed = deploy_team.continue_run(run_output)
    assert not resumed.is_paused
    assert resumed.content is not None
@pytest.mark.asyncio
async def test_team_tool_confirmation_async(shared_db):
    """Async team-level tool confirmation flow."""
    deploy_team = _make_team(db=shared_db)
    run_output = await deploy_team.arun("Deploy myapp to staging", session_id="test_team_tool_async")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    requirement.confirm()
    resumed = await deploy_team.acontinue_run(run_output)
    assert not resumed.is_paused
    assert resumed.content is not None
def test_team_tool_confirmation_streaming(shared_db):
    """Streaming team-level tool confirmation flow."""
    deploy_team = _make_team(db=shared_db)
    pause_event = None
    for event in deploy_team.run(
        "Deploy myapp to staging",
        session_id="test_team_tool_stream",
        stream=True,
        stream_events=True,
    ):
        # isinstance picks out the team's pause event.
        if isinstance(event, TeamRunPausedEvent):
            pause_event = event
            break
    assert pause_event is not None
    assert pause_event.is_paused
    requirement = pause_event.active_requirements[0]
    assert requirement.needs_confirmation
    assert requirement.member_agent_id is None
    requirement.confirm()
    resumed = deploy_team.continue_run(
        run_id=pause_event.run_id,
        session_id=pause_event.session_id,
        requirements=pause_event.requirements,
    )
    assert not resumed.is_paused
    assert resumed.content is not None
@pytest.mark.asyncio
async def test_team_tool_confirmation_async_streaming(shared_db):
    """Async streaming team-level tool confirmation flow."""
    deploy_team = _make_team(db=shared_db)
    pause_event = None
    async for event in deploy_team.arun(
        "Deploy myapp to staging",
        session_id="test_team_tool_async_stream",
        stream=True,
        stream_events=True,
    ):
        # isinstance picks out the team's pause event.
        if isinstance(event, TeamRunPausedEvent):
            pause_event = event
            break
    assert pause_event is not None
    assert pause_event.is_paused
    requirement = pause_event.active_requirements[0]
    assert requirement.needs_confirmation
    assert requirement.member_agent_id is None
    requirement.confirm()
    resumed = await deploy_team.acontinue_run(
        run_id=pause_event.run_id,
        session_id=pause_event.session_id,
        requirements=pause_event.requirements,
    )
    assert not resumed.is_paused
    assert resumed.content is not None
def test_team_tool_rejection(shared_db):
    """Team-level tool: reject -> continue handles gracefully."""
    deploy_team = _make_team(db=shared_db)
    run_output = deploy_team.run("Deploy myapp to production", session_id="test_team_tool_reject")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_confirmation
    # Decline the tool call and let the team wrap up without executing it.
    requirement.reject(note="Deployment not approved by ops team")
    resumed = deploy_team.continue_run(run_output)
    assert not resumed.is_paused
    assert resumed.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/human_in_the_loop/test_team_tool_hitl.py",
"license": "Apache License 2.0",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/human_in_the_loop/test_team_user_input_flows.py | """Integration tests for team HITL user input flows.
Tests sync/async/streaming flows where member agent tools require user input.
"""
import os
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.team import RunPausedEvent as TeamRunPausedEvent
from agno.team.team import Team
from agno.tools.decorator import tool
pytestmark = pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY not set")
@tool(requires_user_input=True, user_input_fields=["city"])
def get_the_weather(city: str) -> str:
    """Get the current weather for a city.

    Args:
        city: The city to get weather for.
    """
    # Canned response; the tests only exercise the user-input pause flow.
    # NOTE: the docstring above feeds the tool schema shown to the model.
    return f"It is currently 70 degrees and cloudy in {city}"
def _make_agent(db=None):
    """Build the weather member agent whose tool pauses for user input on 'city'."""
    return Agent(
        name="Weather Agent",
        role="Provides weather information. Use the get_the_weather tool to get weather data.",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=db,
        telemetry=False,
    )
def _make_team(agent, db=None):
    """Build a team that must delegate all weather work to the given member."""
    delegation_rules = [
        "You MUST delegate all weather-related tasks to the Weather Agent.",
        "Do NOT try to answer weather questions yourself - always use the Weather Agent member.",
    ]
    return Team(
        name="Weather Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent],
        db=db,
        telemetry=False,
        instructions=delegation_rules,
    )
def test_member_user_input_pause(shared_db):
    """Team pauses when a member agent tool requires user input."""
    team = _make_team(_make_agent(db=shared_db), db=shared_db)
    run_output = team.run("Get me the weather for any city", session_id="test_user_input_pause")
    assert run_output.is_paused
    assert len(run_output.active_requirements) >= 1
    requirement = run_output.active_requirements[0]
    assert requirement.needs_user_input
    assert requirement.user_input_schema is not None
    # The paused requirement must surface the tool's declared input field.
    assert "city" in [field.name for field in requirement.user_input_schema]
def test_member_user_input_continue(shared_db):
    """Pause, supply the requested input, then continue_run completes."""
    team = _make_team(_make_agent(db=shared_db), db=shared_db)
    run_output = team.run("Get me the weather for any city", session_id="test_user_input_continue")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_user_input
    requirement.provide_user_input({"city": "Tokyo"})
    final = team.continue_run(run_output)
    assert not final.is_paused
    assert final.content is not None
@pytest.mark.asyncio
async def test_member_user_input_async(shared_db):
    """Async user input flow: pause, provide input, resume."""
    team = _make_team(_make_agent(db=shared_db), db=shared_db)
    run_output = await team.arun("Get me the weather for any city", session_id="test_user_input_async")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_user_input
    requirement.provide_user_input({"city": "Paris"})
    final = await team.acontinue_run(run_output)
    assert not final.is_paused
    assert final.content is not None
def test_member_user_input_invalid_field_name(shared_db):
    """Providing an invalid field name leaves the requirement unresolved."""
    team = _make_team(_make_agent(db=shared_db), db=shared_db)
    run_output = team.run("Get me the weather for any city", session_id="test_user_input_invalid_field")
    assert run_output.is_paused
    requirement = run_output.active_requirements[0]
    assert requirement.needs_user_input
    # An unknown field must not mark the requirement as resolved.
    requirement.provide_user_input({"nonexistent_field": "Tokyo"})
    assert not requirement.is_resolved()
    # Supplying the declared field does resolve it.
    requirement.provide_user_input({"city": "Tokyo"})
    assert requirement.is_resolved()
    final = team.continue_run(run_output)
    assert not final.is_paused
    assert final.content is not None
@pytest.mark.asyncio
async def test_member_user_input_async_streaming(shared_db):
    """Async streaming user input flow."""
    team = _make_team(_make_agent(db=shared_db), db=shared_db)
    pause = None
    event_stream = team.arun(
        "Get me the weather for any city",
        session_id="test_user_input_async_stream",
        stream=True,
        stream_events=True,
    )
    async for event in event_stream:
        # Stop at the team's own pause event (not the member agent's).
        if isinstance(event, TeamRunPausedEvent):
            pause = event
            break
    assert pause is not None
    assert pause.is_paused
    requirement = pause.requirements[0]
    assert requirement.needs_user_input
    requirement.provide_user_input({"city": "Berlin"})
    final = await team.acontinue_run(
        run_id=pause.run_id,
        session_id=pause.session_id,
        requirements=pause.requirements,
    )
    assert not final.is_paused
    assert final.content is not None
def test_member_user_input_streaming(shared_db):
    """Sync streaming user input flow."""
    team = _make_team(_make_agent(db=shared_db), db=shared_db)
    pause = None
    event_stream = team.run(
        "Get me the weather for any city",
        session_id="test_user_input_stream",
        stream=True,
        stream_events=True,
    )
    for event in event_stream:
        # Stop at the team's own pause event (not the member agent's).
        if isinstance(event, TeamRunPausedEvent):
            pause = event
            break
    assert pause is not None
    assert pause.is_paused
    requirement = pause.requirements[0]
    assert requirement.needs_user_input
    requirement.provide_user_input({"city": "London"})
    final = team.continue_run(
        run_id=pause.run_id,
        session_id=pause.session_id,
        requirements=pause.requirements,
    )
    assert not final.is_paused
    assert final.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/human_in_the_loop/test_team_user_input_flows.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
"""Integration tests for Team callable members based on session_state.
This tests the pattern where team members are dynamically selected at runtime
based on session_state values (e.g., needs_research flag).
Key scenarios tested:
1. Basic callable members selection based on session_state
2. Delegation to callable members (team leader must see member IDs)
3. System message contains resolved member information
"""
from agno.agent.agent import Agent
from agno.models.openai.chat import OpenAIChat
from agno.team.team import Team
def test_callable_members_selected_by_session_state(shared_db):
    """Team members are selected based on session_state at runtime."""
    writer = Agent(
        name="Writer",
        role="Content writer",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Write clear, concise content."],
    )
    researcher = Agent(
        name="Researcher",
        role="Research analyst",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Research topics and summarize findings."],
    )

    def pick_members(session_state: dict):
        """Include the researcher only when the flag asks for it."""
        if session_state.get("needs_research", False):
            return [researcher, writer]
        return [writer]

    team = Team(
        name="Content Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        instructions=["Coordinate the team to complete the task."],
        db=shared_db,
        telemetry=False,
    )
    # Without research only the writer is in the lineup.
    first_run = team.run(
        "Write a haiku about Python",
        session_state={"needs_research": False},
    )
    assert first_run is not None
    assert first_run.content is not None
    # With research the researcher joins the writer.
    second_run = team.run(
        "Research the history of Python and write a short summary",
        session_state={"needs_research": True},
    )
    assert second_run is not None
    assert second_run.content is not None
def test_callable_members_stream(shared_db):
    """Callable members work with streaming."""
    writer = Agent(
        name="Writer",
        role="Content writer",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    researcher = Agent(
        name="Researcher",
        role="Research analyst",
        model=OpenAIChat(id="gpt-4o-mini"),
    )

    def pick_members(session_state: dict):
        return [researcher, writer] if session_state.get("needs_research") else [writer]

    team = Team(
        name="Content Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        db=shared_db,
        telemetry=False,
    )
    # Stream with research enabled; consuming the iterator drives the run.
    events = list(
        team.run(
            "Write a short poem",
            session_state={"needs_research": True},
            stream=True,
        )
    )
    assert len(events) > 0
    final = team.get_last_run_output()
    assert final is not None
    assert final.content is not None
async def test_callable_members_async(shared_db):
    """Callable members work with async runs."""
    writer = Agent(
        name="Writer",
        role="Content writer",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    researcher = Agent(
        name="Researcher",
        role="Research analyst",
        model=OpenAIChat(id="gpt-4o-mini"),
    )

    def pick_members(session_state: dict):
        return [researcher, writer] if session_state.get("needs_research") else [writer]

    team = Team(
        name="Content Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        db=shared_db,
        telemetry=False,
    )
    result = await team.arun(
        "Write a short greeting",
        session_state={"needs_research": False},
    )
    assert result is not None
    assert result.content is not None
def test_callable_members_default_session_state(shared_db):
    """Callable members handle missing session_state keys gracefully."""
    writer = Agent(
        name="Writer",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    researcher = Agent(
        name="Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
    )

    def pick_members(session_state: dict):
        # With no "needs_research" key the selector falls back to the writer alone.
        if session_state.get("needs_research", False):
            return [researcher, writer]
        return [writer]

    team = Team(
        name="Content Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        db=shared_db,
        telemetry=False,
    )
    # An empty session_state must not raise and should use the default lineup.
    result = team.run(
        "Say hello",
        session_state={},
    )
    assert result is not None
    assert result.content is not None
def test_callable_members_complex_selection(shared_db):
    """Callable members can use multiple session_state values."""
    writer = Agent(
        name="Writer",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    researcher = Agent(
        name="Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    editor = Agent(
        name="Editor",
        model=OpenAIChat(id="gpt-4o-mini"),
    )

    def pick_members(session_state: dict):
        # Researcher leads, writer is always present, editor closes the pipeline.
        lineup = [writer]
        if session_state.get("needs_research"):
            lineup = [researcher] + lineup
        if session_state.get("needs_editing"):
            lineup = lineup + [editor]
        return lineup

    team = Team(
        name="Content Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        db=shared_db,
        telemetry=False,
    )
    # Full pipeline: research + write + edit.
    result = team.run(
        "Create a polished article",
        session_state={"needs_research": True, "needs_editing": True},
    )
    assert result is not None
    assert result.content is not None
def test_callable_members_delegation(shared_db):
    """Team leader can delegate to callable members by ID.

    The system message must contain resolved member IDs so the team leader
    can delegate tasks to dynamically resolved members.
    """
    # The writer has a distinctive behavior (verse) so delegation is observable.
    writer = Agent(
        name="Writer",
        role="Content writer who writes poems",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["You are a poet. Always write in verse with rhymes."],
    )
    researcher = Agent(
        name="Researcher",
        role="Research analyst who finds facts",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["You research topics and provide factual summaries."],
    )

    def pick_members(session_state: dict):
        if session_state.get("needs_research", False):
            return [researcher, writer]
        return [writer]

    team = Team(
        name="Content Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        instructions=[
            "You coordinate the team. Delegate writing tasks to the Writer.",
            "For research tasks, first delegate to Researcher, then to Writer.",
        ],
        db=shared_db,
        telemetry=False,
    )
    # The team should delegate this writing task to the writer.
    run_result = team.run(
        "Write a short haiku about the ocean",
        session_state={"needs_research": False},
    )
    assert run_result is not None
    assert run_result.content is not None
    # If member responses were recorded, the Writer must be among them.
    if run_result.member_responses:
        assert "Writer" in [mr.member_name for mr in run_result.member_responses]
def test_callable_members_system_message_contains_member_ids(shared_db):
    """System message must contain resolved member IDs for delegation to work.

    Core regression test: when members is a callable, the system message
    builder must use get_resolved_members() to obtain the actual member list
    instead of reading team.members directly (which would be the callable).
    Iterating the callable fails silently and yields empty content, leaving
    the team leader with no member IDs to delegate to.
    """
    writer = Agent(
        name="TestWriter",
        role="A test writer agent",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    researcher = Agent(
        name="TestResearcher",
        role="A test researcher agent",
        model=OpenAIChat(id="gpt-4o-mini"),
    )

    def pick_members(session_state: dict):
        return [researcher, writer] if session_state.get("include_researcher") else [writer]

    team = Team(
        name="Test Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        db=shared_db,
        telemetry=False,
    )
    # Running the team triggers system message generation.
    run_result = team.run(
        "Hello",
        session_state={"include_researcher": True},
    )
    assert run_result is not None
    assert run_result.messages is not None, "Response should have messages"
    system_messages = [m for m in run_result.messages if m.role == "system"]
    assert len(system_messages) > 0, "Should have at least one system message"
    system_content = system_messages[0].content
    assert system_content is not None, "System message should have content"
    # CRITICAL: both resolved member IDs must appear for delegation to work.
    assert "TestWriter" in system_content, (
        f"System message must contain 'TestWriter' for delegation. Got: {system_content[:500]}..."
    )
    assert "TestResearcher" in system_content, (
        f"System message must contain 'TestResearcher' for delegation. Got: {system_content[:500]}..."
    )
async def test_callable_members_delegation_async(shared_db):
    """Async delegation to callable members works correctly."""
    writer = Agent(
        name="AsyncWriter",
        role="Async content writer",
        model=OpenAIChat(id="gpt-4o-mini"),
    )

    def pick_members(session_state: dict):
        # A single fixed member regardless of state.
        return [writer]

    team = Team(
        name="Async Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=pick_members,
        cache_callables=False,
        instructions=["Delegate all writing tasks to the AsyncWriter."],
        db=shared_db,
        telemetry=False,
    )
    run_result = await team.arun(
        "Write a one-line greeting",
        session_state={},
    )
    assert run_result is not None
    assert run_result.content is not None
    # If member responses were recorded, the AsyncWriter must be among them.
    if run_result.member_responses:
        assert "AsyncWriter" in [mr.member_name for mr in run_result.member_responses]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_callable_members.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
"""Tests for Agent callable factory support (tools, knowledge)."""
from __future__ import annotations
from typing import Any, Dict, Optional
from unittest.mock import MagicMock
import pytest
from agno.agent.agent import Agent
from agno.run.base import RunContext
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.callables import (
aclear_callable_cache,
ainvoke_callable_factory,
aresolve_callable_tools,
clear_callable_cache,
get_resolved_knowledge,
get_resolved_tools,
invoke_callable_factory,
is_callable_factory,
resolve_callable_knowledge,
resolve_callable_tools,
)
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
def _make_run_context(
    user_id: Optional[str] = None,
    session_id: str = "test-session",
    session_state: Optional[Dict[str, Any]] = None,
) -> RunContext:
    """Build a RunContext with a fixed run_id and overridable identity fields."""
    ctx_kwargs: Dict[str, Any] = {
        "run_id": "test-run",
        "session_id": session_id,
        "user_id": user_id,
        "session_state": session_state,
    }
    return RunContext(**ctx_kwargs)
class _MockKnowledge:
    """A mock that satisfies KnowledgeProtocol.

    Provides the sync/async retrieval surface the Agent expects; every
    method returns a fixed, empty-ish value since these tests only care
    about storage and resolution, not retrieval results.
    """

    def build_context(self, **kwargs) -> str:
        # Fixed context string; content is never inspected by the tests.
        return "mock context"

    def get_tools(self, **kwargs):
        return []

    async def aget_tools(self, **kwargs):
        return []

    def retrieve(self, query: str, **kwargs):
        return []

    async def aretrieve(self, query: str, **kwargs):
        return []
def _dummy_tool(x: str) -> str:
    """Plain tool stub; echoes its input behind a fixed prefix."""
    return "result: " + x
def _another_tool(x: str) -> str:
    """Second tool stub, distinguishable from _dummy_tool by its prefix."""
    return "other: " + x
# ---------------------------------------------------------------------------
# is_callable_factory
# ---------------------------------------------------------------------------
class TestIsCallableFactory:
    """is_callable_factory: what counts as a factory and what is excluded."""

    def test_regular_function_is_factory(self):
        def candidate():
            return []

        assert is_callable_factory(candidate) is True

    def test_lambda_is_factory(self):
        assert is_callable_factory(lambda: []) is True

    def test_toolkit_not_factory(self):
        toolkit = Toolkit(name="test")
        assert is_callable_factory(toolkit, excluded_types=(Toolkit, Function)) is False

    def test_class_not_factory(self):
        # A class object is callable but must not be treated as a factory.
        assert is_callable_factory(Toolkit) is False

    def test_none_not_factory(self):
        assert is_callable_factory(None) is False

    def test_string_not_factory(self):
        assert is_callable_factory("hello") is False

    def test_list_not_factory(self):
        assert is_callable_factory([_dummy_tool]) is False
# ---------------------------------------------------------------------------
# invoke_callable_factory
# ---------------------------------------------------------------------------
class TestInvokeCallableFactory:
    """invoke_callable_factory: dependency injection by parameter name."""

    def test_no_args_factory(self):
        def make_tools():
            return [_dummy_tool]

        result = invoke_callable_factory(make_tools, Agent(name="test"), _make_run_context())
        assert result == [_dummy_tool]

    def test_agent_injection(self):
        seen = {}

        def make_tools(agent):
            seen["agent"] = agent
            return [_dummy_tool]

        owner = Agent(name="injected")
        invoke_callable_factory(make_tools, owner, _make_run_context())
        assert seen["agent"] is owner

    def test_run_context_injection(self):
        seen = {}

        def make_tools(run_context):
            seen["run_context"] = run_context
            return [_dummy_tool]

        ctx = _make_run_context(user_id="u1")
        invoke_callable_factory(make_tools, Agent(name="test"), ctx)
        assert seen["run_context"] is ctx

    def test_session_state_injection(self):
        seen = {}

        def make_tools(session_state):
            seen["session_state"] = session_state
            return [_dummy_tool]

        ctx = _make_run_context(session_state={"key": "val"})
        invoke_callable_factory(make_tools, Agent(name="test"), ctx)
        assert seen["session_state"] == {"key": "val"}

    def test_session_state_defaults_to_empty_dict(self):
        seen = {}

        def make_tools(session_state):
            seen["session_state"] = session_state
            return []

        ctx = _make_run_context(session_state=None)
        invoke_callable_factory(make_tools, Agent(name="test"), ctx)
        # A None session_state is injected as an empty dict, never None.
        assert seen["session_state"] == {}

    def test_multiple_params_injected(self):
        seen = {}

        def make_tools(agent, run_context, session_state):
            seen["agent"] = agent
            seen["run_context"] = run_context
            seen["session_state"] = session_state
            return [_dummy_tool]

        owner = Agent(name="multi")
        ctx = _make_run_context(session_state={"k": "v"})
        invoke_callable_factory(make_tools, owner, ctx)
        assert seen["agent"] is owner
        assert seen["run_context"] is ctx
        assert seen["session_state"] == {"k": "v"}

    def test_async_factory_raises_in_sync(self):
        async def make_tools():
            return [_dummy_tool]

        with pytest.raises(RuntimeError, match="cannot be used in sync mode"):
            invoke_callable_factory(make_tools, Agent(name="test"), _make_run_context())
# ---------------------------------------------------------------------------
# ainvoke_callable_factory
# ---------------------------------------------------------------------------
class TestAinvokeCallableFactory:
    """ainvoke_callable_factory accepts both sync and async factories."""

    @pytest.mark.asyncio
    async def test_sync_factory_works_in_async(self):
        def make_tools():
            return [_dummy_tool]

        result = await ainvoke_callable_factory(make_tools, Agent(name="test"), _make_run_context())
        assert result == [_dummy_tool]

    @pytest.mark.asyncio
    async def test_async_factory_awaited(self):
        async def make_tools(agent):
            return [_dummy_tool]

        result = await ainvoke_callable_factory(make_tools, Agent(name="test"), _make_run_context())
        assert result == [_dummy_tool]
# ---------------------------------------------------------------------------
# Agent callable tools storage
# ---------------------------------------------------------------------------
class TestAgentCallableToolsStorage:
    """How Agent stores tools passed as a callable vs a list."""

    def test_callable_stored_as_factory(self):
        def tools_factory():
            return [_dummy_tool]

        holder = Agent(name="test", tools=tools_factory)
        # The factory is stored as-is, not eagerly resolved into a list.
        assert callable(holder.tools)
        assert not isinstance(holder.tools, list)

    def test_list_stored_as_list(self):
        holder = Agent(name="test", tools=[_dummy_tool])
        assert isinstance(holder.tools, list)
        assert len(holder.tools) == 1

    def test_none_stored_as_empty_list(self):
        assert Agent(name="test").tools == []

    def test_toolkit_not_treated_as_factory(self):
        holder = Agent(name="test", tools=[Toolkit(name="test")])
        assert isinstance(holder.tools, list)

    def test_cache_dicts_initialized(self):
        holder = Agent(name="test")
        assert hasattr(holder, "_callable_tools_cache")
        assert hasattr(holder, "_callable_knowledge_cache")
        assert isinstance(holder._callable_tools_cache, dict)
        assert isinstance(holder._callable_knowledge_cache, dict)
# ---------------------------------------------------------------------------
# Agent callable knowledge storage
# ---------------------------------------------------------------------------
class TestAgentCallableKnowledgeStorage:
    """How Agent stores knowledge passed as a callable vs an instance."""

    def test_callable_knowledge_stored_as_factory(self):
        def knowledge_factory():
            return MagicMock()

        holder = Agent(name="test", knowledge=knowledge_factory)
        assert callable(holder.knowledge)

    def test_knowledge_instance_stored_directly(self):
        knowledge = MagicMock()
        # Satisfy KnowledgeProtocol (build_context, get_tools, aget_tools).
        knowledge.build_context = MagicMock()
        knowledge.get_tools = MagicMock()
        knowledge.aget_tools = MagicMock()
        holder = Agent(name="test", knowledge=knowledge)
        assert holder.knowledge is knowledge
# ---------------------------------------------------------------------------
# resolve_callable_tools
# ---------------------------------------------------------------------------
class TestResolveCallableTools:
    """resolve_callable_tools: resolution, validation, and caching semantics."""

    def test_static_tools_noop(self):
        agent = Agent(name="test", tools=[_dummy_tool])
        ctx = _make_run_context()
        resolve_callable_tools(agent, ctx)
        # Static lists are never copied onto the context.
        assert ctx.tools is None

    def test_factory_resolved_and_stored_on_context(self):
        agent = Agent(name="test", tools=lambda: [_dummy_tool, _another_tool])
        ctx = _make_run_context(user_id="user1")
        resolve_callable_tools(agent, ctx)
        assert ctx.tools == [_dummy_tool, _another_tool]

    def test_factory_none_result_becomes_empty_list(self):
        agent = Agent(name="test", tools=lambda: None)
        ctx = _make_run_context(user_id="user1")
        resolve_callable_tools(agent, ctx)
        assert ctx.tools == []

    def test_factory_invalid_return_raises(self):
        agent = Agent(name="test", tools=lambda: "not a list")
        ctx = _make_run_context(user_id="user1")
        with pytest.raises(TypeError, match="must return a list or tuple"):
            resolve_callable_tools(agent, ctx)

    def test_caching_by_user_id(self):
        calls = 0

        def build_tools():
            nonlocal calls
            calls += 1
            return [_dummy_tool]

        agent = Agent(name="test", tools=build_tools)
        resolve_callable_tools(agent, _make_run_context(user_id="user1"))
        assert calls == 1
        repeat_ctx = _make_run_context(user_id="user1")
        resolve_callable_tools(agent, repeat_ctx)
        assert calls == 1  # served from cache
        assert repeat_ctx.tools == [_dummy_tool]

    def test_different_cache_key_invokes_again(self):
        calls = 0

        def build_tools(run_context):
            nonlocal calls
            calls += 1
            return [_dummy_tool] if run_context.user_id == "user1" else [_another_tool]

        agent = Agent(name="test", tools=build_tools)
        resolve_callable_tools(agent, _make_run_context(user_id="user1"))
        assert calls == 1
        # A different user maps to a different cache key.
        resolve_callable_tools(agent, _make_run_context(user_id="user2"))
        assert calls == 2

    def test_cache_disabled_invokes_every_time(self):
        calls = 0

        def build_tools():
            nonlocal calls
            calls += 1
            return [_dummy_tool]

        agent = Agent(name="test", tools=build_tools, cache_callables=False)
        resolve_callable_tools(agent, _make_run_context(user_id="user1"))
        assert calls == 1
        resolve_callable_tools(agent, _make_run_context(user_id="user1"))
        assert calls == 2

    def test_no_cache_key_skips_caching(self):
        calls = 0

        def build_tools():
            nonlocal calls
            calls += 1
            return [_dummy_tool]

        agent = Agent(name="test", tools=build_tools)
        # RunContext types session_id as non-optional, so force None after
        # construction to simulate "no cache key available".
        first = _make_run_context(user_id=None, session_id=None)
        first.session_id = None  # type: ignore[assignment]
        first.user_id = None
        resolve_callable_tools(agent, first)
        assert calls == 1
        second = _make_run_context(user_id=None, session_id=None)
        second.session_id = None  # type: ignore[assignment]
        second.user_id = None
        resolve_callable_tools(agent, second)
        assert calls == 2  # no cache key -> factory runs again

    def test_custom_cache_key_function(self):
        calls = 0

        def build_tools():
            nonlocal calls
            calls += 1
            return [_dummy_tool]

        def custom_key(run_context):
            return f"custom-{run_context.user_id}"

        agent = Agent(name="test", tools=build_tools, callable_tools_cache_key=custom_key)
        resolve_callable_tools(agent, _make_run_context(user_id="u1"))
        assert calls == 1
        resolve_callable_tools(agent, _make_run_context(user_id="u1"))
        assert calls == 1  # cached under the custom key

    def test_cache_key_falls_back_to_session_id(self):
        calls = 0

        def build_tools():
            nonlocal calls
            calls += 1
            return [_dummy_tool]

        agent = Agent(name="test", tools=build_tools)
        # No user_id: the session_id becomes the cache key.
        resolve_callable_tools(agent, _make_run_context(user_id=None, session_id="sess1"))
        assert calls == 1
        resolve_callable_tools(agent, _make_run_context(user_id=None, session_id="sess1"))
        assert calls == 1  # cached by session_id
# ---------------------------------------------------------------------------
# aresolve_callable_tools (async)
# ---------------------------------------------------------------------------
class TestAresolveCallableTools:
    """aresolve_callable_tools mirrors the sync resolver for async factories."""

    @pytest.mark.asyncio
    async def test_async_factory_resolved(self):
        async def build_tools(agent):
            return [_dummy_tool]

        agent = Agent(name="test", tools=build_tools)
        ctx = _make_run_context(user_id="u1")
        await aresolve_callable_tools(agent, ctx)
        assert ctx.tools == [_dummy_tool]

    @pytest.mark.asyncio
    async def test_sync_factory_works_in_async(self):
        agent = Agent(name="test", tools=lambda: [_dummy_tool])
        ctx = _make_run_context(user_id="u1")
        await aresolve_callable_tools(agent, ctx)
        assert ctx.tools == [_dummy_tool]
# ---------------------------------------------------------------------------
# resolve_callable_knowledge
# ---------------------------------------------------------------------------
class TestResolveCallableKnowledge:
    """resolve_callable_knowledge: resolution and caching."""

    def _make_mock_knowledge(self):
        """Create a mock that satisfies KnowledgeProtocol."""
        mock = MagicMock()
        mock.build_context = MagicMock(return_value="context")
        mock.get_tools = MagicMock(return_value=[])
        mock.aget_tools = MagicMock(return_value=[])
        mock.retrieve = MagicMock(return_value=[])
        mock.aretrieve = MagicMock(return_value=[])
        return mock

    def test_static_knowledge_noop(self):
        agent = Agent(name="test", knowledge=self._make_mock_knowledge())
        ctx = _make_run_context()
        resolve_callable_knowledge(agent, ctx)
        # Static knowledge is never copied onto the context.
        assert ctx.knowledge is None

    def test_factory_resolved(self):
        knowledge = self._make_mock_knowledge()
        agent = Agent(name="test", knowledge=lambda: knowledge)
        ctx = _make_run_context(user_id="u1")
        resolve_callable_knowledge(agent, ctx)
        assert ctx.knowledge is knowledge

    def test_factory_caching(self):
        calls = 0

        def build_knowledge():
            nonlocal calls
            calls += 1
            return self._make_mock_knowledge()

        agent = Agent(name="test", knowledge=build_knowledge)
        resolve_callable_knowledge(agent, _make_run_context(user_id="u1"))
        assert calls == 1
        resolve_callable_knowledge(agent, _make_run_context(user_id="u1"))
        assert calls == 1  # cached
# ---------------------------------------------------------------------------
# get_resolved_tools / get_resolved_knowledge
# ---------------------------------------------------------------------------
class TestGetResolvedHelpers:
    """get_resolved_tools / get_resolved_knowledge lookup order."""

    def test_get_resolved_tools_from_context(self):
        agent = Agent(name="test", tools=lambda: [_dummy_tool])
        ctx = _make_run_context()
        ctx.tools = [_dummy_tool]
        assert get_resolved_tools(agent, ctx) == [_dummy_tool]

    def test_get_resolved_tools_from_static(self):
        agent = Agent(name="test", tools=[_dummy_tool])
        assert get_resolved_tools(agent, _make_run_context()) == [_dummy_tool]

    def test_get_resolved_tools_factory_no_context(self):
        agent = Agent(name="test", tools=lambda: [_dummy_tool])
        # Factory never resolved and the context carries no tools.
        assert get_resolved_tools(agent, _make_run_context()) is None

    def test_get_resolved_knowledge_from_context(self):
        knowledge = MagicMock()
        ctx = _make_run_context()
        ctx.knowledge = knowledge
        assert get_resolved_knowledge(Agent(name="test"), ctx) is knowledge

    def test_get_resolved_knowledge_static(self):
        knowledge = _MockKnowledge()
        agent = Agent(name="test", knowledge=knowledge)
        assert get_resolved_knowledge(agent, _make_run_context()) is knowledge
# ---------------------------------------------------------------------------
# clear_callable_cache
# ---------------------------------------------------------------------------
class TestClearCallableCache:
    """clear_callable_cache / aclear_callable_cache: scoping and close semantics."""

    def test_clear_all(self):
        agent = Agent(name="test")
        agent._callable_tools_cache["key"] = [_dummy_tool]
        agent._callable_knowledge_cache["key"] = MagicMock()
        clear_callable_cache(agent)
        assert len(agent._callable_tools_cache) == 0
        assert len(agent._callable_knowledge_cache) == 0

    def test_clear_tools_only(self):
        agent = Agent(name="test")
        agent._callable_tools_cache["key"] = [_dummy_tool]
        agent._callable_knowledge_cache["key"] = MagicMock()
        clear_callable_cache(agent, kind="tools")
        assert len(agent._callable_tools_cache) == 0
        assert len(agent._callable_knowledge_cache) == 1

    def test_clear_knowledge_only(self):
        agent = Agent(name="test")
        agent._callable_tools_cache["key"] = [_dummy_tool]
        agent._callable_knowledge_cache["key"] = MagicMock()
        clear_callable_cache(agent, kind="knowledge")
        assert len(agent._callable_tools_cache) == 1
        assert len(agent._callable_knowledge_cache) == 0

    def test_close_calls_close_on_cached_tools(self):
        closeable = MagicMock()
        closeable.close = MagicMock(return_value=None)
        agent = Agent(name="test")
        agent._callable_tools_cache["key"] = [closeable]
        clear_callable_cache(agent, kind="tools", close=True)
        closeable.close.assert_called_once()

    def test_close_deduplicates_by_identity(self):
        shared = MagicMock()
        shared.close = MagicMock(return_value=None)
        agent = Agent(name="test")
        # The same instance cached under two keys must be closed only once.
        agent._callable_tools_cache["key1"] = [shared]
        agent._callable_tools_cache["key2"] = [shared]
        clear_callable_cache(agent, kind="tools", close=True)
        shared.close.assert_called_once()

    @pytest.mark.asyncio
    async def test_aclear_prefers_aclose(self):
        aclose_seen = False

        async def fake_aclose():
            nonlocal aclose_seen
            aclose_seen = True

        resource = MagicMock()
        resource.aclose = fake_aclose
        agent = Agent(name="test")
        agent._callable_tools_cache["key"] = [resource]
        await aclear_callable_cache(agent, kind="tools", close=True)
        assert aclose_seen
# ---------------------------------------------------------------------------
# Agent.add_tool guard
# ---------------------------------------------------------------------------
class TestAddToolGuard:
    """add_tool must refuse to mutate tools when they come from a callable factory."""
    def test_add_tool_raises_with_callable_factory(self):
        """add_tool raises RuntimeError when agent.tools is a factory (nothing to append to)."""
        from agno.agent._init import add_tool
        agent = Agent(name="test", tools=lambda: [_dummy_tool])
        with pytest.raises(RuntimeError, match="Cannot add_tool.*when tools is a callable factory"):
            add_tool(agent, _another_tool)
    def test_add_tool_works_with_list(self):
        """add_tool appends normally when agent.tools is a plain list."""
        from agno.agent._init import add_tool
        agent = Agent(name="test", tools=[_dummy_tool])
        add_tool(agent, _another_tool)
        assert len(agent.tools) == 2  # type: ignore[arg-type]
# ---------------------------------------------------------------------------
# Agent.set_tools
# ---------------------------------------------------------------------------
class TestSetTools:
    """set_tools accepts both factories and plain lists, and resets the resolved cache."""
    def test_set_tools_with_callable(self):
        """A factory passed to set_tools is stored as-is (kept callable)."""
        from agno.agent._init import set_tools
        agent = Agent(name="test", tools=[_dummy_tool])
        def new_factory():
            return [_another_tool]
        set_tools(agent, new_factory)
        assert callable(agent.tools)
    def test_set_tools_clears_cache(self):
        """set_tools discards previously cached resolved tools."""
        from agno.agent._init import set_tools
        agent = Agent(name="test")
        agent._callable_tools_cache["old_key"] = [_dummy_tool]
        def new_factory():
            return [_another_tool]
        set_tools(agent, new_factory)
        assert len(agent._callable_tools_cache) == 0
    def test_set_tools_with_list(self):
        """A plain list passed to set_tools is stored unchanged."""
        from agno.agent._init import set_tools
        agent = Agent(name="test")
        set_tools(agent, [_dummy_tool, _another_tool])
        assert isinstance(agent.tools, list)
        assert len(agent.tools) == 2
# ---------------------------------------------------------------------------
# Agent config fields
# ---------------------------------------------------------------------------
class TestAgentConfigFields:
    """Agent constructor fields that configure callable-resource caching."""
    def test_cache_callables_default_true(self):
        """Caching of factory results is on by default."""
        agent = Agent(name="test")
        assert agent.cache_callables is True
    def test_cache_callables_configurable(self):
        """cache_callables=False is stored as given."""
        agent = Agent(name="test", cache_callables=False)
        assert agent.cache_callables is False
    def test_callable_cache_key_functions(self):
        """Custom cache-key functions are stored by reference, not wrapped."""
        def my_key(run_context):
            return "custom"
        agent = Agent(
            name="test",
            callable_tools_cache_key=my_key,
            callable_knowledge_cache_key=my_key,
        )
        assert agent.callable_tools_cache_key is my_key
        assert agent.callable_knowledge_cache_key is my_key
# ---------------------------------------------------------------------------
# Agent deep_copy with callable tools
# ---------------------------------------------------------------------------
class TestAgentDeepCopyCallable:
    """deep_copy must not attempt to deep-copy a tools factory."""
    def test_deep_copy_preserves_callable_factory(self):
        """A callable tools factory survives deep_copy as the same object."""
        def factory():
            return [_dummy_tool]
        agent = Agent(name="test", tools=factory)
        copied = agent.deep_copy()
        # The factory should be shared by reference (not deep-copied)
        assert copied.tools is agent.tools
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_callable_resources.py",
"license": "Apache License 2.0",
"lines": 543,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_callable_resources.py | """Tests for Team callable factory support (tools, knowledge, members)."""
from __future__ import annotations
from typing import Any, Dict, Optional
import pytest
from agno.agent.agent import Agent
from agno.run.base import RunContext
from agno.utils.callables import (
aresolve_callable_members,
aresolve_callable_tools,
clear_callable_cache,
get_resolved_members,
resolve_callable_members,
resolve_callable_tools,
)
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
def _make_run_context(
    user_id: Optional[str] = None,
    session_id: str = "test-session",
    session_state: Optional[Dict[str, Any]] = None,
) -> RunContext:
    """Build a minimal RunContext for tests with a fixed run id."""
    kwargs: Dict[str, Any] = {
        "run_id": "test-run",
        "session_id": session_id,
        "user_id": user_id,
        "session_state": session_state,
    }
    return RunContext(**kwargs)
def _dummy_tool(x: str) -> str:
return f"result: {x}"
def _another_tool(x: str) -> str:
return f"other: {x}"
def _make_team(**kwargs):
    """Create a Team with minimal config; callers may override any field."""
    from agno.team.team import Team

    config = {"name": "test-team", **kwargs}
    # Members must be provided (list or callable); supply a default member
    # only when the caller did not pass any.
    config.setdefault("members", [Agent(name="member-1")])
    return Team(**config)
# ---------------------------------------------------------------------------
# Team callable tools
# ---------------------------------------------------------------------------
class TestTeamCallableTools:
    """Team.tools may be a callable factory, resolved lazily per run."""
    def test_callable_tools_stored_as_factory(self):
        """A factory passed as tools stays callable, not eagerly resolved."""
        def tools_factory():
            return [_dummy_tool]
        team = _make_team(tools=tools_factory)
        assert callable(team.tools)
        assert not isinstance(team.tools, list)
    def test_list_tools_stored_as_list(self):
        """A plain list of tools is stored as a list."""
        team = _make_team(tools=[_dummy_tool])
        assert isinstance(team.tools, list)
    def test_resolve_callable_tools(self):
        """resolve_callable_tools invokes the factory and stores the result on the RunContext."""
        def factory(team):
            return [_dummy_tool]
        team = _make_team(tools=factory)
        rc = _make_run_context(user_id="u1")
        resolve_callable_tools(team, rc)
        assert rc.tools == [_dummy_tool]
    def test_tools_caching(self):
        """Resolving twice with the same cache key invokes the factory once."""
        call_count = 0
        def factory():
            nonlocal call_count
            call_count += 1
            return [_dummy_tool]
        team = _make_team(tools=factory)
        rc1 = _make_run_context(user_id="u1")
        resolve_callable_tools(team, rc1)
        assert call_count == 1
        rc2 = _make_run_context(user_id="u1")
        resolve_callable_tools(team, rc2)
        assert call_count == 1  # Cached
    def test_cache_disabled(self):
        """cache_callables=False forces the factory to run on every resolution."""
        call_count = 0
        def factory():
            nonlocal call_count
            call_count += 1
            return [_dummy_tool]
        team = _make_team(tools=factory, cache_callables=False)
        rc1 = _make_run_context(user_id="u1")
        resolve_callable_tools(team, rc1)
        rc2 = _make_run_context(user_id="u1")
        resolve_callable_tools(team, rc2)
        assert call_count == 2
# ---------------------------------------------------------------------------
# Team callable members
# ---------------------------------------------------------------------------
class TestTeamCallableMembers:
    """Team.members may be a callable factory, resolved lazily per run."""
    def test_callable_members_stored_as_factory(self):
        """A members factory stays callable, not eagerly resolved."""
        def members_factory():
            return [Agent(name="dynamic-agent")]
        team = _make_team(members=members_factory)
        assert callable(team.members)
        assert not isinstance(team.members, list)
    def test_list_members_stored_as_list(self):
        """A plain list of member agents is stored as a list."""
        agents = [Agent(name="a1"), Agent(name="a2")]
        team = _make_team(members=agents)
        assert isinstance(team.members, list)
        assert len(team.members) == 2
    def test_resolve_callable_members(self):
        """resolve_callable_members invokes the factory and stores members on the RunContext."""
        agent_a = Agent(name="agent-a")
        agent_b = Agent(name="agent-b")
        def factory(team):
            return [agent_a, agent_b]
        team = _make_team(members=factory)
        rc = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc)
        assert rc.members == [agent_a, agent_b]
    def test_members_caching(self):
        """Resolving twice with the same cache key calls the factory once."""
        call_count = 0
        agent_a = Agent(name="agent-a")
        def factory():
            nonlocal call_count
            call_count += 1
            return [agent_a]
        team = _make_team(members=factory)
        rc1 = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc1)
        assert call_count == 1
        rc2 = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc2)
        assert call_count == 1  # Cached
    def test_members_different_keys(self):
        """Different users (different cache keys) each trigger a fresh factory call."""
        call_count = 0
        def factory(run_context):
            nonlocal call_count
            call_count += 1
            return [Agent(name=f"agent-{run_context.user_id}")]
        team = _make_team(members=factory)
        rc1 = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc1)
        rc2 = _make_run_context(user_id="u2")
        resolve_callable_members(team, rc2)
        assert call_count == 2
    def test_members_none_result_becomes_empty_list(self):
        """A factory returning None resolves to an empty member list."""
        def factory():
            return None
        team = _make_team(members=factory)
        rc = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc)
        assert rc.members == []
    def test_members_invalid_return_raises(self):
        """A factory returning a non-sequence raises TypeError."""
        def factory():
            return "not a list"
        team = _make_team(members=factory)
        rc = _make_run_context(user_id="u1")
        with pytest.raises(TypeError, match="must return a list or tuple"):
            resolve_callable_members(team, rc)
    def test_custom_members_cache_key(self):
        """A custom cache-key function controls member-cache hits."""
        call_count = 0
        def factory():
            nonlocal call_count
            call_count += 1
            return [Agent(name="a")]
        def custom_key(run_context):
            return f"tenant-{run_context.user_id}"
        team = _make_team(
            members=factory,
            callable_members_cache_key=custom_key,
        )
        rc1 = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc1)
        assert call_count == 1
        rc2 = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc2)
        assert call_count == 1
# ---------------------------------------------------------------------------
# Async team members
# ---------------------------------------------------------------------------
class TestAsyncTeamMembers:
    """aresolve_callable_members supports both async and sync factories."""
    @pytest.mark.asyncio
    async def test_async_members_factory(self):
        """An async members factory is awaited and its result stored on the RunContext."""
        agent_a = Agent(name="async-a")
        async def factory(team):
            return [agent_a]
        team = _make_team(members=factory)
        rc = _make_run_context(user_id="u1")
        await aresolve_callable_members(team, rc)
        assert rc.members == [agent_a]
    @pytest.mark.asyncio
    async def test_sync_members_factory_in_async(self):
        """A plain sync factory also works through the async resolution path."""
        agent_a = Agent(name="sync-a")
        def factory():
            return [agent_a]
        team = _make_team(members=factory)
        rc = _make_run_context(user_id="u1")
        await aresolve_callable_members(team, rc)
        assert rc.members == [agent_a]
# ---------------------------------------------------------------------------
# Team cache clearing
# ---------------------------------------------------------------------------
class TestTeamClearCache:
    """clear_callable_cache on a Team clears tools/members caches selectively."""
    def test_clear_all(self):
        """With no kind, both the tools and members caches are emptied."""
        team = _make_team()
        team._callable_tools_cache["key"] = [_dummy_tool]
        team._callable_members_cache["key"] = [Agent(name="a")]
        clear_callable_cache(team)
        assert len(team._callable_tools_cache) == 0
        assert len(team._callable_members_cache) == 0
    def test_clear_members_only(self):
        """kind='members' empties only the members cache."""
        team = _make_team()
        team._callable_tools_cache["key"] = [_dummy_tool]
        team._callable_members_cache["key"] = [Agent(name="a")]
        clear_callable_cache(team, kind="members")
        assert len(team._callable_tools_cache) == 1
        assert len(team._callable_members_cache) == 0
# ---------------------------------------------------------------------------
# Team config fields
# ---------------------------------------------------------------------------
class TestTeamConfigFields:
    """Team constructor fields that configure callable-resource caching."""
    def test_cache_callables_default_true(self):
        """Caching of factory results is on by default."""
        team = _make_team()
        assert team.cache_callables is True
    def test_cache_callables_configurable(self):
        """cache_callables=False is stored as given."""
        team = _make_team(cache_callables=False)
        assert team.cache_callables is False
    def test_callable_cache_key_functions(self):
        """Custom cache-key functions are stored by reference, not wrapped."""
        def my_key(run_context):
            return "custom"
        team = _make_team(
            callable_tools_cache_key=my_key,
            callable_members_cache_key=my_key,
        )
        assert team.callable_tools_cache_key is my_key
        assert team.callable_members_cache_key is my_key
# ---------------------------------------------------------------------------
# Team add_tool guard
# ---------------------------------------------------------------------------
class TestTeamAddToolGuard:
    """Team add_tool must refuse to mutate tools when they come from a factory."""
    def test_add_tool_raises_with_callable_factory(self):
        """add_tool raises RuntimeError when team.tools is a callable factory."""
        from agno.team._init import add_tool
        team = _make_team(tools=lambda: [_dummy_tool])
        with pytest.raises(RuntimeError, match="Cannot add_tool.*when tools is a callable factory"):
            add_tool(team, _another_tool)
# ---------------------------------------------------------------------------
# Team set_tools
# ---------------------------------------------------------------------------
class TestTeamSetTools:
    """Team set_tools accepts factories and resets the resolved-tools cache."""
    def test_set_tools_with_callable(self):
        """A factory passed to set_tools is stored as-is (kept callable)."""
        from agno.team._init import set_tools
        team = _make_team(tools=[_dummy_tool])
        def new_factory():
            return [_another_tool]
        set_tools(team, new_factory)
        assert callable(team.tools)
    def test_set_tools_clears_cache(self):
        """set_tools discards previously cached resolved tools."""
        from agno.team._init import set_tools
        team = _make_team()
        team._callable_tools_cache["old"] = [_dummy_tool]
        set_tools(team, lambda: [_another_tool])
        assert len(team._callable_tools_cache) == 0
# ---------------------------------------------------------------------------
# get_resolved_members
# ---------------------------------------------------------------------------
class TestGetResolvedMembers:
    """get_resolved_members prefers RunContext members, falls back to static lists."""
    def test_from_context(self):
        """Members already resolved onto the RunContext are returned from there."""
        agents = [Agent(name="a")]
        team = _make_team(members=lambda: agents)
        rc = _make_run_context()
        rc.members = agents
        result = get_resolved_members(team, rc)
        assert result == agents
    def test_from_static(self):
        """With a static member list and no context members, the list is returned."""
        agents = [Agent(name="a")]
        team = _make_team(members=agents)
        rc = _make_run_context()
        result = get_resolved_members(team, rc)
        assert result == agents
    def test_callable_not_resolved(self):
        """A factory that was never resolved yields None (never invoked implicitly)."""
        team = _make_team(members=lambda: [Agent(name="a")])
        rc = _make_run_context()
        result = get_resolved_members(team, rc)
        assert result is None
# ---------------------------------------------------------------------------
# Async team callable tools
# ---------------------------------------------------------------------------
class TestAsyncTeamCallableTools:
    """aresolve_callable_tools supports async and sync factories, with caching."""
    @pytest.mark.asyncio
    async def test_async_tools_factory(self):
        """An async tools factory is awaited and its result stored on the RunContext."""
        async def factory(team):
            return [_dummy_tool]
        team = _make_team(tools=factory)
        rc = _make_run_context(user_id="u1")
        await aresolve_callable_tools(team, rc)
        assert rc.tools == [_dummy_tool]
    @pytest.mark.asyncio
    async def test_sync_tools_factory_in_async(self):
        """A plain sync factory also works through the async resolution path."""
        def factory():
            return [_dummy_tool]
        team = _make_team(tools=factory)
        rc = _make_run_context(user_id="u1")
        await aresolve_callable_tools(team, rc)
        assert rc.tools == [_dummy_tool]
    @pytest.mark.asyncio
    async def test_async_tools_factory_caching(self):
        """Async resolution caches results the same way as the sync path."""
        call_count = 0
        async def factory():
            nonlocal call_count
            call_count += 1
            return [_dummy_tool]
        team = _make_team(tools=factory)
        rc1 = _make_run_context(user_id="u1")
        await aresolve_callable_tools(team, rc1)
        assert call_count == 1
        rc2 = _make_run_context(user_id="u1")
        await aresolve_callable_tools(team, rc2)
        assert call_count == 1  # Cached
# ---------------------------------------------------------------------------
# _find_member_by_id with run_context
# ---------------------------------------------------------------------------
class TestFindMemberByIdWithRunContext:
    """_find_member_by_id sees static members always, factory members only via RunContext."""
    def test_find_static_member(self):
        """A member from a static list is found without a run_context."""
        from agno.team._tools import _find_member_by_id
        agent = Agent(name="member-1")
        team = _make_team(members=[agent])
        from agno.team._tools import get_member_id
        member_id = get_member_id(agent)
        result = _find_member_by_id(team, member_id)
        assert result is not None
        assert result[1] is agent
    def test_find_callable_member_via_run_context(self):
        """After resolution, a factory-produced member is found via run_context."""
        from agno.team._tools import _find_member_by_id, get_member_id
        agent = Agent(name="dynamic-agent")
        def factory():
            return [agent]
        team = _make_team(members=factory)
        rc = _make_run_context(user_id="u1")
        resolve_callable_members(team, rc)
        member_id = get_member_id(agent)
        result = _find_member_by_id(team, member_id, run_context=rc)
        assert result is not None
        assert result[1] is agent
    def test_find_callable_member_without_run_context_fails(self):
        """Factory-produced members are invisible without the resolving run_context."""
        from agno.team._tools import _find_member_by_id, get_member_id
        agent = Agent(name="dynamic-agent")
        def factory():
            return [agent]
        team = _make_team(members=factory)
        member_id = get_member_id(agent)
        # Without run_context, callable members are not visible
        result = _find_member_by_id(team, member_id)
        assert result is None
# ---------------------------------------------------------------------------
# Team deep_copy with callable factories
# ---------------------------------------------------------------------------
class TestTeamDeepCopyCallableFactories:
    """deep_copy shares factories by reference and keeps static lists as lists."""
    def test_deep_copy_with_callable_tools(self):
        """A tools factory survives deep_copy as the same object."""
        def tools_factory():
            return [_dummy_tool]
        team = _make_team(tools=tools_factory)
        copy = team.deep_copy()
        assert copy.tools is tools_factory
    def test_deep_copy_with_callable_members(self):
        """A members factory survives deep_copy as the same object."""
        def members_factory():
            return [Agent(name="a")]
        team = _make_team(members=members_factory)
        copy = team.deep_copy()
        assert copy.members is members_factory
    def test_deep_copy_with_static_tools(self):
        """Static tools remain a list after deep_copy."""
        team = _make_team(tools=[_dummy_tool])
        copy = team.deep_copy()
        assert isinstance(copy.tools, list)
    def test_deep_copy_with_static_members(self):
        """Static members remain a list after deep_copy."""
        agents = [Agent(name="a")]
        team = _make_team(members=agents)
        copy = team.deep_copy()
        assert isinstance(copy.members, list)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_callable_resources.py",
"license": "Apache License 2.0",
"lines": 371,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_continue_run_requirements.py | """Tests for Team continue_run helpers (propagation, routing, normalization)."""
from unittest.mock import MagicMock, patch
from agno.models.response import ToolExecution
from agno.run.requirement import RunRequirement
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_tool_execution(**overrides) -> ToolExecution:
    """Build a ToolExecution with test defaults; keyword overrides win."""
    params = {"tool_name": "do_something", "tool_args": {"x": 1}, **overrides}
    return ToolExecution(**params)
def _make_requirement(**te_overrides) -> RunRequirement:
    """Wrap a default ToolExecution (with overrides) in a RunRequirement."""
    tool_execution = _make_tool_execution(**te_overrides)
    return RunRequirement(tool_execution=tool_execution)
# ===========================================================================
# 1. _propagate_member_pause
# ===========================================================================
class TestPropagateMemberPause:
    """_propagate_member_pause copies a paused member's requirements onto the
    team run response, tagging each copy with member identity/run context."""
    def test_copies_requirements_with_member_context(self):
        """Copied requirements carry member_agent_id/name and member_run_id."""
        from agno.team._tools import _propagate_member_pause
        # Create a mock member agent
        member_agent = MagicMock()
        member_agent.name = "Research Agent"
        # Create a member run response with requirements
        member_run_response = MagicMock()
        req = _make_requirement(requires_confirmation=True)
        member_run_response.requirements = [req]
        member_run_response.run_id = "member-run-123"
        # Create team run response
        run_response = MagicMock()
        run_response.requirements = None
        with patch("agno.team._tools.get_member_id", return_value="member-id-abc"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        assert run_response.requirements is not None
        assert len(run_response.requirements) == 1
        copied_req = run_response.requirements[0]
        assert copied_req.member_agent_id == "member-id-abc"
        assert copied_req.member_agent_name == "Research Agent"
        assert copied_req.member_run_id == "member-run-123"
    def test_deep_copies_requirements(self):
        """Modifying the copied requirement must not affect the original."""
        from agno.team._tools import _propagate_member_pause
        member_agent = MagicMock()
        member_agent.name = "Agent"
        req = _make_requirement(requires_confirmation=True)
        member_run_response = MagicMock()
        member_run_response.requirements = [req]
        member_run_response.run_id = "run-1"
        run_response = MagicMock()
        run_response.requirements = None
        with patch("agno.team._tools.get_member_id", return_value="id-1"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        # Modify the copied requirement
        run_response.requirements[0].member_agent_id = "changed"
        # Original should be unaffected
        assert req.member_agent_id is None
    def test_user_input_schema_is_deeply_copied(self):
        """Mutating the copied user_input_schema must not affect the original."""
        from agno.team._tools import _propagate_member_pause
        from agno.tools.function import UserInputField
        member_agent = MagicMock()
        member_agent.name = "Agent"
        req = _make_requirement(
            requires_user_input=True,
            user_input_schema=[UserInputField(name="city", field_type=str)],
        )
        original_schema = req.tool_execution.user_input_schema
        member_run_response = MagicMock()
        member_run_response.requirements = [req]
        member_run_response.run_id = "run-1"
        run_response = MagicMock()
        run_response.requirements = None
        with patch("agno.team._tools.get_member_id", return_value="id-1"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        copied_req = run_response.requirements[0]
        # Mutate the copy's user_input_schema
        copied_req.user_input_schema[0].value = "Tokyo"
        # Original user_input_schema should be unaffected
        assert original_schema[0].value is None
        # The requirement-level schema should also be isolated
        assert req.user_input_schema[0].value is None
    def test_tool_execution_is_deeply_copied(self):
        """Mutating the copied tool_execution must not affect the original."""
        from agno.team._tools import _propagate_member_pause
        member_agent = MagicMock()
        member_agent.name = "Agent"
        req = _make_requirement(requires_confirmation=True)
        original_tool_execution = req.tool_execution
        member_run_response = MagicMock()
        member_run_response.requirements = [req]
        member_run_response.run_id = "run-1"
        run_response = MagicMock()
        run_response.requirements = None
        with patch("agno.team._tools.get_member_id", return_value="id-1"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        copied_req = run_response.requirements[0]
        # Mutate the copy's tool_execution
        copied_req.tool_execution.confirmed = True
        # Original tool_execution should be unaffected
        assert original_tool_execution.confirmed is None
    def test_empty_requirements_does_nothing(self):
        """A member response with no requirements leaves the team response untouched."""
        from agno.team._tools import _propagate_member_pause
        member_agent = MagicMock()
        member_run_response = MagicMock()
        member_run_response.requirements = []
        run_response = MagicMock()
        run_response.requirements = None
        _propagate_member_pause(run_response, member_agent, member_run_response)
        # requirements should stay None since nothing was added
        assert run_response.requirements is None
    def test_multiple_requirements_all_copied(self):
        """Every requirement from the member response is copied and tagged."""
        from agno.team._tools import _propagate_member_pause
        member_agent = MagicMock()
        member_agent.name = "Agent"
        req1 = _make_requirement(requires_confirmation=True)
        req2 = _make_requirement(external_execution_required=True)
        member_run_response = MagicMock()
        member_run_response.requirements = [req1, req2]
        member_run_response.run_id = "run-1"
        run_response = MagicMock()
        run_response.requirements = None
        with patch("agno.team._tools.get_member_id", return_value="id-1"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        assert len(run_response.requirements) == 2
        assert all(r.member_agent_id == "id-1" for r in run_response.requirements)
    def test_appends_to_existing_requirements(self):
        """Propagation appends to (not replaces) requirements already on the response."""
        from agno.team._tools import _propagate_member_pause
        member_agent = MagicMock()
        member_agent.name = "Agent"
        new_req = _make_requirement(requires_confirmation=True)
        member_run_response = MagicMock()
        member_run_response.requirements = [new_req]
        member_run_response.run_id = "run-1"
        existing_req = _make_requirement(external_execution_required=True)
        run_response = MagicMock()
        run_response.requirements = [existing_req]
        with patch("agno.team._tools.get_member_id", return_value="id-1"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        assert len(run_response.requirements) == 2
# ===========================================================================
# 2. _find_member_route_by_id
# ===========================================================================
class TestFindMemberRouteById:
    """_find_member_route_by_id returns the direct child to route through."""
    def _make_team_with_members(self):
        """Create a team hierarchy for testing."""
        from agno.agent import Agent
        from agno.team.team import Team
        agent_a = Agent(name="Agent A")
        agent_b = Agent(name="Agent B")
        agent_c = Agent(name="Agent C")
        sub_team = Team(name="Sub Team", members=[agent_c])
        team = Team(name="Parent Team", members=[agent_a, agent_b, sub_team])
        return team, agent_a, agent_b, agent_c, sub_team
    def test_direct_member_match(self):
        """A direct member is returned with its index in the members list."""
        from agno.team._tools import _find_member_route_by_id
        from agno.utils.team import get_member_id
        team, agent_a, _, _, _ = self._make_team_with_members()
        member_id = get_member_id(agent_a)
        result = _find_member_route_by_id(team, member_id)
        assert result is not None
        idx, member = result
        assert idx == 0
        assert member is agent_a
    def test_nested_member_returns_sub_team(self):
        """For a member nested inside a sub-team, should return the sub-team for routing."""
        from agno.team._tools import _find_member_route_by_id
        from agno.utils.team import get_member_id
        team, _, _, agent_c, sub_team = self._make_team_with_members()
        member_id = get_member_id(agent_c)
        result = _find_member_route_by_id(team, member_id)
        assert result is not None
        idx, member = result
        assert idx == 2  # sub_team is at index 2
        assert member is sub_team  # Routes through sub-team, not directly to agent_c
    def test_unknown_member_returns_none(self):
        """An unknown member id yields None rather than raising."""
        from agno.team._tools import _find_member_route_by_id
        team, _, _, _, _ = self._make_team_with_members()
        result = _find_member_route_by_id(team, "nonexistent-id")
        assert result is None
# ===========================================================================
# 3. _normalize_requirements_payload
# ===========================================================================
class TestNormalizeRequirementsPayload:
    """_normalize_requirements_payload coerces dicts to RunRequirement objects."""
    def test_converts_dict_to_run_requirement(self):
        """A serialized requirement dict is rebuilt into a RunRequirement."""
        from agno.team._run import _normalize_requirements_payload
        req = _make_requirement(requires_confirmation=True)
        d = req.to_dict()
        result = _normalize_requirements_payload([d])
        assert len(result) == 1
        assert isinstance(result[0], RunRequirement)
    def test_passes_through_run_requirement_objects(self):
        """Existing RunRequirement objects are passed through by identity."""
        from agno.team._run import _normalize_requirements_payload
        req = _make_requirement(requires_confirmation=True)
        result = _normalize_requirements_payload([req])
        assert result[0] is req  # Same object, not a copy
    def test_handles_mixed_list(self):
        """A mix of objects and dicts normalizes to all RunRequirement."""
        from agno.team._run import _normalize_requirements_payload
        req = _make_requirement(requires_confirmation=True)
        d = _make_requirement(external_execution_required=True).to_dict()
        result = _normalize_requirements_payload([req, d])
        assert len(result) == 2
        assert isinstance(result[0], RunRequirement)
        assert isinstance(result[1], RunRequirement)
# ===========================================================================
# 4. _has_member_requirements and _has_team_level_requirements
# ===========================================================================
class TestRequirementClassification:
    """member_agent_id presence decides member-level vs team-level requirements."""
    def test_has_member_requirements(self):
        """A requirement tagged with member_agent_id counts as a member requirement."""
        from agno.team._run import _has_member_requirements
        req = _make_requirement(requires_confirmation=True)
        req.member_agent_id = "agent-1"
        assert _has_member_requirements([req]) is True
    def test_has_no_member_requirements(self):
        """An untagged requirement is not a member requirement."""
        from agno.team._run import _has_member_requirements
        req = _make_requirement(requires_confirmation=True)
        assert _has_member_requirements([req]) is False
    def test_has_team_level_requirements(self):
        """An untagged requirement counts as team-level."""
        from agno.team._run import _has_team_level_requirements
        req = _make_requirement(requires_confirmation=True)
        # No member_agent_id means it's a team-level requirement
        assert _has_team_level_requirements([req]) is True
    def test_has_no_team_level_requirements(self):
        """A member-tagged requirement is not team-level."""
        from agno.team._run import _has_team_level_requirements
        req = _make_requirement(requires_confirmation=True)
        req.member_agent_id = "agent-1"
        assert _has_team_level_requirements([req]) is False
    def test_mixed_requirements(self):
        """A mixed list reports both member and team-level requirements present."""
        from agno.team._run import _has_member_requirements, _has_team_level_requirements
        team_req = _make_requirement(requires_confirmation=True)
        member_req = _make_requirement(external_execution_required=True)
        member_req.member_agent_id = "agent-1"
        reqs = [team_req, member_req]
        assert _has_member_requirements(reqs) is True
        assert _has_team_level_requirements(reqs) is True
    def test_empty_list(self):
        """An empty list has neither kind of requirement."""
        from agno.team._run import _has_member_requirements, _has_team_level_requirements
        assert _has_member_requirements([]) is False
        assert _has_team_level_requirements([]) is False
# ===========================================================================
# 5. _build_continuation_message
# ===========================================================================
class TestBuildContinuationMessage:
    """_build_continuation_message summarizes member results for the team model."""
    def test_empty_results(self):
        """With no member results, the message still signals completion."""
        from agno.team._run import _build_continuation_message
        msg = _build_continuation_message([])
        assert "completed" in msg.lower()
    def test_single_result(self):
        """A single member result (agent name and text) appears in the message."""
        from agno.team._run import _build_continuation_message
        msg = _build_continuation_message(["[Agent A]: Deployment successful"])
        assert "Agent A" in msg
        assert "Deployment successful" in msg
    def test_multiple_results(self):
        """Multiple member results are all included in the message."""
        from agno.team._run import _build_continuation_message
        msg = _build_continuation_message(
            [
                "[Agent A]: Result 1",
                "[Agent B]: Result 2",
            ]
        )
        assert "Agent A" in msg
        assert "Agent B" in msg
        assert "Result 1" in msg
        assert "Result 2" in msg
# ===========================================================================
# 6. Chained HITL: newly propagated requirements are preserved
# ===========================================================================
class TestChainedHITLRequirements:
    """Verify that after routing, newly propagated requirements from chained
    HITL (member pausing again) are merged back with team-level requirements
    rather than being discarded."""
    def test_newly_propagated_reqs_preserved_after_routing(self):
        """Simulate: member routing propagates new reqs back onto run_response.
        After the routing block, those new reqs must appear alongside team-level reqs."""
        # Set up initial state: one team-level req and one member req
        team_req = _make_requirement(requires_confirmation=True)
        member_req = _make_requirement(external_execution_required=True)
        member_req.member_agent_id = "agent-1"
        member_req.member_agent_name = "Agent 1"
        all_reqs = [team_req, member_req]
        # Simulate the routing logic from continue_run_dispatch
        member_reqs = [r for r in all_reqs if getattr(r, "member_agent_id", None) is not None]
        team_level_reqs = [r for r in all_reqs if getattr(r, "member_agent_id", None) is None]
        original_member_req_ids = {id(r) for r in member_reqs}
        # Simulate _route_requirements_to_members appending a new propagated req
        new_propagated = _make_requirement(requires_confirmation=True)
        new_propagated.member_agent_id = "agent-2"
        simulated_post_routing = member_reqs + [new_propagated]
        # Merge logic: only reqs not seen before routing are "newly propagated"
        newly_propagated = [r for r in simulated_post_routing if id(r) not in original_member_req_ids]
        final_reqs = team_level_reqs + newly_propagated
        assert len(final_reqs) == 2  # team_req + new_propagated
        assert team_req in final_reqs
        assert new_propagated in final_reqs
        # Original member_req should NOT be in the final set
        assert member_req not in final_reqs
    def test_no_propagated_reqs_yields_only_team_level(self):
        """If no member pauses again, only team-level reqs remain."""
        team_req = _make_requirement(requires_confirmation=True)
        member_req = _make_requirement(external_execution_required=True)
        member_req.member_agent_id = "agent-1"
        all_reqs = [team_req, member_req]
        member_reqs = [r for r in all_reqs if getattr(r, "member_agent_id", None) is not None]
        team_level_reqs = [r for r in all_reqs if getattr(r, "member_agent_id", None) is None]
        original_member_req_ids = {id(r) for r in member_reqs}
        # Simulate routing consuming all member reqs (no new propagation)
        simulated_post_routing = member_reqs
        newly_propagated = [r for r in simulated_post_routing if id(r) not in original_member_req_ids]
        final_reqs = team_level_reqs + newly_propagated
        assert len(final_reqs) == 1
        assert final_reqs[0] is team_req
# ===========================================================================
# 7. Mixed HITL types
# ===========================================================================
class TestMixedHITLTypes:
    """Verify requirements of different HITL types can coexist."""
    def test_mixed_confirmation_and_external_execution(self):
        """Confirmation and external-execution requirements resolve independently."""
        conf_req = _make_requirement(requires_confirmation=True)
        ext_req = _make_requirement(external_execution_required=True)
        assert conf_req.needs_confirmation is True
        assert conf_req.needs_external_execution is False
        assert ext_req.needs_confirmation is False
        assert ext_req.needs_external_execution is True
        # Both should be unresolved
        assert conf_req.is_resolved() is False
        assert ext_req.is_resolved() is False
        # Resolve confirmation
        conf_req.confirm()
        assert conf_req.is_resolved() is True
        # ext_req still unresolved
        assert ext_req.is_resolved() is False
        # Resolve external execution
        ext_req.set_external_execution_result("done")
        assert ext_req.is_resolved() is True
    def test_mixed_member_and_team_level_requirements(self):
        """Member-tagged and team-level requirements of different types can be partitioned."""
        from agno.team._run import _has_member_requirements, _has_team_level_requirements
        team_conf_req = _make_requirement(requires_confirmation=True)
        member_ext_req = _make_requirement(external_execution_required=True)
        member_ext_req.member_agent_id = "agent-1"
        from agno.tools.function import UserInputField
        member_input_req = _make_requirement(
            requires_user_input=True,
            user_input_schema=[UserInputField(name="city", field_type=str)],
        )
        member_input_req.member_agent_id = "agent-2"
        reqs = [team_conf_req, member_ext_req, member_input_req]
        assert _has_member_requirements(reqs) is True
        assert _has_team_level_requirements(reqs) is True
        # Categorize by member tag
        team_reqs = [r for r in reqs if getattr(r, "member_agent_id", None) is None]
        member_reqs = [r for r in reqs if getattr(r, "member_agent_id", None) is not None]
        assert len(team_reqs) == 1
        assert len(member_reqs) == 2
# ===========================================================================
# 8. Deeply nested teams (3+ levels)
# ===========================================================================
class TestDeeplyNestedTeams:
    """Test _find_member_route_by_id with 3+ levels of nesting."""

    def test_three_level_nesting_returns_top_sub_team(self):
        from agno.agent import Agent
        from agno.team._tools import _find_member_route_by_id
        from agno.team.team import Team
        from agno.utils.team import get_member_id

        # Hierarchy: root_team -> outer_team -> inner_team -> deep_agent
        deep_agent = Agent(name="Deep Agent")
        inner_team = Team(name="Inner Team", members=[deep_agent])
        outer_team = Team(name="Outer Team", members=[inner_team])
        root_team = Team(name="Root Team", members=[outer_team])
        deep_agent_id = get_member_id(deep_agent)
        result = _find_member_route_by_id(root_team, deep_agent_id)
        assert result is not None
        idx, member = result
        # Should return outer_team (the direct child of root_team)
        assert member is outer_team
        assert idx == 0

    def test_three_level_nesting_direct_child_match(self):
        from agno.agent import Agent
        from agno.team._tools import _find_member_route_by_id
        from agno.team.team import Team
        from agno.utils.team import get_member_id

        # mid_agent sits one level down, as a sibling of inner_team.
        deep_agent = Agent(name="Deep Agent")
        inner_team = Team(name="Inner Team", members=[deep_agent])
        mid_agent = Agent(name="Mid Agent")
        outer_team = Team(name="Outer Team", members=[inner_team, mid_agent])
        root_team = Team(name="Root Team", members=[outer_team])
        mid_agent_id = get_member_id(mid_agent)
        # mid_agent is inside outer_team, so routing should go through outer_team
        result = _find_member_route_by_id(root_team, mid_agent_id)
        assert result is not None
        idx, member = result
        assert member is outer_team

    def test_deeply_nested_unknown_returns_none(self):
        from agno.agent import Agent
        from agno.team._tools import _find_member_route_by_id
        from agno.team.team import Team

        # An id that matches no member at any depth must yield None.
        deep_agent = Agent(name="Deep Agent")
        inner_team = Team(name="Inner Team", members=[deep_agent])
        outer_team = Team(name="Outer Team", members=[inner_team])
        result = _find_member_route_by_id(outer_team, "nonexistent-deep-id")
        assert result is None
# ===========================================================================
# 9. _member_run_response cleanup
# ===========================================================================
class TestMemberRunResponseCleanup:
    """Verify that _member_run_response is cleared after routing consumption."""

    def test_propagate_sets_member_run_response(self):
        from agno.team._tools import _propagate_member_pause

        # Mocked member agent and its paused run carrying one requirement.
        member_agent = MagicMock()
        member_agent.name = "Agent"
        member_run_response = MagicMock()
        req = _make_requirement(requires_confirmation=True)
        member_run_response.requirements = [req]
        member_run_response.run_id = "run-1"
        # Team-level response starts with no requirements attached.
        run_response = MagicMock()
        run_response.requirements = None
        with patch("agno.team._tools.get_member_id", return_value="id-1"):
            _propagate_member_pause(run_response, member_agent, member_run_response)
        # _member_run_response should be set (so routing can later consume it)
        assert run_response.requirements[0]._member_run_response is member_run_response
# ===========================================================================
# 10. Unresolved team-level requirements guard
# ===========================================================================
class TestUnresolvedTeamLevelRequirements:
    """Exercise the re-pause guard filter used by continue_run_dispatch:
    a requirement trips the guard only when it is team-level (no
    member_agent_id) AND still unresolved."""

    @staticmethod
    def _guard_hits(requirements):
        # Mirrors the guard's filter expression exactly.
        return [
            r
            for r in requirements
            if getattr(r, "member_agent_id", None) is None and not r.is_resolved()
        ]

    def test_unresolved_team_level_detected(self):
        """Unresolved team-level requirement should be found by the guard."""
        pending = _make_requirement(requires_confirmation=True)
        # No member_agent_id means team-level.
        assert pending.member_agent_id is None
        assert not pending.is_resolved()
        assert len(self._guard_hits([pending])) == 1

    def test_resolved_team_level_not_detected(self):
        """Resolved team-level requirement should not trigger the guard."""
        settled = _make_requirement(requires_confirmation=True)
        settled.confirm()
        assert settled.is_resolved()
        assert len(self._guard_hits([settled])) == 0

    def test_member_reqs_excluded_from_team_level_guard(self):
        """Member requirements should not be caught by the team-level guard."""
        tagged = _make_requirement(requires_confirmation=True)
        tagged.member_agent_id = "agent-1"
        assert len(self._guard_hits([tagged])) == 0

    def test_mixed_reqs_only_team_level_unresolved(self):
        """Only unresolved team-level requirements should trigger the guard."""
        open_team = _make_requirement(requires_confirmation=True)
        closed_team = _make_requirement(requires_confirmation=True)
        closed_team.confirm()
        open_member = _make_requirement(requires_confirmation=True)
        open_member.member_agent_id = "agent-1"

        hits = self._guard_hits([open_team, closed_team, open_member])
        assert len(hits) == 1
        assert hits[0] is open_team
# ===========================================================================
# 11. asyncio.gather error handling in _aroute_requirements_to_members
# ===========================================================================
class TestAsyncGatherErrorHandling:
    """Verify that _aroute_requirements_to_members handles member failures gracefully."""

    @staticmethod
    def _keep_successes(outcomes):
        # Same post-gather filtering the production code applies: drop
        # exceptions (they are only logged as warnings) and drop None entries.
        kept = []
        for outcome in outcomes:
            if isinstance(outcome, Exception):
                continue  # logged as warning in production code
            if outcome is not None:
                kept.append(outcome)
        return kept

    def test_gather_filters_exceptions(self):
        """When asyncio.gather returns exceptions, they should be filtered out."""
        mixed = ["[Agent A]: Success", Exception("Agent B failed"), None, "[Agent C]: Done"]
        kept = self._keep_successes(mixed)
        assert len(kept) == 2
        assert kept[0] == "[Agent A]: Success"
        assert kept[1] == "[Agent C]: Done"

    def test_all_exceptions_yields_empty_results(self):
        """When all members fail, result list should be empty."""
        kept = self._keep_successes([Exception("fail 1"), Exception("fail 2")])
        assert len(kept) == 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_continue_run_requirements.py",
"license": "Apache License 2.0",
"lines": 482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_run_requirement_fixes.py | """Tests for RunRequirement fixes in the Team HITL implementation."""
import pytest
from agno.models.response import ToolExecution
from agno.run.requirement import RunRequirement
from agno.tools.function import UserInputField
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_tool_execution(**overrides) -> ToolExecution:
    """Create a ToolExecution with sensible defaults, overridden by kwargs."""
    # Later keys win in a ** merge, so overrides replace the defaults.
    params = {"tool_name": "do_something", "tool_args": {"x": 1}, **overrides}
    return ToolExecution(**params)
def _make_requirement(**te_overrides) -> RunRequirement:
    """Shortcut: build a RunRequirement wrapping a fresh ToolExecution."""
    tool_execution = _make_tool_execution(**te_overrides)
    return RunRequirement(tool_execution=tool_execution)
# ===========================================================================
# 1. needs_confirmation fix
# ===========================================================================
class TestNeedsConfirmation:
    """needs_confirmation must go False as soon as tool_execution.confirmed is set."""

    def test_confirmed_true_returns_false(self):
        """An already-confirmed tool needs no further confirmation."""
        requirement = _make_requirement(requires_confirmation=True, confirmed=True)
        assert requirement.needs_confirmation is False

    def test_confirmed_false_returns_false(self):
        """An explicitly rejected tool (confirmed=False) needs no further confirmation."""
        requirement = _make_requirement(requires_confirmation=True, confirmed=False)
        assert requirement.needs_confirmation is False

    def test_confirmed_none_returns_true(self):
        """confirmed=None with requires_confirmation=True still awaits a human."""
        requirement = _make_requirement(requires_confirmation=True, confirmed=None)
        assert requirement.needs_confirmation is True

    def test_no_requires_confirmation_returns_false(self):
        """Without the requires_confirmation flag there is nothing to confirm."""
        requirement = _make_requirement(requires_confirmation=False)
        assert requirement.needs_confirmation is False

    def test_requirement_level_confirmation_overrides(self):
        """A requirement-level confirmation of True short-circuits the check."""
        requirement = _make_requirement(requires_confirmation=True)
        requirement.confirmation = True
        assert requirement.needs_confirmation is False

    def test_requirement_level_confirmation_false_overrides(self):
        """A requirement-level confirmation of False short-circuits the check too."""
        requirement = _make_requirement(requires_confirmation=True)
        requirement.confirmation = False
        assert requirement.needs_confirmation is False
# ===========================================================================
# 2. needs_external_execution fix
# ===========================================================================
class TestNeedsExternalExecution:
    """needs_external_execution flips to False once a result is recorded."""

    def test_result_set_returns_false(self):
        requirement = _make_requirement(external_execution_required=True)
        requirement.external_execution_result = "done"
        assert requirement.needs_external_execution is False

    def test_result_none_returns_true(self):
        requirement = _make_requirement(external_execution_required=True)
        assert requirement.needs_external_execution is True

    def test_not_required_returns_false(self):
        requirement = _make_requirement(external_execution_required=False)
        assert requirement.needs_external_execution is False

    def test_empty_string_result_counts_as_set(self):
        """Even an empty string is not None so it should resolve the requirement."""
        requirement = _make_requirement(external_execution_required=True)
        requirement.external_execution_result = ""
        assert requirement.needs_external_execution is False
# ===========================================================================
# 3. provide_user_input
# ===========================================================================
class TestProvideUserInput:
    """Behaviour of RunRequirement.provide_user_input."""

    @staticmethod
    def _input_requirement(fields):
        # Build a user-input requirement around the given schema fields.
        return RunRequirement(
            tool_execution=_make_tool_execution(
                requires_user_input=True,
                user_input_schema=fields,
            )
        )

    def test_sets_user_input_fields_on_tool_execution(self):
        requirement = self._input_requirement(
            [
                UserInputField(name="name", field_type=str, description="Your name"),
                UserInputField(name="age", field_type=int, description="Your age"),
            ]
        )
        # Answer only the first field.
        requirement.provide_user_input({"name": "Alice"})
        assert requirement.user_input_schema[0].value == "Alice"
        assert requirement.user_input_schema[1].value is None
        # A half-answered schema must not be marked answered.
        assert requirement.tool_execution.answered is not True

    def test_all_fields_marks_answered(self):
        requirement = self._input_requirement([UserInputField(name="name", field_type=str)])
        requirement.provide_user_input({"name": "Alice"})
        assert requirement.user_input_schema[0].value == "Alice"
        assert requirement.tool_execution.answered is True

    def test_provide_user_input_raises_when_not_needed(self):
        requirement = _make_requirement(requires_user_input=False)
        with pytest.raises(ValueError, match="does not require user input"):
            requirement.provide_user_input({"name": "Alice"})
# ===========================================================================
# 4. reject(note=...) propagation
# ===========================================================================
class TestRejectNotePropagation:
    """reject() mirrors its note and decision onto the wrapped ToolExecution."""

    def test_reject_sets_confirmation_note_on_tool_execution(self):
        requirement = _make_requirement(requires_confirmation=True)
        requirement.reject(note="Not allowed")
        # Note and decision land on both the requirement and the tool execution.
        assert requirement.confirmation_note == "Not allowed"
        assert requirement.tool_execution.confirmation_note == "Not allowed"
        assert requirement.confirmation is False
        assert requirement.tool_execution.confirmed is False

    def test_reject_without_note(self):
        requirement = _make_requirement(requires_confirmation=True)
        requirement.reject()
        assert requirement.confirmation_note is None
        assert requirement.tool_execution.confirmation_note is None
        assert requirement.confirmation is False

    def test_reject_raises_when_not_needed(self):
        requirement = _make_requirement(requires_confirmation=False)
        with pytest.raises(ValueError, match="does not require confirmation"):
            requirement.reject(note="nope")
# ===========================================================================
# 5. Member context fields (serialisation round-trip)
# ===========================================================================
class TestMemberContextFields:
    """Member context fields survive attribute access and to_dict/from_dict."""

    @staticmethod
    def _tagged(**te_overrides):
        # Requirement stamped with the member context used throughout this class.
        requirement = _make_requirement(**te_overrides)
        requirement.member_agent_id = "agent-123"
        requirement.member_agent_name = "Research Agent"
        requirement.member_run_id = "run-456"
        return requirement

    def test_member_fields_are_set(self):
        requirement = self._tagged()
        assert requirement.member_agent_id == "agent-123"
        assert requirement.member_agent_name == "Research Agent"
        assert requirement.member_run_id == "run-456"

    def test_member_fields_serialised_in_to_dict(self):
        payload = self._tagged().to_dict()
        assert payload["member_agent_id"] == "agent-123"
        assert payload["member_agent_name"] == "Research Agent"
        assert payload["member_run_id"] == "run-456"

    def test_member_fields_absent_when_none(self):
        payload = _make_requirement().to_dict()
        # None values are stripped by to_dict.
        assert "member_agent_id" not in payload
        assert "member_agent_name" not in payload
        assert "member_run_id" not in payload

    def test_member_fields_round_trip_via_from_dict(self):
        payload = self._tagged(requires_confirmation=True).to_dict()
        restored = RunRequirement.from_dict(payload)
        assert restored.member_agent_id == "agent-123"
        assert restored.member_agent_name == "Research Agent"
        assert restored.member_run_id == "run-456"

    def test_from_dict_without_member_fields(self):
        restored = RunRequirement.from_dict(_make_requirement().to_dict())
        assert restored.member_agent_id is None
        assert restored.member_agent_name is None
        assert restored.member_run_id is None
# ===========================================================================
# 6. is_resolved() method
# ===========================================================================
class TestIsResolved:
    """is_resolved() reflects every outstanding HITL flag on the requirement."""

    @staticmethod
    def _city_input_requirement():
        # Requirement that needs a single "city" user-input field.
        return RunRequirement(
            tool_execution=_make_tool_execution(
                requires_user_input=True,
                user_input_schema=[UserInputField(name="city", field_type=str)],
            )
        )

    def test_confirmation_pending_not_resolved(self):
        requirement = _make_requirement(requires_confirmation=True)
        assert requirement.is_resolved() is False

    def test_confirmation_given_resolved(self):
        requirement = _make_requirement(requires_confirmation=True)
        requirement.confirm()
        assert requirement.is_resolved() is True

    def test_confirmation_rejected_resolved(self):
        # Rejection is also a terminal decision.
        requirement = _make_requirement(requires_confirmation=True)
        requirement.reject(note="No")
        assert requirement.is_resolved() is True

    def test_external_execution_pending_not_resolved(self):
        requirement = _make_requirement(external_execution_required=True)
        assert requirement.is_resolved() is False

    def test_external_execution_provided_resolved(self):
        requirement = _make_requirement(external_execution_required=True)
        requirement.set_external_execution_result("result data")
        assert requirement.is_resolved() is True

    def test_user_input_pending_not_resolved(self):
        assert self._city_input_requirement().is_resolved() is False

    def test_user_input_provided_resolved(self):
        requirement = self._city_input_requirement()
        requirement.provide_user_input({"city": "Paris"})
        assert requirement.is_resolved() is True

    def test_no_requirements_is_resolved(self):
        """A requirement with no HITL flags is trivially resolved."""
        assert _make_requirement().is_resolved() is True

    def test_multiple_unresolved_flags(self):
        """If both confirmation and external execution are needed, not resolved until both done."""
        requirement = RunRequirement(
            tool_execution=_make_tool_execution(
                requires_confirmation=True, external_execution_required=True
            )
        )
        assert requirement.is_resolved() is False
        # Confirmation alone is not enough.
        requirement.confirm()
        assert requirement.is_resolved() is False
        # Now resolve the external execution as well.
        requirement.set_external_execution_result("done")
        assert requirement.is_resolved() is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_run_requirement_fixes.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_task_model.py | """Unit tests for Task, TaskList, and session_state helpers."""
from agno.team.task import (
TASK_LIST_KEY,
Task,
TaskList,
TaskStatus,
load_task_list,
save_task_list,
)
class TestTaskStatus:
    """TaskStatus string-enum values and parsing."""

    def test_enum_values(self):
        # Each member compares equal to its raw string value.
        for member, raw in (
            (TaskStatus.pending, "pending"),
            (TaskStatus.in_progress, "in_progress"),
            (TaskStatus.completed, "completed"),
            (TaskStatus.failed, "failed"),
            (TaskStatus.blocked, "blocked"),
        ):
            assert member == raw

    def test_from_string(self):
        # The enum constructor accepts raw strings.
        for raw, member in (("pending", TaskStatus.pending), ("completed", TaskStatus.completed)):
            assert TaskStatus(raw) == member
class TestTask:
    """Task construction defaults and (de)serialisation."""

    def test_auto_id(self):
        generated = Task(title="Test")
        assert generated.id  # non-empty
        assert len(generated.id) == 8

    def test_auto_created_at(self):
        assert Task(title="Test").created_at > 0

    def test_explicit_id(self):
        assert Task(id="abc", title="Test").id == "abc"

    def test_default_status(self):
        assert Task(title="Test").status == TaskStatus.pending

    def test_to_dict(self):
        payload = Task(id="t1", title="Do thing", description="Details", assignee="agent-a").to_dict()
        assert payload["id"] == "t1"
        assert payload["title"] == "Do thing"
        assert payload["description"] == "Details"
        assert payload["status"] == "pending"
        assert payload["assignee"] == "agent-a"

    def test_from_dict(self):
        restored = Task.from_dict(
            {
                "id": "t1",
                "title": "Do thing",
                "description": "Details",
                "status": "in_progress",
                "assignee": "agent-a",
                "dependencies": ["t0"],
                "result": "Done",
                "notes": ["note1"],
                "created_at": 1000.0,
            }
        )
        assert restored.id == "t1"
        assert restored.status == TaskStatus.in_progress
        assert restored.dependencies == ["t0"]
        assert restored.result == "Done"
        assert restored.notes == ["note1"]

    def test_roundtrip(self):
        original = Task(id="t1", title="Test", description="desc", assignee="a")
        clone = Task.from_dict(original.to_dict())
        # Every serialised field survives the round trip.
        for attr in ("id", "title", "description", "assignee", "status"):
            assert getattr(clone, attr) == getattr(original, attr)
class TestTaskList:
    """CRUD, availability, and terminal-state behaviour of TaskList."""

    def test_create_task(self):
        board = TaskList()
        created = board.create_task("Do thing", description="Details")
        assert len(board.tasks) == 1
        assert created.title == "Do thing"
        assert created.status == TaskStatus.pending

    def test_get_task(self):
        board = TaskList()
        created = board.create_task("Task A")
        assert board.get_task(created.id) is created

    def test_get_task_not_found(self):
        assert TaskList().get_task("nonexistent") is None

    def test_update_task(self):
        board = TaskList()
        created = board.create_task("Task A")
        updated = board.update_task(created.id, status="completed", result="All done")
        assert updated is not None
        assert updated.status == TaskStatus.completed
        assert updated.result == "All done"

    def test_update_task_not_found(self):
        assert TaskList().update_task("nonexistent", status="completed") is None

    def test_get_available_tasks(self):
        board = TaskList()
        first = board.create_task("Task 1")
        second = board.create_task("Task 2")
        dependent = board.create_task("Task 3", dependencies=[first.id])
        available_ids = {t.id for t in board.get_available_tasks()}
        assert first.id in available_ids
        assert second.id in available_ids
        assert dependent.id not in available_ids  # blocked by first

    def test_get_available_tasks_with_assignee(self):
        board = TaskList()
        mine = board.create_task("Task 1", assignee="agent-a")
        theirs = board.create_task("Task 2", assignee="agent-b")
        available_ids = {t.id for t in board.get_available_tasks(for_assignee="agent-a")}
        assert mine.id in available_ids
        assert theirs.id not in available_ids

    def test_all_terminal_empty(self):
        # An empty board is not considered finished.
        assert TaskList().all_terminal() is False

    def test_all_terminal_all_completed(self):
        board = TaskList()
        for created in (board.create_task("Task 1"), board.create_task("Task 2")):
            board.update_task(created.id, status="completed")
        assert board.all_terminal() is True

    def test_all_terminal_mixed(self):
        # failed counts as terminal alongside completed.
        board = TaskList()
        done = board.create_task("Task 1")
        broken = board.create_task("Task 2")
        board.update_task(done.id, status="completed")
        board.update_task(broken.id, status="failed")
        assert board.all_terminal() is True

    def test_all_terminal_not_done(self):
        board = TaskList()
        done = board.create_task("Task 1")
        board.create_task("Task 2")
        board.update_task(done.id, status="completed")
        assert board.all_terminal() is False
class TestTaskListDependencies:
    """Dependency-driven blocking and unblocking of tasks."""

    def test_dependency_blocks_task(self):
        board = TaskList()
        prerequisite = board.create_task("Task 1")
        dependent = board.create_task("Task 2", dependencies=[prerequisite.id])
        assert dependent.status == TaskStatus.blocked

    def test_completing_dependency_unblocks(self):
        board = TaskList()
        prerequisite = board.create_task("Task 1")
        dependent = board.create_task("Task 2", dependencies=[prerequisite.id])
        assert dependent.status == TaskStatus.blocked
        board.update_task(prerequisite.id, status="completed")
        assert dependent.status == TaskStatus.pending

    def test_multiple_dependencies(self):
        board = TaskList()
        first = board.create_task("Task 1")
        second = board.create_task("Task 2")
        dependent = board.create_task("Task 3", dependencies=[first.id, second.id])
        assert dependent.status == TaskStatus.blocked
        board.update_task(first.id, status="completed")
        # second is still pending, so the dependent stays blocked.
        assert dependent.status == TaskStatus.blocked
        board.update_task(second.id, status="completed")
        assert dependent.status == TaskStatus.pending
class TestTaskListSummary:
    """Human-readable summary string rendering."""

    def test_empty_summary(self):
        assert TaskList().get_summary_string() == "No tasks created yet."

    def test_summary_with_tasks(self):
        board = TaskList()
        finished = board.create_task("Task 1", assignee="agent-a")
        board.create_task("Task 2")
        board.update_task(finished.id, status="completed", result="Done!")
        rendered = board.get_summary_string()
        # Titles, statuses, assignee and result all appear in the summary.
        for fragment in ("Task 1", "Task 2", "COMPLETED", "PENDING", "agent-a", "Done!"):
            assert fragment in rendered

    def test_summary_truncates_long_results(self):
        board = TaskList()
        noisy = board.create_task("Task 1")
        board.update_task(noisy.id, status="completed", result="x" * 300)
        # Oversized results are elided with "...".
        assert "..." in board.get_summary_string()
class TestTaskListSerialization:
    """to_dict/from_dict round-trips the whole list including completion flags."""

    def test_roundtrip(self):
        board = TaskList()
        finished = board.create_task("Task 1", assignee="agent-a")
        board.create_task("Task 2", dependencies=[finished.id])
        board.update_task(finished.id, status="completed", result="Done")
        board.goal_complete = True
        board.completion_summary = "All done"

        restored = TaskList.from_dict(board.to_dict())
        assert len(restored.tasks) == 2
        assert restored.tasks[0].status == TaskStatus.completed
        assert restored.tasks[0].result == "Done"
        # The second task was unblocked when its dependency completed.
        assert restored.tasks[1].status == TaskStatus.pending
        assert restored.goal_complete is True
        assert restored.completion_summary == "All done"
class TestSessionStateHelpers:
    """load_task_list / save_task_list round-trips via session_state."""

    def test_load_empty(self):
        assert len(load_task_list(None).tasks) == 0

    def test_load_no_key(self):
        assert len(load_task_list({"other": "data"}).tasks) == 0

    def test_save_and_load(self):
        session_state: dict = {}
        board = TaskList()
        board.create_task("Task 1")
        save_task_list(session_state, board)
        assert TASK_LIST_KEY in session_state
        reloaded = load_task_list(session_state)
        assert len(reloaded.tasks) == 1
        assert reloaded.tasks[0].title == "Task 1"

    def test_save_to_none_state(self):
        # Saving into a missing state must be a no-op, not an error.
        save_task_list(None, TaskList())
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_task_model.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_team_mode.py | """Unit tests for TeamMode enum and backwards compatibility."""
from unittest.mock import MagicMock
import pytest
from agno.team.mode import TeamMode
def _run_determine_tools_for_model(team):
    """Invoke Team._determine_tools_for_model with minimal stubbed collaborators.

    Builds a mocked model, a fresh run context and session, and returns
    whatever the team resolves as its tool set for that run.
    """
    from agno.run import RunContext
    from agno.run.team import TeamRunOutput
    from agno.session.team import TeamSession

    model = MagicMock()
    # Force the non-structured-outputs code path.
    model.supports_native_structured_outputs = False
    return team._determine_tools_for_model(
        model=model,
        run_response=TeamRunOutput(content="ok"),
        run_context=RunContext(session_state={}, run_id="run-id", session_id="session-id"),
        team_run_context={},
        session=TeamSession(session_id="session-id"),
        input_message="Original user request",
        check_mcp_tools=False,  # skip MCP tool discovery in unit tests
    )
class TestTeamMode:
    """Tests for TeamMode enum values and behavior."""

    def test_enum_values(self):
        # Each member compares equal to its raw string value.
        for member, raw in (
            (TeamMode.coordinate, "coordinate"),
            (TeamMode.route, "route"),
            (TeamMode.broadcast, "broadcast"),
            (TeamMode.tasks, "tasks"),
        ):
            assert member == raw

    def test_from_string(self):
        # The enum constructor accepts raw strings.
        for raw, member in (
            ("coordinate", TeamMode.coordinate),
            ("route", TeamMode.route),
            ("broadcast", TeamMode.broadcast),
            ("tasks", TeamMode.tasks),
        ):
            assert TeamMode(raw) == member

    def test_invalid_mode_raises(self):
        with pytest.raises(ValueError):
            TeamMode("invalid")

    def test_is_str_subclass(self):
        assert isinstance(TeamMode.coordinate, str)
class TestTeamModeBackwardsCompat:
    """Tests that mode inference from legacy booleans works correctly."""

    def test_default_mode_is_coordinate(self):
        """When no mode or booleans are set, mode should be coordinate."""
        from agno.team.team import Team

        team = Team(name="test", members=[])
        assert team.mode == TeamMode.coordinate
        assert not team.respond_directly
        assert not team.delegate_to_all_members

    def test_mode_coordinate_explicit(self):
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.coordinate)
        assert team.mode == TeamMode.coordinate

    def test_mode_route_sets_respond_directly(self):
        # route mode implies the legacy respond_directly boolean.
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.route)
        assert team.mode == TeamMode.route
        assert team.respond_directly is True

    def test_mode_broadcast_sets_delegate_to_all(self):
        # broadcast mode implies the legacy delegate_to_all_members boolean.
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.broadcast)
        assert team.mode == TeamMode.broadcast
        assert team.delegate_to_all_members is True

    def test_mode_tasks(self):
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.tasks)
        assert team.mode == TeamMode.tasks

    def test_respond_directly_infers_route(self):
        """Legacy boolean should set mode to route."""
        from agno.team.team import Team

        team = Team(name="test", members=[], respond_directly=True)
        assert team.mode == TeamMode.route

    def test_delegate_to_all_infers_broadcast(self):
        """Legacy boolean should set mode to broadcast."""
        from agno.team.team import Team

        team = Team(name="test", members=[], delegate_to_all_members=True)
        assert team.mode == TeamMode.broadcast

    def test_mode_route_overrides_conflicting_delegate_to_all(self):
        """Explicit mode=route should force delegate_to_all_members=False even if passed True."""
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.route, delegate_to_all_members=True)
        assert team.mode == TeamMode.route
        assert team.respond_directly is True
        assert team.delegate_to_all_members is False

    def test_mode_broadcast_overrides_conflicting_respond_directly(self):
        """Explicit mode=broadcast should force respond_directly=False even if passed True."""
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.broadcast, respond_directly=True)
        assert team.mode == TeamMode.broadcast
        assert team.delegate_to_all_members is True
        assert team.respond_directly is False

    def test_mode_coordinate_overrides_conflicting_booleans(self):
        """Explicit mode=coordinate should force both booleans False."""
        from agno.team.team import Team

        team = Team(
            name="test", members=[], mode=TeamMode.coordinate, respond_directly=True, delegate_to_all_members=True
        )
        assert team.mode == TeamMode.coordinate
        assert team.respond_directly is False
        assert team.delegate_to_all_members is False

    def test_mode_tasks_overrides_conflicting_booleans(self):
        """Explicit mode=tasks should force both booleans False."""
        from agno.team.team import Team

        team = Team(name="test", members=[], mode=TeamMode.tasks, respond_directly=True, delegate_to_all_members=True)
        assert team.mode == TeamMode.tasks
        assert team.respond_directly is False
        assert team.delegate_to_all_members is False

    def test_max_iterations_default(self):
        from agno.team.team import Team

        team = Team(name="test", members=[])
        assert team.max_iterations == 10

    def test_max_iterations_custom(self):
        from agno.team.team import Team

        team = Team(name="test", members=[], max_iterations=25)
        assert team.max_iterations == 25

    def test_tasks_mode_does_not_use_delegation_tools(self, monkeypatch):
        from agno.agent import Agent
        from agno.team.team import Team

        team = Team(
            name="tasks-smoke",
            members=[Agent(name="member", role="member")],
            mode=TeamMode.tasks,
        )
        # Record whether the delegation-tool factory is ever invoked.
        called = {"delegate_tool": False}

        def fake_get_delegate_task_function(**kwargs):
            called["delegate_tool"] = True
            raise AssertionError("_get_delegate_task_function should not be used in tasks mode")

        monkeypatch.setattr(team, "_get_delegate_task_function", fake_get_delegate_task_function)
        # Stub task-list loading and the task-management toolset so the call
        # stays a pure unit test with no database or tool side effects.
        monkeypatch.setattr("agno.team.task.load_task_list", lambda session_state: MagicMock())
        monkeypatch.setattr("agno.team._task_tools._get_task_management_tools", lambda **kwargs: [])
        _run_determine_tools_for_model(team)
        assert called["delegate_tool"] is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_team_mode.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/os/multiple_knowledge_instances.py | """
Multiple Knowledge Instances in AgentOS
============================================================
This cookbook demonstrates how to configure multiple Knowledge instances
in AgentOS, each with isolated content.
Key Concepts:
- Multiple Knowledge instances can share the same vector_db and contents_db
- Each instance is identified by its `name` property
- Content is isolated per instance via the `linked_to` field
- Instances with the same name but different databases are treated as separate
- The /knowledge/config endpoint returns all registered instances
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.vectordb.pgvector import PgVector
# Database connections
# Shared contents DB: the two default Knowledge instances persist content
# metadata in the same "knowledge_contents" table.
contents_db = PostgresDb(
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    knowledge_table="knowledge_contents",
)
# Shared vector store: embeddings for all instances live in one table;
# per-instance isolation is described in the module docstring.
vector_db = PgVector(
    table_name="knowledge_vectors",
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
)
# Create Knowledge instances
# Two instances with DIFFERENT names but the SAME databases: content is
# isolated between them by instance name.
company_knowledge = Knowledge(
    name="Company Knowledge Base",
    description="Unified knowledge from multiple sources",
    contents_db=contents_db,
    vector_db=vector_db,
    # content_sources=[sharepoint, github_docs, azure_blob],
)
personal_knowledge = Knowledge(
    name="Personal Knowledge Base",
    description="Unified knowledge from multiple sources",
    contents_db=contents_db,
    vector_db=vector_db,
    # content_sources=[sharepoint, github_docs, azure_blob],
)
# Separate contents DB pointing at a different table, used to demonstrate
# that instances with the SAME name but a different database are distinct.
company_knowledge_db = PostgresDb(
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    knowledge_table="knowledge_contents2",
)
company_knowledge_additional = Knowledge(
    name="Company Knowledge Base",
    description="Unified knowledge from multiple sources",
    contents_db=company_knowledge_db,
    vector_db=vector_db,
    # content_sources=[sharepoint, github_docs, azure_blob],
)
# Single agent wired to one of the knowledge instances with agentic search.
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    knowledge=company_knowledge,
    search_knowledge=True,
)
# All three knowledge instances are registered with AgentOS so they appear
# in the /knowledge/config endpoint.
agent_os = AgentOS(
    knowledge=[company_knowledge, company_knowledge_additional, personal_knowledge],
    agents=[agent],
)
app = agent_os.get_app()
# ============================================================================
# Run AgentOS
# ============================================================================
if __name__ == "__main__":
    # Serves a FastAPI app exposed by AgentOS. Use reload=True for local dev.
    agent_os.serve(app="multiple_knowledge_instances:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/os/multiple_knowledge_instances.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/agent/_cli.py | """User-facing CLI helpers for Agent: response printing and interactive REPL."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Union,
)
from pydantic import BaseModel
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.utils.print_response.agent import (
aprint_response,
aprint_response_stream,
print_response,
print_response_stream,
)
def agent_print_response(
    agent: Agent,
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    *,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    stream: Optional[bool] = None,
    markdown: Optional[bool] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    console: Optional[Any] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    **kwargs: Any,
) -> None:
    """Print the agent's response to the console (synchronous).

    Resolves streaming and markdown defaults from the agent, then delegates
    to either the streaming or non-streaming printer with a shared set of
    keyword arguments.

    Raises:
        Exception: If the agent is configured with an async DB; use the
            async variant (`agent_aprint_response`) instead.
    """
    from agno.agent import _init

    if _init.has_async_db(agent):
        raise Exception("This method is not supported with an async DB. Please use the async version of this method.")

    # Fall back to the default markdown tags when none (or an empty set) given.
    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}

    # Resolve markdown from the agent unless explicitly overridden;
    # structured output never renders as markdown.
    if markdown is None:
        markdown = agent.markdown
    if agent.output_schema is not None:
        markdown = False

    # Resolve streaming from the agent when the caller did not decide.
    if stream is None:
        stream = False if agent.stream is None else agent.stream

    # The streaming printer always receives stream_events=True explicitly,
    # so drop any caller-supplied value.
    kwargs.pop("stream_events", None)

    # Arguments common to both printers.
    shared_kwargs: Dict[str, Any] = dict(
        agent=agent,
        input=input,
        session_id=session_id,
        session_state=session_state,
        user_id=user_id,
        run_id=run_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        knowledge_filters=knowledge_filters,
        debug_mode=debug_mode,
        markdown=markdown,
        show_message=show_message,
        show_reasoning=show_reasoning,
        show_full_reasoning=show_full_reasoning,
        tags_to_include_in_markdown=tags_to_include_in_markdown,
        console=console,
        add_history_to_context=add_history_to_context,
        dependencies=dependencies,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        metadata=metadata,
    )
    if stream:
        print_response_stream(stream_events=True, **shared_kwargs, **kwargs)
    else:
        print_response(**shared_kwargs, **kwargs)
async def agent_aprint_response(
    agent: Agent,
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    *,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    stream: Optional[bool] = None,
    markdown: Optional[bool] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    console: Optional[Any] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    **kwargs: Any,
) -> None:
    """Print the agent's response to the console (asynchronous).

    Async counterpart of `agent_print_response`: resolves streaming and
    markdown defaults from the agent, then awaits either the streaming or
    non-streaming printer with a shared set of keyword arguments.
    """
    # Fall back to the default markdown tags when none (or an empty set) given.
    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}

    # Resolve markdown from the agent unless explicitly overridden;
    # structured output never renders as markdown.
    if markdown is None:
        markdown = agent.markdown
    if agent.output_schema is not None:
        markdown = False

    # Resolve streaming from the agent when the caller did not decide.
    if stream is None:
        stream = agent.stream or False

    # The streaming printer always receives stream_events=True explicitly,
    # so drop any caller-supplied value.
    kwargs.pop("stream_events", None)

    # Arguments common to both printers.
    shared_kwargs: Dict[str, Any] = dict(
        agent=agent,
        input=input,
        session_id=session_id,
        session_state=session_state,
        user_id=user_id,
        run_id=run_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        knowledge_filters=knowledge_filters,
        debug_mode=debug_mode,
        markdown=markdown,
        show_message=show_message,
        show_reasoning=show_reasoning,
        show_full_reasoning=show_full_reasoning,
        tags_to_include_in_markdown=tags_to_include_in_markdown,
        console=console,
        add_history_to_context=add_history_to_context,
        dependencies=dependencies,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        metadata=metadata,
    )
    if stream:
        await aprint_response_stream(stream_events=True, **shared_kwargs, **kwargs)
    else:
        await aprint_response(**shared_kwargs, **kwargs)
def cli_app(
    agent: Agent,
    input: Optional[str] = None,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    user: str = "User",
    emoji: str = ":sunglasses:",
    stream: bool = False,
    markdown: bool = False,
    exit_on: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Run an interactive command-line interface to interact with the agent.

    Prints a response for `input` (if given), then loops on user prompts
    until one of the exit words is entered.

    Raises:
        NotImplementedError: If the agent uses async or MCP tools, which
            require `acli_app`.
    """
    from inspect import isawaitable

    from rich.prompt import Prompt

    # Reject async/MCP tools up front: they only work with `acli_app`.
    if isinstance(agent.tools, list):
        for candidate in agent.tools:
            if isawaitable(candidate):
                raise NotImplementedError("Use `acli_app` to use async tools.")
            # MRO name check stands in for isinstance(tool, (MCPTools,
            # MultiMCPTools)) so we avoid importing the MCP modules.
            mro = getattr(type(candidate), "__mro__", None)
            if mro and any(klass.__name__ in ["MCPTools", "MultiMCPTools"] for klass in mro):
                raise NotImplementedError("Use `acli_app` to use MCP tools.")

    def _respond(message: str) -> None:
        # Shared printer for the seed input and every interactive turn.
        agent_print_response(
            agent,
            input=message,
            stream=stream,
            markdown=markdown,
            user_id=user_id,
            session_id=session_id,
            **kwargs,
        )

    if input:
        _respond(input)

    stop_words = exit_on or ["exit", "quit", "bye"]
    while True:
        message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
        if message in stop_words:
            break
        _respond(message)
async def acli_app(
    agent: Agent,
    input: Optional[str] = None,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    user: str = "User",
    emoji: str = ":sunglasses:",
    stream: bool = False,
    markdown: bool = False,
    exit_on: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """
    Run an interactive command-line interface to interact with the agent.
    Works with agent dependencies requiring async logic.
    """
    from rich.prompt import Prompt

    async def _respond(message: str) -> None:
        # Shared async printer for the seed input and every interactive turn.
        await agent_aprint_response(
            agent,
            input=message,
            stream=stream,
            markdown=markdown,
            user_id=user_id,
            session_id=session_id,
            **kwargs,
        )

    if input:
        await _respond(input)

    stop_words = exit_on or ["exit", "quit", "bye"]
    while True:
        message = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
        if message in stop_words:
            break
        await _respond(message)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_cli.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_default_tools.py | """Built-in tool factory functions for Agent."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Union,
cast,
)
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.culture.manager import CultureManager
from agno.db.base import BaseDb, SessionType
from agno.filters import FilterExpr
from agno.knowledge.types import KnowledgeFilter
from agno.memory import MemoryManager
from agno.models.message import Message, MessageReferences
from agno.run import RunContext
from agno.run.agent import RunOutput
from agno.session import AgentSession
from agno.tools.function import Function
from agno.utils.knowledge import get_agentic_or_user_search_filters
from agno.utils.log import (
log_debug,
log_info,
log_warning,
)
from agno.utils.timer import Timer
def get_update_user_memory_function(agent: Agent, user_id: Optional[str] = None, async_mode: bool = False) -> Function:
    """Build the `update_user_memory` tool bound to this agent's memory manager.

    NOTE: the inner functions' docstrings become the tool description shown
    to the model, so they are intentionally left unchanged here.

    Args:
        agent: Agent whose memory_manager handles the task.
        user_id: Optional user scoping for the memory task.
        async_mode: When True, return the awaitable variant of the tool.

    Returns:
        Function: A tool named "update_user_memory".
    """
    def update_user_memory(task: str) -> str:
        """Use this function to submit a task to modify the Agent's memory.
        Describe the task in detail and be specific.
        The task can include adding a memory, updating a memory, deleting a memory, or clearing all memories.
        Args:
            task: The task to update the memory. Be specific and describe the task in detail.
        Returns:
            str: A string indicating the status of the task.
        """
        # cast: memory_manager is expected to be initialized by run time —
        # assumption from the cast; not verifiable in this block.
        agent.memory_manager = cast(MemoryManager, agent.memory_manager)
        response = agent.memory_manager.update_memory_task(task=task, user_id=user_id)
        return response

    async def aupdate_user_memory(task: str) -> str:
        """Use this function to update the Agent's memory of a user.
        Describe the task in detail and be specific.
        The task can include adding a memory, updating a memory, deleting a memory, or clearing all memories.
        Args:
            task: The task to update the memory. Be specific and describe the task in detail.
        Returns:
            str: A string indicating the status of the task.
        """
        agent.memory_manager = cast(MemoryManager, agent.memory_manager)
        response = await agent.memory_manager.aupdate_memory_task(task=task, user_id=user_id)
        return response

    # Select the sync or async implementation; both register under the same
    # tool name.
    if async_mode:
        update_user_memory_function = aupdate_user_memory
    else:
        update_user_memory_function = update_user_memory  # type: ignore
    return Function.from_callable(update_user_memory_function, name="update_user_memory")
def get_update_cultural_knowledge_function(agent: Agent, async_mode: bool = False) -> Function:
    """Build the cultural-knowledge update tool bound to this agent.

    NOTE: the inner functions' docstrings become the tool description shown
    to the model, so they are intentionally left unchanged here.

    Args:
        agent: Agent whose culture_manager handles the task.
        async_mode: When True, return the awaitable variant of the tool.

    Returns:
        Function: A tool named "create_or_update_cultural_knowledge".
    """
    def update_cultural_knowledge(task: str) -> str:
        """Use this function to update a cultural knowledge."""
        # cast: culture_manager is expected to be initialized by run time —
        # assumption from the cast; not verifiable in this block.
        agent.culture_manager = cast(CultureManager, agent.culture_manager)
        response = agent.culture_manager.update_culture_task(task=task)
        return response

    async def aupdate_cultural_knowledge(task: str) -> str:
        """Use this function to update a cultural knowledge asynchronously."""
        agent.culture_manager = cast(CultureManager, agent.culture_manager)
        response = await agent.culture_manager.aupdate_culture_task(task=task)
        return response

    # Select the sync or async implementation; both register under the same
    # tool name.
    if async_mode:
        update_cultural_knowledge_function = aupdate_cultural_knowledge
    else:
        update_cultural_knowledge_function = update_cultural_knowledge  # type: ignore
    return Function.from_callable(
        update_cultural_knowledge_function,
        name="create_or_update_cultural_knowledge",
    )
def create_knowledge_search_tool(
    agent: Agent,
    run_response: Optional[RunOutput] = None,
    run_context: Optional[RunContext] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    enable_agentic_filters: Optional[bool] = False,
    async_mode: bool = False,
) -> Function:
    """Create a unified search_knowledge_base tool.

    Routes all knowledge searches through get_relevant_docs_from_knowledge(),
    which checks knowledge_retriever first and falls back to knowledge.search().
    This ensures the custom retriever is always respected when provided.

    Args:
        agent: Agent whose knowledge is searched; also supplies references_format.
        run_response: Run output on which reference metadata is accumulated.
        run_context: Run context forwarded to the retrieval call.
        knowledge_filters: User-supplied filters applied to every search.
        enable_agentic_filters: When True, the tool exposes a `filters`
            parameter so the model can narrow searches itself.
        async_mode: When True, return the awaitable variant of the tool.

    Returns:
        Function: A tool named "search_knowledge_base" (sync or async,
        with or without agentic filters).

    NOTE: the inner functions' docstrings become the tool description shown
    to the model, so they are intentionally left unchanged here.
    """

    def _format_results(docs: Optional[List[Union[Dict[str, Any], str]]]) -> str:
        # Render retrieved documents in the agent's configured format
        # (json or yaml; yaml is the fallback).
        if not docs:
            return "No documents found"
        if agent.references_format == "json":
            import json

            return json.dumps(docs, indent=2, default=str)
        else:
            import yaml

            return yaml.dump(docs, default_flow_style=False)

    def _track_references(docs: Optional[List[Union[Dict[str, Any], str]]], query: str, elapsed: float) -> None:
        # Record the query, retrieved docs and retrieval time on the run
        # output; no-op when there is no run_response or nothing was found.
        if run_response is not None and docs:
            references = MessageReferences(
                query=query,
                references=docs,
                time=round(elapsed, 4),
            )
            if run_response.references is None:
                run_response.references = []
            run_response.references.append(references)

    def _resolve_filters(
        agentic_filters: Optional[List[Any]] = None,
    ) -> Optional[Union[Dict[str, Any], List[FilterExpr]]]:
        # Merge model-provided filters (dicts or key/value objects) with the
        # user-supplied knowledge_filters; without agentic filters the user
        # filters pass through untouched.
        if agentic_filters:
            filters_dict: Dict[str, Any] = {}
            for filt in agentic_filters:
                if isinstance(filt, dict):
                    filters_dict.update(filt)
                elif hasattr(filt, "key") and hasattr(filt, "value"):
                    filters_dict[filt.key] = filt.value
            return get_agentic_or_user_search_filters(filters_dict, knowledge_filters)
        return knowledge_filters

    if enable_agentic_filters:

        def search_knowledge_base_with_filters(query: str, filters: Optional[List[KnowledgeFilter]] = None) -> str:
            """Use this function to search the knowledge base for information about a query.
            Args:
                query: The query to search for.
                filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.
            Returns:
                str: A string containing the response from the knowledge base.
            """
            retrieval_timer = Timer()
            retrieval_timer.start()
            try:
                from agno.agent import _messages

                docs = _messages.get_relevant_docs_from_knowledge(
                    agent,
                    query=query,
                    filters=_resolve_filters(filters),
                    validate_filters=True,
                    run_context=run_context,
                )
            except Exception as e:
                # Errors surface to the model as a short message, not a raise.
                log_warning(f"Knowledge search failed: {e}")
                return f"Error searching knowledge base: {type(e).__name__}"
            # NOTE(review): elapsed is read before stop(); assumes
            # Timer.elapsed is valid while the timer is running — confirm.
            _track_references(docs, query, retrieval_timer.elapsed)
            retrieval_timer.stop()
            log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
            return _format_results(docs)

        async def asearch_knowledge_base_with_filters(
            query: str, filters: Optional[List[KnowledgeFilter]] = None
        ) -> str:
            """Use this function to search the knowledge base for information about a query.
            Args:
                query: The query to search for.
                filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.
            Returns:
                str: A string containing the response from the knowledge base.
            """
            retrieval_timer = Timer()
            retrieval_timer.start()
            try:
                from agno.agent import _messages

                docs = await _messages.aget_relevant_docs_from_knowledge(
                    agent,
                    query=query,
                    filters=_resolve_filters(filters),
                    validate_filters=True,
                    run_context=run_context,
                )
            except Exception as e:
                log_warning(f"Knowledge search failed: {e}")
                return f"Error searching knowledge base: {type(e).__name__}"
            _track_references(docs, query, retrieval_timer.elapsed)
            retrieval_timer.stop()
            log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
            return _format_results(docs)

        # Both variants register under the same public tool name.
        if async_mode:
            return Function.from_callable(asearch_knowledge_base_with_filters, name="search_knowledge_base")
        return Function.from_callable(search_knowledge_base_with_filters, name="search_knowledge_base")
    else:

        def search_knowledge_base(query: str) -> str:
            """Use this function to search the knowledge base for information about a query.
            Args:
                query: The query to search for.
            Returns:
                str: A string containing the response from the knowledge base.
            """
            retrieval_timer = Timer()
            retrieval_timer.start()
            try:
                from agno.agent import _messages

                docs = _messages.get_relevant_docs_from_knowledge(
                    agent,
                    query=query,
                    filters=knowledge_filters,
                    run_context=run_context,
                )
            except Exception as e:
                log_warning(f"Knowledge search failed: {e}")
                return f"Error searching knowledge base: {type(e).__name__}"
            _track_references(docs, query, retrieval_timer.elapsed)
            retrieval_timer.stop()
            log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
            return _format_results(docs)

        async def asearch_knowledge_base(query: str) -> str:
            """Use this function to search the knowledge base for information about a query.
            Args:
                query: The query to search for.
            Returns:
                str: A string containing the response from the knowledge base.
            """
            retrieval_timer = Timer()
            retrieval_timer.start()
            try:
                from agno.agent import _messages

                docs = await _messages.aget_relevant_docs_from_knowledge(
                    agent,
                    query=query,
                    filters=knowledge_filters,
                    run_context=run_context,
                )
            except Exception as e:
                log_warning(f"Knowledge search failed: {e}")
                return f"Error searching knowledge base: {type(e).__name__}"
            _track_references(docs, query, retrieval_timer.elapsed)
            retrieval_timer.stop()
            log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
            return _format_results(docs)

        if async_mode:
            return Function.from_callable(asearch_knowledge_base, name="search_knowledge_base")
        return Function.from_callable(search_knowledge_base, name="search_knowledge_base")
def get_chat_history_function(agent: Agent, session: AgentSession) -> Callable:
    """Build the `get_chat_history` tool bound to the given session.

    NOTE: the inner function's docstring becomes the tool description shown
    to the model, so it is intentionally left unchanged here.
    """
    def get_chat_history(num_chats: Optional[int] = None) -> str:
        """Use this function to get the chat history between the user and agent.
        Args:
            num_chats: The number of chats to return.
                Each chat contains 2 messages. One from the user and one from the agent.
                Default: None
        Returns:
            str: A JSON of a list of dictionaries representing the chat history.
        Example:
            - To get the last chat, use num_chats=1.
            - To get the last 5 chats, use num_chats=5.
            - To get all chats, use num_chats=None.
            - To get the first chat, use num_chats=None and pick the first message.
        """
        import json

        history: List[Dict[str, Any]] = []
        all_chats = session.get_messages()
        if len(all_chats) == 0:
            return json.dumps([])
        for chat in all_chats:  # type: ignore
            history.append(chat.to_dict())  # type: ignore
        if num_chats is not None:
            # NOTE(review): this slices individual MESSAGES, not chat pairs —
            # num_chats=1 yields one message although the docstring promises a
            # full chat (2 messages). Confirm whether this is intended.
            history = history[-num_chats:]
        return json.dumps(history)

    return get_chat_history
def get_tool_call_history_function(agent: Agent, session: AgentSession) -> Callable:
    """Build the `get_tool_call_history` tool bound to the given session.

    NOTE: the inner function's docstring becomes the tool description shown
    to the model, so it is intentionally left unchanged here.
    """
    def get_tool_call_history(num_calls: int = 3) -> str:
        """Use this function to get the tools called by the agent in reverse chronological order.
        Args:
            num_calls: The number of tool calls to return.
                Default: 3
        Returns:
            str: A JSON of a list of dictionaries representing the tool call history.
        Example:
            - To get the last tool call, use num_calls=1.
            - To get all tool calls, use num_calls=None.
        """
        import json

        # Retrieval and ordering are delegated to the session.
        tool_calls = session.get_tool_calls(num_calls=num_calls)
        if len(tool_calls) == 0:
            return json.dumps([])
        return json.dumps(tool_calls)

    return get_tool_call_history
def update_session_state_tool(agent: Agent, run_context: RunContext, session_state_updates: dict) -> str:
    """
    Update the shared session state. Provide any updates as a dictionary of key-value pairs.
    Example:
    "session_state_updates": {"shopping_list": ["milk", "eggs", "bread"]}
    Args:
        session_state_updates (dict): The updates to apply to the shared session state. Should be a dictionary of key-value pairs.
    """
    # Lazily create the state dict on first use, then merge the updates in.
    if run_context.session_state is None:
        run_context.session_state = {}
    run_context.session_state.update(session_state_updates)
    return f"Updated session state: {run_context.session_state}"
def make_update_session_state_entrypoint(agent: Agent) -> Callable:
    """Create a closure that binds agent to the update_session_state_tool function.

    NOTE: the inner entrypoint's docstring is the tool description exposed to
    the model, so it is intentionally left unchanged here.
    """
    def _entrypoint(run_context: RunContext, session_state_updates: dict) -> str:
        """
        Update the shared session state. Provide any updates as a dictionary of key-value pairs.
        Example:
        "session_state_updates": {"shopping_list": ["milk", "eggs", "bread"]}
        Args:
            session_state_updates (dict): The updates to apply to the shared session state. Should be a dictionary of key-value pairs.
        """
        # Forward to the shared implementation with the captured agent.
        return update_session_state_tool(agent, run_context, session_state_updates)

    return _entrypoint
def add_to_knowledge(agent: Agent, query: str, result: str) -> str:
    """Use this function to add information to the knowledge base for future use.
    Args:
        query (str): The query or topic to add.
        result (str): The actual content or information to store.
    Returns:
        str: A string indicating the status of the addition.
    """
    import json

    if agent.knowledge is None:
        return "Knowledge not available"
    # Check if knowledge supports insert
    insert_fn = getattr(agent.knowledge, "insert", None)
    if not callable(insert_fn):
        return "Knowledge does not support insert"
    # Build an identifier-friendly document name from the query: spaces become
    # underscores and common punctuation is stripped.
    document_name = query.replace(" ", "_").replace("?", "").replace("!", "").replace(".", "")
    document_content = json.dumps({"query": query, "result": result})
    log_info(f"Adding document to Knowledge: {document_name}: {document_content}")
    from agno.knowledge.reader.text_reader import TextReader

    # Stored as a {"query": ..., "result": ...} JSON text document.
    insert_fn(name=document_name, text_content=document_content, reader=TextReader())
    return "Successfully added to knowledge base"
def get_previous_sessions_messages_function(
    agent: Agent, num_history_sessions: Optional[int] = 2, user_id: Optional[str] = None
) -> Callable:
    """Factory function to create a get_previous_session_messages function.

    Args:
        agent: The Agent instance.
        num_history_sessions: The last n sessions to be taken from db.
        user_id: The user ID to filter sessions by.

    Returns:
        Callable: A function that retrieves messages from previous sessions.

    NOTE: the inner function's docstring becomes the tool description shown
    to the model, so it is intentionally left unchanged here.
    """

    def get_previous_session_messages() -> str:
        """Use this function to retrieve messages from previous chat sessions.
        USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.
        Returns:
            str: JSON formatted list of message pairs from previous sessions
        """
        # TODO: Review and Test this function
        import json

        if agent.db is None:
            return json.dumps([])
        agent.db = cast(BaseDb, agent.db)
        # Fetch the most recent agent sessions first.
        selected_sessions = agent.db.get_sessions(
            session_type=SessionType.AGENT,
            limit=num_history_sessions,
            user_id=user_id,
            sort_by="created_at",
            sort_order="desc",
        )
        all_messages: list = []
        # Dedupe on the concatenated (user, assistant) content of each pair.
        seen_message_pairs: set = set()
        for session in selected_sessions:
            if isinstance(session, AgentSession) and session.runs:
                for run in session.runs:
                    messages = run.messages
                    if messages is not None:
                        # Pair each user message with the next assistant reply.
                        last_user = None
                        for msg in messages:
                            try:
                                if msg.role == "user":
                                    last_user = msg
                                elif msg.role == "assistant" and last_user is not None:
                                    user_content = last_user.content
                                    assistant_content = msg.content
                                    # Skip pairs where either side has no content.
                                    if user_content is None or assistant_content is None:
                                        continue
                                    msg_pair_id = f"{user_content}:{assistant_content}"
                                    if msg_pair_id not in seen_message_pairs:
                                        seen_message_pairs.add(msg_pair_id)
                                        all_messages.append(Message.model_validate(last_user))
                                        all_messages.append(Message.model_validate(msg))
                                    last_user = None
                            except Exception as e:
                                # A malformed message must not abort the whole scan.
                                log_warning(f"Error processing message pair: {e}")
                                last_user = None
                                continue
        return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else json.dumps([])

    return get_previous_session_messages
async def aget_previous_sessions_messages_function(
    agent: Agent, num_history_sessions: Optional[int] = 2, user_id: Optional[str] = None
) -> Function:
    """Factory function to create a get_previous_session_messages function (async).

    Args:
        agent: The Agent instance.
        num_history_sessions: The last n sessions to be taken from db.
        user_id: The user ID to filter sessions by.

    Returns:
        Function: An async function that retrieves messages from previous sessions.

    NOTE: the inner function's docstring becomes the tool description shown
    to the model, so it is intentionally left unchanged here.
    """
    from agno.agent import _init

    async def aget_previous_session_messages() -> str:
        """Use this function to retrieve messages from previous chat sessions.
        USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.
        Returns:
            str: JSON formatted list of message pairs from previous sessions
        """
        # TODO: Review and Test this function
        import json

        if agent.db is None:
            return json.dumps([])
        # Await the DB call when the configured DB is async; otherwise call
        # the sync API directly.
        if _init.has_async_db(agent):
            selected_sessions = await agent.db.get_sessions(  # type: ignore
                session_type=SessionType.AGENT,
                limit=num_history_sessions,
                user_id=user_id,
                sort_by="created_at",
                sort_order="desc",
            )
        else:
            selected_sessions = agent.db.get_sessions(
                session_type=SessionType.AGENT,
                limit=num_history_sessions,
                user_id=user_id,
                sort_by="created_at",
                sort_order="desc",
            )
        all_messages: list = []
        # Dedupe on the concatenated (user, assistant) content of each pair.
        seen_message_pairs: set = set()
        for session in selected_sessions:  # type: ignore
            if isinstance(session, AgentSession) and session.runs:
                for run in session.runs:
                    messages = run.messages
                    if messages is not None:
                        # Pair each user message with the next assistant reply.
                        last_user = None
                        for msg in messages:
                            try:
                                if msg.role == "user":
                                    last_user = msg
                                elif msg.role == "assistant" and last_user is not None:
                                    user_content = last_user.content
                                    assistant_content = msg.content
                                    # Skip pairs where either side has no content.
                                    if user_content is None or assistant_content is None:
                                        continue
                                    msg_pair_id = f"{user_content}:{assistant_content}"
                                    if msg_pair_id not in seen_message_pairs:
                                        seen_message_pairs.add(msg_pair_id)
                                        all_messages.append(Message.model_validate(last_user))
                                        all_messages.append(Message.model_validate(msg))
                                    last_user = None
                            except Exception as e:
                                # A malformed message must not abort the whole scan.
                                log_warning(f"Error processing message pair: {e}")
                                last_user = None
                                continue
        return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else json.dumps([])

    return Function.from_callable(aget_previous_session_messages, name="get_previous_session_messages")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_default_tools.py",
"license": "Apache License 2.0",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_hooks.py | """Pre/post hooks for Agent."""
from __future__ import annotations
from inspect import iscoroutinefunction
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Iterator,
List,
Optional,
)
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.exceptions import InputCheckError, OutputCheckError
from agno.run import RunContext
from agno.run.agent import RunInput, RunOutput, RunOutputEvent
from agno.session import AgentSession
from agno.utils.events import (
create_post_hook_completed_event,
create_post_hook_started_event,
create_pre_hook_completed_event,
create_pre_hook_started_event,
handle_event,
)
from agno.utils.hooks import (
copy_args_for_background,
filter_hook_args,
is_guardrail_hook,
should_run_hook_in_background,
)
from agno.utils.log import (
log_error,
log_exception,
log_warning,
)
def execute_pre_hooks(
    agent: Agent,
    hooks: Optional[List[Callable[..., Any]]],
    run_response: RunOutput,
    run_input: RunInput,
    session: AgentSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> Iterator[RunOutputEvent]:
    """Execute multiple pre-hook functions in succession.

    Yields pre-hook started/completed events when ``stream_events`` is True.
    Guardrail failures (InputCheckError/OutputCheckError) always propagate to
    the caller; any other hook exception is logged and swallowed so a faulty
    hook does not abort the run. After all hooks run, ``run_response.input``
    is refreshed with the (possibly mutated) ``run_input``.

    Args:
        agent: The Agent whose pre-hooks are executed.
        hooks: Hook callables to run, in order; None is a no-op.
        run_response: Run output used for event bookkeeping.
        run_input: The run input; hooks may mutate it in place.
        session: Current agent session, passed to hooks that accept it.
        run_context: Current run context, passed to hooks that accept it.
        user_id: Optional user id forwarded to hooks.
        debug_mode: Per-run debug override; falls back to agent.debug_mode.
        stream_events: Whether to yield started/completed events.
        background_tasks: Queue for hooks that should run in the background.
        **kwargs: Extra values made available to hooks by parameter name.
    """
    from agno.agent._init import set_debug

    if hooks is None:
        return
    # Prepare arguments for this hook; each hook receives only the subset of
    # these it declares (see filter_hook_args).
    all_args = {
        "run_input": run_input,
        "run_context": run_context,
        "agent": agent,
        "session": session,
        "user_id": user_id,
        "debug_mode": debug_mode if debug_mode is not None else agent.debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode: run guardrails synchronously, buffer everything else.
    # Guardrails MUST block so InputCheckError/OutputCheckError can propagate.
    # Non-guardrail hooks are buffered and only queued after ALL guardrails pass —
    # this prevents side-effects (logging, webhooks) from firing on rejected input.
    # deepcopy runs AFTER the guardrail loop so mutations (e.g. PII masking) propagate.
    if agent._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    # Guardrail rejections must reach the caller.
                    raise
                except Exception as e:
                    # Other guardrail errors are logged but do not block the run.
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Snapshot args now that all guardrails have passed (and possibly
        # mutated the input), then queue the remaining hooks.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            # Copy args to prevent race conditions
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            yield handle_event(  # type: ignore
                run_response=run_response,
                event=create_pre_hook_started_event(
                    from_run_response=run_response,
                    run_input=run_input,
                    pre_hook_name=hook.__name__,
                ),
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
        try:
            # Filter arguments to only include those that the hook accepts
            filtered_args = filter_hook_args(hook, all_args)
            if iscoroutinefunction(hook):
                # Async hooks cannot be awaited from the sync path; skip with a warning.
                log_warning(
                    f"Async hook '{hook.__name__}' cannot be used with sync run(). Use arun() instead. Skipping hook."
                )
                continue
            hook(**filtered_args)
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_response,
                    event=create_pre_hook_completed_event(
                        from_run_response=run_response,
                        run_input=run_input,
                        pre_hook_name=hook.__name__,
                    ),
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            # Guardrail rejections must reach the caller.
            raise e
        except Exception as e:
            # Any other hook failure is logged and the remaining hooks still run.
            log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the pre-hook changed it
            set_debug(agent, debug_mode=debug_mode)
    # Update the input on the run_response
    run_response.input = run_input
async def aexecute_pre_hooks(
    agent: Agent,
    hooks: Optional[List[Callable[..., Any]]],
    run_response: RunOutput,
    run_input: RunInput,
    session: AgentSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> AsyncIterator[RunOutputEvent]:
    """Execute multiple pre-hook functions in succession (async version).

    Mirrors ``execute_pre_hooks`` but awaits coroutine hooks instead of skipping them.

    Behavior:
    - Global background mode (``agent._run_hooks_in_background`` with ``background_tasks``):
      guardrail hooks still run inline so their ``InputCheckError``/``OutputCheckError``
      can propagate and block the run; every other hook is scheduled on
      ``background_tasks`` and the function returns early (``run_response.input``
      is NOT updated on this path).
    - Otherwise hooks run sequentially; a hook marked via the ``@hook`` decorator
      is individually scheduled in the background instead.
    - When ``stream_events`` is True, started/completed events are yielded around
      each inline hook.
    - ``InputCheckError``/``OutputCheckError`` propagate to the caller; any other
      hook exception is logged and swallowed so remaining hooks still run.
    """
    from agno.agent._init import set_debug
    if hooks is None:
        return
    # Full argument set shared by all hooks; each hook receives only the subset
    # of these that its signature accepts (see filter_hook_args below).
    all_args = {
        "run_input": run_input,
        "agent": agent,
        "session": session,
        "run_context": run_context,
        "user_id": user_id,
        "debug_mode": debug_mode if debug_mode is not None else agent.debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode — see execute_pre_hooks for pattern explanation.
    if agent._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                # Guardrails must run inline: their check errors have to abort the run.
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    if iscoroutinefunction(hook):
                        await hook(**filtered_args)
                    else:
                        hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args once so background hooks don't race with the ongoing run.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            # Copy args to prevent race conditions
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            yield handle_event(  # type: ignore
                run_response=run_response,
                event=create_pre_hook_started_event(
                    from_run_response=run_response,
                    run_input=run_input,
                    pre_hook_name=hook.__name__,
                ),
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
        try:
            # Filter arguments to only include those that the hook accepts
            filtered_args = filter_hook_args(hook, all_args)
            if iscoroutinefunction(hook):
                await hook(**filtered_args)
            else:
                # Synchronous function
                hook(**filtered_args)
            # Completed event is only emitted when the hook did not raise.
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_response,
                    event=create_pre_hook_completed_event(
                        from_run_response=run_response,
                        run_input=run_input,
                        pre_hook_name=hook.__name__,
                    ),
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail failures are logged but do not abort the run.
            log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the pre-hook changed it
            set_debug(agent, debug_mode=debug_mode)
    # Update the input on the run_response (pre-hooks may have mutated run_input)
    run_response.input = run_input
def execute_post_hooks(
    agent: Agent,
    hooks: Optional[List[Callable[..., Any]]],
    run_output: RunOutput,
    session: AgentSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> Iterator[RunOutputEvent]:
    """Execute multiple post-hook functions in succession.

    Sync counterpart of ``aexecute_post_hooks``: coroutine hooks cannot be
    awaited here and are skipped with a warning.

    Behavior:
    - Global background mode (``agent._run_hooks_in_background`` with
      ``background_tasks``): guardrail hooks run inline so their
      ``InputCheckError``/``OutputCheckError`` can propagate; all other hooks
      are scheduled on ``background_tasks`` and the function returns early.
    - Otherwise hooks run sequentially; a hook marked via the ``@hook``
      decorator is individually scheduled in the background instead.
    - When ``stream_events`` is True, started/completed events are yielded
      around each inline hook.
    - ``InputCheckError``/``OutputCheckError`` propagate; any other hook
      exception is logged and swallowed so remaining hooks still run.
    """
    from agno.agent._init import set_debug
    if hooks is None:
        return
    # Full argument set shared by all hooks; each hook receives only the subset
    # of these that its signature accepts (see filter_hook_args below).
    all_args = {
        "run_output": run_output,
        "agent": agent,
        "session": session,
        "user_id": user_id,
        "run_context": run_context,
        "debug_mode": debug_mode if debug_mode is not None else agent.debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode — see execute_pre_hooks for pattern explanation.
    if agent._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                # Guardrails must run inline: their check errors have to abort the run.
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args once so background hooks don't race with the ongoing run.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            # Copy args to prevent race conditions
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            yield handle_event(  # type: ignore
                run_response=run_output,
                event=create_post_hook_started_event(
                    from_run_response=run_output,
                    post_hook_name=hook.__name__,
                ),
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
        try:
            # Filter arguments to only include those that the hook accepts
            filtered_args = filter_hook_args(hook, all_args)
            if iscoroutinefunction(hook):
                # Cannot await in a sync generator — skip rather than leak a coroutine.
                log_warning(
                    f"Async hook '{hook.__name__}' cannot be used with sync run(). Use arun() instead. Skipping hook."
                )
                continue
            hook(**filtered_args)
            # Completed event is only emitted when the hook did not raise.
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_output,
                    event=create_post_hook_completed_event(
                        from_run_response=run_output,
                        post_hook_name=hook.__name__,
                    ),
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail failures are logged but do not abort the run.
            log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the post-hook changed it
            set_debug(agent, debug_mode=debug_mode)
async def aexecute_post_hooks(
    agent: Agent,
    hooks: Optional[List[Callable[..., Any]]],
    run_output: RunOutput,
    session: AgentSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> AsyncIterator[RunOutputEvent]:
    """Execute multiple post-hook functions in succession (async version).

    Mirrors ``execute_post_hooks`` but awaits coroutine hooks instead of skipping them.

    Behavior:
    - Global background mode (``agent._run_hooks_in_background`` with
      ``background_tasks``): guardrail hooks run inline so their
      ``InputCheckError``/``OutputCheckError`` can propagate; all other hooks
      are scheduled on ``background_tasks`` and the function returns early.
    - Otherwise hooks run sequentially; a hook marked via the ``@hook``
      decorator is individually scheduled in the background instead.
    - When ``stream_events`` is True, started/completed events are yielded
      around each inline hook.
    - ``InputCheckError``/``OutputCheckError`` propagate; any other hook
      exception is logged and swallowed so remaining hooks still run.
    """
    from agno.agent._init import set_debug
    if hooks is None:
        return
    # Full argument set shared by all hooks; each hook receives only the subset
    # of these that its signature accepts (see filter_hook_args below).
    all_args = {
        "run_output": run_output,
        "agent": agent,
        "session": session,
        "run_context": run_context,
        "user_id": user_id,
        "debug_mode": debug_mode if debug_mode is not None else agent.debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode — see execute_pre_hooks for pattern explanation.
    if agent._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                # Guardrails must run inline: their check errors have to abort the run.
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    if iscoroutinefunction(hook):
                        await hook(**filtered_args)
                    else:
                        hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args once so background hooks don't race with the ongoing run.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            # Copy args to prevent race conditions
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            yield handle_event(  # type: ignore
                run_response=run_output,
                event=create_post_hook_started_event(
                    from_run_response=run_output,
                    post_hook_name=hook.__name__,
                ),
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
        try:
            # Filter arguments to only include those that the hook accepts
            filtered_args = filter_hook_args(hook, all_args)
            if iscoroutinefunction(hook):
                await hook(**filtered_args)
            else:
                hook(**filtered_args)
            # Completed event is only emitted when the hook did not raise.
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_output,
                    event=create_post_hook_completed_event(
                        from_run_response=run_output,
                        post_hook_name=hook.__name__,
                    ),
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail failures are logged but do not abort the run.
            log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the post-hook changed it
            set_debug(agent, debug_mode=debug_mode)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_hooks.py",
"license": "Apache License 2.0",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_init.py | """Initialization helpers for Agent."""
from __future__ import annotations
from os import getenv
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Literal,
Optional,
Sequence,
Union,
cast,
)
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.compression.manager import CompressionManager
from agno.culture.manager import CultureManager
from agno.db.base import AsyncBaseDb
from agno.learn.machine import LearningMachine
from agno.memory import MemoryManager
from agno.models.utils import get_model
from agno.session import SessionSummaryManager
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.log import (
log_debug,
log_exception,
log_info,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
from agno.utils.safe_formatter import SafeFormatter
from agno.utils.string import generate_id_from_name
def set_id(agent: Agent) -> None:
    """Assign a generated id to the agent when it does not already have one."""
    if agent.id is not None:
        return
    agent.id = generate_id_from_name(agent.name)
def set_debug(agent: Agent, debug_mode: Optional[bool] = None) -> None:
    """Set the global log level from agent settings, the per-run flag, and env vars."""
    # AGNO_DEBUG_LEVEL overrides the agent's configured level when it is "1" or "2".
    env_level = getenv("AGNO_DEBUG_LEVEL")
    if env_level in ("1", "2"):
        debug_level: Literal[1, 2] = cast(Literal[1, 2], int(env_level))
    else:
        debug_level = agent.debug_level
    # Debug logging is enabled by the agent flag, the per-run flag, or AGNO_DEBUG=true.
    env_debug_enabled = getenv("AGNO_DEBUG", "false").lower() == "true"
    if agent.debug_mode or debug_mode or env_debug_enabled:
        set_log_level_to_debug(level=debug_level)
    else:
        set_log_level_to_info()
def set_telemetry(agent: Agent) -> None:
    """Override the agent's telemetry flag from the AGNO_TELEMETRY env var, if set."""
    env_value = getenv("AGNO_TELEMETRY")
    if env_value is None:
        # No override requested; keep the agent's configured value.
        return
    agent.telemetry = env_value.lower() == "true"
def set_default_model(agent: Agent) -> None:
    """Fall back to OpenAIChat(gpt-4o) when the agent has no model configured.

    Raises:
        ImportError: If no model is set and the `openai` package is not installed.
    """
    if agent.model is not None:
        return
    try:
        from agno.models.openai import OpenAIChat
    except ModuleNotFoundError as e:
        log_exception(e)
        raise ImportError(
            "Agno agents use `openai` as the default model provider. Please provide a `model` or install `openai`."
        ) from e
    log_info("Setting default model to OpenAI Chat")
    agent.model = OpenAIChat(id="gpt-4o")
def set_culture_manager(agent: Agent) -> None:
    """Ensure the agent has a CultureManager wired to its model and db."""
    if agent.db is None:
        log_warning("Database not provided. Cultural knowledge will not be stored.")
    manager = agent.culture_manager
    if manager is None:
        agent.culture_manager = CultureManager(model=agent.model, db=agent.db)
    else:
        # Inject the agent's model/db only where the manager has none of its own.
        if manager.model is None:
            manager.model = agent.model
        if manager.db is None:
            manager.db = agent.db
    if agent.add_culture_to_context is None:
        agent.add_culture_to_context = (
            agent.enable_agentic_culture or agent.update_cultural_knowledge or agent.culture_manager is not None
        )
def set_memory_manager(agent: Agent) -> None:
    """Ensure the agent has a MemoryManager wired to its model and db."""
    if agent.db is None:
        log_warning("Database not provided. Memories will not be stored.")
    manager = agent.memory_manager
    if manager is None:
        agent.memory_manager = MemoryManager(model=agent.model, db=agent.db)
    else:
        # Inject the agent's model/db only where the manager has none of its own.
        if manager.model is None:
            manager.model = agent.model
        if manager.db is None:
            manager.db = agent.db
    if agent.add_memories_to_context is None:
        agent.add_memories_to_context = (
            agent.update_memory_on_run or agent.enable_agentic_memory or agent.memory_manager is not None
        )
def set_learning_machine(agent: Agent) -> None:
    """Initialize the agent's internal LearningMachine from its `learning` setting.

    Writes the result to the private `_learning` field, leaving the public
    `learning` field untouched:

    - `learning=None`/`False` -> disabled (`_learning = None`)
    - `learning=True`         -> default machine (user_profile + user_memory)
    - `learning=LearningMachine(...)` -> used as-is, with the agent's db/model
      injected where the machine has none

    A database is required; without one the machine is not initialized.
    """
    agent._learning_init_attempted = True
    learning = agent.learning
    # Explicitly disabled (False) or simply not configured (None).
    if learning is None or learning is False:
        agent._learning = None
        return
    # A db is mandatory for learning storage.
    if agent.db is None:
        log_warning("Database not provided. LearningMachine not initialized.")
        agent._learning = None
        return
    # learning=True: default machine with structured profile + unstructured memory.
    if learning is True:
        agent._learning = LearningMachine(db=agent.db, model=agent.model, user_profile=True, user_memory=True)
        return
    # learning=LearningMachine(...): inject missing dependencies.
    if isinstance(learning, LearningMachine):
        if learning.db is None:
            learning.db = agent.db
        if learning.model is None:
            learning.model = agent.model
        agent._learning = learning
def set_session_summary_manager(agent: Agent) -> None:
    """Ensure a SessionSummaryManager exists (when enabled) and has a model."""
    if agent.session_summary_manager is None and agent.enable_session_summaries:
        agent.session_summary_manager = SessionSummaryManager(model=agent.model)
    manager = agent.session_summary_manager
    if manager is None:
        return
    if manager.model is None:
        manager.model = agent.model
    if agent.add_session_summary_to_context is None:
        agent.add_session_summary_to_context = (
            agent.enable_session_summaries or agent.session_summary_manager is not None
        )
def set_compression_manager(agent: Agent) -> None:
    """Ensure a CompressionManager exists (when enabled) and has a model."""
    if agent.compression_manager is None and agent.compress_tool_results:
        agent.compression_manager = CompressionManager(
            model=agent.model,
        )
    manager = agent.compression_manager
    if manager is None:
        return
    if manager.model is None:
        manager.model = agent.model
    # The manager itself may force tool-result compression on for the agent.
    if manager.compress_tool_results:
        agent.compress_tool_results = True
def _initialize_session_state(
session_state: Dict[str, Any],
user_id: Optional[str] = None,
session_id: Optional[str] = None,
run_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Inject current_user_id, current_session_id, and current_run_id into session_state.
These transient values are stripped before persisting to the database (see _session.py)
but must be available at runtime so that tool functions and instruction templates can
reference them. Teams and Workflows already do this; this brings Agents to parity.
"""
if user_id:
session_state["current_user_id"] = user_id
if session_id is not None:
session_state["current_session_id"] = session_id
if run_id is not None:
session_state["current_run_id"] = run_id
return session_state
def has_async_db(agent: Agent) -> bool:
    """Return True if the db the agent is equipped with is an Async implementation."""
    if agent.db is None:
        return False
    return isinstance(agent.db, AsyncBaseDb)
def get_models(agent: Agent) -> None:
    """Resolve every model spec on the agent to a concrete instance and tag its role."""
    from agno.metrics import ModelType

    # (attribute name, role tag) for each model slot the agent can carry.
    model_slots = (
        ("model", ModelType.MODEL),
        ("reasoning_model", ModelType.REASONING_MODEL),
        ("parser_model", ModelType.PARSER_MODEL),
        ("output_model", ModelType.OUTPUT_MODEL),
    )
    for attr, role in model_slots:
        spec = getattr(agent, attr)
        if spec is None:
            continue
        resolved = get_model(spec)
        setattr(agent, attr, resolved)
        if resolved is not None:
            resolved.model_type = role
    # Late-bind the compression manager's model if it is still unset.
    if agent.compression_manager is not None and agent.compression_manager.model is None:
        agent.compression_manager.model = agent.model
def initialize_agent(agent: Agent, debug_mode: Optional[bool] = None) -> None:
    """Run one-time agent setup: defaults, logging, id, telemetry, and managers."""
    set_default_model(agent)
    set_debug(agent, debug_mode=debug_mode)
    set_id(agent)
    set_telemetry(agent)
    # Managers are only initialized when their corresponding features are in use.
    memory_needed = agent.update_memory_on_run or agent.enable_agentic_memory or agent.memory_manager is not None
    if memory_needed:
        set_memory_manager(agent)
    culture_needed = (
        agent.add_culture_to_context
        or agent.update_cultural_knowledge
        or agent.enable_agentic_culture
        or agent.culture_manager is not None
    )
    if culture_needed:
        set_culture_manager(agent)
    if agent.enable_session_summaries or agent.session_summary_manager is not None:
        set_session_summary_manager(agent)
    if agent.compress_tool_results or agent.compression_manager is not None:
        set_compression_manager(agent)
    if agent.learning is not None and agent.learning is not False:
        set_learning_machine(agent)
    log_debug(f"Agent ID: {agent.id}", center=True)
    if agent._formatter is None:
        agent._formatter = SafeFormatter()
def add_tool(agent: Agent, tool: Union[Toolkit, Callable, Function, Dict]) -> None:
    """Append a single tool to the agent's tool list.

    Raises:
        RuntimeError: If the agent's tools are provided by a callable factory,
            which cannot be appended to.
    """
    from agno.utils.callables import is_callable_factory

    if is_callable_factory(agent.tools, excluded_types=(Toolkit, Function)):
        raise RuntimeError(
            "Cannot add_tool() when tools is a callable factory. Use set_tools() to replace the factory."
        )
    if agent.tools:
        agent.tools.append(tool)  # type: ignore[union-attr]
    else:
        agent.tools = [tool]
def set_tools(agent: Agent, tools: Union[Sequence[Union[Toolkit, Callable, Function, Dict]], Callable]) -> None:
    """Replace the agent's tools with a new sequence or a callable factory."""
    from agno.utils.callables import is_callable_factory

    if is_callable_factory(tools, excluded_types=(Toolkit, Function)):
        # Store the factory as-is; previously resolved tools are now stale.
        agent.tools = tools  # type: ignore[assignment]
        agent._callable_tools_cache.clear()
        return
    agent.tools = list(tools) if tools else []  # type: ignore[arg-type]
async def connect_mcp_tools(agent: Agent) -> None:
    """Connect any unconnected MCP tools in the agent's tool list."""
    if not (agent.tools and isinstance(agent.tools, list)):
        return
    for tool in agent.tools:
        # Duck-typed check against MCPTools/MultiMCPTools via the MRO to avoid imports.
        mro = getattr(type(tool), "__mro__", None)
        is_mcp = mro is not None and any(c.__name__ in ("MCPTools", "MultiMCPTools") for c in mro)
        if not is_mcp or tool.initialized:  # type: ignore
            continue
        try:
            # Connect the MCP server and remember it for cleanup after the run.
            await tool.connect()  # type: ignore
            agent._mcp_tools_initialized_on_run.append(tool)  # type: ignore
        except Exception as e:
            log_warning(f"Error connecting tool: {str(e)}")
async def disconnect_mcp_tools(agent: Agent) -> None:
    """Close every MCP tool that was connected during this run, then clear the list."""
    for mcp_tool in agent._mcp_tools_initialized_on_run:
        try:
            await mcp_tool.close()
        except Exception as e:
            # Best-effort cleanup: a failing close must not block the others.
            log_warning(f"Error disconnecting tool: {str(e)}")
    agent._mcp_tools_initialized_on_run = []
def connect_connectable_tools(agent: Agent) -> None:
    """Connect tools that require connection management (e.g., database connections)."""
    if not (agent.tools and isinstance(agent.tools, list)):
        return
    for tool in agent.tools:
        needs_connect = (
            getattr(tool, "requires_connect", False)
            and hasattr(tool, "connect")
            and tool not in agent._connectable_tools_initialized_on_run
        )
        if not needs_connect:
            continue
        try:
            tool.connect()  # type: ignore
            # Remember the tool so it is disconnected after the run.
            agent._connectable_tools_initialized_on_run.append(tool)
        except Exception as e:
            log_warning(f"Error connecting tool: {str(e)}")
def disconnect_connectable_tools(agent: Agent) -> None:
    """Disconnect tools that require connection management, then clear the list."""
    for connectable in agent._connectable_tools_initialized_on_run:
        if not hasattr(connectable, "close"):
            continue
        try:
            connectable.close()  # type: ignore
        except Exception as e:
            # Best-effort cleanup: a failing close must not block the others.
            log_warning(f"Error disconnecting tool: {str(e)}")
    agent._connectable_tools_initialized_on_run = []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_init.py",
"license": "Apache License 2.0",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_managers.py | """Background task orchestration for memory, learning, and cultural knowledge."""
from __future__ import annotations
from asyncio import CancelledError, Task, create_task
from concurrent.futures import Future
from typing import (
TYPE_CHECKING,
List,
Optional,
)
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.metrics import RunMetrics
from agno.db.base import UserMemory
from agno.db.schemas.culture import CulturalKnowledge
from agno.models.message import Message
from agno.run.messages import RunMessages
from agno.session import AgentSession
from agno.utils.log import log_debug, log_warning
# ---------------------------------------------------------------------------
# Memory
# ---------------------------------------------------------------------------
def make_memories(
    agent: Agent,
    run_messages: RunMessages,
    user_id: Optional[str] = None,
) -> Optional[RunMetrics]:
    """Create user memories from the run's user and extra messages (sync).

    Returns a RunMetrics collector holding the usage incurred by memory creation.
    """
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    memory_enabled = agent.memory_manager is not None and agent.update_memory_on_run
    user_message_str = None
    if run_messages.user_message is not None:
        user_message_str = run_messages.user_message.get_content_string()
    # Only a non-empty user message is sent to the memory manager.
    if memory_enabled and user_message_str is not None and user_message_str.strip() != "":
        log_debug("Managing user memories")
        agent.memory_manager.create_user_memories(  # type: ignore
            message=user_message_str,
            user_id=user_id,
            agent_id=agent.id,
            run_metrics=collector,
        )
    extra_messages = run_messages.extra_messages
    if extra_messages is not None and len(extra_messages) > 0:
        parsed_messages = []
        for raw in extra_messages:
            if isinstance(raw, Message):
                parsed_messages.append(raw)
            elif isinstance(raw, dict):
                try:
                    parsed_messages.append(Message(**raw))
                except Exception as e:
                    log_warning(f"Failed to validate message during memory update: {e}")
            else:
                log_warning(f"Unsupported message type: {type(raw)}")
        # Drop messages whose content is missing or whitespace-only.
        non_empty_messages = [
            m for m in parsed_messages if m.content and (not isinstance(m.content, str) or m.content.strip() != "")
        ]
        if len(non_empty_messages) > 0:
            if memory_enabled:
                agent.memory_manager.create_user_memories(  # type: ignore
                    messages=non_empty_messages, user_id=user_id, agent_id=agent.id, run_metrics=collector
                )
            else:
                log_warning(
                    "Unable to add messages to memory: memory_manager not configured or update_memory_on_run is disabled"
                )
    return collector
async def amake_memories(
    agent: Agent,
    run_messages: RunMessages,
    user_id: Optional[str] = None,
) -> Optional[RunMetrics]:
    """Create user memories from the run's user and extra messages (async).

    Returns a RunMetrics collector holding the usage incurred by memory creation.
    """
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    memory_enabled = agent.memory_manager is not None and agent.update_memory_on_run
    user_message_str = None
    if run_messages.user_message is not None:
        user_message_str = run_messages.user_message.get_content_string()
    # Only a non-empty user message is sent to the memory manager.
    if memory_enabled and user_message_str is not None and user_message_str.strip() != "":
        log_debug("Managing user memories")
        await agent.memory_manager.acreate_user_memories(  # type: ignore
            message=user_message_str,
            user_id=user_id,
            agent_id=agent.id,
            run_metrics=collector,
        )
    extra_messages = run_messages.extra_messages
    if extra_messages is not None and len(extra_messages) > 0:
        parsed_messages = []
        for raw in extra_messages:
            if isinstance(raw, Message):
                parsed_messages.append(raw)
            elif isinstance(raw, dict):
                try:
                    parsed_messages.append(Message(**raw))
                except Exception as e:
                    log_warning(f"Failed to validate message during memory update: {e}")
            else:
                log_warning(f"Unsupported message type: {type(raw)}")
        # Drop messages whose content is missing or whitespace-only.
        non_empty_messages = [
            m for m in parsed_messages if m.content and (not isinstance(m.content, str) or m.content.strip() != "")
        ]
        if len(non_empty_messages) > 0:
            if memory_enabled:
                await agent.memory_manager.acreate_user_memories(  # type: ignore
                    messages=non_empty_messages, user_id=user_id, agent_id=agent.id, run_metrics=collector
                )
            else:
                log_warning(
                    "Unable to add messages to memory: memory_manager not configured or update_memory_on_run is disabled"
                )
    return collector
async def astart_memory_task(
    agent: Agent,
    run_messages: RunMessages,
    user_id: Optional[str],
    existing_task: Optional[Task],
) -> Optional[Task]:
    """Cancel any leftover memory task and start a new one when memory is enabled.

    Args:
        agent: The Agent instance.
        run_messages: The run messages containing the user message.
        user_id: The user ID for memory creation.
        existing_task: Memory task from a previous retry attempt, cancelled first.

    Returns:
        The newly created memory task, or None when memory creation is not needed.
    """
    # Tear down any task left over from a previous retry attempt.
    if existing_task is not None and not existing_task.done():
        existing_task.cancel()
        try:
            await existing_task
        except CancelledError:
            pass
    should_start = (
        run_messages.user_message is not None
        and agent.memory_manager is not None
        and agent.update_memory_on_run
        and not agent.enable_agentic_memory
    )
    if not should_start:
        return None
    log_debug("Starting memory creation in background task.")
    return create_task(amake_memories(agent, run_messages=run_messages, user_id=user_id))
def start_memory_future(
    agent: Agent,
    run_messages: RunMessages,
    user_id: Optional[str],
    existing_future: Optional[Future] = None,
) -> Optional[Future]:
    """Cancel any leftover memory future and start a new one when memory is enabled.

    Args:
        agent: The Agent instance.
        run_messages: The run messages containing the user message.
        user_id: The user ID for memory creation.
        existing_future: Memory future from a previous retry attempt, cancelled first.
            Note: cancel() only takes effect if the future has not started running.

    Returns:
        The newly submitted memory future, or None when memory creation is not needed.
    """
    if existing_future is not None and not existing_future.done():
        existing_future.cancel()
    should_start = (
        run_messages.user_message is not None
        and agent.memory_manager is not None
        and agent.update_memory_on_run
        and not agent.enable_agentic_memory
    )
    if not should_start:
        return None
    log_debug("Starting memory creation in background thread.")
    return agent.background_executor.submit(make_memories, agent, run_messages=run_messages, user_id=user_id)
def get_user_memories(agent: Agent, user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
    """Get the user memories for the given user ID.

    Args:
        agent: The Agent instance.
        user_id: The user ID to get the memories for. Falls back to the agent's
            cached user ID, then to "default".

    Returns:
        Optional[List[UserMemory]]: The user memories.
    """
    from agno.agent._init import set_memory_manager

    # Lazily create the memory manager on first use.
    if agent.memory_manager is None:
        set_memory_manager(agent)
    effective_user_id = user_id if user_id is not None else agent.user_id
    if effective_user_id is None:
        effective_user_id = "default"
    return agent.memory_manager.get_user_memories(user_id=effective_user_id)  # type: ignore
async def aget_user_memories(agent: Agent, user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
    """Get the user memories for the given user ID (async version).

    Args:
        agent: The Agent instance.
        user_id: The user ID to get the memories for. Falls back to the agent's
            cached user ID, then to "default".

    Returns:
        Optional[List[UserMemory]]: The user memories.
    """
    from agno.agent._init import set_memory_manager

    # Lazily create the memory manager on first use.
    if agent.memory_manager is None:
        set_memory_manager(agent)
    effective_user_id = user_id if user_id is not None else agent.user_id
    if effective_user_id is None:
        effective_user_id = "default"
    return await agent.memory_manager.aget_user_memories(user_id=effective_user_id)  # type: ignore
# ---------------------------------------------------------------------------
# Cultural knowledge
# ---------------------------------------------------------------------------
def make_cultural_knowledge(
    agent: Agent,
    run_messages: RunMessages,
) -> Optional[RunMetrics]:
    """Create cultural knowledge from the run's user message (sync).

    Returns a RunMetrics collector holding the usage incurred by the creation.
    """
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    should_create = (
        run_messages.user_message is not None
        and agent.culture_manager is not None
        and agent.update_cultural_knowledge
    )
    if should_create:
        log_debug("Creating cultural knowledge.")
        agent.culture_manager.create_cultural_knowledge(
            message=run_messages.user_message.get_content_string(),
            run_metrics=collector,
        )
    return collector
async def acreate_cultural_knowledge(
    agent: Agent,
    run_messages: RunMessages,
) -> Optional[RunMetrics]:
    """Create cultural knowledge from the run's user message (async).

    Returns a RunMetrics collector holding the usage incurred by the creation.
    """
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    should_create = (
        run_messages.user_message is not None
        and agent.culture_manager is not None
        and agent.update_cultural_knowledge
    )
    if should_create:
        log_debug("Creating cultural knowledge.")
        await agent.culture_manager.acreate_cultural_knowledge(
            message=run_messages.user_message.get_content_string(),
            run_metrics=collector,
        )
    return collector
async def astart_cultural_knowledge_task(
    agent: Agent,
    run_messages: RunMessages,
    existing_task: Optional[Task],
) -> Optional[Task]:
    """Cancel any leftover cultural-knowledge task and start a new one if enabled.

    Args:
        agent: The Agent instance.
        run_messages: The run messages containing the user message.
        existing_task: Task from a previous retry attempt, cancelled first.

    Returns:
        The newly created task, or None when cultural knowledge creation is not needed.
    """
    # Tear down any task left over from a previous retry attempt.
    if existing_task is not None and not existing_task.done():
        existing_task.cancel()
        try:
            await existing_task
        except CancelledError:
            pass
    should_start = (
        run_messages.user_message is not None
        and agent.culture_manager is not None
        and agent.update_cultural_knowledge
    )
    if not should_start:
        return None
    log_debug("Starting cultural knowledge creation in background task.")
    return create_task(acreate_cultural_knowledge(agent, run_messages=run_messages))
def start_cultural_knowledge_future(
    agent: Agent,
    run_messages: RunMessages,
    existing_future: Optional[Future] = None,
) -> Optional[Future]:
    """Cancel any leftover cultural-knowledge future and start a new one if enabled.

    Args:
        agent: The Agent instance.
        run_messages: The run messages containing the user message.
        existing_future: Future from a previous retry attempt, cancelled first.
            Note: cancel() only takes effect if the future has not started running.

    Returns:
        The newly submitted future, or None when cultural knowledge creation is not needed.
    """
    if existing_future is not None and not existing_future.done():
        existing_future.cancel()
    should_start = (
        run_messages.user_message is not None
        and agent.culture_manager is not None
        and agent.update_cultural_knowledge
    )
    if not should_start:
        return None
    log_debug("Starting cultural knowledge creation in background thread.")
    return agent.background_executor.submit(make_cultural_knowledge, agent, run_messages=run_messages)
def get_culture_knowledge(agent: Agent) -> Optional[List[CulturalKnowledge]]:
    """Return the cultural knowledge the agent has access to, or None without a manager."""
    manager = agent.culture_manager
    return manager.get_all_knowledge() if manager is not None else None
async def aget_culture_knowledge(agent: Agent) -> Optional[List[CulturalKnowledge]]:
    """Return the cultural knowledge the agent has access to, or None without a manager (async)."""
    manager = agent.culture_manager
    if manager is None:
        return None
    return await manager.aget_all_knowledge()
# ---------------------------------------------------------------------------
# Learning
# ---------------------------------------------------------------------------
def process_learnings(
    agent: Agent,
    run_messages: RunMessages,
    session: AgentSession,
    user_id: Optional[str],
) -> Optional[RunMetrics]:
    """Run learning extraction over the conversation (executed in a background thread).

    Returns a ``RunMetrics`` collector populated by the learning machine, or
    None when learning is disabled on the agent. Extraction errors are logged
    and swallowed so the background thread never propagates an exception.
    """
    learning = agent._learning
    if learning is None:
        return None
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    try:
        # Convert run messages to list format expected by LearningMachine
        message_list = run_messages.messages if run_messages else []
        learning.process(
            messages=message_list,
            user_id=user_id,
            session_id=session.session_id if session else None,
            agent_id=agent.id,
            team_id=agent.team_id,
            run_metrics=collector,
        )
        log_debug("Learning extraction completed.")
    except Exception as e:
        log_warning(f"Error processing learnings: {e}")
    return collector
async def aprocess_learnings(
    agent: Agent,
    run_messages: RunMessages,
    session: AgentSession,
    user_id: Optional[str],
) -> Optional[RunMetrics]:
    """Run learning extraction over the conversation (async variant).

    Returns a ``RunMetrics`` collector populated by the learning machine, or
    None when learning is disabled on the agent. Extraction errors are logged
    and swallowed rather than propagated.
    """
    learning = agent._learning
    if learning is None:
        return None
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    try:
        message_list = run_messages.messages if run_messages else []
        await learning.aprocess(
            messages=message_list,
            user_id=user_id,
            session_id=session.session_id if session else None,
            agent_id=agent.id,
            team_id=agent.team_id,
            run_metrics=collector,
        )
        log_debug("Learning extraction completed.")
    except Exception as e:
        log_warning(f"Error processing learnings: {e}")
    return collector
async def astart_learning_task(
    agent: Agent,
    run_messages: RunMessages,
    session: AgentSession,
    user_id: Optional[str],
    existing_task: Optional[Task] = None,
) -> Optional[Task]:
    """Schedule learning extraction as an asyncio task.

    A still-pending task from a previous retry attempt is cancelled and then
    awaited (its ``CancelledError`` is swallowed) before a new task is
    created.

    Args:
        agent: The Agent instance.
        run_messages: The run messages containing conversation.
        session: The agent session.
        user_id: The user ID for learning extraction.
        existing_task: An existing task to cancel before starting a new one.

    Returns:
        The newly created task, or None when learning is disabled.
    """
    if existing_task is not None and not existing_task.done():
        existing_task.cancel()
        try:
            await existing_task
        except CancelledError:
            pass
    if agent._learning is None:
        return None
    log_debug("Starting learning extraction as async task.")
    extraction = aprocess_learnings(
        agent,
        run_messages=run_messages,
        session=session,
        user_id=user_id,
    )
    return create_task(extraction)
def start_learning_future(
    agent: Agent,
    run_messages: RunMessages,
    session: AgentSession,
    user_id: Optional[str],
    existing_future: Optional[Future] = None,
) -> Optional[Future]:
    """Schedule learning extraction on the agent's background executor.

    A still-pending future from a previous retry attempt is cancelled first;
    ``cancel()`` only succeeds while the future has not started running.

    Args:
        agent: The Agent instance.
        run_messages: The run messages containing conversation.
        session: The agent session.
        user_id: The user ID for learning extraction.
        existing_future: An existing future to cancel before starting a new one.

    Returns:
        The newly submitted future, or None when learning is disabled.
    """
    if existing_future is not None and not existing_future.done():
        existing_future.cancel()
    if agent._learning is None:
        return None
    log_debug("Starting learning extraction in background thread.")
    return agent.background_executor.submit(
        process_learnings,
        agent,
        run_messages=run_messages,
        session=session,
        user_id=user_id,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_managers.py",
"license": "Apache License 2.0",
"lines": 429,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_messages.py | """System and user message construction helpers for Agent."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Type,
Union,
)
from pydantic import BaseModel
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.agent._utils import convert_dependencies_to_string, convert_documents_to_string
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.message import Message, MessageReferences
from agno.models.response import ModelResponse
from agno.run import RunContext
from agno.run.agent import RunOutput
from agno.run.messages import RunMessages
from agno.session import AgentSession
from agno.tools.function import Function
from agno.utils.agent import (
aexecute_instructions,
aexecute_system_message,
execute_instructions,
execute_system_message,
)
from agno.utils.common import is_typed_dict
from agno.utils.log import log_debug, log_warning
from agno.utils.message import filter_tool_calls, get_text_from_message
from agno.utils.prompts import get_json_output_prompt, get_response_model_format_prompt
from agno.utils.timer import Timer
def _get_resolved_knowledge(agent: "Agent", run_context: Optional[RunContext] = None) -> Any:
    """Resolve the knowledge source, preferring run_context over agent.knowledge."""
    from agno.utils.callables import get_resolved_knowledge

    resolved = get_resolved_knowledge(agent, run_context)
    return resolved
# ---------------------------------------------------------------------------
# Message formatting
# ---------------------------------------------------------------------------
def format_message_with_state_variables(
    agent: Agent,
    message: Any,
    run_context: Optional[RunContext] = None,
) -> Any:
    """Format a string message by substituting run-context variables.

    Occurrences of ``{var}`` for any variable known to the run context
    (session_state, dependencies, metadata, user_id) are replaced with the
    corresponding value. Unknown ``{...}`` placeholders are left untouched,
    and non-string messages are returned unchanged.

    Args:
        agent: The Agent instance (unused here; kept for signature compatibility).
        message: The message to format; only ``str`` values are processed.
        run_context: Optional run context supplying the substitution values.

    Returns:
        The formatted string, or the original message when it is not a string
        or substitution fails.
    """
    import re
    import string
    from collections import ChainMap

    if not isinstance(message, str):
        return message
    # Extract values from run_context
    session_state = run_context.session_state if run_context else None
    dependencies = run_context.dependencies if run_context else None
    metadata = run_context.metadata if run_context else None
    user_id = run_context.user_id if run_context else None
    # Earlier maps win on key collisions: session_state > dependencies > metadata > user_id.
    # Should already be resolved and passed from run() method.
    format_variables = ChainMap(
        session_state if session_state is not None else {},
        dependencies or {},
        metadata or {},
        {"user_id": user_id} if user_id is not None else {},
    )
    # Rewrite {var} -> ${var} for each known variable so string.Template can
    # substitute them while leaving unknown {...} placeholders intact.
    # No deepcopy needed: str is immutable, and re.sub returns a new string.
    converted_msg = message
    for var_name in format_variables.keys():
        pattern = r"\{" + re.escape(var_name) + r"\}"
        replacement = "${" + var_name + "}"
        converted_msg = re.sub(pattern, replacement, converted_msg)
    # Use Template to safely substitute variables
    template = string.Template(converted_msg)
    try:
        return template.safe_substitute(format_variables)
    except Exception as e:
        log_warning(f"Template substitution failed: {e}")
        return message
# ---------------------------------------------------------------------------
# System message
# ---------------------------------------------------------------------------
def get_system_message(
    agent: Agent,
    session: AgentSession,
    run_context: Optional[RunContext] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    add_session_state_to_context: Optional[bool] = None,
) -> Optional[Message]:
    """Return the system message for the Agent.

    1. If the system_message is provided, use that.
    2. If build_context is False, return None.
    3. Build and return the default system message for the Agent.

    Args:
        agent: The Agent instance whose configuration drives the message.
        session: The agent session (supplies the session summary).
        run_context: Optional run context supplying session_state, user_id
            and output_schema.
        tools: Tools available for this run; forwarded to the model so it
            can contribute model-specific instructions.
        add_session_state_to_context: When True, append the session state
            to the system message.

    Returns:
        The system Message, or None when no content is produced.
    """
    # Extract values from run_context
    from agno.agent._init import set_culture_manager, set_memory_manager

    session_state = run_context.session_state if run_context else None
    user_id = run_context.user_id if run_context else None
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    # 1. If the system_message is provided, use that.
    if agent.system_message is not None:
        if isinstance(agent.system_message, Message):
            return agent.system_message
        sys_message_content: str = ""
        if isinstance(agent.system_message, str):
            sys_message_content = agent.system_message
        elif callable(agent.system_message):
            sys_message_content = execute_system_message(
                agent=agent, system_message=agent.system_message, session_state=session_state, run_context=run_context
            )
            if not isinstance(sys_message_content, str):
                raise Exception("system_message must return a string")
        # Format the system message with the session state variables
        if agent.resolve_in_context:
            sys_message_content = format_message_with_state_variables(
                agent,
                sys_message_content,
                run_context=run_context,
            )
        # type: ignore
        return Message(role=agent.system_message_role, content=sys_message_content)
    # 2. If build_context is False, return None.
    if not agent.build_context:
        return None
    if agent.model is None:
        raise Exception("model not set")
    # 3. Build and return the default system message for the Agent.
    # 3.1 Build the list of instructions for the system message
    instructions: List[str] = []
    if agent.instructions is not None:
        _instructions = agent.instructions
        if callable(agent.instructions):
            _instructions = execute_instructions(
                agent=agent, instructions=agent.instructions, session_state=session_state, run_context=run_context
            )
        if isinstance(_instructions, str):
            instructions.append(_instructions)
        elif isinstance(_instructions, list):
            instructions.extend(_instructions)
    # 3.1.1 Add instructions from the Model
    _model_instructions = agent.model.get_instructions_for_model(tools)
    if _model_instructions is not None:
        instructions.extend(_model_instructions)
    # 3.2 Build a list of additional information for the system message
    additional_information: List[str] = []
    # 3.2.1 Add instructions for using markdown
    if agent.markdown and output_schema is None:
        additional_information.append("Use markdown to format your answers.")
    # 3.2.2 Add the current datetime
    if agent.add_datetime_to_context:
        from datetime import datetime

        tz = None
        if agent.timezone_identifier:
            try:
                from zoneinfo import ZoneInfo

                tz = ZoneInfo(agent.timezone_identifier)
            except Exception:
                log_warning("Invalid timezone identifier")
        time = datetime.now(tz) if tz else datetime.now()
        additional_information.append(f"The current time is {time}.")
    # 3.2.3 Add the current location
    if agent.add_location_to_context:
        from agno.utils.location import get_location

        location = get_location()
        if location:
            location_str = ", ".join(
                filter(
                    None,
                    [
                        location.get("city"),
                        location.get("region"),
                        location.get("country"),
                    ],
                )
            )
            if location_str:
                additional_information.append(f"Your approximate location is: {location_str}.")
    # 3.2.4 Add agent name if provided
    if agent.name is not None and agent.add_name_to_context:
        additional_information.append(f"Your name is: {agent.name}.")
    # 3.3 Build the default system message for the Agent.
    system_message_content: str = ""
    # 3.3.1 First add the Agent description if provided
    if agent.description is not None:
        system_message_content += f"{agent.description}\n"
    # 3.3.2 Then add the Agent role if provided
    if agent.role is not None:
        system_message_content += f"\n<your_role>\n{agent.role}\n</your_role>\n\n"
    # 3.3.3 Then add instructions for the Agent
    if len(instructions) > 0:
        if agent.use_instruction_tags:
            system_message_content += "<instructions>"
            if len(instructions) > 1:
                for _upi in instructions:
                    system_message_content += f"\n- {_upi}"
            else:
                system_message_content += "\n" + instructions[0]
            system_message_content += "\n</instructions>\n\n"
        else:
            if len(instructions) > 1:
                for _upi in instructions:
                    system_message_content += f"- {_upi}\n"
            else:
                system_message_content += instructions[0] + "\n\n"
    # 3.3.4 Add additional information
    if len(additional_information) > 0:
        system_message_content += "<additional_information>"
        for _ai in additional_information:
            system_message_content += f"\n- {_ai}"
        system_message_content += "\n</additional_information>\n\n"
    # 3.3.5 Then add instructions for the tools
    if agent._tool_instructions is not None:
        for _ti in agent._tool_instructions:
            system_message_content += f"{_ti}\n"
    # Format the system message with the session state variables
    if agent.resolve_in_context:
        system_message_content = format_message_with_state_variables(
            agent,
            system_message_content,
            run_context=run_context,
        )
    # 3.3.7 Then add the expected output
    if agent.expected_output is not None:
        system_message_content += f"<expected_output>\n{agent.expected_output.strip()}\n</expected_output>\n\n"
    # 3.3.8 Then add additional context
    if agent.additional_context is not None:
        system_message_content += f"{agent.additional_context}\n"
    # 3.3.8.1 Then add skills to the system prompt
    if agent.skills is not None:
        skills_snippet = agent.skills.get_system_prompt_snippet()
        if skills_snippet:
            system_message_content += f"\n{skills_snippet}\n"
    # 3.3.9 Then add memories to the system prompt
    if agent.add_memories_to_context:
        _memory_manager_not_set = False
        if not user_id:
            user_id = "default"
        if agent.memory_manager is None:
            # Temporarily create a memory manager just for this lookup; torn down below.
            set_memory_manager(agent)
            _memory_manager_not_set = True
        user_memories = agent.memory_manager.get_user_memories(user_id=user_id)  # type: ignore
        if user_memories and len(user_memories) > 0:
            system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
            system_message_content += "<memories_from_previous_interactions>"
            for _memory in user_memories:  # type: ignore
                system_message_content += f"\n- {_memory.memory}"
            system_message_content += "\n</memories_from_previous_interactions>\n\n"
            system_message_content += (
                "Note: this information is from previous interactions and may be updated in this conversation. "
                "You should always prefer information from this conversation over the past memories.\n"
            )
        else:
            system_message_content += (
                "You have the capability to retain memories from previous interactions with the user, "
                "but have not had any interactions with the user yet.\n"
            )
        if _memory_manager_not_set:
            agent.memory_manager = None
    if agent.enable_agentic_memory:
        system_message_content += (
            "\n<updating_user_memories>\n"
            "- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
            "- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
            "- Memories should include details that could personalize ongoing interactions with the user.\n"
            "- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
            "- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
            "- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
            "</updating_user_memories>\n\n"
        )
    # 3.3.10 Then add cultural knowledge to the system prompt
    if agent.add_culture_to_context:
        _culture_manager_not_set = False
        if not agent.culture_manager:
            # Temporarily create a culture manager just for this lookup; torn down below.
            set_culture_manager(agent)
            _culture_manager_not_set = True
        cultural_knowledge = agent.culture_manager.get_all_knowledge()  # type: ignore
        if cultural_knowledge and len(cultural_knowledge) > 0:
            system_message_content += (
                "You have access to shared **Cultural Knowledge**, which provides context, norms, rules and guidance "
                "for your reasoning, communication, and decision-making. "
                "Cultural Knowledge represents the collective understanding, values, rules and practices that have "
                "emerged across agents and teams. It encodes collective experience — including preferred "
                "approaches, common patterns, lessons learned, and ethical guardrails.\n\n"
                "When performing any task:\n"
                "- **Reference Cultural Knowledge** to align with shared norms and best practices.\n"
                "- **Apply it contextually**, not mechanically — adapt principles to the current situation.\n"
                "- **Preserve consistency** with cultural values (tone, reasoning, and style) unless explicitly told otherwise.\n"
                "- **Extend it** when you discover new insights — your outputs may become future Cultural Knowledge.\n"
                "- **Clarify conflicts** if Cultural Knowledge appears to contradict explicit user instructions.\n\n"
                "Your goal is to act not only intelligently but also *culturally coherently* — reflecting the "
                "collective intelligence of the system.\n\n"
                "Below is the currently available Cultural Knowledge for this context:\n\n"
            )
            system_message_content += "<cultural_knowledge>"
            for _knowledge in cultural_knowledge:  # type: ignore
                system_message_content += "\n---"
                system_message_content += f"\nName: {_knowledge.name}"
                system_message_content += f"\nSummary: {_knowledge.summary}"
                system_message_content += f"\nContent: {_knowledge.content}"
            system_message_content += "\n</cultural_knowledge>\n"
        else:
            system_message_content += (
                "You have the capability to access shared **Cultural Knowledge**, which normally provides "
                "context, norms, and guidance for your behavior and reasoning. However, no cultural knowledge "
                "is currently available in this session.\n"
                "Proceed thoughtfully and document any useful insights you create — they may become future "
                "Cultural Knowledge for others.\n\n"
            )
        if _culture_manager_not_set:
            agent.culture_manager = None
    if agent.enable_agentic_culture:
        system_message_content += (
            "\n<contributing_to_culture>\n"
            "When you discover an insight, pattern, rule, or best practice that will help future agents, use the `create_or_update_cultural_knowledge` tool to add or update entries in the shared cultural knowledge.\n"
            "\n"
            "When to contribute:\n"
            "- You discover a reusable insight, pattern, rule, or best practice that will help future agents.\n"
            "- You correct or clarify an existing cultural entry.\n"
            "- You capture a guardrail, decision rationale, postmortem lesson, or example template.\n"
            "- You identify missing context that should persist across sessions or teams.\n"
            "\n"
            "Cultural knowledge should capture reusable insights, best practices, or contextual knowledge that transcends individual conversations.\n"
            "Mention your contribution to the user only if it is relevant to their request or they asked to be notified.\n"
            "</contributing_to_culture>\n\n"
        )
    # 3.3.11 Then add a summary of the interaction to the system prompt
    if agent.add_session_summary_to_context and session.summary is not None:
        system_message_content += "Here is a brief summary of your previous interactions:\n\n"
        system_message_content += "<summary_of_previous_interactions>\n"
        system_message_content += session.summary.summary
        system_message_content += "\n</summary_of_previous_interactions>\n\n"
        system_message_content += (
            "Note: this information is from previous interactions and may be outdated. "
            "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
        )
    # 3.3.12 then add learnings to the system prompt
    if agent._learning is not None and agent.add_learnings_to_context:
        learning_context = agent._learning.build_context(
            user_id=user_id,
            session_id=session.session_id if session else None,
            agent_id=agent.id,
        )
        if learning_context:
            system_message_content += learning_context + "\n"
    # 3.3.13 then add search_knowledge instructions to the system prompt
    _resolved_knowledge = _get_resolved_knowledge(agent, run_context)
    if _resolved_knowledge is not None and agent.search_knowledge and agent.add_search_knowledge_instructions:
        build_context_fn = getattr(_resolved_knowledge, "build_context", None)
        if callable(build_context_fn):
            knowledge_context = build_context_fn(
                enable_agentic_filters=agent.enable_agentic_knowledge_filters,
            )
            if knowledge_context is not None:
                system_message_content += knowledge_context + "\n"
    # 3.3.14 Add the system message from the Model
    system_message_from_model = agent.model.get_system_message_for_model(tools)
    if system_message_from_model is not None:
        system_message_content += system_message_from_model
    # 3.3.15 Add the JSON output prompt if output_schema is provided and the model does not support native structured outputs or JSON schema outputs
    # or if use_json_mode is True
    if (
        output_schema is not None
        and agent.parser_model is None
        and not (
            (agent.model.supports_native_structured_outputs or agent.model.supports_json_schema_outputs)
            and (not agent.use_json_mode or agent.structured_outputs is True)
        )
    ):
        system_message_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
    # 3.3.16 Add the response model format prompt if output_schema is provided (Pydantic only)
    if output_schema is not None and agent.parser_model is not None and not isinstance(output_schema, dict):
        system_message_content += f"{get_response_model_format_prompt(output_schema)}"
    # 3.3.17 Add the session state to the system message
    # (use the same formatting helper as the async variant for consistency)
    if add_session_state_to_context and session_state is not None:
        system_message_content += get_formatted_session_state_for_system_message(agent, session_state)
    # Return the system message
    return (
        Message(role=agent.system_message_role, content=system_message_content.strip())  # type: ignore
        if system_message_content
        else None
    )
async def aget_system_message(
    agent: Agent,
    session: AgentSession,
    run_context: Optional[RunContext] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    add_session_state_to_context: Optional[bool] = None,
) -> Optional[Message]:
    """Return the system message for the Agent (async variant).

    1. If the system_message is provided, use that.
    2. If build_context is False, return None.
    3. Build and return the default system message for the Agent.

    Memory and knowledge lookups prefer async database calls when the
    agent's db supports them (see ``has_async_db`` / ``abuild_context``).

    Args:
        agent: The Agent instance whose configuration drives the message.
        session: The agent session (supplies the session summary).
        run_context: Optional run context supplying session_state, user_id
            and output_schema.
        tools: Tools available for this run; forwarded to the model so it
            can contribute model-specific instructions.
        add_session_state_to_context: When True, append the session state
            to the system message.

    Returns:
        The system Message, or None when no content is produced.
    """
    # Extract values from run_context
    from agno.agent._init import has_async_db, set_culture_manager, set_memory_manager
    session_state = run_context.session_state if run_context else None
    user_id = run_context.user_id if run_context else None
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    # 1. If the system_message is provided, use that.
    if agent.system_message is not None:
        if isinstance(agent.system_message, Message):
            return agent.system_message
        sys_message_content: str = ""
        if isinstance(agent.system_message, str):
            sys_message_content = agent.system_message
        elif callable(agent.system_message):
            sys_message_content = await aexecute_system_message(
                agent=agent, system_message=agent.system_message, session_state=session_state, run_context=run_context
            )
            if not isinstance(sys_message_content, str):
                raise Exception("system_message must return a string")
        # Format the system message with the session state variables
        if agent.resolve_in_context:
            sys_message_content = format_message_with_state_variables(
                agent,
                sys_message_content,
                run_context=run_context,
            )
        # type: ignore
        return Message(role=agent.system_message_role, content=sys_message_content)
    # 2. If build_context is False, return None.
    if not agent.build_context:
        return None
    if agent.model is None:
        raise Exception("model not set")
    # 3. Build and return the default system message for the Agent.
    # 3.1 Build the list of instructions for the system message
    instructions: List[str] = []
    if agent.instructions is not None:
        _instructions = agent.instructions
        if callable(agent.instructions):
            _instructions = await aexecute_instructions(
                agent=agent, instructions=agent.instructions, session_state=session_state, run_context=run_context
            )
        if isinstance(_instructions, str):
            instructions.append(_instructions)
        elif isinstance(_instructions, list):
            instructions.extend(_instructions)
    # 3.1.1 Add instructions from the Model
    _model_instructions = agent.model.get_instructions_for_model(tools)
    if _model_instructions is not None:
        instructions.extend(_model_instructions)
    # 3.2 Build a list of additional information for the system message
    additional_information: List[str] = []
    # 3.2.1 Add instructions for using markdown
    if agent.markdown and output_schema is None:
        additional_information.append("Use markdown to format your answers.")
    # 3.2.2 Add the current datetime
    if agent.add_datetime_to_context:
        from datetime import datetime
        tz = None
        if agent.timezone_identifier:
            try:
                from zoneinfo import ZoneInfo
                tz = ZoneInfo(agent.timezone_identifier)
            except Exception:
                # Fall back to the naive local time below
                log_warning("Invalid timezone identifier")
        time = datetime.now(tz) if tz else datetime.now()
        additional_information.append(f"The current time is {time}.")
    # 3.2.3 Add the current location
    if agent.add_location_to_context:
        from agno.utils.location import get_location
        location = get_location()
        if location:
            location_str = ", ".join(
                filter(
                    None,
                    [
                        location.get("city"),
                        location.get("region"),
                        location.get("country"),
                    ],
                )
            )
            if location_str:
                additional_information.append(f"Your approximate location is: {location_str}.")
    # 3.2.4 Add agent name if provided
    if agent.name is not None and agent.add_name_to_context:
        additional_information.append(f"Your name is: {agent.name}.")
    # 3.3 Build the default system message for the Agent.
    system_message_content: str = ""
    # 3.3.1 First add the Agent description if provided
    if agent.description is not None:
        system_message_content += f"{agent.description}\n"
    # 3.3.2 Then add the Agent role if provided
    if agent.role is not None:
        system_message_content += f"\n<your_role>\n{agent.role}\n</your_role>\n\n"
    # 3.3.3 Then add instructions for the Agent
    if len(instructions) > 0:
        if agent.use_instruction_tags:
            system_message_content += "<instructions>"
            if len(instructions) > 1:
                for _upi in instructions:
                    system_message_content += f"\n- {_upi}"
            else:
                system_message_content += "\n" + instructions[0]
            system_message_content += "\n</instructions>\n\n"
        else:
            if len(instructions) > 1:
                for _upi in instructions:
                    system_message_content += f"- {_upi}\n"
            else:
                system_message_content += instructions[0] + "\n\n"
    # 3.3.4 Add additional information
    if len(additional_information) > 0:
        system_message_content += "<additional_information>"
        for _ai in additional_information:
            system_message_content += f"\n- {_ai}"
        system_message_content += "\n</additional_information>\n\n"
    # 3.3.5 Then add instructions for the tools
    if agent._tool_instructions is not None:
        for _ti in agent._tool_instructions:
            system_message_content += f"{_ti}\n"
    # Format the system message with the session state variables
    if agent.resolve_in_context:
        system_message_content = format_message_with_state_variables(
            agent,
            system_message_content,
            run_context=run_context,
        )
    # 3.3.7 Then add the expected output
    if agent.expected_output is not None:
        system_message_content += f"<expected_output>\n{agent.expected_output.strip()}\n</expected_output>\n\n"
    # 3.3.8 Then add additional context
    if agent.additional_context is not None:
        system_message_content += f"{agent.additional_context}\n"
    # 3.3.8.1 Then add skills to the system prompt
    if agent.skills is not None:
        skills_snippet = agent.skills.get_system_prompt_snippet()
        if skills_snippet:
            system_message_content += f"\n{skills_snippet}\n"
    # 3.3.9 Then add memories to the system prompt
    if agent.add_memories_to_context:
        # A manager created only for this lookup is removed again below.
        _memory_manager_not_set = False
        if not user_id:
            user_id = "default"
        if agent.memory_manager is None:
            set_memory_manager(agent)
            _memory_manager_not_set = True
        # Prefer the async DB call when the agent's db supports it
        if has_async_db(agent):
            user_memories = await agent.memory_manager.aget_user_memories(user_id=user_id)  # type: ignore
        else:
            user_memories = agent.memory_manager.get_user_memories(user_id=user_id)  # type: ignore
        if user_memories and len(user_memories) > 0:
            system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
            system_message_content += "<memories_from_previous_interactions>"
            for _memory in user_memories:  # type: ignore
                system_message_content += f"\n- {_memory.memory}"
            system_message_content += "\n</memories_from_previous_interactions>\n\n"
            system_message_content += (
                "Note: this information is from previous interactions and may be updated in this conversation. "
                "You should always prefer information from this conversation over the past memories.\n"
            )
        else:
            system_message_content += (
                "You have the capability to retain memories from previous interactions with the user, "
                "but have not had any interactions with the user yet.\n"
            )
        if _memory_manager_not_set:
            agent.memory_manager = None
    if agent.enable_agentic_memory:
        system_message_content += (
            "\n<updating_user_memories>\n"
            "- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
            "- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
            "- Memories should include details that could personalize ongoing interactions with the user.\n"
            "- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
            "- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
            "- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
            "</updating_user_memories>\n\n"
        )
    # 3.3.10 Then add cultural knowledge to the system prompt
    if agent.add_culture_to_context:
        # A manager created only for this lookup is removed again below.
        _culture_manager_not_set = False
        if not agent.culture_manager:
            set_culture_manager(agent)
            _culture_manager_not_set = True
        cultural_knowledge = await agent.culture_manager.aget_all_knowledge()  # type: ignore
        if cultural_knowledge and len(cultural_knowledge) > 0:
            system_message_content += (
                "You have access to shared **Cultural Knowledge**, which provides context, norms, rules and guidance "
                "for your reasoning, communication, and decision-making.\n\n"
                "Cultural Knowledge represents the collective understanding, values, rules and practices that have "
                "emerged across agents and teams. It encodes collective experience — including preferred "
                "approaches, common patterns, lessons learned, and ethical guardrails.\n\n"
                "When performing any task:\n"
                "- **Reference Cultural Knowledge** to align with shared norms and best practices.\n"
                "- **Apply it contextually**, not mechanically — adapt principles to the current situation.\n"
                "- **Preserve consistency** with cultural values (tone, reasoning, and style) unless explicitly told otherwise.\n"
                "- **Extend it** when you discover new insights — your outputs may become future Cultural Knowledge.\n"
                "- **Clarify conflicts** if Cultural Knowledge appears to contradict explicit user instructions.\n\n"
                "Your goal is to act not only intelligently but also *culturally coherently* — reflecting the "
                "collective intelligence of the system.\n\n"
                "Below is the currently available Cultural Knowledge for this context:\n\n"
            )
            system_message_content += "<cultural_knowledge>"
            for _knowledge in cultural_knowledge:  # type: ignore
                system_message_content += "\n---"
                system_message_content += f"\nName: {_knowledge.name}"
                system_message_content += f"\nSummary: {_knowledge.summary}"
                system_message_content += f"\nContent: {_knowledge.content}"
            system_message_content += "\n</cultural_knowledge>\n"
        else:
            system_message_content += (
                "You have the capability to access shared **Cultural Knowledge**, which normally provides "
                "context, norms, and guidance for your behavior and reasoning. However, no cultural knowledge "
                "is currently available in this session.\n"
                "Proceed thoughtfully and document any useful insights you create — they may become future "
                "Cultural Knowledge for others.\n\n"
            )
        if _culture_manager_not_set:
            agent.culture_manager = None
    if agent.enable_agentic_culture:
        system_message_content += (
            "\n<contributing_to_culture>\n"
            "When you discover an insight, pattern, rule, or best practice that will help future agents, use the `create_or_update_cultural_knowledge` tool to add or update entries in the shared cultural knowledge.\n"
            "\n"
            "When to contribute:\n"
            "- You discover a reusable insight, pattern, rule, or best practice that will help future agents.\n"
            "- You correct or clarify an existing cultural entry.\n"
            "- You capture a guardrail, decision rationale, postmortem lesson, or example template.\n"
            "- You identify missing context that should persist across sessions or teams.\n"
            "\n"
            "Cultural knowledge should capture reusable insights, best practices, or contextual knowledge that transcends individual conversations.\n"
            "Mention your contribution to the user only if it is relevant to their request or they asked to be notified.\n"
            "</contributing_to_culture>\n\n"
        )
    # 3.3.11 Then add a summary of the interaction to the system prompt
    if agent.add_session_summary_to_context and session.summary is not None:
        system_message_content += "Here is a brief summary of your previous interactions:\n\n"
        system_message_content += "<summary_of_previous_interactions>\n"
        system_message_content += session.summary.summary
        system_message_content += "\n</summary_of_previous_interactions>\n\n"
        system_message_content += (
            "Note: this information is from previous interactions and may be outdated. "
            "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
        )
    # 3.3.12 then add learnings to the system prompt
    if agent._learning is not None and agent.add_learnings_to_context:
        learning_context = await agent._learning.abuild_context(
            user_id=user_id,
            session_id=session.session_id if session else None,
            agent_id=agent.id,
        )
        if learning_context:
            system_message_content += learning_context + "\n"
    # 3.3.13 then add search_knowledge instructions to the system prompt
    _resolved_knowledge = _get_resolved_knowledge(agent, run_context)
    if _resolved_knowledge is not None and agent.search_knowledge and agent.add_search_knowledge_instructions:
        # Prefer async version if available for async databases
        abuild_context_fn = getattr(_resolved_knowledge, "abuild_context", None)
        build_context_fn = getattr(_resolved_knowledge, "build_context", None)
        if callable(abuild_context_fn):
            knowledge_context = await abuild_context_fn(
                enable_agentic_filters=agent.enable_agentic_knowledge_filters,
            )
            if knowledge_context is not None:
                system_message_content += knowledge_context + "\n"
        elif callable(build_context_fn):
            knowledge_context = build_context_fn(
                enable_agentic_filters=agent.enable_agentic_knowledge_filters,
            )
            if knowledge_context is not None:
                system_message_content += knowledge_context + "\n"
    # 3.3.14 Add the system message from the Model
    system_message_from_model = agent.model.get_system_message_for_model(tools)
    if system_message_from_model is not None:
        system_message_content += system_message_from_model
    # 3.3.15 Add the JSON output prompt if output_schema is provided and the model does not support native structured outputs or JSON schema outputs
    # or if use_json_mode is True
    if (
        output_schema is not None
        and agent.parser_model is None
        and not (
            (agent.model.supports_native_structured_outputs or agent.model.supports_json_schema_outputs)
            and (not agent.use_json_mode or agent.structured_outputs is True)
        )
    ):
        system_message_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
    # 3.3.16 Add the response model format prompt if output_schema is provided (Pydantic only)
    if output_schema is not None and agent.parser_model is not None and not isinstance(output_schema, dict):
        system_message_content += f"{get_response_model_format_prompt(output_schema)}"
    # 3.3.17 Add the session state to the system message
    if add_session_state_to_context and session_state is not None:
        system_message_content += get_formatted_session_state_for_system_message(agent, session_state)
    # Return the system message
    return (
        Message(role=agent.system_message_role, content=system_message_content.strip())  # type: ignore
        if system_message_content
        else None
    )
def get_formatted_session_state_for_system_message(agent: Agent, session_state: Dict[str, Any]) -> str:
    """Wrap the raw session state dict in <session_state> tags for the system message."""
    # `agent` is accepted for signature symmetry with sibling helpers; it is not consulted here.
    opening = "\n<session_state>\n"
    closing = "\n</session_state>\n\n"
    return opening + str(session_state) + closing
# ---------------------------------------------------------------------------
# User message
# ---------------------------------------------------------------------------
def get_user_message(
    agent: Agent,
    *,
    run_response: RunOutput,
    run_context: Optional[RunContext] = None,
    input: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    **kwargs: Any,
) -> Optional[Message]:
    """Return the user message for the Agent.

    Resolution order:
    1. If ``agent.build_user_context`` is False, wrap ``input`` in a Message as-is.
    2. If ``input`` is None, return an empty-content Message when any media is
       provided, otherwise None.
    3. Otherwise build the default user message: lists are joined/stringified,
       Message/dict/BaseModel inputs are converted directly, and string inputs may
       be enriched with knowledge references, dependency context, and resolved
       state variables.

    Side effects: retrieved knowledge references are appended to
    ``run_response.references``. Media is attached only when
    ``agent.send_media_to_model`` is truthy.

    Raises:
        Exception: if a dict input fails Message validation, a BaseModel input
            cannot be serialized, or knowledge enrichment is requested for a
            non-string, non-callable input.
    """
    # Extract values from run_context
    dependencies = run_context.dependencies if run_context else None
    knowledge_filters = run_context.knowledge_filters if run_context else None
    # Get references from the knowledge base to use in the user message
    references = None
    # 1. If build_user_context is False, return the input wrapped in a Message as is.
    if not agent.build_user_context:
        return Message(
            role=agent.user_message_role or "user",
            content=input,  # type: ignore
            images=None if not agent.send_media_to_model else images,
            audio=None if not agent.send_media_to_model else audio,
            videos=None if not agent.send_media_to_model else videos,
            files=None if not agent.send_media_to_model else files,
            **kwargs,
        )
    # 2. Build the user message for the Agent
    elif input is None:
        # If we have any media, return a message with empty content
        if images is not None or audio is not None or videos is not None or files is not None:
            return Message(
                role=agent.user_message_role or "user",
                content="",
                images=None if not agent.send_media_to_model else images,
                audio=None if not agent.send_media_to_model else audio,
                videos=None if not agent.send_media_to_model else videos,
                files=None if not agent.send_media_to_model else files,
                **kwargs,
            )
        else:
            # If the input is None, return None
            return None
    else:
        # Handle list messages by converting to string
        if isinstance(input, list):
            # Convert list to string (join with newlines if all elements are strings)
            if all(isinstance(item, str) for item in input):
                message_content = "\n".join(input)  # type: ignore
            else:
                message_content = str(input)
            # NOTE(review): no `or "user"` role fallback here, unlike the paths above -- confirm intentional
            return Message(
                role=agent.user_message_role,
                content=message_content,
                images=None if not agent.send_media_to_model else images,
                audio=None if not agent.send_media_to_model else audio,
                videos=None if not agent.send_media_to_model else videos,
                files=None if not agent.send_media_to_model else files,
                **kwargs,
            )
        # If message is provided as a Message, use it directly
        elif isinstance(input, Message):
            return input
        # If message is provided as a dict, try to validate it as a Message
        elif isinstance(input, dict):
            try:
                return Message.model_validate(input)
            except Exception as e:
                log_warning(f"Failed to validate message: {e}")
                raise Exception(f"Failed to validate message: {e}")
        # If message is provided as a BaseModel, convert it to a Message
        elif isinstance(input, BaseModel):
            try:
                # Create a user message with the BaseModel content
                content = input.model_dump_json(indent=2, exclude_none=True)
                return Message(role=agent.user_message_role, content=content)
            except Exception as e:
                log_warning(f"Failed to convert BaseModel to message: {e}")
                raise Exception(f"Failed to convert BaseModel to message: {e}")
        else:
            user_msg_content = input
            # 3.1 Optionally retrieve knowledge references for string (or callable) input
            if agent.add_knowledge_to_context:
                if isinstance(input, str):
                    user_msg_content = input
                elif callable(input):
                    user_msg_content = input(agent=agent)
                else:
                    raise Exception("message must be a string or a callable when add_references is True")
                try:
                    retrieval_timer = Timer()
                    retrieval_timer.start()
                    docs_from_knowledge = get_relevant_docs_from_knowledge(
                        agent, query=user_msg_content, filters=knowledge_filters, run_context=run_context, **kwargs
                    )
                    if docs_from_knowledge is not None:
                        references = MessageReferences(
                            query=user_msg_content,
                            references=docs_from_knowledge,
                            time=round(retrieval_timer.elapsed, 4),
                        )
                        # Add the references to the run_response
                        if run_response.references is None:
                            run_response.references = []
                        run_response.references.append(references)
                    retrieval_timer.stop()
                    log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
                except Exception as e:
                    # Best-effort: retrieval failures do not abort message building
                    log_warning(f"Failed to get references: {e}")
            # 3.2 Resolve template/state variables in the content if enabled
            if agent.resolve_in_context:
                user_msg_content = format_message_with_state_variables(
                    agent,
                    user_msg_content,
                    run_context=run_context,
                )
            # Convert to string for concatenation operations
            user_msg_content_str = get_text_from_message(user_msg_content) if user_msg_content is not None else ""
            # 3.3 Add knowledge references to user message
            if (
                agent.add_knowledge_to_context
                and references is not None
                and references.references is not None
                and len(references.references) > 0
            ):
                user_msg_content_str += "\n\nUse the following references from the knowledge base if it helps:\n"
                user_msg_content_str += "<references>\n"
                user_msg_content_str += convert_documents_to_string(agent, references.references) + "\n"
                user_msg_content_str += "</references>"
            # 3.4 Add dependency context to user message
            if add_dependencies_to_context and dependencies is not None:
                user_msg_content_str += "\n\n<additional context>\n"
                user_msg_content_str += convert_dependencies_to_string(agent, dependencies) + "\n"
                user_msg_content_str += "</additional context>"
            # Use the string version for the final content
            user_msg_content = user_msg_content_str
            # Return the user message
            return Message(
                role=agent.user_message_role,
                content=user_msg_content,
                audio=None if not agent.send_media_to_model else audio,
                images=None if not agent.send_media_to_model else images,
                videos=None if not agent.send_media_to_model else videos,
                files=None if not agent.send_media_to_model else files,
                **kwargs,
            )
async def aget_user_message(
    agent: Agent,
    *,
    run_response: RunOutput,
    run_context: Optional[RunContext] = None,
    input: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    **kwargs: Any,
) -> Optional[Message]:
    """Return the user message for the Agent (async twin of ``get_user_message``).

    Resolution order:
    1. If ``agent.build_user_context`` is False, wrap ``input`` in a Message as-is.
    2. If ``input`` is None, return an empty-content Message when any media is
       provided, otherwise None.
    3. Otherwise build the default user message: lists are joined/stringified,
       Message/dict/BaseModel inputs are converted directly, and string inputs may
       be enriched with knowledge references, dependency context, and resolved
       state variables.

    The only async difference from the sync version is that knowledge retrieval
    goes through ``aget_relevant_docs_from_knowledge``. Side effects: retrieved
    references are appended to ``run_response.references``; media is attached only
    when ``agent.send_media_to_model`` is truthy.

    Raises:
        Exception: if a dict input fails Message validation, a BaseModel input
            cannot be serialized, or knowledge enrichment is requested for a
            non-string, non-callable input.
    """
    # Extract values from run_context
    dependencies = run_context.dependencies if run_context else None
    knowledge_filters = run_context.knowledge_filters if run_context else None
    # Get references from the knowledge base to use in the user message
    references = None
    # 1. If build_user_context is False, return the input wrapped in a Message as is.
    if not agent.build_user_context:
        return Message(
            role=agent.user_message_role or "user",
            content=input,  # type: ignore
            images=None if not agent.send_media_to_model else images,
            audio=None if not agent.send_media_to_model else audio,
            videos=None if not agent.send_media_to_model else videos,
            files=None if not agent.send_media_to_model else files,
            **kwargs,
        )
    # 2. Build the user message for the Agent
    elif input is None:
        # If we have any media, return a message with empty content
        if images is not None or audio is not None or videos is not None or files is not None:
            return Message(
                role=agent.user_message_role or "user",
                content="",
                images=None if not agent.send_media_to_model else images,
                audio=None if not agent.send_media_to_model else audio,
                videos=None if not agent.send_media_to_model else videos,
                files=None if not agent.send_media_to_model else files,
                **kwargs,
            )
        else:
            # If the input is None, return None
            return None
    else:
        # Handle list messages by converting to string
        if isinstance(input, list):
            # Convert list to string (join with newlines if all elements are strings)
            if all(isinstance(item, str) for item in input):
                message_content = "\n".join(input)  # type: ignore
            else:
                message_content = str(input)
            # NOTE(review): no `or "user"` role fallback here, unlike the paths above -- confirm intentional
            return Message(
                role=agent.user_message_role,
                content=message_content,
                images=None if not agent.send_media_to_model else images,
                audio=None if not agent.send_media_to_model else audio,
                videos=None if not agent.send_media_to_model else videos,
                files=None if not agent.send_media_to_model else files,
                **kwargs,
            )
        # If message is provided as a Message, use it directly
        elif isinstance(input, Message):
            return input
        # If message is provided as a dict, try to validate it as a Message
        elif isinstance(input, dict):
            try:
                return Message.model_validate(input)
            except Exception as e:
                log_warning(f"Failed to validate message: {e}")
                raise Exception(f"Failed to validate message: {e}")
        # If message is provided as a BaseModel, convert it to a Message
        elif isinstance(input, BaseModel):
            try:
                # Create a user message with the BaseModel content
                content = input.model_dump_json(indent=2, exclude_none=True)
                return Message(role=agent.user_message_role, content=content)
            except Exception as e:
                log_warning(f"Failed to convert BaseModel to message: {e}")
                raise Exception(f"Failed to convert BaseModel to message: {e}")
        else:
            user_msg_content = input
            # 3.1 Optionally retrieve knowledge references for string (or callable) input
            if agent.add_knowledge_to_context:
                if isinstance(input, str):
                    user_msg_content = input
                elif callable(input):
                    user_msg_content = input(agent=agent)
                else:
                    raise Exception("message must be a string or a callable when add_references is True")
                try:
                    retrieval_timer = Timer()
                    retrieval_timer.start()
                    docs_from_knowledge = await aget_relevant_docs_from_knowledge(
                        agent, query=user_msg_content, filters=knowledge_filters, run_context=run_context, **kwargs
                    )
                    if docs_from_knowledge is not None:
                        references = MessageReferences(
                            query=user_msg_content,
                            references=docs_from_knowledge,
                            time=round(retrieval_timer.elapsed, 4),
                        )
                        # Add the references to the run_response
                        if run_response.references is None:
                            run_response.references = []
                        run_response.references.append(references)
                    retrieval_timer.stop()
                    log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
                except Exception as e:
                    # Best-effort: retrieval failures do not abort message building
                    log_warning(f"Failed to get references: {e}")
            # 3.2 Resolve template/state variables in the content if enabled
            if agent.resolve_in_context:
                user_msg_content = format_message_with_state_variables(
                    agent,
                    user_msg_content,
                    run_context=run_context,
                )
            # Convert to string for concatenation operations
            user_msg_content_str = get_text_from_message(user_msg_content) if user_msg_content is not None else ""
            # 3.3 Add knowledge references to user message
            if (
                agent.add_knowledge_to_context
                and references is not None
                and references.references is not None
                and len(references.references) > 0
            ):
                user_msg_content_str += "\n\nUse the following references from the knowledge base if it helps:\n"
                user_msg_content_str += "<references>\n"
                user_msg_content_str += convert_documents_to_string(agent, references.references) + "\n"
                user_msg_content_str += "</references>"
            # 3.4 Add dependency context to user message
            if add_dependencies_to_context and dependencies is not None:
                user_msg_content_str += "\n\n<additional context>\n"
                user_msg_content_str += convert_dependencies_to_string(agent, dependencies) + "\n"
                user_msg_content_str += "</additional context>"
            # Use the string version for the final content
            user_msg_content = user_msg_content_str
            # Return the user message
            return Message(
                role=agent.user_message_role,
                content=user_msg_content,
                audio=None if not agent.send_media_to_model else audio,
                images=None if not agent.send_media_to_model else images,
                videos=None if not agent.send_media_to_model else videos,
                files=None if not agent.send_media_to_model else files,
                **kwargs,
            )
# ---------------------------------------------------------------------------
# Run messages
# ---------------------------------------------------------------------------
def get_run_messages(
    agent: Agent,
    *,
    run_response: RunOutput,
    run_context: RunContext,
    input: Union[str, List, Dict, Message, BaseModel, List[Message]],
    session: AgentSession,
    user_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    **kwargs: Any,
) -> RunMessages:
    """Assemble the full message list for a run.

    Builds a RunMessages object with:
    - system_message: The system message for this run
    - user_message: The user message for this run
    - messages: The ordered list of messages to send to the model

    Assembly order:
    1. Add system message to run_messages
    2. Add extra messages (``agent.additional_input``) to run_messages if provided
    3. Add history to run_messages (when ``add_history_to_context`` is truthy)
    4. Add user message to run_messages (if input is single content)
    5. Add input messages to run_messages if provided (if input is a list of
       Message objects or role-keyed dicts)

    Side effects: extra messages are also appended to
    ``run_response.additional_input``.

    NOTE(review): ``user_id`` is accepted but not referenced in this body --
    confirm whether it is kept for interface symmetry.
    """
    # Initialize the RunMessages object (no media here - that's in RunInput now)
    run_messages = RunMessages()
    # 1. Add system message to run_messages
    system_message = get_system_message(
        agent,
        session=session,
        run_context=run_context,
        tools=tools,
        add_session_state_to_context=add_session_state_to_context,
    )
    if system_message is not None:
        run_messages.system_message = system_message
        run_messages.messages.append(system_message)
    # 2. Add extra messages to run_messages if provided
    if agent.additional_input is not None:
        messages_to_add_to_run_response: List[Message] = []
        if run_messages.extra_messages is None:
            run_messages.extra_messages = []
        for _m in agent.additional_input:
            if isinstance(_m, Message):
                messages_to_add_to_run_response.append(_m)
                run_messages.messages.append(_m)
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    _m_parsed = Message.model_validate(_m)
                    messages_to_add_to_run_response.append(_m_parsed)
                    run_messages.messages.append(_m_parsed)
                    run_messages.extra_messages.append(_m_parsed)
                except Exception as e:
                    # Invalid dicts are skipped, not fatal
                    log_warning(f"Failed to validate message: {e}")
        # Add the extra messages to the run_response
        if len(messages_to_add_to_run_response) > 0:
            log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
            if run_response.additional_input is None:
                run_response.additional_input = messages_to_add_to_run_response
            else:
                run_response.additional_input.extend(messages_to_add_to_run_response)
    # 3. Add history to run_messages
    if add_history_to_context:
        from copy import deepcopy

        # Only skip messages from history when system_message_role is NOT a standard conversation role.
        # Standard conversation roles ("user", "assistant", "tool") should never be filtered
        # to preserve conversation continuity.
        skip_role = (
            agent.system_message_role if agent.system_message_role not in ["user", "assistant", "tool"] else None
        )
        history: List[Message] = session.get_messages(
            last_n_runs=agent.num_history_runs,
            limit=agent.num_history_messages,
            skip_roles=[skip_role] if skip_role else None,
            agent_id=agent.id if agent.team_id is not None else None,
        )
        if len(history) > 0:
            # Create a deep copy of the history messages to avoid modifying the original messages
            history_copy = [deepcopy(msg) for msg in history]
            # Tag each message as coming from history
            for _msg in history_copy:
                _msg.from_history = True
            # Filter tool calls from history if limit is set (before adding to run_messages)
            if agent.max_tool_calls_from_history is not None:
                filter_tool_calls(history_copy, agent.max_tool_calls_from_history)
            log_debug(f"Adding {len(history_copy)} messages from history")
            run_messages.messages += history_copy
    # 4. Add user message to run_messages
    user_message: Optional[Message] = None
    # 4.1 Build user message if input is None, str or list and not a list of Message/dict objects
    if (
        input is None
        or isinstance(input, str)
        or (
            isinstance(input, list)
            and not (
                len(input) > 0
                and (isinstance(input[0], Message) or (isinstance(input[0], dict) and "role" in input[0]))
            )
        )
    ):
        user_message = get_user_message(
            agent,
            run_response=run_response,
            run_context=run_context,
            input=input,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            add_dependencies_to_context=add_dependencies_to_context,
            **kwargs,
        )
    # 4.2 If input is provided as a Message, use it directly
    elif isinstance(input, Message):
        user_message = input
    # 4.3 If input is provided as a dict, try to validate it as a Message
    elif isinstance(input, dict):
        try:
            if agent.input_schema and is_typed_dict(agent.input_schema):
                import json

                # TypedDict inputs are serialized to JSON rather than validated as a Message
                content = json.dumps(input, indent=2, ensure_ascii=False)
                user_message = Message(role=agent.user_message_role, content=content)
            else:
                user_message = Message.model_validate(input)
        except Exception as e:
            log_warning(f"Failed to validate message: {e}")
    # 4.4 If input is provided as a BaseModel, convert it to a Message
    elif isinstance(input, BaseModel):
        try:
            # Create a user message with the BaseModel content
            content = input.model_dump_json(indent=2, exclude_none=True)
            user_message = Message(role=agent.user_message_role, content=content)
        except Exception as e:
            log_warning(f"Failed to convert BaseModel to message: {e}")
    # 5. Add input messages to run_messages if provided (List[Message] or List[Dict])
    if (
        isinstance(input, list)
        and len(input) > 0
        and (isinstance(input[0], Message) or (isinstance(input[0], dict) and "role" in input[0]))
    ):
        for _m in input:
            if isinstance(_m, Message):
                run_messages.messages.append(_m)
                if run_messages.extra_messages is None:
                    run_messages.extra_messages = []
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    msg = Message.model_validate(_m)
                    run_messages.messages.append(msg)
                    if run_messages.extra_messages is None:
                        run_messages.extra_messages = []
                    run_messages.extra_messages.append(msg)
                except Exception as e:
                    log_warning(f"Failed to validate message: {e}")
    # Add user message to run_messages
    if user_message is not None:
        run_messages.user_message = user_message
        run_messages.messages.append(user_message)
    return run_messages
async def aget_run_messages(
    agent: Agent,
    *,
    run_response: RunOutput,
    run_context: RunContext,
    input: Union[str, List, Dict, Message, BaseModel, List[Message]],
    session: AgentSession,
    user_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    **kwargs: Any,
) -> RunMessages:
    """Assemble the full message list for a run (async twin of ``get_run_messages``).

    Builds a RunMessages object with:
    - system_message: The system message for this run
    - user_message: The user message for this run
    - messages: The ordered list of messages to send to the model

    Assembly order:
    1. Add system message to run_messages (via ``aget_system_message``)
    2. Add extra messages (``agent.additional_input``) to run_messages if provided
    3. Add history to run_messages (when ``add_history_to_context`` is truthy)
    4. Add user message to run_messages (via ``aget_user_message`` for single content)
    5. Add input messages to run_messages if provided (if input is a list of
       Message objects or role-keyed dicts)

    Side effects: extra messages are also appended to
    ``run_response.additional_input``.

    NOTE(review): ``user_id`` is accepted but not referenced in this body --
    confirm whether it is kept for interface symmetry.
    """
    # Initialize the RunMessages object (no media here - that's in RunInput now)
    run_messages = RunMessages()
    # 1. Add system message to run_messages
    system_message = await aget_system_message(
        agent,
        session=session,
        run_context=run_context,
        tools=tools,
        add_session_state_to_context=add_session_state_to_context,
    )
    if system_message is not None:
        run_messages.system_message = system_message
        run_messages.messages.append(system_message)
    # 2. Add extra messages to run_messages if provided
    if agent.additional_input is not None:
        messages_to_add_to_run_response: List[Message] = []
        if run_messages.extra_messages is None:
            run_messages.extra_messages = []
        for _m in agent.additional_input:
            if isinstance(_m, Message):
                messages_to_add_to_run_response.append(_m)
                run_messages.messages.append(_m)
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    _m_parsed = Message.model_validate(_m)
                    messages_to_add_to_run_response.append(_m_parsed)
                    run_messages.messages.append(_m_parsed)
                    run_messages.extra_messages.append(_m_parsed)
                except Exception as e:
                    # Invalid dicts are skipped, not fatal
                    log_warning(f"Failed to validate message: {e}")
        # Add the extra messages to the run_response
        if len(messages_to_add_to_run_response) > 0:
            log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
            if run_response.additional_input is None:
                run_response.additional_input = messages_to_add_to_run_response
            else:
                run_response.additional_input.extend(messages_to_add_to_run_response)
    # 3. Add history to run_messages
    if add_history_to_context:
        from copy import deepcopy

        # Only skip messages from history when system_message_role is NOT a standard conversation role.
        # Standard conversation roles ("user", "assistant", "tool") should never be filtered
        # to preserve conversation continuity.
        skip_role = (
            agent.system_message_role if agent.system_message_role not in ["user", "assistant", "tool"] else None
        )
        history: List[Message] = session.get_messages(
            last_n_runs=agent.num_history_runs,
            limit=agent.num_history_messages,
            skip_roles=[skip_role] if skip_role else None,
            agent_id=agent.id if agent.team_id is not None else None,
        )
        if len(history) > 0:
            # Create a deep copy of the history messages to avoid modifying the original messages
            history_copy = [deepcopy(msg) for msg in history]
            # Tag each message as coming from history
            for _msg in history_copy:
                _msg.from_history = True
            # Filter tool calls from history if limit is set (before adding to run_messages)
            if agent.max_tool_calls_from_history is not None:
                filter_tool_calls(history_copy, agent.max_tool_calls_from_history)
            log_debug(f"Adding {len(history_copy)} messages from history")
            run_messages.messages += history_copy
    # 4. Add user message to run_messages
    user_message: Optional[Message] = None
    # 4.1 Build user message if input is None, str or list and not a list of Message/dict objects
    if (
        input is None
        or isinstance(input, str)
        or (
            isinstance(input, list)
            and not (
                len(input) > 0
                and (isinstance(input[0], Message) or (isinstance(input[0], dict) and "role" in input[0]))
            )
        )
    ):
        user_message = await aget_user_message(
            agent,
            run_response=run_response,
            run_context=run_context,
            input=input,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            add_dependencies_to_context=add_dependencies_to_context,
            **kwargs,
        )
    # 4.2 If input is provided as a Message, use it directly
    elif isinstance(input, Message):
        user_message = input
    # 4.3 If input is provided as a dict, try to validate it as a Message
    elif isinstance(input, dict):
        try:
            if agent.input_schema and is_typed_dict(agent.input_schema):
                import json

                # TypedDict inputs are serialized to JSON rather than validated as a Message
                content = json.dumps(input, indent=2, ensure_ascii=False)
                user_message = Message(role=agent.user_message_role, content=content)
            else:
                user_message = Message.model_validate(input)
        except Exception as e:
            log_warning(f"Failed to validate message: {e}")
    # 4.4 If input is provided as a BaseModel, convert it to a Message
    elif isinstance(input, BaseModel):
        try:
            # Create a user message with the BaseModel content
            content = input.model_dump_json(indent=2, exclude_none=True)
            user_message = Message(role=agent.user_message_role, content=content)
        except Exception as e:
            log_warning(f"Failed to convert BaseModel to message: {e}")
    # 5. Add input messages to run_messages if provided (List[Message] or List[Dict])
    if (
        isinstance(input, list)
        and len(input) > 0
        and (isinstance(input[0], Message) or (isinstance(input[0], dict) and "role" in input[0]))
    ):
        for _m in input:
            if isinstance(_m, Message):
                run_messages.messages.append(_m)
                if run_messages.extra_messages is None:
                    run_messages.extra_messages = []
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    msg = Message.model_validate(_m)
                    run_messages.messages.append(msg)
                    if run_messages.extra_messages is None:
                        run_messages.extra_messages = []
                    run_messages.extra_messages.append(msg)
                except Exception as e:
                    log_warning(f"Failed to validate message: {e}")
    # Add user message to run_messages
    if user_message is not None:
        run_messages.user_message = user_message
        run_messages.messages.append(user_message)
    return run_messages
def get_continue_run_messages(
    agent: Agent,
    input: List[Message],
) -> RunMessages:
    """Build a RunMessages object for continuing a previously paused run.

    The given messages are reused as-is; the most recent message with the agent's
    user role is taken as the original user message, and the first message with the
    agent's system role is taken as the system message.
    """
    # Most recent user-role message counts as the original user message
    user_message = next((m for m in reversed(input) if m.role == agent.user_message_role), None)
    # First system-role message counts as the system message
    system_message = next((m for m in input if m.role == agent.system_message_role), None)
    continued = RunMessages()
    continued.system_message = system_message
    continued.user_message = user_message
    continued.messages = input
    return continued
# ---------------------------------------------------------------------------
# Parser / output model messages
# ---------------------------------------------------------------------------
def get_messages_for_parser_model(
    agent: Agent,
    model_response: ModelResponse,
    response_format: Optional[Union[Dict, Type[BaseModel]]],
    run_context: Optional[RunContext] = None,
) -> List[Message]:
    """Build the system + user message pair sent to the parser model."""
    # The output schema (if any) comes from the run context
    output_schema = None
    if run_context:
        output_schema = run_context.output_schema
    # Custom parser prompt wins over the default instruction
    if agent.parser_model_prompt is not None:
        system_content = agent.parser_model_prompt
    else:
        system_content = "You are tasked with creating a structured output from the provided user message."
    # In plain JSON mode the schema instructions must be spelled out explicitly
    if output_schema is not None and response_format == {"type": "json_object"}:
        system_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
    system_message = Message(role="system", content=system_content)
    user_message = Message(role="user", content=model_response.content)
    return [system_message, user_message]
def get_messages_for_parser_model_stream(
    agent: Agent,
    run_response: RunOutput,
    response_format: Optional[Union[Dict, Type[BaseModel]]],
    run_context: Optional[RunContext] = None,
) -> List[Message]:
    """Build the system + user message pair for the parser model (streaming path)."""
    # The output schema (if any) comes from the run context
    output_schema = None
    if run_context:
        output_schema = run_context.output_schema
    # Custom parser prompt wins over the default instruction
    if agent.parser_model_prompt is not None:
        system_content = agent.parser_model_prompt
    else:
        system_content = "You are tasked with creating a structured output from the provided data."
    # In plain JSON mode the schema instructions must be spelled out explicitly
    if output_schema is not None and response_format == {"type": "json_object"}:
        system_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
    system_message = Message(role="system", content=system_content)
    user_message = Message(role="user", content=run_response.content)
    return [system_message, user_message]
def get_messages_for_output_model(agent: Agent, messages: List[Message]) -> List[Message]:
    """Prepare the message list for the output model.

    Mutates ``messages`` in place:
    - If the agent defines an ``output_model_prompt``, it replaces the content of
      the first system message, or is inserted as a new system message at the front
      when none exists.
    - The trailing message (the assistant response being post-processed) is removed.

    Args:
        agent: The Agent whose ``output_model_prompt`` is applied.
        messages: The run messages; modified in place and also returned.

    Returns:
        The same list, with the prompt applied and the last message removed.
    """
    if agent.output_model_prompt is not None:
        system_message_exists = False
        for message in messages:
            if message.role == "system":
                system_message_exists = True
                # Override the existing system prompt with the output-model prompt
                message.content = agent.output_model_prompt
                break
        if not system_message_exists:
            messages.insert(0, Message(role="system", content=agent.output_model_prompt))
    # Remove the last assistant message from the messages list.
    # Guard against an empty list so this cannot raise IndexError.
    if messages:
        messages.pop(-1)
    return messages
# ---------------------------------------------------------------------------
# Knowledge retrieval
# ---------------------------------------------------------------------------
def get_relevant_docs_from_knowledge(
    agent: Agent,
    query: str,
    num_documents: Optional[int] = None,
    filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    validate_filters: bool = False,
    run_context: Optional[RunContext] = None,
    **kwargs: Any,
) -> Optional[List[Union[Dict[str, Any], str]]]:
    """Get relevant docs from the knowledge base to answer a query.

    A custom ``agent.knowledge_retriever`` callable takes precedence; its signature
    is inspected so that only the parameters it declares (agent, filters,
    run_context or the legacy ``dependencies``) are passed. Otherwise the resolved
    knowledge object's ``retrieve()`` method is used.

    Args:
        agent: The Agent instance.
        query (str): The query to search for.
        num_documents (Optional[int]): Number of documents to return. Defaults to
            the knowledge object's ``max_results`` when unset.
        filters (Optional[Dict[str, Any]]): Filters to apply to the search.
        validate_filters (bool): Whether to validate the filters against known valid filter keys.
        run_context (Optional[RunContext]): Runtime context containing dependencies and other context.
        **kwargs: Additional keyword arguments forwarded to the retriever.

    Returns:
        Optional[List[Dict[str, Any]]]: List of relevant document dicts, or None
        when no knowledge is configured or nothing is found.

    Raises:
        Exception: retrieval errors are logged and then re-raised to the caller.
    """
    from agno.knowledge.document import Document

    # Extract dependencies from run_context if available
    dependencies = run_context.dependencies if run_context else None
    resolved_knowledge = _get_resolved_knowledge(agent, run_context)
    # Default the result count to the knowledge object's configured max_results
    if num_documents is None and resolved_knowledge is not None:
        num_documents = getattr(resolved_knowledge, "max_results", None)
    # Validate the filters against known valid filter keys
    if resolved_knowledge is not None and filters is not None:
        if validate_filters:
            valid_filters, invalid_keys = resolved_knowledge.validate_filters(filters)  # type: ignore
            # Warn about invalid filter keys
            if invalid_keys:
                # type: ignore
                log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
                # Only use valid filters
                filters = valid_filters
                if not filters:
                    log_warning("No valid filters remain after validation. Search will proceed without filters.")
            if invalid_keys == [] and valid_filters == {}:
                log_warning("No valid filters provided. Search will proceed without filters.")
                filters = None
    # A user-supplied knowledge retriever takes precedence over the knowledge object
    if agent.knowledge_retriever is not None and callable(agent.knowledge_retriever):
        from inspect import signature

        try:
            # Only pass the parameters the retriever's signature actually declares
            sig = signature(agent.knowledge_retriever)
            knowledge_retriever_kwargs: Dict[str, Any] = {}
            if "agent" in sig.parameters:
                knowledge_retriever_kwargs = {"agent": agent}
            if "filters" in sig.parameters:
                knowledge_retriever_kwargs["filters"] = filters
            if "run_context" in sig.parameters:
                knowledge_retriever_kwargs["run_context"] = run_context
            elif "dependencies" in sig.parameters:
                # Backward compatibility: support dependencies parameter
                knowledge_retriever_kwargs["dependencies"] = dependencies
            knowledge_retriever_kwargs.update({"query": query, "num_documents": num_documents, **kwargs})
            return agent.knowledge_retriever(**knowledge_retriever_kwargs)
        except Exception as e:
            # Log, then propagate: a failing custom retriever should abort the search
            log_warning(f"Knowledge retriever failed: {e}")
            raise e
    # Use knowledge protocol's retrieve method
    try:
        if resolved_knowledge is None:
            return None
        # Use protocol retrieve() method if available
        retrieve_fn = getattr(resolved_knowledge, "retrieve", None)
        if not callable(retrieve_fn):
            log_debug("Knowledge does not implement retrieve()")
            return None
        if num_documents is None:
            num_documents = getattr(resolved_knowledge, "max_results", 10)
        log_debug(f"Retrieving from knowledge base with filters: {filters}")
        relevant_docs: List[Document] = retrieve_fn(query=query, max_results=num_documents, filters=filters)
        if not relevant_docs or len(relevant_docs) == 0:
            log_debug("No relevant documents found for query")
            return None
        return [doc.to_dict() for doc in relevant_docs]
    except Exception as e:
        # Log, then propagate: knowledge-base failures are surfaced to the caller
        log_warning(f"Error retrieving from knowledge base: {e}")
        raise e
async def aget_relevant_docs_from_knowledge(
    agent: Agent,
    query: str,
    num_documents: Optional[int] = None,
    filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    validate_filters: bool = False,
    run_context: Optional[RunContext] = None,
    **kwargs: Any,
) -> Optional[List[Union[Dict[str, Any], str]]]:
    """Get relevant documents from knowledge base asynchronously.

    Resolution order:
      1. If the agent has a custom ``knowledge_retriever`` callable, call it
         (awaiting the result if it is awaitable) and return its result as-is.
      2. Otherwise fall back to the knowledge protocol's ``aretrieve()`` (or
         sync ``retrieve()``) method and return the documents as dicts.

    Args:
        agent: The agent whose knowledge / retriever is used.
        query: The search query.
        num_documents: Max documents to return; defaults to the knowledge's
            ``max_results`` attribute when unset.
        filters: Filter dict or filter expressions to constrain the search.
        validate_filters: When True, filters are checked against the knowledge
            base's known filter keys and invalid ones are dropped.
        run_context: Current run context; used for dependencies and passed to
            custom retrievers that accept it.
        **kwargs: Extra keyword arguments forwarded to a custom retriever.

    Returns:
        A list of document dicts (or retriever-specific values), or None when
        no knowledge is configured or nothing relevant is found.

    Raises:
        Exception: Re-raises any error from the retriever or knowledge base
            after logging a warning.
    """
    from agno.knowledge.document import Document

    # Extract dependencies from run_context if available
    dependencies = run_context.dependencies if run_context else None
    resolved_knowledge = _get_resolved_knowledge(agent, run_context)
    if num_documents is None and resolved_knowledge is not None:
        num_documents = getattr(resolved_knowledge, "max_results", None)
    # Validate the filters against known valid filter keys
    if resolved_knowledge is not None and filters is not None:
        if validate_filters:
            valid_filters, invalid_keys = await resolved_knowledge.avalidate_filters(filters)  # type: ignore
            # Warn about invalid filter keys
            if invalid_keys:  # type: ignore
                log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
                # Only use valid filters
                filters = valid_filters
                if not filters:
                    log_warning("No valid filters remain after validation. Search will proceed without filters.")
            # Nothing valid and nothing invalid: the caller passed an empty filter set.
            if invalid_keys == [] and valid_filters == {}:
                log_warning("No valid filters provided. Search will proceed without filters.")
                filters = None
    # Path 1: a user-supplied retriever takes precedence over the knowledge protocol.
    if agent.knowledge_retriever is not None and callable(agent.knowledge_retriever):
        from inspect import isawaitable, signature

        try:
            # Build kwargs based on which optional parameters the retriever declares.
            sig = signature(agent.knowledge_retriever)
            knowledge_retriever_kwargs: Dict[str, Any] = {}
            if "agent" in sig.parameters:
                knowledge_retriever_kwargs = {"agent": agent}
            if "filters" in sig.parameters:
                knowledge_retriever_kwargs["filters"] = filters
            if "run_context" in sig.parameters:
                knowledge_retriever_kwargs["run_context"] = run_context
            elif "dependencies" in sig.parameters:
                # Backward compatibility: support dependencies parameter
                knowledge_retriever_kwargs["dependencies"] = dependencies
            knowledge_retriever_kwargs.update({"query": query, "num_documents": num_documents, **kwargs})
            result = agent.knowledge_retriever(**knowledge_retriever_kwargs)
            # Support both sync and async retrievers.
            if isawaitable(result):
                result = await result
            return result
        except Exception as e:
            log_warning(f"Knowledge retriever failed: {e}")
            raise e
    # Path 2: use knowledge protocol's retrieve method
    try:
        if resolved_knowledge is None:
            return None
        # Use protocol aretrieve() or retrieve() method if available
        aretrieve_fn = getattr(resolved_knowledge, "aretrieve", None)
        retrieve_fn = getattr(resolved_knowledge, "retrieve", None)
        if not callable(aretrieve_fn) and not callable(retrieve_fn):
            log_debug("Knowledge does not implement retrieve()")
            return None
        if num_documents is None:
            num_documents = getattr(resolved_knowledge, "max_results", 10)
        log_debug(f"Retrieving from knowledge base with filters: {filters}")
        # Prefer the async variant; fall back to the sync one.
        # NOTE(review): the sync fallback blocks the event loop — presumably acceptable here; confirm.
        if callable(aretrieve_fn):
            relevant_docs: List[Document] = await aretrieve_fn(query=query, max_results=num_documents, filters=filters)
        elif callable(retrieve_fn):
            relevant_docs = retrieve_fn(query=query, max_results=num_documents, filters=filters)
        else:
            return None
        if not relevant_docs or len(relevant_docs) == 0:
            log_debug("No relevant documents found for query")
            return None
        return [doc.to_dict() for doc in relevant_docs]
    except Exception as e:
        log_warning(f"Error retrieving from knowledge base: {e}")
        raise e
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_messages.py",
"license": "Apache License 2.0",
"lines": 1603,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_response.py | """Response processing, reasoning, output format, and model response handling for Agent."""
from __future__ import annotations
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Type,
Union,
cast,
get_args,
)
from uuid import uuid4
from pydantic import BaseModel
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.media import Audio
from agno.models.base import Model
from agno.models.message import Message
from agno.models.response import ModelResponse, ModelResponseEvent
from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
from agno.run import RunContext
from agno.run.agent import RunEvent, RunOutput, RunOutputEvent
from agno.run.messages import RunMessages
from agno.run.requirement import RunRequirement
from agno.run.team import TeamRunOutputEvent
from agno.session import AgentSession
from agno.tools.function import Function
from agno.utils.events import (
create_compression_completed_event,
create_compression_started_event,
create_model_request_completed_event,
create_model_request_started_event,
create_parser_model_response_completed_event,
create_parser_model_response_started_event,
create_reasoning_completed_event,
create_reasoning_content_delta_event,
create_reasoning_started_event,
create_reasoning_step_event,
create_run_output_content_event,
create_tool_call_completed_event,
create_tool_call_error_event,
create_tool_call_started_event,
handle_event,
)
from agno.utils.log import log_debug, log_warning
from agno.utils.merge_dict import merge_dictionaries
from agno.utils.reasoning import (
add_reasoning_metrics_to_metadata,
add_reasoning_step_to_metadata,
append_to_reasoning_content,
update_run_output_with_reasoning,
)
from agno.utils.string import parse_response_dict_str, parse_response_model_str
###########################################################################
# Reasoning
###########################################################################
def handle_reasoning(
    agent: Agent, run_response: RunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
) -> None:
    """Run the reasoning phase synchronously, discarding any emitted events."""
    if not (agent.reasoning or agent.reasoning_model is not None):
        return
    events = reason(
        agent=agent,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        stream_events=False,
    )
    # Exhaust the generator; events are not surfaced in the non-streaming path.
    deque(events, maxlen=0)
def handle_reasoning_stream(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: Optional[bool] = None,
) -> Iterator[RunOutputEvent]:
    """Run the reasoning phase synchronously, yielding reasoning events as they occur."""
    if not (agent.reasoning or agent.reasoning_model is not None):
        return
    for event in reason(
        agent=agent,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        stream_events=stream_events,
    ):
        yield event
async def ahandle_reasoning(
    agent: Agent, run_response: RunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
) -> None:
    """Run the reasoning phase asynchronously, discarding any emitted events."""
    if not (agent.reasoning or agent.reasoning_model is not None):
        return
    events = areason(
        agent=agent,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        stream_events=False,
    )
    # Drain the async generator; events are not surfaced in the non-streaming path.
    async for _event in events:  # type: ignore
        pass
async def ahandle_reasoning_stream(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: Optional[bool] = None,
) -> AsyncIterator[RunOutputEvent]:
    """Run the reasoning phase asynchronously, yielding reasoning events as they occur."""
    if not (agent.reasoning or agent.reasoning_model is not None):
        return
    async for event in areason(  # type: ignore
        agent=agent,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        stream_events=stream_events,
    ):
        yield event
def format_reasoning_step_content(agent: Agent, run_response: RunOutput, reasoning_step: ReasoningStep) -> str:
    """Render one reasoning step as markdown appended to the existing reasoning content.

    Builds a "## title / reasoning / Action / Result" section from whichever
    fields the step has set, then returns the run response's current
    reasoning_content with that section concatenated. Nothing is mutated.
    """
    parts = []
    if reasoning_step.title:
        parts.append(f"## {reasoning_step.title}\n")
    if reasoning_step.reasoning:
        parts.append(f"{reasoning_step.reasoning}\n")
    if reasoning_step.action:
        parts.append(f"Action: {reasoning_step.action}\n")
    if reasoning_step.result:
        parts.append(f"Result: {reasoning_step.result}\n")
    parts.append("\n")
    # Start from whatever reasoning content has accumulated so far.
    existing = ""
    if hasattr(run_response, "reasoning_content") and run_response.reasoning_content:  # type: ignore
        existing = run_response.reasoning_content  # type: ignore
    return existing + "".join(parts)
def handle_reasoning_event(
    agent: Agent,
    event: "ReasoningEvent",  # type: ignore # noqa: F821
    run_response: RunOutput,
    stream_events: Optional[bool] = None,
) -> Iterator[RunOutputEvent]:
    """
    Convert a ReasoningEvent from the ReasoningManager to Agent-specific RunOutputEvents.

    This method handles the conversion of generic reasoning events to Agent events,
    keeping the Agent._reason() method clean and simple.

    Behavior per event type:
      * started / content_delta: only emit an event when stream_events is truthy.
      * step: always record the step on run_response; additionally emit a step
        event (with the cumulative reasoning content) when streaming.
      * completed: update run_response with the final steps/messages (native
        reasoning only) and emit a completed event when streaming.
      * error: log a warning and continue; no event is emitted.
    """
    from agno.reasoning.manager import ReasoningEventType

    if event.event_type == ReasoningEventType.started:
        if stream_events:
            yield handle_event(  # type: ignore
                create_reasoning_started_event(from_run_response=run_response),
                run_response,
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
    elif event.event_type == ReasoningEventType.content_delta:
        # Incremental reasoning text; only meaningful when streaming and non-empty.
        if stream_events and event.reasoning_content:
            yield handle_event(  # type: ignore
                create_reasoning_content_delta_event(
                    from_run_response=run_response,
                    reasoning_content=event.reasoning_content,
                ),
                run_response,
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
    elif event.event_type == ReasoningEventType.step:
        if event.reasoning_step:
            # Update run_response with this step (recorded whether or not we stream).
            update_run_output_with_reasoning(
                run_response=run_response,
                reasoning_steps=[event.reasoning_step],
                reasoning_agent_messages=[],
            )
            if stream_events:
                # The step event carries the full accumulated reasoning content so far.
                updated_reasoning_content = format_reasoning_step_content(
                    agent=agent,
                    run_response=run_response,
                    reasoning_step=event.reasoning_step,
                )
                yield handle_event(  # type: ignore
                    create_reasoning_step_event(
                        from_run_response=run_response,
                        reasoning_step=event.reasoning_step,
                        reasoning_content=updated_reasoning_content,
                    ),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
    elif event.event_type == ReasoningEventType.completed:
        if event.message and event.reasoning_steps:
            # This is from native reasoning - update with the message and steps
            update_run_output_with_reasoning(
                run_response=run_response,
                reasoning_steps=event.reasoning_steps,
                reasoning_agent_messages=event.reasoning_messages,
            )
        if stream_events:
            yield handle_event(  # type: ignore
                create_reasoning_completed_event(
                    from_run_response=run_response,
                    content=ReasoningSteps(reasoning_steps=event.reasoning_steps),
                    content_type=ReasoningSteps.__name__,
                ),
                run_response,
                events_to_skip=agent.events_to_skip,  # type: ignore
                store_events=agent.store_events,
            )
    elif event.event_type == ReasoningEventType.error:
        # Reasoning failures are non-fatal: log and let the run proceed without reasoning.
        log_warning(f"Reasoning error. {event.error}, continuing regular session...")
def reason(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: Optional[bool] = None,
) -> Iterator[RunOutputEvent]:
    """
    Run reasoning using the ReasoningManager.

    Handles both native reasoning models (DeepSeek, Anthropic, etc.) and
    default Chain-of-Thought reasoning with a clean, unified interface.
    """
    from copy import deepcopy

    from agno.reasoning.manager import ReasoningConfig, ReasoningManager

    # Fall back to a copy of the agent's main model when no dedicated reasoning model is set.
    model_for_reasoning: Optional[Model] = agent.reasoning_model
    if model_for_reasoning is None and agent.model is not None:
        model_for_reasoning = deepcopy(agent.model)

    config = ReasoningConfig(
        reasoning_model=model_for_reasoning,
        reasoning_agent=agent.reasoning_agent,
        min_steps=agent.reasoning_min_steps,
        max_steps=agent.reasoning_max_steps,
        tools=agent.tools if isinstance(agent.tools, list) else None,
        tool_call_limit=agent.tool_call_limit,
        use_json_mode=agent.use_json_mode,
        telemetry=agent.telemetry,
        debug_mode=agent.debug_mode,
        debug_level=agent.debug_level,
        run_context=run_context,
        run_metrics=run_response.metrics,
    )
    reasoning_manager = ReasoningManager(config)

    # Translate each manager event into Agent-level run output events.
    for reasoning_event in reasoning_manager.reason(run_messages, stream=bool(stream_events)):
        yield from handle_reasoning_event(
            agent=agent, event=reasoning_event, run_response=run_response, stream_events=stream_events
        )
async def areason(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: Optional[bool] = None,
) -> AsyncIterator[RunOutputEvent]:
    """
    Run reasoning asynchronously using the ReasoningManager.

    Handles both native reasoning models (DeepSeek, Anthropic, etc.) and
    default Chain-of-Thought reasoning with a clean, unified interface.
    """
    from copy import deepcopy

    from agno.reasoning.manager import ReasoningConfig, ReasoningManager

    # Fall back to a copy of the agent's main model when no dedicated reasoning model is set.
    model_for_reasoning: Optional[Model] = agent.reasoning_model
    if model_for_reasoning is None and agent.model is not None:
        model_for_reasoning = deepcopy(agent.model)

    config = ReasoningConfig(
        reasoning_model=model_for_reasoning,
        reasoning_agent=agent.reasoning_agent,
        min_steps=agent.reasoning_min_steps,
        max_steps=agent.reasoning_max_steps,
        tools=agent.tools if isinstance(agent.tools, list) else None,
        tool_call_limit=agent.tool_call_limit,
        use_json_mode=agent.use_json_mode,
        telemetry=agent.telemetry,
        debug_mode=agent.debug_mode,
        debug_level=agent.debug_level,
        run_context=run_context,
        run_metrics=run_response.metrics,
    )
    reasoning_manager = ReasoningManager(config)

    # Translate each manager event into Agent-level run output events.
    async for reasoning_event in reasoning_manager.areason(run_messages, stream=bool(stream_events)):
        for converted_event in handle_reasoning_event(
            agent=agent, event=reasoning_event, run_response=run_response, stream_events=stream_events
        ):
            yield converted_event
def process_parser_response(
    agent: Agent,
    model_response: ModelResponse,
    run_messages: RunMessages,
    parser_model_response: ModelResponse,
    messages_for_parser_model: list,
) -> None:
    """Adopt the parser model's output onto the main model response.

    Finds the parser's last assistant message; when present, appends it to the
    run messages and copies the parser's parsed/content values onto
    model_response. Logs a warning (and changes nothing) otherwise.
    """
    assistant_message = next(
        (m for m in reversed(messages_for_parser_model) if m.role == "assistant"),
        None,
    )
    if assistant_message is None:
        log_warning("Unable to parse response with parser model")
        return
    run_messages.messages.append(assistant_message)
    model_response.parsed = parser_model_response.parsed
    model_response.content = parser_model_response.content
def parse_response_with_parser_model(
    agent: Agent,
    model_response: ModelResponse,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    run_response: Optional[RunOutput] = None,
) -> None:
    """Parse the model response using the parser model.

    No-op when the agent has no parser model. Requires an output schema on the
    run context; otherwise a warning is logged and the response is left as-is.
    Parser-model metrics are accumulated onto run_response when provided.
    """
    from agno.agent._messages import get_messages_for_parser_model

    if agent.parser_model is None:
        return
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    if output_schema is None:
        log_warning("A response model is required to parse the response with a parser model")
        return

    parser_response_format = get_response_format(agent, agent.parser_model, run_context=run_context)
    messages_for_parser_model = get_messages_for_parser_model(
        agent, model_response, parser_response_format, run_context=run_context
    )
    parser_model_response: ModelResponse = agent.parser_model.response(
        messages=messages_for_parser_model,
        response_format=parser_response_format,
    )
    # Accumulate parser model metrics
    if run_response is not None:
        from agno.metrics import ModelType, accumulate_model_metrics

        # run_response is guaranteed non-None here (the old code re-checked it redundantly).
        accumulate_model_metrics(
            parser_model_response,
            agent.parser_model,
            ModelType.PARSER_MODEL,
            run_response.metrics,
        )
    process_parser_response(
        agent=agent,
        model_response=model_response,
        run_messages=run_messages,
        parser_model_response=parser_model_response,
        messages_for_parser_model=messages_for_parser_model,
    )
async def aparse_response_with_parser_model(
    agent: Agent,
    model_response: ModelResponse,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    run_response: Optional[RunOutput] = None,
) -> None:
    """Parse the model response using the parser model (async).

    No-op when the agent has no parser model. Requires an output schema on the
    run context; otherwise a warning is logged and the response is left as-is.
    Parser-model metrics are accumulated onto run_response when provided.
    """
    from agno.agent._messages import get_messages_for_parser_model

    if agent.parser_model is None:
        return
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    if output_schema is None:
        log_warning("A response model is required to parse the response with a parser model")
        return

    parser_response_format = get_response_format(agent, agent.parser_model, run_context=run_context)
    messages_for_parser_model = get_messages_for_parser_model(
        agent, model_response, parser_response_format, run_context=run_context
    )
    parser_model_response: ModelResponse = await agent.parser_model.aresponse(
        messages=messages_for_parser_model,
        response_format=parser_response_format,
    )
    # Accumulate parser model metrics
    if run_response is not None:
        from agno.metrics import ModelType, accumulate_model_metrics

        # run_response is guaranteed non-None here (the old code re-checked it redundantly).
        accumulate_model_metrics(
            parser_model_response,
            agent.parser_model,
            ModelType.PARSER_MODEL,
            run_response.metrics,
        )
    process_parser_response(
        agent=agent,
        model_response=model_response,
        run_messages=run_messages,
        parser_model_response=parser_model_response,
        messages_for_parser_model=messages_for_parser_model,
    )
def parse_response_with_parser_model_stream(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    stream_events: bool = True,
    run_context: Optional[RunContext] = None,
) -> Iterator[RunOutputEvent]:
    """Parse the model response using the parser model (streaming).

    No-op generator when the agent has no parser model. Requires an output
    schema on the run context; otherwise a warning is logged. When streaming,
    the parser call is bracketed by started/completed events. The parser's
    assistant message, when found, is appended to run_response.messages.
    """
    from agno.agent._messages import get_messages_for_parser_model_stream

    if agent.parser_model is not None:
        # Get output_schema from run_context
        output_schema = run_context.output_schema if run_context else None
        if output_schema is not None:
            if stream_events:
                yield handle_event(
                    create_parser_model_response_started_event(run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
            # Accumulator mutated in place by handle_model_response_chunk.
            parser_model_response = ModelResponse(content="")
            parser_response_format = get_response_format(agent, agent.parser_model, run_context=run_context)
            messages_for_parser_model = get_messages_for_parser_model_stream(
                agent, run_response, parser_response_format, run_context=run_context
            )
            for model_response_event in agent.parser_model.response_stream(
                messages=messages_for_parser_model,
                response_format=parser_response_format,
                stream_model_response=False,
                run_response=run_response,
            ):
                yield from handle_model_response_chunk(
                    agent,
                    session=session,
                    run_response=run_response,
                    model_response=parser_model_response,
                    model_response_event=model_response_event,
                    parse_structured_output=True,
                    stream_events=stream_events,
                    run_context=run_context,
                )
            # Record the parser's assistant message on the run output, if present.
            parser_model_response_message: Optional[Message] = None
            for message in reversed(messages_for_parser_model):
                if message.role == "assistant":
                    parser_model_response_message = message
                    break
            if parser_model_response_message is not None:
                if run_response.messages is not None:
                    run_response.messages.append(parser_model_response_message)
            else:
                log_warning("Unable to parse response with parser model")
            if stream_events:
                yield handle_event(
                    create_parser_model_response_completed_event(run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
        else:
            log_warning("A response model is required to parse the response with a parser model")
async def aparse_response_with_parser_model_stream(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    stream_events: bool = True,
    run_context: Optional[RunContext] = None,
) -> AsyncIterator[RunOutputEvent]:
    """Parse the model response using the parser model stream (async).

    No-op generator when the agent has no parser model. Requires an output
    schema on the run context; otherwise a warning is logged. When streaming,
    the parser call is bracketed by started/completed events. The parser's
    assistant message, when found, is appended to run_response.messages.
    """
    from agno.agent._messages import get_messages_for_parser_model_stream

    if agent.parser_model is not None:
        # Get output_schema from run_context
        output_schema = run_context.output_schema if run_context else None
        if output_schema is not None:
            if stream_events:
                yield handle_event(
                    create_parser_model_response_started_event(run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
            # Accumulator mutated in place by handle_model_response_chunk.
            parser_model_response = ModelResponse(content="")
            parser_response_format = get_response_format(agent, agent.parser_model, run_context=run_context)
            messages_for_parser_model = get_messages_for_parser_model_stream(
                agent, run_response, parser_response_format, run_context=run_context
            )
            model_response_stream = agent.parser_model.aresponse_stream(
                messages=messages_for_parser_model,
                response_format=parser_response_format,
                stream_model_response=False,
                run_response=run_response,
            )
            async for model_response_event in model_response_stream:  # type: ignore
                for event in handle_model_response_chunk(
                    agent,
                    session=session,
                    run_response=run_response,
                    model_response=parser_model_response,
                    model_response_event=model_response_event,
                    parse_structured_output=True,
                    stream_events=stream_events,
                    run_context=run_context,
                ):
                    yield event
            # Record the parser's assistant message on the run output, if present.
            parser_model_response_message: Optional[Message] = None
            for message in reversed(messages_for_parser_model):
                if message.role == "assistant":
                    parser_model_response_message = message
                    break
            if parser_model_response_message is not None:
                if run_response.messages is not None:
                    run_response.messages.append(parser_model_response_message)
            else:
                log_warning("Unable to parse response with parser model")
            if stream_events:
                yield handle_event(
                    create_parser_model_response_completed_event(run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
        else:
            log_warning("A response model is required to parse the response with a parser model")
def generate_response_with_output_model(
    agent: Agent, model_response: ModelResponse, run_messages: RunMessages, run_response: Optional[RunOutput] = None
) -> None:
    """Generate the final response content using the agent's dedicated output model.

    No-op when the agent has no output model. Otherwise calls the output model
    with the prepared messages, accumulates its metrics onto run_response (when
    provided), and overwrites model_response.content with the result.
    """
    from agno.agent._messages import get_messages_for_output_model

    if agent.output_model is None:
        return
    messages_for_output_model = get_messages_for_output_model(agent, run_messages.messages)
    output_model_response: ModelResponse = agent.output_model.response(messages=messages_for_output_model)
    # Accumulate output model metrics
    if run_response is not None:
        from agno.metrics import ModelType, accumulate_model_metrics

        # run_response is guaranteed non-None here (the old code re-checked it redundantly).
        accumulate_model_metrics(
            output_model_response,
            agent.output_model,
            ModelType.OUTPUT_MODEL,
            run_response.metrics,
        )
    model_response.content = output_model_response.content
def generate_response_with_output_model_stream(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    run_messages: RunMessages,
    stream_events: bool = False,
) -> Iterator[RunOutputEvent]:
    """Stream the final response from the agent's dedicated output model.

    No-op generator when the agent has no output model. Streams the output
    model's chunks through handle_model_response_chunk (bracketed by
    started/completed events when streaming) and finally replaces
    run_response.messages with the run messages flagged for agent memory.
    """
    from agno.agent._messages import get_messages_for_output_model
    from agno.utils.events import (
        create_output_model_response_completed_event,
        create_output_model_response_started_event,
    )

    if agent.output_model is None:
        return
    if stream_events:
        yield handle_event(
            create_output_model_response_started_event(run_response),
            run_response,
            events_to_skip=agent.events_to_skip,  # type: ignore
            store_events=agent.store_events,
        )
    messages_for_output_model = get_messages_for_output_model(agent, run_messages.messages)
    # Accumulator mutated in place by handle_model_response_chunk.
    model_response = ModelResponse(content="")
    for model_response_event in agent.output_model.response_stream(
        messages=messages_for_output_model, run_response=run_response
    ):
        yield from handle_model_response_chunk(
            agent,
            session=session,
            run_response=run_response,
            model_response=model_response,
            model_response_event=model_response_event,
            stream_events=stream_events,
        )
    if stream_events:
        yield handle_event(
            create_output_model_response_completed_event(run_response),
            run_response,
            events_to_skip=agent.events_to_skip,  # type: ignore
            store_events=agent.store_events,
        )
    # Build a list of messages that should be added to the RunResponse
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the RunResponse messages
    run_response.messages = messages_for_run_response
async def agenerate_response_with_output_model(
    agent: Agent, model_response: ModelResponse, run_messages: RunMessages, run_response: Optional[RunOutput] = None
) -> None:
    """Generate the final response content using the agent's dedicated output model (async).

    No-op when the agent has no output model. Otherwise calls the output model
    with the prepared messages, accumulates its metrics onto run_response (when
    provided), and overwrites model_response.content with the result.
    """
    from agno.agent._messages import get_messages_for_output_model

    if agent.output_model is None:
        return
    messages_for_output_model = get_messages_for_output_model(agent, run_messages.messages)
    output_model_response: ModelResponse = await agent.output_model.aresponse(messages=messages_for_output_model)
    # Accumulate output model metrics
    if run_response is not None:
        from agno.metrics import ModelType, accumulate_model_metrics

        # run_response is guaranteed non-None here (the old code re-checked it redundantly).
        accumulate_model_metrics(
            output_model_response,
            agent.output_model,
            ModelType.OUTPUT_MODEL,
            run_response.metrics,
        )
    model_response.content = output_model_response.content
async def agenerate_response_with_output_model_stream(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    run_messages: RunMessages,
    stream_events: bool = False,
) -> AsyncIterator[RunOutputEvent]:
    """Stream the final response from the agent's dedicated output model (async).

    No-op generator when the agent has no output model. Streams the output
    model's chunks through handle_model_response_chunk (bracketed by
    started/completed events when streaming) and finally replaces
    run_response.messages with the run messages flagged for agent memory.
    """
    from agno.agent._messages import get_messages_for_output_model
    from agno.utils.events import (
        create_output_model_response_completed_event,
        create_output_model_response_started_event,
    )

    if agent.output_model is None:
        return
    if stream_events:
        yield handle_event(
            create_output_model_response_started_event(run_response),
            run_response,
            events_to_skip=agent.events_to_skip,  # type: ignore
            store_events=agent.store_events,
        )
    messages_for_output_model = get_messages_for_output_model(agent, run_messages.messages)
    # Accumulator mutated in place by handle_model_response_chunk.
    model_response = ModelResponse(content="")
    model_response_stream = agent.output_model.aresponse_stream(
        messages=messages_for_output_model, run_response=run_response
    )
    async for model_response_event in model_response_stream:
        for event in handle_model_response_chunk(
            agent,
            session=session,
            run_response=run_response,
            model_response=model_response,
            model_response_event=model_response_event,
            stream_events=stream_events,
        ):
            yield event
    if stream_events:
        yield handle_event(
            create_output_model_response_completed_event(run_response),
            run_response,
            events_to_skip=agent.events_to_skip,  # type: ignore
            store_events=agent.store_events,
        )
    # Build a list of messages that should be added to the RunResponse
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the RunResponse messages
    run_response.messages = messages_for_run_response
# ---------------------------------------------------------------------------
# Reasoning tool-call helpers
# ---------------------------------------------------------------------------
def update_reasoning_content_from_tool_call(
    agent: Agent, run_response: RunOutput, tool_name: str, tool_args: Dict[str, Any]
) -> Optional[ReasoningStep]:
    """Update reasoning_content based on tool calls that look like thinking or reasoning tools.

    Recognizes three tool-call shapes. For each, a ReasoningStep is recorded in
    the run output's metadata and a markdown rendering is appended to its
    reasoning_content:

    1. ``think`` with ``title`` + ``thought`` (ReasoningTools.think)
    2. ``analyze`` with ``title`` (ReasoningTools.analyze)
    3. ``think`` with only ``thought`` (simple ReasoningTool.think)

    Returns the created ReasoningStep, or None when the call matches none of
    these shapes.
    """
    # Case 1: ReasoningTools.think (has title, thought, optional action and confidence)
    if tool_name.lower() == "think" and "title" in tool_args and "thought" in tool_args:
        title = tool_args["title"]
        thought = tool_args["thought"]
        action = tool_args.get("action", "")
        confidence = tool_args.get("confidence", None)
        reasoning_step = ReasoningStep(
            title=title,
            reasoning=thought,
            action=action,
            next_action=NextAction.CONTINUE,
            confidence=confidence,
            result=None,
        )
        add_reasoning_step_to_metadata(run_response=run_response, reasoning_step=reasoning_step)
        # Markdown rendering mirrors format_reasoning_step_content's layout.
        formatted_content = f"## {title}\n{thought}\n"
        if action:
            formatted_content += f"Action: {action}\n"
        if confidence is not None:
            formatted_content += f"Confidence: {confidence}\n"
        formatted_content += "\n"
        append_to_reasoning_content(run_response=run_response, content=formatted_content)
        return reasoning_step
    # Case 2: ReasoningTools.analyze (has title, result, analysis, optional next_action and confidence)
    elif tool_name.lower() == "analyze" and "title" in tool_args:
        title = tool_args["title"]
        result = tool_args.get("result", "")
        analysis = tool_args.get("analysis", "")
        next_action = tool_args.get("next_action", "")
        confidence = tool_args.get("confidence", None)
        # Map the free-text next_action string onto the NextAction enum (defaults to CONTINUE).
        next_action_enum = NextAction.CONTINUE
        if next_action.lower() == "validate":
            next_action_enum = NextAction.VALIDATE
        elif next_action.lower() in ["final", "final_answer", "finalize"]:
            next_action_enum = NextAction.FINAL_ANSWER
        reasoning_step = ReasoningStep(
            title=title,
            result=result,
            reasoning=analysis,
            next_action=next_action_enum,
            confidence=confidence,
            action=None,
        )
        add_reasoning_step_to_metadata(run_response=run_response, reasoning_step=reasoning_step)
        formatted_content = f"## {title}\n"
        if result:
            formatted_content += f"Result: {result}\n"
        if analysis:
            formatted_content += f"{analysis}\n"
        # "continue" is the default; only surface explicit transitions.
        if next_action and next_action.lower() != "continue":
            formatted_content += f"Next Action: {next_action}\n"
        if confidence is not None:
            formatted_content += f"Confidence: {confidence}\n"
        formatted_content += "\n"
        append_to_reasoning_content(run_response=run_response, content=formatted_content)
        return reasoning_step
    # Case 3: ReasoningTool.think (simple format, just has 'thought'; only reached
    # when 'title' is absent, since Case 1 matches title+thought first)
    elif tool_name.lower() == "think" and "thought" in tool_args:
        thought = tool_args["thought"]
        reasoning_step = ReasoningStep(  # type: ignore
            title="Thinking",
            reasoning=thought,
            confidence=None,
        )
        formatted_content = f"## Thinking\n{thought}\n\n"
        add_reasoning_step_to_metadata(run_response=run_response, reasoning_step=reasoning_step)
        append_to_reasoning_content(run_response=run_response, content=formatted_content)
        return reasoning_step
    return None
# ---------------------------------------------------------------------------
# Output format resolution
# ---------------------------------------------------------------------------
def model_should_return_structured_output(agent: Agent, run_context: Optional[RunContext] = None) -> bool:
    """Return True when the model should emit native structured output for this run.

    Requires a model with native structured-output support, an output schema on
    the run context, and either JSON mode disabled or structured outputs
    explicitly enabled.
    """
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    agent.model = cast(Model, agent.model)
    if not agent.model.supports_native_structured_outputs:
        return False
    if output_schema is None:
        return False
    return (not agent.use_json_mode) or bool(agent.structured_outputs)
def get_response_format(
    agent: Agent, model: Optional[Model] = None, run_context: Optional[RunContext] = None
) -> Optional[Union[Dict, Type[BaseModel]]]:
    """Resolve the response_format to pass to the model for this run.

    Returns one of:
      * None -- no output schema is set, or a json-schema-capable model was
        asked for native-style structured output it cannot honor;
      * the output schema itself -- for models with native structured-output
        support (or dict schemas passed through on the json-schema path);
      * a ``{"type": "json_schema", ...}`` dict built from the Pydantic model;
      * ``{"type": "json_object"}`` -- plain JSON-mode fallback.
    """
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    model = cast(Model, model or agent.model)
    if output_schema is None:
        return None
    else:
        json_response_format: Dict[str, Any] = {"type": "json_object"}
        if model.supports_native_structured_outputs:
            if not agent.use_json_mode or agent.structured_outputs:
                log_debug("Setting Model.response_format to Agent.output_schema")
                return output_schema
            else:
                log_debug("Model supports native structured outputs but it is not enabled. Using JSON mode instead.")
                return json_response_format
        elif model.supports_json_schema_outputs:
            if agent.use_json_mode or (not agent.structured_outputs):
                log_debug("Setting Model.response_format to JSON response mode")
                # Handle JSON schema - pass through directly (user provides full provider format)
                if isinstance(output_schema, dict):
                    return output_schema
                # Handle Pydantic schema
                return {
                    "type": "json_schema",
                    "json_schema": {
                        "name": output_schema.__name__,
                        "schema": output_schema.model_json_schema(),
                    },
                }
            else:
                # Native-style structured output requested on a json-schema-only model: no format.
                return None
        else:
            log_debug("Model does not support structured or JSON schema outputs.")
            return json_response_format
# ---------------------------------------------------------------------------
# Response conversion
# ---------------------------------------------------------------------------
def convert_response_to_structured_format(
    agent: Agent, run_response: Union[RunOutput, ModelResponse], run_context: Optional[RunContext] = None
):
    """Parse ``run_response.content`` (a JSON string) into the run's output schema, in place.

    Dict schemas produce a plain dict; Pydantic schemas produce a model instance
    when ``agent.parse_response`` is enabled. No-op when no schema is set.
    """
    output_schema = run_context.output_schema if run_context else None
    if output_schema is None:
        return

    if isinstance(output_schema, dict):
        # Dict schemas are never converted into a BaseModel.
        if not isinstance(run_response.content, str):
            return
        parsed_dict = parse_response_dict_str(run_response.content)
        if parsed_dict is None:
            log_warning("Failed to parse JSON response against the provided output schema.")
            return
        run_response.content = parsed_dict
        if isinstance(run_response, RunOutput):
            run_response.content_type = "dict"
        return

    # Pydantic schema: nothing to do if the content is already an instance.
    if isinstance(run_response.content, output_schema):
        return
    if not (isinstance(run_response.content, str) and agent.parse_response):
        log_warning("Something went wrong. Run response content is not a string")
        return
    try:
        structured_output = parse_response_model_str(run_response.content, output_schema)
        if structured_output is not None:
            # Update RunOutput
            run_response.content = structured_output
            if isinstance(run_response, RunOutput):
                run_response.content_type = output_schema.__name__
        else:
            log_warning("Failed to convert response to output_schema")
    except Exception as e:
        log_warning(f"Failed to convert response to output model: {e}")
# ---------------------------------------------------------------------------
# Run response update
# ---------------------------------------------------------------------------
def update_run_response(
    agent: Agent,
    model_response: ModelResponse,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
):
    """Copy the final ModelResponse fields onto the RunOutput (non-streaming path).

    Mutates ``run_response`` in place: content, reasoning content, citations,
    provider data, tool executions, audio, created_at, and the message list
    taken from ``run_messages``. Returns None.
    """
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    # Handle structured outputs
    if output_schema is not None and model_response.parsed is not None:
        # We get native structured outputs from the model
        if model_should_return_structured_output(agent, run_context=run_context):
            # Update the run_response content with the structured output
            run_response.content = model_response.parsed
            # Update the run_response content_type with the structured output class name
            run_response.content_type = "dict" if isinstance(output_schema, dict) else output_schema.__name__
        # NOTE(review): when a schema is set but native structured output is not
        # used, content is intentionally not taken from model_response here.
    else:
        # Update the run_response content with the model response content
        run_response.content = model_response.content
    # Update the run_response reasoning content with the model response reasoning content
    if model_response.reasoning_content is not None:
        run_response.reasoning_content = model_response.reasoning_content
    # Redacted reasoning is appended to (or substitutes for) the reasoning content.
    if model_response.redacted_reasoning_content is not None:
        if run_response.reasoning_content is None:
            run_response.reasoning_content = model_response.redacted_reasoning_content
        else:
            run_response.reasoning_content += model_response.redacted_reasoning_content
    # Update the run_response citations with the model response citations
    if model_response.citations is not None:
        run_response.citations = model_response.citations
    if model_response.provider_data is not None:
        run_response.model_provider_data = model_response.provider_data
    # Update the run_response tools with the model response tool_executions
    if model_response.tool_executions is not None:
        if run_response.tools is None:
            run_response.tools = model_response.tool_executions
        else:
            run_response.tools.extend(model_response.tool_executions)
        # For Reasoning/Thinking/Knowledge Tools update reasoning_content in RunOutput
        for tool_call in model_response.tool_executions:
            tool_name = tool_call.tool_name or ""
            if tool_name.lower() in ["think", "analyze"]:
                tool_args = tool_call.tool_args or {}
                update_reasoning_content_from_tool_call(
                    agent,
                    run_response=run_response,
                    tool_name=tool_name,
                    tool_args=tool_args,
                )
    # Update the run_response audio with the model response audio
    if model_response.audio is not None:
        run_response.response_audio = model_response.audio
    # Update the run_response created_at with the model response created_at
    run_response.created_at = model_response.created_at
    # Build a list of messages that should be added to the RunOutput
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the RunOutput messages
    run_response.messages = messages_for_run_response
# ---------------------------------------------------------------------------
# Model response streaming
# ---------------------------------------------------------------------------
def handle_model_response_stream(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    run_messages: RunMessages,
    tools: Optional[List[Union[Function, dict]]] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
) -> Iterator[RunOutputEvent]:
    """Stream events from the model for one run (sync).

    Drives ``agent.model.response_stream`` over ``run_messages``, forwards
    request/compression lifecycle events when ``stream_events`` is set,
    delegates all other chunks to ``handle_model_response_chunk``, then
    finalizes ``run_response`` (messages, audio) and emits a
    reasoning-completed event if reasoning was started.
    """
    agent.model = cast(Model, agent.model)
    # Shared mutable state threaded through handle_model_response_chunk.
    reasoning_state = {
        "reasoning_started": False,
        "reasoning_time_taken": 0.0,
    }
    model_response = ModelResponse(content="")
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    should_parse_structured_output = output_schema is not None and agent.parse_response and agent.parser_model is None
    # When we must parse the full response into a schema, disable token streaming.
    stream_model_response = True
    if should_parse_structured_output:
        log_debug("Response model set, model response is not streamed.")
        stream_model_response = False
    for model_response_event in agent.model.response_stream(
        messages=run_messages.messages,
        response_format=response_format,
        tools=tools,
        tool_choice=agent.tool_choice,
        tool_call_limit=agent.tool_call_limit,
        stream_model_response=stream_model_response,
        run_response=run_response,
        send_media_to_model=agent.send_media_to_model,
        compression_manager=agent.compression_manager if agent.compress_tool_results else None,
    ):
        # Handle LLM request events and compression events from ModelResponse
        if isinstance(model_response_event, ModelResponse):
            if model_response_event.event == ModelResponseEvent.model_request_started.value:
                if stream_events:
                    yield handle_event( # type: ignore
                        create_model_request_started_event(
                            from_run_response=run_response,
                            model=agent.model.id,
                            model_provider=agent.model.provider,
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.model_request_completed.value:
                if stream_events:
                    yield handle_event( # type: ignore
                        create_model_request_completed_event(
                            from_run_response=run_response,
                            model=agent.model.id,
                            model_provider=agent.model.provider,
                            input_tokens=model_response_event.input_tokens,
                            output_tokens=model_response_event.output_tokens,
                            total_tokens=model_response_event.total_tokens,
                            time_to_first_token=model_response_event.time_to_first_token,
                            reasoning_tokens=model_response_event.reasoning_tokens,
                            cache_read_tokens=model_response_event.cache_read_tokens,
                            cache_write_tokens=model_response_event.cache_write_tokens,
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
            # Handle compression events
            if model_response_event.event == ModelResponseEvent.compression_started.value:
                if stream_events:
                    yield handle_event( # type: ignore
                        create_compression_started_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.compression_completed.value:
                if stream_events:
                    stats = model_response_event.compression_stats or {}
                    yield handle_event( # type: ignore
                        create_compression_completed_event(
                            from_run_response=run_response,
                            tool_results_compressed=stats.get("tool_results_compressed"),
                            original_size=stats.get("original_size"),
                            compressed_size=stats.get("compressed_size"),
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
        # All other chunks (content, tool calls, media) are handled per-event.
        yield from handle_model_response_chunk(
            agent,
            session=session,
            run_response=run_response,
            model_response=model_response,
            model_response_event=model_response_event,
            reasoning_state=reasoning_state,
            parse_structured_output=should_parse_structured_output,
            stream_events=stream_events,
            session_state=session_state,
            run_context=run_context,
        )
    # Update RunOutput
    # Build a list of messages that should be added to the RunOutput
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the RunOutput messages
    run_response.messages = messages_for_run_response
    # Determine reasoning completed
    if stream_events and reasoning_state["reasoning_started"]:
        all_reasoning_steps: List[ReasoningStep] = []
        if run_response and run_response.reasoning_steps:
            all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
        if all_reasoning_steps:
            add_reasoning_metrics_to_metadata(
                run_response=run_response,
                reasoning_time_taken=reasoning_state["reasoning_time_taken"],
            )
            yield handle_event( # type: ignore
                create_reasoning_completed_event(
                    from_run_response=run_response,
                    content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
                    content_type=ReasoningSteps.__name__,
                ),
                run_response,
                events_to_skip=agent.events_to_skip, # type: ignore
                store_events=agent.store_events,
            )
    # Update the run_response audio if streaming
    if model_response.audio is not None:
        run_response.response_audio = model_response.audio
async def ahandle_model_response_stream(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    run_messages: RunMessages,
    tools: Optional[List[Union[Function, dict]]] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
) -> AsyncIterator[RunOutputEvent]:
    """Stream events from the model for one run (async twin of handle_model_response_stream).

    Drives ``agent.model.aresponse_stream`` over ``run_messages``, forwards
    request/compression lifecycle events when ``stream_events`` is set,
    delegates all other chunks to ``handle_model_response_chunk``, then
    finalizes ``run_response`` (messages, audio) and emits a
    reasoning-completed event if reasoning was started.
    """
    agent.model = cast(Model, agent.model)
    # Shared mutable state threaded through handle_model_response_chunk.
    reasoning_state = {
        "reasoning_started": False,
        "reasoning_time_taken": 0.0,
    }
    model_response = ModelResponse(content="")
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    should_parse_structured_output = output_schema is not None and agent.parse_response and agent.parser_model is None
    # When we must parse the full response into a schema, disable token streaming.
    stream_model_response = True
    if should_parse_structured_output:
        log_debug("Response model set, model response is not streamed.")
        stream_model_response = False
    model_response_stream = agent.model.aresponse_stream(
        messages=run_messages.messages,
        response_format=response_format,
        tools=tools,
        tool_choice=agent.tool_choice,
        tool_call_limit=agent.tool_call_limit,
        stream_model_response=stream_model_response,
        run_response=run_response,
        send_media_to_model=agent.send_media_to_model,
        compression_manager=agent.compression_manager if agent.compress_tool_results else None,
    ) # type: ignore
    async for model_response_event in model_response_stream: # type: ignore
        # Handle LLM request events and compression events from ModelResponse
        if isinstance(model_response_event, ModelResponse):
            if model_response_event.event == ModelResponseEvent.model_request_started.value:
                if stream_events:
                    yield handle_event( # type: ignore
                        create_model_request_started_event(
                            from_run_response=run_response,
                            model=agent.model.id,
                            model_provider=agent.model.provider,
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.model_request_completed.value:
                if stream_events:
                    yield handle_event( # type: ignore
                        create_model_request_completed_event(
                            from_run_response=run_response,
                            model=agent.model.id,
                            model_provider=agent.model.provider,
                            input_tokens=model_response_event.input_tokens,
                            output_tokens=model_response_event.output_tokens,
                            total_tokens=model_response_event.total_tokens,
                            time_to_first_token=model_response_event.time_to_first_token,
                            reasoning_tokens=model_response_event.reasoning_tokens,
                            cache_read_tokens=model_response_event.cache_read_tokens,
                            cache_write_tokens=model_response_event.cache_write_tokens,
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
            # Handle compression events
            if model_response_event.event == ModelResponseEvent.compression_started.value:
                if stream_events:
                    yield handle_event( # type: ignore
                        create_compression_started_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.compression_completed.value:
                if stream_events:
                    stats = model_response_event.compression_stats or {}
                    yield handle_event( # type: ignore
                        create_compression_completed_event(
                            from_run_response=run_response,
                            tool_results_compressed=stats.get("tool_results_compressed"),
                            original_size=stats.get("original_size"),
                            compressed_size=stats.get("compressed_size"),
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
                continue
        # All other chunks (content, tool calls, media) are handled per-event.
        for event in handle_model_response_chunk(
            agent,
            session=session,
            run_response=run_response,
            model_response=model_response,
            model_response_event=model_response_event,
            reasoning_state=reasoning_state,
            parse_structured_output=should_parse_structured_output,
            stream_events=stream_events,
            session_state=session_state,
            run_context=run_context,
        ):
            yield event
    # Update RunOutput
    # Build a list of messages that should be added to the RunOutput
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the RunOutput messages
    run_response.messages = messages_for_run_response
    if stream_events and reasoning_state["reasoning_started"]:
        all_reasoning_steps: List[ReasoningStep] = []
        if run_response and run_response.reasoning_steps:
            all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)
        if all_reasoning_steps:
            add_reasoning_metrics_to_metadata(
                run_response=run_response,
                reasoning_time_taken=reasoning_state["reasoning_time_taken"],
            )
            yield handle_event( # type: ignore
                create_reasoning_completed_event(
                    from_run_response=run_response,
                    content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
                    content_type=ReasoningSteps.__name__,
                ),
                run_response,
                events_to_skip=agent.events_to_skip, # type: ignore
                store_events=agent.store_events,
            )
    # Update the run_response audio if streaming
    if model_response.audio is not None:
        run_response.response_audio = model_response.audio
def handle_model_response_chunk(
    agent: Agent,
    session: AgentSession,
    run_response: RunOutput,
    model_response: ModelResponse,
    model_response_event: Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent],
    reasoning_state: Optional[Dict[str, Any]] = None,
    parse_structured_output: bool = False,
    stream_events: bool = False,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
) -> Iterator[RunOutputEvent]:
    """Process one streamed model event, mutating run state and yielding run events.

    Dispatches on the event type: run/team/workflow events are bubbled up
    unchanged; assistant content (with optional structured-output parsing),
    tool-call paused/started/completed events, and audio/image chunks are
    accumulated into ``model_response`` and ``run_response`` in place.
    ``reasoning_state`` and ``session_state`` are also mutated in place.
    """
    from agno.run.workflow import WorkflowRunOutputEvent

    if (
        isinstance(model_response_event, tuple(get_args(RunOutputEvent)))
        or isinstance(model_response_event, tuple(get_args(TeamRunOutputEvent)))
        or isinstance(model_response_event, tuple(get_args(WorkflowRunOutputEvent)))
    ):
        # Custom events from tools are re-stamped with this agent/run's identity.
        if model_response_event.event == RunEvent.custom_event: # type: ignore
            model_response_event.agent_id = agent.id # type: ignore
            model_response_event.agent_name = agent.name # type: ignore
            model_response_event.session_id = session.session_id # type: ignore
            model_response_event.run_id = run_response.run_id # type: ignore
        # We just bubble the event up
        yield handle_event( # type: ignore
            model_response_event, # type: ignore
            run_response,
            events_to_skip=agent.events_to_skip, # type: ignore
            store_events=agent.store_events,
        )
    else:
        model_response_event = cast(ModelResponse, model_response_event)
        # If the model response is an assistant_response, yield a RunOutput
        if model_response_event.event == ModelResponseEvent.assistant_response.value:
            content_type = "str"
            # Process content and thinking
            if model_response_event.content is not None:
                if parse_structured_output:
                    # Whole-response parse: content is not accumulated, it is replaced.
                    model_response.content = model_response_event.content
                    convert_response_to_structured_format(agent, model_response, run_context=run_context)
                    # Get output_schema from run_context
                    output_schema = run_context.output_schema if run_context else None
                    content_type = "dict" if isinstance(output_schema, dict) else output_schema.__name__ # type: ignore
                    run_response.content = model_response.content
                    run_response.content_type = content_type
                else:
                    # Token streaming: append the delta to the accumulated content.
                    model_response.content = (model_response.content or "") + model_response_event.content
                    run_response.content = model_response.content
                    run_response.content_type = "str"
            # Process reasoning content
            if model_response_event.reasoning_content is not None:
                model_response.reasoning_content = (
                    model_response.reasoning_content or ""
                ) + model_response_event.reasoning_content
                run_response.reasoning_content = model_response.reasoning_content
            if model_response_event.redacted_reasoning_content is not None:
                if not model_response.reasoning_content:
                    model_response.reasoning_content = model_response_event.redacted_reasoning_content
                else:
                    model_response.reasoning_content += model_response_event.redacted_reasoning_content
                run_response.reasoning_content = model_response.reasoning_content
            # Handle provider data (one chunk)
            if model_response_event.provider_data is not None:
                run_response.model_provider_data = model_response_event.provider_data
            # Handle citations (one chunk)
            if model_response_event.citations is not None:
                run_response.citations = model_response_event.citations
            # Only yield if we have content to show
            if content_type != "str":
                # Structured output: emit the fully-parsed content once.
                yield handle_event( # type: ignore
                    create_run_output_content_event(
                        from_run_response=run_response,
                        content=model_response.content,
                        content_type=content_type,
                    ),
                    run_response,
                    events_to_skip=agent.events_to_skip, # type: ignore
                    store_events=agent.store_events,
                )
            elif (
                model_response_event.content is not None
                or model_response_event.reasoning_content is not None
                or model_response_event.redacted_reasoning_content is not None
                or model_response_event.citations is not None
                or model_response_event.provider_data is not None
            ):
                # Plain streaming: emit the delta from this event only.
                yield handle_event( # type: ignore
                    create_run_output_content_event(
                        from_run_response=run_response,
                        content=model_response_event.content,
                        reasoning_content=model_response_event.reasoning_content,
                        redacted_reasoning_content=model_response_event.redacted_reasoning_content,
                        citations=model_response_event.citations,
                        model_provider_data=model_response_event.provider_data,
                    ),
                    run_response,
                    events_to_skip=agent.events_to_skip, # type: ignore
                    store_events=agent.store_events,
                )
            # Process audio
            if model_response_event.audio is not None:
                if model_response.audio is None:
                    model_response.audio = Audio(id=str(uuid4()), content=b"", transcript="")
                if model_response_event.audio.id is not None:
                    model_response.audio.id = model_response_event.audio.id # type: ignore
                if model_response_event.audio.content is not None:
                    # Handle both base64 string and bytes content
                    if isinstance(model_response_event.audio.content, str):
                        # Decode base64 string to bytes
                        try:
                            import base64

                            decoded_content = base64.b64decode(model_response_event.audio.content)
                            if model_response.audio.content is None:
                                model_response.audio.content = b""
                            model_response.audio.content += decoded_content
                        except Exception:
                            # If decode fails, encode string as bytes
                            if model_response.audio.content is None:
                                model_response.audio.content = b""
                            model_response.audio.content += model_response_event.audio.content.encode("utf-8")
                    elif isinstance(model_response_event.audio.content, bytes):
                        # Content is already bytes
                        if model_response.audio.content is None:
                            model_response.audio.content = b""
                        model_response.audio.content += model_response_event.audio.content
                if model_response_event.audio.transcript is not None:
                    model_response.audio.transcript += model_response_event.audio.transcript # type: ignore
                if model_response_event.audio.expires_at is not None:
                    model_response.audio.expires_at = model_response_event.audio.expires_at # type: ignore
                if model_response_event.audio.mime_type is not None:
                    model_response.audio.mime_type = model_response_event.audio.mime_type # type: ignore
                if model_response_event.audio.sample_rate is not None:
                    model_response.audio.sample_rate = model_response_event.audio.sample_rate
                if model_response_event.audio.channels is not None:
                    model_response.audio.channels = model_response_event.audio.channels
                # Yield the audio and transcript bit by bit
                run_response.response_audio = Audio(
                    id=model_response_event.audio.id,
                    content=model_response_event.audio.content,
                    transcript=model_response_event.audio.transcript,
                    sample_rate=model_response_event.audio.sample_rate,
                    channels=model_response_event.audio.channels,
                )
                run_response.created_at = model_response_event.created_at
                yield handle_event( # type: ignore
                    create_run_output_content_event(
                        from_run_response=run_response,
                        response_audio=run_response.response_audio,
                    ),
                    run_response,
                    events_to_skip=agent.events_to_skip, # type: ignore
                    store_events=agent.store_events,
                )
            if model_response_event.images is not None:
                # Only the newest image is streamed; all are accumulated below.
                yield handle_event( # type: ignore
                    create_run_output_content_event(
                        from_run_response=run_response,
                        image=model_response_event.images[-1],
                    ),
                    run_response,
                    events_to_skip=agent.events_to_skip, # type: ignore
                    store_events=agent.store_events,
                )
                if model_response.images is None:
                    model_response.images = []
                model_response.images.extend(model_response_event.images)
                # Store media in run_response if store_media is enabled
                if agent.store_media:
                    for image in model_response_event.images:
                        if run_response.images is None:
                            run_response.images = []
                        run_response.images.append(image)
        # Handle tool interruption events (HITL flow)
        elif model_response_event.event == ModelResponseEvent.tool_call_paused.value:
            # Add tool calls to the run_response
            tool_executions_list = model_response_event.tool_executions
            if tool_executions_list is not None:
                # Add tool calls to the agent.run_response
                if run_response.tools is None:
                    run_response.tools = tool_executions_list
                else:
                    run_response.tools.extend(tool_executions_list)
                # Add requirement to the run_response
                if run_response.requirements is None:
                    run_response.requirements = []
                run_response.requirements.append(RunRequirement(tool_execution=tool_executions_list[-1]))
        # If the model response is a tool_call_started, add the tool call to the run_response
        elif (
            model_response_event.event == ModelResponseEvent.tool_call_started.value
        ): # Add tool calls to the run_response
            tool_executions_list = model_response_event.tool_executions
            if tool_executions_list is not None:
                # Add tool calls to the agent.run_response
                if run_response.tools is None:
                    run_response.tools = tool_executions_list
                else:
                    run_response.tools.extend(tool_executions_list)
                # Yield each tool call started event
                if stream_events:
                    for tool in tool_executions_list:
                        yield handle_event( # type: ignore
                            create_tool_call_started_event(from_run_response=run_response, tool=tool),
                            run_response,
                            events_to_skip=agent.events_to_skip, # type: ignore
                            store_events=agent.store_events,
                        )
        # If the model response is a tool_call_completed, update the existing tool call in the run_response
        elif model_response_event.event == ModelResponseEvent.tool_call_completed.value:
            if model_response_event.updated_session_state is not None:
                # update the session_state for RunOutput
                if session_state is not None:
                    merge_dictionaries(session_state, model_response_event.updated_session_state)
                # update the DB session
                if session.session_data is not None and session.session_data.get("session_state") is not None:
                    merge_dictionaries(
                        session.session_data["session_state"], model_response_event.updated_session_state
                    )
            if model_response_event.images is not None:
                for image in model_response_event.images:
                    if run_response.images is None:
                        run_response.images = []
                    run_response.images.append(image)
            if model_response_event.videos is not None:
                for video in model_response_event.videos:
                    if run_response.videos is None:
                        run_response.videos = []
                    run_response.videos.append(video)
            if model_response_event.audios is not None:
                for audio in model_response_event.audios:
                    if run_response.audio is None:
                        run_response.audio = []
                    run_response.audio.append(audio)
            if model_response_event.files is not None:
                for file_obj in model_response_event.files:
                    if run_response.files is None:
                        run_response.files = []
                    run_response.files.append(file_obj)
            reasoning_step: Optional[ReasoningStep] = None
            tool_executions_list = model_response_event.tool_executions
            if tool_executions_list is not None:
                # Update the existing tool call in the run_response
                if run_response.tools:
                    # Create a mapping of tool_call_id to index
                    tool_call_index_map = {
                        tc.tool_call_id: i for i, tc in enumerate(run_response.tools) if tc.tool_call_id is not None
                    }
                    # Process tool calls
                    for tool_call_dict in tool_executions_list:
                        tool_call_id = tool_call_dict.tool_call_id or ""
                        index = tool_call_index_map.get(tool_call_id)
                        if index is not None:
                            run_response.tools[index] = tool_call_dict
                else:
                    run_response.tools = tool_executions_list
                # Only iterate through new tool calls
                for tool_call in tool_executions_list:
                    tool_name = tool_call.tool_name or ""
                    if tool_name.lower() in ["think", "analyze"]:
                        tool_args = tool_call.tool_args or {}
                        reasoning_step = update_reasoning_content_from_tool_call(
                            agent,
                            run_response=run_response,
                            tool_name=tool_name,
                            tool_args=tool_args,
                        )
                        tool_call_metrics = tool_call.metrics
                        if (
                            tool_call_metrics is not None
                            and tool_call_metrics.duration is not None
                            and reasoning_state is not None
                        ):
                            reasoning_state["reasoning_time_taken"] = reasoning_state["reasoning_time_taken"] + float(
                                tool_call_metrics.duration
                            )
                    if stream_events:
                        yield handle_event( # type: ignore
                            create_tool_call_completed_event(
                                from_run_response=run_response, tool=tool_call, content=model_response_event.content
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip, # type: ignore
                            store_events=agent.store_events,
                        )
                        if tool_call.tool_call_error:
                            yield handle_event( # type: ignore
                                create_tool_call_error_event(
                                    from_run_response=run_response, tool=tool_call, error=str(tool_call.result)
                                ),
                                run_response,
                                events_to_skip=agent.events_to_skip, # type: ignore
                                store_events=agent.store_events,
                            )
            if stream_events:
                if reasoning_step is not None:
                    # First reasoning step of the run also emits a reasoning-started event.
                    if reasoning_state and not reasoning_state["reasoning_started"]:
                        yield handle_event( # type: ignore
                            create_reasoning_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=agent.events_to_skip, # type: ignore
                            store_events=agent.store_events,
                        )
                        reasoning_state["reasoning_started"] = True
                    yield handle_event( # type: ignore
                        create_reasoning_step_event(
                            from_run_response=run_response,
                            reasoning_step=reasoning_step,
                            reasoning_content=run_response.reasoning_content or "",
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip, # type: ignore
                        store_events=agent.store_events,
                    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_response.py",
"license": "Apache License 2.0",
"lines": 1458,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_run.py | """Core run loop and execution helpers for Agent."""
from __future__ import annotations
import asyncio
import time
import warnings
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
Type,
Union,
cast,
)
from uuid import uuid4
from pydantic import BaseModel
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.agent._init import _initialize_session_state
from agno.agent._run_options import resolve_run_options
from agno.agent._session import initialize_session, update_session_metrics
from agno.exceptions import (
InputCheckError,
OutputCheckError,
RunCancelledException,
)
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.base import Model
from agno.models.message import Message
from agno.models.metrics import RunMetrics, merge_background_metrics
from agno.models.response import ModelResponse, ToolExecution
from agno.run import RunContext, RunStatus
from agno.run.agent import (
RunInput,
RunOutput,
RunOutputEvent,
)
from agno.run.approval import (
acreate_approval_from_pause,
create_approval_from_pause,
)
from agno.run.cancel import (
acancel_run as acancel_run_global,
)
from agno.run.cancel import (
acleanup_run,
araise_if_cancelled,
aregister_run,
cleanup_run,
raise_if_cancelled,
register_run,
)
from agno.run.cancel import (
cancel_run as cancel_run_global,
)
from agno.run.messages import RunMessages
from agno.run.requirement import RunRequirement
from agno.session import AgentSession
from agno.tools.function import Function
from agno.utils.agent import (
await_for_open_threads,
await_for_thread_tasks_stream,
collect_background_metrics,
scrub_history_messages_from_run_output,
scrub_media_from_run_output,
scrub_tool_results_from_run_output,
store_media_util,
validate_input,
validate_media_object_id,
wait_for_open_threads,
wait_for_thread_tasks_stream,
)
from agno.utils.events import (
add_error_event,
create_run_cancelled_event,
create_run_completed_event,
create_run_content_completed_event,
create_run_continued_event,
create_run_error_event,
create_run_paused_event,
create_run_started_event,
create_session_summary_completed_event,
create_session_summary_started_event,
handle_event,
)
from agno.utils.hooks import (
normalize_post_hooks,
normalize_pre_hooks,
)
from agno.utils.log import (
log_debug,
log_error,
log_info,
log_warning,
)
from agno.utils.response import get_paused_content
# Strong references to background tasks so they aren't garbage-collected mid-execution.
# See: https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
_background_tasks: set[asyncio.Task[None]] = set()
# ---------------------------------------------------------------------------
# Run dependency resolution
# ---------------------------------------------------------------------------
def resolve_run_dependencies(agent: Agent, run_context: RunContext) -> None:
    """Resolve callable entries in ``run_context.dependencies`` in place (sync path)."""
    from inspect import iscoroutine, iscoroutinefunction, signature

    # Dependencies should already be resolved in run() method
    log_debug("Resolving dependencies")
    deps = run_context.dependencies
    if not isinstance(deps, dict):
        log_warning("Run dependencies are not a dict")
        return

    for key, value in deps.items():
        # Coroutines cannot be awaited here; point the user at the async API.
        if iscoroutine(value) or iscoroutinefunction(value):
            log_warning(f"Dependency {key} is a coroutine. Use agent.arun() or agent.aprint_response() instead.")
            continue
        if not callable(value):
            deps[key] = value
            continue
        try:
            # Pass agent/run_context only when the callable declares them.
            sig = signature(value)
            call_kwargs: Dict[str, Any] = {
                name: obj
                for name, obj in (("agent", agent), ("run_context", run_context))
                if name in sig.parameters
            }
            result = value(**call_kwargs)
            # Only overwrite the entry when the callable produced something.
            if result is not None:
                deps[key] = result
        except Exception as e:
            log_warning(f"Failed to resolve dependencies for '{key}': {e}")
async def aresolve_run_dependencies(agent: Agent, run_context: RunContext) -> None:
    """Resolve callable entries in ``run_context.dependencies`` in place, awaiting coroutines."""
    from inspect import iscoroutine, signature

    log_debug("Resolving context (async)")
    deps = run_context.dependencies
    if not isinstance(deps, dict):
        log_warning("Run dependencies are not a dict")
        return

    for key, value in deps.items():
        if not callable(value):
            deps[key] = value
            continue
        try:
            # Pass agent/run_context only when the callable declares them.
            sig = signature(value)
            call_kwargs: Dict[str, Any] = {
                name: obj
                for name, obj in (("agent", agent), ("run_context", run_context))
                if name in sig.parameters
            }
            result = value(**call_kwargs)
            if iscoroutine(result):
                result = await result  # type: ignore
            # NOTE(review): unlike the sync variant, this overwrites the entry
            # even when the callable returned None — confirm this is intended.
            deps[key] = result
        except Exception as e:
            log_warning(f"Failed to resolve context for '{key}': {e}")
# ---------------------------------------------------------------------------
# Pause handling
# ---------------------------------------------------------------------------
def handle_agent_run_paused(
    agent: Agent,
    run_response: RunOutput,
    session: AgentSession,
    user_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
) -> RunOutput:
    """Finalize a paused (non-streaming) run: record approvals, persist state, and return the output."""
    run_response.status = RunStatus.paused
    run_response.content = run_response.content or get_paused_content(run_response)
    # Stamp approval_id on tools before storing so the DB has the complete data.
    create_approval_from_pause(
        db=agent.db,
        run_response=run_response,
        agent_id=agent.id,
        agent_name=agent.name,
        user_id=user_id,
    )
    cleanup_and_store(agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id)
    log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
    # Hand control back to the caller, which awaits confirmation/completion for the pending tools
    return run_response
def handle_agent_run_paused_stream(
    agent: Agent,
    run_response: RunOutput,
    session: AgentSession,
    user_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    yield_run_output: bool = False,
) -> Iterator[Union[RunOutputEvent, RunOutput]]:
    """Finalize a paused streaming run: persist approvals/state, then yield the pause event.

    When ``yield_run_output`` is True the full RunOutput is yielded after the event.
    """
    run_response.status = RunStatus.paused
    run_response.content = run_response.content or get_paused_content(run_response)
    # Stamp approval_id on tools before storing so the DB has the complete data.
    create_approval_from_pause(
        db=agent.db,
        run_response=run_response,
        agent_id=agent.id,
        agent_name=agent.name,
        user_id=user_id,
    )
    # Record a RunPausedEvent on run_response.events prior to persistence
    paused_event = handle_event(
        create_run_paused_event(
            from_run_response=run_response,
            tools=run_response.tools,
            requirements=run_response.requirements,
        ),
        run_response,
        events_to_skip=agent.events_to_skip,  # type: ignore
        store_events=agent.store_events,
    )
    cleanup_and_store(agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id)
    yield paused_event  # type: ignore
    if yield_run_output:
        # Callers that want the full output object receive it after the event
        yield run_response
    log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
async def ahandle_agent_run_paused(
    agent: Agent,
    run_response: RunOutput,
    session: AgentSession,
    user_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
) -> RunOutput:
    """Async variant: finalize a paused run, persist approvals and state, then return the output."""
    run_response.status = RunStatus.paused
    run_response.content = run_response.content or get_paused_content(run_response)
    # Stamp approval_id on tools before storing so the DB has the complete data.
    await acreate_approval_from_pause(
        db=agent.db,
        run_response=run_response,
        agent_id=agent.id,
        agent_name=agent.name,
        user_id=user_id,
    )
    await acleanup_and_store(
        agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
    )
    log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
    # Hand control back to the caller, which awaits confirmation/completion for the pending tools
    return run_response
async def ahandle_agent_run_paused_stream(
    agent: Agent,
    run_response: RunOutput,
    session: AgentSession,
    user_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    yield_run_output: bool = False,
) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
    """Async variant: finalize a paused streaming run and yield the pause event.

    When ``yield_run_output`` is True the full RunOutput is yielded after the event.
    """
    run_response.status = RunStatus.paused
    run_response.content = run_response.content or get_paused_content(run_response)
    # Stamp approval_id on tools before storing so the DB has the complete data.
    await acreate_approval_from_pause(
        db=agent.db,
        run_response=run_response,
        agent_id=agent.id,
        agent_name=agent.name,
        user_id=user_id,
    )
    # Record a RunPausedEvent on run_response.events prior to persistence
    paused_event = handle_event(
        create_run_paused_event(
            from_run_response=run_response,
            tools=run_response.tools,
            requirements=run_response.requirements,
        ),
        run_response,
        events_to_skip=agent.events_to_skip,  # type: ignore
        store_events=agent.store_events,
    )
    await acleanup_and_store(
        agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
    )
    yield paused_event  # type: ignore
    if yield_run_output:
        # Callers that want the full output object receive it after the event
        yield run_response
    log_debug(f"Agent Run Paused: {run_response.run_id}", center=True, symbol="*")
def _run(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    pre_session: Optional[AgentSession] = None,
    **kwargs: Any,
) -> RunOutput:
    """Run the Agent and return the RunOutput.

    Steps:
    1. Read or create session
    2. Update metadata and session state
    3. Resolve dependencies
    4. Execute pre-hooks
    5. Determine tools for model
    6. Prepare run messages
    7. Start memory creation in background thread
    8. Reason about the task if reasoning is enabled
    9. Generate a response from the Model (includes running function calls)
    10. Update the RunOutput with the model response
    11. Store media if enabled
    12. Convert the response to the structured format if needed
    13. Execute post-hooks
    14. Wait for background memory creation and cultural knowledge creation
    15. Create session summary
    16. Cleanup and store the run response and session

    The pipeline is attempted up to ``agent.retries + 1`` times; only generic
    exceptions trigger a retry (cancellation, check errors and KeyboardInterrupt
    return immediately). Always returns a RunOutput — errors are reported via
    ``run_response.status``, not raised to the caller.
    """
    from agno.agent._hooks import execute_post_hooks, execute_pre_hooks
    from agno.agent._init import disconnect_connectable_tools
    from agno.agent._messages import get_run_messages
    from agno.agent._response import (
        convert_response_to_structured_format,
        generate_response_with_output_model,
        handle_reasoning,
        parse_response_with_parser_model,
        update_run_response,
    )
    from agno.agent._storage import load_session_state, read_or_create_session, update_metadata
    from agno.agent._telemetry import log_agent_telemetry
    from agno.agent._tools import determine_tools_for_model

    # Register the run so it can be cancelled from the outside via raise_if_cancelled
    register_run(run_context.run_id)
    log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

    memory_future = None
    learning_future = None
    cultural_knowledge_future = None
    agent_session: Optional[AgentSession] = None
    try:
        # Set up retry logic
        num_attempts = agent.retries + 1
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Agent run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Read or create session. Reuse pre-read session on first attempt.
                if attempt == 0 and pre_session is not None:
                    agent_session = pre_session
                else:
                    agent_session = read_or_create_session(agent, session_id=session_id, user_id=user_id)
                # 2. Update metadata and session state
                # (when reusing pre_session, the dispatcher already updated metadata)
                if not (attempt == 0 and pre_session is not None):
                    update_metadata(agent, session=agent_session)
                # Initialize session state. Get it from DB if relevant.
                run_context.session_state = load_session_state(
                    agent,
                    session=agent_session,
                    session_state=run_context.session_state if run_context.session_state is not None else {},
                )
                _initialize_session_state(
                    run_context.session_state,
                    user_id=user_id,
                    session_id=session_id,
                    run_id=run_context.run_id,
                )
                # 3. Resolve dependencies
                if run_context.dependencies is not None:
                    resolve_run_dependencies(agent, run_context=run_context)
                # 4. Execute pre-hooks
                run_input = cast(RunInput, run_response.input)
                agent.model = cast(Model, agent.model)
                if agent.pre_hooks is not None:
                    # Can modify the run input
                    pre_hook_iterator = execute_pre_hooks(
                        agent,
                        hooks=agent.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_input=run_input,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Consume the generator without yielding
                    deque(pre_hook_iterator, maxlen=0)
                # 5. Determine tools for model
                processed_tools = agent.get_tools(
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    user_id=user_id,
                )
                _tools = determine_tools_for_model(
                    agent,
                    model=agent.model,
                    processed_tools=processed_tools,
                    run_response=run_response,
                    session=agent_session,
                    run_context=run_context,
                )
                # 6. Prepare run messages
                run_messages: RunMessages = get_run_messages(
                    agent,
                    run_response=run_response,
                    run_context=run_context,
                    input=run_input.input_content,
                    session=agent_session,
                    user_id=user_id,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                if len(run_messages.messages) == 0:
                    log_error("No messages to be sent to the model.")
                # 7. Start memory creation in background thread
                from agno.agent import _managers

                memory_future = _managers.start_memory_future(
                    agent,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_future=memory_future,
                )
                # Start learning extraction as a background task (runs concurrently with the main execution)
                learning_future = _managers.start_learning_future(
                    agent,
                    run_messages=run_messages,
                    session=agent_session,
                    user_id=user_id,
                    existing_future=learning_future,
                )
                # Start cultural knowledge creation in background thread
                cultural_knowledge_future = _managers.start_cultural_knowledge_future(
                    agent,
                    run_messages=run_messages,
                    existing_future=cultural_knowledge_future,
                )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 8. Reason about the task
                handle_reasoning(agent, run_response=run_response, run_messages=run_messages, run_context=run_context)
                # Check for cancellation before model call
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 9. Generate a response from the Model (includes running function calls)
                agent.model = cast(Model, agent.model)
                model_response: ModelResponse = agent.model.response(
                    messages=run_messages.messages,
                    tools=_tools,
                    tool_choice=agent.tool_choice,
                    tool_call_limit=agent.tool_call_limit,
                    response_format=response_format,
                    run_response=run_response,
                    send_media_to_model=agent.send_media_to_model,
                    compression_manager=agent.compression_manager if agent.compress_tool_results else None,
                )
                # Check for cancellation after model call
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # If an output model is provided, generate output using the output model
                generate_response_with_output_model(agent, model_response, run_messages, run_response=run_response)
                # If a parser model is provided, structure the response separately
                parse_response_with_parser_model(
                    agent, model_response, run_messages, run_context=run_context, run_response=run_response
                )
                # 10. Update the RunOutput with the model response
                update_run_response(
                    agent,
                    model_response=model_response,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                )
                # If any tool call paused the run, settle background work and return a paused RunOutput
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    wait_for_open_threads(
                        memory_future=memory_future,  # type: ignore
                        cultural_knowledge_future=cultural_knowledge_future,  # type: ignore
                        learning_future=learning_future,  # type: ignore
                    )
                    merge_background_metrics(
                        run_response.metrics,
                        collect_background_metrics(memory_future, cultural_knowledge_future, learning_future),
                    )
                    return handle_agent_run_paused(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # 11. Store media if enabled
                if agent.store_media:
                    store_media_util(run_response, model_response)
                # 12. Convert the response to the structured format if needed
                convert_response_to_structured_format(agent, run_response, run_context=run_context)
                # 13. Execute post-hooks after output is generated but before response is returned
                if agent.post_hooks is not None:
                    post_hook_iterator = execute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    deque(post_hook_iterator, maxlen=0)
                # Check for cancellation
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 14. Wait for background memory creation and cultural knowledge creation
                wait_for_open_threads(
                    memory_future=memory_future,  # type: ignore
                    cultural_knowledge_future=cultural_knowledge_future,  # type: ignore
                    learning_future=learning_future,  # type: ignore
                )
                merge_background_metrics(
                    run_response.metrics,
                    collect_background_metrics(memory_future, cultural_knowledge_future, learning_future),
                )
                # 15. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    agent_session.upsert_run(run=run_response)
                    try:
                        agent.session_summary_manager.create_session_summary(
                            session=agent_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary failure is non-fatal; the run still completes
                        log_warning(f"Error in session summary creation: {str(e)}")
                run_response.status = RunStatus.completed
                # 16. Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=agent_session, run_context=run_context, user_id=user_id
                )
                # Log Agent Telemetry
                log_agent_telemetry(agent, session_id=agent_session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                return run_response
            except RunCancelledException as e:
                # Cancellation is terminal — no retry
                log_info(f"Run {run_response.run_id} was cancelled")
                run_response.content = str(e)
                run_response.status = RunStatus.cancelled
                # Cleanup and store the run response and session
                if agent_session is not None:
                    cleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                # Guardrail/check failures are terminal — surface them as an errored run without retrying
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                if agent_session is not None:
                    cleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
            except KeyboardInterrupt:
                run_response = cast(RunOutput, run_response)
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                return run_response
            except Exception as e:
                # Generic failures are retried (with optional exponential backoff) until attempts run out
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    time.sleep(delay)
                    continue
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    cleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
    finally:
        # Cancel background futures on error (wait_for_open_threads handles waiting on success)
        for future in (memory_future, cultural_knowledge_future, learning_future):
            if future is not None and not future.done():
                future.cancel()
                try:
                    future.result(timeout=0)
                except Exception:
                    pass
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always clean up the run tracking
        cleanup_run(run_response.run_id)  # type: ignore
    # Safety net: every loop path above already returns; kept for defensive completeness
    return run_response
def _run_stream(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    pre_session: Optional[AgentSession] = None,
    **kwargs: Any,
) -> Iterator[Union[RunOutputEvent, RunOutput]]:
    """Run the Agent and yield the RunOutput.

    Steps:
    1. Read or create session
    2. Update metadata and session state
    3. Resolve dependencies
    4. Execute pre-hooks
    5. Determine tools for model
    6. Prepare run messages
    7. Start memory creation in background thread
    8. Reason about the task if reasoning is enabled
    9. Process model response
    10. Parse response with parser model if provided
    11. Wait for background memory creation and cultural knowledge creation
    12. Create session summary
    13. Cleanup and store the run response and session

    Streaming counterpart of ``_run``: yields RunOutputEvent objects as they
    occur (and the final RunOutput when ``yield_run_output`` is True). The
    pipeline is attempted up to ``agent.retries + 1`` times; only generic
    exceptions trigger a retry.
    """
    from agno.agent._hooks import execute_post_hooks, execute_pre_hooks
    from agno.agent._init import disconnect_connectable_tools
    from agno.agent._messages import get_run_messages
    from agno.agent._response import (
        generate_response_with_output_model_stream,
        handle_model_response_stream,
        handle_reasoning_stream,
        parse_response_with_parser_model_stream,
    )
    from agno.agent._storage import load_session_state, read_or_create_session, update_metadata
    from agno.agent._telemetry import log_agent_telemetry
    from agno.agent._tools import determine_tools_for_model

    # Register the run so it can be cancelled from the outside via raise_if_cancelled
    register_run(run_context.run_id)
    log_debug(f"Agent Run Start: {run_response.run_id}", center=True)

    memory_future = None
    learning_future = None
    cultural_knowledge_future = None
    agent_session: Optional[AgentSession] = None
    try:
        # Set up retry logic
        num_attempts = agent.retries + 1
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Agent run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Read or create session. Reuse pre-read session on first attempt.
                if attempt == 0 and pre_session is not None:
                    agent_session = pre_session
                else:
                    agent_session = read_or_create_session(agent, session_id=session_id, user_id=user_id)
                # 2. Update metadata and session state
                # (when reusing pre_session, the dispatcher already updated metadata)
                if not (attempt == 0 and pre_session is not None):
                    update_metadata(agent, session=agent_session)
                # Initialize session state. Get it from DB if relevant.
                run_context.session_state = load_session_state(
                    agent,
                    session=agent_session,
                    session_state=run_context.session_state if run_context.session_state is not None else {},
                )
                _initialize_session_state(
                    run_context.session_state,
                    user_id=user_id,
                    session_id=session_id,
                    run_id=run_context.run_id,
                )
                # 3. Resolve dependencies
                if run_context.dependencies is not None:
                    resolve_run_dependencies(agent, run_context=run_context)
                # 4. Execute pre-hooks
                run_input = cast(RunInput, run_response.input)
                agent.model = cast(Model, agent.model)
                if agent.pre_hooks is not None:
                    # Can modify the run input
                    pre_hook_iterator = execute_pre_hooks(
                        agent,
                        hooks=agent.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_input=run_input,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Unlike _run, hook events are forwarded to the caller
                    for event in pre_hook_iterator:
                        yield event
                # 5. Determine tools for model
                processed_tools = agent.get_tools(
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    user_id=user_id,
                )
                _tools = determine_tools_for_model(
                    agent,
                    model=agent.model,
                    processed_tools=processed_tools,
                    run_response=run_response,
                    session=agent_session,
                    run_context=run_context,
                )
                # 6. Prepare run messages
                run_messages: RunMessages = get_run_messages(
                    agent,
                    run_response=run_response,
                    input=run_input.input_content,
                    session=agent_session,
                    run_context=run_context,
                    user_id=user_id,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                if len(run_messages.messages) == 0:
                    log_error("No messages to be sent to the model.")
                # 7. Start memory creation in background thread
                from agno.agent import _managers

                memory_future = _managers.start_memory_future(
                    agent,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_future=memory_future,
                )
                # Start learning extraction as a background task (runs concurrently with the main execution)
                learning_future = _managers.start_learning_future(
                    agent,
                    run_messages=run_messages,
                    session=agent_session,
                    user_id=user_id,
                    existing_future=learning_future,
                )
                # Start cultural knowledge creation in background thread
                cultural_knowledge_future = _managers.start_cultural_knowledge_future(
                    agent,
                    run_messages=run_messages,
                    existing_future=cultural_knowledge_future,
                )
                # Start the Run by yielding a RunStarted event
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_started_event(run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # 8. Reason about the task if reasoning is enabled
                yield from handle_reasoning_stream(
                    agent,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                    stream_events=stream_events,
                )
                # Check for cancellation before model processing
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 9. Process model response
                if agent.output_model is None:
                    for event in handle_model_response_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                else:
                    from agno.run.agent import (
                        IntermediateRunContentEvent,
                        RunContentEvent,
                    )  # type: ignore

                    # With an output model, content from the base model is relabelled as
                    # intermediate; the output model produces the final content below.
                    for event in handle_model_response_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        if isinstance(event, RunContentEvent):
                            if stream_events:
                                yield IntermediateRunContentEvent(
                                    content=event.content,
                                    content_type=event.content_type,
                                )
                        else:
                            yield event
                    # If an output model is provided, generate output using the output model
                    for event in generate_response_with_output_model_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        stream_events=stream_events,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event  # type: ignore
                # Check for cancellation after model processing
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 10. Parse response with parser model if provided
                yield from parse_response_with_parser_model_stream(
                    agent,  # type: ignore
                    session=agent_session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                )
                # If any tool call paused the run, settle background work, emit the pause event, and stop
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    yield from wait_for_thread_tasks_stream(
                        memory_future=memory_future,  # type: ignore
                        cultural_knowledge_future=cultural_knowledge_future,  # type: ignore
                        learning_future=learning_future,  # type: ignore
                        stream_events=stream_events,
                        run_response=run_response,
                        events_to_skip=agent.events_to_skip,
                        store_events=agent.store_events,
                        get_memories_callback=lambda: agent.get_user_memories(user_id=user_id),
                    )
                    merge_background_metrics(
                        run_response.metrics,
                        collect_background_metrics(memory_future, cultural_knowledge_future, learning_future),
                    )
                    # Handle the paused run
                    yield from handle_agent_run_paused_stream(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                        yield_run_output=yield_run_output or False,
                    )
                    return
                # Yield RunContentCompletedEvent
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # Execute post-hooks after output is generated but before response is returned
                if agent.post_hooks is not None:
                    yield from execute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                # 11. Wait for background memory creation and cultural knowledge creation
                yield from wait_for_thread_tasks_stream(
                    memory_future=memory_future,  # type: ignore
                    cultural_knowledge_future=cultural_knowledge_future,  # type: ignore
                    learning_future=learning_future,  # type: ignore
                    stream_events=stream_events,
                    run_response=run_response,
                    events_to_skip=agent.events_to_skip,
                    store_events=agent.store_events,
                    get_memories_callback=lambda: agent.get_user_memories(user_id=user_id),
                )
                merge_background_metrics(
                    run_response.metrics,
                    collect_background_metrics(memory_future, cultural_knowledge_future, learning_future),
                )
                # 12. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    agent_session.upsert_run(run=run_response)
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                    try:
                        agent.session_summary_manager.create_session_summary(
                            session=agent_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary failure is non-fatal; the run still completes
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_completed_event(
                                from_run_response=run_response, session_summary=agent_session.summary
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                # Update run_response.session_state before creating RunCompletedEvent
                # This ensures the event has the final state after all tool modifications
                if agent_session.session_data is not None and "session_state" in agent_session.session_data:
                    run_response.session_state = agent_session.session_data["session_state"]
                # Create the run completed event
                completed_event = handle_event(  # type: ignore
                    create_run_completed_event(from_run_response=run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 13. Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=agent_session, run_context=run_context, user_id=user_id
                )
                if stream_events:
                    yield completed_event  # type: ignore
                if yield_run_output:
                    yield run_response
                # Log Agent Telemetry
                log_agent_telemetry(agent, session_id=agent_session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                break
            except RunCancelledException as e:
                # Handle run cancellation during streaming
                log_info(f"Run {run_response.run_id} was cancelled during streaming")
                run_response.content = str(e)
                run_response.status = RunStatus.cancelled
                yield handle_event(
                    create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Cleanup and store the run response and session
                if agent_session is not None:
                    cleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                break
            except (InputCheckError, OutputCheckError) as e:
                # Guardrail/check failures are terminal — surface as an error event without retrying
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                if agent_session is not None:
                    cleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                yield run_error
                break
            except KeyboardInterrupt:
                # NOTE(review): unlike _run, status/content are not set to cancelled here and the
                # session is not stored — confirm whether this asymmetry is intentional.
                run_response = cast(RunOutput, run_response)
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason="Operation cancelled by user"),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                break
            except Exception as e:
                # Generic failures are retried (with optional exponential backoff) until attempts run out
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    time.sleep(delay)
                    continue
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(run_response, error=str(e))
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    cleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # No break needed: this is the final attempt, so the loop ends here
                yield run_error
    finally:
        # Cancel background futures on error (wait_for_thread_tasks_stream handles waiting on success)
        for future in (memory_future, cultural_knowledge_future, learning_future):
            if future is not None and not future.done():
                future.cancel()
                try:
                    future.result(timeout=0)
                except Exception:
                    pass
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always clean up the run tracking
        cleanup_run(run_response.run_id)  # type: ignore
def run_dispatch(
    agent: Agent,
    input: Union[str, List, Dict, Message, BaseModel, List[Message]],
    *,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
    yield_run_output: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    **kwargs: Any,
) -> Union[RunOutput, Iterator[Union[RunOutputEvent, RunOutput]]]:
    """Run the Agent synchronously and return the response.

    Prepares all run-scoped state (session, resolved run options, RunContext,
    RunOutput with metrics timer started), then delegates to ``_run_stream``
    when streaming is resolved on, or ``_run`` otherwise.

    Returns:
        ``RunOutput`` when not streaming; otherwise an iterator over
        ``RunOutputEvent`` items (and the final ``RunOutput`` when
        ``yield_run_output`` resolves to True).

    Raises:
        RuntimeError: If the agent is configured with an async database;
            callers must use ``arun`` in that case.
    """
    from agno.agent._init import has_async_db
    from agno.agent._response import get_response_format
    # A synchronous run cannot drive an async database driver.
    if has_async_db(agent):
        raise RuntimeError("`run` method is not supported with an async database. Please use `arun` method instead.")
    # Set the id for the run and register it immediately for cancellation tracking
    run_id = run_id or str(uuid4())
    if (add_history_to_context or agent.add_history_to_context) and not agent.db and not agent.team_id:
        log_warning(
            "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
        )
    # Optional FastAPI BackgroundTasks object forwarded by server integrations.
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks
        # Annotation-only rebinding for type checkers; no runtime conversion happens.
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # Validate input against input_schema if provided
    validated_input = validate_input(input, agent.input_schema)
    # Normalise hooks & guardrails (done once per agent, cached via the flag)
    if not agent._hooks_normalised:
        if agent.pre_hooks:
            agent.pre_hooks = normalize_pre_hooks(agent.pre_hooks)  # type: ignore
        if agent.post_hooks:
            agent.post_hooks = normalize_post_hooks(agent.post_hooks)  # type: ignore
        agent._hooks_normalised = True
    # Initialize session
    session_id, user_id = initialize_session(agent, session_id=session_id, user_id=user_id)
    # Initialize the Agent
    agent.initialize_agent(debug_mode=debug_mode)
    image_artifacts, video_artifacts, audio_artifacts, file_artifacts = validate_media_object_id(
        images=images, videos=videos, audios=audio, files=files
    )
    # Create RunInput to capture the original user input
    run_input = RunInput(
        input_content=validated_input,
        images=image_artifacts,
        videos=video_artifacts,
        audios=audio_artifacts,
        files=file_artifacts,
    )
    # Read existing session and update metadata BEFORE resolving run options,
    # so that session-stored metadata is visible to resolve_run_options.
    from agno.agent._storage import read_or_create_session, update_metadata
    agent_session = read_or_create_session(agent, session_id=session_id, user_id=user_id)
    update_metadata(agent, session=agent_session)
    # Resolve all run options centrally
    opts = resolve_run_options(
        agent,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        add_history_to_context=add_history_to_context,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
        output_schema=output_schema,
    )
    agent.model = cast(Model, agent.model)
    # Initialize run context (reuse the caller-provided one when given)
    run_context = run_context or RunContext(
        run_id=run_id,
        session_id=session_id,
        user_id=user_id,
        session_state=session_state,
        dependencies=opts.dependencies,
        knowledge_filters=opts.knowledge_filters,
        metadata=opts.metadata,
        output_schema=opts.output_schema,
    )
    # Apply options with precedence: explicit args > existing run_context > resolved defaults.
    opts.apply_to_context(
        run_context,
        dependencies_provided=dependencies is not None,
        knowledge_filters_provided=knowledge_filters is not None,
        metadata_provided=metadata is not None,
    )
    # Prepare arguments for the model (must be after run_context is fully initialized)
    response_format = get_response_format(agent, run_context=run_context) if agent.parser_model is None else None
    # Create a new run_response for this attempt
    run_response = RunOutput(
        run_id=run_id,
        session_id=session_id,
        agent_id=agent.id,
        user_id=user_id,
        agent_name=agent.name,
        metadata=run_context.metadata,
        session_state=run_context.session_state,
        input=run_input,
    )
    run_response.model = agent.model.id if agent.model is not None else None
    run_response.model_provider = agent.model.provider if agent.model is not None else None
    # Start the run metrics timer, to calculate the run duration
    run_response.metrics = RunMetrics()
    run_response.metrics.start_timer()
    if opts.stream:
        # Streaming: return the (lazy) event iterator; execution happens as it is consumed.
        response_iterator = _run_stream(
            agent,
            run_response=run_response,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            pre_session=agent_session,
            **kwargs,
        )
        return response_iterator
    else:
        # Non-streaming: execute the run to completion and return the RunOutput.
        response = _run(
            agent,
            run_response=run_response,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            pre_session=agent_session,
            **kwargs,
        )
        return response
async def _arun(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    pre_session: Optional[AgentSession] = None,
    **kwargs: Any,
) -> RunOutput:
    """Run the Agent and return the RunOutput.
    Steps:
    1. Read or create session
    2. Update metadata and session state
    3. Resolve dependencies
    4. Execute pre-hooks
    5. Determine tools for model
    6. Prepare run messages
    7. Start memory creation in background task
    8. Reason about the task if reasoning is enabled
    9. Generate a response from the Model (includes running function calls)
    10. Update the RunOutput with the model response
    11. Convert response to structured format
    12. Store media if enabled
    13. Execute post-hooks
    14. Wait for background memory creation
    15. Create session summary
    16. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
    Retries the whole sequence up to ``agent.retries`` additional times on
    unexpected exceptions, with optional exponential backoff between attempts.
    """
    from agno.agent._hooks import aexecute_post_hooks, aexecute_pre_hooks
    from agno.agent._init import disconnect_connectable_tools, disconnect_mcp_tools
    from agno.agent._messages import aget_run_messages
    from agno.agent._response import (
        agenerate_response_with_output_model,
        ahandle_reasoning,
        aparse_response_with_parser_model,
        convert_response_to_structured_format,
        update_run_response,
    )
    from agno.agent._storage import aread_or_create_session, load_session_state, update_metadata
    from agno.agent._telemetry import alog_agent_telemetry
    from agno.agent._tools import determine_tools_for_model
    # Register for cancellation tracking before any work begins.
    await aregister_run(run_context.run_id)
    log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
    memory_task = None
    learning_task = None
    cultural_knowledge_task = None
    agent_session: Optional[AgentSession] = None
    # Set up retry logic
    num_attempts = agent.retries + 1
    try:
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Agent run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Read or create session. Reuse pre-read session on first attempt.
                if attempt == 0 and pre_session is not None:
                    agent_session = pre_session
                else:
                    agent_session = await aread_or_create_session(agent, session_id=session_id, user_id=user_id)
                # 2. Update metadata and session state
                # (skipped when reusing pre_session: the caller already updated metadata)
                if not (attempt == 0 and pre_session is not None):
                    update_metadata(agent, session=agent_session)
                # Initialize session state. Get it from DB if relevant.
                run_context.session_state = load_session_state(
                    agent,
                    session=agent_session,
                    session_state=run_context.session_state if run_context.session_state is not None else {},
                )
                _initialize_session_state(
                    run_context.session_state,
                    user_id=user_id,
                    session_id=session_id,
                    run_id=run_context.run_id,
                )
                # 3. Resolve dependencies
                if run_context.dependencies is not None:
                    await aresolve_run_dependencies(agent, run_context=run_context)
                # 4. Execute pre-hooks
                run_input = cast(RunInput, run_response.input)
                agent.model = cast(Model, agent.model)
                if agent.pre_hooks is not None:
                    # Can modify the run input
                    pre_hook_iterator = aexecute_pre_hooks(
                        agent,
                        hooks=agent.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_context=run_context,
                        run_input=run_input,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Consume the async iterator without yielding
                    async for _ in pre_hook_iterator:
                        pass
                # 5. Determine tools for model
                agent.model = cast(Model, agent.model)
                processed_tools = await agent.aget_tools(
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    user_id=user_id,
                )
                _tools = determine_tools_for_model(
                    agent,
                    model=agent.model,
                    processed_tools=processed_tools,
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    async_mode=True,
                )
                # 6. Prepare run messages
                run_messages: RunMessages = await aget_run_messages(
                    agent,
                    run_response=run_response,
                    run_context=run_context,
                    input=run_input.input_content,
                    session=agent_session,
                    user_id=user_id,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                if len(run_messages.messages) == 0:
                    log_error("No messages to be sent to the model.")
                # 7. Start memory creation as a background task (runs concurrently with the main execution)
                from agno.agent import _managers
                memory_task = await _managers.astart_memory_task(
                    agent,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_task=memory_task,
                )
                # Start learning extraction as a background task
                learning_task = await _managers.astart_learning_task(
                    agent,
                    run_messages=run_messages,
                    session=agent_session,
                    user_id=user_id,
                    existing_task=learning_task,
                )
                # Start cultural knowledge creation as a background task (runs concurrently with the main execution)
                cultural_knowledge_task = await _managers.astart_cultural_knowledge_task(
                    agent,
                    run_messages=run_messages,
                    existing_task=cultural_knowledge_task,
                )
                # Check for cancellation before model call
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 8. Reason about the task if reasoning is enabled
                await ahandle_reasoning(
                    agent, run_response=run_response, run_messages=run_messages, run_context=run_context
                )
                # Check for cancellation before model call
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 9. Generate a response from the Model (includes running function calls)
                model_response: ModelResponse = await agent.model.aresponse(
                    messages=run_messages.messages,
                    tools=_tools,
                    tool_choice=agent.tool_choice,
                    tool_call_limit=agent.tool_call_limit,
                    response_format=response_format,
                    send_media_to_model=agent.send_media_to_model,
                    run_response=run_response,
                    compression_manager=agent.compression_manager if agent.compress_tool_results else None,
                )
                # Check for cancellation after model call
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # If an output model is provided, generate output using the output model
                await agenerate_response_with_output_model(
                    agent, model_response=model_response, run_messages=run_messages, run_response=run_response
                )
                # If a parser model is provided, structure the response separately
                await aparse_response_with_parser_model(
                    agent,
                    model_response=model_response,
                    run_messages=run_messages,
                    run_context=run_context,
                    run_response=run_response,
                )
                # 10. Update the RunOutput with the model response
                update_run_response(
                    agent,
                    model_response=model_response,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                )
                # We should break out of the run function
                # (a paused tool call, e.g. HITL, ends this run early; it resumes later)
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    # Let the background tasks finish and fold their metrics in before pausing.
                    await await_for_open_threads(
                        memory_task=memory_task,
                        cultural_knowledge_task=cultural_knowledge_task,
                        learning_task=learning_task,
                    )
                    merge_background_metrics(
                        run_response.metrics,
                        collect_background_metrics(memory_task, cultural_knowledge_task, learning_task),
                    )
                    return await ahandle_agent_run_paused(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # 11. Convert the response to the structured format if needed
                convert_response_to_structured_format(agent, run_response, run_context=run_context)
                # 12. Store media if enabled
                if agent.store_media:
                    store_media_util(run_response, model_response)
                # 13. Execute post-hooks (after output is generated but before response is returned)
                if agent.post_hooks is not None:
                    async for _ in aexecute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    ):
                        pass
                # Check for cancellation
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 14. Wait for background memory creation
                await await_for_open_threads(
                    memory_task=memory_task,
                    cultural_knowledge_task=cultural_knowledge_task,
                    learning_task=learning_task,
                )
                merge_background_metrics(
                    run_response.metrics,
                    collect_background_metrics(memory_task, cultural_knowledge_task, learning_task),
                )
                # 15. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    agent_session.upsert_run(run=run_response)
                    try:
                        # Summary failure is non-fatal: log and continue the run.
                        await agent.session_summary_manager.acreate_session_summary(
                            session=agent_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        log_warning(f"Error in session summary creation: {str(e)}")
                run_response.status = RunStatus.completed
                # 16. Cleanup and store the run response and session
                await acleanup_and_store(
                    agent,
                    run_response=run_response,
                    session=agent_session,
                    run_context=run_context,
                    user_id=user_id,
                )
                # Log Agent Telemetry
                await alog_agent_telemetry(agent, session_id=agent_session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                return run_response
            except RunCancelledException as e:
                # Handle run cancellation
                log_info(f"Run {run_response.run_id} was cancelled")
                run_response.content = str(e)
                run_response.status = RunStatus.cancelled
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                # Handle exceptions during streaming
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
            except KeyboardInterrupt:
                # NOTE(review): unlike the cancelled/error paths above, this does not
                # call acleanup_and_store, so the interrupted run is not persisted —
                # confirm this is intentional.
                run_response = cast(RunOutput, run_response)
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                return run_response
            except Exception as e:
                # Check if this is the last attempt
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                    continue
                # Last attempt failed: mark the run as errored and persist it.
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
    finally:
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always disconnect MCP tools
        await disconnect_mcp_tools(agent)
        # Cancel background tasks on error (await_for_open_threads handles waiting on success)
        if memory_task is not None and not memory_task.done():
            memory_task.cancel()
            try:
                await memory_task
            except asyncio.CancelledError:
                pass
        if cultural_knowledge_task is not None and not cultural_knowledge_task.done():
            cultural_knowledge_task.cancel()
            try:
                await cultural_knowledge_task
            except asyncio.CancelledError:
                pass
        if learning_task is not None and not learning_task.done():
            learning_task.cancel()
            try:
                await learning_task
            except asyncio.CancelledError:
                pass
        # Always clean up the run tracking
        await acleanup_run(run_response.run_id)  # type: ignore
    # Defensive fallback: every loop path above returns explicitly, so this is
    # only reachable if the loop body never runs (e.g. num_attempts <= 0).
    return run_response
async def _arun_background(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> RunOutput:
    """Start an agent run in the background and return immediately with PENDING status.
    The run is persisted with PENDING status, then an asyncio task is spawned
    to execute the actual run. The task transitions through RUNNING -> COMPLETED/ERROR.
    Callers can poll for results via agent.aget_run_output(run_id, session_id).
    Returns:
        The same ``run_response`` object, with status set to PENDING. It is
        mutated in place by the spawned task as the run progresses.
    """
    from agno.agent._session import asave_session
    from agno.agent._storage import aread_or_create_session, update_metadata
    # 1. Register the run for cancellation tracking (before spawning the task)
    await aregister_run(run_context.run_id)
    # 2. Set status to PENDING
    run_response.status = RunStatus.pending
    # 3. Persist the PENDING run so polling can find it immediately
    agent_session = await aread_or_create_session(agent, session_id=session_id, user_id=user_id)
    update_metadata(agent, session=agent_session)
    agent_session.upsert_run(run=run_response)
    await asave_session(agent, session=agent_session)
    log_info(f"Background run {run_response.run_id} created with PENDING status")
    # 4. Spawn the background task
    async def _background_task() -> None:
        # Closure over run_response/agent_session: status changes made here are
        # visible to the object already returned to the caller.
        try:
            # Transition to RUNNING
            run_response.status = RunStatus.running
            agent_session.upsert_run(run=run_response)
            await asave_session(agent, session=agent_session)
            # Execute the actual run — _arun handles everything including
            # session persistence and cleanup
            await _arun(
                agent,
                run_response=run_response,
                run_context=run_context,
                user_id=user_id,
                response_format=response_format,
                session_id=session_id,
                add_history_to_context=add_history_to_context,
                add_dependencies_to_context=add_dependencies_to_context,
                add_session_state_to_context=add_session_state_to_context,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
        except Exception:
            log_error(f"Background run {run_response.run_id} failed", exc_info=True)
            # Persist ERROR status
            try:
                run_response.status = RunStatus.error
                agent_session.upsert_run(run=run_response)
                await asave_session(agent, session=agent_session)
            except Exception:
                log_error(f"Failed to persist error state for background run {run_response.run_id}", exc_info=True)
        # Note: acleanup_run is already called by _arun's finally block
    task = asyncio.create_task(_background_task())
    # Hold a strong reference until the task finishes (added to the module-level
    # set before the discard callback is attached, so the discard cannot race the add).
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)
    # 5. Return immediately with the PENDING response
    return run_response
async def _arun_stream(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    pre_session: Optional[AgentSession] = None,
    **kwargs: Any,
) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
    """Run the Agent and yield the RunOutput.
    Steps:
    1. Read or create session
    2. Update metadata and session state
    3. Resolve dependencies
    4. Execute pre-hooks
    5. Determine tools for model
    6. Prepare run messages
    7. Start memory creation in background task
    8. Reason about the task if reasoning is enabled
    9. Generate a response from the Model (includes running function calls)
    10. Parse response with parser model if provided
    11. Wait for background memory creation
    12. Create session summary
    13. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
    Async-generator counterpart of ``_arun``: yields run events (and the final
    ``RunOutput`` when ``yield_run_output`` is True) as execution progresses.
    Retries the whole sequence up to ``agent.retries`` additional times on
    unexpected exceptions.
    """
    from agno.agent._hooks import aexecute_post_hooks, aexecute_pre_hooks
    from agno.agent._init import disconnect_connectable_tools, disconnect_mcp_tools
    from agno.agent._messages import aget_run_messages
    from agno.agent._response import (
        agenerate_response_with_output_model_stream,
        ahandle_model_response_stream,
        ahandle_reasoning_stream,
        aparse_response_with_parser_model_stream,
    )
    from agno.agent._storage import aread_or_create_session, load_session_state, update_metadata
    from agno.agent._telemetry import alog_agent_telemetry
    from agno.agent._tools import determine_tools_for_model
    # Register for cancellation tracking before any work begins.
    await aregister_run(run_context.run_id)
    log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
    memory_task = None
    cultural_knowledge_task = None
    learning_task = None
    agent_session: Optional[AgentSession] = None
    # Set up retry logic
    num_attempts = agent.retries + 1
    try:
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Agent run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Read or create session. Reuse pre-read session on first attempt.
                if attempt == 0 and pre_session is not None:
                    agent_session = pre_session
                else:
                    agent_session = await aread_or_create_session(agent, session_id=session_id, user_id=user_id)
                # Start the Run by yielding a RunStarted event
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_started_event(run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # 2. Update metadata and session state
                if not (attempt == 0 and pre_session is not None):
                    update_metadata(agent, session=agent_session)
                # Initialize session state. Get it from DB if relevant.
                run_context.session_state = load_session_state(
                    agent,
                    session=agent_session,
                    session_state=run_context.session_state if run_context.session_state is not None else {},
                )
                _initialize_session_state(
                    run_context.session_state,
                    user_id=user_id,
                    session_id=session_id,
                    run_id=run_context.run_id,
                )
                # 3. Resolve dependencies
                if run_context.dependencies is not None:
                    await aresolve_run_dependencies(agent, run_context=run_context)
                # 4. Execute pre-hooks
                run_input = cast(RunInput, run_response.input)
                agent.model = cast(Model, agent.model)
                if agent.pre_hooks is not None:
                    pre_hook_iterator = aexecute_pre_hooks(
                        agent,
                        hooks=agent.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_context=run_context,
                        run_input=run_input,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Forward pre-hook events to the consumer, checking cancellation per event.
                    async for event in pre_hook_iterator:
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                # 5. Determine tools for model
                agent.model = cast(Model, agent.model)
                processed_tools = await agent.aget_tools(
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    user_id=user_id,
                )
                _tools = determine_tools_for_model(
                    agent,
                    model=agent.model,
                    processed_tools=processed_tools,
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    async_mode=True,
                )
                # 6. Prepare run messages
                run_messages: RunMessages = await aget_run_messages(
                    agent,
                    run_response=run_response,
                    run_context=run_context,
                    input=run_input.input_content,
                    session=agent_session,
                    user_id=user_id,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                if len(run_messages.messages) == 0:
                    log_error("No messages to be sent to the model.")
                # 7. Start memory creation as a background task (runs concurrently with the main execution)
                from agno.agent import _managers
                memory_task = await _managers.astart_memory_task(
                    agent,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_task=memory_task,
                )
                # Start learning extraction as a background task
                learning_task = await _managers.astart_learning_task(
                    agent,
                    run_messages=run_messages,
                    session=agent_session,
                    user_id=user_id,
                    existing_task=learning_task,
                )
                # Start cultural knowledge creation as a background task (runs concurrently with the main execution)
                cultural_knowledge_task = await _managers.astart_cultural_knowledge_task(
                    agent,
                    run_messages=run_messages,
                    existing_task=cultural_knowledge_task,
                )
                # 8. Reason about the task if reasoning is enabled
                async for item in ahandle_reasoning_stream(
                    agent,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                    stream_events=stream_events,
                ):
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    yield item
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 9. Generate a response from the Model
                if agent.output_model is None:
                    async for event in ahandle_model_response_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                else:
                    from agno.run.agent import (
                        IntermediateRunContentEvent,
                        RunContentEvent,
                    )  # type: ignore
                    # With an output model, the primary model's content is only
                    # intermediate: downgrade RunContentEvent accordingly.
                    async for event in ahandle_model_response_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        if isinstance(event, RunContentEvent):
                            if stream_events:
                                yield IntermediateRunContentEvent(
                                    content=event.content,
                                    content_type=event.content_type,
                                )
                        else:
                            yield event
                    # If an output model is provided, generate output using the output model
                    async for event in agenerate_response_with_output_model_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        stream_events=stream_events,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event  # type: ignore
                # Check for cancellation after model processing
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 10. Parse response with parser model if provided
                async for event in aparse_response_with_parser_model_stream(
                    agent,
                    session=agent_session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                ):
                    yield event  # type: ignore
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # Break out of the run function if a tool call is paused
                # (e.g. HITL: the run ends early and resumes later)
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    async for item in await_for_thread_tasks_stream(
                        memory_task=memory_task,
                        cultural_knowledge_task=cultural_knowledge_task,
                        learning_task=learning_task,
                        stream_events=stream_events,
                        run_response=run_response,
                        events_to_skip=agent.events_to_skip,
                        store_events=agent.store_events,
                        get_memories_callback=lambda: agent.aget_user_memories(user_id=user_id),
                    ):
                        yield item
                    merge_background_metrics(
                        run_response.metrics,
                        collect_background_metrics(memory_task, cultural_knowledge_task, learning_task),
                    )
                    async for item in ahandle_agent_run_paused_stream(  # type: ignore[assignment]
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                        yield_run_output=yield_run_output or False,
                    ):
                        yield item
                    return
                # Execute post-hooks (after output is generated but before response is returned)
                if agent.post_hooks is not None:
                    async for event in aexecute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    ):
                        yield event
                # 11. Wait for background memory creation
                async for item in await_for_thread_tasks_stream(
                    memory_task=memory_task,
                    cultural_knowledge_task=cultural_knowledge_task,
                    learning_task=learning_task,
                    stream_events=stream_events,
                    run_response=run_response,
                    events_to_skip=agent.events_to_skip,
                    store_events=agent.store_events,
                    get_memories_callback=lambda: agent.aget_user_memories(user_id=user_id),
                ):
                    yield item
                merge_background_metrics(
                    run_response.metrics,
                    collect_background_metrics(memory_task, cultural_knowledge_task, learning_task),
                )
                # 12. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    agent_session.upsert_run(run=run_response)
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                    try:
                        # Summary failure is non-fatal: log and continue the run.
                        await agent.session_summary_manager.acreate_session_summary(
                            session=agent_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_completed_event(
                                from_run_response=run_response, session_summary=agent_session.summary
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                # Update run_response.session_state before creating RunCompletedEvent
                # This ensures the event has the final state after all tool modifications
                if agent_session.session_data is not None and "session_state" in agent_session.session_data:
                    run_response.session_state = agent_session.session_data["session_state"]
                # Create the run completed event
                # (built before cleanup so it captures the pre-scrub response, yielded after)
                completed_event = handle_event(
                    create_run_completed_event(from_run_response=run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 13. Cleanup and store the run response and session
                await acleanup_and_store(
                    agent,
                    run_response=run_response,
                    session=agent_session,
                    run_context=run_context,
                    user_id=user_id,
                )
                if stream_events:
                    yield completed_event  # type: ignore
                if yield_run_output:
                    yield run_response
                # Log Agent Telemetry
                await alog_agent_telemetry(agent, session_id=agent_session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                # Break out of the run function
                break
            except RunCancelledException as e:
                # Handle run cancellation during async streaming
                log_info(f"Run {run_response.run_id} was cancelled during async streaming")
                run_response.status = RunStatus.cancelled
                # Don't overwrite content - preserve any partial content that was streamed
                # Only set content if it's empty
                if not run_response.content:
                    run_response.content = str(e)
                # Yield the cancellation event
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                break
            except (InputCheckError, OutputCheckError) as e:
                # Handle exceptions during async streaming
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # Yield the error event
                yield run_error
                break
            except KeyboardInterrupt:
                # NOTE(review): unlike _arun, this path does not set
                # run_response.status to cancelled and does not persist the run —
                # confirm this asymmetry is intentional.
                run_response = cast(RunOutput, run_response)
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason="Operation cancelled by user"),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                break
            except Exception as e:
                # Check if this is the last attempt
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                    # NOTE(review): on retry the loop re-yields RunStarted and re-runs
                    # pre-hooks, so consumers may see duplicate start events — confirm
                    # this is acceptable downstream.
                    continue
                # Handle exceptions during async streaming
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(run_response, error=str(e))
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # Yield the error event
                yield run_error
    finally:
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always disconnect MCP tools
        await disconnect_mcp_tools(agent)
        # Cancel background tasks on error (await_for_thread_tasks_stream handles waiting on success)
        if memory_task is not None and not memory_task.done():
            memory_task.cancel()
            try:
                await memory_task
            except asyncio.CancelledError:
                pass
        if cultural_knowledge_task is not None and not cultural_knowledge_task.done():
            cultural_knowledge_task.cancel()
            try:
                await cultural_knowledge_task
            except asyncio.CancelledError:
                pass
        if learning_task is not None and not learning_task.done():
            learning_task.cancel()
            try:
                await learning_task
            except asyncio.CancelledError:
                pass
        # Always clean up the run tracking
        await acleanup_run(run_response.run_id)  # type: ignore
def arun_dispatch(  # type: ignore
    agent: Agent,
    input: Union[str, List, Dict, Message, BaseModel, List[Message]],
    *,
    stream: Optional[bool] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    stream_events: Optional[bool] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
    yield_run_output: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    background: bool = False,
    **kwargs: Any,
) -> Union[RunOutput, AsyncIterator[RunOutputEvent]]:
    """Dispatch an async Agent run and return the response.

    Note: this function itself is synchronous. It performs validation and
    setup, then returns either an awaitable (``_arun`` / ``_arun_background``)
    or an async iterator (``_arun_stream``) depending on the resolved
    ``stream`` and ``background`` options.
    """
    # Set the id for the run and register it immediately for cancellation tracking
    from agno.agent._response import get_response_format
    run_id = run_id or str(uuid4())
    # Warn early: history requires a DB (unless this agent is a team member, where the team owns storage)
    if (add_history_to_context or agent.add_history_to_context) and not agent.db and not agent.team_id:
        log_warning(
            "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
        )
    # Optional FastAPI BackgroundTasks passed through by server integrations
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # Validate input against input_schema if provided
    validated_input = validate_input(input, agent.input_schema)
    # Normalise hooks & guardrails (done once per agent; async_mode=True wraps them for the async path)
    if not agent._hooks_normalised:
        if agent.pre_hooks:
            agent.pre_hooks = normalize_pre_hooks(agent.pre_hooks, async_mode=True)  # type: ignore
        if agent.post_hooks:
            agent.post_hooks = normalize_post_hooks(agent.post_hooks, async_mode=True)  # type: ignore
        agent._hooks_normalised = True
    # Initialize session
    session_id, user_id = initialize_session(agent, session_id=session_id, user_id=user_id)
    # Initialize the Agent
    agent.initialize_agent(debug_mode=debug_mode)
    # Validate/normalise incoming media into artifact objects
    image_artifacts, video_artifacts, audio_artifacts, file_artifacts = validate_media_object_id(
        images=images, videos=videos, audios=audio, files=files
    )
    # Create RunInput to capture the original user input
    run_input = RunInput(
        input_content=validated_input,
        images=image_artifacts,
        videos=video_artifacts,
        audios=audio_artifacts,
        files=file_artifacts,
    )
    # Read existing session and update metadata BEFORE resolving run options,
    # so that session-stored metadata is visible to resolve_run_options.
    # Note: arun_dispatch is NOT async, so we can only pre-read with a sync DB.
    # For async DB, _arun/_arun_stream will handle the session read themselves.
    from agno.agent._init import has_async_db
    from agno.agent._storage import update_metadata
    _pre_session: Optional[AgentSession] = None
    if not has_async_db(agent):
        from agno.agent._storage import read_or_create_session
        _pre_session = read_or_create_session(agent, session_id=session_id, user_id=user_id)
        update_metadata(agent, session=_pre_session)
    # Resolve all run options centrally
    opts = resolve_run_options(
        agent,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        add_history_to_context=add_history_to_context,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
        output_schema=output_schema,
    )
    agent.model = cast(Model, agent.model)
    # Initialize run context (reused as-is if the caller passed one in)
    run_context = run_context or RunContext(
        run_id=run_id,
        session_id=session_id,
        user_id=user_id,
        session_state=session_state,
        dependencies=opts.dependencies,
        knowledge_filters=opts.knowledge_filters,
        metadata=opts.metadata,
        output_schema=opts.output_schema,
    )
    # Apply options with precedence: explicit args > existing run_context > resolved defaults.
    opts.apply_to_context(
        run_context,
        dependencies_provided=dependencies is not None,
        knowledge_filters_provided=knowledge_filters is not None,
        metadata_provided=metadata is not None,
    )
    # Prepare arguments for the model (must be after run_context is fully initialized)
    response_format = get_response_format(agent, run_context=run_context) if agent.parser_model is None else None
    # Create a new run_response for this attempt
    run_response = RunOutput(
        run_id=run_id,
        session_id=session_id,
        agent_id=agent.id,
        user_id=user_id,
        agent_name=agent.name,
        metadata=run_context.metadata,
        session_state=run_context.session_state,
        input=run_input,
    )
    run_response.model = agent.model.id if agent.model is not None else None
    run_response.model_provider = agent.model.provider if agent.model is not None else None
    # Start the run metrics timer, to calculate the run duration
    run_response.metrics = RunMetrics()
    run_response.metrics.start_timer()
    # Background execution: return immediately with PENDING status
    if background:
        if opts.stream:
            raise ValueError(
                "Background execution cannot be combined with streaming. Set stream=False when using background=True."
            )
        if not agent.db:
            raise ValueError(
                "Background execution requires a database to be configured on the agent for run persistence."
            )
        return _arun_background(  # type: ignore[return-value]
            agent,
            run_response=run_response,
            run_context=run_context,
            user_id=user_id,
            response_format=response_format,
            session_id=session_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
    # Pass the new run_response to _arun / _arun_stream
    if opts.stream:
        return _arun_stream(  # type: ignore
            agent,
            run_response=run_response,
            run_context=run_context,
            user_id=user_id,
            response_format=response_format,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            session_id=session_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            pre_session=_pre_session,
            **kwargs,
        )  # type: ignore[assignment]
    else:
        return _arun(  # type: ignore
            agent,
            run_response=run_response,
            run_context=run_context,
            user_id=user_id,
            response_format=response_format,
            session_id=session_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            pre_session=_pre_session,
            **kwargs,
        )
def continue_run_dispatch(
    agent: Agent,
    run_response: Optional[RunOutput] = None,
    *,
    run_id: Optional[str] = None,  # type: ignore
    updated_tools: Optional[List[ToolExecution]] = None,
    requirements: Optional[List[RunRequirement]] = None,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = False,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    **kwargs,
) -> Union[RunOutput, Iterator[Union[RunOutputEvent, RunOutput]]]:
    """Continue a previous run (sync path).

    Args:
        run_response: The run response to continue.
        run_id: The run id to continue. Alternative to passing run_response.
            Requires a session_id (explicit or on the agent).
        requirements: The requirements to continue the run. This or updated_tools is required with `run_id`,
            unless a resolved admin approval exists for the run.
        stream: Whether to stream the response.
        stream_events: Whether to stream all events.
        user_id: The user id to continue the run for.
        session_id: The session id to continue the run for.
        run_context: The run context to use for the run.
        knowledge_filters: The knowledge filters to use for the run.
        dependencies: The dependencies to use for the run.
        metadata: The metadata to use for the run.
        debug_mode: Whether to enable debug mode.
        yield_run_output: Whether to also yield the final run output when streaming.
        (deprecated) updated_tools: Use 'requirements' instead.

    Raises:
        ValueError: If neither run_response nor run_id is provided, if a session id
            cannot be determined for a run_id continuation, or if no tools/requirements
            (or resolved approval) are available to continue from.
        Exception: If the agent is configured with an async DB (use acontinue_run()).
        RuntimeError: If run_id does not match any run in the session.
    """
    from agno.agent._init import has_async_db, set_default_model
    from agno.agent._messages import get_continue_run_messages
    from agno.agent._response import get_response_format
    from agno.agent._storage import load_session_state, read_or_create_session, update_metadata
    from agno.agent._tools import determine_tools_for_model
    if run_response is None and run_id is None:
        raise ValueError("Either run_response or run_id must be provided.")
    if run_response is None and (run_id is not None and (session_id is None and agent.session_id is None)):
        raise ValueError("Session ID is required to continue a run from a run_id.")
    if has_async_db(agent):
        raise Exception("continue_run() is not supported with an async DB. Please use acontinue_run() instead.")
    # Optional FastAPI BackgroundTasks passed through by server integrations
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # A provided run_response takes precedence as the source of session/run ids
    session_id = run_response.session_id if run_response else session_id
    run_id: str = run_response.run_id if run_response else run_id  # type: ignore
    session_id, user_id = initialize_session(
        agent,
        session_id=session_id,
        user_id=user_id,
    )
    # Initialize the Agent
    agent.initialize_agent(debug_mode=debug_mode)
    # Read existing session from storage
    agent_session = read_or_create_session(agent, session_id=session_id, user_id=user_id)
    update_metadata(agent, session=agent_session)
    # Initialize session state. Get it from DB if relevant.
    session_state = load_session_state(agent, session=agent_session, session_state={})
    # Resolve all run options centrally
    opts = resolve_run_options(
        agent,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
    )
    # Initialize run context (reused as-is if the caller passed one in)
    run_context = run_context or RunContext(
        run_id=run_id,  # type: ignore
        session_id=session_id,
        user_id=user_id,
        session_state=session_state,
        dependencies=opts.dependencies,
        knowledge_filters=opts.knowledge_filters,
        metadata=opts.metadata,
    )
    # Apply options with precedence: explicit args > existing run_context > resolved defaults.
    opts.apply_to_context(
        run_context,
        dependencies_provided=dependencies is not None,
        knowledge_filters_provided=knowledge_filters is not None,
        metadata_provided=metadata is not None,
    )
    # Resolve dependencies
    if run_context.dependencies is not None:
        resolve_run_dependencies(agent, run_context=run_context)
    # Run can be continued from previous run response or from passed run_response context
    if run_response is not None:
        # The run is continued from a provided run_response. This contains the updated tools.
        input = run_response.messages or []
    elif run_id is not None:
        # The run is continued from a run_id: look it up in the stored session runs.
        runs = agent_session.runs or []
        run_response = next((r for r in runs if r.run_id == run_id), None)  # type: ignore
        if run_response is None:
            raise RuntimeError(f"No runs found for run ID {run_id}")
        input = run_response.messages or []
        # If we have updated_tools, set them in the run_response (deprecated path)
        if updated_tools is not None:
            warnings.warn(
                "The 'updated_tools' parameter is deprecated and will be removed in future versions. Use 'requirements' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            run_response.tools = updated_tools
        # If we have requirements, get the updated tools and set them in the run_response
        elif requirements is not None:
            run_response.requirements = requirements
            updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
            if updated_tools and run_response.tools:
                # Merge by tool_call_id: requirement-updated tools replace their stored counterparts
                updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
                run_response.tools = [updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools]
            else:
                run_response.tools = updated_tools
        # If no tools/requirements provided, check for resolved admin approval
        elif run_response.tools:
            from agno.run.approval import check_and_apply_approval_resolution
            try:
                # This will apply resolution_data to tools if approval is resolved
                check_and_apply_approval_resolution(agent.db, run_id, run_response)
            except RuntimeError:
                # No resolved approval found - fall back to requiring tools/requirements
                raise ValueError(
                    "To continue a run from a given run_id, the requirements parameter must be provided "
                    "(or resolve an admin approval first)."
                )
        else:
            # No tools on the run_response either
            raise ValueError("To continue a run from a given run_id, the requirements parameter must be provided.")
    else:
        raise ValueError("Either run_response or run_id must be provided.")
    # Prepare arguments for the model
    set_default_model(agent)
    response_format = get_response_format(agent, run_context=run_context)
    agent.model = cast(Model, agent.model)
    processed_tools = agent.get_tools(
        run_response=run_response,
        run_context=run_context,
        session=agent_session,
        user_id=user_id,
    )
    _tools = determine_tools_for_model(
        agent,
        model=agent.model,
        processed_tools=processed_tools,
        run_response=run_response,
        run_context=run_context,
        session=agent_session,
    )
    run_response = cast(RunOutput, run_response)
    log_debug(f"Agent Run Start: {run_response.run_id}", center=True)
    # Prepare run messages
    run_messages = get_continue_run_messages(
        agent,
        input=input,
    )
    # Reset the run state
    run_response.status = RunStatus.running
    if opts.stream:
        response_iterator = _continue_run_stream(
            agent,
            run_response=run_response,
            run_messages=run_messages,
            run_context=run_context,
            tools=_tools,
            user_id=user_id,
            session=agent_session,
            response_format=response_format,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
        return response_iterator
    else:
        response = _continue_run(
            agent,
            run_response=run_response,
            run_messages=run_messages,
            run_context=run_context,
            tools=_tools,
            user_id=user_id,
            session=agent_session,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
        return response
def _continue_run(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: RunContext,
    session: AgentSession,
    tools: List[Union[Function, dict]],
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs,
) -> RunOutput:
    """Continue a previous run (sync, non-streaming).

    Steps:
    1. Handle any updated tools
    2. Generate a response from the Model
    3. Update the RunOutput with the model response
    4. Convert response to structured format
    5. Store media if enabled
    6. Execute post-hooks
    7. Create session summary
    8. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)

    Retries up to ``agent.retries`` times on generic exceptions, with optional
    exponential backoff. Cancellation, guardrail, and keyboard interrupts are
    terminal (no retry).
    """
    # Register run for cancellation tracking
    from agno.agent._hooks import execute_post_hooks
    from agno.agent._init import disconnect_connectable_tools
    from agno.agent._response import (
        convert_response_to_structured_format,
        generate_response_with_output_model,
        parse_response_with_parser_model,
        update_run_response,
    )
    from agno.agent._telemetry import log_agent_telemetry
    from agno.agent._tools import handle_tool_call_updates
    register_run(run_response.run_id)  # type: ignore
    agent.model = cast(Model, agent.model)
    # 1. Handle the updated tools
    handle_tool_call_updates(agent, run_response=run_response, run_messages=run_messages, tools=tools)
    try:
        num_attempts = agent.retries + 1
        for attempt in range(num_attempts):
            try:
                # Check for cancellation before model call
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 2. Generate a response from the Model (includes running function calls)
                agent.model = cast(Model, agent.model)
                model_response: ModelResponse = agent.model.response(
                    messages=run_messages.messages,
                    response_format=response_format,
                    tools=tools,
                    tool_choice=agent.tool_choice,
                    tool_call_limit=agent.tool_call_limit,
                    run_response=run_response,
                    send_media_to_model=agent.send_media_to_model,
                    compression_manager=agent.compression_manager if agent.compress_tool_results else None,
                )
                # Check for cancellation after model processing
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # If an output model is provided, generate output using the output model
                generate_response_with_output_model(agent, model_response, run_messages, run_response=run_response)
                # If a parser model is provided, structure the response separately
                parse_response_with_parser_model(
                    agent, model_response, run_messages, run_context=run_context, run_response=run_response
                )
                # 3. Update the RunOutput with the model response
                update_run_response(
                    agent,
                    model_response=model_response,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                )
                # If any tool call paused the run (e.g. awaiting confirmation), stop here
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    return handle_agent_run_paused(
                        agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                    )
                # 4. Convert the response to the structured format if needed
                convert_response_to_structured_format(agent, run_response, run_context=run_context)
                # 5. Store media if enabled
                if agent.store_media:
                    store_media_util(run_response, model_response)
                # 6. Execute post-hooks
                if agent.post_hooks is not None:
                    post_hook_iterator = execute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Post-hooks yield events; drain the iterator for its side effects only
                    deque(post_hook_iterator, maxlen=0)
                # Check for cancellation
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 7. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    session.upsert_run(run=run_response)
                    try:
                        agent.session_summary_manager.create_session_summary(
                            session=session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary creation is best-effort; never fail the run for it
                        log_warning(f"Error in session summary creation: {str(e)}")
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 8. Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                # Log Agent Telemetry
                log_agent_telemetry(agent, session_id=session.session_id, run_id=run_response.run_id)
                return run_response
            except RunCancelledException as e:
                run_response = cast(RunOutput, run_response)
                # Handle run cancellation (terminal; no retry)
                log_info(f"Run {run_response.run_id} was cancelled")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                # Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                run_response = cast(RunOutput, run_response)
                # Handle guardrail check failures (terminal; no retry)
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                return run_response
            except KeyboardInterrupt:
                run_response = cast(RunOutput, run_response)
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                return run_response
            except Exception as e:
                run_response = cast(RunOutput, run_response)
                # Check if this is the last attempt
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    time.sleep(delay)
                    continue
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                return run_response
    finally:
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always clean up the run tracking
        cleanup_run(run_response.run_id)  # type: ignore
    # Fallback return if the retry loop exits without returning (defensive)
    return run_response
def _continue_run_stream(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    run_context: RunContext,
    session: AgentSession,
    tools: List[Union[Function, dict]],
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs,
) -> Iterator[Union[RunOutputEvent, RunOutput]]:
    """Continue a previous run (sync, streaming).

    Steps:
    1. Resolve dependencies
    2. Handle any updated tools
    3. Process model response
    4. Execute post-hooks
    5. Create session summary
    6. Cleanup and store the run response and session

    Yields run events as they occur; when ``yield_run_output`` is True the final
    RunOutput is yielded last. Generic exceptions are retried up to
    ``agent.retries`` times; cancellation, guardrail failures, and keyboard
    interrupts are terminal.
    """
    from agno.agent._hooks import execute_post_hooks
    from agno.agent._init import disconnect_connectable_tools
    from agno.agent._response import handle_model_response_stream, parse_response_with_parser_model_stream
    from agno.agent._telemetry import log_agent_telemetry
    from agno.agent._tools import handle_tool_call_updates_stream
    # Register run for cancellation tracking
    register_run(run_response.run_id)  # type: ignore
    # Set up retry logic
    num_attempts = agent.retries + 1
    try:
        for attempt in range(num_attempts):
            try:
                # 1. Resolve dependencies
                if run_context.dependencies is not None:
                    resolve_run_dependencies(agent, run_context=run_context)
                # Start the Run by yielding a RunContinued event
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_continued_event(run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # 2. Handle the updated tools
                yield from handle_tool_call_updates_stream(
                    agent,
                    run_response=run_response,
                    run_messages=run_messages,
                    tools=tools,
                    stream_events=stream_events,
                )
                # 3. Process model response
                for event in handle_model_response_stream(
                    agent,
                    session=session,
                    run_response=run_response,
                    run_messages=run_messages,
                    tools=tools,
                    response_format=response_format,
                    stream_events=stream_events,
                    session_state=run_context.session_state,
                    run_context=run_context,
                ):
                    yield event
                # Parse response with parser model if provided
                yield from parse_response_with_parser_model_stream(
                    agent,  # type: ignore
                    session=session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                )
                # Yield RunContentCompletedEvent
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # If any tool call paused the run (e.g. awaiting confirmation), stop here
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    yield from handle_agent_run_paused_stream(
                        agent,
                        run_response=run_response,
                        session=session,
                        run_context=run_context,
                        user_id=user_id,
                        yield_run_output=yield_run_output or False,
                    )
                    return
                # Execute post-hooks
                if agent.post_hooks is not None:
                    yield from execute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        session=session,
                        run_context=run_context,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                # Check for cancellation after post-hooks
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 4. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    session.upsert_run(run=run_response)
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                    try:
                        agent.session_summary_manager.create_session_summary(
                            session=session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary creation is best-effort; never fail the run for it
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_completed_event(
                                from_run_response=run_response, session_summary=session.summary
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                # Update run_response.session_state before creating RunCompletedEvent
                # This ensures the event has the final state after all tool modifications
                if session.session_data is not None and "session_state" in session.session_data:
                    run_response.session_state = session.session_data["session_state"]
                # Create the run completed event
                completed_event = handle_event(
                    create_run_completed_event(run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 5. Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                if stream_events:
                    yield completed_event  # type: ignore
                if yield_run_output:
                    yield run_response
                # Log Agent Telemetry
                log_agent_telemetry(agent, session_id=session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                break
            except RunCancelledException as e:
                run_response = cast(RunOutput, run_response)
                # Handle run cancellation during streaming (terminal; no retry)
                log_info(f"Run {run_response.run_id} was cancelled during streaming")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                # Yield the cancellation event
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                break
            except (InputCheckError, OutputCheckError) as e:
                run_response = cast(RunOutput, run_response)
                # Handle guardrail check failures during streaming (terminal; no retry)
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                yield run_error
                break
            except KeyboardInterrupt:
                run_response = cast(RunOutput, run_response)
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason="Operation cancelled by user"),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                break
            except Exception as e:
                run_response = cast(RunOutput, run_response)
                # Check if this is the last attempt
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    time.sleep(delay)
                    continue
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(run_response, error=str(e))
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                cleanup_and_store(
                    agent, run_response=run_response, session=session, run_context=run_context, user_id=user_id
                )
                yield run_error
    finally:
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always clean up the run tracking
        cleanup_run(run_response.run_id)  # type: ignore
def acontinue_run_dispatch(  # type: ignore
    agent: Agent,
    run_response: Optional[RunOutput] = None,
    *,
    run_id: Optional[str] = None,  # type: ignore
    updated_tools: Optional[List[ToolExecution]] = None,
    requirements: Optional[List[RunRequirement]] = None,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    **kwargs,
) -> Union[RunOutput, AsyncIterator[Union[RunOutputEvent, RunOutput]]]:
    """Continue a previous run (async path).

    Note: this dispatcher is synchronous; it returns an awaitable
    (``_acontinue_run``) or an async iterator (``_acontinue_run_stream``).
    Unlike the sync dispatcher, run/tool resolution is deferred to those
    callables so async DBs are supported.

    Args:
        run_response: The run response to continue.
        run_id: The run id to continue. Alternative to passing run_response.
        requirements: The requirements to continue the run. This or updated_tools is required with `run_id`.
        stream: Whether to stream the response.
        stream_events: Whether to stream all events.
        user_id: The user id to continue the run for.
        session_id: The session id to continue the run for.
        run_context: The run context to use for the run.
        knowledge_filters: The knowledge filters to use for the run.
        dependencies: The dependencies to use for continuing the run.
        metadata: The metadata to use for continuing the run.
        debug_mode: Whether to enable debug mode.
        yield_run_output: Whether to yield the run response.
        (deprecated) updated_tools: Use 'requirements' instead.

    Raises:
        ValueError: If neither run_response nor run_id is provided, or if a
            session id cannot be determined for a run_id continuation.
    """
    from agno.agent._response import get_response_format
    if run_response is None and run_id is None:
        raise ValueError("Either run_response or run_id must be provided.")
    if run_response is None and (run_id is not None and (session_id is None and agent.session_id is None)):
        raise ValueError("Session ID is required to continue a run from a run_id.")
    if updated_tools is not None:
        warnings.warn(
            "The 'updated_tools' parameter is deprecated and will be removed in future versions. Use 'requirements' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
    # Optional FastAPI BackgroundTasks passed through by server integrations
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # A provided run_response takes precedence as the source of session/run ids
    session_id = run_response.session_id if run_response else session_id
    run_id: str = run_response.run_id if run_response else run_id  # type: ignore
    session_id, user_id = initialize_session(
        agent,
        session_id=session_id,
        user_id=user_id,
    )
    # Initialize the Agent
    agent.initialize_agent(debug_mode=debug_mode)
    # Read existing session and update metadata BEFORE resolving run options,
    # so that session-stored metadata is visible to resolve_run_options.
    # Only possible with a sync DB here; async DBs are read in _acontinue_run(_stream).
    from agno.agent._init import has_async_db
    _session_state: Dict[str, Any] = {}
    if not has_async_db(agent):
        from agno.agent._storage import load_session_state, read_or_create_session, update_metadata
        _pre_session = read_or_create_session(agent, session_id=session_id, user_id=user_id)
        update_metadata(agent, session=_pre_session)
        _session_state = load_session_state(agent, session=_pre_session, session_state={})
    # Resolve all run options centrally
    opts = resolve_run_options(
        agent,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
    )
    # Prepare arguments for the model
    agent.model = cast(Model, agent.model)
    # Initialize run context before computing response_format (needs run_context)
    run_context = run_context or RunContext(
        run_id=run_id,  # type: ignore
        session_id=session_id,
        user_id=user_id,
        session_state=_session_state,
        dependencies=opts.dependencies,
        knowledge_filters=opts.knowledge_filters,
        metadata=opts.metadata,
    )
    # Apply options with precedence: explicit args > existing run_context > resolved defaults.
    opts.apply_to_context(
        run_context,
        dependencies_provided=dependencies is not None,
        knowledge_filters_provided=knowledge_filters is not None,
        metadata_provided=metadata is not None,
    )
    response_format = get_response_format(agent, run_context=run_context)
    if opts.stream:
        return _acontinue_run_stream(
            agent,
            run_response=run_response,
            run_context=run_context,
            updated_tools=updated_tools,
            requirements=requirements,
            run_id=run_id,
            user_id=user_id,
            session_id=session_id,
            response_format=response_format,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
    else:
        return _acontinue_run(  # type: ignore
            agent,
            session_id=session_id,
            run_response=run_response,
            run_context=run_context,
            updated_tools=updated_tools,
            requirements=requirements,
            run_id=run_id,
            user_id=user_id,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
async def _acontinue_run(
    agent: Agent,
    session_id: str,
    run_context: RunContext,
    run_response: Optional[RunOutput] = None,
    updated_tools: Optional[List[ToolExecution]] = None,
    requirements: Optional[List[RunRequirement]] = None,
    run_id: Optional[str] = None,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs,
) -> RunOutput:
    """Continue a previous run.
    Steps:
    1. Read existing session from db
    2. Resolve dependencies
    3. Update metadata and session state
    4. Prepare run response
    5. Determine tools for model
    6. Prepare run messages
    7. Handle the updated tools
    8. Get model response
    9. Update the RunOutput with the model response
    10. Convert response to structured format
    11. Store media if enabled
    12. Execute post-hooks
    13. Create session summary
    14. Cleanup and store (scrub, stop timer, save to file, add to session, calculate metrics, save session)
    """
    from agno.agent._hooks import aexecute_post_hooks
    from agno.agent._init import disconnect_connectable_tools, disconnect_mcp_tools
    from agno.agent._messages import get_continue_run_messages
    from agno.agent._response import (
        agenerate_response_with_output_model,
        aparse_response_with_parser_model,
        convert_response_to_structured_format,
        update_run_response,
    )
    from agno.agent._storage import aread_or_create_session, load_session_state, update_metadata
    from agno.agent._telemetry import alog_agent_telemetry
    from agno.agent._tools import ahandle_tool_call_updates, determine_tools_for_model
    log_debug(f"Agent Run Continue: {run_response.run_id if run_response else run_id}", center=True)  # type: ignore
    agent_session: Optional[AgentSession] = None
    # Resolve retry parameters
    try:
        num_attempts = agent.retries + 1
        for attempt in range(num_attempts):
            try:
                if attempt > 0:
                    log_debug(f"Retrying Agent acontinue_run {run_id}. Attempt {attempt + 1} of {num_attempts}...")
                # 1. Read existing session from db
                agent_session = await aread_or_create_session(agent, session_id=session_id, user_id=user_id)
                # 2. Resolve dependencies
                if run_context.dependencies is not None:
                    await aresolve_run_dependencies(agent, run_context=run_context)
                # 3. Update metadata and session state
                update_metadata(agent, session=agent_session)
                # Initialize session state. Get it from DB if relevant.
                run_context.session_state = load_session_state(
                    agent,
                    session=agent_session,
                    session_state=run_context.session_state if run_context.session_state is not None else {},
                )
                _initialize_session_state(
                    run_context.session_state,
                    user_id=user_id,
                    session_id=session_id,
                    run_id=run_context.run_id,
                )
                # 4. Prepare run response
                if run_response is not None:
                    # The run is continued from a provided run_response. This contains the updated tools.
                    input = run_response.messages or []
                elif run_id is not None:
                    # The run is continued from a run_id.
                    runs = agent_session.runs or []
                    run_response = next((r for r in runs if r.run_id == run_id), None)  # type: ignore
                    if run_response is None:
                        raise RuntimeError(f"No runs found for run ID {run_id}")
                    input = run_response.messages or []
                    # If we have updated_tools, set them in the run_response
                    if updated_tools is not None:
                        run_response.tools = updated_tools
                    # If we have requirements, get the updated tools and set them in the run_response
                    elif requirements is not None:
                        run_response.requirements = requirements
                        updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
                        if updated_tools and run_response.tools:
                            updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
                            run_response.tools = [
                                updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools
                            ]
                        else:
                            run_response.tools = updated_tools
                    # If no tools/requirements provided, check for resolved admin approval
                    elif run_response.tools:
                        from agno.run.approval import acheck_and_apply_approval_resolution
                        try:
                            # This will apply resolution_data to tools if approval is resolved
                            await acheck_and_apply_approval_resolution(agent.db, run_id, run_response)
                        except RuntimeError:
                            # No resolved approval found - fall back to requiring tools/requirements
                            raise ValueError(
                                "To continue a run from a given run_id, the requirements parameter must be provided "
                                "(or resolve an admin approval first)."
                            )
                    else:
                        # No tools on the run_response either
                        raise ValueError(
                            "To continue a run from a given run_id, the requirements parameter must be provided."
                        )
                else:
                    raise ValueError("Either run_response or run_id must be provided.")
                run_response = cast(RunOutput, run_response)
                run_response.status = RunStatus.running
                # 5. Determine tools for model
                agent.model = cast(Model, agent.model)
                processed_tools = await agent.aget_tools(
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    user_id=user_id,
                )
                _tools = determine_tools_for_model(
                    agent,
                    model=agent.model,
                    processed_tools=processed_tools,
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    async_mode=True,
                )
                # 6. Prepare run messages
                run_messages: RunMessages = get_continue_run_messages(
                    agent,
                    input=input,
                )
                # Register run for cancellation tracking
                await aregister_run(run_response.run_id)  # type: ignore
                # 7. Handle the updated tools
                await ahandle_tool_call_updates(
                    agent, run_response=run_response, run_messages=run_messages, tools=_tools
                )
                # 8. Get model response
                model_response: ModelResponse = await agent.model.aresponse(
                    messages=run_messages.messages,
                    response_format=response_format,
                    tools=_tools,
                    tool_choice=agent.tool_choice,
                    tool_call_limit=agent.tool_call_limit,
                    run_response=run_response,
                    send_media_to_model=agent.send_media_to_model,
                    compression_manager=agent.compression_manager if agent.compress_tool_results else None,
                )
                # Check for cancellation after model call
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # If an output model is provided, generate output using the output model
                await agenerate_response_with_output_model(
                    agent, model_response=model_response, run_messages=run_messages, run_response=run_response
                )
                # If a parser model is provided, structure the response separately
                await aparse_response_with_parser_model(
                    agent,
                    model_response=model_response,
                    run_messages=run_messages,
                    run_context=run_context,
                    run_response=run_response,
                )
                # 9. Update the RunOutput with the model response
                update_run_response(
                    agent,
                    model_response=model_response,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                )
                # Break out of the run function if a tool call is paused
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    return await ahandle_agent_run_paused(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # 10. Convert the response to the structured format if needed
                convert_response_to_structured_format(agent, run_response, run_context=run_context)
                # 11. Store media if enabled
                if agent.store_media:
                    store_media_util(run_response, model_response)
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 12. Execute post-hooks
                if agent.post_hooks is not None:
                    async for _ in aexecute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    ):
                        pass
                # Check for cancellation
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 13. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    agent_session.upsert_run(run=run_response)
                    try:
                        await agent.session_summary_manager.acreate_session_summary(
                            session=agent_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        log_warning(f"Error in session summary creation: {str(e)}")
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 14. Cleanup and store the run response and session
                await acleanup_and_store(
                    agent,
                    run_response=run_response,
                    session=agent_session,
                    run_context=run_context,
                    user_id=user_id,
                )
                # Log Agent Telemetry
                await alog_agent_telemetry(agent, session_id=agent_session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                return run_response
            except RunCancelledException as e:
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                # Handle run cancellation
                log_info(f"Run {run_response.run_id if run_response else run_id} was cancelled")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                # run_response may still be None if the failure happened before it was
                # resolved -- guard before dereferencing (mirrors _acontinue_run_stream)
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                # Handle validation failures raised by input/output checks
                run_response.status = RunStatus.error
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response
            except KeyboardInterrupt:
                # Guard against run_response being None before setting status/content
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                return run_response
            except Exception as e:
                run_response = cast(RunOutput, run_response)
                # Check if this is the last attempt
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                    continue
                if not run_response:
                    run_response = RunOutput(run_id=run_id)
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(run_response, error=str(e))  # type: ignore
                run_response.events = add_error_event(error=run_error, events=run_response.events)  # type: ignore
                # If the content is None, set it to the error message
                if run_response.content is None:  # type: ignore
                    run_response.content = str(e)  # type: ignore
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,  # type: ignore
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                return run_response  # type: ignore
    finally:
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always disconnect MCP tools
        await disconnect_mcp_tools(agent)
        # Always clean up the run tracking. run_response may be None if an error was
        # raised before it was resolved, so fall back to the run_id argument instead
        # of dereferencing None (which would mask the in-flight exception).
        cleanup_run_id = run_response.run_id if run_response and run_response.run_id is not None else run_id
        if cleanup_run_id is not None:
            await acleanup_run(cleanup_run_id)
    return run_response  # type: ignore
async def _acontinue_run_stream(
    agent: Agent,
    session_id: str,
    run_context: RunContext,
    run_response: Optional[RunOutput] = None,
    updated_tools: Optional[List[ToolExecution]] = None,
    requirements: Optional[List[RunRequirement]] = None,
    run_id: Optional[str] = None,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: bool = False,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs,
) -> AsyncIterator[Union[RunOutputEvent, RunOutput]]:
    """Continue a previous run, streaming events as they are produced.
    Steps:
    1. Read existing session from db
    2. Update session state and metadata
    3. Resolve dependencies
    4. Prepare run response
    5. Determine tools for model
    6. Prepare run messages
    7. Handle the updated tools
    8. Process model response
    9. Execute post-hooks
    10. Create session summary
    11. Cleanup and store the run response and session
    """
    from agno.agent._hooks import aexecute_post_hooks
    from agno.agent._init import disconnect_connectable_tools, disconnect_mcp_tools
    from agno.agent._messages import get_continue_run_messages
    from agno.agent._response import (
        agenerate_response_with_output_model_stream,
        ahandle_model_response_stream,
        aparse_response_with_parser_model_stream,
    )
    from agno.agent._storage import aread_or_create_session, load_session_state, update_metadata
    from agno.agent._telemetry import alog_agent_telemetry
    from agno.agent._tools import ahandle_tool_call_updates_stream, determine_tools_for_model
    log_debug(f"Agent Run Continue: {run_response.run_id if run_response else run_id}", center=True)  # type: ignore
    agent_session: Optional[AgentSession] = None
    # Retry loop: agent.retries extra attempts with optional exponential backoff
    try:
        num_attempts = agent.retries + 1
        for attempt in range(num_attempts):
            try:
                # 1. Read existing session from db
                agent_session = await aread_or_create_session(agent, session_id=session_id, user_id=user_id)
                # 2. Update session state and metadata
                update_metadata(agent, session=agent_session)
                # Initialize session state. Get it from DB if relevant.
                run_context.session_state = load_session_state(
                    agent,
                    session=agent_session,
                    session_state=run_context.session_state if run_context.session_state is not None else {},
                )
                _initialize_session_state(
                    run_context.session_state,
                    user_id=user_id,
                    session_id=session_id,
                    run_id=run_context.run_id,
                )
                # 3. Resolve dependencies
                if run_context.dependencies is not None:
                    await aresolve_run_dependencies(agent, run_context=run_context)
                # 4. Prepare run response
                if run_response is not None:
                    # The run is continued from a provided run_response. This contains the updated tools.
                    input = run_response.messages or []
                elif run_id is not None:
                    # The run is continued from a run_id.
                    runs = agent_session.runs or []
                    run_response = next((r for r in runs if r.run_id == run_id), None)  # type: ignore
                    if run_response is None:
                        raise RuntimeError(f"No runs found for run ID {run_id}")
                    input = run_response.messages or []
                    # If we have updated_tools, set them in the run_response
                    if updated_tools is not None:
                        run_response.tools = updated_tools
                    # If we have requirements, get the updated tools and set them in the run_response
                    elif requirements is not None:
                        run_response.requirements = requirements
                        updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
                        if updated_tools and run_response.tools:
                            # Merge by tool_call_id: updated entries replace the stored ones
                            updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
                            run_response.tools = [
                                updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools
                            ]
                        else:
                            run_response.tools = updated_tools
                    # If no tools/requirements provided, check for resolved admin approval
                    elif run_response.tools:
                        from agno.run.approval import acheck_and_apply_approval_resolution
                        try:
                            # This will apply resolution_data to tools if approval is resolved
                            await acheck_and_apply_approval_resolution(agent.db, run_id, run_response)
                        except RuntimeError:
                            # No resolved approval found - fall back to requiring tools/requirements
                            raise ValueError(
                                "To continue a run from a given run_id, the requirements parameter must be provided "
                                "(or resolve an admin approval first)."
                            )
                    else:
                        # No tools on the run_response either
                        raise ValueError(
                            "To continue a run from a given run_id, the requirements parameter must be provided."
                        )
                else:
                    raise ValueError("Either run_response or run_id must be provided.")
                run_response = cast(RunOutput, run_response)
                run_response.status = RunStatus.running
                # 5. Determine tools for model
                agent.model = cast(Model, agent.model)
                processed_tools = await agent.aget_tools(
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    user_id=user_id,
                )
                _tools = determine_tools_for_model(
                    agent,
                    model=agent.model,
                    processed_tools=processed_tools,
                    run_response=run_response,
                    run_context=run_context,
                    session=agent_session,
                    async_mode=True,
                )
                # 6. Prepare run messages
                run_messages: RunMessages = get_continue_run_messages(
                    agent,
                    input=input,
                )
                # Register run for cancellation tracking
                await aregister_run(run_response.run_id)  # type: ignore
                # Start the Run by yielding a RunContinued event
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_continued_event(run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # 7. Handle the updated tools
                async for event in ahandle_tool_call_updates_stream(
                    agent,
                    run_response=run_response,
                    run_messages=run_messages,
                    tools=_tools,
                    stream_events=stream_events,
                ):
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    yield event
                # 8. Process model response
                if agent.output_model is None:
                    async for event in ahandle_model_response_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        run_context=run_context,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                else:
                    from agno.run.agent import (
                        IntermediateRunContentEvent,
                        RunContentEvent,
                    )  # type: ignore
                    # With an output model, raw model content is only "intermediate":
                    # the output model produces the final content below.
                    async for event in ahandle_model_response_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        run_context=run_context,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        if isinstance(event, RunContentEvent):
                            if stream_events:
                                yield IntermediateRunContentEvent(
                                    content=event.content,
                                    content_type=event.content_type,
                                )
                        else:
                            yield event
                    # If an output model is provided, generate output using the output model
                    async for event in agenerate_response_with_output_model_stream(
                        agent,
                        session=agent_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        stream_events=stream_events,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event  # type: ignore
                # Check for cancellation after model processing
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # Parse response with parser model if provided
                async for event in aparse_response_with_parser_model_stream(
                    agent,
                    session=agent_session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                ):
                    yield event  # type: ignore
                # Yield RunContentCompletedEvent
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                # Break out of the run function if a tool call is paused
                if any(tool_call.is_paused for tool_call in run_response.tools or []):
                    async for item in ahandle_agent_run_paused_stream(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                        yield_run_output=yield_run_output or False,
                    ):
                        yield item
                    return
                # 9. Execute post-hooks
                if agent.post_hooks is not None:
                    async for event in aexecute_post_hooks(
                        agent,
                        hooks=agent.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=agent_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    ):
                        yield event
                # Check for cancellation after post-hooks
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 10. Create session summary
                if agent.session_summary_manager is not None and agent.enable_session_summaries:
                    # Upsert the RunOutput to Agent Session before creating the session summary
                    agent_session.upsert_run(run=run_response)
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                    try:
                        await agent.session_summary_manager.acreate_session_summary(
                            session=agent_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_session_summary_completed_event(
                                from_run_response=run_response, session_summary=agent_session.summary
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
                # Update run_response.session_state before creating RunCompletedEvent
                # This ensures the event has the final state after all tool modifications
                if agent_session.session_data is not None and "session_state" in agent_session.session_data:
                    run_response.session_state = agent_session.session_data["session_state"]
                # Create the run completed event
                completed_event = handle_event(
                    create_run_completed_event(run_response),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 11. Cleanup and store the run response and session
                await acleanup_and_store(
                    agent, run_response=run_response, session=agent_session, run_context=run_context, user_id=user_id
                )
                if stream_events:
                    yield completed_event  # type: ignore
                if yield_run_output:
                    yield run_response
                # Log Agent Telemetry
                await alog_agent_telemetry(agent, session_id=agent_session.session_id, run_id=run_response.run_id)
                log_debug(f"Agent Run End: {run_response.run_id}", center=True, symbol="*")
                break
            except RunCancelledException as e:
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                # Handle run cancellation during streaming
                log_info(f"Run {run_response.run_id if run_response.run_id else run_id} was cancelled during streaming")
                run_response.status = RunStatus.cancelled
                # Don't overwrite content - preserve any partial content that was streamed
                # Only set content if it's empty
                if not run_response.content:
                    run_response.content = str(e)
                # Yield the cancellation event
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                break
            except (InputCheckError, OutputCheckError) as e:
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                # Handle exceptions during async streaming
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check trigger: {e.check_trigger}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # Yield the error event
                yield run_error
                break
            except KeyboardInterrupt:
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                yield handle_event(  # type: ignore
                    create_run_cancelled_event(from_run_response=run_response, reason="Operation cancelled by user"),
                    run_response,
                    events_to_skip=agent.events_to_skip,  # type: ignore
                    store_events=agent.store_events,
                )
                break
            except Exception as e:
                if run_response is None:
                    run_response = RunOutput(run_id=run_id)
                run_response = cast(RunOutput, run_response)
                # Check if this is the last attempt
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if agent.exponential_backoff:
                        delay = agent.delay_between_retries * (2**attempt)
                    else:
                        delay = agent.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                    continue
                # Handle exceptions during async streaming
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_run_error_event(run_response, error=str(e))
                run_response.events = add_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Agent run: {str(e)}")
                # Cleanup and store the run response and session
                if agent_session is not None:
                    await acleanup_and_store(
                        agent,
                        run_response=run_response,
                        session=agent_session,
                        run_context=run_context,
                        user_id=user_id,
                    )
                # Yield the error event
                yield run_error
    finally:
        # Always disconnect connectable tools
        disconnect_connectable_tools(agent)
        # Always disconnect MCP tools
        await disconnect_mcp_tools(agent)
        # Always clean up the run tracking; run_response may still be None here
        cleanup_run_id = run_response.run_id if run_response and run_response.run_id is not None else run_id
        if cleanup_run_id is not None:
            await acleanup_run(cleanup_run_id)
# ---------------------------------------------------------------------------
# Post-run cleanup
# ---------------------------------------------------------------------------
def save_run_response_to_file(
    agent: Agent,
    run_response: RunOutput,
    input: Optional[Union[str, List, Dict, Message]] = None,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> None:
    """Optionally persist the run output to the file configured on the agent.

    The target path comes from ``agent.save_response_to_file``, which may contain
    ``{name}``, ``{session_id}``, ``{user_id}``, ``{message}`` and ``{run_id}``
    placeholders. Format values are sanitized to prevent path traversal. Any
    failure is logged as a warning and never raised to the caller.

    Args:
        agent: The agent whose ``save_response_to_file`` setting is consulted.
        input: The run input; only used in the file name when it is a string.
        session_id: Session id substituted into the file-name template.
        user_id: User id substituted into the file-name template.
    """
    if agent.save_response_to_file is None or run_response is None:
        return
    message_str = None
    if input is not None:
        if isinstance(input, str):
            message_str = input
        else:
            log_warning("Did not use input in output file name: input is not a string")
    try:
        from pathlib import Path

        def _sanitize(value: Any) -> str:
            """Strip path-traversal characters from format values."""
            s = str(value) if value is not None else ""
            return s.replace("/", "_").replace("\\", "_").replace("..", "_")

        fn = agent.save_response_to_file.format(
            name=_sanitize(agent.name),
            session_id=_sanitize(session_id),
            user_id=_sanitize(user_id),
            message=_sanitize(message_str),
            run_id=_sanitize(run_response.run_id),
        )
        fn_path = Path(fn)
        # exist_ok=True makes a prior existence check unnecessary
        fn_path.parent.mkdir(parents=True, exist_ok=True)
        # Write with an explicit encoding so output does not depend on the platform default
        if isinstance(run_response.content, str):
            fn_path.write_text(run_response.content, encoding="utf-8")
        else:
            import json

            fn_path.write_text(json.dumps(run_response.content, indent=2), encoding="utf-8")
    except Exception as e:
        log_warning(f"Failed to save output to file: {e}")
def scrub_run_output_for_storage(agent: Agent, run_response: RunOutput) -> None:
    """Scrub run output based on storage flags before persisting to database."""
    # Each storage flag gates one scrubber; run every scrubber whose flag is off.
    flag_scrubber_pairs = (
        (agent.store_media, scrub_media_from_run_output),
        (agent.store_tool_messages, scrub_tool_results_from_run_output),
        (agent.store_history_messages, scrub_history_messages_from_run_output),
    )
    for keep, scrubber in flag_scrubber_pairs:
        if not keep:
            scrubber(run_response)
def cleanup_and_store(
    agent: Agent,
    run_response: RunOutput,
    session: AgentSession,
    run_context: Optional[RunContext] = None,
    user_id: Optional[str] = None,
) -> None:
    """Finalize a run: scrub, stop the timer, persist to file and session, update
    session metrics, and sync any associated approval's run_status."""
    from agno.agent import _session
    from agno.run.approval import update_approval_run_status

    # Remove anything the storage flags say must not be persisted
    scrub_run_output_for_storage(agent, run_response)
    # The run is over: freeze its duration metric
    if run_response.metrics:
        run_response.metrics.stop_timer()
    # Propagate the final session state onto the RunOutput so it reflects all tool modifications
    final_state = run_context.session_state if run_context is not None else None
    if final_state is not None:
        run_response.session_state = final_state
    # Optionally mirror the response to a file on disk
    input_str = run_response.input.input_content_string() if run_response.input else ""
    save_run_response_to_file(
        agent,
        run_response=run_response,
        input=input_str,
        session_id=session.session_id,
        user_id=user_id,
    )
    # Attach the run to the session and refresh session-level metrics
    session.upsert_run(run=run_response)
    update_session_metrics(agent, session=session, run_response=run_response)
    # Persist the final session state on the session itself before saving
    if final_state is not None:
        if session.session_data is None:
            session.session_data = {}
        session.session_data["session_state"] = final_state
    # Save session to memory
    _session.save_session(agent, session=session)
    # Sync the approval's run_status if this run has an associated approval
    # (no-op when no approval exists for this run_id).
    if run_response.status is not None and run_response.run_id is not None:
        update_approval_run_status(agent.db, run_response.run_id, run_response.status)
async def acleanup_and_store(
    agent: Agent,
    run_response: RunOutput,
    session: AgentSession,
    run_context: Optional[RunContext] = None,
    user_id: Optional[str] = None,
) -> None:
    """Async twin of ``cleanup_and_store``: scrub, stop the timer, persist to file
    and session, update session metrics, and sync any associated approval's run_status."""
    from agno.agent import _session
    from agno.run.approval import aupdate_approval_run_status

    # Remove anything the storage flags say must not be persisted
    scrub_run_output_for_storage(agent, run_response)
    # The run is over: freeze its duration metric
    if run_response.metrics:
        run_response.metrics.stop_timer()
    # Propagate the final session state onto the RunOutput so it reflects all tool modifications
    final_state = run_context.session_state if run_context is not None else None
    if final_state is not None:
        run_response.session_state = final_state
    # Optionally mirror the response to a file on disk
    input_str = run_response.input.input_content_string() if run_response.input else ""
    save_run_response_to_file(
        agent,
        run_response=run_response,
        input=input_str,
        session_id=session.session_id,
        user_id=user_id,
    )
    # Attach the run to the session and refresh session-level metrics
    session.upsert_run(run=run_response)
    update_session_metrics(agent, session=session, run_response=run_response)
    # Persist the final session state on the session itself before saving
    if final_state is not None:
        if session.session_data is None:
            session.session_data = {}
        session.session_data["session_state"] = final_state
    # Save session to memory
    await _session.asave_session(agent, session=session)
    # Sync the approval's run_status if this run has an associated approval
    # (no-op when no approval exists for this run_id).
    if run_response.status is not None and run_response.run_id is not None:
        await aupdate_approval_run_status(agent.db, run_response.run_id, run_response.status)
# ---------------------------------------------------------------------------
# Run cancellation — re-export from agno.run.cancel
# ---------------------------------------------------------------------------
# Public aliases so callers can import the cancellation helpers from this module.
cancel_run = cancel_run_global
acancel_run = acancel_run_global
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_run.py",
"license": "Apache License 2.0",
"lines": 3911,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_run_options.py | """Centralized run option resolution for agent dispatch functions."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.filters import FilterExpr
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.run import RunContext
@dataclass(frozen=True)
class ResolvedRunOptions:
    """Immutable snapshot of resolved run options.

    All values are fully resolved (call-site > agent default > fallback)
    at construction time, except metadata where agent-level values take
    precedence on conflicting keys.
    """

    stream: bool
    stream_events: bool
    yield_run_output: bool
    add_history_to_context: bool
    add_dependencies_to_context: bool
    add_session_state_to_context: bool
    dependencies: Optional[Dict[str, Any]]
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]]
    metadata: Optional[Dict[str, Any]]
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]]

    def apply_to_context(
        self,
        run_context: RunContext,
        *,
        dependencies_provided: bool = False,
        knowledge_filters_provided: bool = False,
        metadata_provided: bool = False,
    ) -> None:
        """Apply resolved options to run_context with precedence:
        explicit args > existing run_context > resolved defaults.

        Args:
            run_context: The run context to mutate in place.
            dependencies_provided: True when dependencies were passed explicitly
                at the call site (forces overwrite of the context value).
            knowledge_filters_provided: Same, for knowledge filters.
            metadata_provided: Same, for metadata.
        """
        # Each field is written when it was explicitly provided at the call
        # site OR the context does not have a value yet. (The original code
        # spelled this as an if/elif pair whose two branches performed the
        # identical assignment; collapsed into a single condition.)
        if dependencies_provided or run_context.dependencies is None:
            run_context.dependencies = self.dependencies
        if knowledge_filters_provided or run_context.knowledge_filters is None:
            run_context.knowledge_filters = self.knowledge_filters
        if metadata_provided or run_context.metadata is None:
            run_context.metadata = self.metadata
        # Always set output_schema from resolved options.
        # Unlike other fields, output_schema must always be updated because the same run_context
        # may be reused across workflow steps with different agents, each with their own output_schema.
        # This matches the main branch behavior: "output_schema parameter takes priority, even if
        # run_context was provided"
        run_context.output_schema = self.output_schema
def resolve_run_options(
    agent: Agent,
    *,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    yield_run_output: Optional[bool] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
) -> ResolvedRunOptions:
    """Build a ResolvedRunOptions snapshot from call-site values and agent defaults.

    Precedence is call-site > agent default > hard-coded fallback, except for
    metadata, where agent-level keys win on conflict. Reads from ``agent`` but
    never mutates it.
    """
    from agno.agent._utils import get_effective_filters
    from agno.utils.merge_dict import merge_dictionaries

    def _first_set(*candidates: Any) -> Any:
        # First candidate that is not None (None if all are).
        for candidate in candidates:
            if candidate is not None:
                return candidate
        return None

    # stream: call-site > agent.stream > False
    stream_resolved: bool = _first_set(stream, agent.stream, False)

    # stream_events is forced off when not streaming; otherwise resolve normally.
    events_resolved: bool = (
        _first_set(stream_events, agent.stream_events, False) if stream_resolved else False
    )

    # yield_run_output: call-site > False
    yield_resolved = bool(yield_run_output)

    # Context flags fall back to the agent-level setting.
    history_resolved = _first_set(add_history_to_context, agent.add_history_to_context)
    deps_flag_resolved = _first_set(add_dependencies_to_context, agent.add_dependencies_to_context)
    state_flag_resolved = _first_set(add_session_state_to_context, agent.add_session_state_to_context)

    # dependencies: call-site > agent default; copied so later resolution
    # cannot mutate the agent's defaults.
    deps_source = dependencies if dependencies is not None else agent.dependencies
    deps_resolved = deps_source.copy() if deps_source is not None else None

    # knowledge_filters: delegate to get_effective_filters() when either side set.
    filters_resolved: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
    if agent.knowledge_filters or knowledge_filters:
        filters_resolved = get_effective_filters(agent, knowledge_filters=knowledge_filters)

    # metadata: merge call-site and agent values, agent keys taking precedence.
    metadata_resolved: Optional[Dict[str, Any]] = None
    if metadata is not None:
        metadata_resolved = metadata.copy()
        if agent.metadata is not None:
            merge_dictionaries(metadata_resolved, agent.metadata)
    elif agent.metadata is not None:
        metadata_resolved = agent.metadata.copy()

    return ResolvedRunOptions(
        stream=stream_resolved,
        stream_events=events_resolved,
        yield_run_output=yield_resolved,
        add_history_to_context=history_resolved,
        add_dependencies_to_context=deps_flag_resolved,
        add_session_state_to_context=state_flag_resolved,
        dependencies=deps_resolved,
        knowledge_filters=filters_resolved,
        metadata=metadata_resolved,
        output_schema=_first_set(output_schema, agent.output_schema),
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_run_options.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_session.py | """Public session accessors and management for Agent."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Tuple,
Union,
cast,
)
from uuid import uuid4
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.metrics import SessionMetrics
from agno.models.message import Message
from agno.run import RunStatus
from agno.run.agent import RunOutput
from agno.session import AgentSession, TeamSession, WorkflowSession
from agno.session.summary import SessionSummary
from agno.utils.agent import (
aget_session_metrics_util,
aget_session_name_util,
aget_session_state_util,
aset_session_name_util,
aupdate_session_state_util,
get_session_metrics_util,
get_session_name_util,
get_session_state_util,
set_session_name_util,
update_session_state_util,
)
from agno.utils.log import log_debug, log_error, log_warning
# ---------------------------------------------------------------------------
# Session initialization
# ---------------------------------------------------------------------------
def initialize_session(
    agent: Agent,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> Tuple[str, Optional[str]]:
    """Resolve the (session_id, user_id) pair for a run.

    A missing session_id falls back to the agent's sticky session_id, or a
    fresh UUID (which is then stored on the agent so later calls reuse it).
    A missing/empty user_id falls back to the agent's default user.
    """
    if session_id is None:
        if agent.session_id:
            session_id = agent.session_id
        else:
            session_id = str(uuid4())
            # Stick the generated id on the agent instance so subsequent
            # calls without an explicit session_id reuse the same session.
            agent.session_id = session_id

    log_debug(f"Session ID: {session_id}", center=True)

    # Treat both None and "" as "no user provided".
    if not user_id:
        user_id = agent.user_id

    return session_id, user_id
# ---------------------------------------------------------------------------
# Session CRUD
# ---------------------------------------------------------------------------
def get_session(
    agent: Agent,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
    """Load an AgentSession from database or cache.

    Args:
        agent: The Agent instance.
        session_id: The session_id to load from storage. Falls back to
            ``agent.session_id`` when not provided.
        user_id: When given, a cached session is only returned if it belongs
            to this user; the db lookup is also filtered by it.

    Returns:
        AgentSession: The AgentSession loaded from the database/cache or None if not found.

    Raises:
        Exception: If neither ``session_id`` nor ``agent.session_id`` is set.
        ValueError: If the agent is configured with an async database.
    """
    from agno.agent import _init, _storage

    if not session_id and not agent.session_id:
        raise Exception("No session_id provided")
    session_id_to_load: str = session_id or agent.session_id  # type: ignore[assignment]

    # If there is a cached session, return it (checked before the async-db
    # guard so cached reads work regardless of db type).
    if agent.cache_session and hasattr(agent, "_cached_session") and agent._cached_session is not None:
        if agent._cached_session.session_id == session_id_to_load and (
            user_id is None or agent._cached_session.user_id == user_id
        ):
            return agent._cached_session

    if _init.has_async_db(agent):
        raise ValueError("Cannot use sync get_session() with an async database. Use aget_session() instead.")

    # Load and return the session from the database. The session type to read
    # depends on how the agent is embedded: standalone -> AgentSession,
    # team member -> TeamSession, workflow member -> WorkflowSession.
    if agent.db is not None:
        loaded_session: Optional[Union[AgentSession, TeamSession, WorkflowSession]] = None
        # We have a standalone agent, so we are loading an AgentSession
        if agent.team_id is None and agent.workflow_id is None:
            loaded_session = cast(
                AgentSession,
                _storage.read_session(
                    agent, session_id=session_id_to_load, session_type=SessionType.AGENT, user_id=user_id
                ),  # type: ignore[arg-type]
            )
        # We have a team member agent, so we are loading a TeamSession
        if loaded_session is None and agent.team_id is not None:
            loaded_session = cast(
                TeamSession,
                _storage.read_session(
                    agent, session_id=session_id_to_load, session_type=SessionType.TEAM, user_id=user_id
                ),  # type: ignore[arg-type]
            )
        # We have a workflow member agent, so we are loading a WorkflowSession
        if loaded_session is None and agent.workflow_id is not None:
            loaded_session = cast(
                WorkflowSession,
                _storage.read_session(
                    agent, session_id=session_id_to_load, session_type=SessionType.WORKFLOW, user_id=user_id
                ),  # type: ignore[arg-type]
            )
        # Cache the session if relevant
        if loaded_session is not None and agent.cache_session:
            agent._cached_session = loaded_session  # type: ignore
        return loaded_session

    log_debug(f"Session {session_id_to_load} not found in db")
    return None
async def aget_session(
    agent: Agent,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
    """Load an AgentSession from database or cache (async counterpart of get_session).

    Args:
        agent: The Agent instance.
        session_id: The session_id to load from storage. Falls back to
            ``agent.session_id`` when not provided.
        user_id: When given, a cached session is only returned if it belongs
            to this user; the db lookup is also filtered by it.

    Returns:
        AgentSession: The AgentSession loaded from the database/cache or None if not found.

    Raises:
        Exception: If neither ``session_id`` nor ``agent.session_id`` is set.
    """
    from agno.agent import _storage

    if not session_id and not agent.session_id:
        raise Exception("No session_id provided")
    session_id_to_load: str = session_id or agent.session_id  # type: ignore[assignment]

    # If there is a cached session, return it
    if agent.cache_session and hasattr(agent, "_cached_session") and agent._cached_session is not None:
        if agent._cached_session.session_id == session_id_to_load and (
            user_id is None or agent._cached_session.user_id == user_id
        ):
            return agent._cached_session

    # Load and return the session from the database. The session type to read
    # depends on how the agent is embedded: standalone -> AgentSession,
    # team member -> TeamSession, workflow member -> WorkflowSession.
    if agent.db is not None:
        loaded_session: Optional[Union[AgentSession, TeamSession, WorkflowSession]] = None
        # We have a standalone agent, so we are loading an AgentSession
        if agent.team_id is None and agent.workflow_id is None:
            loaded_session = cast(
                AgentSession,
                await _storage.aread_session(
                    agent, session_id=session_id_to_load, session_type=SessionType.AGENT, user_id=user_id
                ),  # type: ignore[arg-type]
            )
        # We have a team member agent, so we are loading a TeamSession
        if loaded_session is None and agent.team_id is not None:
            loaded_session = cast(
                TeamSession,
                await _storage.aread_session(
                    agent, session_id=session_id_to_load, session_type=SessionType.TEAM, user_id=user_id
                ),  # type: ignore[arg-type]
            )
        # We have a workflow member agent, so we are loading a WorkflowSession
        if loaded_session is None and agent.workflow_id is not None:
            loaded_session = cast(
                WorkflowSession,
                await _storage.aread_session(
                    agent, session_id=session_id_to_load, session_type=SessionType.WORKFLOW, user_id=user_id
                ),  # type: ignore[arg-type]
            )
        # Cache the session if relevant
        if loaded_session is not None and agent.cache_session:
            agent._cached_session = loaded_session  # type: ignore
        return loaded_session

    log_debug(f"AgentSession {session_id_to_load} not found in db")
    return None
def save_session(agent: Agent, session: Union[AgentSession, TeamSession, WorkflowSession]) -> None:
    """Persist the session to storage (sync).

    No-op when the agent has no db, when it is a team/workflow member (the
    parent owns persistence), or when the session has no session_data yet.

    Raises:
        ValueError: If the agent is configured with an async database.
    """
    from agno.agent import _init, _storage

    if _init.has_async_db(agent):
        raise ValueError("Cannot use sync save_session() with an async database. Use asave_session() instead.")

    # Only standalone agents persist their own session.
    if (
        agent.db is not None
        and agent.team_id is None
        and agent.workflow_id is None
        and session.session_data is not None
    ):
        # Strip transient run-scoped keys before persisting. (The outer
        # condition already guarantees session_data is not None, so the
        # previous duplicate None-check was dropped.)
        if isinstance(session.session_data.get("session_state"), dict):
            session.session_data["session_state"].pop("current_session_id", None)
            session.session_data["session_state"].pop("current_user_id", None)
            session.session_data["session_state"].pop("current_run_id", None)
        _storage.upsert_session(agent, session=session)
        # Log only when a write actually happened; previously the message
        # could appear even when persistence was skipped.
        log_debug(f"Created or updated AgentSession record: {session.session_id}")
async def asave_session(agent: Agent, session: Union[AgentSession, TeamSession, WorkflowSession]) -> None:
    """Persist the session to storage (async-aware).

    No-op when the agent has no db, when it is a team/workflow member (the
    parent owns persistence), or when the session has no session_data yet.
    Dispatches to the async upsert when the db is async, else the sync one.
    """
    from agno.agent import _init, _storage

    # Only standalone agents persist their own session.
    if (
        agent.db is not None
        and agent.team_id is None
        and agent.workflow_id is None
        and session.session_data is not None
    ):
        # Strip transient run-scoped keys before persisting. (The outer
        # condition already guarantees session_data is not None, so the
        # previous duplicate None-check was dropped.)
        if isinstance(session.session_data.get("session_state"), dict):
            session.session_data["session_state"].pop("current_session_id", None)
            session.session_data["session_state"].pop("current_user_id", None)
            session.session_data["session_state"].pop("current_run_id", None)
        if _init.has_async_db(agent):
            await _storage.aupsert_session(agent, session=session)
        else:
            _storage.upsert_session(agent, session=session)
        # Log only when a write actually happened; previously the message
        # could appear even when persistence was skipped.
        log_debug(f"Created or updated AgentSession record: {session.session_id}")
def delete_session(agent: Agent, session_id: str, user_id: Optional[str] = None):
    """Delete the given session from storage; no-op when the agent has no db."""
    if agent.db is None:
        return
    agent.db.delete_session(session_id=session_id, user_id=user_id)
async def adelete_session(agent: Agent, session_id: str, user_id: Optional[str] = None):
    """Delete the given session from storage; no-op when the agent has no db.

    Awaits the db call when the configured database is async, otherwise
    invokes the sync deletion directly.
    """
    from agno.agent import _init

    if agent.db is None:
        return
    if _init.has_async_db(agent):
        await agent.db.delete_session(session_id=session_id, user_id=user_id)  # type: ignore
        return
    agent.db.delete_session(session_id=session_id, user_id=user_id)
# ---------------------------------------------------------------------------
# Session name
# ---------------------------------------------------------------------------
def rename(agent: Agent, name: str, session_id: Optional[str] = None) -> None:
    """Rename the Agent, mirror the name on the session, and persist it.

    Args:
        agent: The Agent instance.
        name (str): The new name for the Agent.
        session_id (Optional[str]): The session where the name is stored.
            Defaults to the agent's current session ID.
    """
    from agno.agent import _init

    target_session_id = session_id or agent.session_id
    if target_session_id is None:
        raise Exception("Session ID is not set")
    if _init.has_async_db(agent):
        raise RuntimeError("`rename` is not supported with an async database. Please use `arename` instead.")

    session = get_session(agent, session_id=target_session_id)
    if session is None:
        raise Exception("Session not found")
    if not hasattr(session, "agent_data"):
        raise Exception("Session is not an AgentSession")

    # Update the in-memory agent first, then mirror the name on the session.
    agent.name = name
    if session.agent_data is None:  # type: ignore
        session.agent_data = {"name": name}  # type: ignore
    else:
        session.agent_data["name"] = name  # type: ignore

    save_session(agent, session=session)
def set_session_name(
    agent: Agent,
    session_id: Optional[str] = None,
    autogenerate: bool = False,
    session_name: Optional[str] = None,
) -> AgentSession:
    """Set (or autogenerate) the session name and persist it.

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.
        autogenerate: Whether to autogenerate the session name.
        session_name: Explicit name to set; ignored when autogenerating.

    Returns:
        AgentSession: The updated session.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    updated = set_session_name_util(
        agent, session_id=resolved_id, autogenerate=autogenerate, session_name=session_name
    )
    return cast(AgentSession, updated)
async def aset_session_name(
    agent: Agent,
    session_id: Optional[str] = None,
    autogenerate: bool = False,
    session_name: Optional[str] = None,
) -> AgentSession:
    """Set (or autogenerate) the session name and persist it (async).

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.
        autogenerate: Whether to autogenerate the session name.
        session_name: Explicit name to set; ignored when autogenerating.

    Returns:
        AgentSession: The updated session.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    updated = await aset_session_name_util(
        agent, session_id=resolved_id, autogenerate=autogenerate, session_name=session_name
    )
    return cast(AgentSession, updated)
def generate_session_name(agent: Agent, session: AgentSession, max_retries: int = 3, _attempt: int = 0) -> str:
    """Generate a short name for the session from its message history.

    Prompts the agent's model with the conversation transcript and asks for a
    name of at most 5 words, retrying on a None or too-long answer.

    Args:
        agent: The Agent instance.
        session (AgentSession): The session to generate a name for.
        max_retries: Maximum number of retries if generation fails.
        _attempt: Starting attempt number (kept for backward compatibility
            with the previous recursive implementation).

    Returns:
        str: The generated session name. Falls back to "New Session" when the
        model keeps returning None, or to the first 5 words when it keeps
        returning a name that is too long.

    Raises:
        Exception: If the agent has no model configured.
    """
    if agent.model is None:
        raise Exception("Model not set")

    # The transcript does not change between retries, so build it once.
    gen_session_name_prompt = "Conversation\n"
    for message in session.get_messages():
        gen_session_name_prompt += f"{message.role.upper()}: {message.content}\n"
    gen_session_name_prompt += "\n\nConversation Name: "

    system_message = Message(
        role=agent.system_message_role,
        content="Please provide a suitable name for this conversation in maximum 5 words. "
        "Remember, do not exceed 5 words.",
    )
    user_message = Message(role=agent.user_message_role, content=gen_session_name_prompt)
    generate_name_messages = [system_message, user_message]

    # Iterative retry loop (previously implemented via recursion, which
    # rebuilt the identical prompt on every attempt).
    attempt = _attempt
    while True:
        content = agent.model.response(messages=generate_name_messages).content
        if content is None:
            if attempt >= max_retries:
                return "New Session"
            log_error("Generated name is None. Trying again.")
            attempt += 1
            continue
        if len(content.split()) > 5:
            if attempt >= max_retries:
                return " ".join(content.split()[:5])
            # Message corrected: the check allows exactly 5 words, matching
            # the "maximum 5 words" instruction in the prompt.
            log_error("Generated name is too long. It should be at most 5 words. Trying again.")
            attempt += 1
            continue
        return content.replace('"', "").strip()
def get_session_name(agent: Agent, session_id: Optional[str] = None) -> str:
    """Return the stored name of a session.

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        str: The session name.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return get_session_name_util(agent, session_id=resolved_id)
async def aget_session_name(agent: Agent, session_id: Optional[str] = None) -> str:
    """Return the stored name of a session (async).

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        str: The session name.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_name_util(agent, session_id=resolved_id)
# ---------------------------------------------------------------------------
# Session state
# ---------------------------------------------------------------------------
def get_session_state(agent: Agent, session_id: Optional[str] = None) -> Dict[str, Any]:
    """Return the session state for the given session.

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        Dict[str, Any]: The session state.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return get_session_state_util(agent, session_id=resolved_id)
async def aget_session_state(agent: Agent, session_id: Optional[str] = None) -> Dict[str, Any]:
    """Return the session state for the given session (async).

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        Dict[str, Any]: The session state.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_state_util(agent, session_id=resolved_id)
def update_session_state(agent: Agent, session_state_updates: Dict[str, Any], session_id: Optional[str] = None) -> str:
    """Apply key-value updates to the session state and persist them.

    Args:
        agent: The Agent instance.
        session_state_updates: Key-value pairs to merge into the session state.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        The updated session state.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return update_session_state_util(agent, session_state_updates=session_state_updates, session_id=resolved_id)
async def aupdate_session_state(
    agent: Agent, session_state_updates: Dict[str, Any], session_id: Optional[str] = None
) -> str:
    """Apply key-value updates to the session state and persist them (async).

    Args:
        agent: The Agent instance.
        session_state_updates: Key-value pairs to merge into the session state.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        The updated session state.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aupdate_session_state_util(agent, session_state_updates=session_state_updates, session_id=resolved_id)
# ---------------------------------------------------------------------------
# Session metrics
# ---------------------------------------------------------------------------
def get_session_metrics(agent: Agent, session_id: Optional[str] = None) -> Optional[SessionMetrics]:
    """Return aggregated metrics for the given session.

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        Optional[SessionMetrics]: The session metrics.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return get_session_metrics_util(agent, session_id=resolved_id)
async def aget_session_metrics(agent: Agent, session_id: Optional[str] = None) -> Optional[SessionMetrics]:
    """Return aggregated metrics for the given session (async).

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.

    Returns:
        Optional[SessionMetrics]: The session metrics.
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_metrics_util(agent, session_id=resolved_id)
def update_session_metrics(agent: Agent, session: AgentSession, run_response: RunOutput) -> None:
    """Fold the run's metrics into the session-level SessionMetrics."""
    from agno.agent import _storage

    _storage.update_session_metrics(agent, session=session, run_response=run_response)
# ---------------------------------------------------------------------------
# Session messages
# ---------------------------------------------------------------------------
def get_session_messages(
    agent: Agent,
    session_id: Optional[str] = None,
    last_n_runs: Optional[int] = None,
    limit: Optional[int] = None,
    skip_roles: Optional[List[str]] = None,
    skip_statuses: Optional[List[RunStatus]] = None,
    skip_history_messages: bool = True,
) -> List[Message]:
    """Return the messages belonging to a session.

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.
        last_n_runs: Only include messages from the latest N runs (all by default).
        limit: Only include the latest N messages (all by default).
        skip_roles: Skip messages with these roles.
        skip_statuses: Skip messages with these statuses.
        skip_history_messages: Skip messages tagged as history in previous runs.

    Returns:
        List[Message]: The messages for the session (empty when no session ID
        can be resolved).
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        log_warning("Session ID is not set, cannot get messages for session")
        return []

    session = get_session(agent, session_id=resolved_id)
    if session is None:
        raise Exception("Session not found")

    shared_kwargs = dict(
        last_n_runs=last_n_runs,
        limit=limit,
        skip_roles=skip_roles,
        skip_statuses=skip_statuses,
        skip_history_messages=skip_history_messages,
    )
    # An agent reusing a team session filters by member ids rather than agent_id.
    if isinstance(session, TeamSession):
        member_ids = [agent.id] if agent.team_id and agent.id else None
        return session.get_messages(member_ids=member_ids, **shared_kwargs)

    # Only filter by agent_id when this agent is part of a team.
    return session.get_messages(
        agent_id=agent.id if agent.team_id is not None else None,
        **shared_kwargs,
    )
async def aget_session_messages(
    agent: Agent,
    session_id: Optional[str] = None,
    last_n_runs: Optional[int] = None,
    limit: Optional[int] = None,
    skip_roles: Optional[List[str]] = None,
    skip_statuses: Optional[List[RunStatus]] = None,
    skip_history_messages: bool = True,
) -> List[Message]:
    """Return the messages belonging to a session (async).

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.
        last_n_runs: Only include messages from the latest N runs (all by default).
        limit: Only include the latest N messages (all by default).
        skip_roles: Skip messages with these roles.
        skip_statuses: Skip messages with these statuses.
        skip_history_messages: Skip messages tagged as history in previous runs.

    Returns:
        List[Message]: The messages for the session (empty when no session ID
        can be resolved).
    """
    resolved_id = session_id or agent.session_id
    if resolved_id is None:
        log_warning("Session ID is not set, cannot get messages for session")
        return []

    session = await aget_session(agent, session_id=resolved_id)
    if session is None:
        raise Exception("Session not found")

    shared_kwargs = dict(
        last_n_runs=last_n_runs,
        limit=limit,
        skip_roles=skip_roles,
        skip_statuses=skip_statuses,
        skip_history_messages=skip_history_messages,
    )
    # An agent reusing a team session filters by member ids rather than agent_id.
    if isinstance(session, TeamSession):
        member_ids = [agent.id] if agent.team_id and agent.id else None
        return session.get_messages(member_ids=member_ids, **shared_kwargs)

    # Only filter by agent_id when this agent is part of a team.
    return session.get_messages(
        agent_id=agent.id if agent.team_id is not None else None,
        **shared_kwargs,
    )
def get_chat_history(
    agent: Agent, session_id: Optional[str] = None, last_n_runs: Optional[int] = None
) -> List[Message]:
    """Return only the user/assistant messages of a session.

    Thin wrapper over get_session_messages() that drops system and tool
    messages; use get_session_messages() for finer-grained filtering.

    Returns:
        A list of user and assistant Messages belonging to the session.
    """
    return get_session_messages(
        agent,
        session_id=session_id,
        last_n_runs=last_n_runs,
        skip_roles=["system", "tool"],
    )
async def aget_chat_history(
    agent: Agent, session_id: Optional[str] = None, last_n_runs: Optional[int] = None
) -> List[Message]:
    """Return only the user/assistant messages of a session (async).

    Thin wrapper over aget_session_messages() that drops system and tool
    messages; use aget_session_messages() for finer-grained filtering.

    Returns:
        A list of user and assistant Messages belonging to the session.
    """
    return await aget_session_messages(
        agent,
        session_id=session_id,
        last_n_runs=last_n_runs,
        skip_roles=["system", "tool"],
    )
# ---------------------------------------------------------------------------
# Session summary
# ---------------------------------------------------------------------------
def get_session_summary(agent: Agent, session_id: Optional[str] = None) -> Optional[SessionSummary]:
    """Return the stored summary for a session.

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.
            Note: only None (not "") triggers the fallback here.

    Returns:
        SessionSummary: The session summary.
    """
    resolved_id = agent.session_id if session_id is None else session_id
    if resolved_id is None:
        raise ValueError("Session ID is required")
    session = get_session(agent, session_id=resolved_id)
    if session is None:
        raise Exception(f"Session {resolved_id} not found")
    return session.get_session_summary()  # type: ignore
async def aget_session_summary(agent: Agent, session_id: Optional[str] = None) -> Optional[SessionSummary]:
    """Return the stored summary for a session (async).

    Args:
        agent: The Agent instance.
        session_id: Target session; defaults to the agent's current session ID.
            Note: only None (not "") triggers the fallback here.

    Returns:
        SessionSummary: The session summary.
    """
    resolved_id = agent.session_id if session_id is None else session_id
    if resolved_id is None:
        raise ValueError("Session ID is required")
    session = await aget_session(agent, session_id=resolved_id)
    if session is None:
        raise Exception(f"Session {resolved_id} not found")
    return session.get_session_summary()  # type: ignore
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_session.py",
"license": "Apache License 2.0",
"lines": 609,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_storage.py | """Database persistence and serialization helpers for Agent."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Type,
Union,
cast,
)
from pydantic import BaseModel
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.db.base import BaseDb, ComponentType, SessionType
from agno.db.utils import db_from_dict
from agno.metrics import RunMetrics, SessionMetrics
from agno.models.base import Model
from agno.models.message import Message
from agno.registry.registry import Registry
from agno.run.agent import RunOutput
from agno.session import AgentSession, TeamSession, WorkflowSession
from agno.tools.function import Function
from agno.utils.agent import (
aget_last_run_output_util,
aget_run_output_util,
get_last_run_output_util,
get_run_output_util,
)
from agno.utils.log import log_debug, log_error, log_warning
from agno.utils.merge_dict import merge_dictionaries
from agno.utils.string import generate_id_from_name
# ---------------------------------------------------------------------------
# Run output accessors
# ---------------------------------------------------------------------------
def get_run_output(agent: Agent, run_id: str, session_id: Optional[str] = None) -> Optional[RunOutput]:
    """Fetch a single RunOutput from the database.

    Args:
        agent: The Agent instance.
        run_id (str): The run_id to load from storage.
        session_id (Optional[str]): The session_id to load from storage.

    Returns:
        Optional[RunOutput]: The RunOutput from the database or None if not found.
    """
    # Fall back to the agent's current session when none is given explicitly.
    resolved_session_id = session_id or agent.session_id
    if not resolved_session_id:
        raise Exception("No session_id provided")
    return cast(RunOutput, get_run_output_util(agent, run_id=run_id, session_id=resolved_session_id))
async def aget_run_output(agent: Agent, run_id: str, session_id: Optional[str] = None) -> Optional[RunOutput]:
    """Fetch a single RunOutput from the database (async).

    Args:
        agent: The Agent instance.
        run_id (str): The run_id to load from storage.
        session_id (Optional[str]): The session_id to load from storage.

    Returns:
        Optional[RunOutput]: The RunOutput from the database or None if not found.
    """
    # Fall back to the agent's current session when none is given explicitly.
    resolved_session_id = session_id or agent.session_id
    if not resolved_session_id:
        raise Exception("No session_id provided")
    return cast(RunOutput, await aget_run_output_util(agent, run_id=run_id, session_id=resolved_session_id))
def get_last_run_output(agent: Agent, session_id: Optional[str] = None) -> Optional[RunOutput]:
    """Fetch the most recent run response from the database.

    Args:
        agent: The Agent instance.
        session_id (Optional[str]): The session_id to load from storage.

    Returns:
        Optional[RunOutput]: The last run response from the database or None if not found.
    """
    # Fall back to the agent's current session when none is given explicitly.
    resolved_session_id = session_id or agent.session_id
    if not resolved_session_id:
        raise Exception("No session_id provided")
    return cast(RunOutput, get_last_run_output_util(agent, session_id=resolved_session_id))
async def aget_last_run_output(agent: Agent, session_id: Optional[str] = None) -> Optional[RunOutput]:
    """Fetch the most recent run response from the database (async).

    Args:
        agent: The Agent instance.
        session_id (Optional[str]): The session_id to load from storage.

    Returns:
        Optional[RunOutput]: The last run response from the database or None if not found.
    """
    # Fall back to the agent's current session when none is given explicitly.
    resolved_session_id = session_id or agent.session_id
    if not resolved_session_id:
        raise Exception("No session_id provided")
    return cast(RunOutput, await aget_last_run_output_util(agent, session_id=resolved_session_id))
# ---------------------------------------------------------------------------
# Session I/O (low-level DB calls)
# ---------------------------------------------------------------------------
def read_session(
    agent: Agent, session_id: str, session_type: SessionType = SessionType.AGENT, user_id: Optional[str] = None
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
    """Get a Session from the database.

    Args:
        agent: The Agent instance whose db is queried.
        session_id: The session ID to read.
        session_type: The type of session to read. Defaults to SessionType.AGENT.
        user_id: Optional user ID to scope the lookup.

    Returns:
        The session from the database, or None if not found or on error.
    """
    try:
        if not agent.db:
            raise ValueError("Db not initialized")
        return agent.db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)  # type: ignore
    except Exception as e:
        # Log and swallow: callers treat a None return as "session not found".
        # (Removed leftover traceback.print_exc debug call that wrote directly
        # to stderr, bypassing the project's logging configuration.)
        log_warning(f"Error getting session from db: {e}")
        return None
async def aread_session(
    agent: Agent, session_id: str, session_type: SessionType = SessionType.AGENT, user_id: Optional[str] = None
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
    """Get a Session from the database (async).

    Awaits the db call when the agent is backed by an async database,
    otherwise falls back to the sync call.

    Args:
        agent: The Agent instance whose db is queried.
        session_id: The session ID to read.
        session_type: The type of session to read. Defaults to SessionType.AGENT.
        user_id: Optional user ID to scope the lookup.

    Returns:
        The session from the database, or None if not found or on error.
    """
    from agno.agent import _init

    try:
        if not agent.db:
            raise ValueError("Db not initialized")
        if _init.has_async_db(agent):
            return await agent.db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)  # type: ignore
        else:
            return agent.db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)  # type: ignore
    except Exception as e:
        # Log and swallow: callers treat a None return as "session not found".
        # (Removed leftover traceback.print_exc debug call that wrote directly
        # to stderr, bypassing the project's logging configuration.)
        log_warning(f"Error getting session from db: {e}")
        return None
def upsert_session(
    agent: Agent, session: Union[AgentSession, TeamSession, WorkflowSession]
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
    """Upsert a Session into the database.

    Args:
        agent: The Agent instance whose db is written to.
        session: The session to insert or update.

    Returns:
        The upserted session, or None on error.
    """
    try:
        if not agent.db:
            raise ValueError("Db not initialized")
        return agent.db.upsert_session(session=session)  # type: ignore
    except Exception as e:
        # Log and swallow: callers treat a None return as "persist failed".
        # (Removed leftover traceback.print_exc debug call that wrote directly
        # to stderr, bypassing the project's logging configuration.)
        log_warning(f"Error upserting session into db: {e}")
        return None
async def aupsert_session(
    agent: Agent, session: Union[AgentSession, TeamSession, WorkflowSession]
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession]]:
    """Upsert a Session into the database (async).

    Awaits the db call when the agent is backed by an async database,
    otherwise falls back to the sync call.

    Args:
        agent: The Agent instance whose db is written to.
        session: The session to insert or update.

    Returns:
        The upserted session, or None on error.
    """
    from agno.agent import _init

    try:
        if not agent.db:
            raise ValueError("Db not initialized")
        if _init.has_async_db(agent):
            return await agent.db.upsert_session(session=session)  # type: ignore
        else:
            return agent.db.upsert_session(session=session)  # type: ignore
    except Exception as e:
        # Log and swallow: callers treat a None return as "persist failed".
        # (Removed leftover traceback.print_exc debug call that wrote directly
        # to stderr, bypassing the project's logging configuration.)
        log_warning(f"Error upserting session into db: {e}")
        return None
# ---------------------------------------------------------------------------
# Session state helpers
# ---------------------------------------------------------------------------
def load_session_state(agent: Agent, session: AgentSession, session_state: Dict[str, Any]):
    """Merge the session_state stored on ``session`` into the given one, in place.

    On entry ``session_state`` holds agent defaults overlaid with run params.
    The state from the database is merged underneath, preserving the
    precedence run_params > db_state > agent_defaults. The caller's dict
    object is mutated and also written back onto the session.
    """
    data = session.session_data
    if data is not None and "session_state" in data:
        db_state = data.get("session_state")
        # Only merge a non-empty dict, and only when the agent is not
        # configured to overwrite whatever is stored in the database.
        should_merge = isinstance(db_state, dict) and len(db_state) > 0 and not agent.overwrite_db_session_state
        if should_merge:
            # Start from the DB state and overlay the incoming state on top.
            combined = db_state.copy()
            merge_dictionaries(combined, session_state)
            session_state.clear()
            session_state.update(combined)
    # Write the (possibly merged) state back onto the session.
    if data is not None:
        session.session_data["session_state"] = session_state
    return session_state
def update_metadata(agent: Agent, session: AgentSession):
    """Sync agent.metadata with the metadata stored on the session.

    The agent's metadata is merged into the session's metadata in place
    (agent values take precedence), and the agent then adopts the combined
    dict. Nothing happens when the session has no metadata.
    """
    if session.metadata is None:
        return
    if agent.metadata is not None:
        # Overlay the agent's metadata onto the stored session metadata.
        merge_dictionaries(session.metadata, agent.metadata)
    # Adopt the (possibly merged) session metadata as the agent's metadata.
    agent.metadata = session.metadata
def get_session_metrics_internal(agent: Agent, session: AgentSession) -> SessionMetrics:
    """Read the accumulated session metrics stored on the session.

    Handles three stored representations: a plain dict, a SessionMetrics
    instance, and a legacy RunMetrics instance (converted field by field).
    Returns fresh empty metrics when nothing usable is stored.
    """
    data = session.session_data
    stored = data.get("session_metrics") if data is not None and "session_metrics" in data else None
    if isinstance(stored, dict):
        return SessionMetrics.from_dict(stored)
    if isinstance(stored, SessionMetrics):
        return stored
    if isinstance(stored, RunMetrics):
        # Legacy format: copy the token counts and cost over one by one.
        return SessionMetrics(
            input_tokens=stored.input_tokens,
            output_tokens=stored.output_tokens,
            total_tokens=stored.total_tokens,
            audio_input_tokens=stored.audio_input_tokens,
            audio_output_tokens=stored.audio_output_tokens,
            audio_total_tokens=stored.audio_total_tokens,
            cache_read_tokens=stored.cache_read_tokens,
            cache_write_tokens=stored.cache_write_tokens,
            reasoning_tokens=stored.reasoning_tokens,
            cost=stored.cost,
        )
    # Nothing stored (or an unrecognized type): start from zeroed metrics.
    return SessionMetrics()
def update_session_metrics(agent: Agent, session: AgentSession, run_response: RunOutput) -> None:
    """Fold the metrics of the given run into the session-level metrics."""
    metrics = get_session_metrics_internal(agent, session=session)
    # Defensive guard; the helper is annotated to always return SessionMetrics.
    if metrics is None:
        return
    if run_response.metrics is not None:
        metrics.accumulate_from_run(run_response.metrics)
    # Persist the updated totals back onto the session as a plain dict.
    if session.session_data is not None:
        session.session_data["session_metrics"] = metrics.to_dict()
def read_or_create_session(
    agent: Agent,
    session_id: str,
    user_id: Optional[str] = None,
) -> AgentSession:
    """Return the AgentSession for ``session_id``, creating one if none exists.

    Lookup order: the agent's in-memory cached session, then the database
    (only when the agent is not part of a team or workflow), then a newly
    constructed session. New sessions are seeded with a deep copy of the
    agent's default session_state and, when set, an introduction run.

    Args:
        agent: The Agent instance.
        session_id: The session ID to read or create.
        user_id: Optional user ID; a cached session must match it.

    Returns:
        AgentSession: The cached, loaded, or newly created session.
    """
    from time import time
    from uuid import uuid4
    # Returning cached session if we have one
    if (
        agent._cached_session is not None
        and agent._cached_session.session_id == session_id
        and (user_id is None or agent._cached_session.user_id == user_id)
    ):
        return agent._cached_session
    # Try to load from database
    # (only standalone agents -- no team or workflow parent -- read their own sessions)
    agent_session = None
    if agent.db is not None and agent.team_id is None and agent.workflow_id is None:
        log_debug(f"Reading AgentSession: {session_id}")
        agent_session = cast(AgentSession, read_session(agent, session_id=session_id, user_id=user_id))
    if agent_session is None:
        # Creating new session if none found
        log_debug(f"Creating new AgentSession: {session_id}")
        session_data = {}
        if agent.session_state is not None:
            from copy import deepcopy
            # Deep copy so runs never mutate the agent's default state in place.
            session_data["session_state"] = deepcopy(agent.session_state)
        agent_session = AgentSession(
            session_id=session_id,
            agent_id=agent.id,
            user_id=user_id,
            agent_data=get_agent_data(agent),
            session_data=session_data,
            metadata=agent.metadata,
            created_at=int(time()),
        )
        if agent.introduction is not None:
            # Seed the fresh session with a first run holding the introduction
            # as an assistant message.
            agent_session.upsert_run(
                RunOutput(
                    run_id=str(uuid4()),
                    session_id=session_id,
                    agent_id=agent.id,
                    agent_name=agent.name,
                    user_id=user_id,
                    content=agent.introduction,
                    messages=[
                        Message(role=agent.model.assistant_message_role, content=agent.introduction)  # type: ignore
                    ],
                )
            )
    if agent.cache_session:
        agent._cached_session = agent_session
    return agent_session
async def aread_or_create_session(
    agent: Agent,
    session_id: str,
    user_id: Optional[str] = None,
) -> AgentSession:
    """Return the AgentSession for ``session_id``, creating one if none exists (async).

    Lookup order: the agent's in-memory cached session, then the database
    (awaited when the agent is backed by an async db, sync otherwise, and
    only when the agent is not part of a team or workflow), then a newly
    constructed session. New sessions are seeded with a deep copy of the
    agent's default session_state and, when set, an introduction run.

    Args:
        agent: The Agent instance.
        session_id: The session ID to read or create.
        user_id: Optional user ID; a cached session must match it.

    Returns:
        AgentSession: The cached, loaded, or newly created session.
    """
    from time import time
    from uuid import uuid4
    from agno.agent import _init
    # Returning cached session if we have one
    if (
        agent._cached_session is not None
        and agent._cached_session.session_id == session_id
        and (user_id is None or agent._cached_session.user_id == user_id)
    ):
        return agent._cached_session
    # Try to load from database
    # (only standalone agents -- no team or workflow parent -- read their own sessions)
    agent_session = None
    if agent.db is not None and agent.team_id is None and agent.workflow_id is None:
        log_debug(f"Reading AgentSession: {session_id}")
        if _init.has_async_db(agent):
            agent_session = cast(AgentSession, await aread_session(agent, session_id=session_id, user_id=user_id))
        else:
            agent_session = cast(AgentSession, read_session(agent, session_id=session_id, user_id=user_id))
    if agent_session is None:
        # Creating new session if none found
        log_debug(f"Creating new AgentSession: {session_id}")
        session_data = {}
        if agent.session_state is not None:
            from copy import deepcopy
            # Deep copy so runs never mutate the agent's default state in place.
            session_data["session_state"] = deepcopy(agent.session_state)
        agent_session = AgentSession(
            session_id=session_id,
            agent_id=agent.id,
            user_id=user_id,
            agent_data=get_agent_data(agent),
            session_data=session_data,
            metadata=agent.metadata,
            created_at=int(time()),
        )
        if agent.introduction is not None:
            # Seed the fresh session with a first run holding the introduction
            # as an assistant message.
            agent_session.upsert_run(
                RunOutput(
                    run_id=str(uuid4()),
                    session_id=session_id,
                    agent_id=agent.id,
                    agent_name=agent.name,
                    user_id=user_id,
                    content=agent.introduction,
                    messages=[
                        Message(role=agent.model.assistant_message_role, content=agent.introduction)  # type: ignore
                    ],
                )
            )
    if agent.cache_session:
        agent._cached_session = agent_session
    return agent_session
# ---------------------------------------------------------------------------
# Serialization
# ---------------------------------------------------------------------------
def get_agent_data(agent: Agent) -> Dict[str, Any]:
    """Build the minimal agent snapshot stored on a session.

    Includes only the fields that are set: name, agent_id, and the
    serialized model.
    """
    snapshot = {key: value for key, value in (("name", agent.name), ("agent_id", agent.id)) if value is not None}
    if agent.model is not None:
        snapshot["model"] = agent.model.to_dict()
    return snapshot
def to_dict(agent: Agent) -> Dict[str, Any]:
    """
    Convert the Agent to a dictionary.

    Only settings that differ from their defaults are written, so the
    resulting dict stays minimal. Callables (knowledge retrievers, dynamic
    system messages, instruction factories) and several manager objects are
    skipped -- see the TODO markers below.

    Returns:
        Dict[str, Any]: Dictionary representation of the agent configuration
    """
    from agno.agent._tools import parse_tools
    config: Dict[str, Any] = {}
    # --- Agent Settings ---
    if agent.model is not None:
        if isinstance(agent.model, Model):
            config["model"] = agent.model.to_dict()
        else:
            config["model"] = str(agent.model)
    if agent.name is not None:
        config["name"] = agent.name
    if agent.id is not None:
        config["id"] = agent.id
    # --- User settings ---
    if agent.user_id is not None:
        config["user_id"] = agent.user_id
    # --- Session settings ---
    if agent.session_id is not None:
        config["session_id"] = agent.session_id
    if agent.session_state is not None:
        config["session_state"] = agent.session_state
    if agent.add_session_state_to_context:
        config["add_session_state_to_context"] = agent.add_session_state_to_context
    if agent.enable_agentic_state:
        config["enable_agentic_state"] = agent.enable_agentic_state
    if agent.overwrite_db_session_state:
        config["overwrite_db_session_state"] = agent.overwrite_db_session_state
    if agent.cache_session:
        config["cache_session"] = agent.cache_session
    if agent.search_session_history:
        config["search_session_history"] = agent.search_session_history
    if agent.num_history_sessions is not None:
        config["num_history_sessions"] = agent.num_history_sessions
    if agent.enable_session_summaries:
        config["enable_session_summaries"] = agent.enable_session_summaries
    if agent.add_session_summary_to_context is not None:
        config["add_session_summary_to_context"] = agent.add_session_summary_to_context
    # TODO: implement session summary manager serialization
    # if agent.session_summary_manager is not None:
    #     config["session_summary_manager"] = agent.session_summary_manager.to_dict()
    # --- Dependencies ---
    if agent.dependencies is not None:
        config["dependencies"] = agent.dependencies
    if agent.add_dependencies_to_context:
        config["add_dependencies_to_context"] = agent.add_dependencies_to_context
    # --- Agentic Memory settings ---
    # TODO: implement agentic memory serialization
    # if agent.memory_manager is not None:
    #     config["memory_manager"] = agent.memory_manager.to_dict()
    if agent.enable_agentic_memory:
        config["enable_agentic_memory"] = agent.enable_agentic_memory
    if agent.enable_user_memories:
        config["enable_user_memories"] = agent.enable_user_memories
    if agent.add_memories_to_context is not None:
        config["add_memories_to_context"] = agent.add_memories_to_context
    # --- Learning settings ---
    # learning may be a bool or an object exposing to_dict(); anything else
    # is collapsed to its truthiness.
    if agent.learning is not None:
        if agent.learning is True:
            config["learning"] = True
        elif agent.learning is False:
            config["learning"] = False
        elif hasattr(agent.learning, "to_dict"):
            config["learning"] = agent.learning.to_dict()
        else:
            config["learning"] = True if agent.learning else False
    if not agent.add_learnings_to_context:  # default is True
        config["add_learnings_to_context"] = agent.add_learnings_to_context
    # --- Database settings ---
    if agent.db is not None and hasattr(agent.db, "to_dict"):
        config["db"] = agent.db.to_dict()
    # --- History settings ---
    if agent.add_history_to_context:
        config["add_history_to_context"] = agent.add_history_to_context
    if agent.num_history_runs is not None:
        config["num_history_runs"] = agent.num_history_runs
    if agent.num_history_messages is not None:
        config["num_history_messages"] = agent.num_history_messages
    if agent.max_tool_calls_from_history is not None:
        config["max_tool_calls_from_history"] = agent.max_tool_calls_from_history
    # --- Knowledge settings ---
    # TODO: implement knowledge serialization
    # if agent.knowledge is not None:
    #     config["knowledge"] = agent.knowledge.to_dict()
    if agent.knowledge_filters is not None:
        config["knowledge_filters"] = agent.knowledge_filters
    if agent.enable_agentic_knowledge_filters:
        config["enable_agentic_knowledge_filters"] = agent.enable_agentic_knowledge_filters
    if agent.add_knowledge_to_context:
        config["add_knowledge_to_context"] = agent.add_knowledge_to_context
    if not agent.search_knowledge:
        config["search_knowledge"] = agent.search_knowledge
    if agent.add_search_knowledge_instructions:
        config["add_search_knowledge_instructions"] = agent.add_search_knowledge_instructions
    # Skip knowledge_retriever as it's a callable
    if agent.references_format != "json":
        config["references_format"] = agent.references_format
    # --- Tools ---
    # Serialize tools to their dictionary representations (skip callable factories)
    _tools: List[Union[Function, dict]] = []
    if agent.model is not None and agent.tools and isinstance(agent.tools, list):
        _tools = parse_tools(
            agent,
            model=agent.model,
            tools=agent.tools,
        )
    if _tools:
        serialized_tools = []
        for tool in _tools:
            try:
                if isinstance(tool, Function):
                    serialized_tools.append(tool.to_dict())
                else:
                    serialized_tools.append(tool)
            except Exception as e:
                # Skip tools that can't be serialized
                log_warning(f"Could not serialize tool {tool}: {e}")
        if serialized_tools:
            config["tools"] = serialized_tools
    if agent.tool_call_limit is not None:
        config["tool_call_limit"] = agent.tool_call_limit
    if agent.tool_choice is not None:
        config["tool_choice"] = agent.tool_choice
    # --- Reasoning settings ---
    if agent.reasoning:
        config["reasoning"] = agent.reasoning
    if agent.reasoning_model is not None:
        if isinstance(agent.reasoning_model, Model):
            config["reasoning_model"] = agent.reasoning_model.to_dict()
        else:
            config["reasoning_model"] = str(agent.reasoning_model)
    # Skip reasoning_agent to avoid circular serialization
    if agent.reasoning_min_steps != 1:
        config["reasoning_min_steps"] = agent.reasoning_min_steps
    if agent.reasoning_max_steps != 10:
        config["reasoning_max_steps"] = agent.reasoning_max_steps
    # --- Default tools settings ---
    if agent.read_chat_history:
        config["read_chat_history"] = agent.read_chat_history
    if agent.update_knowledge:
        config["update_knowledge"] = agent.update_knowledge
    if agent.read_tool_call_history:
        config["read_tool_call_history"] = agent.read_tool_call_history
    if not agent.send_media_to_model:
        config["send_media_to_model"] = agent.send_media_to_model
    if not agent.store_media:
        config["store_media"] = agent.store_media
    if not agent.store_tool_messages:
        config["store_tool_messages"] = agent.store_tool_messages
    if agent.store_history_messages:
        config["store_history_messages"] = agent.store_history_messages
    # --- System message settings ---
    # Skip system_message if it's a callable or Message object
    # TODO: Support Message objects
    if agent.system_message is not None and isinstance(agent.system_message, str):
        config["system_message"] = agent.system_message
    if agent.system_message_role != "system":
        config["system_message_role"] = agent.system_message_role
    if not agent.build_context:
        config["build_context"] = agent.build_context
    # --- Context building settings ---
    if agent.description is not None:
        config["description"] = agent.description
    # Handle instructions (can be str, list, or callable)
    if agent.instructions is not None:
        if isinstance(agent.instructions, str):
            config["instructions"] = agent.instructions
        elif isinstance(agent.instructions, list):
            config["instructions"] = agent.instructions
        # Skip if callable
    if agent.expected_output is not None:
        config["expected_output"] = agent.expected_output
    if agent.additional_context is not None:
        config["additional_context"] = agent.additional_context
    if agent.markdown:
        config["markdown"] = agent.markdown
    if agent.add_name_to_context:
        config["add_name_to_context"] = agent.add_name_to_context
    if agent.add_datetime_to_context:
        config["add_datetime_to_context"] = agent.add_datetime_to_context
    if agent.add_location_to_context:
        config["add_location_to_context"] = agent.add_location_to_context
    if agent.timezone_identifier is not None:
        config["timezone_identifier"] = agent.timezone_identifier
    if not agent.resolve_in_context:
        config["resolve_in_context"] = agent.resolve_in_context
    # --- Additional input ---
    # Skip additional_input as it may contain complex Message objects
    # TODO: Support Message objects
    # --- User message settings ---
    if agent.user_message_role != "user":
        config["user_message_role"] = agent.user_message_role
    if not agent.build_user_context:
        config["build_user_context"] = agent.build_user_context
    # --- Response settings ---
    if agent.retries > 0:
        config["retries"] = agent.retries
    if agent.delay_between_retries != 1:
        config["delay_between_retries"] = agent.delay_between_retries
    if agent.exponential_backoff:
        config["exponential_backoff"] = agent.exponential_backoff
    # --- Schema settings ---
    # Pydantic schema classes are stored by name only; from_dict needs a
    # registry to turn the name back into a class.
    if agent.input_schema is not None:
        if isinstance(agent.input_schema, type) and issubclass(agent.input_schema, BaseModel):
            config["input_schema"] = agent.input_schema.__name__
        elif isinstance(agent.input_schema, dict):
            config["input_schema"] = agent.input_schema
    if agent.output_schema is not None:
        if isinstance(agent.output_schema, type) and issubclass(agent.output_schema, BaseModel):
            config["output_schema"] = agent.output_schema.__name__
        elif isinstance(agent.output_schema, dict):
            config["output_schema"] = agent.output_schema
    # --- Parser and output settings ---
    if agent.parser_model is not None:
        if isinstance(agent.parser_model, Model):
            config["parser_model"] = agent.parser_model.to_dict()
        else:
            config["parser_model"] = str(agent.parser_model)
    if agent.parser_model_prompt is not None:
        config["parser_model_prompt"] = agent.parser_model_prompt
    if agent.output_model is not None:
        if isinstance(agent.output_model, Model):
            config["output_model"] = agent.output_model.to_dict()
        else:
            config["output_model"] = str(agent.output_model)
    if agent.output_model_prompt is not None:
        config["output_model_prompt"] = agent.output_model_prompt
    if not agent.parse_response:
        config["parse_response"] = agent.parse_response
    if agent.structured_outputs is not None:
        config["structured_outputs"] = agent.structured_outputs
    if agent.use_json_mode:
        config["use_json_mode"] = agent.use_json_mode
    if agent.save_response_to_file is not None:
        config["save_response_to_file"] = agent.save_response_to_file
    # --- Streaming settings ---
    if agent.stream is not None:
        config["stream"] = agent.stream
    if agent.stream_events is not None:
        config["stream_events"] = agent.stream_events
    if agent.store_events:
        config["store_events"] = agent.store_events
    # Skip events_to_skip as it contains RunEvent enums
    # --- Role and culture settings ---
    if agent.role is not None:
        config["role"] = agent.role
    # --- Team and workflow settings ---
    if agent.team_id is not None:
        config["team_id"] = agent.team_id
    if agent.workflow_id is not None:
        config["workflow_id"] = agent.workflow_id
    # --- Metadata ---
    if agent.metadata is not None:
        config["metadata"] = agent.metadata
    # --- Context compression settings ---
    if agent.compress_tool_results:
        config["compress_tool_results"] = agent.compress_tool_results
    # TODO: implement compression manager serialization
    # if agent.compression_manager is not None:
    #     config["compression_manager"] = agent.compression_manager.to_dict()
    # --- Callable factory settings ---
    if not agent.cache_callables:
        config["cache_callables"] = agent.cache_callables
    # --- Debug and telemetry settings ---
    if agent.debug_mode:
        config["debug_mode"] = agent.debug_mode
    if agent.debug_level != 1:
        config["debug_level"] = agent.debug_level
    if not agent.telemetry:
        config["telemetry"] = agent.telemetry
    return config
def from_dict(cls: Type[Agent], data: Dict[str, Any], registry: Optional[Registry] = None) -> Agent:
    """
    Create an agent from a dictionary.

    Inverse of to_dict(). Model, tools, db, schemas, and the learning
    machine are rehydrated from their serialized forms; several manager
    objects are not yet supported (see the TODO markers below). Keys that
    are not constructor parameters are dropped.

    Args:
        cls: The Agent class (or subclass) to instantiate.
        data: Dictionary containing agent configuration
        registry: Optional registry for rehydrating tools and schemas

    Returns:
        Agent: Reconstructed agent instance
    """
    from agno.models.utils import get_model
    # Work on a copy so the caller's dict is never mutated.
    config = data.copy()
    # --- Handle Model reconstruction ---
    if "model" in config:
        model_data = config["model"]
        if isinstance(model_data, dict) and "id" in model_data:
            config["model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
        elif isinstance(model_data, str):
            config["model"] = get_model(model_data)
    # --- Handle reasoning_model reconstruction ---
    # TODO: implement reasoning model deserialization
    # if "reasoning_model" in config:
    #     model_data = config["reasoning_model"]
    #     if isinstance(model_data, dict) and "id" in model_data:
    #         config["reasoning_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
    #     elif isinstance(model_data, str):
    #         config["reasoning_model"] = get_model(model_data)
    # --- Handle parser_model reconstruction ---
    # TODO: implement parser model deserialization
    # if "parser_model" in config:
    #     model_data = config["parser_model"]
    #     if isinstance(model_data, dict) and "id" in model_data:
    #         config["parser_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
    #     elif isinstance(model_data, str):
    #         config["parser_model"] = get_model(model_data)
    # --- Handle output_model reconstruction ---
    # TODO: implement output model deserialization
    # if "output_model" in config:
    #     model_data = config["output_model"]
    #     if isinstance(model_data, dict) and "id" in model_data:
    #         config["output_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
    #     elif isinstance(model_data, str):
    #         config["output_model"] = get_model(model_data)
    # --- Handle tools reconstruction ---
    # Tools can only be rehydrated through a registry; without one they are
    # dropped with a warning.
    if "tools" in config and config["tools"]:
        if registry:
            config["tools"] = [registry.rehydrate_function(t) for t in config["tools"]]
        else:
            log_warning("No registry provided, tools will not be rehydrated.")
            del config["tools"]
    # --- Handle DB reconstruction ---
    if "db" in config and isinstance(config["db"], dict):
        db_data = config["db"]
        db_id = db_data.get("id")
        # First try to get the db from the registry (preferred - reuses existing connection)
        if registry and db_id:
            registry_db = registry.get_db(db_id)
            if registry_db is not None:
                config["db"] = registry_db
            else:
                del config["db"]
        else:
            # No registry or no db_id, fall back to creating from dict
            config["db"] = db_from_dict(db_data)
            if config["db"] is None:
                del config["db"]
    # --- Handle Schema reconstruction ---
    # Schemas were serialized by class name; resolve them via the registry.
    if "input_schema" in config and isinstance(config["input_schema"], str):
        schema_cls = registry.get_schema(config["input_schema"]) if registry else None
        if schema_cls:
            config["input_schema"] = schema_cls
        else:
            log_warning(f"Input schema {config['input_schema']} not found in registry, skipping.")
            del config["input_schema"]
    if "output_schema" in config and isinstance(config["output_schema"], str):
        schema_cls = registry.get_schema(config["output_schema"]) if registry else None
        if schema_cls:
            config["output_schema"] = schema_cls
        else:
            log_warning(f"Output schema {config['output_schema']} not found in registry, skipping.")
            del config["output_schema"]
    # --- Handle MemoryManager reconstruction ---
    # TODO: implement memory manager deserialization
    # if "memory_manager" in config and isinstance(config["memory_manager"], dict):
    #     from agno.memory import MemoryManager
    #     config["memory_manager"] = MemoryManager.from_dict(config["memory_manager"])
    # --- Handle SessionSummaryManager reconstruction ---
    # TODO: implement session summary manager deserialization
    # if "session_summary_manager" in config and isinstance(config["session_summary_manager"], dict):
    #     from agno.session import SessionSummaryManager
    #     config["session_summary_manager"] = SessionSummaryManager.from_dict(config["session_summary_manager"])
    # --- Handle CultureManager reconstruction ---
    # TODO: implement culture manager deserialization
    # if "culture_manager" in config and isinstance(config["culture_manager"], dict):
    #     from agno.culture import CultureManager
    #     config["culture_manager"] = CultureManager.from_dict(config["culture_manager"])
    # --- Handle Knowledge reconstruction ---
    # TODO: implement knowledge deserialization
    # if "knowledge" in config and isinstance(config["knowledge"], dict):
    #     from agno.knowledge import Knowledge
    #     config["knowledge"] = Knowledge.from_dict(config["knowledge"])
    # --- Handle CompressionManager reconstruction ---
    # TODO: implement compression manager deserialization
    # if "compression_manager" in config and isinstance(config["compression_manager"], dict):
    #     from agno.compression.manager import CompressionManager
    #     config["compression_manager"] = CompressionManager.from_dict(config["compression_manager"])
    # --- Handle Learning reconstruction ---
    if "learning" in config and isinstance(config["learning"], dict):
        from agno.learn.machine import LearningMachine
        config["learning"] = LearningMachine.from_dict(config["learning"])
    # Remove keys that aren't constructor parameters
    config.pop("team_id", None)
    config.pop("workflow_id", None)
    # Constructor defaults below mirror the elision rules used by to_dict().
    return cls(
        # --- Agent settings ---
        model=config.get("model"),
        name=config.get("name"),
        id=config.get("id"),
        # --- User settings ---
        user_id=config.get("user_id"),
        # --- Session settings ---
        session_id=config.get("session_id"),
        session_state=config.get("session_state"),
        add_session_state_to_context=config.get("add_session_state_to_context", False),
        enable_agentic_state=config.get("enable_agentic_state", False),
        overwrite_db_session_state=config.get("overwrite_db_session_state", False),
        cache_session=config.get("cache_session", False),
        search_session_history=config.get("search_session_history", False),
        num_history_sessions=config.get("num_history_sessions"),
        enable_session_summaries=config.get("enable_session_summaries", False),
        add_session_summary_to_context=config.get("add_session_summary_to_context"),
        # session_summary_manager=config.get("session_summary_manager"),  # TODO
        # --- Dependencies ---
        dependencies=config.get("dependencies"),
        add_dependencies_to_context=config.get("add_dependencies_to_context", False),
        # --- Agentic Memory settings ---
        # memory_manager=config.get("memory_manager"),  # TODO
        enable_agentic_memory=config.get("enable_agentic_memory", False),
        enable_user_memories=config.get("enable_user_memories", False),
        add_memories_to_context=config.get("add_memories_to_context"),
        # --- Learning settings ---
        learning=config.get("learning"),
        add_learnings_to_context=config.get("add_learnings_to_context", True),
        # --- Database settings ---
        db=config.get("db"),
        # --- History settings ---
        add_history_to_context=config.get("add_history_to_context", False),
        num_history_runs=config.get("num_history_runs"),
        num_history_messages=config.get("num_history_messages"),
        max_tool_calls_from_history=config.get("max_tool_calls_from_history"),
        # --- Knowledge settings ---
        # knowledge=config.get("knowledge"),  # TODO
        knowledge_filters=config.get("knowledge_filters"),
        enable_agentic_knowledge_filters=config.get("enable_agentic_knowledge_filters", False),
        add_knowledge_to_context=config.get("add_knowledge_to_context", False),
        references_format=config.get("references_format", "json"),
        # --- Tools ---
        tools=config.get("tools"),
        tool_call_limit=config.get("tool_call_limit"),
        tool_choice=config.get("tool_choice"),
        # --- Reasoning settings ---
        reasoning=config.get("reasoning", False),
        # reasoning_model=config.get("reasoning_model"),  # TODO
        reasoning_min_steps=config.get("reasoning_min_steps", 1),
        reasoning_max_steps=config.get("reasoning_max_steps", 10),
        # --- Default tools settings ---
        read_chat_history=config.get("read_chat_history", False),
        search_knowledge=config.get("search_knowledge", True),
        add_search_knowledge_instructions=config.get("add_search_knowledge_instructions", True),
        update_knowledge=config.get("update_knowledge", False),
        read_tool_call_history=config.get("read_tool_call_history", False),
        send_media_to_model=config.get("send_media_to_model", True),
        store_media=config.get("store_media", True),
        store_tool_messages=config.get("store_tool_messages", True),
        store_history_messages=config.get("store_history_messages", False),
        # --- System message settings ---
        system_message=config.get("system_message"),
        system_message_role=config.get("system_message_role", "system"),
        build_context=config.get("build_context", True),
        # --- Context building settings ---
        description=config.get("description"),
        instructions=config.get("instructions"),
        expected_output=config.get("expected_output"),
        additional_context=config.get("additional_context"),
        markdown=config.get("markdown", False),
        add_name_to_context=config.get("add_name_to_context", False),
        add_datetime_to_context=config.get("add_datetime_to_context", False),
        add_location_to_context=config.get("add_location_to_context", False),
        timezone_identifier=config.get("timezone_identifier"),
        resolve_in_context=config.get("resolve_in_context", True),
        # --- User message settings ---
        user_message_role=config.get("user_message_role", "user"),
        build_user_context=config.get("build_user_context", True),
        # --- Response settings ---
        retries=config.get("retries", 0),
        delay_between_retries=config.get("delay_between_retries", 1),
        exponential_backoff=config.get("exponential_backoff", False),
        # --- Schema settings ---
        input_schema=config.get("input_schema"),
        output_schema=config.get("output_schema"),
        # --- Parser and output settings ---
        # parser_model=config.get("parser_model"),  # TODO
        parser_model_prompt=config.get("parser_model_prompt"),
        # output_model=config.get("output_model"),  # TODO
        output_model_prompt=config.get("output_model_prompt"),
        parse_response=config.get("parse_response", True),
        structured_outputs=config.get("structured_outputs"),
        use_json_mode=config.get("use_json_mode", False),
        save_response_to_file=config.get("save_response_to_file"),
        # --- Streaming settings ---
        stream=config.get("stream"),
        stream_events=config.get("stream_events"),
        store_events=config.get("store_events", False),
        role=config.get("role"),
        # --- Culture settings ---
        # culture_manager=config.get("culture_manager"),  # TODO
        # --- Metadata ---
        metadata=config.get("metadata"),
        # --- Compression settings ---
        compress_tool_results=config.get("compress_tool_results", False),
        # compression_manager=config.get("compression_manager"),  # TODO
        # --- Debug and telemetry settings ---
        debug_mode=config.get("debug_mode", False),
        debug_level=config.get("debug_level", 1),
        telemetry=config.get("telemetry", True),
    )
# ---------------------------------------------------------------------------
# Component persistence
# ---------------------------------------------------------------------------
def save(
    agent: Agent,
    *,
    db: Optional[BaseDb] = None,
    stage: str = "published",
    label: Optional[str] = None,
    notes: Optional[str] = None,
) -> Optional[int]:
    """
    Persist the agent component and its serialized config.

    Args:
        agent: The Agent instance.
        db: The database to save the component and config to.
        stage: The stage of the component. Defaults to "published".
        label: The label of the component.
        notes: The notes of the component.

    Returns:
        Optional[int]: The version number of the saved config.
    """
    target_db = db or agent.db
    if not target_db:
        raise ValueError("Db not initialized or provided")
    if not isinstance(target_db, BaseDb):
        raise ValueError("Async databases not yet supported for save(). Use a sync database.")
    if agent.id is None:
        # Derive a stable id from the agent's name before persisting.
        agent.id = generate_id_from_name(agent.name)
    try:
        # Upsert the component record first, then its versioned config.
        target_db.upsert_component(
            component_id=agent.id,
            component_type=ComponentType.AGENT,
            name=getattr(agent, "name", agent.id),
            description=getattr(agent, "description", None),
            metadata=getattr(agent, "metadata", None),
        )
        saved_config = target_db.upsert_config(
            component_id=agent.id,
            config=to_dict(agent),
            label=label,
            stage=stage,
            notes=notes,
        )
        return saved_config.get("version")
    except Exception as e:
        log_error(f"Error saving Agent to database: {e}")
        raise
def load(
    cls: Type[Agent],
    id: str,
    *,
    db: BaseDb,
    registry: Optional[Registry] = None,
    label: Optional[str] = None,
    version: Optional[int] = None,
) -> Optional[Agent]:
    """
    Load an agent by id.

    Args:
        cls: The Agent class (or subclass) to instantiate.
        id: The id of the agent to load.
        db: The database to load the agent from.
        registry: Optional registry for rehydrating tools and schemas.
        label: The label of the agent to load.
        version: The version of the agent to load.

    Returns:
        The agent loaded from the database or None if not found.
    """
    record = db.get_config(component_id=id, label=label, version=version)
    if record is None or record.get("config") is None:
        # No stored config for this id/label/version combination.
        return None
    loaded_agent = cls.from_dict(record["config"], registry=registry)
    # Re-attach the identity and db handle that are not part of the config dict.
    loaded_agent.id = id
    loaded_agent.db = db
    return loaded_agent
def delete(
    agent: Agent,
    *,
    db: Optional[BaseDb] = None,
    hard_delete: bool = False,
) -> bool:
    """
    Delete the agent component.

    Args:
        agent: The Agent instance.
        db: The database to delete the component from.
        hard_delete: Whether to hard delete the component.

    Returns:
        True if the component was deleted, False otherwise.
    """
    target_db = db or agent.db
    if not target_db:
        raise ValueError("Db not initialized or provided")
    if not isinstance(target_db, BaseDb):
        raise ValueError("Async databases not yet supported for delete(). Use a sync database.")
    if agent.id is None:
        raise ValueError("Cannot delete agent without an id")
    return target_db.delete_component(component_id=agent.id, hard_delete=hard_delete)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_storage.py",
"license": "Apache License 2.0",
"lines": 946,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_telemetry.py | """Telemetry logging helpers for Agent."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Optional
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.utils.log import log_debug
def get_telemetry_data(agent: Agent) -> Dict[str, Any]:
    """Build the telemetry payload describing which agent features are in use."""
    model = agent.model
    # Memory is considered enabled if any of the memory-related settings is active.
    memory_enabled = (
        agent.update_memory_on_run is True
        or agent.enable_agentic_memory is True
        or agent.memory_manager is not None
    )
    # Same pattern for the culture-related settings.
    culture_enabled = (
        agent.enable_agentic_culture is True
        or agent.update_cultural_knowledge is True
        or agent.culture_manager is not None
    )
    payload: Dict[str, Any] = {
        "agent_id": agent.id,
        "db_type": type(agent.db).__name__ if agent.db else None,
        "model_provider": model.provider if model else None,
        "model_name": model.name if model else None,
        "model_id": model.id if model else None,
        "parser_model": agent.parser_model.to_dict() if agent.parser_model else None,
        "output_model": agent.output_model.to_dict() if agent.output_model else None,
        "has_tools": agent.tools is not None,
        "has_memory": memory_enabled,
        "has_learnings": agent._learning is not None,
        "has_culture": culture_enabled,
        "has_reasoning": agent.reasoning is True,
        "has_knowledge": agent.knowledge is not None,
        "has_input_schema": agent.input_schema is not None,
        "has_output_schema": agent.output_schema is not None,
        "has_team": agent.team_id is not None,
    }
    return payload
def log_agent_telemetry(agent: Agent, session_id: str, run_id: Optional[str] = None) -> None:
    """Send a telemetry event to the API for a created Agent run."""
    from agno.agent import _init

    # Resolve the effective telemetry flag before checking it.
    _init.set_telemetry(agent)
    if not agent.telemetry:
        return

    from agno.api.agent import AgentRunCreate, create_agent_run

    try:
        # Best-effort: telemetry failures must never break the run.
        create_agent_run(
            run=AgentRunCreate(
                session_id=session_id,
                run_id=run_id,
                data=get_telemetry_data(agent),
            ),
        )
    except Exception as e:
        log_debug(f"Could not create Agent run telemetry event: {e}")
async def alog_agent_telemetry(agent: Agent, session_id: str, run_id: Optional[str] = None) -> None:
    """Send a telemetry event to the API for a created Agent async run."""
    from agno.agent import _init

    # Resolve the effective telemetry flag before checking it.
    _init.set_telemetry(agent)
    if not agent.telemetry:
        return

    from agno.api.agent import AgentRunCreate, acreate_agent_run

    try:
        # Best-effort: telemetry failures must never break the run.
        await acreate_agent_run(
            run=AgentRunCreate(
                session_id=session_id,
                run_id=run_id,
                data=get_telemetry_data(agent),
            )
        )
    except Exception as e:
        log_debug(f"Could not create Agent run telemetry event: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_telemetry.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_tools.py | """Tool resolution, formatting, and execution helpers for Agent."""
from __future__ import annotations
from collections import deque
from typing import (
TYPE_CHECKING,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Union,
cast,
)
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.models.base import Model
from agno.models.message import Message
from agno.models.metrics import MessageMetrics
from agno.models.response import ModelResponse, ModelResponseEvent, ToolExecution
from agno.run import RunContext
from agno.run.agent import RunOutput, RunOutputEvent
from agno.run.messages import RunMessages
from agno.session import AgentSession
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.agent import (
collect_joint_audios,
collect_joint_files,
collect_joint_images,
collect_joint_videos,
)
from agno.utils.events import (
create_tool_call_completed_event,
create_tool_call_error_event,
create_tool_call_started_event,
handle_event,
)
from agno.utils.log import log_debug, log_warning
def raise_if_async_tools(agent: Agent) -> None:
    """Raise an exception if any of the agent's tools contain async functions.

    Only applies when ``agent.tools`` is a concrete list; ``None`` or a
    callable factory (not yet resolved) is skipped.

    Raises:
        Exception: If a Toolkit function, Function entrypoint, or raw callable
            is a coroutine function (unusable from the sync run path).
    """
    if agent.tools is None:
        return
    # Skip check if tools is a callable factory (not yet resolved)
    if not isinstance(agent.tools, list):
        return
    # Delegate to the shared checker instead of duplicating the per-tool
    # inspection logic (the error messages are identical).
    _raise_if_async_tools_in_list(agent.tools)
def _raise_if_async_tools_in_list(tools: list) -> None:
    """Raise if any tools in a concrete list are async."""
    from inspect import iscoroutinefunction

    def _fail(label: str, kind: str) -> None:
        # kind is "tool" for toolkits and "function" otherwise, matching the
        # wording expected by callers of the sync run path.
        raise Exception(
            f"Async {kind} {label} can't be used with synchronous agent.run() or agent.print_response(). "
            "Use agent.arun() or agent.aprint_response() instead to use this tool."
        )

    for tool in tools:
        if isinstance(tool, Toolkit):
            # A toolkit is async if any of its registered entrypoints is a coroutine.
            for func_name in tool.functions:
                if iscoroutinefunction(tool.functions[func_name].entrypoint):
                    _fail(tool.name, "tool")
        elif isinstance(tool, Function):
            if iscoroutinefunction(tool.entrypoint):
                _fail(tool.name, "function")
        elif callable(tool) and iscoroutinefunction(tool):
            _fail(tool.__name__, "function")
def get_tools(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session: AgentSession,
    user_id: Optional[str] = None,
) -> List[Union[Toolkit, Callable, Function, Dict]]:
    """Assemble the full tool list for a synchronous run.

    Combines the user-provided tools (after resolving callable factories) with
    the built-in tools enabled by agent flags: chat/tool-call history, session
    search, agentic memory, learning, culture, session state, knowledge
    search/update, and skills.

    Args:
        agent: The Agent whose tools are being collected.
        run_response: The in-progress run output (passed to the knowledge search tool).
        run_context: The run context used to resolve factories and knowledge filters.
        session: The active agent session (used by history/memory tools).
        user_id: Optional user id forwarded to user-scoped tools.

    Returns:
        The combined list of toolkits, callables, Functions, and builtin-tool dicts.

    Raises:
        Exception: If any resolved tool is async (not usable from the sync run path).
    """
    from agno.agent import _default_tools, _init
    from agno.utils.callables import (
        get_resolved_knowledge,
        get_resolved_tools,
        resolve_callable_knowledge,
        resolve_callable_tools,
    )

    agent_tools: List[Union[Toolkit, Callable, Function, Dict]] = []

    # Resolve callable factories
    resolve_callable_tools(agent, run_context)
    resolve_callable_knowledge(agent, run_context)
    resolved_tools = get_resolved_tools(agent, run_context)
    resolved_knowledge = get_resolved_knowledge(agent, run_context)

    # Connect tools that require connection management
    _init.connect_connectable_tools(agent)

    # Add provided tools
    if resolved_tools is not None:
        # If not running in async mode, raise if any tool is async
        _raise_if_async_tools_in_list(resolved_tools)
        agent_tools.extend(resolved_tools)

    # Add tools for accessing memory
    if agent.read_chat_history:
        agent_tools.append(_default_tools.get_chat_history_function(agent, session=session))
    if agent.read_tool_call_history:
        agent_tools.append(_default_tools.get_tool_call_history_function(agent, session=session))
    if agent.search_session_history:
        agent_tools.append(
            _default_tools.get_previous_sessions_messages_function(
                agent, num_history_sessions=agent.num_history_sessions, user_id=user_id
            )
        )
    if agent.enable_agentic_memory:
        agent_tools.append(_default_tools.get_update_user_memory_function(agent, user_id=user_id, async_mode=False))

    # Add learning machine tools
    if agent._learning is not None:
        learning_tools = agent._learning.get_tools(
            user_id=user_id,
            session_id=session.session_id if session else None,
            agent_id=agent.id,
        )
        agent_tools.extend(learning_tools)

    if agent.enable_agentic_culture:
        agent_tools.append(_default_tools.get_update_cultural_knowledge_function(agent, async_mode=False))

    if agent.enable_agentic_state:
        agent_tools.append(
            Function(
                name="update_session_state",
                entrypoint=_default_tools.make_update_session_state_entrypoint(agent),
            )
        )

    # Add tools for accessing knowledge
    # Single unified path through get_relevant_docs_from_knowledge(),
    # which checks knowledge_retriever first, then falls back to knowledge.search().
    if (resolved_knowledge is not None or agent.knowledge_retriever is not None) and agent.search_knowledge:
        agent_tools.append(
            _default_tools.create_knowledge_search_tool(
                agent,
                run_response=run_response,
                run_context=run_context,
                knowledge_filters=run_context.knowledge_filters,
                enable_agentic_filters=agent.enable_agentic_knowledge_filters,
                async_mode=False,
            )
        )
    if resolved_knowledge is not None and agent.update_knowledge:
        agent_tools.append(agent.add_to_knowledge)

    # Add tools for accessing skills
    if agent.skills is not None:
        agent_tools.extend(agent.skills.get_tools())

    return agent_tools
async def aget_tools(
    agent: Agent,
    run_response: RunOutput,
    run_context: RunContext,
    session: AgentSession,
    user_id: Optional[str] = None,
    check_mcp_tools: bool = True,
) -> List[Union[Toolkit, Callable, Function, Dict]]:
    """Async counterpart of ``get_tools``: assemble the full tool list for a run.

    In addition to the sync path, this connects MCP tools and (optionally)
    drops MCP tools that failed to connect or build.

    Args:
        agent: The Agent whose tools are being collected.
        run_response: The in-progress run output (passed to the knowledge search tool).
        run_context: The run context used to resolve factories and knowledge filters.
        session: The active agent session (used by history/memory tools).
        user_id: Optional user id forwarded to user-scoped tools.
        check_mcp_tools: If True, skip MCP tools that did not initialize.

    Returns:
        The combined list of toolkits, callables, Functions, and builtin-tool dicts.
    """
    from agno.agent import _default_tools, _init
    from agno.utils.callables import (
        aresolve_callable_knowledge,
        aresolve_callable_tools,
        get_resolved_knowledge,
        get_resolved_tools,
    )

    agent_tools: List[Union[Toolkit, Callable, Function, Dict]] = []

    # Resolve callable factories
    await aresolve_callable_tools(agent, run_context)
    await aresolve_callable_knowledge(agent, run_context)
    resolved_tools = get_resolved_tools(agent, run_context)
    resolved_knowledge = get_resolved_knowledge(agent, run_context)

    # Connect tools that require connection management
    _init.connect_connectable_tools(agent)
    # Connect MCP tools
    await _init.connect_mcp_tools(agent)

    # Add provided tools
    if resolved_tools is not None:
        for tool in resolved_tools:
            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
            is_mcp_tool = hasattr(type(tool), "__mro__") and any(
                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
            )
            if is_mcp_tool:
                if tool.refresh_connection:  # type: ignore
                    try:
                        is_alive = await tool.is_alive()  # type: ignore
                        if not is_alive:
                            await tool.connect(force=True)  # type: ignore
                    except (RuntimeError, BaseException) as e:
                        log_warning(f"Failed to check if MCP tool is alive or to connect to it: {e}")
                        continue
                try:
                    await tool.build_tools()  # type: ignore
                except (RuntimeError, BaseException) as e:
                    log_warning(f"Failed to build tools for {str(tool)}: {e}")
                    continue
                # Only add the tool if it successfully connected and built its tools
                if check_mcp_tools and not tool.initialized:  # type: ignore
                    continue
            # Add the tool (MCP tools that passed checks, or any non-MCP tool)
            agent_tools.append(tool)

    # Add tools for accessing memory
    if agent.read_chat_history:
        agent_tools.append(_default_tools.get_chat_history_function(agent, session=session))
    if agent.read_tool_call_history:
        agent_tools.append(_default_tools.get_tool_call_history_function(agent, session=session))
    if agent.search_session_history:
        agent_tools.append(
            await _default_tools.aget_previous_sessions_messages_function(
                agent, num_history_sessions=agent.num_history_sessions, user_id=user_id
            )
        )
    if agent.enable_agentic_memory:
        agent_tools.append(_default_tools.get_update_user_memory_function(agent, user_id=user_id, async_mode=True))

    # Add learning machine tools (async)
    if agent._learning is not None:
        learning_tools = await agent._learning.aget_tools(
            user_id=user_id,
            session_id=session.session_id if session else None,
            agent_id=agent.id,
        )
        agent_tools.extend(learning_tools)

    if agent.enable_agentic_culture:
        agent_tools.append(_default_tools.get_update_cultural_knowledge_function(agent, async_mode=True))

    if agent.enable_agentic_state:
        agent_tools.append(
            Function(
                name="update_session_state",
                entrypoint=_default_tools.make_update_session_state_entrypoint(agent),
            )
        )

    # Add tools for accessing knowledge
    # Single unified path through aget_relevant_docs_from_knowledge(),
    # which checks knowledge_retriever first, then falls back to knowledge.search().
    if (resolved_knowledge is not None or agent.knowledge_retriever is not None) and agent.search_knowledge:
        agent_tools.append(
            _default_tools.create_knowledge_search_tool(
                agent,
                run_response=run_response,
                run_context=run_context,
                knowledge_filters=run_context.knowledge_filters,
                enable_agentic_filters=agent.enable_agentic_knowledge_filters,
                async_mode=True,
            )
        )
    if resolved_knowledge is not None and agent.update_knowledge:
        agent_tools.append(agent.add_to_knowledge)

    # Add tools for accessing skills
    if agent.skills is not None:
        agent_tools.extend(agent.skills.get_tools())

    return agent_tools
def parse_tools(
    agent: Agent,
    tools: List[Union[Toolkit, Callable, Function, Dict]],
    model: Model,
    run_context: Optional[RunContext] = None,
    async_mode: bool = False,
) -> List[Union[Function, dict]]:
    """Normalize the agent's tools into ``Function`` objects for the model.

    Dicts pass through unchanged (builtin provider tools). Toolkit functions,
    Function instances, and raw callables are deduplicated by name, deep-copied
    so per-run mutations don't leak into shared objects, processed for strict
    mode, and wired to the agent's tool hooks. Toolkit/Function instructions
    are collected into ``agent._tool_instructions``.

    Args:
        agent: The Agent that owns the tools (receives ``_tool_instructions``).
        tools: Raw tool list (toolkits, Functions, callables, builtin dicts).
        model: The model the tools are being prepared for (strict-mode check).
        run_context: Optional run context providing the output schema.
        async_mode: If True, pull async entrypoints from toolkits.

    Returns:
        The processed list of Function objects and builtin-tool dicts.
    """
    _function_names: List[str] = []
    _functions: List[Union[Function, dict]] = []
    agent._tool_instructions = []

    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None

    # Check if we need strict mode for the functions for the model
    strict = False
    if (
        output_schema is not None
        and (agent.structured_outputs or (not agent.use_json_mode))
        and model.supports_native_structured_outputs
    ):
        strict = True

    for tool in tools:
        if isinstance(tool, Dict):
            # If a dict is passed, it is a builtin tool
            # that is run by the model provider and not the Agent
            _functions.append(tool)
            log_debug(f"Included builtin tool {tool}")
        elif isinstance(tool, Toolkit):
            # For each function in the toolkit and process entrypoint
            toolkit_functions = tool.get_async_functions() if async_mode else tool.get_functions()
            for name, _func in toolkit_functions.items():
                if name in _function_names:
                    continue
                _function_names.append(name)
                # Deep copy so per-run mutations don't touch the toolkit's shared Function
                _func = _func.model_copy(deep=True)
                _func._agent = agent
                # Respect the function's explicit strict setting if set
                effective_strict = strict if _func.strict is None else _func.strict
                _func.process_entrypoint(strict=effective_strict)
                if strict and _func.strict is None:
                    _func.strict = True
                if agent.tool_hooks is not None:
                    _func.tool_hooks = agent.tool_hooks
                _functions.append(_func)
                log_debug(f"Added tool {name} from {tool.name}")
            # Add instructions from the toolkit
            if tool.add_instructions and tool.instructions is not None:
                agent._tool_instructions.append(tool.instructions)
        elif isinstance(tool, Function):
            if tool.name in _function_names:
                continue
            _function_names.append(tool.name)
            # Deep copy so per-run mutations don't touch the caller's Function
            tool = tool.model_copy(deep=True)
            # Respect the function's explicit strict setting if set
            effective_strict = strict if tool.strict is None else tool.strict
            tool.process_entrypoint(strict=effective_strict)
            tool._agent = agent
            if strict and tool.strict is None:
                tool.strict = True
            if agent.tool_hooks is not None:
                tool.tool_hooks = agent.tool_hooks
            _functions.append(tool)
            log_debug(f"Added tool {tool.name}")
            # Add instructions from the Function
            if tool.add_instructions and tool.instructions is not None:
                agent._tool_instructions.append(tool.instructions)
        elif callable(tool):
            try:
                function_name = tool.__name__
                if function_name in _function_names:
                    continue
                _function_names.append(function_name)
                _func = Function.from_callable(tool, strict=strict)
                # Detect @approval sentinel on raw callable
                _approval_type = getattr(tool, "_agno_approval_type", None)
                if _approval_type is not None:
                    _func.approval_type = _approval_type
                    if _approval_type == "required" and not any(
                        [_func.requires_user_input, _func.requires_confirmation, _func.external_execution]
                    ):
                        _func.requires_confirmation = True
                    elif _approval_type == "audit" and not any(
                        [_func.requires_user_input, _func.requires_confirmation, _func.external_execution]
                    ):
                        # NOTE(review): this is raised inside the surrounding try, so the
                        # except below catches it and only logs a warning -- the
                        # misconfigured tool is silently skipped rather than failing the
                        # run. Confirm that's intended.
                        raise ValueError(
                            "@approval(type='audit') requires at least one HITL flag "
                            "('requires_confirmation', 'requires_user_input', or 'external_execution') "
                            "to be set on @tool()."
                        )
                _func = _func.model_copy(deep=True)
                _func._agent = agent
                if strict:
                    _func.strict = True
                if agent.tool_hooks is not None:
                    _func.tool_hooks = agent.tool_hooks
                _functions.append(_func)
                log_debug(f"Added tool {_func.name}")
            except Exception as e:
                log_warning(f"Could not add tool {tool}: {e}")

    return _functions
def determine_tools_for_model(
    agent: Agent,
    model: Model,
    processed_tools: List[Union[Toolkit, Callable, Function, Dict]],
    run_response: RunOutput,
    run_context: RunContext,
    session: AgentSession,
    async_mode: bool = False,
) -> List[Union[Function, dict]]:
    """Parse the agent's tools for the model and attach run-scoped context/media."""
    if not processed_tools:
        # Nothing to process; the model gets no tools.
        return []

    log_debug("Processing tools for model")
    functions = parse_tools(agent, tools=processed_tools, model=model, run_context=run_context, async_mode=async_mode)
    if not functions:
        return functions

    from inspect import signature

    media_params = ("images", "videos", "audios", "files")
    # Only collect media when at least one entrypoint declares a media parameter.
    needs_media = any(
        any(param in signature(fn.entrypoint).parameters for param in media_params)
        for fn in functions
        if isinstance(fn, Function) and fn.entrypoint is not None
    )
    joint_images = collect_joint_images(run_response.input, session) if needs_media else None
    joint_files = collect_joint_files(run_response.input) if needs_media else None
    joint_audios = collect_joint_audios(run_response.input, session) if needs_media else None
    joint_videos = collect_joint_videos(run_response.input, session) if needs_media else None

    # Attach the run context and collected media to every Function.
    for fn in functions:
        if isinstance(fn, Function):
            fn._run_context = run_context
            fn._images = joint_images
            fn._files = joint_files
            fn._audios = joint_audios
            fn._videos = joint_videos

    return functions
# ---------------------------------------------------------------------------
# Tool Execution
# ---------------------------------------------------------------------------
def handle_external_execution_update(agent: Agent, run_messages: RunMessages, tool: ToolExecution):
    """Fold the result of an externally executed tool back into the run messages."""
    agent.model = cast(Model, agent.model)
    if tool.result is None:
        # The caller resumed the run without supplying a result for this tool.
        raise ValueError(f"Tool {tool.tool_name} requires external execution, cannot continue run")

    # Only append a result message if one isn't already recorded for this call id.
    already_recorded = any(msg.tool_call_id == tool.tool_call_id for msg in run_messages.messages)
    if not already_recorded:
        run_messages.messages.append(
            Message(
                role=agent.model.tool_message_role,
                content=tool.result,
                tool_call_id=tool.tool_call_id,
                tool_name=tool.tool_name,
                tool_args=tool.tool_args,
                tool_call_error=tool.tool_call_error,
                stop_after_tool_call=tool.stop_after_tool_call,
            )
        )
    tool.external_execution_required = False
def handle_user_input_update(agent: Agent, tool: ToolExecution):
    """Copy collected user-input field values into the tool's call arguments."""
    for field in tool.user_input_schema or []:
        args = tool.tool_args or {}
        args[field.name] = field.value
        tool.tool_args = args
def handle_get_user_input_tool_update(agent: Agent, run_messages: RunMessages, tool: ToolExecution):
    """Record the answers gathered by the agentic ``get_user_input`` tool."""
    import json

    agent.model = cast(Model, agent.model)
    # Skipping tool without user_input_schema so that tool_call_id is not repeated
    schema = getattr(tool, "user_input_schema", None)
    if not schema:
        return

    collected = [{"name": entry.name, "value": entry.value} for entry in schema]
    # Add the tool call result to the run_messages
    run_messages.messages.append(
        Message(
            role=agent.model.tool_message_role,
            content=f"User inputs retrieved: {json.dumps(collected)}",
            tool_call_id=tool.tool_call_id,
            tool_name=tool.tool_name,
            tool_args=tool.tool_args,
            metrics=MessageMetrics(duration=0),
        )
    )
def handle_ask_user_tool_update(agent: Agent, run_messages: RunMessages, tool: ToolExecution):
    """Record the feedback gathered by the agentic ``ask_user`` tool."""
    import json

    agent.model = cast(Model, agent.model)
    feedback_schema = getattr(tool, "user_feedback_schema", None)
    if not feedback_schema:
        # Nothing to record for this tool call.
        return

    collected = [{"question": q.question, "selected": q.selected_options or []} for q in feedback_schema]
    run_messages.messages.append(
        Message(
            role=agent.model.tool_message_role,
            content=f"User feedback received: {json.dumps(collected)}",
            tool_call_id=tool.tool_call_id,
            tool_name=tool.tool_name,
            tool_args=tool.tool_args,
            metrics=MessageMetrics(duration=0),
        )
    )
def _maybe_create_audit_approval(
agent: "Agent", tool_execution: ToolExecution, run_response: RunOutput, status: str
) -> None:
"""Create an audit approval record if the tool has approval_type='audit'."""
if getattr(tool_execution, "approval_type", None) == "audit":
from agno.run.approval import create_audit_approval
create_audit_approval(
db=agent.db,
tool_execution=tool_execution,
run_response=run_response,
status=status,
agent_id=agent.id,
agent_name=agent.name,
)
async def _amaybe_create_audit_approval(
agent: "Agent", tool_execution: ToolExecution, run_response: RunOutput, status: str
) -> None:
"""Async: create an audit approval record if the tool has approval_type='audit'."""
if getattr(tool_execution, "approval_type", None) == "audit":
from agno.run.approval import acreate_audit_approval
await acreate_audit_approval(
db=agent.db,
tool_execution=tool_execution,
run_response=run_response,
status=status,
agent_id=agent.id,
agent_name=agent.name,
)
def run_tool(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    tool: ToolExecution,
    functions: Optional[Dict[str, Function]] = None,
    stream_events: bool = False,
) -> Iterator[RunOutputEvent]:
    """Execute a single resolved tool call, optionally streaming tool events.

    Resolves the function call from ``tool``, runs it via the model, copies the
    result and error flag back onto ``tool``, and appends the function-call
    result messages to ``run_messages``. When ``stream_events`` is True, yields
    started/completed/error events plus any CustomEvent produced by generator
    tools.
    """
    from agno.run.agent import CustomEvent

    agent.model = cast(Model, agent.model)

    # Execute the tool
    function_call = agent.model.get_function_call_to_run_from_tool_execution(tool, functions)
    function_call_results: List[Message] = []
    for call_result in agent.model.run_function_call(
        function_call=function_call,
        function_call_results=function_call_results,
    ):
        if isinstance(call_result, ModelResponse):
            if call_result.event == ModelResponseEvent.tool_call_started.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_tool_call_started_event(from_run_response=run_response, tool=tool),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
            if call_result.event == ModelResponseEvent.tool_call_completed.value and call_result.tool_executions:
                # Mirror the executed call's outcome back onto the paused tool record.
                tool_execution = call_result.tool_executions[0]
                tool.result = tool_execution.result
                tool.tool_call_error = tool_execution.tool_call_error
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_tool_call_completed_event(
                            from_run_response=run_response, tool=tool, content=call_result.content
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                    if tool.tool_call_error:
                        yield handle_event(  # type: ignore
                            create_tool_call_error_event(
                                from_run_response=run_response, tool=tool, error=str(tool.result)
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
        # Yield CustomEvent instances from sync tool generators
        elif isinstance(call_result, CustomEvent):
            if stream_events:
                yield call_result  # type: ignore

    if len(function_call_results) > 0:
        run_messages.messages.extend(function_call_results)
def reject_tool_call(
    agent: Agent, run_messages: RunMessages, tool: ToolExecution, functions: Optional[Dict[str, Function]] = None
):
    """Record a user-rejected tool call as a failed function-call result message."""
    agent.model = cast(Model, agent.model)
    rejected_call = agent.model.get_function_call_to_run_from_tool_execution(tool, functions)
    # Prefer the user's note; fall back to a generic rejection message.
    rejected_call.error = tool.confirmation_note or "Function call was rejected by the user"
    run_messages.messages.append(
        agent.model.create_function_call_result(
            function_call=rejected_call,
            success=False,
        )
    )
async def arun_tool(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    tool: ToolExecution,
    functions: Optional[Dict[str, Function]] = None,
    stream_events: bool = False,
) -> AsyncIterator[RunOutputEvent]:
    """Async counterpart of ``run_tool``: execute a single resolved tool call.

    Runs the call via ``arun_function_calls`` with ``skip_pause_check=True``
    (the pause has already been resolved by the caller), copies the result and
    error flag back onto ``tool``, and appends the function-call result
    messages to ``run_messages``. When ``stream_events`` is True, yields
    started/completed/error events plus any CustomEvent produced by async
    generator tools.
    """
    from agno.run.agent import CustomEvent

    agent.model = cast(Model, agent.model)

    # Execute the tool
    function_call = agent.model.get_function_call_to_run_from_tool_execution(tool, functions)
    function_call_results: List[Message] = []
    async for call_result in agent.model.arun_function_calls(
        function_calls=[function_call],
        function_call_results=function_call_results,
        skip_pause_check=True,
    ):
        if isinstance(call_result, ModelResponse):
            if call_result.event == ModelResponseEvent.tool_call_started.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_tool_call_started_event(from_run_response=run_response, tool=tool),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
            if call_result.event == ModelResponseEvent.tool_call_completed.value and call_result.tool_executions:
                # Mirror the executed call's outcome back onto the paused tool record.
                tool_execution = call_result.tool_executions[0]
                tool.result = tool_execution.result
                tool.tool_call_error = tool_execution.tool_call_error
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_tool_call_completed_event(
                            from_run_response=run_response, tool=tool, content=call_result.content
                        ),
                        run_response,
                        events_to_skip=agent.events_to_skip,  # type: ignore
                        store_events=agent.store_events,
                    )
                    if tool.tool_call_error:
                        yield handle_event(  # type: ignore
                            create_tool_call_error_event(
                                from_run_response=run_response, tool=tool, error=str(tool.result)
                            ),
                            run_response,
                            events_to_skip=agent.events_to_skip,  # type: ignore
                            store_events=agent.store_events,
                        )
        # Yield CustomEvent instances from async tool generators
        elif isinstance(call_result, CustomEvent):
            if stream_events:
                yield call_result  # type: ignore

    if len(function_call_results) > 0:
        run_messages.messages.extend(function_call_results)
def handle_tool_call_updates(
    agent: Agent, run_response: RunOutput, run_messages: RunMessages, tools: List[Union[Function, dict]]
):
    """Resolve paused (human-in-the-loop) tool calls after the user responded.

    For each tool on the run response, handles one of: user confirmation
    (execute or reject), external execution results, agentic user input
    (get_user_input / ask_user), or field-level user input -- then records an
    audit approval where the tool requests it. Non-streaming variant: the
    run_tool generator is drained without yielding events.
    """
    agent.model = cast(Model, agent.model)
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: Handle confirmed tools and execute them
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            # Tool is confirmed and hasn't been run before
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                # Consume the generator without yielding
                deque(run_tool(agent, run_response, run_messages, _t, functions=_functions), maxlen=0)
            else:
                reject_tool_call(agent, run_messages, _t, functions=_functions)
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            _maybe_create_audit_approval(agent, _t, run_response, "approved" if _t.confirmed is True else "rejected")
            _t.requires_confirmation = False
        # Case 2: Handle external execution required tools
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(agent, run_messages=run_messages, tool=_t)
            _maybe_create_audit_approval(agent, _t, run_response, "approved")
        # Case 3a: Agentic user input required
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(agent, run_messages=run_messages, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
        # Case 3b: User feedback (ask_user) required
        elif _t.tool_name == "ask_user" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_ask_user_tool_update(agent, run_messages=run_messages, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: Handle user input required tools
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_user_input_update(agent, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
            # Consume the generator without yielding
            deque(run_tool(agent, run_response, run_messages, _t, functions=_functions), maxlen=0)
            _maybe_create_audit_approval(agent, _t, run_response, "approved")
def handle_tool_call_updates_stream(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    tools: List[Union[Function, dict]],
    stream_events: bool = False,
) -> Iterator[RunOutputEvent]:
    """Streaming variant of ``handle_tool_call_updates``.

    Resolves paused (human-in-the-loop) tool calls after the user responded,
    yielding run events from ``run_tool`` instead of draining them. Handles
    confirmations (execute or reject), external execution results, agentic
    user input (get_user_input / ask_user), and field-level user input, and
    records audit approvals where the tool requests it.
    """
    agent.model = cast(Model, agent.model)
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: Handle confirmed tools and execute them
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            # Tool is confirmed and hasn't been run before
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                yield from run_tool(
                    agent, run_response, run_messages, _t, functions=_functions, stream_events=stream_events
                )
            else:
                reject_tool_call(agent, run_messages, _t, functions=_functions)
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            _maybe_create_audit_approval(agent, _t, run_response, "approved" if _t.confirmed is True else "rejected")
            _t.requires_confirmation = False
        # Case 2: Handle external execution required tools
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(agent, run_messages=run_messages, tool=_t)
            _maybe_create_audit_approval(agent, _t, run_response, "approved")
        # Case 3a: Agentic user input required
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(agent, run_messages=run_messages, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
        # Case 3b: User feedback (ask_user) required
        elif _t.tool_name == "ask_user" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_ask_user_tool_update(agent, run_messages=run_messages, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: Handle user input required tools
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_user_input_update(agent, tool=_t)
            yield from run_tool(
                agent, run_response, run_messages, _t, functions=_functions, stream_events=stream_events
            )
            _t.requires_user_input = False
            _t.answered = True
            _maybe_create_audit_approval(agent, _t, run_response, "approved")
async def ahandle_tool_call_updates(
    agent: Agent, run_response: RunOutput, run_messages: RunMessages, tools: List[Union[Function, dict]]
):
    """Resolve paused (HITL) tool calls on a run and resume execution (async, non-streaming).

    Walks every tool call recorded on ``run_response`` and, based on which
    human-in-the-loop flag paused it, either executes the tool, records the
    rejection, or feeds collected user input back into the run. Audit approval
    records are created where applicable.
    """
    agent.model = cast(Model, agent.model)
    available_functions = {fn.name: fn for fn in tools if isinstance(fn, Function)}
    for tool_exec in run_response.tools or []:
        # Case 1: confirmation-gated tools -- execute if approved, otherwise reject.
        if tool_exec.requires_confirmation is True and available_functions:
            if tool_exec.confirmed is True and tool_exec.result is None:
                # Confirmed and not yet executed: drain the tool run to completion.
                async for _ in arun_tool(agent, run_response, run_messages, tool_exec, functions=available_functions):
                    pass
            else:
                reject_tool_call(agent, run_messages, tool_exec, functions=available_functions)
                tool_exec.confirmed = False
                tool_exec.confirmation_note = tool_exec.confirmation_note or "Tool call was rejected"
                tool_exec.tool_call_error = True
            await _amaybe_create_audit_approval(
                agent, tool_exec, run_response, "approved" if tool_exec.confirmed is True else "rejected"
            )
            tool_exec.requires_confirmation = False
        # Case 2: tools that were executed outside the agent process.
        elif tool_exec.external_execution_required is True:
            handle_external_execution_update(agent, run_messages=run_messages, tool=tool_exec)
            await _amaybe_create_audit_approval(agent, tool_exec, run_response, "approved")
        # Case 3a: agentic user input collected via the built-in get_user_input tool.
        elif tool_exec.tool_name == "get_user_input" and tool_exec.requires_user_input is True:
            handle_get_user_input_tool_update(agent, run_messages=run_messages, tool=tool_exec)
            tool_exec.requires_user_input = False
            tool_exec.answered = True
        # Case 3b: user feedback collected via the built-in ask_user tool.
        elif tool_exec.tool_name == "ask_user" and tool_exec.requires_user_input is True:
            handle_ask_user_tool_update(agent, run_messages=run_messages, tool=tool_exec)
            tool_exec.requires_user_input = False
            tool_exec.answered = True
        # Case 4: any other tool paused for user input -- apply the input, then execute.
        elif tool_exec.requires_user_input is True:
            handle_user_input_update(agent, tool=tool_exec)
            async for _ in arun_tool(agent, run_response, run_messages, tool_exec, functions=available_functions):
                pass
            tool_exec.requires_user_input = False
            tool_exec.answered = True
            await _amaybe_create_audit_approval(agent, tool_exec, run_response, "approved")
async def ahandle_tool_call_updates_stream(
    agent: Agent,
    run_response: RunOutput,
    run_messages: RunMessages,
    tools: List[Union[Function, dict]],
    stream_events: bool = False,
) -> AsyncIterator[RunOutputEvent]:
    """Resolve paused (HITL) tool calls on a run and resume execution, streaming events.

    Async-generator variant of ``ahandle_tool_call_updates``: events produced by
    ``arun_tool`` are yielded to the caller instead of being drained. Each tool
    call on ``run_response`` is handled by exactly one of the cases below,
    mutating its HITL flags in place so it is not re-processed on a later pass.
    """
    agent.model = cast(Model, agent.model)
    # Only Function entries can be executed; plain dict tool definitions are ignored here.
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: Handle confirmed tools and execute them
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            # Tool is confirmed and hasn't been run before
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                async for event in arun_tool(
                    agent, run_response, run_messages, _t, functions=_functions, stream_events=stream_events
                ):
                    yield event
            else:
                # Not confirmed (or already has a result): record the rejection.
                reject_tool_call(agent, run_messages, _t, functions=_functions)
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            # Audit outcome reflects the final confirmed flag (True only on the executed branch).
            await _amaybe_create_audit_approval(
                agent, _t, run_response, "approved" if _t.confirmed is True else "rejected"
            )
            _t.requires_confirmation = False
        # Case 2: Handle external execution required tools
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(agent, run_messages=run_messages, tool=_t)
            await _amaybe_create_audit_approval(agent, _t, run_response, "approved")
        # Case 3a: Agentic user input required
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(agent, run_messages=run_messages, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
        # Case 3b: User feedback (ask_user) required
        elif _t.tool_name == "ask_user" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_ask_user_tool_update(agent, run_messages=run_messages, tool=_t)
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: Handle user input required tools
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            # Apply the collected user input to the tool args, then execute it.
            handle_user_input_update(agent, tool=_t)
            async for event in arun_tool(
                agent, run_response, run_messages, _t, functions=_functions, stream_events=stream_events
            ):
                yield event
            _t.requires_user_input = False
            _t.answered = True
            await _amaybe_create_audit_approval(agent, _t, run_response, "approved")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_tools.py",
"license": "Apache License 2.0",
"lines": 804,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/_utils.py | """Shared utility helpers for Agent."""
from __future__ import annotations
import json
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Union,
)
if TYPE_CHECKING:
from agno.agent.agent import Agent
from agno.filters import FilterExpr
from agno.utils.log import log_debug, log_error, log_warning
def get_effective_filters(
    agent: Agent, knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> Optional[Any]:
    """
    Determine which knowledge filters to use, with priority to run-level filters.

    Agent-level filters act as a base; run-level filters override them. When
    both levels use the same container type the filters are merged (dicts are
    updated key-by-key, lists are concatenated); when the types differ the
    run-level filters replace the agent-level ones entirely.

    Args:
        agent: The Agent instance.
        knowledge_filters: Filters passed at run time.

    Returns:
        The effective filters to use, with run-level filters taking priority.
    """
    effective_filters = None

    # If agent has filters, use those as a base
    if agent.knowledge_filters:
        effective_filters = agent.knowledge_filters.copy()

    # If run has filters, they override agent filters
    if knowledge_filters:
        if effective_filters is None:
            effective_filters = knowledge_filters
        elif isinstance(knowledge_filters, dict) and isinstance(effective_filters, dict):
            effective_filters.update(knowledge_filters)
        elif isinstance(knowledge_filters, list) and isinstance(effective_filters, list):
            effective_filters = [*effective_filters, *knowledge_filters]
        else:
            # Mixed container types (e.g. dict agent filters + list run filters):
            # merging is ill-defined -- previously a run-level list was spread
            # together with the agent dict's KEYS. Run-level filters win outright.
            effective_filters = knowledge_filters

    if effective_filters:
        log_debug(f"Using knowledge filters: {effective_filters}")
    return effective_filters
def convert_documents_to_string(agent: Agent, docs: List[Union[Dict[str, Any], str]]) -> str:
    """Serialize reference documents to text using the agent's configured references_format."""
    if not docs:
        return ""

    if agent.references_format == "yaml":
        import yaml

        return yaml.dump(docs)

    return json.dumps(docs, indent=2, ensure_ascii=False)
def convert_dependencies_to_string(agent: Agent, context: Dict[str, Any]) -> str:
    """Convert the context dictionary to a string representation.

    Args:
        agent: The Agent instance.
        context: Dictionary containing context data

    Returns:
        String representation of the context, or empty string if conversion fails
    """
    if context is None:
        return ""

    try:
        return json.dumps(context, indent=2, default=str)
    except (TypeError, ValueError, OverflowError) as e:
        log_warning(f"Failed to convert context to JSON: {e}")

    # Fallback: keep values that serialize cleanly, stringify the rest.
    sanitized: Dict[str, Any] = {}
    for key, value in context.items():
        try:
            # Probe each entry individually for serializability.
            json.dumps({key: value}, default=str)
        except Exception:
            value = str(value)
        sanitized[key] = value

    try:
        return json.dumps(sanitized, indent=2)
    except Exception as e:
        log_error(f"Failed to convert sanitized context to JSON: {e}")
        return str(context)
# ---------------------------------------------------------------------------
# Deep copy
# ---------------------------------------------------------------------------
def deep_copy(agent: Agent, *, update: Optional[Dict[str, Any]] = None) -> Agent:
    """Create and return a deep copy of this Agent, optionally updating fields.

    Args:
        agent: The Agent instance to copy.
        update (Optional[Dict[str, Any]]): Optional dictionary of fields for the new Agent.

    Returns:
        Agent: A new Agent instance.
    """
    from dataclasses import fields
    from inspect import signature

    # Only fields the constructor accepts can be carried over to the copy.
    accepted_params = set(signature(agent.__class__.__init__).parameters.keys()) - {"self"}

    constructor_kwargs: Dict = {}
    for field_def in fields(agent):
        name = field_def.name
        # Skip private fields and fields not accepted by __init__
        if name.startswith("_") or name not in accepted_params:
            continue
        value = getattr(agent, name)
        if value is None:
            continue
        try:
            constructor_kwargs[name] = deep_copy_field(agent, name, value)
        except Exception as e:
            log_warning(f"Failed to deep copy field '{name}': {e}. Using original value.")
            constructor_kwargs[name] = value

    # Caller-supplied overrides win over copied values.
    if update:
        constructor_kwargs.update(update)

    try:
        copied_agent = agent.__class__(**constructor_kwargs)
        log_debug(f"Created new {agent.__class__.__name__}")
        return copied_agent
    except Exception as e:
        log_error(f"Failed to create deep copy of {agent.__class__.__name__}: {e}")
        raise
def deep_copy_field(agent: Agent, field_name: str, field_value: Any) -> Any:
    """Helper function to deep copy a field based on its type.

    Copy strategy, in order of precedence:
      1. ``reasoning_agent``: delegate to its own ``deep_copy`` method.
      2. ``tools``: deep-copy each tool, but share MCP tools by reference
         (they hold live server connections that must not be duplicated).
      3. Heavy resources (db, models, knowledge, managers, ...): always shared.
      4. list/dict/set: deepcopy, falling back to shallow copy.
      5. Pydantic models: model_copy(deep=True), falling back to shallow.
      6. Anything else: shallow copy, falling back to the original reference.

    Never raises: every failure path falls back to sharing the original value.
    """
    from copy import copy, deepcopy

    from pydantic import BaseModel

    # For memory and reasoning_agent, use their deep_copy methods
    if field_name == "reasoning_agent":
        return field_value.deep_copy()  # type: ignore

    # For tools, share MCP tools but copy others
    if field_name == "tools" and field_value is not None:
        try:
            copied_tools = []
            for tool in field_value:  # type: ignore
                try:
                    # Share MCP tools (they maintain server connections)
                    # Detected by class name so this module need not import MCPTools.
                    is_mcp_tool = hasattr(type(tool), "__mro__") and any(
                        c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
                    )
                    if is_mcp_tool:
                        copied_tools.append(tool)
                    else:
                        try:
                            copied_tools.append(deepcopy(tool))
                        except Exception:
                            # Tool can't be deep copied, share by reference
                            copied_tools.append(tool)
                except Exception:
                    # MCP detection failed, share tool by reference to be safe
                    copied_tools.append(tool)
            return copied_tools
        except Exception as e:
            # If entire tools processing fails, log and return original list
            log_warning(f"Failed to process tools for deep copy: {e}")
            return field_value

    # Share heavy resources - these maintain connections/pools that shouldn't be duplicated
    if field_name in (
        "db",
        "model",
        "reasoning_model",
        "knowledge",
        "memory_manager",
        "parser_model",
        "output_model",
        "session_summary_manager",
        "culture_manager",
        "compression_manager",
        "learning",
        "skills",
    ):
        return field_value

    # For compound types, attempt a deep copy
    if isinstance(field_value, (list, dict, set)):
        try:
            return deepcopy(field_value)
        except Exception:
            try:
                return copy(field_value)
            except Exception as e:
                log_warning(f"Failed to copy field: {field_name} - {e}")
                return field_value

    # For pydantic models, attempt a model_copy
    if isinstance(field_value, BaseModel):
        try:
            return field_value.model_copy(deep=True)
        except Exception:
            try:
                return field_value.model_copy(deep=False)
            except Exception as e:
                log_warning(f"Failed to copy field: {field_name} - {e}")
                return field_value

    # For other types, attempt a shallow copy first
    try:
        return copy(field_value)
    except Exception:
        # If copy fails, return as is
        return field_value
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/_utils.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_cli.py | """User-facing CLI helpers for Team: response printing and interactive REPL."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Union,
)
from pydantic import BaseModel
if TYPE_CHECKING:
from agno.team.team import Team
from agno.agent import Agent
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.utils.print_response.team import (
aprint_response,
aprint_response_stream,
print_response,
print_response_stream,
)
def _get_member_name(team: "Team", entity_id: str) -> str:
    """Resolve a member's display name from its id, falling back to the id itself."""
    from agno.team.team import Team

    if isinstance(team.members, list):
        for member in team.members:
            # Agents and nested Teams are handled identically: match on id, prefer name.
            if isinstance(member, (Agent, Team)) and member.id == entity_id:
                return member.name or entity_id
    return entity_id
def team_print_response(
    team: "Team",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    *,
    stream: Optional[bool] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    markdown: Optional[bool] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    show_member_responses: Optional[bool] = None,
    console: Optional[Any] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    **kwargs: Any,
) -> None:
    """Run the team on ``input`` and print the response to the console (sync only).

    Resolves display defaults from the team's configuration, then delegates to
    the streaming or non-streaming printer depending on ``stream``.

    Raises:
        Exception: If the team is configured with an async DB (use the async variant).
    """
    from agno.team._init import _has_async_db

    if _has_async_db(team):
        raise Exception("This method is not supported with an async DB. Please use the async version of this method.")

    # Fall back to team-level defaults where the caller did not specify.
    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}
    if markdown is None:
        markdown = team.markdown
    if team.output_schema is not None:
        # Structured output is never rendered as markdown.
        markdown = False
    if stream is None:
        stream = team.stream or False
    if show_member_responses is None:
        show_member_responses = team.show_members_responses

    # stream_events is controlled here, never by the caller.
    kwargs.pop("stream_events", None)

    shared_kwargs: Dict[str, Any] = dict(
        team=team,
        input=input,
        console=console,
        show_message=show_message,
        show_reasoning=show_reasoning,
        show_full_reasoning=show_full_reasoning,
        show_member_responses=show_member_responses,
        tags_to_include_in_markdown=tags_to_include_in_markdown,
        session_id=session_id,
        session_state=session_state,
        user_id=user_id,
        run_id=run_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        markdown=markdown,
        knowledge_filters=knowledge_filters,
        add_history_to_context=add_history_to_context,
        dependencies=dependencies,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        metadata=metadata,
        debug_mode=debug_mode,
    )

    if stream:
        print_response_stream(stream_events=True, **shared_kwargs, **kwargs)
    else:
        print_response(**shared_kwargs, **kwargs)
async def team_aprint_response(
    team: "Team",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    *,
    stream: Optional[bool] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    markdown: Optional[bool] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    show_member_responses: Optional[bool] = None,
    console: Optional[Any] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    **kwargs: Any,
) -> None:
    """Run the team on ``input`` and print the response to the console (async).

    Resolves display defaults from the team's configuration, then awaits the
    streaming or non-streaming printer depending on ``stream``.
    """
    # Fall back to team-level defaults where the caller did not specify.
    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}
    if markdown is None:
        markdown = team.markdown
    if team.output_schema is not None:
        # Structured output is never rendered as markdown.
        markdown = False
    if stream is None:
        stream = team.stream or False
    if show_member_responses is None:
        show_member_responses = team.show_members_responses

    # stream_events is controlled here, never by the caller.
    kwargs.pop("stream_events", None)

    shared_kwargs: Dict[str, Any] = dict(
        team=team,
        input=input,
        console=console,
        show_message=show_message,
        show_reasoning=show_reasoning,
        show_full_reasoning=show_full_reasoning,
        show_member_responses=show_member_responses,
        tags_to_include_in_markdown=tags_to_include_in_markdown,
        session_id=session_id,
        session_state=session_state,
        user_id=user_id,
        run_id=run_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        markdown=markdown,
        knowledge_filters=knowledge_filters,
        add_history_to_context=add_history_to_context,
        dependencies=dependencies,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        metadata=metadata,
        debug_mode=debug_mode,
    )

    if stream:
        await aprint_response_stream(stream_events=True, **shared_kwargs, **kwargs)
    else:
        await aprint_response(**shared_kwargs, **kwargs)
def cli_app(
    team: "Team",
    input: Optional[str] = None,
    user: str = "User",
    emoji: str = ":sunglasses:",
    stream: bool = False,
    markdown: bool = False,
    exit_on: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """Run an interactive command-line interface to interact with the team."""
    from inspect import isawaitable

    from rich.prompt import Prompt

    # Async tools (and MCP tools, which hold live server connections) need the async REPL.
    if team.tools is not None and isinstance(team.tools, list):
        for tool in team.tools:
            if isawaitable(tool):
                raise NotImplementedError("Use `acli_app` to use async tools.")
            # Checked by class name to avoid importing MCPTools/MultiMCPTools here.
            mro = getattr(type(tool), "__mro__", None)
            if mro is not None and any(c.__name__ in ["MCPTools", "MultiMCPTools"] for c in mro):
                raise NotImplementedError("Use `acli_app` to use MCP tools.")

    if input:
        team_print_response(team, input=input, stream=stream, markdown=markdown, **kwargs)

    exit_commands = exit_on or ["exit", "quit", "bye"]
    while True:
        user_input = Prompt.ask(f"[bold] {emoji} {user} [/bold]")
        if user_input in exit_commands:
            break
        team_print_response(team, input=user_input, stream=stream, markdown=markdown, **kwargs)
async def acli_app(
    team: "Team",
    input: Optional[str] = None,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    user: str = "User",
    emoji: str = ":sunglasses:",
    stream: bool = False,
    markdown: bool = False,
    exit_on: Optional[List[str]] = None,
    **kwargs: Any,
) -> None:
    """
    Run an interactive command-line interface to interact with the team.
    Works with team dependencies requiring async logic.
    """
    from rich.prompt import Prompt

    if input:
        await team_aprint_response(
            team, input=input, stream=stream, markdown=markdown, user_id=user_id, session_id=session_id, **kwargs
        )

    exit_commands = exit_on or ["exit", "quit", "bye"]
    # Prompt until the user types one of the exit commands.
    while (message := Prompt.ask(f"[bold] {emoji} {user} [/bold]")) not in exit_commands:
        await team_aprint_response(
            team, input=message, stream=stream, markdown=markdown, user_id=user_id, session_id=session_id, **kwargs
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_cli.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_default_tools.py | """Built-in tool factory functions for Team."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agno.team.team import Team
import asyncio
import contextlib
import json
from copy import copy
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Union,
cast,
)
from pydantic import BaseModel
from agno.agent import Agent
from agno.db.base import AsyncBaseDb, BaseDb, SessionType
from agno.filters import FilterExpr
from agno.knowledge.types import KnowledgeFilter
from agno.media import Audio, File, Image, Video
from agno.memory import MemoryManager
from agno.models.message import Message, MessageReferences
from agno.run import RunContext
from agno.run.agent import RunOutput, RunOutputEvent
from agno.run.team import (
TeamRunOutput,
TeamRunOutputEvent,
)
from agno.session import TeamSession
from agno.tools.function import Function
from agno.utils.knowledge import get_agentic_or_user_search_filters
from agno.utils.log import (
log_debug,
log_info,
log_warning,
use_agent_logger,
use_team_logger,
)
from agno.utils.merge_dict import merge_dictionaries
from agno.utils.response import (
check_if_run_cancelled,
)
from agno.utils.team import (
add_interaction_to_team_run_context,
format_member_agent_task,
)
from agno.utils.timer import Timer
def _get_update_user_memory_function(team: "Team", user_id: Optional[str] = None, async_mode: bool = False) -> Function:
    """Build the `update_user_memory` tool, choosing the sync or async variant.

    The inner docstrings are exposed to the model as the tool description,
    so they are kept intact.
    """

    def update_user_memory(task: str) -> str:
        """
        Use this function to submit a task to modify the Agent's memory.
        Describe the task in detail and be specific.
        The task can include adding a memory, updating a memory, deleting a memory, or clearing all memories.

        Args:
            task: The task to update the memory. Be specific and describe the task in detail.

        Returns:
            str: A string indicating the status of the update.
        """
        team.memory_manager = cast(MemoryManager, team.memory_manager)
        return team.memory_manager.update_memory_task(task=task, user_id=user_id)

    async def aupdate_user_memory(task: str) -> str:
        """
        Use this function to submit a task to modify the Agent's memory.
        Describe the task in detail and be specific.
        The task can include adding a memory, updating a memory, deleting a memory, or clearing all memories.

        Args:
            task: The task to update the memory. Be specific and describe the task in detail.

        Returns:
            str: A string indicating the status of the update.
        """
        team.memory_manager = cast(MemoryManager, team.memory_manager)
        return await team.memory_manager.aupdate_memory_task(task=task, user_id=user_id)

    chosen = aupdate_user_memory if async_mode else update_user_memory
    return Function.from_callable(chosen, name="update_user_memory")
def _get_chat_history_function(team: "Team", session: TeamSession, async_mode: bool = False):
    """Build the `get_chat_history` tool, choosing the sync or async variant.

    Both variants share one implementation; the inner docstrings are exposed
    to the model as the tool description, so they are kept intact.
    """

    def _history_json(num_chats: Optional[int]) -> str:
        # Shared implementation for both the sync and async tool callables.
        # NOTE(review): num_chats slices individual messages, not user/team
        # pairs as the docstring suggests -- confirm intended semantics.
        import json

        all_chats = session.get_messages(team_id=team.id)
        if len(all_chats) == 0:
            return ""

        history: List[Dict[str, Any]] = [chat.to_dict() for chat in all_chats]  # type: ignore
        if num_chats is not None:
            history = history[-num_chats:]

        return json.dumps(history)

    def get_chat_history(num_chats: Optional[int] = None) -> str:
        """
        Use this function to get the team chat history in reverse chronological order.
        Leave the num_chats parameter blank to get the entire chat history.

        Example:
            - To get the last chat, use num_chats=1
            - To get the last 5 chats, use num_chats=5
            - To get all chats, leave num_chats blank

        Args:
            num_chats: The number of chats to return.
                Each chat contains 2 messages. One from the team and one from the user.
                Default: None

        Returns:
            str: A JSON string containing a list of dictionaries representing the team chat history.
        """
        return _history_json(num_chats)

    async def aget_chat_history(num_chats: Optional[int] = None) -> str:
        """
        Use this function to get the team chat history in reverse chronological order.
        Leave the num_chats parameter blank to get the entire chat history.

        Example:
            - To get the last chat, use num_chats=1
            - To get the last 5 chats, use num_chats=5
            - To get all chats, leave num_chats blank

        Args:
            num_chats: The number of chats to return.
                Each chat contains 2 messages. One from the team and one from the user.
                Default: None

        Returns:
            str: A JSON string containing a list of dictionaries representing the team chat history.
        """
        return _history_json(num_chats)

    chosen = aget_chat_history if async_mode else get_chat_history
    return Function.from_callable(chosen, name="get_chat_history")
def _update_session_state_tool(team: "Team", run_context: RunContext, session_state_updates: dict) -> str:
"""
Update the shared session state. Provide any updates as a dictionary of key-value pairs.
Example:
"session_state_updates": {"shopping_list": ["milk", "eggs", "bread"]}
Args:
session_state_updates (dict): The updates to apply to the shared session state. Should be a dictionary of key-value pairs.
"""
if run_context.session_state is None:
run_context.session_state = {}
session_state = run_context.session_state
for key, value in session_state_updates.items():
session_state[key] = value
return f"Updated session state: {session_state}"
def _get_previous_sessions_messages_function(
    team: "Team", num_history_sessions: Optional[int] = 2, user_id: Optional[str] = None, async_mode: bool = False
):
    """Factory function to create a get_previous_session_messages function.

    The sync and async variants previously duplicated ~25 lines of message-pair
    extraction verbatim; that logic now lives in one shared closure. The inner
    tool docstrings are exposed to the model as the tool description, so they
    are kept intact.

    Args:
        num_history_sessions: The last n sessions to be taken from db
        user_id: The user ID to filter sessions by

    Returns:
        Callable: A function that retrieves messages from previous sessions
    """
    from agno.team._init import _has_async_db

    def _collect_messages(selected_sessions) -> str:
        # Extract deduplicated user/assistant message pairs as a JSON string.
        import json

        all_messages = []
        seen_message_pairs = set()
        for session in selected_sessions:
            if isinstance(session, TeamSession) and session.runs:
                for run in session.runs:
                    messages = run.messages
                    if messages is None:
                        continue
                    # Messages are assumed to alternate user/assistant; walk them in pairs.
                    for i in range(0, len(messages) - 1, 2):
                        try:
                            user_msg = messages[i]
                            assistant_msg = messages[i + 1]
                            user_content = user_msg.content
                            assistant_content = assistant_msg.content
                            if user_content is None or assistant_content is None:
                                continue  # Skip this pair if either message has no content
                            msg_pair_id = f"{user_content}:{assistant_content}"
                            if msg_pair_id not in seen_message_pairs:
                                seen_message_pairs.add(msg_pair_id)
                                all_messages.append(Message.model_validate(user_msg))
                                all_messages.append(Message.model_validate(assistant_msg))
                        except Exception as e:
                            log_warning(f"Error processing message pair: {e}")
                            continue
        return json.dumps([msg.to_dict() for msg in all_messages]) if all_messages else "No history found"

    def get_previous_session_messages() -> str:
        """Use this function to retrieve messages from previous chat sessions.
        USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.

        Returns:
            str: JSON formatted list of message pairs from previous sessions
        """
        if team.db is None:
            return "Previous session messages not available"

        team.db = cast(BaseDb, team.db)
        selected_sessions = team.db.get_sessions(
            session_type=SessionType.TEAM,
            limit=num_history_sessions,
            user_id=user_id,
            sort_by="created_at",
            sort_order="desc",
        )
        return _collect_messages(selected_sessions)

    async def aget_previous_session_messages() -> str:
        """Use this function to retrieve messages from previous chat sessions.
        USE THIS TOOL ONLY WHEN THE QUESTION IS EITHER "What was my last conversation?" or "What was my last question?" and similar to it.

        Returns:
            str: JSON formatted list of message pairs from previous sessions
        """
        if team.db is None:
            return "Previous session messages not available"

        if _has_async_db(team):
            selected_sessions = await cast(AsyncBaseDb, team.db).get_sessions(  # type: ignore
                session_type=SessionType.TEAM,
                limit=num_history_sessions,
                user_id=user_id,
                sort_by="created_at",
                sort_order="desc",
            )
        else:
            selected_sessions = team.db.get_sessions(  # type: ignore
                session_type=SessionType.TEAM,
                limit=num_history_sessions,
                user_id=user_id,
                sort_by="created_at",
                sort_order="desc",
            )
        return _collect_messages(selected_sessions)

    if _has_async_db(team):
        return Function.from_callable(aget_previous_session_messages, name="get_previous_session_messages")
    return Function.from_callable(get_previous_session_messages, name="get_previous_session_messages")
def _get_delegate_task_function(
team: "Team",
run_response: TeamRunOutput,
run_context: RunContext,
session: TeamSession,
team_run_context: Dict[str, Any],
user_id: Optional[str] = None,
stream: bool = False,
stream_events: bool = False,
async_mode: bool = False,
input: Optional[str] = None, # Used for determine_input_for_members=False
images: Optional[List[Image]] = None,
videos: Optional[List[Video]] = None,
audio: Optional[List[Audio]] = None,
files: Optional[List[File]] = None,
add_history_to_context: Optional[bool] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
debug_mode: Optional[bool] = None,
) -> Function:
from agno.team._init import _initialize_member
from agno.team._run import _update_team_media
from agno.team._tools import (
_determine_team_member_interactions,
_find_member_by_id,
_get_history_for_member_agent,
_propagate_member_pause,
)
if not images:
images = []
if not videos:
videos = []
if not audio:
audio = []
if not files:
files = []
    def _setup_delegate_task_to_member(member_agent: Union[Agent, "Team"], task: str):
        """Prepare a member for a delegated task.

        Mutates the member (initialization, media/schema/filter propagation)
        and returns a tuple ``(member_agent_task, history)``: the final task
        text (possibly wrapped with team context/history) and, when the member
        has ``add_history_to_context`` enabled, its prior messages ending with
        the task as a user message.
        """
        # 1. Initialize the member agent
        _initialize_member(team, member_agent)
        # If team has send_media_to_model=False, ensure member agent also has it set to False
        # This allows tools to access files while preventing models from receiving them
        if not team.send_media_to_model:
            member_agent.send_media_to_model = False
        # 2. Handle respond_directly nuances
        if team.respond_directly:
            # Since we return the response directly from the member agent, we need to set the output schema from the team down.
            # Get output_schema from run_context
            team_output_schema = run_context.output_schema if run_context else None
            if not member_agent.output_schema and team_output_schema:
                member_agent.output_schema = team_output_schema
            # If the member will produce structured output, we need to parse the response
            if member_agent.output_schema is not None:
                team._member_response_model = member_agent.output_schema
        # 3. Handle enable_agentic_knowledge_filters on the member agent
        if team.enable_agentic_knowledge_filters and not member_agent.enable_agentic_knowledge_filters:
            member_agent.enable_agentic_knowledge_filters = team.enable_agentic_knowledge_filters
        # 4. Determine team context to send
        team_member_interactions_str = _determine_team_member_interactions(
            team, team_run_context, images=images, videos=videos, audio=audio, files=files
        )
        # 5. Get the team history
        team_history_str = None
        if team.add_team_history_to_members and session:
            team_history_str = session.get_team_history_context(num_runs=team.num_team_history_runs)
        # 6. Create the member agent task or use the input directly
        if team.determine_input_for_members is False:
            member_agent_task = input  # type: ignore
        else:
            member_agent_task = task
        if team_history_str or team_member_interactions_str:
            member_agent_task = format_member_agent_task(  # type: ignore
                task_description=member_agent_task or "",
                team_member_interactions_str=team_member_interactions_str,
                team_history_str=team_history_str,
            )
        # 7. Add member-level history for the member if enabled (because we won't load the session for the member, so history won't be loaded automatically)
        history = None
        if hasattr(member_agent, "add_history_to_context") and member_agent.add_history_to_context:
            history = _get_history_for_member_agent(team, session, member_agent)
        if history:
            if isinstance(member_agent_task, str):
                history.append(Message(role="user", content=member_agent_task))
        return member_agent_task, history
    def _process_delegate_task_to_member(
        member_agent_run_response: Optional[Union[TeamRunOutput, RunOutput]],
        member_agent: Union[Agent, "Team"],
        member_agent_task: Union[str, Message],
        member_session_state_copy: Dict[str, Any],
    ):
        """Post-process a finished (or paused) member run.

        Links the member run to the team run, records the interaction in the
        team run context, stores the (optionally scrubbed) run on the session,
        merges the member's session-state copy back into the team state, and
        propagates member media to the team run.
        """
        # Add team run id to the member run
        if member_agent_run_response is not None:
            member_agent_run_response.parent_run_id = run_response.run_id  # type: ignore
        # Update the top-level team run_response tool call to have the run_id of the member run
        if run_response.tools is not None and member_agent_run_response is not None:
            for tool in run_response.tools:
                if tool.tool_name and tool.tool_name.lower() == "delegate_task_to_member":
                    tool.child_run_id = member_agent_run_response.run_id  # type: ignore
        # Update the team run context
        member_name = member_agent.name if member_agent.name else member_agent.id if member_agent.id else "Unknown"
        # Normalize the task to plain text for the run-context record
        if isinstance(member_agent_task, str):
            normalized_task = member_agent_task
        elif member_agent_task.content:
            normalized_task = str(member_agent_task.content)
        else:
            normalized_task = ""
        add_interaction_to_team_run_context(
            team_run_context=team_run_context,
            member_name=member_name,
            task=normalized_task,
            run_response=member_agent_run_response,  # type: ignore
        )
        # Add the member run to the team run response if enabled
        if run_response and member_agent_run_response:
            run_response.add_member_run(member_agent_run_response)
        # Scrub the member run based on that member's storage flags before storing
        if member_agent_run_response:
            if (
                not member_agent.store_media
                or not member_agent.store_tool_messages
                or not member_agent.store_history_messages
            ):
                from agno.agent._run import scrub_run_output_for_storage

                scrub_run_output_for_storage(member_agent, run_response=member_agent_run_response)  # type: ignore[arg-type]
            # Add the member run to the team session
            session.upsert_run(member_agent_run_response)
        # Update team session state
        merge_dictionaries(run_context.session_state, member_session_state_copy)  # type: ignore
        # Update the team media
        if member_agent_run_response is not None:
            _update_team_media(team, member_agent_run_response)  # type: ignore
    def delegate_task_to_member(member_id: str, task: str) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
        """Use this function to delegate a task to the selected team member.
        You must provide a clear and concise description of the task the member should achieve AND the expected output.
        Args:
            member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
            task (str): A clear and concise description of the task the member should achieve.
        Returns:
            str: The result of the delegated task.
        """
        # Find the member agent using the helper function
        result = _find_member_by_id(team, member_id, run_context=run_context)
        if result is None:
            yield f"Member with ID {member_id} not found in the team or any subteams. Please choose the correct member from the list of members:\n\n{team.get_members_system_message_content(indent=0, run_context=run_context)}"
            return
        _, member_agent = result
        member_agent_task, history = _setup_delegate_task_to_member(member_agent=member_agent, task=task)
        # Make sure for the member agent, we are using the agent logger
        use_agent_logger()
        member_session_state_copy = copy(run_context.session_state)
        # Streaming: run the member in streaming mode and forward its events upward.
        if stream:
            member_agent_run_response_stream = member_agent.run(
                input=member_agent_task if not history else history,
                user_id=user_id,
                # All members have the same session_id
                session_id=session.session_id,
                session_state=member_session_state_copy,  # Send a copy to the agent
                images=images,
                videos=videos,
                audio=audio,
                files=files,
                stream=True,
                stream_events=stream_events or team.stream_member_events,
                debug_mode=debug_mode,
                dependencies=run_context.dependencies,
                add_dependencies_to_context=add_dependencies_to_context,
                metadata=run_context.metadata,
                add_session_state_to_context=add_session_state_to_context,
                knowledge_filters=run_context.knowledge_filters
                if not member_agent.knowledge_filters and member_agent.knowledge
                else None,
                yield_run_output=True,
            )
            member_agent_run_response = None
            for member_agent_run_output_event in member_agent_run_response_stream:
                # Do NOT break out of the loop, Iterator need to exit properly
                if isinstance(member_agent_run_output_event, (TeamRunOutput, RunOutput)):
                    member_agent_run_response = member_agent_run_output_event  # type: ignore
                    continue  # Don't yield TeamRunOutput or RunOutput, only yield events
                # Check if the run is cancelled
                check_if_run_cancelled(member_agent_run_output_event)
                # Yield the member event directly
                member_agent_run_output_event.parent_run_id = (
                    member_agent_run_output_event.parent_run_id or run_response.run_id
                )
                yield member_agent_run_output_event  # type: ignore
        else:
            member_agent_run_response = member_agent.run(  # type: ignore
                input=member_agent_task if not history else history,  # type: ignore
                user_id=user_id,
                # All members have the same session_id
                session_id=session.session_id,
                session_state=member_session_state_copy,  # Send a copy to the agent
                images=images,
                videos=videos,
                audio=audio,
                files=files,
                stream=False,
                debug_mode=debug_mode,
                dependencies=run_context.dependencies,
                add_dependencies_to_context=add_dependencies_to_context,
                add_session_state_to_context=add_session_state_to_context,
                metadata=run_context.metadata,
                knowledge_filters=run_context.knowledge_filters
                if not member_agent.knowledge_filters and member_agent.knowledge
                else None,
            )
            check_if_run_cancelled(member_agent_run_response)  # type: ignore
        # Check if the member run is paused (HITL)
        if member_agent_run_response is not None and member_agent_run_response.is_paused:
            _propagate_member_pause(run_response, member_agent, member_agent_run_response)
            use_team_logger()
            _process_delegate_task_to_member(
                member_agent_run_response,
                member_agent,
                member_agent_task,  # type: ignore
                member_session_state_copy,  # type: ignore
            )
            yield f"Member '{member_agent.name}' requires human input before continuing."
            return
        # Non-streaming: yield the member's result as a single string.
        if not stream:
            try:
                if member_agent_run_response.content is None and (  # type: ignore
                    member_agent_run_response.tools is None or len(member_agent_run_response.tools) == 0  # type: ignore
                ):
                    yield "No response from the member agent."
                elif isinstance(member_agent_run_response.content, str):  # type: ignore
                    content = member_agent_run_response.content.strip()  # type: ignore
                    if len(content) > 0:
                        yield content
                    # If the content is empty but we have tool calls
                    elif member_agent_run_response.tools is not None and len(member_agent_run_response.tools) > 0:  # type: ignore
                        tool_str = ""
                        for tool in member_agent_run_response.tools:  # type: ignore
                            if tool.result:
                                tool_str += f"{tool.result},"
                        yield tool_str.rstrip(",")
                elif issubclass(type(member_agent_run_response.content), BaseModel):  # type: ignore
                    yield member_agent_run_response.content.model_dump_json(indent=2)  # type: ignore
                else:
                    import json

                    yield json.dumps(member_agent_run_response.content, indent=2)  # type: ignore
            except Exception as e:
                yield str(e)
        # Afterward, switch back to the team logger
        use_team_logger()
        _process_delegate_task_to_member(
            member_agent_run_response,
            member_agent,
            member_agent_task,  # type: ignore
            member_session_state_copy,  # type: ignore
        )
    async def adelegate_task_to_member(
        member_id: str, task: str
    ) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
        """Use this function to delegate a task to the selected team member.
        You must provide a clear and concise description of the task the member should achieve AND the expected output.
        Args:
            member_id (str): The ID of the member to delegate the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.
            task (str): A clear and concise description of the task the member should achieve.
        Returns:
            str: The result of the delegated task.
        """
        # Find the member agent using the helper function
        result = _find_member_by_id(team, member_id, run_context=run_context)
        if result is None:
            yield f"Member with ID {member_id} not found in the team or any subteams. Please choose the correct member from the list of members:\n\n{team.get_members_system_message_content(indent=0, run_context=run_context)}"
            return
        _, member_agent = result
        member_agent_task, history = _setup_delegate_task_to_member(member_agent=member_agent, task=task)
        # Make sure for the member agent, we are using the agent logger
        use_agent_logger()
        member_session_state_copy = copy(run_context.session_state)
        # Streaming: run the member asynchronously and forward its events upward.
        if stream:
            member_agent_run_response_stream = member_agent.arun(  # type: ignore
                input=member_agent_task if not history else history,
                user_id=user_id,
                # All members have the same session_id
                session_id=session.session_id,
                session_state=member_session_state_copy,  # Send a copy to the agent
                images=images,
                videos=videos,
                audio=audio,
                files=files,
                stream=True,
                stream_events=stream_events or team.stream_member_events,
                debug_mode=debug_mode,
                dependencies=run_context.dependencies,
                add_dependencies_to_context=add_dependencies_to_context,
                add_session_state_to_context=add_session_state_to_context,
                metadata=run_context.metadata,
                knowledge_filters=run_context.knowledge_filters
                if not member_agent.knowledge_filters and member_agent.knowledge
                else None,
                yield_run_output=True,
            )
            member_agent_run_response = None
            async for member_agent_run_response_event in member_agent_run_response_stream:
                # Do NOT break out of the loop, AsyncIterator need to exit properly
                if isinstance(member_agent_run_response_event, (TeamRunOutput, RunOutput)):
                    member_agent_run_response = member_agent_run_response_event  # type: ignore
                    continue  # Don't yield TeamRunOutput or RunOutput, only yield events
                # Check if the run is cancelled
                check_if_run_cancelled(member_agent_run_response_event)
                # Yield the member event directly
                member_agent_run_response_event.parent_run_id = getattr(
                    member_agent_run_response_event, "parent_run_id", None
                ) or (run_response.run_id if run_response is not None else None)
                yield member_agent_run_response_event  # type: ignore
        else:
            member_agent_run_response = await member_agent.arun(  # type: ignore
                input=member_agent_task if not history else history,
                user_id=user_id,
                # All members have the same session_id
                session_id=session.session_id,
                session_state=member_session_state_copy,  # Send a copy to the agent
                images=images,
                videos=videos,
                audio=audio,
                files=files,
                stream=False,
                debug_mode=debug_mode,
                dependencies=run_context.dependencies,
                add_dependencies_to_context=add_dependencies_to_context,
                add_session_state_to_context=add_session_state_to_context,
                metadata=run_context.metadata,
                knowledge_filters=run_context.knowledge_filters
                if not member_agent.knowledge_filters and member_agent.knowledge
                else None,
            )
            check_if_run_cancelled(member_agent_run_response)  # type: ignore
        # Check if the member run is paused (HITL)
        if member_agent_run_response is not None and member_agent_run_response.is_paused:
            _propagate_member_pause(run_response, member_agent, member_agent_run_response)
            use_team_logger()
            _process_delegate_task_to_member(
                member_agent_run_response,
                member_agent,
                member_agent_task,  # type: ignore
                member_session_state_copy,  # type: ignore
            )
            yield f"Member '{member_agent.name}' requires human input before continuing."
            return
        # Non-streaming: yield the member's result as a single string.
        if not stream:
            try:
                if member_agent_run_response.content is None and (  # type: ignore
                    member_agent_run_response.tools is None or len(member_agent_run_response.tools) == 0  # type: ignore
                ):
                    yield "No response from the member agent."
                elif isinstance(member_agent_run_response.content, str):  # type: ignore
                    if len(member_agent_run_response.content.strip()) > 0:  # type: ignore
                        yield member_agent_run_response.content  # type: ignore
                    # If the content is empty but we have tool calls
                    elif (
                        member_agent_run_response.tools is not None  # type: ignore
                        and len(member_agent_run_response.tools) > 0  # type: ignore
                    ):
                        yield ",".join([tool.result for tool in member_agent_run_response.tools if tool.result])  # type: ignore
                elif issubclass(type(member_agent_run_response.content), BaseModel):  # type: ignore
                    yield member_agent_run_response.content.model_dump_json(indent=2)  # type: ignore
                else:
                    import json

                    yield json.dumps(member_agent_run_response.content, indent=2)  # type: ignore
            except Exception as e:
                yield str(e)
        # Afterward, switch back to the team logger
        use_team_logger()
        _process_delegate_task_to_member(
            member_agent_run_response,
            member_agent,
            member_agent_task,  # type: ignore
            member_session_state_copy,  # type: ignore
        )
# When the task should be delegated to all members
def delegate_task_to_members(task: str) -> Iterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
"""
Use this function to delegate a task to all the member agents and return a response.
You must provide a clear and concise description of the task the member should achieve AND the expected output.
Args:
task (str): A clear and concise description of the task to send to member agents.
Returns:
str: The result of the delegated task.
"""
from agno.utils.callables import get_resolved_members
resolved_members = get_resolved_members(team, run_context) or []
# Run all the members sequentially
for _, member_agent in enumerate(resolved_members):
member_agent_task, history = _setup_delegate_task_to_member(member_agent=member_agent, task=task)
member_session_state_copy = copy(run_context.session_state)
if stream:
member_agent_run_response_stream = member_agent.run(
input=member_agent_task if not history else history,
user_id=user_id,
# All members have the same session_id
session_id=session.session_id,
session_state=member_session_state_copy, # Send a copy to the agent
images=images,
videos=videos,
audio=audio,
files=files,
stream=True,
stream_events=stream_events or team.stream_member_events,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
debug_mode=debug_mode,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
yield_run_output=True,
)
member_agent_run_response = None
for member_agent_run_response_chunk in member_agent_run_response_stream:
# Do NOT break out of the loop, Iterator need to exit properly
if isinstance(member_agent_run_response_chunk, (TeamRunOutput, RunOutput)):
member_agent_run_response = member_agent_run_response_chunk # type: ignore
continue # Don't yield TeamRunOutput or RunOutput, only yield events
# Check if the run is cancelled
check_if_run_cancelled(member_agent_run_response_chunk)
# Yield the member event directly
member_agent_run_response_chunk.parent_run_id = member_agent_run_response_chunk.parent_run_id or (
run_response.run_id if run_response is not None else None
)
yield member_agent_run_response_chunk # type: ignore
else:
member_agent_run_response = member_agent.run( # type: ignore
input=member_agent_task if not history else history,
user_id=user_id,
# All members have the same session_id
session_id=session.session_id,
session_state=member_session_state_copy, # Send a copy to the agent
images=images,
videos=videos,
audio=audio,
files=files,
stream=False,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
debug_mode=debug_mode,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
)
check_if_run_cancelled(member_agent_run_response) # type: ignore
# Check if the member run is paused (HITL)
if member_agent_run_response is not None and member_agent_run_response.is_paused:
_propagate_member_pause(run_response, member_agent, member_agent_run_response)
use_team_logger()
_process_delegate_task_to_member(
member_agent_run_response,
member_agent,
member_agent_task, # type: ignore
member_session_state_copy, # type: ignore
)
yield f"Agent {member_agent.name}: Requires human input before continuing."
continue
if not stream:
try:
if member_agent_run_response.content is None and ( # type: ignore
member_agent_run_response.tools is None or len(member_agent_run_response.tools) == 0 # type: ignore
):
yield f"Agent {member_agent.name}: No response from the member agent."
elif isinstance(member_agent_run_response.content, str): # type: ignore
if len(member_agent_run_response.content.strip()) > 0: # type: ignore
yield f"Agent {member_agent.name}: {member_agent_run_response.content}" # type: ignore
elif (
member_agent_run_response.tools is not None and len(member_agent_run_response.tools) > 0 # type: ignore
):
yield f"Agent {member_agent.name}: {','.join([tool.result for tool in member_agent_run_response.tools])}" # type: ignore
elif issubclass(type(member_agent_run_response.content), BaseModel): # type: ignore
yield f"Agent {member_agent.name}: {member_agent_run_response.content.model_dump_json(indent=2)}" # type: ignore
else:
import json
yield f"Agent {member_agent.name}: {json.dumps(member_agent_run_response.content, indent=2)}" # type: ignore
except Exception as e:
yield f"Agent {member_agent.name}: Error - {str(e)}"
_process_delegate_task_to_member(
member_agent_run_response,
member_agent,
member_agent_task, # type: ignore
member_session_state_copy, # type: ignore
)
# After all the member runs, switch back to the team logger
use_team_logger()
# When the task should be delegated to all members
async def adelegate_task_to_members(task: str) -> AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, str]]:
"""Use this function to delegate a task to all the member agents and return a response.
You must provide a clear and concise description of the task to send to member agents.
Args:
task (str): A clear and concise description of the task to send to member agents.
Returns:
str: The result of the delegated task.
"""
from agno.utils.callables import get_resolved_members
resolved_members = get_resolved_members(team, run_context) or []
if stream:
# Concurrent streaming: launch each member as a streaming worker and merge events
done_marker = object()
queue: "asyncio.Queue[Union[RunOutputEvent, TeamRunOutputEvent, str, object]]" = asyncio.Queue()
async def stream_member(agent: Union[Agent, "Team"]) -> None:
member_agent_task, history = _setup_delegate_task_to_member(member_agent=agent, task=task) # type: ignore
member_session_state_copy = copy(run_context.session_state)
member_stream = agent.arun( # type: ignore
input=member_agent_task if not history else history,
user_id=user_id,
session_id=session.session_id,
session_state=member_session_state_copy, # Send a copy to the agent
images=images,
videos=videos,
audio=audio,
files=files,
stream=True,
stream_events=stream_events or team.stream_member_events,
debug_mode=debug_mode,
knowledge_filters=run_context.knowledge_filters
if not agent.knowledge_filters and agent.knowledge
else None,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
yield_run_output=True,
)
member_agent_run_response = None
try:
try:
async for member_agent_run_output_event in member_stream:
# Do NOT break out of the loop, AsyncIterator need to exit properly
if isinstance(member_agent_run_output_event, (TeamRunOutput, RunOutput)):
member_agent_run_response = member_agent_run_output_event # type: ignore
continue # Don't yield TeamRunOutput or RunOutput, only yield events
check_if_run_cancelled(member_agent_run_output_event)
member_agent_run_output_event.parent_run_id = (
member_agent_run_output_event.parent_run_id
or (run_response.run_id if run_response is not None else None)
)
await queue.put(member_agent_run_output_event)
finally:
# Check if the member run is paused (HITL)
if member_agent_run_response is not None and member_agent_run_response.is_paused:
_propagate_member_pause(run_response, agent, member_agent_run_response)
_process_delegate_task_to_member(
member_agent_run_response,
agent,
member_agent_task, # type: ignore
member_session_state_copy, # type: ignore
)
await queue.put(f"Agent {agent.name}: Requires human input before continuing.")
else:
_process_delegate_task_to_member(
member_agent_run_response,
agent,
member_agent_task, # type: ignore
member_session_state_copy, # type: ignore
)
finally:
await queue.put(done_marker)
# Initialize and launch all members
tasks: List[asyncio.Task[None]] = []
for member_agent in resolved_members:
current_agent = member_agent
_initialize_member(team, current_agent)
tasks.append(asyncio.create_task(stream_member(current_agent)))
# Drain queue until all members reported done
completed = 0
try:
while completed < len(tasks):
item = await queue.get()
if item is done_marker:
completed += 1
else:
yield item # type: ignore
finally:
# Ensure tasks do not leak on cancellation
for t in tasks:
if not t.done():
t.cancel()
# Await cancellation to suppress warnings
for t in tasks:
with contextlib.suppress(Exception, asyncio.CancelledError):
await t
else:
# Non-streaming concurrent run of members; collect results when done
tasks = []
for member_agent_index, member_agent in enumerate(resolved_members):
current_agent = member_agent
member_agent_task, history = _setup_delegate_task_to_member(member_agent=current_agent, task=task)
async def run_member_agent(
member_agent=current_agent,
member_agent_task=member_agent_task,
history=history,
member_agent_index=member_agent_index,
) -> tuple[str, Optional[Union[Agent, "Team"]], Optional[Union[RunOutput, TeamRunOutput]]]:
member_session_state_copy = copy(run_context.session_state)
member_agent_run_response = await member_agent.arun(
input=member_agent_task if not history else history,
user_id=user_id,
# All members have the same session_id
session_id=session.session_id,
session_state=member_session_state_copy, # Send a copy to the agent
images=images,
videos=videos,
audio=audio,
files=files,
stream=False,
stream_events=stream_events,
debug_mode=debug_mode,
knowledge_filters=run_context.knowledge_filters
if not member_agent.knowledge_filters and member_agent.knowledge
else None,
dependencies=run_context.dependencies,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
metadata=run_context.metadata,
)
check_if_run_cancelled(member_agent_run_response)
member_name = member_agent.name if member_agent.name else f"agent_{member_agent_index}"
# Check if the member run is paused (HITL) before processing
if member_agent_run_response is not None and member_agent_run_response.is_paused:
_process_delegate_task_to_member(
member_agent_run_response,
member_agent,
member_agent_task, # type: ignore
member_session_state_copy, # type: ignore
)
return (
f"Agent {member_name}: Requires human input before continuing.",
member_agent,
member_agent_run_response,
)
_process_delegate_task_to_member(
member_agent_run_response,
member_agent,
member_agent_task, # type: ignore
member_session_state_copy, # type: ignore
)
try:
if member_agent_run_response.content is None and (
member_agent_run_response.tools is None or len(member_agent_run_response.tools) == 0
):
return (f"Agent {member_name}: No response from the member agent.", None, None)
elif isinstance(member_agent_run_response.content, str):
if len(member_agent_run_response.content.strip()) > 0:
return (f"Agent {member_name}: {member_agent_run_response.content}", None, None)
elif (
member_agent_run_response.tools is not None and len(member_agent_run_response.tools) > 0
):
return (
f"Agent {member_name}: {','.join([tool.result for tool in member_agent_run_response.tools])}",
None,
None,
)
elif issubclass(type(member_agent_run_response.content), BaseModel):
return (
f"Agent {member_name}: {member_agent_run_response.content.model_dump_json(indent=2)}", # type: ignore
None,
None,
)
else:
import json
return (
f"Agent {member_name}: {json.dumps(member_agent_run_response.content, indent=2)}",
None,
None,
)
except Exception as e:
return (f"Agent {member_name}: Error - {str(e)}", None, None)
return (f"Agent {member_name}: No Response", None, None)
tasks.append(run_member_agent) # type: ignore
results = await asyncio.gather(*[task() for task in tasks]) # type: ignore
# Propagate pauses sequentially after all members complete
for result_text, paused_agent, paused_run_response in results:
if paused_agent is not None and paused_run_response is not None:
_propagate_member_pause(run_response, paused_agent, paused_run_response)
yield result_text
# After all the member runs, switch back to the team logger
use_team_logger()
if team.delegate_to_all_members:
if async_mode:
delegate_function = adelegate_task_to_members # type: ignore
else:
delegate_function = delegate_task_to_members # type: ignore
delegate_func = Function.from_callable(delegate_function, name="delegate_task_to_members")
else:
if async_mode:
delegate_function = adelegate_task_to_member # type: ignore
else:
delegate_function = delegate_task_to_member # type: ignore
delegate_func = Function.from_callable(delegate_function, name="delegate_task_to_member")
if team.respond_directly:
delegate_func.stop_after_tool_call = True
delegate_func.show_result = True
return delegate_func
def add_to_knowledge(team: "Team", query: str, result: str) -> str:
    """Use this function to add information to the knowledge base for future use.
    Args:
        query (str): The query or topic to add.
        result (str): The actual content or information to store.
    Returns:
        str: A string indicating the status of the addition.
    """
    from agno.utils.callables import get_resolved_knowledge

    knowledge = get_resolved_knowledge(team, None)
    if knowledge is None:
        log_warning("Knowledge is not set, cannot add to knowledge")
        return "Knowledge is not set, cannot add to knowledge"
    insert_method = getattr(knowledge, "insert", None)
    if not callable(insert_method):
        log_warning("Knowledge base does not support adding content")
        return "Knowledge base does not support adding content"
    # Derive a document name from the query: spaces -> underscores, strip ? ! .
    document_name = query.translate(str.maketrans({" ": "_", "?": "", "!": "", ".": ""}))
    # Store query and result together as a single JSON payload.
    document_content = json.dumps({"query": query, "result": result})
    log_info(f"Adding document to Knowledge: {document_name}: {document_content}")
    from agno.knowledge.reader.text_reader import TextReader

    insert_method(name=document_name, text_content=document_content, reader=TextReader())
    return "Successfully added to knowledge base"
def create_knowledge_search_tool(
team: "Team",
run_response: Optional[TeamRunOutput] = None,
run_context: Optional[RunContext] = None,
knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
enable_agentic_filters: Optional[bool] = False,
async_mode: bool = False,
) -> Function:
"""Create a unified search_knowledge_base tool for Team.
Routes all knowledge searches through get_relevant_docs_from_knowledge(),
which checks knowledge_retriever first and falls back to knowledge.search().
"""
def _format_results(docs: Optional[List[Union[Dict[str, Any], str]]]) -> str:
if not docs:
return "No documents found"
if team.references_format == "json":
return json.dumps(docs, indent=2, default=str)
else:
import yaml
return yaml.dump(docs, default_flow_style=False)
def _track_references(docs: Optional[List[Union[Dict[str, Any], str]]], query: str, elapsed: float) -> None:
if run_response is not None and docs:
references = MessageReferences(
query=query,
references=docs,
time=round(elapsed, 4),
)
if run_response.references is None:
run_response.references = []
run_response.references.append(references)
def _resolve_filters(
agentic_filters: Optional[List[Any]] = None,
) -> Optional[Union[Dict[str, Any], List[FilterExpr]]]:
if agentic_filters:
filters_dict: Dict[str, Any] = {}
for filt in agentic_filters:
if isinstance(filt, dict):
filters_dict.update(filt)
elif hasattr(filt, "key") and hasattr(filt, "value"):
filters_dict[filt.key] = filt.value
return get_agentic_or_user_search_filters(filters_dict, knowledge_filters)
return knowledge_filters
if enable_agentic_filters:
def search_knowledge_base_with_filters(query: str, filters: Optional[List[KnowledgeFilter]] = None) -> str:
"""Use this function to search the knowledge base for information about a query.
Args:
query: The query to search for.
filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.
Returns:
str: A string containing the response from the knowledge base.
"""
retrieval_timer = Timer()
retrieval_timer.start()
try:
docs = get_relevant_docs_from_knowledge(
team,
query=query,
filters=_resolve_filters(filters),
validate_filters=True,
run_context=run_context,
)
except Exception as e:
log_warning(f"Knowledge search failed: {e}")
return f"Error searching knowledge base: {type(e).__name__}"
_track_references(docs, query, retrieval_timer.elapsed)
retrieval_timer.stop()
log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
return _format_results(docs)
async def asearch_knowledge_base_with_filters(
query: str, filters: Optional[List[KnowledgeFilter]] = None
) -> str:
"""Use this function to search the knowledge base for information about a query.
Args:
query: The query to search for.
filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.
Returns:
str: A string containing the response from the knowledge base.
"""
retrieval_timer = Timer()
retrieval_timer.start()
try:
docs = await aget_relevant_docs_from_knowledge(
team,
query=query,
filters=_resolve_filters(filters),
validate_filters=True,
run_context=run_context,
)
except Exception as e:
log_warning(f"Knowledge search failed: {e}")
return f"Error searching knowledge base: {type(e).__name__}"
_track_references(docs, query, retrieval_timer.elapsed)
retrieval_timer.stop()
log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
return _format_results(docs)
if async_mode:
return Function.from_callable(asearch_knowledge_base_with_filters, name="search_knowledge_base")
return Function.from_callable(search_knowledge_base_with_filters, name="search_knowledge_base")
else:
def search_knowledge_base(query: str) -> str:
    """Use this function to search the knowledge base for information about a query.
    Args:
        query: The query to search for.
    Returns:
        str: A string containing the response from the knowledge base.
    """
    timer = Timer()
    timer.start()
    try:
        documents = get_relevant_docs_from_knowledge(
            team,
            query=query,
            filters=knowledge_filters,
            run_context=run_context,
        )
    except Exception as exc:
        log_warning(f"Knowledge search failed: {exc}")
        return f"Error searching knowledge base: {type(exc).__name__}"
    else:
        # Track references with the live elapsed time, then finalize the timer.
        _track_references(documents, query, timer.elapsed)
        timer.stop()
        log_debug(f"Time to get references: {timer.elapsed:.4f}s")
        return _format_results(documents)
async def asearch_knowledge_base(query: str) -> str:
    """Use this function to search the knowledge base for information about a query.
    Args:
        query: The query to search for.
    Returns:
        str: A string containing the response from the knowledge base.
    """
    timer = Timer()
    timer.start()
    try:
        documents = await aget_relevant_docs_from_knowledge(
            team,
            query=query,
            filters=knowledge_filters,
            run_context=run_context,
        )
    except Exception as exc:
        log_warning(f"Knowledge search failed: {exc}")
        return f"Error searching knowledge base: {type(exc).__name__}"
    else:
        # Track references with the live elapsed time, then finalize the timer.
        _track_references(documents, query, timer.elapsed)
        timer.stop()
        log_debug(f"Time to get references: {timer.elapsed:.4f}s")
        return _format_results(documents)
if async_mode:
return Function.from_callable(asearch_knowledge_base, name="search_knowledge_base")
return Function.from_callable(search_knowledge_base, name="search_knowledge_base")
def get_relevant_docs_from_knowledge(
    team: "Team",
    query: str,
    num_documents: Optional[int] = None,
    filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    validate_filters: bool = False,
    run_context: Optional[RunContext] = None,
    **kwargs,
) -> Optional[List[Union[Dict[str, Any], str]]]:
    """Return a list of references from the knowledge base.

    Resolution order: a custom ``team.knowledge_retriever`` (if set and callable)
    takes precedence; otherwise the resolved knowledge object's ``retrieve()``
    method is used.

    Args:
        team: The team whose knowledge (or custom retriever) is queried.
        query: The search query.
        num_documents: Maximum documents to return. Defaults to the knowledge
            object's ``max_results`` attribute (falling back to 10 in the
            protocol path).
        filters: Filters to apply, as a dict or a list of FilterExpr.
        validate_filters: When True and the knowledge object exposes
            ``validate_filters``, invalid filter keys are dropped with a warning.
        run_context: Optional run context; also supplies ``dependencies`` for
            legacy retriever signatures.
        **kwargs: Extra keyword arguments forwarded to a custom retriever.

    Returns:
        A list of document dicts (or whatever a custom retriever returns), or
        None when no knowledge is configured or no documents are found.

    Raises:
        Exception: Errors from the custom retriever or the knowledge lookup are
            logged as warnings and re-raised.
    """
    from agno.knowledge.document import Document
    from agno.utils.callables import get_resolved_knowledge

    knowledge = get_resolved_knowledge(team, run_context)

    # Extract dependencies from run_context if available (only used for
    # backward-compatible custom retriever signatures below).
    dependencies = run_context.dependencies if run_context else None

    if num_documents is None and knowledge is not None:
        num_documents = getattr(knowledge, "max_results", None)

    # Validate the filters against known valid filter keys, when supported.
    if knowledge is not None and filters is not None and validate_filters:
        validate_filters_method = getattr(knowledge, "validate_filters", None)
        if callable(validate_filters_method):
            valid_filters, invalid_keys = validate_filters_method(filters)
            # Warn about invalid filter keys
            if invalid_keys:
                log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
            # Only use valid filters
            filters = valid_filters
            if not filters:
                log_warning("No valid filters remain after validation. Search will proceed without filters.")
            # Caller passed filters but validation yielded nothing at all:
            # normalize to None so the retrieve call sees "no filters".
            if invalid_keys == [] and valid_filters == {}:
                log_debug("No valid filters provided. Search will proceed without filters.")
                filters = None

    # Custom retriever path: build kwargs based on the retriever's signature.
    if team.knowledge_retriever is not None and callable(team.knowledge_retriever):
        from inspect import signature

        try:
            sig = signature(team.knowledge_retriever)
            knowledge_retriever_kwargs: Dict[str, Any] = {}
            if "team" in sig.parameters:
                knowledge_retriever_kwargs = {"team": team}
            if "filters" in sig.parameters:
                knowledge_retriever_kwargs["filters"] = filters
            if "run_context" in sig.parameters:
                knowledge_retriever_kwargs["run_context"] = run_context
            elif "dependencies" in sig.parameters:
                # Backward compatibility: support dependencies parameter
                knowledge_retriever_kwargs["dependencies"] = dependencies
            knowledge_retriever_kwargs.update({"query": query, "num_documents": num_documents, **kwargs})
            return team.knowledge_retriever(**knowledge_retriever_kwargs)
        except Exception as e:
            log_warning(f"Knowledge retriever failed: {e}")
            raise e

    # Use knowledge protocol's retrieve method
    try:
        if knowledge is None:
            return None
        # Use protocol retrieve() method if available
        retrieve_fn = getattr(knowledge, "retrieve", None)
        if not callable(retrieve_fn):
            log_debug("Knowledge does not implement retrieve()")
            return None
        if num_documents is None:
            num_documents = getattr(knowledge, "max_results", 10)
        log_debug(f"Retrieving from knowledge base with filters: {filters}")
        relevant_docs: List[Document] = retrieve_fn(query=query, max_results=num_documents, filters=filters)
        if not relevant_docs or len(relevant_docs) == 0:
            log_debug("No relevant documents found for query")
            return None
        return [doc.to_dict() for doc in relevant_docs]
    except Exception as e:
        log_warning(f"Error retrieving from knowledge base: {e}")
        raise e
async def aget_relevant_docs_from_knowledge(
    team: "Team",
    query: str,
    num_documents: Optional[int] = None,
    filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    validate_filters: bool = False,
    run_context: Optional[RunContext] = None,
    **kwargs,
) -> Optional[List[Union[Dict[str, Any], str]]]:
    """Get relevant documents from knowledge base asynchronously.

    Async counterpart of :func:`get_relevant_docs_from_knowledge`. Resolution
    order: a custom ``team.knowledge_retriever`` (awaited if it returns an
    awaitable) takes precedence; otherwise the knowledge object's
    ``aretrieve()`` is used, falling back to synchronous ``retrieve()``.

    Args:
        team: The team whose knowledge (or custom retriever) is queried.
        query: The search query.
        num_documents: Maximum documents to return. Defaults to the knowledge
            object's ``max_results`` attribute (falling back to 10 in the
            protocol path).
        filters: Filters to apply, as a dict or a list of FilterExpr.
        validate_filters: When True and the knowledge object exposes
            ``avalidate_filters``, invalid filter keys are dropped with a warning.
        run_context: Optional run context; also supplies ``dependencies`` for
            legacy retriever signatures.
        **kwargs: Extra keyword arguments forwarded to a custom retriever.

    Returns:
        A list of document dicts (or whatever a custom retriever returns), or
        None when no knowledge is configured or no documents are found.

    Raises:
        Exception: Errors from the custom retriever or the knowledge lookup are
            logged as warnings and re-raised.
    """
    from agno.knowledge.document import Document
    from agno.utils.callables import get_resolved_knowledge

    knowledge = get_resolved_knowledge(team, run_context)

    # Extract dependencies from run_context if available (only used for
    # backward-compatible custom retriever signatures below).
    dependencies = run_context.dependencies if run_context else None

    if num_documents is None and knowledge is not None:
        num_documents = getattr(knowledge, "max_results", None)

    # Validate the filters against known valid filter keys, when supported.
    if knowledge is not None and filters is not None and validate_filters:
        avalidate_filters_method = getattr(knowledge, "avalidate_filters", None)
        if callable(avalidate_filters_method):
            valid_filters, invalid_keys = await avalidate_filters_method(filters)
            # Warn about invalid filter keys
            if invalid_keys:
                log_warning(f"Invalid filter keys provided: {invalid_keys}. These filters will be ignored.")
            # Only use valid filters
            filters = valid_filters
            if not filters:
                log_warning("No valid filters remain after validation. Search will proceed without filters.")
            # Caller passed filters but validation yielded nothing at all:
            # normalize to None so the retrieve call sees "no filters".
            if invalid_keys == [] and valid_filters == {}:
                log_debug("No valid filters provided. Search will proceed without filters.")
                filters = None

    # Custom retriever path: build kwargs based on the retriever's signature.
    if team.knowledge_retriever is not None and callable(team.knowledge_retriever):
        from inspect import isawaitable, signature

        try:
            sig = signature(team.knowledge_retriever)
            knowledge_retriever_kwargs: Dict[str, Any] = {}
            if "team" in sig.parameters:
                knowledge_retriever_kwargs = {"team": team}
            if "filters" in sig.parameters:
                knowledge_retriever_kwargs["filters"] = filters
            if "run_context" in sig.parameters:
                knowledge_retriever_kwargs["run_context"] = run_context
            elif "dependencies" in sig.parameters:
                # Backward compatibility: support dependencies parameter
                knowledge_retriever_kwargs["dependencies"] = dependencies
            knowledge_retriever_kwargs.update({"query": query, "num_documents": num_documents, **kwargs})
            result = team.knowledge_retriever(**knowledge_retriever_kwargs)
            # Support both sync and async custom retrievers.
            if isawaitable(result):
                result = await result
            return result
        except Exception as e:
            log_warning(f"Knowledge retriever failed: {e}")
            raise e

    # Use knowledge protocol's retrieve method
    try:
        if knowledge is None:
            return None
        # Use protocol aretrieve() or retrieve() method if available
        aretrieve_fn = getattr(knowledge, "aretrieve", None)
        retrieve_fn = getattr(knowledge, "retrieve", None)
        if not callable(aretrieve_fn) and not callable(retrieve_fn):
            log_debug("Knowledge does not implement retrieve()")
            return None
        if num_documents is None:
            num_documents = getattr(knowledge, "max_results", 10)
        log_debug(f"Retrieving from knowledge base with filters: {filters}")
        if callable(aretrieve_fn):
            relevant_docs: List[Document] = await aretrieve_fn(query=query, max_results=num_documents, filters=filters)
        elif callable(retrieve_fn):
            # Fall back to the synchronous retrieve() when no async variant exists.
            relevant_docs = retrieve_fn(query=query, max_results=num_documents, filters=filters)
        else:
            return None
        if not relevant_docs or len(relevant_docs) == 0:
            log_debug("No relevant documents found for query")
            return None
        return [doc.to_dict() for doc in relevant_docs]
    except Exception as e:
        log_warning(f"Error retrieving from knowledge base: {e}")
        raise e
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_default_tools.py",
"license": "Apache License 2.0",
"lines": 1275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_hooks.py | """Hook execution helpers for Team."""
from __future__ import annotations
from inspect import iscoroutinefunction
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Callable,
Iterator,
List,
Optional,
Union,
)
from agno.exceptions import (
InputCheckError,
OutputCheckError,
)
from agno.run import RunContext, RunStatus
from agno.run.agent import RunOutputEvent
from agno.run.team import (
TeamRunInput,
TeamRunOutput,
TeamRunOutputEvent,
)
from agno.session import TeamSession
from agno.utils.events import (
create_team_post_hook_completed_event,
create_team_post_hook_started_event,
create_team_pre_hook_completed_event,
create_team_pre_hook_started_event,
create_team_run_paused_event,
handle_event,
)
from agno.utils.hooks import (
copy_args_for_background,
filter_hook_args,
is_guardrail_hook,
should_run_hook_in_background,
)
from agno.utils.log import (
log_debug,
log_error,
log_exception,
)
if TYPE_CHECKING:
from agno.team.team import Team
# ---------------------------------------------------------------------------
# HITL pause handlers
# ---------------------------------------------------------------------------
def _get_team_paused_content(run_response: TeamRunOutput) -> str:
    """Build a human-readable summary of why a team run is paused.

    Lists each unresolved requirement as a bullet line; falls back to a plain
    "Team run paused." message when nothing is outstanding.
    """
    pending = [r for r in (run_response.requirements or []) if not r.is_resolved()]
    if not pending:
        return "Team run paused."
    bullet_lines: list[str] = []
    for requirement in pending:
        who = requirement.member_agent_name or "team"
        tool = requirement.tool_execution.tool_name if requirement.tool_execution else "unknown"
        if requirement.needs_confirmation:
            bullet_lines.append(f"- {who}: {tool} requires confirmation")
        elif requirement.needs_user_input:
            bullet_lines.append(f"- {who}: {tool} requires user input")
        elif requirement.needs_external_execution:
            bullet_lines.append(f"- {who}: {tool} requires external execution")
    return "Team run paused. The following require input:\n" + "\n".join(bullet_lines)
def handle_team_run_paused(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> TeamRunOutput:
    """Finalize a non-streaming team run that paused for HITL input:
    record approvals, emit the pause event, and persist the run."""
    from agno.run.approval import create_approval_from_pause
    from agno.team._run import _cleanup_and_store

    run_response.status = RunStatus.paused
    run_response.content = run_response.content or _get_team_paused_content(run_response)
    # Approval ids must be stamped on tools before the event is built and the
    # run is stored, so the DB row carries the complete data.
    create_approval_from_pause(
        db=team.db, run_response=run_response, team_id=team.id, team_name=team.name, user_id=team.user_id
    )
    paused_event = create_team_run_paused_event(
        from_run_response=run_response,
        tools=run_response.tools,
        requirements=run_response.requirements,
    )
    handle_event(
        paused_event,
        run_response,
        events_to_skip=team.events_to_skip,  # type: ignore
        store_events=team.store_events,
    )
    _cleanup_and_store(team, run_response=run_response, session=session, run_context=run_context)
    log_debug(f"Team Run Paused: {run_response.run_id}", center=True, symbol="*")
    return run_response
def handle_team_run_paused_stream(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
    """Finalize a streaming team run that paused for HITL input, yielding the
    pause event (when not skipped) after the run has been persisted."""
    from agno.run.approval import create_approval_from_pause
    from agno.team._run import _cleanup_and_store

    run_response.status = RunStatus.paused
    run_response.content = run_response.content or _get_team_paused_content(run_response)
    # Approval ids must be stamped on tools before the event is built and the
    # run is stored, so the DB row carries the complete data.
    create_approval_from_pause(
        db=team.db, run_response=run_response, team_id=team.id, team_name=team.name, user_id=team.user_id
    )
    emitted = handle_event(
        create_team_run_paused_event(
            from_run_response=run_response,
            tools=run_response.tools,
            requirements=run_response.requirements,
        ),
        run_response,
        events_to_skip=team.events_to_skip,  # type: ignore
        store_events=team.store_events,
    )
    _cleanup_and_store(team, run_response=run_response, session=session, run_context=run_context)
    if emitted is not None:
        yield emitted
    log_debug(f"Team Run Paused: {run_response.run_id}", center=True, symbol="*")
async def ahandle_team_run_paused(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> TeamRunOutput:
    """Async variant of ``handle_team_run_paused``: record approvals, emit the
    pause event, and persist the run."""
    from agno.run.approval import acreate_approval_from_pause
    from agno.team._run import _acleanup_and_store

    run_response.status = RunStatus.paused
    run_response.content = run_response.content or _get_team_paused_content(run_response)
    # Approval ids must be stamped on tools before the event is built and the
    # run is stored, so the DB row carries the complete data.
    await acreate_approval_from_pause(
        db=team.db, run_response=run_response, team_id=team.id, team_name=team.name, user_id=team.user_id
    )
    paused_event = create_team_run_paused_event(
        from_run_response=run_response,
        tools=run_response.tools,
        requirements=run_response.requirements,
    )
    handle_event(
        paused_event,
        run_response,
        events_to_skip=team.events_to_skip,  # type: ignore
        store_events=team.store_events,
    )
    await _acleanup_and_store(team, run_response=run_response, session=session, run_context=run_context)
    log_debug(f"Team Run Paused: {run_response.run_id}", center=True, symbol="*")
    return run_response
async def ahandle_team_run_paused_stream(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
    """Async variant of ``handle_team_run_paused_stream``: yields the pause
    event (when not skipped) after the run has been persisted."""
    from agno.run.approval import acreate_approval_from_pause
    from agno.team._run import _acleanup_and_store

    run_response.status = RunStatus.paused
    run_response.content = run_response.content or _get_team_paused_content(run_response)
    # Approval ids must be stamped on tools before the event is built and the
    # run is stored, so the DB row carries the complete data.
    await acreate_approval_from_pause(
        db=team.db, run_response=run_response, team_id=team.id, team_name=team.name, user_id=team.user_id
    )
    emitted = handle_event(
        create_team_run_paused_event(
            from_run_response=run_response,
            tools=run_response.tools,
            requirements=run_response.requirements,
        ),
        run_response,
        events_to_skip=team.events_to_skip,  # type: ignore
        store_events=team.store_events,
    )
    await _acleanup_and_store(team, run_response=run_response, session=session, run_context=run_context)
    if emitted is not None:
        yield emitted
    log_debug(f"Team Run Paused: {run_response.run_id}", center=True, symbol="*")
def _execute_pre_hooks(
    team: "Team",
    hooks: Optional[List[Callable[..., Any]]],
    run_response: TeamRunOutput,
    run_input: TeamRunInput,
    session: TeamSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> Iterator[TeamRunOutputEvent]:
    """Execute multiple pre-hook functions in succession.

    A generator: when ``stream_events`` is True it yields a started/completed
    event around each hook. Hooks receive only the arguments their signature
    declares (via ``filter_hook_args``). InputCheckError/OutputCheckError
    propagate to the caller; any other hook failure is logged and swallowed.
    When background execution is enabled, non-guardrail hooks are deferred to
    ``background_tasks`` and emit no events.
    """
    from agno.team._init import _set_debug

    if hooks is None:
        return
    # Prepare arguments for hooks; explicit kwargs can be overridden by **kwargs.
    effective_debug_mode = debug_mode if debug_mode is not None else team.debug_mode
    all_args = {
        "run_input": run_input,
        "run_context": run_context,
        "team": team,
        "session": session,
        "user_id": user_id,
        "debug_mode": effective_debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode: run guardrails synchronously, buffer everything else.
    # See agent/_hooks.py execute_pre_hooks for full pattern explanation.
    if team._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    # Guardrail rejections must still block the run.
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args before scheduling so background hooks see a stable snapshot.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            # NOTE(review): handle_event may return None for skipped events
            # (see the pause handlers) — None could be yielded here; confirm intended.
            yield handle_event(  # type: ignore
                run_response=run_response,
                event=create_team_pre_hook_started_event(
                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
                ),
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
        try:
            filtered_args = filter_hook_args(hook, all_args)
            hook(**filtered_args)
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_response,
                    event=create_team_pre_hook_completed_event(
                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
                    ),
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail hook failures are logged but do not abort the run.
            log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the pre-hook changed it
            _set_debug(team, debug_mode=debug_mode)
    # Update the input on the run_response (pre-hooks may mutate run_input)
    run_response.input = run_input
async def _aexecute_pre_hooks(
    team: "Team",
    hooks: Optional[List[Callable[..., Any]]],
    run_response: TeamRunOutput,
    run_input: TeamRunInput,
    session: TeamSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> AsyncIterator[TeamRunOutputEvent]:
    """Execute multiple pre-hook functions in succession (async version).

    Same contract as :func:`_execute_pre_hooks`, but coroutine hooks are
    awaited and sync hooks are called directly. InputCheckError and
    OutputCheckError propagate; other hook failures are logged and swallowed.
    """
    from agno.team._init import _set_debug

    if hooks is None:
        return
    # Prepare arguments for hooks; explicit kwargs can be overridden by **kwargs.
    effective_debug_mode = debug_mode if debug_mode is not None else team.debug_mode
    all_args = {
        "run_input": run_input,
        "run_context": run_context,
        "team": team,
        "session": session,
        "user_id": user_id,
        "debug_mode": effective_debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode — see _execute_pre_hooks for pattern explanation.
    if team._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    if iscoroutinefunction(hook):
                        await hook(**filtered_args)
                    else:
                        hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    # Guardrail rejections must still block the run.
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args before scheduling so background hooks see a stable snapshot.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            # NOTE(review): handle_event may return None for skipped events
            # (see the pause handlers) — None could be yielded here; confirm intended.
            yield handle_event(  # type: ignore
                run_response=run_response,
                event=create_team_pre_hook_started_event(
                    from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
                ),
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
        try:
            filtered_args = filter_hook_args(hook, all_args)
            if iscoroutinefunction(hook):
                await hook(**filtered_args)
            else:
                hook(**filtered_args)
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_response,
                    event=create_team_pre_hook_completed_event(
                        from_run_response=run_response, run_input=run_input, pre_hook_name=hook.__name__
                    ),
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail hook failures are logged but do not abort the run.
            log_error(f"Pre-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the pre-hook changed it
            _set_debug(team, debug_mode=debug_mode)
    # Update the input on the run_response (pre-hooks may mutate run_input)
    run_response.input = run_input
def _execute_post_hooks(
    team: "Team",
    hooks: Optional[List[Callable[..., Any]]],
    run_output: TeamRunOutput,
    session: TeamSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> Iterator[TeamRunOutputEvent]:
    """Execute multiple post-hook functions in succession.

    Mirrors :func:`_execute_pre_hooks` but passes ``run_output`` (not
    ``run_input``) to the hooks and does not update the run input afterwards.
    InputCheckError/OutputCheckError propagate; other hook failures are logged
    and swallowed.
    """
    from agno.team._init import _set_debug

    if hooks is None:
        return
    # Prepare arguments for hooks; explicit kwargs can be overridden by **kwargs.
    effective_debug_mode = debug_mode if debug_mode is not None else team.debug_mode
    all_args = {
        "run_output": run_output,
        "run_context": run_context,
        "team": team,
        "session": session,
        "user_id": user_id,
        "debug_mode": effective_debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode — see _execute_pre_hooks for pattern explanation.
    if team._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    # Guardrail rejections must still block the run.
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args before scheduling so background hooks see a stable snapshot.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            # NOTE(review): handle_event may return None for skipped events
            # (see the pause handlers) — None could be yielded here; confirm intended.
            yield handle_event(  # type: ignore
                run_response=run_output,
                event=create_team_post_hook_started_event(  # type: ignore
                    from_run_response=run_output,
                    post_hook_name=hook.__name__,
                ),
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
        try:
            filtered_args = filter_hook_args(hook, all_args)
            hook(**filtered_args)
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_output,
                    event=create_team_post_hook_completed_event(  # type: ignore
                        from_run_response=run_output,
                        post_hook_name=hook.__name__,
                    ),
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail hook failures are logged but do not abort the run.
            log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the post-hook changed it
            _set_debug(team, debug_mode=debug_mode)
async def _aexecute_post_hooks(
    team: "Team",
    hooks: Optional[List[Callable[..., Any]]],
    run_output: TeamRunOutput,
    session: TeamSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    debug_mode: Optional[bool] = None,
    stream_events: bool = False,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> AsyncIterator[TeamRunOutputEvent]:
    """Execute multiple post-hook functions in succession (async version).

    Same contract as :func:`_execute_post_hooks`, but coroutine hooks are
    awaited and sync hooks are called directly. InputCheckError and
    OutputCheckError propagate; other hook failures are logged and swallowed.
    """
    from agno.team._init import _set_debug

    if hooks is None:
        return
    # Prepare arguments for hooks; explicit kwargs can be overridden by **kwargs.
    effective_debug_mode = debug_mode if debug_mode is not None else team.debug_mode
    all_args = {
        "run_output": run_output,
        "run_context": run_context,
        "team": team,
        "session": session,
        "user_id": user_id,
        "debug_mode": effective_debug_mode,
        "metadata": run_context.metadata if run_context else None,
    }
    all_args.update(kwargs)
    # Global background mode — see _execute_pre_hooks for pattern explanation.
    if team._run_hooks_in_background is True and background_tasks is not None:
        pending_bg_hooks = []
        for hook in hooks:
            if is_guardrail_hook(hook):
                filtered_args = filter_hook_args(hook, all_args)
                try:
                    if iscoroutinefunction(hook):
                        await hook(**filtered_args)
                    else:
                        hook(**filtered_args)
                except (InputCheckError, OutputCheckError):
                    # Guardrail rejections must still block the run.
                    raise
                except Exception as e:
                    log_error(f"Background guardrail '{hook.__name__}' execution failed: {str(e)}")
                    log_exception(e)
            else:
                pending_bg_hooks.append(hook)
        # Copy args before scheduling so background hooks see a stable snapshot.
        bg_args = copy_args_for_background(all_args)
        for hook in pending_bg_hooks:
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
        return
    for i, hook in enumerate(hooks):
        # Check if this specific hook should run in background (via @hook decorator)
        if should_run_hook_in_background(hook) and background_tasks is not None:
            bg_args = copy_args_for_background(all_args)
            filtered_args = filter_hook_args(hook, bg_args)
            background_tasks.add_task(hook, **filtered_args)
            continue
        if stream_events:
            # NOTE(review): handle_event may return None for skipped events
            # (see the pause handlers) — None could be yielded here; confirm intended.
            yield handle_event(  # type: ignore
                run_response=run_output,
                event=create_team_post_hook_started_event(  # type: ignore
                    from_run_response=run_output,
                    post_hook_name=hook.__name__,
                ),
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
        try:
            filtered_args = filter_hook_args(hook, all_args)
            if iscoroutinefunction(hook):
                await hook(**filtered_args)
            else:
                hook(**filtered_args)
            if stream_events:
                yield handle_event(  # type: ignore
                    run_response=run_output,
                    event=create_team_post_hook_completed_event(  # type: ignore
                        from_run_response=run_output,
                        post_hook_name=hook.__name__,
                    ),
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        except (InputCheckError, OutputCheckError) as e:
            raise e
        except Exception as e:
            # Non-guardrail hook failures are logged but do not abort the run.
            log_error(f"Post-hook #{i + 1} execution failed: {str(e)}")
            log_exception(e)
        finally:
            # Reset global log mode in case an agent in the post-hook changed it
            _set_debug(team, debug_mode=debug_mode)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_hooks.py",
"license": "Apache License 2.0",
"lines": 540,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_init.py | """Initialization and configuration trait for Team."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agno.team.mode import TeamMode
from agno.team.team import Team
from os import getenv
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Tuple,
Type,
Union,
cast,
)
from uuid import uuid4
from pydantic import BaseModel
from agno.agent import Agent
from agno.compression.manager import CompressionManager
from agno.db.base import AsyncBaseDb, BaseDb
from agno.eval.base import BaseEval
from agno.filters import FilterExpr
from agno.guardrails import BaseGuardrail
from agno.knowledge.protocol import KnowledgeProtocol
from agno.learn.machine import LearningMachine
from agno.memory import MemoryManager
from agno.models.base import Model
from agno.models.message import Message
from agno.models.utils import get_model
from agno.run.agent import RunEvent
from agno.run.team import (
TeamRunEvent,
)
from agno.session import SessionSummaryManager, TeamSession
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.log import (
log_debug,
log_error,
log_exception,
log_info,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
use_team_logger,
)
from agno.utils.safe_formatter import SafeFormatter
from agno.utils.string import generate_id_from_name
def __init__(
team: "Team",
members: Union[List[Union[Agent, "Team"]], Callable[..., List]],
id: Optional[str] = None,
model: Optional[Union[Model, str]] = None,
name: Optional[str] = None,
role: Optional[str] = None,
mode: Optional["TeamMode"] = None,
respond_directly: bool = False,
determine_input_for_members: bool = True,
delegate_to_all_members: bool = False,
max_iterations: int = 10,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
add_session_state_to_context: bool = False,
enable_agentic_state: bool = False,
overwrite_db_session_state: bool = False,
resolve_in_context: bool = True,
cache_session: bool = False,
add_team_history_to_members: bool = False,
num_team_history_runs: int = 3,
search_session_history: Optional[bool] = False,
num_history_sessions: Optional[int] = None,
description: Optional[str] = None,
instructions: Optional[Union[str, List[str], Callable]] = None,
use_instruction_tags: bool = False,
expected_output: Optional[str] = None,
additional_context: Optional[str] = None,
markdown: bool = False,
add_datetime_to_context: bool = False,
add_location_to_context: bool = False,
timezone_identifier: Optional[str] = None,
add_name_to_context: bool = False,
add_member_tools_to_context: bool = False,
system_message: Optional[Union[str, Callable, Message]] = None,
system_message_role: str = "system",
introduction: Optional[str] = None,
additional_input: Optional[List[Union[str, Dict, BaseModel, Message]]] = None,
dependencies: Optional[Dict[str, Any]] = None,
add_dependencies_to_context: bool = False,
knowledge: Optional[Union[KnowledgeProtocol, Callable[..., KnowledgeProtocol]]] = None,
knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
add_knowledge_to_context: bool = False,
enable_agentic_knowledge_filters: Optional[bool] = False,
update_knowledge: bool = False,
knowledge_retriever: Optional[Callable[..., Optional[List[Union[Dict, str]]]]] = None,
references_format: Literal["json", "yaml"] = "json",
share_member_interactions: bool = False,
get_member_information_tool: bool = False,
search_knowledge: bool = True,
add_search_knowledge_instructions: bool = True,
read_chat_history: bool = False,
store_media: bool = True,
store_tool_messages: bool = True,
store_history_messages: bool = False,
send_media_to_model: bool = True,
add_history_to_context: bool = False,
num_history_runs: Optional[int] = None,
num_history_messages: Optional[int] = None,
max_tool_calls_from_history: Optional[int] = None,
tools: Optional[Union[List[Union[Toolkit, Callable, Function, Dict]], Callable[..., List]]] = None,
tool_call_limit: Optional[int] = None,
tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
tool_hooks: Optional[List[Callable]] = None,
pre_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, BaseEval]]] = None,
post_hooks: Optional[List[Union[Callable[..., Any], BaseGuardrail, BaseEval]]] = None,
input_schema: Optional[Type[BaseModel]] = None,
output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
parser_model: Optional[Union[Model, str]] = None,
parser_model_prompt: Optional[str] = None,
output_model: Optional[Union[Model, str]] = None,
output_model_prompt: Optional[str] = None,
use_json_mode: bool = False,
parse_response: bool = True,
db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
enable_agentic_memory: bool = False,
update_memory_on_run: bool = False,
enable_user_memories: Optional[bool] = None, # Soon to be deprecated. Use update_memory_on_run
add_memories_to_context: Optional[bool] = None,
memory_manager: Optional[MemoryManager] = None,
enable_session_summaries: bool = False,
session_summary_manager: Optional[SessionSummaryManager] = None,
add_session_summary_to_context: Optional[bool] = None,
learning: Optional[Union[bool, LearningMachine]] = None,
add_learnings_to_context: bool = True,
compress_tool_results: bool = False,
compression_manager: Optional["CompressionManager"] = None,
metadata: Optional[Dict[str, Any]] = None,
reasoning: bool = False,
reasoning_model: Optional[Union[Model, str]] = None,
reasoning_agent: Optional[Agent] = None,
reasoning_min_steps: int = 1,
reasoning_max_steps: int = 10,
stream: Optional[bool] = None,
stream_events: Optional[bool] = None,
store_events: bool = False,
events_to_skip: Optional[List[Union[RunEvent, TeamRunEvent]]] = None,
store_member_responses: bool = False,
stream_member_events: bool = True,
debug_mode: bool = False,
debug_level: Literal[1, 2] = 1,
show_members_responses: bool = False,
retries: int = 0,
delay_between_retries: int = 1,
exponential_backoff: bool = False,
telemetry: bool = True,
cache_callables: bool = True,
callable_tools_cache_key: Optional[Callable[..., Optional[str]]] = None,
callable_knowledge_cache_key: Optional[Callable[..., Optional[str]]] = None,
callable_members_cache_key: Optional[Callable[..., Optional[str]]] = None,
):
from agno.utils.callables import is_callable_factory
team.members = members
team.model = model # type: ignore[assignment]
team.name = name
team.id = id
team.role = role
team.respond_directly = respond_directly
team.determine_input_for_members = determine_input_for_members
team.delegate_to_all_members = delegate_to_all_members
team.max_iterations = max_iterations
# Resolve TeamMode: explicit mode wins, otherwise infer from booleans
from agno.team.mode import TeamMode
if mode is not None:
team.mode = mode
# Normalize booleans deterministically so conflicting flags can't leak through
if mode == TeamMode.route:
team.respond_directly = True
team.delegate_to_all_members = False
elif mode == TeamMode.broadcast:
team.delegate_to_all_members = True
team.respond_directly = False
elif mode in (TeamMode.coordinate, TeamMode.tasks):
team.respond_directly = False
team.delegate_to_all_members = False
else:
if team.respond_directly:
team.mode = TeamMode.route
elif team.delegate_to_all_members:
team.mode = TeamMode.broadcast
else:
team.mode = TeamMode.coordinate
team.user_id = user_id
team.session_id = session_id
team.session_state = session_state
team.add_session_state_to_context = add_session_state_to_context
team.enable_agentic_state = enable_agentic_state
team.overwrite_db_session_state = overwrite_db_session_state
team.resolve_in_context = resolve_in_context
team.cache_session = cache_session
team.add_history_to_context = add_history_to_context
team.num_history_runs = num_history_runs
team.num_history_messages = num_history_messages
if team.num_history_messages is not None and team.num_history_runs is not None:
log_warning("num_history_messages and num_history_runs cannot be set at the same time. Using num_history_runs.")
team.num_history_messages = None
if team.num_history_messages is None and team.num_history_runs is None:
team.num_history_runs = 3
team.max_tool_calls_from_history = max_tool_calls_from_history
team.add_team_history_to_members = add_team_history_to_members
team.num_team_history_runs = num_team_history_runs
team.search_session_history = search_session_history
team.num_history_sessions = num_history_sessions
team.description = description
team.instructions = instructions
team.use_instruction_tags = use_instruction_tags
team.expected_output = expected_output
team.additional_context = additional_context
team.markdown = markdown
team.add_datetime_to_context = add_datetime_to_context
team.add_location_to_context = add_location_to_context
team.add_name_to_context = add_name_to_context
team.timezone_identifier = timezone_identifier
team.add_member_tools_to_context = add_member_tools_to_context
team.system_message = system_message
team.system_message_role = system_message_role
team.introduction = introduction
team.additional_input = additional_input
team.dependencies = dependencies
team.add_dependencies_to_context = add_dependencies_to_context
team.knowledge = knowledge
team.knowledge_filters = knowledge_filters
team.enable_agentic_knowledge_filters = enable_agentic_knowledge_filters
team.update_knowledge = update_knowledge
team.add_knowledge_to_context = add_knowledge_to_context
team.knowledge_retriever = knowledge_retriever
team.references_format = references_format
team.share_member_interactions = share_member_interactions
team.get_member_information_tool = get_member_information_tool
team.search_knowledge = search_knowledge
team.add_search_knowledge_instructions = add_search_knowledge_instructions
team.read_chat_history = read_chat_history
team.store_media = store_media
team.store_tool_messages = store_tool_messages
team.store_history_messages = store_history_messages
team.send_media_to_model = send_media_to_model
if tools is None:
team.tools = None
elif is_callable_factory(tools, excluded_types=(Toolkit, Function)):
team.tools = tools # type: ignore[assignment]
else:
team.tools = list(tools) if tools else [] # type: ignore[arg-type]
team.tool_choice = tool_choice
team.tool_call_limit = tool_call_limit
team.tool_hooks = tool_hooks
# Initialize hooks
team.pre_hooks = pre_hooks
team.post_hooks = post_hooks
team.input_schema = input_schema
team.output_schema = output_schema
team.parser_model = parser_model # type: ignore[assignment]
team.parser_model_prompt = parser_model_prompt
team.output_model = output_model # type: ignore[assignment]
team.output_model_prompt = output_model_prompt
team.use_json_mode = use_json_mode
team.parse_response = parse_response
team.db = db
team.enable_agentic_memory = enable_agentic_memory
if enable_user_memories is not None:
team.update_memory_on_run = enable_user_memories
else:
team.update_memory_on_run = update_memory_on_run
team.enable_user_memories = team.update_memory_on_run # Soon to be deprecated. Use update_memory_on_run
team.add_memories_to_context = add_memories_to_context
team.memory_manager = memory_manager
team.enable_session_summaries = enable_session_summaries
team.session_summary_manager = session_summary_manager
team.add_session_summary_to_context = add_session_summary_to_context
team.learning = learning
team.add_learnings_to_context = add_learnings_to_context
# Context compression settings
team.compress_tool_results = compress_tool_results
team.compression_manager = compression_manager
team.metadata = metadata
team.reasoning = reasoning
team.reasoning_model = reasoning_model # type: ignore[assignment]
team.reasoning_agent = reasoning_agent
team.reasoning_min_steps = reasoning_min_steps
team.reasoning_max_steps = reasoning_max_steps
team.stream = stream
team.stream_events = stream_events
team.store_events = store_events
team.store_member_responses = store_member_responses
team.events_to_skip = events_to_skip
if team.events_to_skip is None:
team.events_to_skip = [
RunEvent.run_content,
TeamRunEvent.run_content,
]
team.stream_member_events = stream_member_events
team.debug_mode = debug_mode
if debug_level not in [1, 2]:
log_warning(f"Invalid debug level: {debug_level}. Setting to 1.")
debug_level = 1
team.debug_level = debug_level
team.show_members_responses = show_members_responses
team.retries = retries
team.delay_between_retries = delay_between_retries
team.exponential_backoff = exponential_backoff
team.telemetry = telemetry
# TODO: Remove these
# Images generated during this session
team.images = None
# Audio generated during this session
team.audio = None
# Videos generated during this session
team.videos = None
# Team session
team._cached_session = None
team._tool_instructions = None
# True if we should parse a member response model
team._member_response_model = None
team._formatter = None
team._hooks_normalised = False
# List of MCP tools that were initialized on the last run
team._mcp_tools_initialized_on_run = []
# List of connectable tools that were initialized on the last run
team._connectable_tools_initialized_on_run = []
# Internal resolved LearningMachine instance
team._learning = None
# Lazy-initialized shared thread pool executor for background tasks (memory, cultural knowledge, etc.)
team._background_executor = None
# Callable factory settings
team.cache_callables = cache_callables
team.callable_tools_cache_key = callable_tools_cache_key
team.callable_knowledge_cache_key = callable_knowledge_cache_key
team.callable_members_cache_key = callable_members_cache_key
team._callable_tools_cache = {}
team._callable_knowledge_cache = {}
team._callable_members_cache = {}
_resolve_models(
team,
)
def background_executor(team: "Team") -> Any:
    """Return the team's shared background thread pool, creating it on first use.

    The pool is shared by background jobs (memory creation, cultural knowledge
    updates) and is built lazily at runtime so constructing a Team never spins
    up threads. Subsequent calls reuse the same executor instance.
    """
    executor = team._background_executor
    if executor is None:
        from concurrent.futures import ThreadPoolExecutor

        executor = ThreadPoolExecutor(max_workers=3, thread_name_prefix="agno-bg")
        team._background_executor = executor
    return executor
def cached_session(team: "Team") -> Optional[TeamSession]:
    """Return the session cached on this team instance, or None when nothing is cached."""
    return team._cached_session
def set_id(team: "Team") -> None:
    """Assign an ID to the team when one has not been set yet.

    A deterministic UUID is derived from the team name when available;
    a random UUID is used when there is no name (both cases are handled
    by ``generate_id_from_name``).
    """
    if team.id is not None:
        return
    team.id = generate_id_from_name(team.name)
def _set_debug(team: "Team", debug_mode: Optional[bool] = None) -> None:
    """Configure the team log level from flags and environment variables.

    AGNO_DEBUG_LEVEL (values "1"/"2") overrides the team's configured debug
    level; AGNO_DEBUG="true", the team's own flag, or the per-run
    ``debug_mode`` argument all switch logging to debug.
    """
    # The environment wins over the configured level only when it holds a valid value.
    env_level = getenv("AGNO_DEBUG_LEVEL")
    if env_level in ("1", "2"):
        debug_level: Literal[1, 2] = cast(Literal[1, 2], int(env_level))
    else:
        debug_level = team.debug_level

    env_debug = getenv("AGNO_DEBUG", "false").lower() == "true"
    if team.debug_mode or debug_mode or env_debug:
        set_log_level_to_debug(source_type="team", level=debug_level)
    else:
        set_log_level_to_info(source_type="team")
def _set_telemetry(team: "Team") -> None:
"""Override telemetry settings based on environment variables."""
telemetry_env = getenv("AGNO_TELEMETRY")
if telemetry_env is not None:
team.telemetry = telemetry_env.lower() == "true"
def _initialize_member(team: "Team", member: Union["Team", Agent], debug_mode: Optional[bool] = None) -> None:
    """Wire a single member (agent or sub-team) to this team.

    Sets debug flags, parent/team IDs, and model inheritance; sub-teams
    recursively initialize their own members so inheritance flows from the
    sub-team, not from this team.
    """
    from agno.team.team import Team

    # Set debug mode for all members
    if debug_mode:
        member.debug_mode = True
        member.debug_level = team.debug_level
    if isinstance(member, Agent):
        member.team_id = team.id
        member.set_id()
        # Inherit team primary model if agent has no explicit model
        if member.model is None and team.model is not None:
            member.model = team.model
            log_info(f"Agent '{member.name or member.id}' inheriting model from Team: {team.model.id}")
    elif isinstance(member, Team):
        member.parent_team_id = team.id
        member.set_id()
        # Initialize the sub-team's model first so it has its model set
        member._set_default_model()
        # Then let the sub-team initialize its own members so they inherit from the sub-team
        # Only iterate if members is a static list (not a callable factory)
        if isinstance(member.members, list):
            for sub_member in member.members:
                member._initialize_member(sub_member, debug_mode=debug_mode)
def propagate_run_hooks_in_background(team: "Team", run_in_background: bool = True) -> None:
    """Set ``_run_hooks_in_background`` on this team and, recursively, on all members.

    Agents receive the flag directly; nested teams additionally propagate it to
    their own members. Members supplied via a callable factory (not a static
    list) are left untouched since they cannot be iterated here.

    Args:
        run_in_background: Whether hooks should run in background. Defaults to True.
    """
    from agno.team.team import Team

    team._run_hooks_in_background = run_in_background

    members = team.members
    # A callable factory cannot be iterated; only static member lists are walked.
    if not isinstance(members, list):
        return
    for member in members:
        if hasattr(member, "_run_hooks_in_background"):
            member._run_hooks_in_background = run_in_background
        # Nested teams push the setting down another level.
        if isinstance(member, Team):
            member.propagate_run_hooks_in_background(run_in_background)
def _set_default_model(team: "Team") -> None:
    """Fall back to OpenAI's gpt-4o when the team has no model configured.

    NOTE(review): exits the process (status 1) when `openai` is not
    installed, since the default provider cannot be constructed without it.
    """
    # Set the default model
    if team.model is None:
        try:
            from agno.models.openai import OpenAIChat
        except ModuleNotFoundError as e:
            log_exception(e)
            log_error(
                "Agno agents use `openai` as the default model provider. Please provide a `model` or install `openai`."
            )
            exit(1)

        log_info("Setting default model to OpenAI Chat")
        team.model = OpenAIChat(id="gpt-4o")
def _set_memory_manager(team: "Team") -> None:
    """Ensure the team has a memory manager wired to its model and db."""
    if team.db is None:
        log_warning("Database not provided. Memories will not be stored.")

    manager = team.memory_manager
    if manager is None:
        team.memory_manager = MemoryManager(model=team.model, db=team.db)
    else:
        # Backfill only the pieces the provided manager is missing.
        if manager.model is None:
            manager.model = team.model
        if manager.db is None:
            manager.db = team.db

    # Default to exposing memories in context whenever memory is in play.
    if team.add_memories_to_context is None:
        team.add_memories_to_context = (
            team.update_memory_on_run or team.enable_agentic_memory or team.memory_manager is not None
        )
def _set_session_summary_manager(team: "Team") -> None:
    """Ensure a session summary manager exists (and has a model) when summaries are enabled."""
    if team.enable_session_summaries and team.session_summary_manager is None:
        team.session_summary_manager = SessionSummaryManager(model=team.model)

    manager = team.session_summary_manager
    if manager is not None and manager.model is None:
        # A manager supplied without a model inherits the team's model.
        manager.model = team.model

    if team.add_session_summary_to_context is None:
        team.add_session_summary_to_context = team.enable_session_summaries or team.session_summary_manager is not None
def _set_compression_manager(team: "Team") -> None:
    """Ensure a compression manager exists and is consistent with team settings.

    Creates a default ``CompressionManager`` when ``compress_tool_results`` is
    enabled without one. Backfills the manager's model from the team, and turns
    ``compress_tool_results`` on when the provided manager requests it.

    Note: the original implementation backfilled the manager's model twice
    (once in an ``elif`` branch and again in the unconditional block below);
    the redundant branch has been removed — behavior is unchanged.
    """
    if team.compress_tool_results and team.compression_manager is None:
        team.compression_manager = CompressionManager(
            model=team.model,
        )
    if team.compression_manager is not None:
        # A manager supplied without a model inherits the team's model.
        if team.compression_manager.model is None:
            team.compression_manager.model = team.model
        # A manager configured to compress implies the team-level flag.
        if team.compression_manager.compress_tool_results:
            team.compress_tool_results = True
def _set_learning_machine(team: "Team") -> None:
    """Initialize LearningMachine with team's db and model.

    Sets the internal _learning field without modifying the public learning field.

    Handles:
    - learning=True: Create default LearningMachine
    - learning=False/None: Disabled
    - learning=LearningMachine(...): Use provided, inject db/model
    """
    # Record that initialization ran, even when it resolves to no machine.
    team._learning_init_attempted = True
    if team.learning is None or team.learning is False:
        team._learning = None
        return
    # A LearningMachine cannot persist anything without a database.
    if team.db is None:
        log_warning("Database not provided. LearningMachine not initialized.")
        team._learning = None
        return
    if team.learning is True:
        team._learning = LearningMachine(db=team.db, model=team.model, user_profile=True, user_memory=True)
        return
    if isinstance(team.learning, LearningMachine):
        # Inject the team's db/model only where the provided machine left them unset.
        if team.learning.db is None:
            team.learning.db = team.db
        if team.learning.model is None:
            team.learning.model = team.model
        team._learning = team.learning
def _initialize_session(
    team: "Team",
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> Tuple[str, Optional[str]]:
    """Resolve the session_id and user_id to use for a run.

    Falls back to the team's sticky session_id, generating (and pinning to the
    team instance) a fresh UUID when the team has none. An empty or missing
    user_id falls back to the team's default user_id.
    """
    if session_id is None:
        if team.session_id:
            session_id = team.session_id
        else:
            # Pin a freshly generated id to the instance so later runs reuse it.
            session_id = str(uuid4())
            team.session_id = session_id

    log_debug(f"Session ID: {session_id}", center=True)

    # Empty-string and None user ids both fall back to the team default.
    if not user_id:
        user_id = team.user_id
    return session_id, user_id
def _initialize_session_state(
team: "Team",
session_state: Dict[str, Any],
user_id: Optional[str] = None,
session_id: Optional[str] = None,
run_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Initialize the session state for the team."""
if user_id:
session_state["current_user_id"] = user_id
if session_id is not None:
session_state["current_session_id"] = session_id
if run_id is not None:
session_state["current_run_id"] = run_id
return session_state
def _has_async_db(team: "Team") -> bool:
    """Return True if the db the team is equipped with is an Async implementation."""
    if team.db is None:
        return False
    return isinstance(team.db, AsyncBaseDb)
def _resolve_models(team: "Team") -> None:
    """Resolve any model configured as a string into a concrete Model instance."""
    # Every model-bearing attribute goes through the same string->Model resolution.
    for attr in ("model", "reasoning_model", "parser_model", "output_model"):
        current = getattr(team, attr)
        if current is not None:
            setattr(team, attr, get_model(current))
def initialize_team(team: "Team", debug_mode: Optional[bool] = None) -> None:
    """Prepare a team for a run: logger, model, ID, managers, formatter, members.

    Order matters: the default model is set before managers (which inherit it),
    and the team ID is set before members (which reference it).
    """
    # Make sure for the team, we are using the team logger
    use_team_logger()

    # These two flags are mutually exclusive; broadcast delegation wins.
    if team.delegate_to_all_members and team.respond_directly:
        log_warning(
            "`delegate_to_all_members` and `respond_directly` are both enabled. The task will be delegated to all members, but `respond_directly` will be disabled."
        )
        team.respond_directly = False

    _set_default_model(team)

    # Set debug mode
    _set_debug(team, debug_mode=debug_mode)

    # Set the team ID if not set
    team.set_id()

    # Set the memory manager and session summary manager (only when their feature is in use)
    if team.update_memory_on_run or team.enable_agentic_memory or team.memory_manager is not None:
        _set_memory_manager(team)
    if team.enable_session_summaries or team.session_summary_manager is not None:
        _set_session_summary_manager(team)
    if team.compress_tool_results or team.compression_manager is not None:
        _set_compression_manager(team)
    if team.learning is not None and team.learning is not False:
        _set_learning_machine(team)

    log_debug(f"Team ID: {team.id}", center=True)

    # Initialize formatter
    if team._formatter is None:
        team._formatter = SafeFormatter()

    # Only initialize members if they are a static list (not a callable factory)
    if isinstance(team.members, list):
        for member in team.members:
            _initialize_member(team, member, debug_mode=team.debug_mode)
def add_tool(team: "Team", tool: Union[Toolkit, Callable, Function, Dict]) -> None:
    """Append a single tool to the team's static tool list.

    Raises:
        RuntimeError: When ``team.tools`` is a callable factory, which cannot
            be appended to — use ``set_tools`` to replace it instead.
    """
    from agno.utils.callables import is_callable_factory

    current = team.tools
    if is_callable_factory(current, excluded_types=(Toolkit, Function)):
        raise RuntimeError(
            "Cannot add_tool() when tools is a callable factory. Use set_tools() to replace the factory."
        )
    if not current:
        current = []
        team.tools = current
    current.append(tool)  # type: ignore[union-attr]
def set_tools(team: "Team", tools: Union[List[Union[Toolkit, Callable, Function, Dict]], Callable[..., List]]) -> None:
    """Replace the team's tools with either a static list or a callable factory."""
    from agno.utils.callables import is_callable_factory

    if is_callable_factory(tools, excluded_types=(Toolkit, Function)):
        team.tools = tools  # type: ignore[assignment]
        # A new factory invalidates anything resolved from the previous one.
        team._callable_tools_cache.clear()
        return
    team.tools = list(tools) if tools else []  # type: ignore[arg-type]
async def _connect_mcp_tools(team: "Team") -> None:
    """Connect any uninitialized MCP tools found in the team's static tool list."""
    if not isinstance(team.tools, list):
        return
    mcp_class_names = ("MCPTools", "MultiMCPTools")
    for tool in team.tools:
        # Detect MCP tools by class name in the MRO to avoid importing MCP modules.
        mro = getattr(type(tool), "__mro__", ())
        is_mcp = any(klass.__name__ in mcp_class_names for klass in mro)
        if not is_mcp or tool.initialized:  # type: ignore
            continue
        try:
            # Connect the MCP server and remember it for disconnection after the run.
            await tool.connect()  # type: ignore
            team._mcp_tools_initialized_on_run.append(tool)  # type: ignore[union-attr]
        except Exception as e:
            log_warning(f"Error connecting tool: {str(e)}")
async def _disconnect_mcp_tools(team: "Team") -> None:
    """Close every MCP tool connected during the last run and reset the tracking list."""
    pending = team._mcp_tools_initialized_on_run  # type: ignore[union-attr]
    team._mcp_tools_initialized_on_run = []
    for tool in pending:
        try:
            await tool.close()
        except Exception as e:
            # Best-effort: a failed close should not prevent closing the rest.
            log_warning(f"Error disconnecting tool: {str(e)}")
def _connect_connectable_tools(team: "Team") -> None:
    """Open connections for tools that manage their own connection lifecycle (e.g. db-backed tools)."""
    if not (team.tools and isinstance(team.tools, list)):
        return
    already_connected = team._connectable_tools_initialized_on_run  # type: ignore[union-attr]
    for tool in team.tools:
        needs_connect = (
            getattr(tool, "requires_connect", False)
            and hasattr(tool, "connect")
            and tool not in already_connected  # type: ignore[operator]
        )
        if not needs_connect:
            continue
        try:
            tool.connect()  # type: ignore
            already_connected.append(tool)
        except Exception as e:
            log_warning(f"Error connecting tool: {str(e)}")
def _disconnect_connectable_tools(team: "Team") -> None:
    """Close connection-managed tools opened during the last run and clear the tracking list."""
    connected = team._connectable_tools_initialized_on_run  # type: ignore[union-attr]
    team._connectable_tools_initialized_on_run = []
    for tool in connected:
        if not hasattr(tool, "close"):
            continue
        try:
            tool.close()  # type: ignore
        except Exception as e:
            # Best-effort: a failed close should not prevent closing the rest.
            log_warning(f"Error disconnecting tool: {str(e)}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_init.py",
"license": "Apache License 2.0",
"lines": 634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_managers.py | """Background task orchestration for memory and learning."""
from __future__ import annotations
import asyncio
from concurrent.futures import Future
from typing import (
TYPE_CHECKING,
Optional,
)
if TYPE_CHECKING:
from agno.metrics import RunMetrics
from agno.team.team import Team
from typing import List
from agno.db.base import UserMemory
from agno.run.messages import RunMessages
from agno.session import TeamSession
from agno.utils.log import log_debug, log_warning
# ---------------------------------------------------------------------------
# Memory
# ---------------------------------------------------------------------------
def _make_memories(
    team: Team,
    run_messages: RunMessages,
    user_id: Optional[str] = None,
) -> Optional[RunMetrics]:
    """Create user memories from the run's user message (sync path).

    Skips memory creation when there is no non-blank user message, no memory
    manager, or memory updates are disabled. Always returns a metrics
    collector so callers can aggregate model usage.
    """
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    message = run_messages.user_message
    content = message.get_content_string() if message is not None else None
    should_update = (
        content is not None
        and content.strip() != ""
        and team.memory_manager is not None
        and team.update_memory_on_run
    )
    if should_update:
        log_debug("Managing user memories")
        team.memory_manager.create_user_memories(
            message=content,
            user_id=user_id,
            team_id=team.id,
            run_metrics=collector,
        )
    return collector
async def _amake_memories(
    team: Team,
    run_messages: RunMessages,
    user_id: Optional[str] = None,
) -> Optional[RunMetrics]:
    """Create user memories from the run's user message (async path).

    Skips memory creation when there is no non-blank user message, no memory
    manager, or memory updates are disabled. Always returns a metrics
    collector so callers can aggregate model usage.
    """
    from agno.metrics import RunMetrics

    collector = RunMetrics()
    message = run_messages.user_message
    content = message.get_content_string() if message is not None else None
    should_update = (
        content is not None
        and content.strip() != ""
        and team.memory_manager is not None
        and team.update_memory_on_run
    )
    if should_update:
        log_debug("Managing user memories")
        await team.memory_manager.acreate_user_memories(
            message=content,
            user_id=user_id,
            team_id=team.id,
            run_metrics=collector,
        )
    return collector
async def _astart_memory_task(
    team: Team,
    run_messages: RunMessages,
    user_id: Optional[str],
    existing_task: Optional[asyncio.Task],
) -> Optional[asyncio.Task]:
    """Replace any in-flight memory task with a fresh one when memory updates apply.

    Args:
        run_messages: The run messages containing the user message.
        user_id: The user ID for memory creation.
        existing_task: A task from a previous retry attempt, cancelled and
            drained before a new one is scheduled.

    Returns:
        The newly scheduled memory task, or None when memory creation is skipped.
    """
    # A retry may leave a stale task running; cancel and await it first.
    if existing_task is not None and not existing_task.done():
        existing_task.cancel()
        try:
            await existing_task
        except asyncio.CancelledError:
            pass

    # Agentic memory manages itself via tool calls, so no background task then.
    should_start = (
        run_messages.user_message is not None
        and team.memory_manager is not None
        and team.update_memory_on_run
        and not team.enable_agentic_memory
    )
    if not should_start:
        return None
    log_debug("Starting memory creation in background task.")
    return asyncio.create_task(_amake_memories(team, run_messages=run_messages, user_id=user_id))
def _start_memory_future(
    team: Team,
    run_messages: RunMessages,
    user_id: Optional[str],
    existing_future: Optional[Future],
) -> Optional[Future]:
    """Replace any pending memory future with a fresh one when memory updates apply.

    Args:
        run_messages: The run messages containing the user message.
        user_id: The user ID for memory creation.
        existing_future: A future from a previous retry attempt, cancelled first.

    Returns:
        The newly submitted memory future, or None when memory creation is skipped.
    """
    # A retry may leave a stale future pending; cancel it before scheduling anew.
    if existing_future is not None and not existing_future.done():
        existing_future.cancel()

    # Agentic memory manages itself via tool calls, so no background thread then.
    should_start = (
        run_messages.user_message is not None
        and team.memory_manager is not None
        and team.update_memory_on_run
        and not team.enable_agentic_memory
    )
    if not should_start:
        return None
    log_debug("Starting memory creation in background thread.")
    return team.background_executor.submit(_make_memories, team, run_messages=run_messages, user_id=user_id)
def get_user_memories(team: "Team", user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
    """Get the user memories for the given user ID.

    Args:
        user_id: The user ID to get the memories for. Falls back to the team's
            cached user ID, then to "default".

    Returns:
        Optional[List[UserMemory]]: The user memories.
    """
    from agno.team._init import _set_memory_manager

    # Lazily build a memory manager when none was configured.
    if team.memory_manager is None:
        _set_memory_manager(team)

    resolved = user_id
    if resolved is None:
        resolved = team.user_id
    if resolved is None:
        resolved = "default"
    return team.memory_manager.get_user_memories(user_id=resolved)  # type: ignore
async def aget_user_memories(team: "Team", user_id: Optional[str] = None) -> Optional[List[UserMemory]]:
    """Get the user memories for the given user ID (async path).

    Args:
        user_id: The user ID to get the memories for. Falls back to the team's
            cached user ID, then to "default".

    Returns:
        Optional[List[UserMemory]]: The user memories.
    """
    from agno.team._init import _set_memory_manager

    # Lazily build a memory manager when none was configured.
    if team.memory_manager is None:
        _set_memory_manager(team)

    resolved = user_id
    if resolved is None:
        resolved = team.user_id
    if resolved is None:
        resolved = "default"
    return await team.memory_manager.aget_user_memories(user_id=resolved)  # type: ignore
# ---------------------------------------------------------------------------
# Learning
# ---------------------------------------------------------------------------
def _process_learnings(
    team: "Team",
    run_messages: RunMessages,
    session: TeamSession,
    user_id: Optional[str],
) -> Optional[RunMetrics]:
    """Run learning extraction over the conversation (background-thread path)."""
    if team._learning is None:
        return None

    from agno.metrics import RunMetrics

    collector = RunMetrics()
    try:
        conversation = list(run_messages.messages) if run_messages else []
        team._learning.process(
            messages=conversation,
            user_id=user_id,
            session_id=session.session_id if session else None,
            team_id=team.id,
            run_metrics=collector,
        )
        log_debug("Learning extraction completed.")
    except Exception as e:
        # Learning is best-effort; extraction failures must never break the run.
        log_warning(f"Error processing learnings: {e}")
    return collector
async def _aprocess_learnings(
    team: "Team",
    run_messages: RunMessages,
    session: TeamSession,
    user_id: Optional[str],
) -> Optional[RunMetrics]:
    """Run learning extraction over the conversation (async path)."""
    if team._learning is None:
        return None

    from agno.metrics import RunMetrics

    collector = RunMetrics()
    try:
        conversation = list(run_messages.messages) if run_messages else []
        await team._learning.aprocess(
            messages=conversation,
            user_id=user_id,
            session_id=session.session_id if session else None,
            team_id=team.id,
            run_metrics=collector,
        )
        log_debug("Learning extraction completed.")
    except Exception as e:
        # Learning is best-effort; extraction failures must never break the run.
        log_warning(f"Error processing learnings: {e}")
    return collector
def _start_learning_future(
    team: "Team",
    run_messages: RunMessages,
    session: TeamSession,
    user_id: Optional[str],
    existing_future: Optional[Future] = None,
) -> Optional[Future]:
    """Start learning extraction on the background executor.

    Args:
        team: The Team instance.
        run_messages: The run messages containing conversation.
        session: The team session.
        user_id: The user ID for learning extraction.
        existing_future: An existing future to cancel before starting a new one.

    Returns:
        A new learning future when learning is enabled, None otherwise.
    """
    # A retry may leave a stale future pending; cancel it before scheduling anew.
    if existing_future is not None and not existing_future.done():
        existing_future.cancel()

    if team._learning is None:
        return None
    log_debug("Starting learning extraction in background thread.")
    return team.background_executor.submit(
        _process_learnings,
        team,
        run_messages=run_messages,
        session=session,
        user_id=user_id,
    )
async def _astart_learning_task(
    team: "Team",
    run_messages: RunMessages,
    session: TeamSession,
    user_id: Optional[str],
    existing_task: Optional[asyncio.Task[Optional[RunMetrics]]] = None,
) -> Optional[asyncio.Task[Optional[RunMetrics]]]:
    """Start learning extraction as an asyncio task.

    Args:
        team: The Team instance.
        run_messages: The run messages containing conversation.
        session: The team session.
        user_id: The user ID for learning extraction.
        existing_task: An existing task, cancelled and drained before a new
            one is scheduled.

    Returns:
        A new learning task when learning is enabled, None otherwise.
    """
    # A retry may leave a stale task running; cancel and await it first.
    if existing_task is not None and not existing_task.done():
        existing_task.cancel()
        try:
            await existing_task
        except asyncio.CancelledError:
            pass

    if team._learning is None:
        return None
    log_debug("Starting learning extraction as async task.")
    return asyncio.create_task(
        _aprocess_learnings(
            team,
            run_messages=run_messages,
            session=session,
            user_id=user_id,
        )
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_managers.py",
"license": "Apache License 2.0",
"lines": 263,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_messages.py | """Prompt/message building and deep-copy helpers for Team."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agno.team.team import Team
import json
from collections import ChainMap
from typing import (
Any,
Dict,
List,
Optional,
Sequence,
Type,
Union,
cast,
)
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.base import Model
from agno.models.message import Message, MessageReferences
from agno.models.response import ModelResponse
from agno.run import RunContext
from agno.run.messages import RunMessages
from agno.run.team import (
TeamRunOutput,
)
from agno.session import TeamSession
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.agent import (
aexecute_instructions,
aexecute_system_message,
execute_instructions,
execute_system_message,
)
from agno.utils.common import is_typed_dict
from agno.utils.log import (
log_debug,
log_warning,
)
from agno.utils.message import filter_tool_calls, get_text_from_message
from agno.utils.team import (
get_member_id,
)
from agno.utils.timer import Timer
def _get_tool_names(member: Any) -> List[str]:
    """Collect the display names of every tool attached to a member."""
    names: List[str] = []
    tools = member.tools
    if tools is None or not isinstance(tools, list):
        return names
    for item in tools:
        if isinstance(item, Toolkit):
            # A toolkit contributes each of its runnable functions.
            names.extend(fn.name for fn in item.functions.values() if fn.entrypoint)
        elif isinstance(item, Function) and item.entrypoint:
            names.append(item.name)
        elif callable(item):
            names.append(item.__name__)
        elif isinstance(item, dict) and "name" in item and item.get("name") is not None:
            names.append(item["name"])
        else:
            # Fallback: stringify anything we don't recognize.
            names.append(str(item))
    return names
def get_members_system_message_content(
    team: "Team", indent: int = 0, run_context: Optional["RunContext"] = None
) -> str:
    """Build the ``<member ...>`` listing of team members for the system message.

    Recurses into sub-teams, increasing the indent by two spaces per nesting
    level. Returns an empty string when the team has no resolved members.
    """
    from agno.team.team import Team
    from agno.utils.callables import get_resolved_members

    pad = " " * indent
    content = ""
    # Members may come from a callable factory; resolve them for this run first.
    resolved_members = get_resolved_members(team, run_context)
    if resolved_members is None or len(resolved_members) == 0:
        return content
    for member in resolved_members:
        member_id = get_member_id(member)
        if isinstance(member, Team):
            # Sub-team entry: marked with type="team" and expanded recursively.
            content += f'{pad}<member id="{member_id}" name="{member.name}" type="team">\n'
            if member.description is not None:
                content += f"{pad} Description: {member.description}\n"
            if member.members is not None:
                content += member.get_members_system_message_content(indent=indent + 2, run_context=run_context)
            content += f"{pad}</member>\n"
        else:
            # Agent entry: role/description plus optional tool names.
            content += f'{pad}<member id="{member_id}" name="{member.name}">\n'
            if member.role is not None:
                content += f"{pad} Role: {member.role}\n"
            if member.description is not None:
                content += f"{pad} Description: {member.description}\n"
            if team.add_member_tools_to_context:
                tool_names = _get_tool_names(member)
                if tool_names:
                    content += f"{pad} Tools: {', '.join(tool_names)}\n"
            content += f"{pad}</member>\n"
    return content
def _get_opening_prompt() -> str:
"""Opening identity statement for the team leader."""
return (
"You coordinate a team of specialized AI agents to fulfill the user's request. "
"Delegate to members when their expertise or tools are needed. "
"For straightforward requests you can handle directly — including using your own tools — respond without delegating.\n"
)
def _get_mode_instructions(team: "Team") -> str:
    """Return the mode-specific <how_to_respond> block for the system message.

    Each TeamMode gets its own operating instructions; any mode that is not
    tasks/route/broadcast falls through to coordinate mode (the default).
    """
    from agno.team.mode import TeamMode

    content = "\n<how_to_respond>\n"
    if team.mode == TeamMode.tasks:
        # Autonomous task mode: plan -> delegate -> review -> mark complete.
        content += (
            "You operate in autonomous task mode. Decompose the user's goal into discrete tasks, "
            "execute them by delegating to team members, and deliver the final result.\n\n"
            "Planning:\n"
            "- Break the goal into tasks with clear, actionable titles and self-contained descriptions. "
            "Each task should be a single unit of work for one member.\n"
            "- Assign each task to the member whose role and tools are best suited.\n"
            "- Set `depends_on` when a task requires another task's output. "
            "Leave tasks independent when they can run in any order.\n\n"
            "Execution:\n"
            "- Use `execute_task` for sequential or dependent tasks.\n"
            "- Use `execute_tasks_parallel` for groups of independent tasks to maximize throughput.\n"
            "- Review each result before proceeding. If a task fails, decide whether to retry with the same member, "
            "reassign to a different member, or adjust the plan.\n\n"
            "Completion:\n"
            "- When all tasks are done and results are satisfactory, call `mark_all_complete` with a summary of the outcome.\n"
            "- Use `list_tasks` to check progress at any point, and `add_task_note` to record observations.\n\n"
            "Write task descriptions that give the member everything they need: "
            "the objective, relevant context from the conversation or prior task results, and what a good result looks like.\n"
        )
    elif team.mode == TeamMode.route:
        # Route mode: pick exactly one member; their answer goes straight to the user.
        content += (
            "You operate in route mode. For requests that need member expertise, "
            "identify the single best member and delegate to them — their response is returned directly to the user. "
            "For requests you can handle directly — simple questions, using your own tools, or general conversation — "
            "respond without delegating.\n\n"
            "When routing to a member:\n"
            "- Analyze the request to determine which member's role and tools are the best match.\n"
            "- Delegate to exactly one member. Use only the member's ID — do not prefix it with the team ID.\n"
            "- Write the task to faithfully represent the user's full intent. Do not reinterpret or narrow the request.\n"
            "- If no member is a clear fit, choose the closest match and include any additional context the member might need.\n"
        )
    elif team.mode == TeamMode.broadcast:
        # Broadcast mode: fan the request out to every member in parallel, then synthesize.
        content += (
            "You operate in broadcast mode. For requests that benefit from multiple perspectives, "
            "send the request to all members simultaneously and synthesize their collective responses. "
            "For requests you can handle directly — simple questions, using your own tools, or general conversation — "
            "respond without delegating.\n\n"
            "When broadcasting:\n"
            "- Call `delegate_task_to_members` exactly once with a clear task description. "
            "This sends the task to every member in parallel.\n"
            "- Write the task so each member can respond independently from their own perspective.\n\n"
            "After receiving member responses:\n"
            "- Compare perspectives: note agreements, highlight complementary insights, and reconcile any contradictions.\n"
            "- Synthesize into a unified answer that integrates the strongest contributions thematically — "
            "do not list each member's response sequentially.\n"
        )
    else:
        # coordinate mode (default): delegate to the best-fit member(s) and synthesize results.
        content += (
            "You operate in coordinate mode. For requests that need member expertise, "
            "select the best member(s), delegate with clear task descriptions, and synthesize their outputs "
            "into a unified response. For requests you can handle directly — simple questions, "
            "using your own tools, or general conversation — respond without delegating.\n\n"
            "Delegation:\n"
            "- Match each sub-task to the member whose role and tools are the best fit. "
            "Delegate to multiple members when the request spans different areas of expertise.\n"
            "- Write task descriptions that are self-contained: state the goal, provide relevant context "
            "from the conversation, and describe what a good result looks like.\n"
            "- Use only the member's ID when delegating — do not prefix it with the team ID.\n\n"
            "After receiving member responses:\n"
            "- If a response is incomplete or off-target, re-delegate with clearer instructions or try a different member.\n"
            "- Synthesize all results into a single coherent response. Resolve contradictions, fill gaps with your own "
            "reasoning, and add structure — do not simply concatenate member outputs.\n"
        )
    content += "</how_to_respond>\n\n"
    return content
def _build_team_context(
    team: "Team",
    run_context: Optional["RunContext"] = None,
) -> str:
    """Build the opening + team_members + how_to_respond blocks.

    Shared between sync and async system-message builders. Returns an empty
    string when the team resolves to no members.
    """
    from agno.utils.callables import get_resolved_members

    members = get_resolved_members(team, run_context)
    if not members:
        return ""
    parts = [_get_opening_prompt(), "\n<team_members>\n"]
    parts.append(team.get_members_system_message_content(run_context=run_context))
    if team.get_member_information_tool:
        parts.append(
            "If you need to get information about your team members, you can use the `get_member_information` tool at any time.\n"
        )
    parts.append("</team_members>\n")
    parts.append(_get_mode_instructions(team))
    return "".join(parts)
def _build_identity_sections(
team: "Team",
instructions: List[str],
) -> str:
"""Build description, role, and instructions sections.
Shared between sync and async system-message builders.
"""
content = ""
if team.description is not None:
content += f"<description>\n{team.description}\n</description>\n\n"
if team.role is not None:
content += f"<your_role>\n{team.role}\n</your_role>\n\n"
if len(instructions) > 0:
if team.use_instruction_tags:
content += "<instructions>"
if len(instructions) > 1:
for _upi in instructions:
content += f"\n- {_upi}"
else:
content += "\n" + instructions[0]
content += "\n</instructions>\n\n"
else:
if len(instructions) > 1:
for _upi in instructions:
content += f"- {_upi}\n"
else:
content += instructions[0] + "\n\n"
return content
def _build_trailing_sections(
team: "Team",
*,
audio: Optional[Sequence[Audio]] = None,
images: Optional[Sequence[Image]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[File]] = None,
additional_information: List[str],
tools: Optional[List[Union[Function, dict]]] = None,
output_schema: Optional[Any] = None,
run_context: Optional[RunContext] = None,
session_state: Optional[Dict[str, Any]] = None,
add_session_state_to_context: Optional[bool] = None,
) -> str:
"""Build media, additional info, tool instructions, and other trailing sections.
Shared between sync and async system-message builders.
"""
content = ""
# Attached media
if audio is not None or images is not None or videos is not None or files is not None:
content += "<attached_media>\n"
content += "You have the following media attached to your message:\n"
if audio is not None and len(audio) > 0:
content += " - Audio\n"
if images is not None and len(images) > 0:
content += " - Images\n"
if videos is not None and len(videos) > 0:
content += " - Videos\n"
if files is not None and len(files) > 0:
content += " - Files\n"
content += "</attached_media>\n\n"
# Additional information
if len(additional_information) > 0:
content += "<additional_information>"
for _ai in additional_information:
content += f"\n- {_ai}"
content += "\n</additional_information>\n\n"
# Tool instructions
if team._tool_instructions is not None:
for _ti in team._tool_instructions:
content += f"{_ti}\n"
system_message_from_model = team.model.get_system_message_for_model(tools) # type: ignore[union-attr]
if system_message_from_model is not None:
content += system_message_from_model
if team.expected_output is not None:
content += f"<expected_output>\n{team.expected_output.strip()}\n</expected_output>\n\n"
if team.additional_context is not None:
content += f"<additional_context>\n{team.additional_context.strip()}\n</additional_context>\n\n"
if add_session_state_to_context and session_state is not None:
content += _get_formatted_session_state_for_system_message(team, session_state)
# JSON output prompt
if (
output_schema is not None
and team.parser_model is None
and team.model
and not (
(team.model.supports_native_structured_outputs or team.model.supports_json_schema_outputs)
and not team.use_json_mode
)
):
content += f"{_get_json_output_prompt(team, output_schema)}"
return content
def get_system_message(
    team: "Team",
    session: TeamSession,
    run_context: Optional[RunContext] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    add_session_state_to_context: Optional[bool] = None,
) -> Optional[Message]:
    """Build the system message for the team (synchronous).

    Resolution order:
    1. If ``team.system_message`` is set (Message, str, or callable), resolve and return it.
    2. Otherwise, assemble the default system message from the team's configuration:
       members/mode context, identity sections, knowledge, memories, session summary,
       and trailing sections (media, additional info, tool instructions, etc.).

    Args:
        team: The team whose system message is being built.
        session: The current team session (source of the session summary).
        run_context: Per-run context supplying session_state, user_id, and output_schema.
        audio: Audio attached to the user message (only its presence is noted in the prompt).
        images: Images attached to the user message.
        videos: Videos attached to the user message.
        files: Files attached to the user message.
        tools: Tools for this run; forwarded to model-specific prompt hooks.
        add_session_state_to_context: Whether to append the <session_state> block.

    Returns:
        The assembled system Message, or the custom Message as-is when one was configured.

    Raises:
        Exception: If a callable ``system_message`` does not return a string.
        ValueError: If user memories must be fetched but the team's database is
            async-only (use ``aget_system_message`` instead).
    """
    from agno.team._init import _has_async_db, _set_memory_manager

    # Extract per-run values from run_context (all None when no run_context is given)
    session_state = run_context.session_state if run_context else None
    user_id = run_context.user_id if run_context else None
    output_schema = run_context.output_schema if run_context else None
    # 1. If a custom system_message is provided, use that.
    if team.system_message is not None:
        # A ready-made Message instance is returned untouched.
        if isinstance(team.system_message, Message):
            return team.system_message
        sys_message_content: str = ""
        if isinstance(team.system_message, str):
            sys_message_content = team.system_message
        elif callable(team.system_message):
            sys_message_content = execute_system_message(
                system_message=team.system_message,
                agent=cast(Any, team),
                team=cast(Any, team),
                session_state=session_state,
                run_context=run_context,
            )
            if not isinstance(sys_message_content, str):
                raise Exception("system_message must return a string")
        # Format the system message with the session state variables
        if team.resolve_in_context:
            sys_message_content = _format_message_with_state_variables(
                team,
                sys_message_content,
                run_context=run_context,
            )
        # type: ignore
        return Message(role=team.system_message_role, content=sys_message_content)
    # 2. Otherwise, gather the inputs for the default system message.
    # 2.1 Build the list of instructions for the system message
    team.model = cast(Model, team.model)
    instructions: List[str] = []
    if team.instructions is not None:
        _instructions = team.instructions
        if callable(team.instructions):
            _instructions = execute_instructions(
                instructions=team.instructions,
                agent=cast(Any, team),
                team=cast(Any, team),
                session_state=session_state,
                run_context=run_context,
            )
        # Instructions may resolve to a single string or a list of strings.
        if isinstance(_instructions, str):
            instructions.append(_instructions)
        elif isinstance(_instructions, list):
            instructions.extend(_instructions)
    # 2.2 Add instructions from the Model
    _model_instructions = team.model.get_instructions_for_model(tools)
    if _model_instructions is not None:
        instructions.extend(_model_instructions)
    # 2.3 Build a list of additional information for the system message
    additional_information: List[str] = []
    # 2.3.1 Add instructions for using markdown (skipped when a structured output schema is set)
    if team.markdown and output_schema is None:
        additional_information.append("Use markdown to format your answers.")
    # 2.3.2 Add the current datetime, in the team's timezone when a valid identifier is configured
    if team.add_datetime_to_context:
        from datetime import datetime

        tz = None
        if team.timezone_identifier:
            try:
                from zoneinfo import ZoneInfo

                tz = ZoneInfo(team.timezone_identifier)
            except Exception:
                # Fall back to naive local time on a bad identifier
                log_warning("Invalid timezone identifier")
        time = datetime.now(tz) if tz else datetime.now()
        additional_information.append(f"The current time is {time}.")
    # 2.3.3 Add the current (approximate) location
    if team.add_location_to_context:
        from agno.utils.location import get_location

        location = get_location()
        if location:
            location_str = ", ".join(
                filter(None, [location.get("city"), location.get("region"), location.get("country")])
            )
            if location_str:
                additional_information.append(f"Your approximate location is: {location_str}.")
    # 2.3.4 Add team name if provided
    if team.name is not None and team.add_name_to_context:
        additional_information.append(f"Your name is: {team.name}.")
    # 3. Assemble the default system message for the Team.
    system_message_content: str = ""
    # 3.1 Opening + team members + mode instructions
    system_message_content += _build_team_context(team, run_context=run_context)
    # 3.2 Identity sections: description, role, instructions
    system_message_content += _build_identity_sections(team, instructions)
    # 3.3 Knowledge base instructions
    if team.knowledge is not None and team.search_knowledge and team.add_search_knowledge_instructions:
        build_context_fn = getattr(team.knowledge, "build_context", None)
        if callable(build_context_fn):
            knowledge_context = build_context_fn(
                enable_agentic_filters=team.enable_agentic_knowledge_filters,
            )
            if knowledge_context:
                system_message_content += knowledge_context + "\n"
    # 3.4 Memories
    if team.add_memories_to_context:
        _memory_manager_not_set = False
        if not user_id:
            user_id = "default"
        if team.memory_manager is None:
            # Temporarily attach a memory manager just for this lookup; reset below.
            _set_memory_manager(team)
            _memory_manager_not_set = True
        # The sync path cannot await an async DB; callers must use aget_system_message.
        if _has_async_db(team):
            raise ValueError(
                "Sync get_system_message cannot retrieve user memories with an async database. "
                "Use aget_system_message instead."
            )
        user_memories = team.memory_manager.get_user_memories(user_id=user_id)  # type: ignore
        if user_memories and len(user_memories) > 0:
            system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
            system_message_content += "<memories_from_previous_interactions>"
            for _memory in user_memories:  # type: ignore
                system_message_content += f"\n- {_memory.memory}"
            system_message_content += "\n</memories_from_previous_interactions>\n\n"
            system_message_content += (
                "Note: this information is from previous interactions and may be updated in this conversation. "
                "You should always prefer information from this conversation over the past memories.\n"
            )
        else:
            system_message_content += (
                "You have the capability to retain memories from previous interactions with the user, "
                "but have not had any interactions with the user yet.\n"
            )
        if _memory_manager_not_set:
            # Undo the temporary memory manager attached above.
            team.memory_manager = None
    if team.enable_agentic_memory:
        system_message_content += (
            "\n<updating_user_memories>\n"
            "- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
            "- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
            "- Memories should include details that could personalize ongoing interactions with the user.\n"
            "- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
            "- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
            "- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
            "</updating_user_memories>\n\n"
        )
    # 3.5 Session summary
    if team.add_session_summary_to_context and session.summary is not None:
        system_message_content += "Here is a brief summary of your previous interactions:\n\n"
        system_message_content += "<summary_of_previous_interactions>\n"
        system_message_content += session.summary.summary
        system_message_content += "\n</summary_of_previous_interactions>\n\n"
        system_message_content += (
            "Note: this information is from previous interactions and may be outdated. "
            "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
        )
    # 3.6 Trailing sections: media, additional info, tools, expected output, etc.
    system_message_content += _build_trailing_sections(
        team,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        additional_information=additional_information,
        tools=tools,
        output_schema=output_schema,
        run_context=run_context,
        session_state=session_state,
        add_session_state_to_context=add_session_state_to_context,
    )
    # Format the full system message with dependencies and session state variables
    if team.resolve_in_context:
        system_message_content = _format_message_with_state_variables(
            team,
            system_message_content,
            run_context=run_context,
        )
    return Message(role=team.system_message_role, content=system_message_content.strip())
async def aget_system_message(
    team: "Team",
    session: TeamSession,
    run_context: Optional[RunContext] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    add_session_state_to_context: Optional[bool] = None,
) -> Optional[Message]:
    """Build the system message for the team (asynchronous).

    Mirrors ``get_system_message`` but awaits callable system messages /
    instructions and supports async databases when fetching user memories.

    Args:
        team: The team whose system message is being built.
        session: The current team session (source of the session summary).
        run_context: Per-run context supplying session_state, user_id, and output_schema.
        audio: Audio attached to the user message (only its presence is noted in the prompt).
        images: Images attached to the user message.
        videos: Videos attached to the user message.
        files: Files attached to the user message.
        tools: Tools for this run; forwarded to model-specific prompt hooks.
        add_session_state_to_context: Whether to append the <session_state> block.

    Returns:
        The assembled system Message, or the custom Message as-is when one was configured.

    Raises:
        Exception: If a callable ``system_message`` does not return a string.
    """
    from agno.team._init import _has_async_db, _set_memory_manager

    # Extract per-run values from run_context (all None when no run_context is given)
    session_state = run_context.session_state if run_context else None
    user_id = run_context.user_id if run_context else None
    output_schema = run_context.output_schema if run_context else None
    # 1. If a custom system_message is provided, use that.
    if team.system_message is not None:
        # A ready-made Message instance is returned untouched.
        if isinstance(team.system_message, Message):
            return team.system_message
        sys_message_content: str = ""
        if isinstance(team.system_message, str):
            sys_message_content = team.system_message
        elif callable(team.system_message):
            sys_message_content = await aexecute_system_message(
                system_message=team.system_message,
                agent=cast(Any, team),
                team=cast(Any, team),
                session_state=session_state,
                run_context=run_context,
            )
            if not isinstance(sys_message_content, str):
                raise Exception("system_message must return a string")
        # Format the system message with the session state variables
        if team.resolve_in_context:
            sys_message_content = _format_message_with_state_variables(
                team,
                sys_message_content,
                run_context=run_context,
            )
        # type: ignore
        return Message(role=team.system_message_role, content=sys_message_content)
    # 2. Otherwise, gather the inputs for the default system message.
    # 2.1 Build the list of instructions for the system message
    team.model = cast(Model, team.model)
    instructions: List[str] = []
    if team.instructions is not None:
        _instructions = team.instructions
        if callable(team.instructions):
            _instructions = await aexecute_instructions(
                instructions=team.instructions,
                agent=cast(Any, team),
                team=cast(Any, team),
                session_state=session_state,
                run_context=run_context,
            )
        # Instructions may resolve to a single string or a list of strings.
        if isinstance(_instructions, str):
            instructions.append(_instructions)
        elif isinstance(_instructions, list):
            instructions.extend(_instructions)
    # 2.2 Add instructions from the Model
    _model_instructions = team.model.get_instructions_for_model(tools)
    if _model_instructions is not None:
        instructions.extend(_model_instructions)
    # 2.3 Build a list of additional information for the system message
    additional_information: List[str] = []
    # 2.3.1 Add instructions for using markdown (skipped when a structured output schema is set)
    if team.markdown and output_schema is None:
        additional_information.append("Use markdown to format your answers.")
    # 2.3.2 Add the current datetime, in the team's timezone when a valid identifier is configured
    if team.add_datetime_to_context:
        from datetime import datetime

        tz = None
        if team.timezone_identifier:
            try:
                from zoneinfo import ZoneInfo

                tz = ZoneInfo(team.timezone_identifier)
            except Exception:
                # Fall back to naive local time on a bad identifier
                log_warning("Invalid timezone identifier")
        time = datetime.now(tz) if tz else datetime.now()
        additional_information.append(f"The current time is {time}.")
    # 2.3.3 Add the current (approximate) location
    if team.add_location_to_context:
        from agno.utils.location import get_location

        location = get_location()
        if location:
            location_str = ", ".join(
                filter(None, [location.get("city"), location.get("region"), location.get("country")])
            )
            if location_str:
                additional_information.append(f"Your approximate location is: {location_str}.")
    # 2.3.4 Add team name if provided
    if team.name is not None and team.add_name_to_context:
        additional_information.append(f"Your name is: {team.name}.")
    # 3. Assemble the default system message for the Team.
    system_message_content: str = ""
    # 3.1 Opening + team members + mode instructions
    system_message_content += _build_team_context(team, run_context=run_context)
    # 3.2 Identity sections: description, role, instructions
    system_message_content += _build_identity_sections(team, instructions)
    # 3.3 Knowledge base instructions
    if team.knowledge is not None and team.search_knowledge and team.add_search_knowledge_instructions:
        build_context_fn = getattr(team.knowledge, "build_context", None)
        if callable(build_context_fn):
            knowledge_context = build_context_fn(
                enable_agentic_filters=team.enable_agentic_knowledge_filters,
            )
            if knowledge_context:
                system_message_content += knowledge_context + "\n"
    # 3.4 Memories
    if team.add_memories_to_context:
        _memory_manager_not_set = False
        if not user_id:
            user_id = "default"
        if team.memory_manager is None:
            # Temporarily attach a memory manager just for this lookup; reset below.
            _set_memory_manager(team)
            _memory_manager_not_set = True
        # Unlike the sync builder, an async database is supported here.
        if _has_async_db(team):
            user_memories = await team.memory_manager.aget_user_memories(user_id=user_id)  # type: ignore
        else:
            user_memories = team.memory_manager.get_user_memories(user_id=user_id)  # type: ignore
        if user_memories and len(user_memories) > 0:
            system_message_content += "You have access to user info and preferences from previous interactions that you can use to personalize your response:\n\n"
            system_message_content += "<memories_from_previous_interactions>"
            for _memory in user_memories:  # type: ignore
                system_message_content += f"\n- {_memory.memory}"
            system_message_content += "\n</memories_from_previous_interactions>\n\n"
            system_message_content += (
                "Note: this information is from previous interactions and may be updated in this conversation. "
                "You should always prefer information from this conversation over the past memories.\n"
            )
        else:
            system_message_content += (
                "You have the capability to retain memories from previous interactions with the user, "
                "but have not had any interactions with the user yet.\n"
            )
        if _memory_manager_not_set:
            # Undo the temporary memory manager attached above.
            team.memory_manager = None
    if team.enable_agentic_memory:
        system_message_content += (
            "\n<updating_user_memories>\n"
            "- You have access to the `update_user_memory` tool that you can use to add new memories, update existing memories, delete memories, or clear all memories.\n"
            "- If the user's message includes information that should be captured as a memory, use the `update_user_memory` tool to update your memory database.\n"
            "- Memories should include details that could personalize ongoing interactions with the user.\n"
            "- Use this tool to add new memories or update existing memories that you identify in the conversation.\n"
            "- Use this tool if the user asks to update their memory, delete a memory, or clear all memories.\n"
            "- If you use the `update_user_memory` tool, remember to pass on the response to the user.\n"
            "</updating_user_memories>\n\n"
        )
    # 3.5 Session summary
    if team.add_session_summary_to_context and session.summary is not None:
        system_message_content += "Here is a brief summary of your previous interactions:\n\n"
        system_message_content += "<summary_of_previous_interactions>\n"
        system_message_content += session.summary.summary
        system_message_content += "\n</summary_of_previous_interactions>\n\n"
        system_message_content += (
            "Note: this information is from previous interactions and may be outdated. "
            "You should ALWAYS prefer information from this conversation over the past summary.\n\n"
        )
    # 3.6 Trailing sections: media, additional info, tools, expected output, etc.
    system_message_content += _build_trailing_sections(
        team,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        additional_information=additional_information,
        tools=tools,
        output_schema=output_schema,
        run_context=run_context,
        session_state=session_state,
        add_session_state_to_context=add_session_state_to_context,
    )
    # Format the full system message with dependencies and session state variables
    if team.resolve_in_context:
        system_message_content = _format_message_with_state_variables(
            team,
            system_message_content,
            run_context=run_context,
        )
    return Message(role=team.system_message_role, content=system_message_content.strip())
def _get_formatted_session_state_for_system_message(team: "Team", session_state: Dict[str, Any]) -> str:
return f"\n<session_state>\n{session_state}\n</session_state>\n\n"
def _get_run_messages(
    team: "Team",
    *,
    run_response: TeamRunOutput,
    run_context: RunContext,
    session: TeamSession,
    user_id: Optional[str] = None,
    input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    **kwargs: Any,
) -> RunMessages:
    """Assemble the full message list for a team run (synchronous).

    Returns a RunMessages object with the following attributes:
    - system_message: The system message for this run
    - user_message: The user message for this run
    - messages: List of messages to send to the model, built in order:
      1. The system message
      2. Extra messages from ``team.additional_input``
      3. History messages from the session (when ``add_history_to_context``)
      4. The user message built from ``input_message``

    Side effect: extra messages are also recorded on ``run_response.additional_input``.
    """
    # Initialize the RunMessages object
    run_messages = RunMessages()
    # 1. Add system message to run_messages
    system_message = team.get_system_message(
        session=session,
        run_context=run_context,
        images=images,
        audio=audio,
        videos=videos,
        files=files,
        add_session_state_to_context=add_session_state_to_context,
        tools=tools,
    )
    if system_message is not None:
        run_messages.system_message = system_message
        run_messages.messages.append(system_message)
    # 2. Add extra messages to run_messages if provided
    if team.additional_input is not None:
        messages_to_add_to_run_response: List[Message] = []
        if run_messages.extra_messages is None:
            run_messages.extra_messages = []
        for _m in team.additional_input:
            # Accept Message instances as-is; dicts are validated into Messages,
            # and invalid dicts are skipped with a warning.
            if isinstance(_m, Message):
                messages_to_add_to_run_response.append(_m)
                run_messages.messages.append(_m)
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    _m_parsed = Message.model_validate(_m)
                    messages_to_add_to_run_response.append(_m_parsed)
                    run_messages.messages.append(_m_parsed)
                    run_messages.extra_messages.append(_m_parsed)
                except Exception as e:
                    log_warning(f"Failed to validate message: {e}")
        # Also record the extra messages on the run_response
        if len(messages_to_add_to_run_response) > 0:
            log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
            if run_response.additional_input is None:
                run_response.additional_input = messages_to_add_to_run_response
            else:
                run_response.additional_input.extend(messages_to_add_to_run_response)
    # 3. Add history to run_messages
    if add_history_to_context:
        from copy import deepcopy

        # Only skip messages from history when system_message_role is NOT a standard conversation role.
        # Standard conversation roles ("user", "assistant", "tool") should never be filtered
        # to preserve conversation continuity.
        skip_role = team.system_message_role if team.system_message_role not in ["user", "assistant", "tool"] else None
        # NOTE(review): team_id is only passed when nested under a parent team —
        # presumably to scope history to this sub-team; confirm with session.get_messages.
        history = session.get_messages(
            last_n_runs=team.num_history_runs,
            limit=team.num_history_messages,
            skip_roles=[skip_role] if skip_role else None,
            team_id=team.id if team.parent_team_id is not None else None,
        )
        if len(history) > 0:
            # Create a deep copy of the history messages to avoid modifying the original messages
            history_copy = [deepcopy(msg) for msg in history]
            # Tag each message as coming from history
            for _msg in history_copy:
                _msg.from_history = True
            # Filter tool calls from history messages
            if team.max_tool_calls_from_history is not None:
                filter_tool_calls(history_copy, team.max_tool_calls_from_history)
            log_debug(f"Adding {len(history_copy)} messages from history")
            # Extend the messages with the history
            run_messages.messages += history_copy
    # 4. Add the user message to run_messages (always last, after system/extra/history)
    user_message = _get_user_message(
        team,
        run_response=run_response,
        run_context=run_context,
        input_message=input_message,
        user_id=user_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        add_dependencies_to_context=add_dependencies_to_context,
        **kwargs,
    )
    # Add user message to run_messages
    if user_message is not None:
        run_messages.user_message = user_message
        run_messages.messages.append(user_message)
    return run_messages
async def _aget_run_messages(
    team: "Team",
    *,
    run_response: TeamRunOutput,
    run_context: RunContext,
    session: TeamSession,
    user_id: Optional[str] = None,
    input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    tools: Optional[List[Union[Function, dict]]] = None,
    **kwargs: Any,
) -> RunMessages:
    """Assemble the full message list for a team run (asynchronous twin of ``_get_run_messages``).

    Returns a RunMessages object with the following attributes:
    - system_message: The system message for this run
    - user_message: The user message for this run
    - messages: List of messages to send to the model, built in order:
      1. The system message (awaited)
      2. Extra messages from ``team.additional_input``
      3. History messages from the session (when ``add_history_to_context``)
      4. The user message built from ``input_message`` (awaited)

    Side effect: extra messages are also recorded on ``run_response.additional_input``.
    """
    # Initialize the RunMessages object
    run_messages = RunMessages()
    # 1. Add system message to run_messages
    system_message = await team.aget_system_message(
        session=session,
        run_context=run_context,
        images=images,
        audio=audio,
        videos=videos,
        files=files,
        add_session_state_to_context=add_session_state_to_context,
        tools=tools,
    )
    if system_message is not None:
        run_messages.system_message = system_message
        run_messages.messages.append(system_message)
    # 2. Add extra messages to run_messages if provided
    if team.additional_input is not None:
        messages_to_add_to_run_response: List[Message] = []
        if run_messages.extra_messages is None:
            run_messages.extra_messages = []
        for _m in team.additional_input:
            # Accept Message instances as-is; dicts are validated into Messages,
            # and invalid dicts are skipped with a warning.
            if isinstance(_m, Message):
                messages_to_add_to_run_response.append(_m)
                run_messages.messages.append(_m)
                run_messages.extra_messages.append(_m)
            elif isinstance(_m, dict):
                try:
                    _m_parsed = Message.model_validate(_m)
                    messages_to_add_to_run_response.append(_m_parsed)
                    run_messages.messages.append(_m_parsed)
                    run_messages.extra_messages.append(_m_parsed)
                except Exception as e:
                    log_warning(f"Failed to validate message: {e}")
        # Also record the extra messages on the run_response
        if len(messages_to_add_to_run_response) > 0:
            log_debug(f"Adding {len(messages_to_add_to_run_response)} extra messages")
            if run_response.additional_input is None:
                run_response.additional_input = messages_to_add_to_run_response
            else:
                run_response.additional_input.extend(messages_to_add_to_run_response)
    # 3. Add history to run_messages
    if add_history_to_context:
        from copy import deepcopy

        # Only skip messages from history when system_message_role is NOT a standard conversation role.
        # Standard conversation roles ("user", "assistant", "tool") should never be filtered
        # to preserve conversation continuity.
        skip_role = team.system_message_role if team.system_message_role not in ["user", "assistant", "tool"] else None
        # NOTE(review): team_id is only passed when nested under a parent team —
        # presumably to scope history to this sub-team; confirm with session.get_messages.
        history = session.get_messages(
            last_n_runs=team.num_history_runs,
            limit=team.num_history_messages,
            skip_roles=[skip_role] if skip_role else None,
            team_id=team.id if team.parent_team_id is not None else None,
        )
        if len(history) > 0:
            # Create a deep copy of the history messages to avoid modifying the original messages
            history_copy = [deepcopy(msg) for msg in history]
            # Tag each message as coming from history
            for _msg in history_copy:
                _msg.from_history = True
            # Filter tool calls from history messages
            if team.max_tool_calls_from_history is not None:
                filter_tool_calls(history_copy, team.max_tool_calls_from_history)
            log_debug(f"Adding {len(history_copy)} messages from history")
            # Extend the messages with the history
            run_messages.messages += history_copy
    # 4. Add the user message to run_messages (always last, after system/extra/history)
    user_message = await _aget_user_message(
        team,
        run_response=run_response,
        run_context=run_context,
        input_message=input_message,
        user_id=user_id,
        audio=audio,
        images=images,
        videos=videos,
        files=files,
        add_dependencies_to_context=add_dependencies_to_context,
        **kwargs,
    )
    # Add user message to run_messages
    if user_message is not None:
        run_messages.user_message = user_message
        run_messages.messages.append(user_message)
    return run_messages
def _get_user_message(
    team: "Team",
    *,
    run_response: TeamRunOutput,
    run_context: RunContext,
    input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    **kwargs,
):
    """Build the user Message for a team run from ``input_message``.

    Supported input shapes:
        - None: returns an empty-content Message if any media is attached, else None.
        - list: multimodal dicts are preserved as-is; Message lists and str lists
          are flattened to text; anything else is stringified.
        - Message: returned unchanged.
        - dict: validated into a Message (or JSON-dumped when the team's
          input_schema is a TypedDict).
        - BaseModel: serialized to JSON content.
        - str (or other): optionally enriched with knowledge references,
          dependency context, and session-state variable substitution.

    Media (images/audio/videos/files) is attached only when
    ``team.send_media_to_model`` is enabled.

    Returns:
        A Message with role "user", or None when there is no input and no media.
        NOTE(review): the dict/BaseModel branches fall through on a validation
        failure and implicitly return None — confirm callers tolerate that.
    """
    # Get references from the knowledge base to use in the user message
    from agno.team._utils import _convert_dependencies_to_string, _convert_documents_to_string
    # Populated only when knowledge retrieval succeeds in the plain-string branch below.
    references = None
    if input_message is None:
        # If we have any media, return a message with empty content
        if images is not None or audio is not None or videos is not None or files is not None:
            return Message(
                role="user",
                content="",
                # Media is withheld from the model when send_media_to_model is disabled.
                images=None if not team.send_media_to_model else images,
                audio=None if not team.send_media_to_model else audio,
                videos=None if not team.send_media_to_model else videos,
                files=None if not team.send_media_to_model else files,
                **kwargs,
            )
        else:
            # If the input is None, return None
            return None
    else:
        if isinstance(input_message, list):
            input_content: Union[str, List[Any], List[Message]]
            if len(input_message) > 0 and isinstance(input_message[0], dict) and "type" in input_message[0]:
                # This is multimodal content (text + images/audio/video), preserve the structure
                input_content = input_message
            elif len(input_message) > 0 and isinstance(input_message[0], Message):
                # This is a list of Message objects, extract text content from them
                input_content = get_text_from_message(input_message)
            elif all(isinstance(item, str) for item in input_message):
                # A list of plain strings becomes one newline-joined string.
                input_content = "\n".join([str(item) for item in input_message])
            else:
                # Fallback: stringify the whole list.
                input_content = str(input_message)
            return Message(
                role="user",
                content=input_content,
                images=None if not team.send_media_to_model else images,
                audio=None if not team.send_media_to_model else audio,
                videos=None if not team.send_media_to_model else videos,
                files=None if not team.send_media_to_model else files,
                **kwargs,
            )
        # If message is provided as a Message, use it directly
        elif isinstance(input_message, Message):
            return input_message
        # If message is provided as a dict, try to validate it as a Message
        elif isinstance(input_message, dict):
            try:
                if team.input_schema and is_typed_dict(team.input_schema):
                    # TypedDict inputs are not Message-shaped; serialize them as JSON content.
                    import json
                    content = json.dumps(input_message, indent=2, ensure_ascii=False)
                    return Message(role="user", content=content)
                else:
                    return Message.model_validate(input_message)
            except Exception as e:
                # NOTE(review): falls through to an implicit None return on failure.
                log_warning(f"Failed to validate message: {e}")
        # If message is provided as a BaseModel, convert it to a Message
        elif isinstance(input_message, BaseModel):
            try:
                # Create a user message with the BaseModel content
                content = input_message.model_dump_json(indent=2, exclude_none=True)
                return Message(role="user", content=content)
            except Exception as e:
                # NOTE(review): falls through to an implicit None return on failure.
                log_warning(f"Failed to convert BaseModel to message: {e}")
        else:
            # Plain string (or other) input: optionally enrich with knowledge and context.
            user_msg_content = input_message
            if team.add_knowledge_to_context:
                if isinstance(input_message, str):
                    user_msg_content = input_message
                elif callable(input_message):
                    # A callable input is invoked with the team to produce the query text.
                    user_msg_content = input_message(agent=team)
                else:
                    raise Exception("input must be a string or a callable when add_references is True")
                try:
                    retrieval_timer = Timer()
                    retrieval_timer.start()
                    docs_from_knowledge = team.get_relevant_docs_from_knowledge(
                        query=user_msg_content,
                        filters=run_context.knowledge_filters,
                        run_context=run_context,
                        **kwargs,
                    )
                    if docs_from_knowledge is not None:
                        references = MessageReferences(
                            query=user_msg_content,
                            references=docs_from_knowledge,
                            time=round(retrieval_timer.elapsed, 4),
                        )
                        # Add the references to the run_response
                        if run_response.references is None:
                            run_response.references = []
                        run_response.references.append(references)
                    retrieval_timer.stop()
                    log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
                except Exception as e:
                    # Retrieval is best-effort; the run continues without references.
                    log_warning(f"Failed to get references: {e}")
            if team.resolve_in_context:
                # Substitute {variable} placeholders with session-state/dependency values.
                user_msg_content = _format_message_with_state_variables(
                    team,
                    user_msg_content,
                    run_context=run_context,
                )
            # Convert to string for concatenation operations
            user_msg_content_str = get_text_from_message(user_msg_content) if user_msg_content is not None else ""
            # 4.1 Add knowledge references to user message
            if (
                team.add_knowledge_to_context
                and references is not None
                and references.references is not None
                and len(references.references) > 0
            ):
                user_msg_content_str += "\n\nUse the following references from the knowledge base if it helps:\n"
                user_msg_content_str += "<references>\n"
                user_msg_content_str += _convert_documents_to_string(team, references.references) + "\n"
                user_msg_content_str += "</references>"
            # 4.2 Add context to user message
            if add_dependencies_to_context and run_context.dependencies is not None:
                user_msg_content_str += "\n\n<additional context>\n"
                user_msg_content_str += _convert_dependencies_to_string(team, run_context.dependencies) + "\n"
                user_msg_content_str += "</additional context>"
            # Use the string version for the final content
            user_msg_content = user_msg_content_str
            # Return the user message
            return Message(
                role="user",
                content=user_msg_content,
                images=None if not team.send_media_to_model else images,
                audio=None if not team.send_media_to_model else audio,
                videos=None if not team.send_media_to_model else videos,
                files=None if not team.send_media_to_model else files,
                **kwargs,
            )
async def _aget_user_message(
    team: "Team",
    *,
    run_response: TeamRunOutput,
    run_context: RunContext,
    input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    **kwargs,
):
    """Async variant of ``_get_user_message``: build the user Message for a team run.

    Identical branching to the sync helper; the only difference is that
    knowledge retrieval uses ``team.aget_relevant_docs_from_knowledge``.

    Returns:
        A Message with role "user", or None when there is no input and no media.
        NOTE(review): the dict/BaseModel branches fall through on a validation
        failure and implicitly return None — confirm callers tolerate that.
    """
    # Get references from the knowledge base to use in the user message
    from agno.team._utils import _convert_dependencies_to_string, _convert_documents_to_string
    # Populated only when knowledge retrieval succeeds in the plain-string branch below.
    references = None
    if input_message is None:
        # If we have any media, return a message with empty content
        if images is not None or audio is not None or videos is not None or files is not None:
            return Message(
                role="user",
                content="",
                # Media is withheld from the model when send_media_to_model is disabled.
                images=None if not team.send_media_to_model else images,
                audio=None if not team.send_media_to_model else audio,
                videos=None if not team.send_media_to_model else videos,
                files=None if not team.send_media_to_model else files,
                **kwargs,
            )
        else:
            # If the input is None, return None
            return None
    else:
        if isinstance(input_message, list):
            input_content: Union[str, List[Any], List[Message]]
            if len(input_message) > 0 and isinstance(input_message[0], dict) and "type" in input_message[0]:
                # This is multimodal content (text + images/audio/video), preserve the structure
                input_content = input_message
            elif len(input_message) > 0 and isinstance(input_message[0], Message):
                # This is a list of Message objects, extract text content from them
                input_content = get_text_from_message(input_message)
            elif all(isinstance(item, str) for item in input_message):
                # A list of plain strings becomes one newline-joined string.
                input_content = "\n".join([str(item) for item in input_message])
            else:
                # Fallback: stringify the whole list.
                input_content = str(input_message)
            return Message(
                role="user",
                content=input_content,
                images=None if not team.send_media_to_model else images,
                audio=None if not team.send_media_to_model else audio,
                videos=None if not team.send_media_to_model else videos,
                files=None if not team.send_media_to_model else files,
                **kwargs,
            )
        # If message is provided as a Message, use it directly
        elif isinstance(input_message, Message):
            return input_message
        # If message is provided as a dict, try to validate it as a Message
        elif isinstance(input_message, dict):
            try:
                if team.input_schema and is_typed_dict(team.input_schema):
                    # TypedDict inputs are not Message-shaped; serialize them as JSON content.
                    import json
                    content = json.dumps(input_message, indent=2, ensure_ascii=False)
                    return Message(role="user", content=content)
                else:
                    return Message.model_validate(input_message)
            except Exception as e:
                # NOTE(review): falls through to an implicit None return on failure.
                log_warning(f"Failed to validate input: {e}")
        # If message is provided as a BaseModel, convert it to a Message
        elif isinstance(input_message, BaseModel):
            try:
                # Create a user message with the BaseModel content
                content = input_message.model_dump_json(indent=2, exclude_none=True)
                return Message(role="user", content=content)
            except Exception as e:
                # NOTE(review): falls through to an implicit None return on failure.
                log_warning(f"Failed to convert BaseModel to message: {e}")
        else:
            # Plain string (or other) input: optionally enrich with knowledge and context.
            user_msg_content = input_message
            if team.add_knowledge_to_context:
                if isinstance(input_message, str):
                    user_msg_content = input_message
                elif callable(input_message):
                    # A callable input is invoked with the team to produce the query text.
                    user_msg_content = input_message(agent=team)
                else:
                    raise Exception("input must be a string or a callable when add_references is True")
                try:
                    retrieval_timer = Timer()
                    retrieval_timer.start()
                    docs_from_knowledge = await team.aget_relevant_docs_from_knowledge(
                        query=user_msg_content,
                        filters=run_context.knowledge_filters,
                        run_context=run_context,
                        **kwargs,
                    )
                    if docs_from_knowledge is not None:
                        references = MessageReferences(
                            query=user_msg_content,
                            references=docs_from_knowledge,
                            time=round(retrieval_timer.elapsed, 4),
                        )
                        # Add the references to the run_response
                        if run_response.references is None:
                            run_response.references = []
                        run_response.references.append(references)
                    retrieval_timer.stop()
                    log_debug(f"Time to get references: {retrieval_timer.elapsed:.4f}s")
                except Exception as e:
                    # Retrieval is best-effort; the run continues without references.
                    log_warning(f"Failed to get references: {e}")
            if team.resolve_in_context:
                # Substitute {variable} placeholders with session-state/dependency values.
                user_msg_content = _format_message_with_state_variables(
                    team,
                    user_msg_content,
                    run_context=run_context,
                )
            # Convert to string for concatenation operations
            user_msg_content_str = get_text_from_message(user_msg_content) if user_msg_content is not None else ""
            # 4.1 Add knowledge references to user message
            if (
                team.add_knowledge_to_context
                and references is not None
                and references.references is not None
                and len(references.references) > 0
            ):
                user_msg_content_str += "\n\nUse the following references from the knowledge base if it helps:\n"
                user_msg_content_str += "<references>\n"
                user_msg_content_str += _convert_documents_to_string(team, references.references) + "\n"
                user_msg_content_str += "</references>"
            # 4.2 Add context to user message
            if add_dependencies_to_context and run_context.dependencies is not None:
                user_msg_content_str += "\n\n<additional context>\n"
                user_msg_content_str += _convert_dependencies_to_string(team, run_context.dependencies) + "\n"
                user_msg_content_str += "</additional context>"
            # Use the string version for the final content
            user_msg_content = user_msg_content_str
            # Return the user message
            return Message(
                role="user",
                content=user_msg_content,
                images=None if not team.send_media_to_model else images,
                audio=None if not team.send_media_to_model else audio,
                videos=None if not team.send_media_to_model else videos,
                files=None if not team.send_media_to_model else files,
                **kwargs,
            )
def _get_messages_for_parser_model(
    team: "Team",
    model_response: ModelResponse,
    response_format: Optional[Union[Dict, Type[BaseModel]]],
    run_context: Optional[RunContext] = None,
) -> List[Message]:
    """Build the system+user message pair sent to the parser model."""
    from agno.utils.prompts import get_json_output_prompt

    # The run context may carry an output schema that drives JSON instructions.
    output_schema = None
    if run_context:
        output_schema = run_context.output_schema
    # Prefer the team's custom parser prompt; otherwise use the default instruction.
    if team.parser_model_prompt is None:
        system_content = "You are tasked with creating a structured output from the provided user message."
    else:
        system_content = team.parser_model_prompt
    # Append JSON-output instructions only in plain JSON mode with a known schema.
    if output_schema is not None and response_format == {"type": "json_object"}:
        system_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
    system_message = Message(role="system", content=system_content)
    user_message = Message(role="user", content=model_response.content)
    return [system_message, user_message]
def _get_messages_for_parser_model_stream(
    team: "Team",
    run_response: TeamRunOutput,
    response_format: Optional[Union[Dict, Type[BaseModel]]],
    run_context: Optional[RunContext] = None,
) -> List[Message]:
    """Build the system+user message pair for the parser model (streaming path)."""
    from agno.utils.prompts import get_json_output_prompt

    # The run context may carry an output schema that drives JSON instructions.
    output_schema = None
    if run_context:
        output_schema = run_context.output_schema
    # Prefer the team's custom parser prompt; otherwise use the default instruction.
    if team.parser_model_prompt is None:
        system_content = "You are tasked with creating a structured output from the provided data."
    else:
        system_content = team.parser_model_prompt
    # Append JSON-output instructions only in plain JSON mode with a known schema.
    if output_schema is not None and response_format == {"type": "json_object"}:
        system_content += f"{get_json_output_prompt(output_schema)}"  # type: ignore
    system_message = Message(role="system", content=system_content)
    user_message = Message(role="user", content=run_response.content)
    return [system_message, user_message]
def _get_messages_for_output_model(team: "Team", messages: List[Message]) -> List[Message]:
"""Get the messages for the output model."""
from copy import deepcopy
# Copy the list and messages to avoid mutating the originals
messages = [deepcopy(m) for m in messages]
if team.output_model_prompt is not None:
system_message_exists = False
for message in messages:
if message.role == "system":
system_message_exists = True
message.content = team.output_model_prompt
break
if not system_message_exists:
messages.insert(0, Message(role="system", content=team.output_model_prompt))
# Remove the last assistant message from the messages list
if messages and messages[-1].role == "assistant":
messages.pop(-1)
return messages
def _format_message_with_state_variables(
team: "Team",
message: Any,
run_context: Optional[RunContext] = None,
) -> Any:
"""Format a message with the session state variables from run_context."""
import re
import string
if not isinstance(message, str):
return message
# Extract values from run_context
session_state = run_context.session_state if run_context else None
dependencies = run_context.dependencies if run_context else None
metadata = run_context.metadata if run_context else None
user_id = run_context.user_id if run_context else None
# Should already be resolved and passed from run() method
format_variables = ChainMap(
session_state if session_state is not None else {},
dependencies or {},
metadata or {},
{"user_id": user_id} if user_id is not None else {},
)
converted_msg = message
for var_name in format_variables.keys():
# Only convert standalone {var_name} patterns, not nested ones
pattern = r"\{" + re.escape(var_name) + r"\}"
replacement = "${" + var_name + "}"
converted_msg = re.sub(pattern, replacement, converted_msg)
# Use Template to safely substitute variables
template = string.Template(converted_msg)
try:
result = template.safe_substitute(format_variables)
return result
except Exception as e:
log_warning(f"Template substitution failed: {e}")
return message
def _get_json_output_prompt(
team: "Team", output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None
) -> str:
"""Return the JSON output prompt for the Agent.
This is added to the system prompt when the output_schema is set and structured_outputs is False.
"""
json_output_prompt = "Provide your output as a JSON containing the following fields:"
if output_schema is not None:
if isinstance(output_schema, str):
json_output_prompt += "\n<json_fields>"
json_output_prompt += f"\n{output_schema}"
json_output_prompt += "\n</json_fields>"
elif isinstance(output_schema, list):
json_output_prompt += "\n<json_fields>"
json_output_prompt += f"\n{json.dumps(output_schema)}"
json_output_prompt += "\n</json_fields>"
elif isinstance(output_schema, dict):
json_output_prompt += "\n<json_fields>"
json_output_prompt += f"\n{json.dumps(output_schema)}"
json_output_prompt += "\n</json_fields>"
elif isinstance(output_schema, type) and issubclass(output_schema, BaseModel):
json_schema = output_schema.model_json_schema()
if json_schema is not None:
response_model_properties = {}
json_schema_properties = json_schema.get("properties")
if json_schema_properties is not None:
for field_name, field_properties in json_schema_properties.items():
formatted_field_properties = {
prop_name: prop_value
for prop_name, prop_value in field_properties.items()
if prop_name != "title"
}
response_model_properties[field_name] = formatted_field_properties
json_schema_defs = json_schema.get("$defs")
if json_schema_defs is not None:
response_model_properties["$defs"] = {}
for def_name, def_properties in json_schema_defs.items():
def_fields = def_properties.get("properties")
formatted_def_properties = {}
if def_fields is not None:
for field_name, field_properties in def_fields.items():
formatted_field_properties = {
prop_name: prop_value
for prop_name, prop_value in field_properties.items()
if prop_name != "title"
}
formatted_def_properties[field_name] = formatted_field_properties
if len(formatted_def_properties) > 0:
response_model_properties["$defs"][def_name] = formatted_def_properties
if len(response_model_properties) > 0:
json_output_prompt += "\n<json_fields>"
json_output_prompt += (
f"\n{json.dumps([key for key in response_model_properties.keys() if key != '$defs'])}"
)
json_output_prompt += "\n</json_fields>"
json_output_prompt += "\n\nHere are the properties for each field:"
json_output_prompt += "\n<json_field_properties>"
json_output_prompt += f"\n{json.dumps(response_model_properties, indent=2)}"
json_output_prompt += "\n</json_field_properties>"
else:
log_warning(f"Could not build json schema for {output_schema}")
else:
json_output_prompt += "Provide the output as JSON."
json_output_prompt += "\nStart your response with `{` and end it with `}`."
json_output_prompt += "\nYour output will be passed to json.loads() to convert it to a Python object."
json_output_prompt += "\nMake sure it only contains valid JSON."
return json_output_prompt
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_messages.py",
"license": "Apache License 2.0",
"lines": 1337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_response.py | """Response-related helpers for Team (parsing, output models, reasoning, metrics)."""
from __future__ import annotations
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Type,
Union,
cast,
get_args,
)
from uuid import uuid4
from pydantic import BaseModel
from agno.media import Audio
from agno.models.base import Model
from agno.models.message import Message
from agno.models.response import ModelResponse, ModelResponseEvent
from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
from agno.run import RunContext
from agno.run.agent import RunOutput, RunOutputEvent
from agno.run.messages import RunMessages
from agno.run.requirement import RunRequirement
from agno.run.team import (
TeamRunEvent,
TeamRunOutput,
TeamRunOutputEvent,
)
from agno.session import TeamSession
from agno.tools.function import Function
from agno.utils.events import (
create_team_compression_completed_event,
create_team_compression_started_event,
create_team_model_request_completed_event,
create_team_model_request_started_event,
create_team_parser_model_response_completed_event,
create_team_parser_model_response_started_event,
create_team_reasoning_completed_event,
create_team_reasoning_content_delta_event,
create_team_reasoning_started_event,
create_team_reasoning_step_event,
create_team_run_output_content_event,
create_team_tool_call_completed_event,
create_team_tool_call_error_event,
create_team_tool_call_started_event,
handle_event,
)
from agno.utils.log import log_debug, log_warning
from agno.utils.merge_dict import merge_dictionaries
from agno.utils.reasoning import (
add_reasoning_metrics_to_metadata,
add_reasoning_step_to_metadata,
append_to_reasoning_content,
update_run_output_with_reasoning,
)
from agno.utils.string import parse_response_dict_str, parse_response_model_str
if TYPE_CHECKING:
from agno.reasoning.manager import ReasoningEvent
from agno.team.team import Team
# ---------------------------------------------------------------------------
# Response format
# ---------------------------------------------------------------------------
def get_response_format(
    team: "Team", model: Optional[Model] = None, run_context: Optional[RunContext] = None
) -> Optional[Union[Dict, Type[BaseModel]]]:
    """Resolve the response_format to hand the model for this run.

    Returns the output schema itself (native structured outputs), a provider
    json_schema dict, a plain json_object marker, or None when no schema is set
    (or when json_schema outputs are supported but JSON mode is off).
    """
    model = cast(Model, model or team.model)
    # The output schema for this run lives on the run context.
    output_schema = run_context.output_schema if run_context else None
    if output_schema is None:
        return None
    json_response_format = {"type": "json_object"}
    if model.supports_native_structured_outputs:
        if team.use_json_mode:
            log_debug("Model supports native structured outputs but it is not enabled. Using JSON mode instead.")
            return json_response_format
        log_debug("Setting Model.response_format to Agent.output_schema")
        return output_schema
    if model.supports_json_schema_outputs:
        if not team.use_json_mode:
            return None
        log_debug("Setting Model.response_format to JSON response mode")
        # A dict schema is already in the provider's full format; pass it through.
        if isinstance(output_schema, dict):
            return output_schema
        # Pydantic model: wrap its JSON schema in the provider envelope.
        return {
            "type": "json_schema",
            "json_schema": {
                "name": output_schema.__name__,
                "schema": output_schema.model_json_schema(),
            },
        }
    log_debug("Model does not support structured or JSON schema outputs.")
    return json_response_format
# ---------------------------------------------------------------------------
# Parser model helpers
# ---------------------------------------------------------------------------
def process_parser_response(
    team: "Team",
    model_response: ModelResponse,
    run_messages: RunMessages,
    parser_model_response: ModelResponse,
    messages_for_parser_model: list,
) -> None:
    """Merge the parser model's output into *model_response*.

    Appends the parser's latest assistant message to the run messages, then
    overwrites the main response's parsed value and content with the parser's
    result. Logs a warning if no assistant message was produced.
    """
    # Locate the most recent assistant message produced by the parser model.
    assistant_message = next(
        (msg for msg in reversed(messages_for_parser_model) if msg.role == "assistant"),
        None,
    )
    if assistant_message is None:
        log_warning("Unable to parse response with parser model")
        return
    run_messages.messages.append(assistant_message)
    model_response.parsed = parser_model_response.parsed
    model_response.content = parser_model_response.content
def parse_response_with_parser_model(
    team: "Team", model_response: ModelResponse, run_messages: RunMessages, run_context: Optional[RunContext] = None
) -> None:
    """Run the configured parser model over *model_response* and merge the result."""
    from agno.team._messages import _get_messages_for_parser_model

    if team.parser_model is None:
        return
    # The output schema for this run lives on the run context.
    output_schema = run_context.output_schema if run_context else None
    if output_schema is None:
        # Without a response model there is nothing to parse into.
        log_warning("A response model is required to parse the response with a parser model")
        return
    parser_response_format = get_response_format(team, team.parser_model, run_context=run_context)
    messages_for_parser_model = _get_messages_for_parser_model(
        team, model_response, parser_response_format, run_context=run_context
    )
    parser_model_response: ModelResponse = team.parser_model.response(
        messages=messages_for_parser_model,
        response_format=parser_response_format,
    )
    process_parser_response(team, model_response, run_messages, parser_model_response, messages_for_parser_model)
async def aparse_response_with_parser_model(
    team: "Team", model_response: ModelResponse, run_messages: RunMessages, run_context: Optional[RunContext] = None
) -> None:
    """Async: run the configured parser model over *model_response* and merge the result."""
    from agno.team._messages import _get_messages_for_parser_model

    if team.parser_model is None:
        return
    # The output schema for this run lives on the run context.
    output_schema = run_context.output_schema if run_context else None
    if output_schema is None:
        # Without a response model there is nothing to parse into.
        log_warning("A response model is required to parse the response with a parser model")
        return
    parser_response_format = get_response_format(team, team.parser_model, run_context=run_context)
    messages_for_parser_model = _get_messages_for_parser_model(
        team, model_response, parser_response_format, run_context=run_context
    )
    parser_model_response: ModelResponse = await team.parser_model.aresponse(
        messages=messages_for_parser_model,
        response_format=parser_response_format,
    )
    process_parser_response(team, model_response, run_messages, parser_model_response, messages_for_parser_model)
def parse_response_with_parser_model_stream(
    team: "Team",
    session: TeamSession,
    run_response: TeamRunOutput,
    stream_events: bool = False,
    run_context: Optional[RunContext] = None,
):
    """Parse the streamed model response using the parser model.

    Generator: yields parser-model chunk events (plus started/completed marker
    events when ``stream_events`` is True) and writes the parsed content back
    onto ``run_response``.
    """
    from agno.team._messages import _get_messages_for_parser_model_stream
    if team.parser_model is not None:
        # run_context override for output_schema
        # Get output_schema from run_context
        output_schema = run_context.output_schema if run_context else None
        if output_schema is not None:
            if stream_events:
                # Signal that the parser model has started processing.
                yield handle_event(  # type: ignore
                    create_team_parser_model_response_started_event(run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
            # Accumulates parsed content across streamed chunks.
            parser_model_response = ModelResponse(content="")
            parser_response_format = get_response_format(team, team.parser_model, run_context=run_context)
            messages_for_parser_model = _get_messages_for_parser_model_stream(
                team, run_response, parser_response_format, run_context=run_context
            )
            for model_response_event in team.parser_model.response_stream(
                messages=messages_for_parser_model,
                response_format=parser_response_format,
                stream_model_response=False,
            ):
                yield from _handle_model_response_chunk(
                    team,
                    session=session,
                    run_response=run_response,
                    full_model_response=parser_model_response,
                    model_response_event=model_response_event,
                    parse_structured_output=True,
                    stream_events=stream_events,
                    run_context=run_context,
                )
            # Replace the run content with the parser model's structured output.
            run_response.content = parser_model_response.content
            # Find the parser model's assistant message (search newest-first).
            parser_model_response_message: Optional[Message] = None
            for message in reversed(messages_for_parser_model):
                if message.role == "assistant":
                    parser_model_response_message = message
                    break
            if parser_model_response_message is not None:
                if run_response.messages is not None:
                    run_response.messages.append(parser_model_response_message)
            else:
                log_warning("Unable to parse response with parser model")
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_parser_model_response_completed_event(run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        else:
            log_warning("A response model is required to parse the response with a parser model")
async def aparse_response_with_parser_model_stream(
    team: "Team",
    session: TeamSession,
    run_response: TeamRunOutput,
    stream_events: bool = False,
    run_context: Optional[RunContext] = None,
):
    """Async variant: parse the streamed model response using the parser model.

    Async generator: yields parser-model chunk events (plus started/completed
    marker events when ``stream_events`` is True) and writes the parsed content
    back onto ``run_response``.
    """
    from agno.team._messages import _get_messages_for_parser_model_stream
    if team.parser_model is not None:
        # run_context override for output_schema
        # Get output_schema from run_context
        output_schema = run_context.output_schema if run_context else None
        if output_schema is not None:
            if stream_events:
                # Signal that the parser model has started processing.
                yield handle_event(  # type: ignore
                    create_team_parser_model_response_started_event(run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
            # Accumulates parsed content across streamed chunks.
            parser_model_response = ModelResponse(content="")
            parser_response_format = get_response_format(team, team.parser_model, run_context=run_context)
            messages_for_parser_model = _get_messages_for_parser_model_stream(
                team, run_response, parser_response_format, run_context=run_context
            )
            model_response_stream = team.parser_model.aresponse_stream(
                messages=messages_for_parser_model,
                response_format=parser_response_format,
                stream_model_response=False,
            )
            async for model_response_event in model_response_stream:  # type: ignore
                for event in _handle_model_response_chunk(
                    team,
                    session=session,
                    run_response=run_response,
                    full_model_response=parser_model_response,
                    model_response_event=model_response_event,
                    parse_structured_output=True,
                    stream_events=stream_events,
                    run_context=run_context,
                ):
                    yield event
            # Replace the run content with the parser model's structured output.
            run_response.content = parser_model_response.content
            # Find the parser model's assistant message (search newest-first).
            parser_model_response_message: Optional[Message] = None
            for message in reversed(messages_for_parser_model):
                if message.role == "assistant":
                    parser_model_response_message = message
                    break
            if parser_model_response_message is not None:
                if run_response.messages is not None:
                    run_response.messages.append(parser_model_response_message)
            else:
                log_warning("Unable to parse response with parser model")
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_parser_model_response_completed_event(run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        else:
            log_warning("A response model is required to parse the response with a parser model")
# ---------------------------------------------------------------------------
# Output model helpers
# ---------------------------------------------------------------------------
def parse_response_with_output_model(team: "Team", model_response: ModelResponse, run_messages: RunMessages) -> None:
    """Re-generate the final content via the team's output model, if configured."""
    from agno.team._messages import _get_messages_for_output_model

    output_model = team.output_model
    if output_model is None:
        # No output model configured; leave the response untouched.
        return
    prepared_messages = _get_messages_for_output_model(team, run_messages.messages)
    model_response.content = output_model.response(messages=prepared_messages).content
def generate_response_with_output_model_stream(
    team: "Team",
    session: TeamSession,
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    stream_events: bool = False,
):
    """Stream the team's output model and fold its chunks into *run_response*.

    Generator: yields chunk events (plus started/completed marker events when
    ``stream_events`` is True), then records the accumulated content and the
    memory-worthy messages on ``run_response``.
    """
    from agno.team._messages import _get_messages_for_output_model
    from agno.utils.events import (
        create_team_output_model_response_completed_event,
        create_team_output_model_response_started_event,
    )

    output_model = team.output_model
    if output_model is None:
        return
    if stream_events:
        # Announce that the output model has started responding.
        yield handle_event(  # type: ignore
            create_team_output_model_response_started_event(run_response),
            run_response,
            events_to_skip=team.events_to_skip,
            store_events=team.store_events,
        )
    prepared_messages = _get_messages_for_output_model(team, run_messages.messages)
    # Accumulates the streamed content across chunks.
    accumulated_response = ModelResponse(content="")
    for chunk in output_model.response_stream(messages=prepared_messages):
        yield from _handle_model_response_chunk(
            team,
            session=session,
            run_response=run_response,
            full_model_response=accumulated_response,
            model_response_event=chunk,
        )
    # Surface the accumulated content on the run output.
    run_response.content = accumulated_response.content
    if stream_events:
        yield handle_event(  # type: ignore
            create_team_output_model_response_completed_event(run_response),
            run_response,
            events_to_skip=team.events_to_skip,
            store_events=team.store_events,
        )
    # Only messages flagged for agent memory are persisted on the run output.
    run_response.messages = [msg for msg in run_messages.messages if msg.add_to_agent_memory]
async def agenerate_response_with_output_model(
    team: "Team", model_response: ModelResponse, run_messages: RunMessages
) -> None:
    """Replace ``model_response.content`` with output from the team's output model (async)."""
    from agno.team._messages import _get_messages_for_output_model

    output_model = team.output_model
    # Nothing to do when no dedicated output model is configured.
    if output_model is None:
        return

    # Re-run the conversation through the output model and adopt its content.
    output_messages = _get_messages_for_output_model(team, run_messages.messages)
    final_response: ModelResponse = await output_model.aresponse(messages=output_messages)
    model_response.content = final_response.content
async def agenerate_response_with_output_model_stream(
    team: "Team",
    session: TeamSession,
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    stream_events: bool = False,
):
    """Async variant: re-generate the final response with the team's output model, streaming the result.

    Emits output-model started/completed events (when ``stream_events`` is True)
    around the streamed chunks, accumulates the streamed content into a single
    ``ModelResponse``, then writes the content and the memory-eligible messages
    back onto ``run_response``. Yields nothing when no output model is configured.
    """
    from agno.team._messages import _get_messages_for_output_model
    from agno.utils.events import (
        create_team_output_model_response_completed_event,
        create_team_output_model_response_started_event,
    )

    # No dedicated output model configured: leave the run response untouched.
    if team.output_model is None:
        return

    if stream_events:
        yield handle_event(  # type: ignore
            create_team_output_model_response_started_event(run_response),
            run_response,
            events_to_skip=team.events_to_skip,
            store_events=team.store_events,
        )

    messages_for_output_model = _get_messages_for_output_model(team, run_messages.messages)
    # Accumulator for the streamed chunks; filled in by _handle_model_response_chunk.
    model_response = ModelResponse(content="")
    async for model_response_event in team.output_model.aresponse_stream(messages=messages_for_output_model):
        # _handle_model_response_chunk is a sync generator, so re-yield its events here.
        for event in _handle_model_response_chunk(
            team,
            session=session,
            run_response=run_response,
            full_model_response=model_response,
            model_response_event=model_response_event,
        ):
            yield event

    # Update the TeamRunResponse content
    run_response.content = model_response.content

    if stream_events:
        yield handle_event(  # type: ignore
            create_team_output_model_response_completed_event(run_response),
            run_response,
            events_to_skip=team.events_to_skip,
            store_events=team.store_events,
        )

    # Build a list of messages that should be added to the RunResponse
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the RunResponse messages
    run_response.messages = messages_for_run_response
# ---------------------------------------------------------------------------
# Reasoning
# ---------------------------------------------------------------------------
def format_reasoning_step_content(team: "Team", run_response: TeamRunOutput, reasoning_step: ReasoningStep) -> str:
    """Render a reasoning step as markdown and append it to the run's existing reasoning content."""
    # Assemble the step's sections; only non-empty fields are rendered.
    parts: List[str] = []
    if reasoning_step.title:
        parts.append(f"## {reasoning_step.title}\n")
    if reasoning_step.reasoning:
        parts.append(f"{reasoning_step.reasoning}\n")
    if reasoning_step.action:
        parts.append(f"Action: {reasoning_step.action}\n")
    if reasoning_step.result:
        parts.append(f"Result: {reasoning_step.result}\n")
    parts.append("\n")

    # Start from whatever reasoning content the run response already holds.
    existing_content = ""
    if getattr(run_response, "reasoning_content", None):
        existing_content = run_response.reasoning_content

    return existing_content + "".join(parts)
def handle_reasoning_event(
    team: "Team",
    event: "ReasoningEvent",
    run_response: TeamRunOutput,
    stream_events: bool,
) -> Iterator[TeamRunOutputEvent]:
    """
    Convert a ReasoningEvent from the ReasoningManager to Team-specific TeamRunOutputEvents.

    This method handles the conversion of generic reasoning events to Team events,
    keeping the reason() function clean and simple.

    Side effects: ``step`` and ``completed`` events also record their reasoning
    steps on ``run_response`` (regardless of ``stream_events``); streaming only
    controls whether corresponding TeamRunOutputEvents are yielded.
    """
    from agno.reasoning.manager import ReasoningEventType

    if event.event_type == ReasoningEventType.started:
        if stream_events:
            yield handle_event(  # type: ignore
                create_team_reasoning_started_event(from_run_response=run_response),
                run_response,
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )

    elif event.event_type == ReasoningEventType.content_delta:
        # Only forward deltas that actually carry content.
        if stream_events and event.reasoning_content:
            yield handle_event(  # type: ignore
                create_team_reasoning_content_delta_event(
                    from_run_response=run_response,
                    reasoning_content=event.reasoning_content,
                ),
                run_response,
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )

    elif event.event_type == ReasoningEventType.step:
        if event.reasoning_step:
            # Update run_response with this step
            update_run_output_with_reasoning(
                run_response=run_response,
                reasoning_steps=[event.reasoning_step],
                reasoning_agent_messages=[],
            )
            if stream_events:
                # Include the cumulative reasoning content so clients can render progress.
                updated_reasoning_content = format_reasoning_step_content(
                    team,
                    run_response=run_response,
                    reasoning_step=event.reasoning_step,
                )
                yield handle_event(  # type: ignore
                    create_team_reasoning_step_event(
                        from_run_response=run_response,
                        reasoning_step=event.reasoning_step,
                        reasoning_content=updated_reasoning_content,
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )

    elif event.event_type == ReasoningEventType.completed:
        # Record the full set of steps and agent messages, then signal completion.
        if event.message and event.reasoning_steps:
            update_run_output_with_reasoning(
                run_response=run_response,
                reasoning_steps=event.reasoning_steps,
                reasoning_agent_messages=event.reasoning_messages,
            )
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_reasoning_completed_event(
                        from_run_response=run_response,
                        content=ReasoningSteps(reasoning_steps=event.reasoning_steps),
                        content_type=ReasoningSteps.__name__,
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )

    elif event.event_type == ReasoningEventType.error:
        # Reasoning failures are non-fatal: log and let the regular run continue.
        log_warning(f"Reasoning error. {event.error}, continuing regular session...")
def handle_reasoning(
    team: "Team", run_response: TeamRunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
) -> None:
    """Run reasoning synchronously for its side effects, discarding all events."""
    if not (team.reasoning or team.reasoning_model is not None):
        return
    event_stream = reason(
        team, run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
    )
    # Drain the generator; only its effects on run_response matter here.
    deque(event_stream, maxlen=0)
def handle_reasoning_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: bool = False,
) -> Iterator[TeamRunOutputEvent]:
    """Yield reasoning events when the team has reasoning enabled; otherwise yield nothing."""
    if not (team.reasoning or team.reasoning_model is not None):
        return
    yield from reason(
        team,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        stream_events=stream_events,
    )
async def ahandle_reasoning(
    team: "Team", run_response: TeamRunOutput, run_messages: RunMessages, run_context: Optional[RunContext] = None
) -> None:
    """Run reasoning asynchronously for its side effects, discarding all events."""
    if not (team.reasoning or team.reasoning_model is not None):
        return
    event_stream = areason(
        team, run_response=run_response, run_messages=run_messages, run_context=run_context, stream_events=False
    )
    # Drain the async generator; only its effects on run_response matter here.
    async for _ in event_stream:
        pass
async def ahandle_reasoning_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: bool = False,
) -> AsyncIterator[TeamRunOutputEvent]:
    """Yield reasoning events asynchronously when reasoning is enabled; otherwise yield nothing."""
    if not (team.reasoning or team.reasoning_model is not None):
        return
    event_stream = areason(
        team,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        stream_events=stream_events,
    )
    async for reasoning_event in event_stream:
        yield reasoning_event
def reason(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: bool = False,
) -> Iterator[TeamRunOutputEvent]:
    """
    Run reasoning using the ReasoningManager and yield team-level events.

    Supports both native reasoning models (DeepSeek, Anthropic, etc.) and the
    default Chain-of-Thought reasoning flow behind one unified interface.
    """
    from agno.reasoning.manager import ReasoningConfig, ReasoningManager

    # No dedicated reasoning model configured: reason with a copy of the main model.
    model_for_reasoning: Optional[Model] = team.reasoning_model
    if model_for_reasoning is None and team.model is not None:
        from copy import deepcopy

        model_for_reasoning = deepcopy(team.model)

    # Assemble the manager configuration from the team's reasoning settings.
    reasoning_config = ReasoningConfig(
        reasoning_model=model_for_reasoning,
        reasoning_agent=team.reasoning_agent,
        min_steps=team.reasoning_min_steps,
        max_steps=team.reasoning_max_steps,
        tools=team.tools if isinstance(team.tools, list) else None,
        tool_call_limit=team.tool_call_limit,
        use_json_mode=team.use_json_mode,
        telemetry=team.telemetry,
        debug_mode=team.debug_mode,
        debug_level=team.debug_level,
        run_context=run_context,
        run_metrics=run_response.metrics,
    )
    reasoning_manager = ReasoningManager(reasoning_config)

    # Translate each generic reasoning event into team-specific run-output events.
    for reasoning_event in reasoning_manager.reason(run_messages, stream=stream_events):
        yield from handle_reasoning_event(team, reasoning_event, run_response, stream_events)
async def areason(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
    stream_events: bool = False,
) -> AsyncIterator[TeamRunOutputEvent]:
    """
    Run reasoning asynchronously using the ReasoningManager and yield team-level events.

    Supports both native reasoning models (DeepSeek, Anthropic, etc.) and the
    default Chain-of-Thought reasoning flow behind one unified interface.
    """
    from agno.reasoning.manager import ReasoningConfig, ReasoningManager

    # No dedicated reasoning model configured: reason with a copy of the main model.
    model_for_reasoning: Optional[Model] = team.reasoning_model
    if model_for_reasoning is None and team.model is not None:
        from copy import deepcopy

        model_for_reasoning = deepcopy(team.model)

    # Assemble the manager configuration from the team's reasoning settings.
    reasoning_config = ReasoningConfig(
        reasoning_model=model_for_reasoning,
        reasoning_agent=team.reasoning_agent,
        min_steps=team.reasoning_min_steps,
        max_steps=team.reasoning_max_steps,
        tools=team.tools if isinstance(team.tools, list) else None,
        tool_call_limit=team.tool_call_limit,
        use_json_mode=team.use_json_mode,
        telemetry=team.telemetry,
        debug_mode=team.debug_mode,
        debug_level=team.debug_level,
        run_context=run_context,
        run_metrics=run_response.metrics,
    )
    reasoning_manager = ReasoningManager(reasoning_config)

    # Translate each generic reasoning event into team-specific run-output events.
    async for reasoning_event in reasoning_manager.areason(run_messages, stream=stream_events):
        for output_event in handle_reasoning_event(team, reasoning_event, run_response, stream_events):
            yield output_event
# ---------------------------------------------------------------------------
# Tool-call reasoning update
# ---------------------------------------------------------------------------
def update_reasoning_content_from_tool_call(
    team: "Team", run_response: TeamRunOutput, tool_name: str, tool_args: Dict[str, Any]
) -> Optional[ReasoningStep]:
    """Update reasoning_content based on tool calls that look like thinking or reasoning tools.

    Recognizes three tool-call shapes (checked in order — the richer "think"
    form must be matched before the simple one):
      1. ReasoningTools.think: has ``title`` and ``thought``.
      2. ReasoningTools.analyze: has ``title`` (plus optional result/analysis/next_action/confidence).
      3. Simple think: has only ``thought``.

    Returns the recorded ReasoningStep, or None when the call matched no shape.
    Side effects: appends the step to run_response metadata and to its
    reasoning_content.
    """
    # Case 1: ReasoningTools.think (has title, thought, optional action and confidence)
    if tool_name.lower() == "think" and "title" in tool_args and "thought" in tool_args:
        title = tool_args["title"]
        thought = tool_args["thought"]
        action = tool_args.get("action", "")
        confidence = tool_args.get("confidence", None)

        # Create a reasoning step
        reasoning_step = ReasoningStep(
            title=title,
            reasoning=thought,
            action=action,
            result=None,
            next_action=NextAction.CONTINUE,
            confidence=confidence,
        )

        # Add the step to the run response
        add_reasoning_step_to_metadata(run_response, reasoning_step)

        # Render the step as markdown and append to the accumulated reasoning content.
        formatted_content = f"## {title}\n{thought}\n"
        if action:
            formatted_content += f"Action: {action}\n"
        if confidence is not None:
            formatted_content += f"Confidence: {confidence}\n"
        formatted_content += "\n"

        append_to_reasoning_content(run_response, formatted_content)
        return reasoning_step

    # Case 2: ReasoningTools.analyze (has title, result, analysis, optional next_action and confidence)
    elif tool_name.lower() == "analyze" and "title" in tool_args:
        title = tool_args["title"]
        result = tool_args.get("result", "")
        analysis = tool_args.get("analysis", "")
        next_action = tool_args.get("next_action", "")
        confidence = tool_args.get("confidence", None)

        # Map string next_action to enum (defaults to CONTINUE for unknown values)
        next_action_enum = NextAction.CONTINUE
        if next_action.lower() == "validate":
            next_action_enum = NextAction.VALIDATE
        elif next_action.lower() in ["final", "final_answer", "finalize"]:
            next_action_enum = NextAction.FINAL_ANSWER

        # Create a reasoning step
        reasoning_step = ReasoningStep(
            title=title,
            action=None,
            result=result,
            reasoning=analysis,
            next_action=next_action_enum,
            confidence=confidence,
        )

        # Add the step to the run response
        add_reasoning_step_to_metadata(run_response, reasoning_step)

        # Render the step as markdown; "continue" is the default and not worth printing.
        formatted_content = f"## {title}\n"
        if result:
            formatted_content += f"Result: {result}\n"
        if analysis:
            formatted_content += f"{analysis}\n"
        if next_action and next_action.lower() != "continue":
            formatted_content += f"Next Action: {next_action}\n"
        if confidence is not None:
            formatted_content += f"Confidence: {confidence}\n"
        formatted_content += "\n"

        append_to_reasoning_content(run_response, formatted_content)
        return reasoning_step

    # Case 3: ReasoningTool.think (simple format, just has 'thought')
    elif tool_name.lower() == "think" and "thought" in tool_args:
        thought = tool_args["thought"]

        reasoning_step = ReasoningStep(
            title="Thinking",
            action=None,
            result=None,
            reasoning=thought,
            next_action=None,
            confidence=None,
        )

        formatted_content = f"## Thinking\n{thought}\n\n"

        add_reasoning_step_to_metadata(run_response, reasoning_step)
        append_to_reasoning_content(run_response, formatted_content)
        return reasoning_step

    # Not a recognized thinking/reasoning tool call.
    return None
# ---------------------------------------------------------------------------
# Run-response update (moved from _hooks.py)
# ---------------------------------------------------------------------------
def _update_run_response(
    team: "Team",
    model_response: ModelResponse,
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: Optional[RunContext] = None,
):
    """Copy the finished model response (content, reasoning, media, tools, session state)
    onto the TeamRunOutput, and record reasoning steps from think/analyze tool calls.
    """
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None

    # Handle structured outputs
    if (output_schema is not None) and not team.use_json_mode and (model_response.parsed is not None):
        # Update the run_response content with the structured output
        run_response.content = model_response.parsed
        # Update the run_response content_type with the structured output class name
        run_response.content_type = "dict" if isinstance(output_schema, dict) else output_schema.__name__
    else:
        # Update the run_response content with the model response content
        if not run_response.content:
            run_response.content = model_response.content
        else:
            run_response.content += model_response.content

    # Update the run_response thinking with the model response thinking
    if model_response.reasoning_content is not None:
        if not run_response.reasoning_content:
            run_response.reasoning_content = model_response.reasoning_content
        else:
            run_response.reasoning_content += model_response.reasoning_content

    # Update provider data
    if model_response.provider_data is not None:
        run_response.model_provider_data = model_response.provider_data

    # Update citations
    if model_response.citations is not None:
        run_response.citations = model_response.citations

    # Update the run_response tools with the model response tool_executions
    if model_response.tool_executions is not None:
        if run_response.tools is None:
            run_response.tools = model_response.tool_executions
        else:
            run_response.tools.extend(model_response.tool_executions)

    # Update the run_response audio with the model response audio
    if model_response.audio is not None:
        run_response.response_audio = model_response.audio

    # Update session_state with changes from model response
    if model_response.updated_session_state is not None and run_response.session_state is not None:
        merge_dictionaries(run_response.session_state, model_response.updated_session_state)

    # Build a list of messages that should be added to the RunOutput
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the TeamRunOutput messages
    run_response.messages = messages_for_run_response

    # Record reasoning content for any think/analyze tool calls the model made.
    if model_response.tool_executions:
        for tool_call in model_response.tool_executions:
            tool_name = tool_call.tool_name
            if tool_name and tool_name.lower() in ["think", "analyze"]:
                tool_args = tool_call.tool_args or {}
                update_reasoning_content_from_tool_call(team, run_response, tool_name, tool_args)
# ---------------------------------------------------------------------------
# Model response stream processing (moved from _hooks.py)
# ---------------------------------------------------------------------------
def _handle_model_response_stream(
    team: "Team",
    session: TeamSession,
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    tools: Optional[List[Union[Function, dict]]] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
    """Stream the team model's response, translating raw model events into run-output events.

    Request/compression lifecycle events are handled inline; all other chunks are
    delegated to ``_handle_model_response_chunk``, which accumulates the full
    response. After the stream ends, the accumulated response is copied onto
    ``run_response`` and, if reasoning ran, a reasoning-completed event is emitted.
    """
    team.model = cast(Model, team.model)

    # Mutable state shared with the chunk handler to track streamed reasoning.
    reasoning_state = {
        "reasoning_started": False,
        "reasoning_time_taken": 0.0,
    }

    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None

    # With a structured output schema (and no parser model), buffer the full
    # response instead of streaming raw deltas so it can be parsed as a whole.
    should_parse_structured_output = output_schema is not None and team.parse_response and team.parser_model is None

    stream_model_response = True
    if should_parse_structured_output:
        log_debug("Response model set, model response is not streamed.")
        stream_model_response = False

    full_model_response = ModelResponse()
    for model_response_event in team.model.response_stream(
        messages=run_messages.messages,
        response_format=response_format,
        tools=tools,
        tool_choice=team.tool_choice,
        tool_call_limit=team.tool_call_limit,
        stream_model_response=stream_model_response,
        run_response=run_response,
        send_media_to_model=team.send_media_to_model,
        compression_manager=team.compression_manager if team.compress_tool_results else None,
    ):
        # Handle LLM request events and compression events from ModelResponse
        if isinstance(model_response_event, ModelResponse):
            if model_response_event.event == ModelResponseEvent.model_request_started.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_model_request_started_event(
                            from_run_response=run_response,
                            model=team.model.id,
                            model_provider=team.model.provider,
                        ),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.model_request_completed.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_model_request_completed_event(
                            from_run_response=run_response,
                            model=team.model.id,
                            model_provider=team.model.provider,
                            input_tokens=model_response_event.input_tokens,
                            output_tokens=model_response_event.output_tokens,
                            total_tokens=model_response_event.total_tokens,
                            time_to_first_token=model_response_event.time_to_first_token,
                            reasoning_tokens=model_response_event.reasoning_tokens,
                            cache_read_tokens=model_response_event.cache_read_tokens,
                            cache_write_tokens=model_response_event.cache_write_tokens,
                        ),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue
            # Handle compression events
            if model_response_event.event == ModelResponseEvent.compression_started.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_compression_started_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.compression_completed.value:
                if stream_events:
                    stats = model_response_event.compression_stats or {}
                    yield handle_event(  # type: ignore
                        create_team_compression_completed_event(
                            from_run_response=run_response,
                            tool_results_compressed=stats.get("tool_results_compressed"),
                            original_size=stats.get("original_size"),
                            compressed_size=stats.get("compressed_size"),
                        ),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue

        # All remaining events (content deltas, member events, etc.) go through
        # the shared chunk handler, which also fills full_model_response.
        yield from _handle_model_response_chunk(
            team,
            session=session,
            run_response=run_response,
            full_model_response=full_model_response,
            model_response_event=model_response_event,
            reasoning_state=reasoning_state,
            stream_events=stream_events,
            parse_structured_output=should_parse_structured_output,
            session_state=session_state,
            run_context=run_context,
        )

    # 3. Update TeamRunOutput
    if full_model_response.content is not None:
        run_response.content = full_model_response.content
    if full_model_response.reasoning_content is not None:
        run_response.reasoning_content = full_model_response.reasoning_content
    if full_model_response.audio is not None:
        run_response.response_audio = full_model_response.audio
    if full_model_response.citations is not None:
        run_response.citations = full_model_response.citations
    if full_model_response.provider_data is not None:
        run_response.model_provider_data = full_model_response.provider_data

    # Build a list of messages that should be added to the RunOutput
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the TeamRunOutput messages
    run_response.messages = messages_for_run_response

    # If streamed reasoning occurred, close it out with a completed event.
    if stream_events and reasoning_state["reasoning_started"]:
        all_reasoning_steps: List[ReasoningStep] = []
        if run_response.reasoning_steps:
            all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)

        if all_reasoning_steps:
            add_reasoning_metrics_to_metadata(run_response, reasoning_state["reasoning_time_taken"])
            yield handle_event(  # type: ignore
                create_team_reasoning_completed_event(
                    from_run_response=run_response,
                    content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
                    content_type=ReasoningSteps.__name__,
                ),
                run_response,
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
async def _ahandle_model_response_stream(
    team: "Team",
    session: TeamSession,
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    tools: Optional[List[Union[Function, dict]]] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
    """Async variant: stream the team model's response, translating raw model events
    into run-output events.

    Request/compression lifecycle events are handled inline; all other chunks are
    delegated to ``_handle_model_response_chunk``, which accumulates the full
    response. After the stream ends, the accumulated response is copied onto
    ``run_response`` and, if reasoning ran, a reasoning-completed event is emitted.
    """
    team.model = cast(Model, team.model)

    # Mutable state shared with the chunk handler to track streamed reasoning.
    reasoning_state = {
        "reasoning_started": False,
        "reasoning_time_taken": 0.0,
    }

    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None

    # With a structured output schema (and no parser model), buffer the full
    # response instead of streaming raw deltas so it can be parsed as a whole.
    should_parse_structured_output = output_schema is not None and team.parse_response and team.parser_model is None

    stream_model_response = True
    if should_parse_structured_output:
        log_debug("Response model set, model response is not streamed.")
        stream_model_response = False

    full_model_response = ModelResponse()
    model_stream = team.model.aresponse_stream(
        messages=run_messages.messages,
        response_format=response_format,
        tools=tools,
        tool_choice=team.tool_choice,
        tool_call_limit=team.tool_call_limit,
        stream_model_response=stream_model_response,
        send_media_to_model=team.send_media_to_model,
        run_response=run_response,
        compression_manager=team.compression_manager if team.compress_tool_results else None,
    )  # type: ignore
    async for model_response_event in model_stream:
        # Handle LLM request events and compression events from ModelResponse
        if isinstance(model_response_event, ModelResponse):
            if model_response_event.event == ModelResponseEvent.model_request_started.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_model_request_started_event(
                            from_run_response=run_response,
                            model=team.model.id,
                            model_provider=team.model.provider,
                        ),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.model_request_completed.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_model_request_completed_event(
                            from_run_response=run_response,
                            model=team.model.id,
                            model_provider=team.model.provider,
                            input_tokens=model_response_event.input_tokens,
                            output_tokens=model_response_event.output_tokens,
                            total_tokens=model_response_event.total_tokens,
                            time_to_first_token=model_response_event.time_to_first_token,
                            reasoning_tokens=model_response_event.reasoning_tokens,
                            cache_read_tokens=model_response_event.cache_read_tokens,
                            cache_write_tokens=model_response_event.cache_write_tokens,
                        ),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue
            # Handle compression events
            if model_response_event.event == ModelResponseEvent.compression_started.value:
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_compression_started_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue
            if model_response_event.event == ModelResponseEvent.compression_completed.value:
                if stream_events:
                    stats = model_response_event.compression_stats or {}
                    yield handle_event(  # type: ignore
                        create_team_compression_completed_event(
                            from_run_response=run_response,
                            tool_results_compressed=stats.get("tool_results_compressed"),
                            original_size=stats.get("original_size"),
                            compressed_size=stats.get("compressed_size"),
                        ),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                continue

        # All remaining events go through the shared (sync) chunk handler,
        # which also fills full_model_response; re-yield its events here.
        for event in _handle_model_response_chunk(
            team,
            session=session,
            run_response=run_response,
            full_model_response=full_model_response,
            model_response_event=model_response_event,
            reasoning_state=reasoning_state,
            stream_events=stream_events,
            parse_structured_output=should_parse_structured_output,
            session_state=session_state,
            run_context=run_context,
        ):
            yield event

    # Update TeamRunOutput
    if full_model_response.content is not None:
        run_response.content = full_model_response.content
    if full_model_response.reasoning_content is not None:
        run_response.reasoning_content = full_model_response.reasoning_content
    if full_model_response.audio is not None:
        run_response.response_audio = full_model_response.audio
    if full_model_response.citations is not None:
        run_response.citations = full_model_response.citations
    if full_model_response.provider_data is not None:
        run_response.model_provider_data = full_model_response.provider_data

    # Build a list of messages that should be added to the RunOutput
    messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
    # Update the TeamRunOutput messages
    run_response.messages = messages_for_run_response

    # If streamed reasoning occurred, close it out with a completed event.
    if stream_events and reasoning_state["reasoning_started"]:
        all_reasoning_steps: List[ReasoningStep] = []
        if run_response.reasoning_steps:
            all_reasoning_steps = cast(List[ReasoningStep], run_response.reasoning_steps)

        if all_reasoning_steps:
            add_reasoning_metrics_to_metadata(run_response, reasoning_state["reasoning_time_taken"])
            yield handle_event(  # type: ignore
                create_team_reasoning_completed_event(
                    from_run_response=run_response,
                    content=ReasoningSteps(reasoning_steps=all_reasoning_steps),
                    content_type=ReasoningSteps.__name__,
                ),
                run_response,
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
def _handle_model_response_chunk(
team: "Team",
session: TeamSession,
run_response: TeamRunOutput,
full_model_response: ModelResponse,
model_response_event: Union[ModelResponse, TeamRunOutputEvent, RunOutputEvent],
reasoning_state: Optional[Dict[str, Any]] = None,
stream_events: bool = False,
parse_structured_output: bool = False,
session_state: Optional[Dict[str, Any]] = None,
run_context: Optional[RunContext] = None,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
if isinstance(model_response_event, tuple(get_args(RunOutputEvent))) or isinstance(
model_response_event, tuple(get_args(TeamRunOutputEvent))
):
if team.stream_member_events:
if model_response_event.event == TeamRunEvent.custom_event: # type: ignore
if hasattr(model_response_event, "team_id"):
model_response_event.team_id = team.id
if hasattr(model_response_event, "team_name"):
model_response_event.team_name = team.name
if not model_response_event.session_id: # type: ignore
model_response_event.session_id = session.session_id # type: ignore
if not model_response_event.run_id: # type: ignore
model_response_event.run_id = run_response.run_id # type: ignore
# We just bubble the event up
yield handle_event( # type: ignore
model_response_event, # type: ignore
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
) # type: ignore
else:
# Don't yield anything
return
else:
model_response_event = cast(ModelResponse, model_response_event)
# If the model response is an assistant_response, yield a RunOutput
if model_response_event.event == ModelResponseEvent.assistant_response.value:
content_type = "str"
should_yield = False
# Process content
if model_response_event.content is not None:
if parse_structured_output:
full_model_response.content = model_response_event.content
_convert_response_to_structured_format(team, full_model_response, run_context=run_context)
# Get output_schema from run_context
output_schema = run_context.output_schema if run_context else None
content_type = "dict" if isinstance(output_schema, dict) else output_schema.__name__ # type: ignore
run_response.content_type = content_type
elif team._member_response_model is not None:
full_model_response.content = model_response_event.content
_convert_response_to_structured_format(team, full_model_response, run_context=run_context)
content_type = (
"dict"
if isinstance(team._member_response_model, dict)
else team._member_response_model.__name__
) # type: ignore
run_response.content_type = content_type
elif isinstance(model_response_event.content, str):
full_model_response.content = (full_model_response.content or "") + model_response_event.content
should_yield = True
# Process reasoning content
if model_response_event.reasoning_content is not None:
full_model_response.reasoning_content = (
full_model_response.reasoning_content or ""
) + model_response_event.reasoning_content
run_response.reasoning_content = full_model_response.reasoning_content
should_yield = True
if model_response_event.redacted_reasoning_content is not None:
if not full_model_response.reasoning_content:
full_model_response.reasoning_content = model_response_event.redacted_reasoning_content
else:
full_model_response.reasoning_content += model_response_event.redacted_reasoning_content
run_response.reasoning_content = full_model_response.reasoning_content
should_yield = True
# Handle provider data (one chunk)
if model_response_event.provider_data is not None:
run_response.model_provider_data = model_response_event.provider_data
# Handle citations (one chunk)
if model_response_event.citations is not None:
run_response.citations = model_response_event.citations
# Process audio
if model_response_event.audio is not None:
if full_model_response.audio is None:
full_model_response.audio = Audio(id=str(uuid4()), content=b"", transcript="")
if model_response_event.audio.id is not None:
full_model_response.audio.id = model_response_event.audio.id # type: ignore
if model_response_event.audio.content is not None:
# Handle both base64 string and bytes content
if isinstance(model_response_event.audio.content, str):
# Decode base64 string to bytes
try:
import base64
decoded_content = base64.b64decode(model_response_event.audio.content)
if full_model_response.audio.content is None:
full_model_response.audio.content = b""
full_model_response.audio.content += decoded_content
except Exception:
# If decode fails, encode string as bytes
if full_model_response.audio.content is None:
full_model_response.audio.content = b""
full_model_response.audio.content += model_response_event.audio.content.encode("utf-8")
elif isinstance(model_response_event.audio.content, bytes):
# Content is already bytes
if full_model_response.audio.content is None:
full_model_response.audio.content = b""
full_model_response.audio.content += model_response_event.audio.content
if model_response_event.audio.transcript is not None:
if full_model_response.audio.transcript is None:
full_model_response.audio.transcript = ""
full_model_response.audio.transcript += model_response_event.audio.transcript # type: ignore
if model_response_event.audio.expires_at is not None:
full_model_response.audio.expires_at = model_response_event.audio.expires_at # type: ignore
if model_response_event.audio.mime_type is not None:
full_model_response.audio.mime_type = model_response_event.audio.mime_type # type: ignore
if model_response_event.audio.sample_rate is not None:
full_model_response.audio.sample_rate = model_response_event.audio.sample_rate
if model_response_event.audio.channels is not None:
full_model_response.audio.channels = model_response_event.audio.channels
# Yield the audio and transcript bit by bit
should_yield = True
if model_response_event.images is not None:
for image in model_response_event.images:
if run_response.images is None:
run_response.images = []
run_response.images.append(image)
should_yield = True
# Only yield the chunk
if should_yield:
if content_type == "str":
yield handle_event( # type: ignore
create_team_run_output_content_event(
from_run_response=run_response,
content=model_response_event.content,
reasoning_content=model_response_event.reasoning_content,
redacted_reasoning_content=model_response_event.redacted_reasoning_content,
response_audio=full_model_response.audio,
citations=model_response_event.citations,
model_provider_data=model_response_event.provider_data,
image=model_response_event.images[-1] if model_response_event.images else None,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
else:
yield handle_event( # type: ignore
create_team_run_output_content_event(
from_run_response=run_response,
content=full_model_response.content,
content_type=content_type,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Handle tool interruption events (HITL flow)
elif model_response_event.event == ModelResponseEvent.tool_call_paused.value:
tool_executions_list = model_response_event.tool_executions
if tool_executions_list is not None:
if run_response.tools is None:
run_response.tools = tool_executions_list
else:
run_response.tools.extend(tool_executions_list)
if run_response.requirements is None:
run_response.requirements = []
run_response.requirements.append(RunRequirement(tool_execution=tool_executions_list[-1]))
# If the model response is a tool_call_started, add the tool call to the run_response
elif model_response_event.event == ModelResponseEvent.tool_call_started.value:
# Add tool calls to the run_response
tool_executions_list = model_response_event.tool_executions
if tool_executions_list is not None:
# Add tool calls to the agent.run_response
if run_response.tools is None:
run_response.tools = tool_executions_list
else:
run_response.tools.extend(tool_executions_list)
for tool in tool_executions_list:
if stream_events:
yield handle_event( # type: ignore
create_team_tool_call_started_event(
from_run_response=run_response,
tool=tool,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# If the model response is a tool_call_completed, update the existing tool call in the run_response
elif model_response_event.event == ModelResponseEvent.tool_call_completed.value:
if model_response_event.updated_session_state is not None:
# Update the session_state variable that TeamRunOutput references
if session_state is not None:
merge_dictionaries(session_state, model_response_event.updated_session_state)
# Also update the DB session object
if session.session_data is not None:
merge_dictionaries(
session.session_data["session_state"], model_response_event.updated_session_state
)
if model_response_event.images is not None:
for image in model_response_event.images:
if run_response.images is None:
run_response.images = []
run_response.images.append(image)
if model_response_event.videos is not None:
for video in model_response_event.videos:
if run_response.videos is None:
run_response.videos = []
run_response.videos.append(video)
if model_response_event.audios is not None:
for audio in model_response_event.audios:
if run_response.audio is None:
run_response.audio = []
run_response.audio.append(audio)
if model_response_event.files is not None:
for file_obj in model_response_event.files:
if run_response.files is None:
run_response.files = []
run_response.files.append(file_obj)
reasoning_step: Optional[ReasoningStep] = None
tool_executions_list = model_response_event.tool_executions
if tool_executions_list is not None:
# Update the existing tool call in the run_response
if run_response.tools:
# Create a mapping of tool_call_id to index
tool_call_index_map = {
tc.tool_call_id: i for i, tc in enumerate(run_response.tools) if tc.tool_call_id is not None
}
# Process tool calls
for tool_execution in tool_executions_list:
tool_call_id = tool_execution.tool_call_id or ""
index = tool_call_index_map.get(tool_call_id)
if index is not None:
if run_response.tools[index].child_run_id is not None:
tool_execution.child_run_id = run_response.tools[index].child_run_id
run_response.tools[index] = tool_execution
else:
run_response.tools = tool_executions_list
# Only iterate through new tool calls
for tool_call in tool_executions_list:
tool_name = tool_call.tool_name or ""
if tool_name.lower() in ["think", "analyze"]:
tool_args = tool_call.tool_args or {}
reasoning_step = update_reasoning_content_from_tool_call(
team, run_response, tool_name, tool_args
)
metrics = tool_call.metrics
if metrics is not None and metrics.duration is not None and reasoning_state is not None:
reasoning_state["reasoning_time_taken"] = reasoning_state["reasoning_time_taken"] + float(
metrics.duration
)
if stream_events:
yield handle_event( # type: ignore
create_team_tool_call_completed_event(
from_run_response=run_response,
tool=tool_call,
content=model_response_event.content,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
if tool_call.tool_call_error:
yield handle_event( # type: ignore
create_team_tool_call_error_event(
from_run_response=run_response, tool=tool_call, error=str(tool_call.result)
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
if stream_events:
if reasoning_step is not None:
if reasoning_state is not None and not reasoning_state["reasoning_started"]:
yield handle_event( # type: ignore
create_team_reasoning_started_event(
from_run_response=run_response,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
reasoning_state["reasoning_started"] = True
yield handle_event( # type: ignore
create_team_reasoning_step_event(
from_run_response=run_response,
reasoning_step=reasoning_step,
reasoning_content=run_response.reasoning_content or "",
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# ---------------------------------------------------------------------------
# Structured format conversion (moved from _hooks.py)
# ---------------------------------------------------------------------------
def _convert_response_to_structured_format(
    team: "Team", run_response: Union[TeamRunOutput, RunOutput, ModelResponse], run_context: Optional[RunContext] = None
):
    """Coerce ``run_response.content`` into the structured output format, if one applies.

    Schema resolution order:
      1. ``run_context.output_schema`` (the team's own schema), when set.
      2. ``team._member_response_model`` (schema propagated from a member), otherwise.

    Dict schemas: string content is parsed as JSON into a dict (never a BaseModel).
    Pydantic schemas: string content is parsed into a model instance — for the
    team's own schema only when ``team.parse_response`` is True; member schemas
    are always parsed. ``content_type`` is updated alongside ``content`` when the
    response object exposes it. Failures are logged as warnings and the content
    is left untouched.
    """
    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None
    if output_schema is not None:
        if isinstance(output_schema, dict):
            # If the output schema is a dict, do not convert it into a BaseModel
            _coerce_dict_content(run_response)
        else:
            _coerce_model_content(run_response, output_schema, parse=team.parse_response, subject="Team")
    elif team._member_response_model is not None:
        if isinstance(team._member_response_model, dict):
            # Handle dict schema from member
            _coerce_dict_content(run_response)
        else:
            # Handle Pydantic schema from member (always parsed, regardless of team.parse_response)
            _coerce_model_content(run_response, team._member_response_model, parse=True, subject="Member")


def _coerce_dict_content(run_response) -> None:
    """Ensure dict-schema content is a dict: parse JSON strings in place and set content_type.

    Content that is neither a dict nor a str is left untouched (matches historical behavior).
    """
    if isinstance(run_response.content, dict):
        # Content is already a dict - just set content_type
        if hasattr(run_response, "content_type"):
            run_response.content_type = "dict"
    elif isinstance(run_response.content, str):
        parsed_dict = parse_response_dict_str(run_response.content)
        if parsed_dict is not None:
            run_response.content = parsed_dict
            if hasattr(run_response, "content_type"):
                run_response.content_type = "dict"
        else:
            log_warning("Failed to parse JSON response")


def _coerce_model_content(run_response, schema, *, parse: bool, subject: str) -> None:
    """Parse string content into ``schema`` (a Pydantic model class), unless it already is one.

    Args:
        run_response: Object with ``content`` (and optionally ``content_type``) attributes.
        schema: The Pydantic model class to parse into.
        parse: When False, string content is left as-is and a warning is logged.
        subject: "Team" or "Member" — used only in the warning message.
    """
    if isinstance(run_response.content, schema):
        return
    if isinstance(run_response.content, str) and parse:
        try:
            parsed_response_content = parse_response_model_str(run_response.content, schema)
            # Update the run output in place on success
            if parsed_response_content is not None:
                run_response.content = parsed_response_content
                if hasattr(run_response, "content_type"):
                    run_response.content_type = schema.__name__
            else:
                log_warning("Failed to convert response to output_schema")
        except Exception as e:
            log_warning(f"Failed to convert response to output model: {e}")
    else:
        log_warning(f"Something went wrong. {subject} run response content is not a string")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_response.py",
"license": "Apache License 2.0",
"lines": 1418,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_run.py | """Run lifecycle and sync/async execution trait for Team."""
from __future__ import annotations
import asyncio
import time
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
Type,
Union,
cast,
)
from uuid import uuid4
from pydantic import BaseModel
from agno.exceptions import (
InputCheckError,
OutputCheckError,
RunCancelledException,
)
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.base import Model
from agno.models.message import Message
from agno.models.metrics import RunMetrics, merge_background_metrics
from agno.models.response import ModelResponse
from agno.run import RunContext, RunStatus
from agno.run.agent import RunOutput, RunOutputEvent
from agno.run.cancel import (
acancel_run as acancel_run_global,
)
from agno.run.cancel import (
acleanup_run,
araise_if_cancelled,
aregister_run,
cleanup_run,
raise_if_cancelled,
register_run,
)
from agno.run.cancel import (
cancel_run as cancel_run_global,
)
from agno.run.messages import RunMessages
from agno.run.team import (
TaskData,
TeamRunInput,
TeamRunOutput,
TeamRunOutputEvent,
)
from agno.session import TeamSession
from agno.tools.function import Function
from agno.utils.agent import (
await_for_open_threads,
await_for_thread_tasks_stream,
collect_background_metrics,
store_media_util,
validate_input,
validate_media_object_id,
wait_for_open_threads,
wait_for_thread_tasks_stream,
)
from agno.utils.events import (
add_team_error_event,
create_team_run_cancelled_event,
create_team_run_completed_event,
create_team_run_content_completed_event,
create_team_run_error_event,
create_team_run_started_event,
create_team_session_summary_completed_event,
create_team_session_summary_started_event,
handle_event,
)
from agno.utils.hooks import (
normalize_post_hooks,
normalize_pre_hooks,
)
from agno.utils.log import (
log_debug,
log_error,
log_info,
log_warning,
)
# Strong references to background tasks so they aren't garbage-collected mid-execution.
# See: https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
# NOTE(review): presumably task-spawning sites add tasks here and discard them on
# completion (e.g. via Task.add_done_callback) — confirm against the spawn sites.
_background_tasks: set[asyncio.Task[None]] = set()
if TYPE_CHECKING:
from agno.team.team import Team
def cancel_run(run_id: str) -> bool:
    """Request cancellation of an in-flight team run.

    Thin wrapper around the global run-cancellation registry.

    Args:
        run_id (str): Identifier of the run to cancel.

    Returns:
        bool: True when the run was found and flagged for cancellation,
        False when no such run is registered.
    """
    was_flagged = cancel_run_global(run_id)
    return was_flagged
async def acancel_run(run_id: str) -> bool:
    """Request cancellation of an in-flight team run (async variant).

    Thin wrapper around the global async run-cancellation registry.

    Args:
        run_id (str): Identifier of the run to cancel.

    Returns:
        bool: True when the run was found and flagged for cancellation,
        False when no such run is registered.
    """
    was_flagged = await acancel_run_global(run_id)
    return was_flagged
async def _asetup_session(
    team: "Team",
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str],
    run_id: Optional[str],
) -> TeamSession:
    """Prepare the TeamSession for an async run.

    Reads (or creates) the session, refreshes its metadata, initializes and
    loads session state from the DB into ``run_context``, and resolves callable
    dependencies last. Shared by _arun() and _arun_stream(); mirrors the inline
    setup the sync run_dispatch() performs before calling _run()/_run_stream().
    """
    from agno.team._init import _has_async_db, _initialize_session_state
    from agno.team._storage import (
        _aread_or_create_session,
        _load_session_state,
        _read_or_create_session,
        _update_metadata,
    )

    # Fetch or create the session, taking the async path when an async DB is configured
    if _has_async_db(team):
        session = await _aread_or_create_session(team, session_id=session_id, user_id=user_id)
    else:
        session = _read_or_create_session(team, session_id=session_id, user_id=user_id)

    # Refresh session metadata
    _update_metadata(team, session=session)

    # Seed run_context state (reusing the caller-provided dict when present),
    # then overlay whatever state is persisted on the session.
    seed_state = run_context.session_state if run_context.session_state is not None else {}
    run_context.session_state = _initialize_session_state(
        team,
        session_state=seed_state,
        user_id=user_id,
        session_id=session_id,
        run_id=run_id,
    )
    if run_context.session_state is not None:
        run_context.session_state = _load_session_state(
            team, session=session, session_state=run_context.session_state
        )

    # Callable dependencies are resolved only AFTER state has been loaded
    if run_context.dependencies is not None:
        await _aresolve_run_dependencies(team, run_context=run_context)

    return session
def _run_tasks(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Run the Team in autonomous task mode.

    The team leader iteratively plans and delegates tasks to members until
    the goal is complete or max_iterations is reached.

    Args:
        team: The team whose leader model drives the task loop.
        run_response: Pre-created output object; mutated in place and always returned.
        session: The TeamSession the run is persisted against.
        run_context: Per-run context; its ``session_state`` carries the task list.
        user_id: Optional user identifier, forwarded to hooks/tools/memory.
        add_history_to_context: Whether to include chat history in the prompt.
        add_dependencies_to_context: Whether to include dependencies in the prompt.
        add_session_state_to_context: Whether to include session state in the prompt.
        response_format: Response format passed to the model (dict or BaseModel type).
        debug_mode: Forwarded to hook execution and tool setup.
        background_tasks: Forwarded to pre-/post-hooks.
        **kwargs: Forwarded to pre-hooks and run-message construction.

    Returns:
        TeamRunOutput: ``run_response`` with final status (completed, cancelled,
        error, or paused when member HITL requirements remain unresolved).
    """
    from agno.team._hooks import _execute_post_hooks, _execute_pre_hooks
    from agno.team._init import _disconnect_connectable_tools
    from agno.team._managers import _start_learning_future, _start_memory_future
    from agno.team._messages import _get_run_messages
    from agno.team._response import (
        _convert_response_to_structured_format,
        _update_run_response,
        handle_reasoning,
    )
    from agno.team._telemetry import log_team_telemetry
    from agno.team._tools import _determine_tools_for_model
    from agno.team.task import TaskStatus, load_task_list

    log_debug(f"Team Task Run Start: {run_response.run_id}", center=True)
    memory_future = None
    learning_future = None
    try:
        run_input = cast(TeamRunInput, run_response.input)
        team.model = cast(Model, team.model)
        # 1. Execute pre-hooks
        if team.pre_hooks is not None:
            pre_hook_iterator = _execute_pre_hooks(
                team,
                hooks=team.pre_hooks,  # type: ignore
                run_response=run_response,
                run_input=run_input,
                run_context=run_context,
                session=session,
                user_id=user_id,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
            # Drain the hook iterator for its side effects; deque(maxlen=0) discards items
            deque(pre_hook_iterator, maxlen=0)
        # 2. Determine tools for model (includes task management tools)
        team_run_context: Dict[str, Any] = {}
        _tools = _determine_tools_for_model(
            team,
            model=team.model,
            run_response=run_response,
            run_context=run_context,
            team_run_context=team_run_context,
            session=session,
            user_id=user_id,
            async_mode=False,
            input_message=run_input.input_content,
            images=run_input.images,
            videos=run_input.videos,
            audio=run_input.audios,
            files=run_input.files,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            stream=False,
            stream_events=False,
        )
        # 3. Prepare initial run messages
        run_messages = _get_run_messages(
            team,
            run_response=run_response,
            session=session,
            run_context=run_context,
            user_id=user_id,
            input_message=run_input.input_content,
            audio=run_input.audios,
            images=run_input.images,
            videos=run_input.videos,
            files=run_input.files,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            tools=_tools,
            **kwargs,
        )
        if len(run_messages.messages) == 0:
            log_error("No messages to be sent to the model.")
        # 4. Start memory and learning creation in background
        memory_future = _start_memory_future(
            team,
            run_messages=run_messages,
            user_id=user_id,
            existing_future=memory_future,
        )
        learning_future = _start_learning_future(
            team,
            run_messages=run_messages,
            session=session,
            user_id=user_id,
            existing_future=learning_future,
        )
        raise_if_cancelled(run_response.run_id)  # type: ignore
        # 5. Reason about the task if reasoning is enabled
        handle_reasoning(team, run_response=run_response, run_messages=run_messages, run_context=run_context)
        raise_if_cancelled(run_response.run_id)  # type: ignore
        # Use accumulated messages for the iterative loop
        accumulated_messages = run_messages.messages
        model_response: Optional[ModelResponse] = None
        # === Iterative task loop ===
        for iteration in range(team.max_iterations):
            log_debug(f"Task iteration {iteration + 1}/{team.max_iterations}")
            # On subsequent iterations, inject current task state as a user message
            if iteration > 0:
                task_list = load_task_list(run_context.session_state)
                task_summary = task_list.get_summary_string()
                state_message = Message(
                    role="user",
                    content=f"<current_task_state>\n{task_summary}\n</current_task_state>\n\n"
                    "Continue working on the tasks. Create, execute, or update tasks as needed. "
                    "When all tasks are done, call `mark_all_complete` with a summary.",
                )
                accumulated_messages.append(state_message)
            # Get model response
            model_response = team.model.response(
                messages=accumulated_messages,
                response_format=response_format,
                tools=_tools,
                tool_choice=team.tool_choice,
                tool_call_limit=team.tool_call_limit,
                run_response=run_response,
                send_media_to_model=team.send_media_to_model,
                compression_manager=team.compression_manager if team.compress_tool_results else None,
            )
            raise_if_cancelled(run_response.run_id)  # type: ignore
            # Update run response
            _update_run_response(
                team,
                model_response=model_response,
                run_response=run_response,
                run_messages=run_messages,
                run_context=run_context,
            )
            # Check if delegation propagated member HITL requirements
            if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                from agno.team import _hooks

                # Pause the team run until the unresolved requirement is handled
                return _hooks.handle_team_run_paused(
                    team, run_response=run_response, session=session, run_context=run_context
                )
            # Check termination conditions
            task_list = load_task_list(run_context.session_state)
            if task_list.goal_complete:
                log_debug("Task goal marked complete, finishing task loop.")
                break
            if task_list.all_terminal():
                # All tasks done but some may have failed
                has_failures = any(t.status == TaskStatus.failed for t in task_list.tasks)
                if not has_failures:
                    log_debug("All tasks completed successfully, finishing task loop.")
                    break
                # If there are failures, continue to let model handle them
                log_debug("All tasks terminal but some failed, continuing to let model handle.")
        else:
            # for/else: runs only when the loop was NOT exited via break
            # Loop exhausted without completing
            task_list = load_task_list(run_context.session_state)
            if not task_list.goal_complete:
                log_warning(f"Reached max_iterations ({team.max_iterations}) without completing all tasks.")
        # === Post-loop ===
        # Store media if enabled
        if team.store_media and model_response is not None:
            store_media_util(run_response, model_response)
        # Convert response to structured format
        _convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
        # Execute post-hooks
        if team.post_hooks is not None:
            iterator = _execute_post_hooks(
                team,
                hooks=team.post_hooks,  # type: ignore
                run_output=run_response,
                run_context=run_context,
                session=session,
                user_id=user_id,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
            deque(iterator, maxlen=0)
        raise_if_cancelled(run_response.run_id)  # type: ignore
        # Wait for background memory and learning creation
        wait_for_open_threads(memory_future=memory_future, learning_future=learning_future)  # type: ignore
        merge_background_metrics(run_response.metrics, collect_background_metrics(memory_future, learning_future))
        raise_if_cancelled(run_response.run_id)  # type: ignore
        # Create session summary
        if team.session_summary_manager is not None:
            session.upsert_run(run_response=run_response)
            try:
                team.session_summary_manager.create_session_summary(session=session, run_metrics=run_response.metrics)
            except Exception as e:
                # Summary failure is non-fatal: log and continue finishing the run
                log_warning(f"Error in session summary creation: {str(e)}")
        raise_if_cancelled(run_response.run_id)  # type: ignore
        # Set the run status to completed
        run_response.status = RunStatus.completed
        # Cleanup and store
        _cleanup_and_store(team, run_response=run_response, session=session)
        log_team_telemetry(team, session_id=session.session_id, run_id=run_response.run_id)
        log_debug(f"Team Task Run End: {run_response.run_id}", center=True, symbol="*")
        return run_response
    except RunCancelledException as e:
        log_info(f"Team task run {run_response.run_id} was cancelled")
        run_response.status = RunStatus.cancelled
        run_response.content = str(e)
        _cleanup_and_store(team, run_response=run_response, session=session)
        return run_response
    except (InputCheckError, OutputCheckError) as e:
        run_response.status = RunStatus.error
        run_error = create_team_run_error_event(
            run_response,
            error=str(e),
            error_id=e.error_id,
            error_type=e.type,
            additional_data=e.additional_data,
        )
        run_response.events = add_team_error_event(error=run_error, events=run_response.events)
        if run_response.content is None:
            run_response.content = str(e)
        log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
        _cleanup_and_store(team, run_response=run_response, session=session)
        return run_response
    except KeyboardInterrupt:
        # NOTE(review): unlike the other handlers, this path skips _cleanup_and_store;
        # the finally block still runs cleanup_run — confirm the omission is intended.
        run_response = cast(TeamRunOutput, run_response)
        run_response.status = RunStatus.cancelled
        run_response.content = "Operation cancelled by user"
        return run_response
    except Exception as e:
        run_response.status = RunStatus.error
        run_error = create_team_run_error_event(run_response, error=str(e))
        run_response.events = add_team_error_event(error=run_error, events=run_response.events)
        if run_response.content is None:
            run_response.content = str(e)
        log_error(f"Error in Team task run: {str(e)}")
        _cleanup_and_store(team, run_response=run_response, session=session)
        return run_response
    finally:
        # Cancel background futures on error
        for future in (memory_future, learning_future):
            if future is not None and not future.done():
                future.cancel()
                try:
                    future.result(timeout=0)
                except Exception:
                    pass
        _disconnect_connectable_tools(team)
        cleanup_run(run_response.run_id)  # type: ignore
        # NOTE(review): a return inside finally overrides any in-flight return value
        # and swallows exceptions that escape the handlers above (flake8 B012) —
        # presumably intended as a last-resort "always return run_response"; confirm.
        return run_response
def _run_tasks_stream(
team: "Team",
run_response: TeamRunOutput,
session: TeamSession,
run_context: RunContext,
user_id: Optional[str] = None,
add_history_to_context: Optional[bool] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
stream_events: bool = False,
yield_run_output: bool = False,
debug_mode: Optional[bool] = None,
background_tasks: Optional[Any] = None,
**kwargs: Any,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
"""Run the Team in autonomous task mode with streaming support.
The team leader iteratively plans and delegates tasks to members until
the goal is complete or max_iterations is reached. Events are yielded
for each iteration.
"""
from agno.team._hooks import _execute_post_hooks, _execute_pre_hooks
from agno.team._init import _disconnect_connectable_tools
from agno.team._managers import _start_learning_future, _start_memory_future
from agno.team._messages import _get_run_messages
from agno.team._response import (
_convert_response_to_structured_format,
_handle_model_response_stream,
generate_response_with_output_model_stream,
handle_reasoning_stream,
)
from agno.team._telemetry import log_team_telemetry
from agno.team._tools import _determine_tools_for_model
from agno.team.task import TaskStatus, load_task_list
from agno.utils.events import (
create_team_task_iteration_completed_event,
create_team_task_iteration_started_event,
create_team_task_state_updated_event,
)
log_debug(f"Team Task Run (Stream) Start: {run_response.run_id}", center=True)
memory_future = None
learning_future = None
try:
run_input = cast(TeamRunInput, run_response.input)
team.model = cast(Model, team.model)
# 1. Execute pre-hooks
if team.pre_hooks is not None:
pre_hook_iterator = _execute_pre_hooks(
team,
hooks=team.pre_hooks, # type: ignore
run_response=run_response,
run_input=run_input,
run_context=run_context,
session=session,
user_id=user_id,
debug_mode=debug_mode,
stream_events=stream_events,
background_tasks=background_tasks,
**kwargs,
)
for pre_hook_event in pre_hook_iterator:
yield pre_hook_event
# 2. Determine tools for model (includes task management tools)
team_run_context: Dict[str, Any] = {}
_tools = _determine_tools_for_model(
team,
model=team.model,
run_response=run_response,
run_context=run_context,
team_run_context=team_run_context,
session=session,
user_id=user_id,
async_mode=False,
input_message=run_input.input_content,
images=run_input.images,
videos=run_input.videos,
audio=run_input.audios,
files=run_input.files,
debug_mode=debug_mode,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
stream=True,
stream_events=stream_events,
)
# 3. Prepare initial run messages
run_messages = _get_run_messages(
team,
run_response=run_response,
session=session,
run_context=run_context,
user_id=user_id,
input_message=run_input.input_content,
audio=run_input.audios,
images=run_input.images,
videos=run_input.videos,
files=run_input.files,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
tools=_tools,
**kwargs,
)
if len(run_messages.messages) == 0:
log_error("No messages to be sent to the model.")
# 4. Start memory creation in background
memory_future = _start_memory_future(
team,
run_messages=run_messages,
user_id=user_id,
existing_future=memory_future,
)
learning_future = _start_learning_future(
team,
run_messages=run_messages,
session=session,
user_id=user_id,
existing_future=learning_future,
)
# Yield run started event
if stream_events:
yield handle_event( # type: ignore
create_team_run_started_event(run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
raise_if_cancelled(run_response.run_id) # type: ignore
# 5. Reason about the task if reasoning is enabled
yield from handle_reasoning_stream(
team,
run_response=run_response,
run_messages=run_messages,
run_context=run_context,
stream_events=stream_events,
)
raise_if_cancelled(run_response.run_id) # type: ignore
# Use accumulated messages for the iterative loop
accumulated_messages = run_messages.messages
# === Iterative task loop ===
for iteration in range(team.max_iterations):
log_debug(f"Task iteration {iteration + 1}/{team.max_iterations}")
# Yield task iteration started event
if stream_events:
yield handle_event( # type: ignore
create_team_task_iteration_started_event(
from_run_response=run_response,
iteration=iteration + 1,
max_iterations=team.max_iterations,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# On subsequent iterations, inject current task state as a user message
if iteration > 0:
task_list = load_task_list(run_context.session_state)
task_summary = task_list.get_summary_string()
state_message = Message(
role="user",
content=f"<current_task_state>\n{task_summary}\n</current_task_state>\n\n"
"Continue working on the tasks. Create, execute, or update tasks as needed. "
"When all tasks are done, call `mark_all_complete` with a summary.",
)
accumulated_messages.append(state_message)
# Get model response with streaming
# Update run_messages with accumulated messages for streaming
run_messages.messages = accumulated_messages
if team.output_model is None:
for event in _handle_model_response_stream(
team,
session=session,
run_response=run_response,
run_messages=run_messages,
tools=_tools,
response_format=response_format,
stream_events=stream_events,
session_state=run_context.session_state,
run_context=run_context,
):
raise_if_cancelled(run_response.run_id) # type: ignore
yield event
else:
for event in _handle_model_response_stream(
team,
session=session,
run_response=run_response,
run_messages=run_messages,
tools=_tools,
response_format=response_format,
stream_events=stream_events,
session_state=run_context.session_state,
run_context=run_context,
):
raise_if_cancelled(run_response.run_id) # type: ignore
from agno.run.team import IntermediateRunContentEvent, RunContentEvent
if isinstance(event, RunContentEvent):
if stream_events:
yield IntermediateRunContentEvent(
content=event.content,
content_type=event.content_type,
)
else:
yield event
for event in generate_response_with_output_model_stream(
team,
session=session,
run_response=run_response,
run_messages=run_messages,
stream_events=stream_events,
):
raise_if_cancelled(run_response.run_id) # type: ignore
yield event
raise_if_cancelled(run_response.run_id) # type: ignore
# Check if delegation propagated member HITL requirements
if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
from agno.team import _hooks
yield from _hooks.handle_team_run_paused_stream(
team, run_response=run_response, session=session, run_context=run_context
)
if yield_run_output:
yield run_response
return
# Check termination conditions
task_list = load_task_list(run_context.session_state)
# Yield task state updated event
if stream_events:
# Convert task list to TaskData for frontend
task_data_list = [
TaskData(
id=t.id,
title=t.title,
description=t.description,
status=t.status.value,
assignee=t.assignee,
dependencies=t.dependencies,
result=t.result,
)
for t in task_list.tasks
]
yield handle_event( # type: ignore
create_team_task_state_updated_event(
from_run_response=run_response,
task_summary=task_list.get_summary_string(),
goal_complete=task_list.goal_complete,
tasks=task_data_list,
completion_summary=task_list.completion_summary,
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Yield task iteration completed event
if stream_events:
yield handle_event( # type: ignore
create_team_task_iteration_completed_event(
from_run_response=run_response,
iteration=iteration + 1,
max_iterations=team.max_iterations,
task_summary=task_list.get_summary_string(),
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
if task_list.goal_complete:
log_debug("Task goal marked complete, finishing task loop.")
break
if task_list.all_terminal():
# All tasks done but some may have failed
has_failures = any(t.status == TaskStatus.failed for t in task_list.tasks)
if not has_failures:
log_debug("All tasks completed successfully, finishing task loop.")
break
# If there are failures, continue to let model handle them
log_debug("All tasks terminal but some failed, continuing to let model handle.")
else:
# Loop exhausted without completing
task_list = load_task_list(run_context.session_state)
if not task_list.goal_complete:
log_warning(f"Reached max_iterations ({team.max_iterations}) without completing all tasks.")
# === Post-loop ===
# Convert response to structured format
_convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
# Yield RunContentCompletedEvent
if stream_events:
yield handle_event( # type: ignore
create_team_run_content_completed_event(from_run_response=run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Execute post-hooks
if team.post_hooks is not None:
yield from _execute_post_hooks(
team,
hooks=team.post_hooks, # type: ignore
run_output=run_response,
run_context=run_context,
session=session,
user_id=user_id,
debug_mode=debug_mode,
stream_events=stream_events,
background_tasks=background_tasks,
**kwargs,
)
raise_if_cancelled(run_response.run_id) # type: ignore
# Wait for background memory creation
yield from wait_for_thread_tasks_stream(
run_response=run_response,
memory_future=memory_future, # type: ignore
learning_future=learning_future, # type: ignore
stream_events=stream_events,
events_to_skip=team.events_to_skip, # type: ignore
store_events=team.store_events,
get_memories_callback=lambda: team.get_user_memories(user_id=user_id),
)
raise_if_cancelled(run_response.run_id) # type: ignore
# Create session summary
if team.session_summary_manager is not None:
session.upsert_run(run_response=run_response)
if stream_events:
yield handle_event( # type: ignore
create_team_session_summary_started_event(from_run_response=run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
try:
team.session_summary_manager.create_session_summary(session=session)
except Exception as e:
log_warning(f"Error in session summary creation: {str(e)}")
if stream_events:
yield handle_event( # type: ignore
create_team_session_summary_completed_event(
from_run_response=run_response, session_summary=session.summary
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
raise_if_cancelled(run_response.run_id) # type: ignore
# Create the run completed event
completed_event = handle_event(
create_team_run_completed_event(from_run_response=run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Set the run status to completed
run_response.status = RunStatus.completed
# Cleanup and store
_cleanup_and_store(team, run_response=run_response, session=session)
if stream_events:
yield completed_event
if yield_run_output:
yield run_response
log_team_telemetry(team, session_id=session.session_id, run_id=run_response.run_id)
log_debug(f"Team Task Run (Stream) End: {run_response.run_id}", center=True, symbol="*")
except RunCancelledException as e:
log_info(f"Team task run {run_response.run_id} was cancelled during streaming")
run_response.status = RunStatus.cancelled
run_response.content = str(e)
yield handle_event(
create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
_cleanup_and_store(team, run_response=run_response, session=session)
except (InputCheckError, OutputCheckError) as e:
run_response.status = RunStatus.error
run_error = create_team_run_error_event(
run_response,
error=str(e),
error_id=e.error_id,
error_type=e.type,
additional_data=e.additional_data,
)
run_response.events = add_team_error_event(error=run_error, events=run_response.events)
if run_response.content is None:
run_response.content = str(e)
log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
_cleanup_and_store(team, run_response=run_response, session=session)
yield run_error
except KeyboardInterrupt:
run_response = cast(TeamRunOutput, run_response)
run_response.status = RunStatus.cancelled
run_response.content = "Operation cancelled by user"
try:
_cleanup_and_store(team, run_response=run_response, session=session)
except Exception:
pass
yield handle_event( # type: ignore
create_team_run_cancelled_event(from_run_response=run_response, reason="Operation cancelled by user"),
run_response,
events_to_skip=team.events_to_skip, # type: ignore
store_events=team.store_events,
)
except Exception as e:
run_response.status = RunStatus.error
run_error = create_team_run_error_event(run_response, error=str(e))
run_response.events = add_team_error_event(error=run_error, events=run_response.events)
if run_response.content is None:
run_response.content = str(e)
log_error(f"Error in Team task run (stream): {str(e)}")
_cleanup_and_store(team, run_response=run_response, session=session)
yield run_error
finally:
# Cancel background futures on error
for future in (memory_future, learning_future):
if future is not None and not future.done():
future.cancel()
try:
future.result(timeout=0)
except Exception:
pass
_disconnect_connectable_tools(team)
cleanup_run(run_response.run_id) # type: ignore
def _run(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: RunContext,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Run the Team synchronously (non-streaming) and return the response.

    Steps:
    1. Execute pre-hooks
    2. Determine tools for model
    3. Prepare run messages
    4. Start memory creation in background thread
    5. Reason about the task if reasoning is enabled
    6. Get a response from the model
    7. Update TeamRunOutput with the model response
    8. Store media if enabled
    9. Convert response to structured format
    10. Execute post-hooks
    11. Wait for background memory creation
    12. Create session summary
    13. Cleanup and store (scrub, stop timer, add to session, calculate metrics, save session)

    Args:
        team: The Team to run.
        run_response: Pre-created output object that is mutated in place and returned.
        session: The team session the run belongs to.
        run_context: Per-run context (session state, dependencies, etc.).
        user_id: Optional user identifier for memory/session scoping.
        add_history_to_context: Override for including prior runs in the prompt.
        add_dependencies_to_context: Override for including dependencies in the prompt.
        add_session_state_to_context: Override for including session state in the prompt.
        response_format: Optional response format (dict or Pydantic model) passed to the model.
        debug_mode: Optional debug-mode override.
        background_tasks: Optional FastAPI BackgroundTasks passed through to hooks.
        **kwargs: Extra keyword arguments forwarded to hooks and message preparation.

    Returns:
        The (mutated) TeamRunOutput; its ``status`` reflects completion, cancellation, or error.
    """
    from agno.team._hooks import _execute_post_hooks, _execute_pre_hooks
    from agno.team._init import _disconnect_connectable_tools
    from agno.team._managers import _start_learning_future, _start_memory_future
    from agno.team._messages import _get_run_messages
    from agno.team._response import (
        _convert_response_to_structured_format,
        _update_run_response,
        handle_reasoning,
        parse_response_with_output_model,
        parse_response_with_parser_model,
    )
    from agno.team._telemetry import log_team_telemetry
    from agno.team._tools import _determine_tools_for_model

    # Dispatch to task mode if applicable
    from agno.team.mode import TeamMode

    if team.mode == TeamMode.tasks:
        return _run_tasks(
            team,
            run_response=run_response,
            session=session,
            run_context=run_context,
            user_id=user_id,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )

    log_debug(f"Team Run Start: {run_response.run_id}", center=True)
    memory_future = None
    learning_future = None
    try:
        # Set up retry logic
        num_attempts = team.retries + 1
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Team run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Execute pre-hooks
                run_input = cast(TeamRunInput, run_response.input)
                team.model = cast(Model, team.model)
                if team.pre_hooks is not None:
                    # Can modify the run input
                    pre_hook_iterator = _execute_pre_hooks(
                        team,
                        hooks=team.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_input=run_input,
                        run_context=run_context,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Consume the generator without yielding
                    deque(pre_hook_iterator, maxlen=0)
                # 2. Determine tools for model
                # Initialize team run context
                team_run_context: Dict[str, Any] = {}
                # Note: MCP tool refresh is async-only by design (_check_and_refresh_mcp_tools
                # is called in _arun/_arun_stream). Sync paths do not support MCP tools.
                _tools = _determine_tools_for_model(
                    team,
                    model=team.model,
                    run_response=run_response,
                    run_context=run_context,
                    team_run_context=team_run_context,
                    session=session,
                    user_id=user_id,
                    async_mode=False,
                    input_message=run_input.input_content,
                    images=run_input.images,
                    videos=run_input.videos,
                    audio=run_input.audios,
                    files=run_input.files,
                    debug_mode=debug_mode,
                    add_history_to_context=add_history_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    stream=False,
                    stream_events=False,
                )
                # 3. Prepare run messages
                run_messages: RunMessages = _get_run_messages(
                    team,
                    run_response=run_response,
                    session=session,
                    run_context=run_context,
                    user_id=user_id,
                    input_message=run_input.input_content,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                if len(run_messages.messages) == 0:
                    log_error("No messages to be sent to the model.")
                # 4. Start memory creation in background thread
                memory_future = _start_memory_future(
                    team,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_future=memory_future,
                )
                learning_future = _start_learning_future(
                    team,
                    run_messages=run_messages,
                    session=session,
                    user_id=user_id,
                    existing_future=learning_future,
                )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 5. Reason about the task if reasoning is enabled
                handle_reasoning(team, run_response=run_response, run_messages=run_messages, run_context=run_context)
                # Check for cancellation before model call
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 6. Get the model response for the team leader
                team.model = cast(Model, team.model)
                model_response: ModelResponse = team.model.response(
                    messages=run_messages.messages,
                    response_format=response_format,
                    tools=_tools,
                    tool_choice=team.tool_choice,
                    tool_call_limit=team.tool_call_limit,
                    run_response=run_response,
                    send_media_to_model=team.send_media_to_model,
                    compression_manager=team.compression_manager if team.compress_tool_results else None,
                )
                # Check for cancellation after model call
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # If an output model is provided, generate output using the output model
                parse_response_with_output_model(team, model_response, run_messages)
                # If a parser model is provided, structure the response separately
                parse_response_with_parser_model(team, model_response, run_messages, run_context=run_context)
                # 7. Update TeamRunOutput with the model response
                _update_run_response(
                    team,
                    model_response=model_response,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                )
                # 7b. Check if delegation propagated member HITL requirements
                if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                    from agno.team import _hooks

                    return _hooks.handle_team_run_paused(
                        team, run_response=run_response, session=session, run_context=run_context
                    )
                # 8. Store media if enabled
                if team.store_media:
                    store_media_util(run_response, model_response)
                # 9. Convert response to structured format
                _convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
                # 10. Execute post-hooks after output is generated but before response is returned
                if team.post_hooks is not None:
                    iterator = _execute_post_hooks(
                        team,
                        hooks=team.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Post-hooks are generators; drain without yielding
                    deque(iterator, maxlen=0)
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 11. Wait for background memory creation
                wait_for_open_threads(memory_future=memory_future, learning_future=learning_future)  # type: ignore
                merge_background_metrics(
                    run_response.metrics, collect_background_metrics(memory_future, learning_future)
                )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 12. Create session summary
                if team.session_summary_manager is not None:
                    # Upsert the RunOutput to Team Session before creating the session summary
                    session.upsert_run(run_response=run_response)
                    try:
                        team.session_summary_manager.create_session_summary(
                            session=session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary creation is best-effort; never fail the run for it
                        log_warning(f"Error in session summary creation: {str(e)}")
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 13. Cleanup and store the run response
                _cleanup_and_store(team, run_response=run_response, session=session)
                # Log Team Telemetry
                log_team_telemetry(team, session_id=session.session_id, run_id=run_response.run_id)
                log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
                return run_response
            except RunCancelledException as e:
                # Handle run cancellation (this is the synchronous, non-streaming path)
                log_info(f"Team run {run_response.run_id} was cancelled")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                # Cleanup and store the run response and session
                _cleanup_and_store(team, run_response=run_response, session=session)
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_team_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                return run_response
            except KeyboardInterrupt:
                run_response = cast(TeamRunOutput, run_response)
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                try:
                    _cleanup_and_store(team, run_response=run_response, session=session)
                except Exception:
                    # Best-effort persistence while the user is interrupting
                    pass
                return run_response
            except Exception as e:
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if team.exponential_backoff:
                        delay = team.delay_between_retries * (2**attempt)
                    else:
                        delay = team.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    time.sleep(delay)
                    continue
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(run_response, error=str(e))
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                # If the content is None, set it to the error message
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Team run: {str(e)}")
                # Cleanup and store the run response and session
                _cleanup_and_store(team, run_response=run_response, session=session)
                return run_response
    finally:
        # Cancel background futures on error (wait_for_open_threads handles waiting on success)
        for future in (memory_future, learning_future):
            if future is not None and not future.done():
                future.cancel()
                try:
                    future.result(timeout=0)
                except Exception:
                    pass
        # Always disconnect connectable tools
        _disconnect_connectable_tools(team)
        # Always clean up the run tracking
        cleanup_run(run_response.run_id)  # type: ignore
    return run_response  # Defensive fallback for type-checker; all paths return inside the loop
def _run_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_context: RunContext,
    session: TeamSession,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: bool = False,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
    """Run the Team and yield response events as they are produced.

    Steps:
    1. Execute pre-hooks
    2. Determine tools for model
    3. Prepare run messages
    4. Start memory creation in background thread
    5. Reason about the task if reasoning is enabled
    6. Get a response from the model
    7. Parse response with parser model if provided
    8. Wait for background memory creation
    9. Create session summary
    10. Cleanup and store (scrub, add to session, calculate metrics, save session)

    Args:
        team: The Team to run.
        run_response: Pre-created output object that is mutated in place.
        run_context: Per-run context (session state, dependencies, etc.).
        session: The team session the run belongs to.
        user_id: Optional user identifier for memory/session scoping.
        add_history_to_context: Override for including prior runs in the prompt.
        add_dependencies_to_context: Override for including dependencies in the prompt.
        add_session_state_to_context: Override for including session state in the prompt.
        response_format: Optional response format (dict or Pydantic model) passed to the model.
        stream_events: Whether to emit lifecycle events in addition to content events.
        yield_run_output: Whether to yield the final TeamRunOutput as the last item.
        debug_mode: Optional debug-mode override.
        background_tasks: Optional FastAPI BackgroundTasks passed through to hooks.
        **kwargs: Extra keyword arguments forwarded to hooks and message preparation.

    Yields:
        Team/member run events and, when ``yield_run_output`` is True, the final TeamRunOutput.
    """
    from agno.team._hooks import _execute_post_hooks, _execute_pre_hooks
    from agno.team._init import _disconnect_connectable_tools
    from agno.team._managers import _start_learning_future, _start_memory_future
    from agno.team._messages import _get_run_messages
    from agno.team._response import (
        _handle_model_response_stream,
        generate_response_with_output_model_stream,
        handle_reasoning_stream,
        parse_response_with_parser_model_stream,
    )
    from agno.team._telemetry import log_team_telemetry
    from agno.team._tools import _determine_tools_for_model

    # Dispatch to task mode streaming if applicable
    from agno.team.mode import TeamMode

    if team.mode == TeamMode.tasks:
        yield from _run_tasks_stream(
            team,
            run_response=run_response,
            session=session,
            run_context=run_context,
            user_id=user_id,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            response_format=response_format,
            stream_events=stream_events,
            yield_run_output=yield_run_output,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
        return

    log_debug(f"Team Run Start: {run_response.run_id}", center=True)
    memory_future = None
    learning_future = None
    try:
        # Set up retry logic
        num_attempts = team.retries + 1
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Team run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Execute pre-hooks
                run_input = cast(TeamRunInput, run_response.input)
                team.model = cast(Model, team.model)
                if team.pre_hooks is not None:
                    # Can modify the run input
                    pre_hook_iterator = _execute_pre_hooks(
                        team,
                        hooks=team.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_context=run_context,
                        run_input=run_input,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    for pre_hook_event in pre_hook_iterator:
                        yield pre_hook_event
                # 2. Determine tools for model
                # Initialize team run context
                team_run_context: Dict[str, Any] = {}
                # Note: MCP tool refresh is async-only by design (_check_and_refresh_mcp_tools
                # is called in _arun/_arun_stream). Sync paths do not support MCP tools.
                _tools = _determine_tools_for_model(
                    team,
                    model=team.model,
                    run_response=run_response,
                    run_context=run_context,
                    team_run_context=team_run_context,
                    session=session,
                    user_id=user_id,
                    async_mode=False,
                    input_message=run_input.input_content,
                    images=run_input.images,
                    videos=run_input.videos,
                    audio=run_input.audios,
                    files=run_input.files,
                    debug_mode=debug_mode,
                    add_history_to_context=add_history_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    stream=True,
                    stream_events=stream_events,
                )
                # 3. Prepare run messages
                run_messages: RunMessages = _get_run_messages(
                    team,
                    run_response=run_response,
                    run_context=run_context,
                    session=session,
                    user_id=user_id,
                    input_message=run_input.input_content,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                if len(run_messages.messages) == 0:
                    log_error("No messages to be sent to the model.")
                # 4. Start memory creation in background thread
                memory_future = _start_memory_future(
                    team,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_future=memory_future,
                )
                learning_future = _start_learning_future(
                    team,
                    run_messages=run_messages,
                    session=session,
                    user_id=user_id,
                    existing_future=learning_future,
                )
                # Start the Run by yielding a RunStarted event
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_run_started_event(run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 5. Reason about the task if reasoning is enabled
                yield from handle_reasoning_stream(
                    team,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                    stream_events=stream_events,
                )
                # Check for cancellation before model processing
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 6. Get a response from the model
                if team.output_model is None:
                    for event in _handle_model_response_stream(
                        team,
                        session=session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                else:
                    # With an output model, leader content is intermediate; the output
                    # model generates the final content afterwards.
                    for event in _handle_model_response_stream(
                        team,
                        session=session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        from agno.run.team import IntermediateRunContentEvent, RunContentEvent

                        if isinstance(event, RunContentEvent):
                            if stream_events:
                                yield IntermediateRunContentEvent(
                                    content=event.content,
                                    content_type=event.content_type,
                                )
                        else:
                            yield event
                    for event in generate_response_with_output_model_stream(
                        team,
                        session=session,
                        run_response=run_response,
                        run_messages=run_messages,
                        stream_events=stream_events,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                # Check for cancellation after model processing
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 6b. Check if delegation propagated member HITL requirements
                if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                    from agno.team import _hooks

                    yield from _hooks.handle_team_run_paused_stream(
                        team, run_response=run_response, session=session, run_context=run_context
                    )
                    if yield_run_output:
                        yield run_response
                    return
                # 7. Parse response with parser model if provided
                yield from parse_response_with_parser_model_stream(
                    team,
                    session=session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                )
                # Yield RunContentCompletedEvent
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                # Execute post-hooks after output is generated but before response is returned
                if team.post_hooks is not None:
                    yield from _execute_post_hooks(
                        team,
                        hooks=team.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 8. Wait for background memory creation
                yield from wait_for_thread_tasks_stream(
                    run_response=run_response,
                    memory_future=memory_future,  # type: ignore
                    learning_future=learning_future,  # type: ignore
                    stream_events=stream_events,
                    events_to_skip=team.events_to_skip,  # type: ignore
                    store_events=team.store_events,
                    get_memories_callback=lambda: team.get_user_memories(user_id=user_id),
                )
                merge_background_metrics(
                    run_response.metrics, collect_background_metrics(memory_future, learning_future)
                )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # 9. Create session summary
                if team.session_summary_manager is not None:
                    # Upsert the RunOutput to Team Session before creating the session summary
                    session.upsert_run(run_response=run_response)
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_team_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=team.events_to_skip,
                            store_events=team.store_events,
                        )
                    try:
                        team.session_summary_manager.create_session_summary(
                            session=session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary creation is best-effort; never fail the run for it
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_team_session_summary_completed_event(
                                from_run_response=run_response, session_summary=session.summary
                            ),
                            run_response,
                            events_to_skip=team.events_to_skip,
                            store_events=team.store_events,
                        )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # Create the run completed event
                completed_event = handle_event(
                    create_team_run_completed_event(
                        from_run_response=run_response,
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 10. Cleanup and store the run response
                _cleanup_and_store(team, run_response=run_response, session=session)
                if stream_events:
                    yield completed_event
                if yield_run_output:
                    yield run_response
                # Log Team Telemetry
                log_team_telemetry(team, session_id=session.session_id, run_id=run_response.run_id)
                log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
                break
            except RunCancelledException as e:
                # Handle run cancellation during streaming
                log_info(f"Team run {run_response.run_id} was cancelled during streaming")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                # Yield the cancellation event
                yield handle_event(
                    create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                _cleanup_and_store(team, run_response=run_response, session=session)
                break
            except (InputCheckError, OutputCheckError) as e:
                run_response.status = RunStatus.error
                # Add error event to list of events
                run_error = create_team_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                # Log the validation failure (consistent with the non-streaming path)
                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                yield run_error
                break
            except KeyboardInterrupt:
                run_response = cast(TeamRunOutput, run_response)
                # Mark the run as cancelled before persisting (consistent with the
                # non-streaming path), so the stored run does not keep a stale status.
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                try:
                    _cleanup_and_store(team, run_response=run_response, session=session)
                except Exception:
                    # Best-effort persistence while the user is interrupting
                    pass
                yield handle_event(  # type: ignore
                    create_team_run_cancelled_event(
                        from_run_response=run_response, reason="Operation cancelled by user"
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,  # type: ignore
                    store_events=team.store_events,
                )
                break
            except Exception as e:
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if team.exponential_backoff:
                        delay = team.delay_between_retries * (2**attempt)
                    else:
                        delay = team.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    time.sleep(delay)
                    continue
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(run_response, error=str(e))
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Team run: {str(e)}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                yield run_error
    finally:
        # Cancel background futures on error (wait_for_thread_tasks_stream handles waiting on success)
        for future in (memory_future, learning_future):
            if future is not None and not future.done():
                future.cancel()
                try:
                    future.result(timeout=0)
                except Exception:
                    pass
        # Always disconnect connectable tools
        _disconnect_connectable_tools(team)
        # Always clean up the run tracking
        cleanup_run(run_response.run_id)  # type: ignore
def run_dispatch(
    team: "Team",
    input: Union[str, List, Dict, Message, BaseModel, List[Message]],
    *,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    run_context: Optional[RunContext] = None,
    run_id: Optional[str] = None,
    user_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
    **kwargs: Any,
) -> Union[TeamRunOutput, Iterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
    """Run the Team synchronously, dispatching to the streaming or blocking path.

    Performs all pre-run setup (input validation, session creation/loading,
    run-option resolution, RunContext construction, TeamRunOutput creation)
    and then hands off to ``_run_stream`` or ``_run`` depending on the
    resolved ``stream`` option.

    Args:
        team: The Team to run.
        input: The user input; validated against ``team.input_schema`` if set.
        stream: If True, return an iterator of run events instead of a final output.
        stream_events: Whether to emit intermediate events while streaming.
        session_id / user_id / run_id: Identifiers; ``run_id`` is generated if absent.
        session_state: Initial session state, merged with state loaded from the DB.
        run_context: An existing RunContext to reuse; a new one is built if None.
        audio / images / videos / files: Input media, validated and given artifact ids.
        knowledge_filters / dependencies / metadata / output_schema: Per-run options;
            explicit arguments take precedence over existing run_context values,
            which take precedence over team-level defaults (see ``apply_to_context``).
        debug_mode: Enables debug logging on the team for this run.
        yield_run_output: If streaming, also yield the final TeamRunOutput.
        **kwargs: Forwarded to the underlying run implementation; may contain
            a FastAPI ``background_tasks`` object, which is popped here.

    Returns:
        A TeamRunOutput, or an iterator of run events when streaming.

    Raises:
        Exception: If the team is configured with an async DB (use ``arun()``).
    """
    from agno.team._init import _has_async_db, _initialize_session, _initialize_session_state
    from agno.team._response import get_response_format
    from agno.team._run_options import resolve_run_options
    from agno.team._storage import _load_session_state, _read_or_create_session, _update_metadata
    # Synchronous run cannot drive an async database client.
    if _has_async_db(team):
        raise Exception("run() is not supported with an async DB. Please use arun() instead.")
    # Set the id for the run
    run_id = run_id or str(uuid4())
    # Initialize Team
    team.initialize_team(debug_mode=debug_mode)
    # History requires a DB unless this is a member of a parent team (parent owns storage).
    if (add_history_to_context or team.add_history_to_context) and not team.db and not team.parent_team_id:
        log_warning(
            "add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
        )
    # Pop FastAPI BackgroundTasks (if provided) so it is not forwarded in **kwargs twice.
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # Validate input against input_schema if provided
    validated_input = validate_input(input, team.input_schema)
    try:
        # Register run for cancellation tracking (after validation succeeds)
        register_run(run_id)  # type: ignore
        # Normalise hook & guardails
        if not team._hooks_normalised:
            if team.pre_hooks:
                team.pre_hooks = normalize_pre_hooks(team.pre_hooks)  # type: ignore
            if team.post_hooks:
                team.post_hooks = normalize_post_hooks(team.post_hooks)  # type: ignore
            team._hooks_normalised = True
        session_id, user_id = _initialize_session(team, session_id=session_id, user_id=user_id)
        # Assign artifact ids to incoming media before they are stored on the run input.
        image_artifacts, video_artifacts, audio_artifacts, file_artifacts = validate_media_object_id(
            images=images, videos=videos, audios=audio, files=files
        )
        # Create RunInput to capture the original user input
        run_input = TeamRunInput(
            input_content=validated_input,
            images=image_artifacts,
            videos=video_artifacts,
            audios=audio_artifacts,
            files=file_artifacts,
        )
        # Read existing session from database
        team_session = _read_or_create_session(team, session_id=session_id, user_id=user_id)
        _update_metadata(team, session=team_session)
        # Resolve run options AFTER _update_metadata so session-stored metadata is visible
        opts = resolve_run_options(
            team,
            stream=stream,
            stream_events=stream_events,
            yield_run_output=yield_run_output,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            dependencies=dependencies,
            knowledge_filters=knowledge_filters,
            metadata=metadata,
            output_schema=output_schema,
        )
        # Initialize session state
        session_state = _initialize_session_state(
            team,
            session_state=session_state if session_state is not None else {},
            user_id=user_id,
            session_id=session_id,
            run_id=run_id,
        )
        # Update session state from DB
        session_state = _load_session_state(team, session=team_session, session_state=session_state)
        # Track which options were explicitly provided for run_context precedence
        dependencies_provided = dependencies is not None
        knowledge_filters_provided = knowledge_filters is not None
        metadata_provided = metadata is not None
        team.model = cast(Model, team.model)
        # Initialize run context
        run_context = run_context or RunContext(
            run_id=run_id,
            session_id=session_id,
            user_id=user_id,
            session_state=session_state,
            dependencies=opts.dependencies,
            knowledge_filters=opts.knowledge_filters,
            metadata=opts.metadata,
            output_schema=opts.output_schema,
        )
        # Apply options with precedence: explicit args > existing run_context > resolved defaults.
        opts.apply_to_context(
            run_context,
            dependencies_provided=dependencies_provided,
            knowledge_filters_provided=knowledge_filters_provided,
            metadata_provided=metadata_provided,
        )
        # Resolve callable dependencies once before retry loop
        if run_context.dependencies is not None:
            _resolve_run_dependencies(team, run_context=run_context)
        # Configure the model for runs
        # NOTE: when a parser model is configured, the main model gets no response_format;
        # the parser model handles structured output downstream.
        response_format: Optional[Union[Dict, Type[BaseModel]]] = (
            get_response_format(team, run_context=run_context) if team.parser_model is None else None
        )
        # Create a new run_response for this attempt
        run_response = TeamRunOutput(
            run_id=run_id,
            session_id=session_id,
            user_id=user_id,
            team_id=team.id,
            team_name=team.name,
            metadata=run_context.metadata,
            session_state=run_context.session_state,
            input=run_input,
        )
        run_response.model = team.model.id if team.model is not None else None
        run_response.model_provider = team.model.provider if team.model is not None else None
        # Start the run metrics timer, to calculate the run duration
        run_response.metrics = RunMetrics()
        run_response.metrics.start_timer()
    except Exception:
        # Setup failed: deregister the run from cancellation tracking before re-raising.
        cleanup_run(run_id)
        raise
    if opts.stream:
        return _run_stream(
            team,
            run_response=run_response,
            run_context=run_context,
            session=team_session,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )  # type: ignore
    else:
        return _run(
            team,
            run_response=run_response,
            run_context=run_context,
            session=team_session,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
async def _arun_tasks(
    team: "Team",
    run_response: TeamRunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    add_history_to_context: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Run the Team in autonomous task mode (async).

    The team leader iteratively plans and delegates tasks to members until
    the goal is complete or max_iterations is reached.

    Flow: setup session -> pre-hooks -> determine tools -> build messages ->
    start background memory/learning tasks -> optional reasoning -> iterate
    (model response, HITL pause check, termination check) -> post-loop
    (structured output, post-hooks, session summary, cleanup/store).

    Args:
        team: The Team to run (must have ``mode == TeamMode.tasks``; dispatched
            from ``_arun`` -- not enforced here).
        run_response: Pre-built output object for this run; mutated in place.
        run_context: Per-run context (session state, dependencies, metadata).
        session_id / user_id: Session identifiers.
        response_format: Structured-output format passed to the model.
        add_*_to_context / debug_mode / background_tasks: Forwarded options.
        **kwargs: Forwarded to hooks and message construction.

    Returns:
        The completed (or cancelled/errored) TeamRunOutput. Errors are captured
        on the run_response rather than raised, except cancellation bookkeeping.
    """
    from agno.team._hooks import _aexecute_post_hooks, _aexecute_pre_hooks
    from agno.team._init import _disconnect_connectable_tools, _disconnect_mcp_tools
    from agno.team._managers import _astart_learning_task, _astart_memory_task
    from agno.team._messages import _aget_run_messages
    from agno.team._response import (
        _convert_response_to_structured_format,
        _update_run_response,
        ahandle_reasoning,
    )
    from agno.team._telemetry import alog_team_telemetry
    from agno.team._tools import _check_and_refresh_mcp_tools, _determine_tools_for_model
    from agno.team.task import TaskStatus, load_task_list
    log_debug(f"Team Task Run Start: {run_response.run_id}", center=True)
    memory_task = None
    learning_task = None
    team_session: Optional[TeamSession] = None
    try:
        # Register run for cancellation tracking
        await aregister_run(run_context.run_id)
        # Setup session
        team_session = await _asetup_session(
            team=team,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            run_id=run_response.run_id,
        )
        run_input = cast(TeamRunInput, run_response.input)
        team.model = cast(Model, team.model)
        # 1. Execute pre-hooks
        if team.pre_hooks is not None:
            pre_hook_iterator = _aexecute_pre_hooks(
                team,
                hooks=team.pre_hooks,  # type: ignore
                run_response=run_response,
                run_context=run_context,
                run_input=run_input,
                session=team_session,
                user_id=user_id,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
            # Non-streaming run: drain hook events without yielding them.
            async for _ in pre_hook_iterator:
                pass
        # 2. Determine tools for model (includes task management tools)
        team_run_context: Dict[str, Any] = {}
        await _check_and_refresh_mcp_tools(team)
        _tools = _determine_tools_for_model(
            team,
            model=team.model,
            run_response=run_response,
            run_context=run_context,
            team_run_context=team_run_context,
            session=team_session,
            user_id=user_id,
            async_mode=True,
            input_message=run_input.input_content,
            images=run_input.images,
            videos=run_input.videos,
            audio=run_input.audios,
            files=run_input.files,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            stream=False,
            stream_events=False,
        )
        # 3. Prepare initial run messages
        run_messages = await _aget_run_messages(
            team,
            run_response=run_response,
            run_context=run_context,
            session=team_session,  # type: ignore
            user_id=user_id,
            input_message=run_input.input_content,
            audio=run_input.audios,
            images=run_input.images,
            videos=run_input.videos,
            files=run_input.files,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            tools=_tools,
            **kwargs,
        )
        # 4. Start memory and learning creation in background
        memory_task = await _astart_memory_task(
            team,
            run_messages=run_messages,
            user_id=user_id,
            existing_task=memory_task,
        )
        learning_task = await _astart_learning_task(
            team,
            run_messages=run_messages,
            session=team_session,
            user_id=user_id,
            existing_task=learning_task,
        )
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # 5. Reason about the task if reasoning is enabled
        await ahandle_reasoning(team, run_response=run_response, run_messages=run_messages, run_context=run_context)
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Use accumulated messages for the iterative loop
        # NOTE: this aliases run_messages.messages; appends below grow the shared list.
        accumulated_messages = run_messages.messages
        model_response: Optional[ModelResponse] = None
        # === Iterative task loop ===
        for iteration in range(team.max_iterations):
            log_debug(f"Task iteration {iteration + 1}/{team.max_iterations}")
            # On subsequent iterations, inject current task state as a user message
            if iteration > 0:
                task_list = load_task_list(run_context.session_state)
                task_summary = task_list.get_summary_string()
                state_message = Message(
                    role="user",
                    content=f"<current_task_state>\n{task_summary}\n</current_task_state>\n\n"
                    "Continue working on the tasks. Create, execute, or update tasks as needed. "
                    "When all tasks are done, call `mark_all_complete` with a summary.",
                )
                accumulated_messages.append(state_message)
            # Get model response
            model_response = await team.model.aresponse(
                messages=accumulated_messages,
                response_format=response_format,
                tools=_tools,
                tool_choice=team.tool_choice,
                tool_call_limit=team.tool_call_limit,
                run_response=run_response,
                send_media_to_model=team.send_media_to_model,
                compression_manager=team.compression_manager if team.compress_tool_results else None,
            )  # type: ignore
            await araise_if_cancelled(run_response.run_id)  # type: ignore
            # Update run response
            _update_run_response(
                team,
                model_response=model_response,
                run_response=run_response,
                run_messages=run_messages,
            run_context=run_context,
            )
            # Check if delegation propagated member HITL requirements
            if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                from agno.team import _hooks
                # Pause the run: a member requires human input before we can continue.
                return await _hooks.ahandle_team_run_paused(
                    team, run_response=run_response, session=team_session, run_context=run_context
                )
            # Check termination conditions
            task_list = load_task_list(run_context.session_state)
            if task_list.goal_complete:
                log_debug("Task goal marked complete, finishing task loop.")
                break
            if task_list.all_terminal():
                has_failures = any(t.status == TaskStatus.failed for t in task_list.tasks)
                if not has_failures:
                    log_debug("All tasks completed successfully, finishing task loop.")
                    break
                # Tasks are terminal but include failures: give the model another
                # iteration to recover or re-plan.
                log_debug("All tasks terminal but some failed, continuing to let model handle.")
        else:
            # Loop exhausted without completing
            task_list = load_task_list(run_context.session_state)
            if not task_list.goal_complete:
                log_warning(f"Reached max_iterations ({team.max_iterations}) without completing all tasks.")
        # === Post-loop ===
        # Store media if enabled
        if team.store_media and model_response is not None:
            store_media_util(run_response, model_response)
        # Convert response to structured format
        _convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
        # Execute post-hooks
        if team.post_hooks is not None:
            async for _ in _aexecute_post_hooks(
                team,
                hooks=team.post_hooks,  # type: ignore
                run_output=run_response,
                run_context=run_context,
                session=team_session,
                user_id=user_id,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            ):
                pass
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Wait for background memory and learning creation
        await_for_open_threads(memory_task=memory_task, learning_task=learning_task)  # type: ignore
        merge_background_metrics(run_response.metrics, collect_background_metrics(memory_task, learning_task))
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Create session summary
        if team.session_summary_manager is not None:
            team_session.upsert_run(run_response=run_response)
            try:
                await team.session_summary_manager.acreate_session_summary(
                    session=team_session, run_metrics=run_response.metrics
                )
            except Exception as e:
                # Summary creation is best-effort; do not fail the run for it.
                log_warning(f"Error in session summary creation: {str(e)}")
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Set the run status to completed
        run_response.status = RunStatus.completed
        # Cleanup and store
        await _acleanup_and_store(team, run_response=run_response, session=team_session)
        await alog_team_telemetry(team, session_id=team_session.session_id, run_id=run_response.run_id)
        log_debug(f"Team Task Run End: {run_response.run_id}", center=True, symbol="*")
        return run_response
    except RunCancelledException as e:
        log_info(f"Team task run {run_response.run_id} was cancelled")
        run_response.status = RunStatus.cancelled
        run_response.content = str(e)
        if team_session is not None:
            await _acleanup_and_store(team, run_response=run_response, session=team_session)
        return run_response
    except (InputCheckError, OutputCheckError) as e:
        run_response.status = RunStatus.error
        run_error = create_team_run_error_event(
            run_response,
            error=str(e),
            error_id=e.error_id,
            error_type=e.type,
            additional_data=e.additional_data,
        )
        run_response.events = add_team_error_event(error=run_error, events=run_response.events)
        if run_response.content is None:
            run_response.content = str(e)
        log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
        if team_session is not None:
            await _acleanup_and_store(team, run_response=run_response, session=team_session)
        return run_response
    except (KeyboardInterrupt, asyncio.CancelledError):
        run_response = cast(TeamRunOutput, run_response)
        run_response.status = RunStatus.cancelled
        run_response.content = "Operation cancelled by user"
        return run_response
    except Exception as e:
        run_response.status = RunStatus.error
        run_error = create_team_run_error_event(run_response, error=str(e))
        run_response.events = add_team_error_event(error=run_error, events=run_response.events)
        if run_response.content is None:
            run_response.content = str(e)
        log_error(f"Error in Team task run: {str(e)}")
        if team_session is not None:
            await _acleanup_and_store(team, run_response=run_response, session=team_session)
        return run_response
    finally:
        _disconnect_connectable_tools(team)
        await _disconnect_mcp_tools(team)
        # Cancel background tasks on error
        for task in (memory_task, learning_task):
            if task is not None and not task.done():
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
        await acleanup_run(run_response.run_id)  # type: ignore
    # NOTE(review): unreachable -- every try/except path above returns; kept as a
    # defensive fallback only.
    return run_response
async def _arun_tasks_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: bool = False,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    add_history_to_context: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
    """Run the Team in autonomous task mode with async streaming support.

    The team leader iteratively plans and delegates tasks to members until
    the goal is complete or max_iterations is reached. Events are yielded
    for each iteration.

    Streaming counterpart of ``_arun_tasks``: the same setup/loop/teardown
    flow, but pre-hook, reasoning, model, task-state, and completion events
    are yielded to the caller as they occur. Errors are converted to error
    events and yielded rather than raised.

    Args:
        team: The Team to run in task mode.
        run_response: Pre-built output object for this run; mutated in place.
        run_context: Per-run context (session state, dependencies, metadata).
        session_id / user_id: Session identifiers.
        response_format: Structured-output format passed to the model.
        stream_events: If True, also yield lifecycle events (started, iteration,
            task-state, summary, completed) in addition to content events.
        yield_run_output: If True, yield the final TeamRunOutput as the last item.
        add_*_to_context / debug_mode / background_tasks: Forwarded options.
        **kwargs: Forwarded to hooks and message construction.

    Yields:
        Run events (team and member), and optionally the final TeamRunOutput.
    """
    from agno.team._hooks import _aexecute_post_hooks, _aexecute_pre_hooks
    from agno.team._init import _disconnect_connectable_tools, _disconnect_mcp_tools
    from agno.team._managers import _astart_learning_task, _astart_memory_task
    from agno.team._messages import _aget_run_messages
    from agno.team._response import (
        _ahandle_model_response_stream,
        _convert_response_to_structured_format,
        agenerate_response_with_output_model_stream,
        ahandle_reasoning_stream,
    )
    from agno.team._telemetry import alog_team_telemetry
    from agno.team._tools import _check_and_refresh_mcp_tools, _determine_tools_for_model
    from agno.team.task import TaskStatus, load_task_list
    from agno.utils.events import (
        create_team_task_iteration_completed_event,
        create_team_task_iteration_started_event,
        create_team_task_state_updated_event,
    )
    log_debug(f"Team Task Run (Async Stream) Start: {run_response.run_id}", center=True)
    memory_task = None
    learning_task = None
    team_session: Optional[TeamSession] = None
    try:
        # Register run for cancellation tracking
        await aregister_run(run_context.run_id)
        # Setup session
        team_session = await _asetup_session(
            team=team,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            run_id=run_response.run_id,
        )
        run_input = cast(TeamRunInput, run_response.input)
        team.model = cast(Model, team.model)
        # 1. Execute pre-hooks
        if team.pre_hooks is not None:
            pre_hook_iterator = _aexecute_pre_hooks(
                team,
                hooks=team.pre_hooks,  # type: ignore
                run_response=run_response,
                run_context=run_context,
                run_input=run_input,
                session=team_session,
                user_id=user_id,
                debug_mode=debug_mode,
                stream_events=stream_events,
                background_tasks=background_tasks,
                **kwargs,
            )
            # Unlike the non-streaming path, hook events are forwarded to the caller.
            async for pre_hook_event in pre_hook_iterator:
                yield pre_hook_event
        # 2. Determine tools for model (includes task management tools)
        team_run_context: Dict[str, Any] = {}
        await _check_and_refresh_mcp_tools(team)
        _tools = _determine_tools_for_model(
            team,
            model=team.model,
            run_response=run_response,
            run_context=run_context,
            team_run_context=team_run_context,
            session=team_session,
            user_id=user_id,
            async_mode=True,
            input_message=run_input.input_content,
            images=run_input.images,
            videos=run_input.videos,
            audio=run_input.audios,
            files=run_input.files,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            stream=True,
            stream_events=stream_events,
        )
        # 3. Prepare initial run messages
        run_messages = await _aget_run_messages(
            team,
            run_response=run_response,
            run_context=run_context,
            session=team_session,  # type: ignore
            user_id=user_id,
            input_message=run_input.input_content,
            audio=run_input.audios,
            images=run_input.images,
            videos=run_input.videos,
            files=run_input.files,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            tools=_tools,
            **kwargs,
        )
        # 4. Start memory creation in background
        memory_task = await _astart_memory_task(
            team,
            run_messages=run_messages,
            user_id=user_id,
            existing_task=memory_task,
        )
        learning_task = await _astart_learning_task(
            team,
            run_messages=run_messages,
            session=team_session,
            user_id=user_id,
            existing_task=learning_task,
        )
        # Yield run started event
        if stream_events:
            yield handle_event(  # type: ignore
                create_team_run_started_event(run_response),
                run_response,
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # 5. Reason about the task if reasoning is enabled
        async for item in ahandle_reasoning_stream(
            team,
            run_response=run_response,
            run_messages=run_messages,
            run_context=run_context,
            stream_events=stream_events,
        ):
            await araise_if_cancelled(run_response.run_id)  # type: ignore
            yield item
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Use accumulated messages for the iterative loop
        # NOTE: aliases run_messages.messages; appends below grow the shared list.
        accumulated_messages = run_messages.messages
        # === Iterative task loop ===
        for iteration in range(team.max_iterations):
            log_debug(f"Task iteration {iteration + 1}/{team.max_iterations}")
            # Yield task iteration started event
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_task_iteration_started_event(
                        from_run_response=run_response,
                        iteration=iteration + 1,
                        max_iterations=team.max_iterations,
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
            # On subsequent iterations, inject current task state as a user message
            if iteration > 0:
                task_list = load_task_list(run_context.session_state)
                task_summary = task_list.get_summary_string()
                state_message = Message(
                    role="user",
                    content=f"<current_task_state>\n{task_summary}\n</current_task_state>\n\n"
                    "Continue working on the tasks. Create, execute, or update tasks as needed. "
                    "When all tasks are done, call `mark_all_complete` with a summary.",
                )
                accumulated_messages.append(state_message)
            # Get model response with streaming
            # Update run_messages with accumulated messages for streaming
            run_messages.messages = accumulated_messages
            if team.output_model is None:
                async for event in _ahandle_model_response_stream(
                    team,
                    session=team_session,
                    run_response=run_response,
                    run_messages=run_messages,
                    tools=_tools,
                    response_format=response_format,
                    stream_events=stream_events,
                    session_state=run_context.session_state,
                    run_context=run_context,
                ):
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    yield event
            else:
                # With an output model, leader content is demoted to intermediate
                # events; the output model produces the final content stream below.
                async for event in _ahandle_model_response_stream(
                    team,
                    session=team_session,
                    run_response=run_response,
                    run_messages=run_messages,
                    tools=_tools,
                    response_format=response_format,
                    stream_events=stream_events,
                    session_state=run_context.session_state,
                    run_context=run_context,
                ):
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    from agno.run.team import IntermediateRunContentEvent, RunContentEvent
                    if isinstance(event, RunContentEvent):
                        if stream_events:
                            yield IntermediateRunContentEvent(
                                content=event.content,
                                content_type=event.content_type,
                            )
                    else:
                        yield event
                async for event in agenerate_response_with_output_model_stream(
                    team,
                    session=team_session,
                    run_response=run_response,
                    run_messages=run_messages,
                    stream_events=stream_events,
                ):
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    yield event
            await araise_if_cancelled(run_response.run_id)  # type: ignore
            # Check if delegation propagated member HITL requirements
            if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                from agno.team import _hooks
                # Pause the run: stream the paused events, optionally the output, and stop.
                async for item in _hooks.ahandle_team_run_paused_stream(  # type: ignore[assignment]
                    team, run_response=run_response, session=team_session, run_context=run_context
                ):
                    yield item
                if yield_run_output:
                    yield run_response
                return
            # Check termination conditions
            task_list = load_task_list(run_context.session_state)
            # Yield task state updated event
            if stream_events:
                # Convert task list to TaskData for creating detailed events
                task_data_list = [
                    TaskData(
                        id=t.id,
                        title=t.title,
                        description=t.description,
                        status=t.status.value,
                        assignee=t.assignee,
                        dependencies=t.dependencies,
                        result=t.result,
                    )
                    for t in task_list.tasks
                ]
                yield handle_event(  # type: ignore
                    create_team_task_state_updated_event(
                        from_run_response=run_response,
                        task_summary=task_list.get_summary_string(),
                        goal_complete=task_list.goal_complete,
                        tasks=task_data_list,
                        completion_summary=task_list.completion_summary,
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
            # Yield task iteration completed event
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_task_iteration_completed_event(
                        from_run_response=run_response,
                        iteration=iteration + 1,
                        max_iterations=team.max_iterations,
                        task_summary=task_list.get_summary_string(),
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
            if task_list.goal_complete:
                log_debug("Task goal marked complete, finishing task loop.")
                break
            if task_list.all_terminal():
                has_failures = any(t.status == TaskStatus.failed for t in task_list.tasks)
                if not has_failures:
                    log_debug("All tasks completed successfully, finishing task loop.")
                    break
                # Terminal with failures: give the model another iteration to recover.
                log_debug("All tasks terminal but some failed, continuing to let model handle.")
        else:
            # Loop exhausted without completing
            task_list = load_task_list(run_context.session_state)
            if not task_list.goal_complete:
                log_warning(f"Reached max_iterations ({team.max_iterations}) without completing all tasks.")
        # === Post-loop ===
        # Convert response to structured format
        _convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
        # Yield RunContentCompletedEvent
        if stream_events:
            yield handle_event(  # type: ignore
                create_team_run_content_completed_event(from_run_response=run_response),
                run_response,
                events_to_skip=team.events_to_skip,
                store_events=team.store_events,
            )
        # Execute post-hooks
        if team.post_hooks is not None:
            async for event in _aexecute_post_hooks(
                team,
                hooks=team.post_hooks,  # type: ignore
                run_output=run_response,
                run_context=run_context,
                session=team_session,
                user_id=user_id,
                debug_mode=debug_mode,
                stream_events=stream_events,
                background_tasks=background_tasks,
                **kwargs,
            ):
                yield event
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Wait for background memory creation
        async for event in await_for_thread_tasks_stream(
            run_response=run_response,
            memory_task=memory_task,
            learning_task=learning_task,
            stream_events=stream_events,
            events_to_skip=team.events_to_skip,  # type: ignore
            store_events=team.store_events,
            get_memories_callback=lambda: team.aget_user_memories(user_id=user_id),
        ):
            yield event
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Create session summary
        if team.session_summary_manager is not None:
            team_session.upsert_run(run_response=run_response)
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_session_summary_started_event(from_run_response=run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
            try:
                await team.session_summary_manager.acreate_session_summary(session=team_session)
            except Exception as e:
                # Summary creation is best-effort; do not fail the run for it.
                log_warning(f"Error in session summary creation: {str(e)}")
            if stream_events:
                yield handle_event(  # type: ignore
                    create_team_session_summary_completed_event(
                        from_run_response=run_response, session_summary=team_session.summary
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
        await araise_if_cancelled(run_response.run_id)  # type: ignore
        # Create the run completed event
        # NOTE: built BEFORE status is set and the session is stored, so the stored
        # session reflects the completed run before the event reaches the caller.
        completed_event = handle_event(
            create_team_run_completed_event(from_run_response=run_response),
            run_response,
            events_to_skip=team.events_to_skip,
            store_events=team.store_events,
        )
        # Set the run status to completed
        run_response.status = RunStatus.completed
        # Cleanup and store
        await _acleanup_and_store(team, run_response=run_response, session=team_session)
        if stream_events:
            yield completed_event
        if yield_run_output:
            yield run_response
        await alog_team_telemetry(team, session_id=team_session.session_id, run_id=run_response.run_id)
        log_debug(f"Team Task Run (Async Stream) End: {run_response.run_id}", center=True, symbol="*")
    except RunCancelledException as e:
        log_info(f"Team task run {run_response.run_id} was cancelled during async streaming")
        run_response.status = RunStatus.cancelled
        run_response.content = str(e)
        yield handle_event(  # type: ignore
            create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
            run_response,
            events_to_skip=team.events_to_skip,
            store_events=team.store_events,
        )
        if team_session is not None:
            await _acleanup_and_store(team, run_response=run_response, session=team_session)
    except (InputCheckError, OutputCheckError) as e:
        run_response.status = RunStatus.error
        run_error = create_team_run_error_event(
            run_response,
            error=str(e),
            error_id=e.error_id,
            error_type=e.type,
            additional_data=e.additional_data,
        )
        run_response.events = add_team_error_event(error=run_error, events=run_response.events)
        if run_response.content is None:
            run_response.content = str(e)
        log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
        if team_session is not None:
            await _acleanup_and_store(team, run_response=run_response, session=team_session)
        yield run_error
    except (KeyboardInterrupt, asyncio.CancelledError):
        run_response = cast(TeamRunOutput, run_response)
        run_response.status = RunStatus.cancelled
        run_response.content = "Operation cancelled by user"
        try:
            if team_session is not None:
                await _acleanup_and_store(team, run_response=run_response, session=team_session)
        except Exception:
            # Best-effort persistence during interpreter/loop shutdown.
            pass
        yield handle_event(  # type: ignore
            create_team_run_cancelled_event(from_run_response=run_response, reason="Operation cancelled by user"),
            run_response,
            events_to_skip=team.events_to_skip,  # type: ignore
            store_events=team.store_events,
        )
    except Exception as e:
        run_response.status = RunStatus.error
        run_error = create_team_run_error_event(run_response, error=str(e))
        run_response.events = add_team_error_event(error=run_error, events=run_response.events)
        if run_response.content is None:
            run_response.content = str(e)
        log_error(f"Error in Team task run (async stream): {str(e)}")
        if team_session is not None:
            await _acleanup_and_store(team, run_response=run_response, session=team_session)
        yield run_error
    finally:
        _disconnect_connectable_tools(team)
        await _disconnect_mcp_tools(team)
        # Cancel background tasks on error
        if memory_task is not None and not memory_task.done():
            memory_task.cancel()
            try:
                await memory_task
            except asyncio.CancelledError:
                pass
        if learning_task is not None and not learning_task.done():
            learning_task.cancel()
            try:
                await learning_task
            except asyncio.CancelledError:
                pass
        await acleanup_run(run_response.run_id)  # type: ignore
async def _arun(
team: "Team",
run_response: TeamRunOutput,
run_context: RunContext,
session_id: str,
user_id: Optional[str] = None,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
add_history_to_context: Optional[bool] = None,
debug_mode: Optional[bool] = None,
background_tasks: Optional[Any] = None,
**kwargs: Any,
) -> TeamRunOutput:
"""Run the Team and return the response.
Pre-loop setup:
1. Setup session via _asetup_session (read/create, load state, resolve dependencies)
Steps (inside retry loop):
1. Execute pre-hooks
2. Determine tools for model
3. Prepare run messages
4. Start memory creation in background task
5. Reason about the task if reasoning is enabled
6. Get a response from the Model
7. Update TeamRunOutput with the model response
8. Store media if enabled
9. Convert response to structured format
10. Execute post-hooks
11. Wait for background memory creation
12. Create session summary
13. Cleanup and store (scrub, add to session, calculate metrics, save session)
"""
from agno.team._hooks import _aexecute_post_hooks, _aexecute_pre_hooks
from agno.team._init import _disconnect_connectable_tools, _disconnect_mcp_tools
from agno.team._managers import _astart_learning_task, _astart_memory_task
from agno.team._messages import _aget_run_messages
from agno.team._response import (
_convert_response_to_structured_format,
_update_run_response,
agenerate_response_with_output_model,
ahandle_reasoning,
aparse_response_with_parser_model,
)
from agno.team._telemetry import alog_team_telemetry
from agno.team._tools import _check_and_refresh_mcp_tools, _determine_tools_for_model
# Dispatch to task mode if applicable
from agno.team.mode import TeamMode
if team.mode == TeamMode.tasks:
return await _arun_tasks(
team,
run_response=run_response,
run_context=run_context,
session_id=session_id,
user_id=user_id,
response_format=response_format,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
add_history_to_context=add_history_to_context,
debug_mode=debug_mode,
background_tasks=background_tasks,
**kwargs,
)
log_debug(f"Team Run Start: {run_response.run_id}", center=True)
memory_task = None
learning_task = None
try:
# Register run for cancellation tracking
await aregister_run(run_context.run_id)
# Setup session: read/create, load state, resolve dependencies
team_session = await _asetup_session(
team=team,
run_context=run_context,
session_id=session_id,
user_id=user_id,
run_id=run_response.run_id,
)
# Set up retry logic
num_attempts = team.retries + 1
for attempt in range(num_attempts):
if attempt > 0:
log_debug(f"Retrying Team run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
try:
run_input = cast(TeamRunInput, run_response.input)
# 1. Execute pre-hooks after session is loaded but before processing starts
if team.pre_hooks is not None:
pre_hook_iterator = _aexecute_pre_hooks(
team,
hooks=team.pre_hooks, # type: ignore
run_response=run_response,
run_context=run_context,
run_input=run_input,
session=team_session,
user_id=user_id,
debug_mode=debug_mode,
background_tasks=background_tasks,
**kwargs,
)
# Consume the async iterator without yielding
async for _ in pre_hook_iterator:
pass
# 2. Resolve callable factories and determine tools for model
team_run_context: Dict[str, Any] = {}
team.model = cast(Model, team.model)
# Resolve callable factories (tools, knowledge, members) before tool determination
from agno.team._tools import _aresolve_callable_resources
await _aresolve_callable_resources(team, run_context=run_context)
await _check_and_refresh_mcp_tools(
team,
)
_tools = _determine_tools_for_model(
team,
model=team.model,
run_response=run_response,
run_context=run_context,
team_run_context=team_run_context,
session=team_session,
user_id=user_id,
async_mode=True,
input_message=run_input.input_content,
images=run_input.images,
videos=run_input.videos,
audio=run_input.audios,
files=run_input.files,
debug_mode=debug_mode,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
stream=False,
stream_events=False,
)
# 3. Prepare run messages
run_messages = await _aget_run_messages(
team,
run_response=run_response,
run_context=run_context,
session=team_session, # type: ignore
user_id=user_id,
input_message=run_input.input_content,
audio=run_input.audios,
images=run_input.images,
videos=run_input.videos,
files=run_input.files,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
tools=_tools,
**kwargs,
)
team.model = cast(Model, team.model)
# 4. Start memory creation in background task
memory_task = await _astart_memory_task(
team,
run_messages=run_messages,
user_id=user_id,
existing_task=memory_task,
)
learning_task = await _astart_learning_task(
team,
run_messages=run_messages,
session=team_session,
user_id=user_id,
existing_task=learning_task,
)
await araise_if_cancelled(run_response.run_id) # type: ignore
# 5. Reason about the task if reasoning is enabled
await ahandle_reasoning(
team, run_response=run_response, run_messages=run_messages, run_context=run_context
)
# Check for cancellation before model call
await araise_if_cancelled(run_response.run_id) # type: ignore
# 6. Get the model response for the team leader
model_response = await team.model.aresponse(
messages=run_messages.messages,
tools=_tools,
tool_choice=team.tool_choice,
tool_call_limit=team.tool_call_limit,
response_format=response_format,
send_media_to_model=team.send_media_to_model,
run_response=run_response,
compression_manager=team.compression_manager if team.compress_tool_results else None,
) # type: ignore
# Check for cancellation after model call
await araise_if_cancelled(run_response.run_id) # type: ignore
# If an output model is provided, generate output using the output model
await agenerate_response_with_output_model(
team, model_response=model_response, run_messages=run_messages
)
# If a parser model is provided, structure the response separately
await aparse_response_with_parser_model(
team, model_response=model_response, run_messages=run_messages, run_context=run_context
)
# 7. Update TeamRunOutput with the model response
_update_run_response(
team,
model_response=model_response,
run_response=run_response,
run_messages=run_messages,
run_context=run_context,
)
# 7b. Check if delegation propagated member HITL requirements
if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
from agno.team import _hooks
return await _hooks.ahandle_team_run_paused(
team, run_response=run_response, session=team_session, run_context=run_context
)
# 8. Store media if enabled
if team.store_media:
store_media_util(run_response, model_response)
# 9. Convert response to structured format
_convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
# 10. Execute post-hooks after output is generated but before response is returned
if team.post_hooks is not None:
async for _ in _aexecute_post_hooks(
team,
hooks=team.post_hooks, # type: ignore
run_output=run_response,
run_context=run_context,
session=team_session,
user_id=user_id,
debug_mode=debug_mode,
background_tasks=background_tasks,
**kwargs,
):
pass
await araise_if_cancelled(run_response.run_id) # type: ignore
# 11. Wait for background memory creation
await await_for_open_threads(memory_task=memory_task, learning_task=learning_task)
merge_background_metrics(run_response.metrics, collect_background_metrics(memory_task, learning_task))
await araise_if_cancelled(run_response.run_id) # type: ignore
# 12. Create session summary
if team.session_summary_manager is not None:
# Upsert the RunOutput to Team Session before creating the session summary
team_session.upsert_run(run_response=run_response)
try:
await team.session_summary_manager.acreate_session_summary(
session=team_session, run_metrics=run_response.metrics
)
except Exception as e:
log_warning(f"Error in session summary creation: {str(e)}")
await araise_if_cancelled(run_response.run_id) # type: ignore
run_response.status = RunStatus.completed
# 13. Cleanup and store the run response and session
await _acleanup_and_store(team, run_response=run_response, session=team_session)
# Log Team Telemetry
await alog_team_telemetry(team, session_id=team_session.session_id, run_id=run_response.run_id)
log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
return run_response
except RunCancelledException as e:
# Handle run cancellation
log_info(f"Run {run_response.run_id} was cancelled")
run_response.content = str(e)
run_response.status = RunStatus.cancelled
# Cleanup and store the run response and session
await _acleanup_and_store(team, run_response=run_response, session=team_session)
return run_response
except (InputCheckError, OutputCheckError) as e:
run_response.status = RunStatus.error
run_error = create_team_run_error_event(
run_response,
error=str(e),
error_id=e.error_id,
error_type=e.type,
additional_data=e.additional_data,
)
run_response.events = add_team_error_event(error=run_error, events=run_response.events)
if run_response.content is None:
run_response.content = str(e)
log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
await _acleanup_and_store(team, run_response=run_response, session=team_session)
return run_response
except (KeyboardInterrupt, asyncio.CancelledError):
run_response = cast(TeamRunOutput, run_response)
run_response.status = RunStatus.cancelled
run_response.content = "Operation cancelled by user"
try:
await _acleanup_and_store(team, run_response=run_response, session=team_session)
except Exception:
pass
return run_response
except Exception as e:
if attempt < num_attempts - 1:
# Calculate delay with exponential backoff if enabled
if team.exponential_backoff:
delay = team.delay_between_retries * (2**attempt)
else:
delay = team.delay_between_retries
log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
await asyncio.sleep(delay)
continue
run_response.status = RunStatus.error
run_error = create_team_run_error_event(run_response, error=str(e))
run_response.events = add_team_error_event(error=run_error, events=run_response.events)
if run_response.content is None:
run_response.content = str(e)
log_error(f"Error in Team run: {str(e)}")
# Cleanup and store the run response and session
await _acleanup_and_store(team, run_response=run_response, session=team_session)
return run_response
finally:
# Always disconnect connectable tools
_disconnect_connectable_tools(team)
await _disconnect_mcp_tools(team)
# Cancel background task on error (await_for_open_threads handles waiting on success)
if memory_task is not None and not memory_task.done():
memory_task.cancel()
try:
await memory_task
except asyncio.CancelledError:
pass
if learning_task is not None and not learning_task.done():
learning_task.cancel()
try:
await learning_task
except asyncio.CancelledError:
pass
# Always clean up the run tracking
await acleanup_run(run_response.run_id) # type: ignore
return run_response
async def _arun_background(
    team: "Team",
    run_response: TeamRunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Start a team run in the background and return immediately with PENDING status.

    The run is persisted with PENDING status, then an asyncio task is spawned
    to execute the actual run. The task transitions through RUNNING -> COMPLETED/ERROR.
    Callers can poll for results via team.aget_run_output(run_id, session_id).

    Args:
        team: The team to run.
        run_response: Pre-built run output shell; mutated in place as the run progresses.
        run_context: Run context carrying run_id, session state, dependencies, etc.
        session_id: Session to persist the run under.
        user_id: Optional user the run belongs to.
        add_history_to_context / add_dependencies_to_context / add_session_state_to_context:
            Context-building flags forwarded to the real run.
        response_format: Response format forwarded to the model call.
        debug_mode: Debug flag forwarded to the real run.
        background_tasks: Optional FastAPI BackgroundTasks forwarded through hooks.
        **kwargs: Extra options forwarded verbatim to ``_arun``.

    Returns:
        The same ``run_response`` object, still in PENDING status.
    """
    from agno.team._session import asave_session
    from agno.team._storage import _aread_or_create_session, _update_metadata

    # 1. Register the run for cancellation tracking (before spawning the task),
    # so a cancel request arriving before the task starts is still honored.
    await aregister_run(run_context.run_id)
    # 2. Set status to PENDING
    run_response.status = RunStatus.pending
    # 3. Persist the PENDING run so polling can find it immediately
    team_session = await _aread_or_create_session(team, session_id=session_id, user_id=user_id)
    _update_metadata(team, session=team_session)
    team_session.upsert_run(run_response=run_response)
    await asave_session(team, session=team_session)
    log_info(f"Background run {run_response.run_id} created with PENDING status")

    # 4. Spawn the background task
    async def _background_task() -> None:
        # Executes the actual run; transitions PENDING -> RUNNING -> (terminal state).
        try:
            # Transition to RUNNING and persist it so pollers see progress.
            run_response.status = RunStatus.running
            team_session.upsert_run(run_response=run_response)
            await asave_session(team, session=team_session)
            # Execute the actual run — _arun handles everything including
            # session persistence and cleanup
            await _arun(
                team,
                run_response=run_response,
                run_context=run_context,
                session_id=session_id,
                user_id=user_id,
                add_history_to_context=add_history_to_context,
                add_dependencies_to_context=add_dependencies_to_context,
                add_session_state_to_context=add_session_state_to_context,
                response_format=response_format,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
        except Exception:
            # _arun normally swallows its own errors into run_response; this
            # only fires for failures outside that handling.
            log_error(f"Background run {run_response.run_id} failed", exc_info=True)
            # Persist ERROR status (best-effort: storage itself may be failing)
            try:
                run_response.status = RunStatus.error
                team_session.upsert_run(run_response=run_response)
                await asave_session(team, session=team_session)
            except Exception:
                log_error(f"Failed to persist error state for background run {run_response.run_id}", exc_info=True)
        # Note: acleanup_run is already called by _arun's finally block

    task = asyncio.create_task(_background_task())
    # Keep a strong reference so the task is not garbage-collected mid-flight;
    # discard it from the module-level set once the task completes.
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)
    # 5. Return immediately with the PENDING response
    return run_response
async def _arun_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_context: RunContext,
    session_id: str,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: bool = False,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    add_history_to_context: Optional[bool] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
    """Run the Team and return the response as a stream of events.

    Pre-loop setup:
        1. Setup session via _asetup_session (read/create, load state, resolve dependencies)

    Steps (inside retry loop):
        1. Execute pre-hooks
        2. Determine tools for model
        3. Prepare run messages
        4. Start memory creation in background task
        5. Reason about the task if reasoning is enabled
        6. Get a response from the model
        7. Parse response with parser model if provided
        8. Wait for background memory creation
        9. Create session summary
        10. Cleanup and store (scrub, add to session, calculate metrics, save session)

    Yields:
        Team/member run events (and, when ``yield_run_output`` is True, the final
        ``TeamRunOutput`` as the last item).
    """
    from agno.team._hooks import _aexecute_post_hooks, _aexecute_pre_hooks
    from agno.team._init import _disconnect_connectable_tools, _disconnect_mcp_tools
    from agno.team._managers import _astart_learning_task, _astart_memory_task
    from agno.team._messages import _aget_run_messages
    from agno.team._response import (
        _ahandle_model_response_stream,
        agenerate_response_with_output_model_stream,
        ahandle_reasoning_stream,
        aparse_response_with_parser_model_stream,
    )
    from agno.team._telemetry import alog_team_telemetry
    from agno.team._tools import _check_and_refresh_mcp_tools, _determine_tools_for_model

    # Dispatch to task-mode streaming if applicable; everything below only
    # applies to the default (coordinate) mode.
    from agno.team.mode import TeamMode

    if team.mode == TeamMode.tasks:
        async for event in _arun_tasks_stream(
            team,
            run_response=run_response,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            response_format=response_format,
            stream_events=stream_events,
            yield_run_output=yield_run_output,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            add_history_to_context=add_history_to_context,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        ):
            yield event
        return

    log_debug(f"Team Run Start: {run_response.run_id}", center=True)
    memory_task = None
    learning_task = None
    try:
        # Register run for cancellation tracking
        await aregister_run(run_context.run_id)
        # Setup session: read/create, load state, resolve dependencies
        team_session = await _asetup_session(
            team=team,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            run_id=run_response.run_id,
        )
        # Set up retry logic
        num_attempts = team.retries + 1
        for attempt in range(num_attempts):
            if attempt > 0:
                log_debug(f"Retrying Team run {run_response.run_id}. Attempt {attempt + 1} of {num_attempts}...")
            try:
                # 1. Execute pre-hooks
                run_input = cast(TeamRunInput, run_response.input)
                team.model = cast(Model, team.model)
                if team.pre_hooks is not None:
                    pre_hook_iterator = _aexecute_pre_hooks(
                        team,
                        hooks=team.pre_hooks,  # type: ignore
                        run_response=run_response,
                        run_context=run_context,
                        run_input=run_input,
                        session=team_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Pre-hook events are forwarded to the caller (unlike the
                    # non-streaming path, which consumes them silently).
                    async for pre_hook_event in pre_hook_iterator:
                        yield pre_hook_event
                # 2. Resolve callable factories and determine tools for model
                team_run_context: Dict[str, Any] = {}
                team.model = cast(Model, team.model)
                # Resolve callable factories (tools, knowledge, members) before tool determination
                from agno.team._tools import _aresolve_callable_resources

                await _aresolve_callable_resources(team, run_context=run_context)
                await _check_and_refresh_mcp_tools(
                    team,
                )
                _tools = _determine_tools_for_model(
                    team,
                    model=team.model,
                    run_response=run_response,
                    run_context=run_context,
                    team_run_context=team_run_context,
                    session=team_session,  # type: ignore
                    user_id=user_id,
                    async_mode=True,
                    input_message=run_input.input_content,
                    images=run_input.images,
                    videos=run_input.videos,
                    audio=run_input.audios,
                    files=run_input.files,
                    debug_mode=debug_mode,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    stream=True,
                    stream_events=stream_events,
                )
                # 3. Prepare run messages
                run_messages = await _aget_run_messages(
                    team,
                    run_response=run_response,
                    run_context=run_context,
                    session=team_session,  # type: ignore
                    user_id=user_id,
                    input_message=run_input.input_content,
                    audio=run_input.audios,
                    images=run_input.images,
                    videos=run_input.videos,
                    files=run_input.files,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    tools=_tools,
                    **kwargs,
                )
                # 4. Start memory creation in background task (re-uses the
                # existing task across retries, if any).
                memory_task = await _astart_memory_task(
                    team,
                    run_messages=run_messages,
                    user_id=user_id,
                    existing_task=memory_task,
                )
                learning_task = await _astart_learning_task(
                    team,
                    run_messages=run_messages,
                    session=team_session,
                    user_id=user_id,
                    existing_task=learning_task,
                )
                # Yield the run started event
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_run_started_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                # 5. Reason about the task if reasoning is enabled
                async for item in ahandle_reasoning_stream(
                    team,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                    stream_events=stream_events,
                ):
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    yield item
                # Check for cancellation before model processing
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 6. Get a response from the model
                if team.output_model is None:
                    # No output model: forward model events to the caller as-is.
                    async for event in _ahandle_model_response_stream(
                        team,
                        session=team_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                else:
                    # With an output model, the leader model's raw content is only
                    # intermediate: downgrade RunContentEvents so the final content
                    # comes from the output-model stream below.
                    async for event in _ahandle_model_response_stream(
                        team,
                        session=team_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=_tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        from agno.run.team import IntermediateRunContentEvent, RunContentEvent

                        if isinstance(event, RunContentEvent):
                            if stream_events:
                                yield IntermediateRunContentEvent(
                                    content=event.content,
                                    content_type=event.content_type,
                                )
                        else:
                            yield event
                    async for event in agenerate_response_with_output_model_stream(
                        team,
                        session=team_session,
                        run_response=run_response,
                        run_messages=run_messages,
                        stream_events=stream_events,
                    ):
                        await araise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                # Check for cancellation after model processing
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 6b. Check if delegation propagated member HITL requirements;
                # if so, pause the run instead of completing it.
                if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                    from agno.team import _hooks

                    async for item in _hooks.ahandle_team_run_paused_stream(  # type: ignore[assignment]
                        team, run_response=run_response, session=team_session, run_context=run_context
                    ):
                        yield item
                    if yield_run_output:
                        yield run_response
                    return
                # 7. Parse response with parser model if provided
                async for event in aparse_response_with_parser_model_stream(
                    team,
                    session=team_session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                ):
                    yield event
                # Yield RunContentCompletedEvent
                if stream_events:
                    yield handle_event(  # type: ignore
                        create_team_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                # Execute post-hooks after output is generated but before response is returned
                if team.post_hooks is not None:
                    async for event in _aexecute_post_hooks(
                        team,
                        hooks=team.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=team_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        stream_events=stream_events,
                        background_tasks=background_tasks,
                        **kwargs,
                    ):
                        yield event
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 8. Wait for background memory creation
                async for event in await_for_thread_tasks_stream(
                    run_response=run_response,
                    memory_task=memory_task,
                    learning_task=learning_task,
                    stream_events=stream_events,
                    events_to_skip=team.events_to_skip,  # type: ignore
                    store_events=team.store_events,
                    get_memories_callback=lambda: team.aget_user_memories(user_id=user_id),
                ):
                    yield event
                merge_background_metrics(run_response.metrics, collect_background_metrics(memory_task, learning_task))
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # 9. Create session summary
                if team.session_summary_manager is not None:
                    # Upsert the RunOutput to Team Session before creating the session summary
                    team_session.upsert_run(run_response=run_response)
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_team_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=team.events_to_skip,
                            store_events=team.store_events,
                        )
                    try:
                        await team.session_summary_manager.acreate_session_summary(
                            session=team_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary failures are non-fatal; the run still completes.
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(  # type: ignore
                            create_team_session_summary_completed_event(
                                from_run_response=run_response, session_summary=team_session.summary
                            ),
                            run_response,
                            events_to_skip=team.events_to_skip,
                            store_events=team.store_events,
                        )
                await araise_if_cancelled(run_response.run_id)  # type: ignore
                # Create the run completed event before cleanup so it captures
                # the final response content.
                completed_event = handle_event(
                    create_team_run_completed_event(from_run_response=run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                # Set the run status to completed
                run_response.status = RunStatus.completed
                # 10. Cleanup and store the run response and session
                await _acleanup_and_store(team, run_response=run_response, session=team_session)
                if stream_events:
                    yield completed_event
                if yield_run_output:
                    yield run_response
                # Log Team Telemetry
                await alog_team_telemetry(team, session_id=team_session.session_id, run_id=run_response.run_id)
                log_debug(f"Team Run End: {run_response.run_id}", center=True, symbol="*")
                break
            except RunCancelledException as e:
                # Handle run cancellation during async streaming
                log_info(f"Team run {run_response.run_id} was cancelled during async streaming")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                # Yield the cancellation event
                yield handle_event(  # type: ignore
                    create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                # Cleanup and store the run response and session
                await _acleanup_and_store(team, run_response=run_response, session=team_session)
                break
            except (InputCheckError, OutputCheckError) as e:
                # Guardrail/validation failures are terminal (no retry).
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                await _acleanup_and_store(team, run_response=run_response, session=team_session)
                yield run_error
                break
            except (KeyboardInterrupt, asyncio.CancelledError):
                # NOTE(review): unlike the non-streaming path, run_response.status
                # is not set to cancelled here — confirm whether that is intended.
                run_response = cast(TeamRunOutput, run_response)
                try:
                    await _acleanup_and_store(team, run_response=run_response, session=team_session)
                except Exception:
                    pass
                yield handle_event(  # type: ignore
                    create_team_run_cancelled_event(
                        from_run_response=run_response, reason="Operation cancelled by user"
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,  # type: ignore
                    store_events=team.store_events,
                )
                break
            except Exception as e:
                if attempt < num_attempts - 1:
                    # Calculate delay with exponential backoff if enabled
                    if team.exponential_backoff:
                        delay = team.delay_between_retries * (2**attempt)
                    else:
                        delay = team.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                    continue
                # Final attempt failed: record the error and emit an error event.
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(run_response, error=str(e))
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Team run: {str(e)}")
                # Cleanup and store the run response and session
                await _acleanup_and_store(team, run_response=run_response, session=team_session)
                yield run_error
    finally:
        # Always disconnect connectable tools
        _disconnect_connectable_tools(team)
        await _disconnect_mcp_tools(team)
        # Cancel background task on error (await_for_thread_tasks_stream handles waiting on success)
        if memory_task is not None and not memory_task.done():
            memory_task.cancel()
            try:
                await memory_task
            except asyncio.CancelledError:
                pass
        if learning_task is not None and not learning_task.done():
            learning_task.cancel()
            try:
                await learning_task
            except asyncio.CancelledError:
                pass
        # Always clean up the run tracking
        await acleanup_run(run_response.run_id)  # type: ignore
def arun_dispatch(  # type: ignore
    team: "Team",
    input: Union[str, List, Dict, Message, BaseModel, List[Message]],
    *,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    run_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    user_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
    background: bool = False,
    **kwargs: Any,
) -> Union[TeamRunOutput, AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent]]]:
    """Run the Team asynchronously and return the response.

    This is a synchronous dispatcher: it performs all setup (option resolution,
    input validation, run context and run output construction), then returns
    either an awaitable coroutine (``_arun`` / ``_arun_background``) or an async
    iterator of events (``_arun_stream``) depending on ``stream``/``background``.
    The caller is responsible for awaiting / iterating the returned object.

    Raises:
        ValueError: If ``background=True`` is combined with streaming, or used
            without a database configured on the team.
    """
    # Generate a run id up front so it is stable across setup and dispatch.
    from agno.team._init import _initialize_session
    from agno.team._response import get_response_format
    from agno.team._run_options import resolve_run_options

    run_id = run_id or str(uuid4())
    # Initialize Team
    team.initialize_team(debug_mode=debug_mode)
    # Resolve run options centrally (explicit args override team defaults)
    opts = resolve_run_options(
        team,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        add_history_to_context=add_history_to_context,
        add_dependencies_to_context=add_dependencies_to_context,
        add_session_state_to_context=add_session_state_to_context,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
        output_schema=output_schema,
    )
    if (opts.add_history_to_context) and not team.db and not team.parent_team_id:
        log_warning(
            "add_history_to_context is True, but no database has been assigned to the team. History will not be added to the context."
        )
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks

        # Annotation-only re-binding to narrow the type for checkers.
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # Validate input against input_schema if provided
    validated_input = validate_input(input, team.input_schema)
    # Normalize hooks & guardrails once per team (async variants)
    if not team._hooks_normalised:
        if team.pre_hooks:
            team.pre_hooks = normalize_pre_hooks(team.pre_hooks, async_mode=True)  # type: ignore
        if team.post_hooks:
            team.post_hooks = normalize_post_hooks(team.post_hooks, async_mode=True)  # type: ignore
        team._hooks_normalised = True
    session_id, user_id = _initialize_session(team, session_id=session_id, user_id=user_id)
    image_artifacts, video_artifacts, audio_artifacts, file_artifacts = validate_media_object_id(
        images=images, videos=videos, audios=audio, files=files
    )
    # Track which options were explicitly provided for run_context precedence
    dependencies_provided = dependencies is not None
    knowledge_filters_provided = knowledge_filters is not None
    metadata_provided = metadata is not None
    # Create RunInput to capture the original user input
    run_input = TeamRunInput(
        input_content=validated_input,
        images=image_artifacts,
        videos=video_artifacts,
        audios=audio_artifacts,
        files=file_artifacts,
    )
    team.model = cast(Model, team.model)
    # Initialize run context (re-used if the caller supplied one)
    run_context = run_context or RunContext(
        run_id=run_id,
        session_id=session_id,
        user_id=user_id,
        session_state=session_state,
        dependencies=opts.dependencies,
        knowledge_filters=opts.knowledge_filters,
        metadata=opts.metadata,
        output_schema=opts.output_schema,
    )
    # Apply options with precedence: explicit args > existing run_context > resolved defaults.
    opts.apply_to_context(
        run_context,
        dependencies_provided=dependencies_provided,
        knowledge_filters_provided=knowledge_filters_provided,
        metadata_provided=metadata_provided,
    )
    # Configure the model response format for runs; skipped when a parser
    # model will structure the output instead.
    response_format: Optional[Union[Dict, Type[BaseModel]]] = (
        get_response_format(team, run_context=run_context) if team.parser_model is None else None
    )
    # Create a new run_response for this attempt
    run_response = TeamRunOutput(
        run_id=run_id,
        user_id=user_id,
        session_id=session_id,
        team_id=team.id,
        team_name=team.name,
        metadata=run_context.metadata,
        session_state=run_context.session_state,
        input=run_input,
    )
    run_response.model = team.model.id if team.model is not None else None
    run_response.model_provider = team.model.provider if team.model is not None else None
    # Start the run metrics timer, to calculate the run duration
    run_response.metrics = RunMetrics()
    run_response.metrics.start_timer()
    # Background execution: return immediately with PENDING status
    if background:
        if opts.stream:
            raise ValueError(
                "Background execution cannot be combined with streaming. Set stream=False when using background=True."
            )
        if not team.db:
            raise ValueError(
                "Background execution requires a database to be configured on the team for run persistence."
            )
        return _arun_background(  # type: ignore[return-value]
            team,  # type: ignore
            run_response=run_response,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
    if opts.stream:
        return _arun_stream(
            team,  # type: ignore
            run_response=run_response,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
    else:
        return _arun(
            team,  # type: ignore
            run_response=run_response,
            run_context=run_context,
            session_id=session_id,
            user_id=user_id,
            add_history_to_context=opts.add_history_to_context,
            add_dependencies_to_context=opts.add_dependencies_to_context,
            add_session_state_to_context=opts.add_session_state_to_context,
            response_format=response_format,
            debug_mode=debug_mode,
            background_tasks=background_tasks,
            **kwargs,
        )
def _update_team_media(team: "Team", run_response: Union[TeamRunOutput, RunOutput]) -> None:
"""Update the team state with the run response."""
if run_response.images is not None:
if team.images is None:
team.images = []
team.images.extend(run_response.images)
if run_response.videos is not None:
if team.videos is None:
team.videos = []
team.videos.extend(run_response.videos)
if run_response.audio is not None:
if team.audio is None:
team.audio = []
team.audio.extend(run_response.audio)
# ---------------------------------------------------------------------------
# Post-run cleanup (moved from _storage.py)
# ---------------------------------------------------------------------------
def _cleanup_and_store(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> None:
    """Finalize a completed (or failed/cancelled) run and persist it.

    Pipeline (order matters — the scrub must happen before the session upsert,
    and metrics before the save): scrub stored fields, stop the run timer,
    sync session state, upsert the run into the session, update session
    metrics, save the session, then sync any associated approval record.
    """
    # Scrub the stored run based on storage flags
    from agno.run.approval import update_approval_run_status
    from agno.team._session import update_session_metrics

    scrub_run_output_for_storage(team, run_response)
    # Stop the timer for the Run duration
    if run_response.metrics:
        run_response.metrics.stop_timer()
    # Update run_response.session_state before saving
    if run_context is not None and run_context.session_state is not None:
        run_response.session_state = run_context.session_state
    # Add RunOutput to Team Session
    session.upsert_run(run_response=run_response)
    # Calculate session metrics
    update_session_metrics(team, session=session, run_response=run_response)
    # Update session state before saving the session
    if run_context is not None and run_context.session_state is not None:
        if session.session_data is not None:
            session.session_data["session_state"] = run_context.session_state
        else:
            session.session_data = {"session_state": run_context.session_state}
    # Save session to storage
    team.save_session(session=session)
    # Update approval run_status if this run has an associated approval.
    # This is a no-op if no approval exists for this run_id.
    if run_response.status is not None and run_response.run_id is not None:
        update_approval_run_status(team.db, run_response.run_id, run_response.status)
async def _acleanup_and_store(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> None:
    """Finalize a team run and persist it to the session store (async).

    Async twin of _cleanup_and_store: identical step ordering, but uses the
    async session save and the async approval-status update.
    """
    # Scrub the stored run based on storage flags
    from agno.run.approval import aupdate_approval_run_status
    from agno.team._session import update_session_metrics
    scrub_run_output_for_storage(team, run_response)
    # Stop the timer for the Run duration
    if run_response.metrics:
        run_response.metrics.stop_timer()
    # Update run_response.session_state before saving
    if run_context is not None and run_context.session_state is not None:
        run_response.session_state = run_context.session_state
    # Add RunOutput to Team Session
    session.upsert_run(run_response=run_response)
    # Calculate session metrics
    update_session_metrics(team, session=session, run_response=run_response)
    # Update session state before saving the session
    if run_context is not None and run_context.session_state is not None:
        if session.session_data is not None:
            session.session_data["session_state"] = run_context.session_state
        else:
            session.session_data = {"session_state": run_context.session_state}
    # Save session to memory
    await team.asave_session(session=session)
    # Update approval run_status if this run has an associated approval.
    # This is a no-op if no approval exists for this run_id.
    if run_response.status is not None and run_response.run_id is not None:
        await aupdate_approval_run_status(team.db, run_response.run_id, run_response.status)
def scrub_run_output_for_storage(team: "Team", run_response: TeamRunOutput) -> bool:
    """Strip data the team is configured not to persist from the run output.

    Each scrubber runs when its corresponding ``store_*`` flag is disabled.

    Returns:
        True if at least one scrubber ran, False otherwise.
    """
    from agno.utils.agent import (
        scrub_history_messages_from_run_output,
        scrub_media_from_run_output,
        scrub_tool_results_from_run_output,
    )
    # (keep-flag, scrubber) pairs — a scrubber fires when its flag is off.
    scrub_plan = (
        (team.store_media, scrub_media_from_run_output),
        (team.store_tool_messages, scrub_tool_results_from_run_output),
        (team.store_history_messages, scrub_history_messages_from_run_output),
    )
    scrubbed = False
    for keep, scrubber in scrub_plan:
        if not keep:
            scrubber(run_response)
            scrubbed = True
    return scrubbed
def _scrub_member_responses(team: "Team", member_responses: List[Union[TeamRunOutput, RunOutput]]) -> None:
    """Scrub each member's response according to that member's own storage flags.

    Resolves every response's originating member (agent or nested team) and,
    when any of the member's ``store_*`` flags is disabled, scrubs the response
    before it is persisted with the team session. Nested team responses are
    handled recursively so grandchildren are scrubbed too.
    """
    from agno.team._tools import _find_member_by_id
    from agno.team.team import Team
    for response in member_responses:
        # Agent responses identify themselves by agent_id, team responses by team_id.
        if isinstance(response, RunOutput):
            rid = response.agent_id
        elif isinstance(response, TeamRunOutput):
            rid = response.team_id
        else:
            rid = None
        if not rid:
            log_info("Skipping member response with no ID")
            continue
        lookup = _find_member_by_id(team, rid)
        if not lookup:
            log_debug(f"Could not find member with ID: {rid}")
            continue
        _, member = lookup
        # Scrub whenever at least one of the member's store flags is off.
        if not (member.store_media and member.store_tool_messages and member.store_history_messages):
            from agno.agent._run import scrub_run_output_for_storage
            scrub_run_output_for_storage(member, run_response=response)  # type: ignore[arg-type]
        # If this is a nested team, recursively scrub its member responses
        if isinstance(member, Team) and isinstance(response, TeamRunOutput) and response.member_responses:
            member._scrub_member_responses(response.member_responses)  # type: ignore
# ---------------------------------------------------------------------------
# Run dependency resolution (moved from _tools.py)
# ---------------------------------------------------------------------------
def _resolve_run_dependencies(team: "Team", run_context: RunContext) -> None:
    """Resolve callable dependency values on the run context, in place (sync).

    Non-callable entries are written back unchanged. Callables are invoked,
    receiving ``agent``/``team``/``run_context`` keyword arguments only when
    their signature declares those parameter names; a failing callable is
    logged and its original value is left in place.
    """
    from inspect import signature
    log_debug("Resolving dependencies")
    if not isinstance(run_context.dependencies, dict):
        log_warning("Dependencies is not a dict")
        return
    # Candidate kwargs offered to every dependency factory.
    candidates = (("agent", team), ("team", team), ("run_context", run_context))
    for key, value in run_context.dependencies.items():
        if not callable(value):
            run_context.dependencies[key] = value
            continue
        try:
            params = signature(value).parameters
            call_kwargs: Dict[str, Any] = {name: arg for name, arg in candidates if name in params}
            run_context.dependencies[key] = value(**call_kwargs) if call_kwargs else value()
        except Exception as e:
            log_warning(f"Failed to resolve dependencies for {key}: {e}")
async def _aresolve_run_dependencies(team: "Team", run_context: RunContext) -> None:
    """Resolve callable dependency values on the run context, in place (async).

    Mirrors _resolve_run_dependencies, but additionally awaits any coroutine
    returned by a dependency callable. Non-callable entries are written back
    unchanged; a failing callable is logged and its original value kept.
    """
    from inspect import iscoroutine, signature
    # Log messages aligned with the sync variant (previously said "context"),
    # so both code paths can be filtered/grepped consistently.
    log_debug("Resolving dependencies (async)")
    if not isinstance(run_context.dependencies, dict):
        log_warning("Dependencies is not a dict")
        return
    for key, value in run_context.dependencies.items():
        if not callable(value):
            run_context.dependencies[key] = value
            continue
        try:
            sig = signature(value)
            # Build kwargs for the function based on its declared parameters
            kwargs: Dict[str, Any] = {}
            if "agent" in sig.parameters:
                kwargs["agent"] = team
            if "team" in sig.parameters:
                kwargs["team"] = team
            if "run_context" in sig.parameters:
                kwargs["run_context"] = run_context
            resolved_value = value(**kwargs) if kwargs else value()
            # Await async dependency factories before storing the result.
            if iscoroutine(resolved_value):
                resolved_value = await resolved_value
            run_context.dependencies[key] = resolved_value
        except Exception as e:
            log_warning(f"Failed to resolve dependencies for {key}: {e}")
# ---------------------------------------------------------------------------
# continue_run infrastructure
# ---------------------------------------------------------------------------
def _get_continue_run_messages(
    team: "Team",
    input: List[Message],
) -> RunMessages:
    """Assemble a RunMessages object from an existing conversation.

    Similar to agent's get_continue_run_messages: selects the most recent user
    message and the first message matching the team's system role out of
    ``input``, and carries the full message list along for the continuation.
    """
    run_messages = RunMessages()
    # Most recent user message wins.
    run_messages.user_message = next((m for m in reversed(input) if m.role == "user"), None)
    # First message carrying the team's configured system role.
    system_role = team.system_message_role or "system"
    run_messages.system_message = next((m for m in input if m.role == system_role), None)
    run_messages.messages = input
    return run_messages
def _handle_team_tool_call_updates(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    tools: List[Union[Function, dict]],
) -> None:
    """Handle tool call updates for team-level tools (sync, non-streaming).

    Mirrors agent's handle_tool_call_updates but operates on team-level tools.
    The agent-level functions (run_tool, reject_tool_call, etc.) accept ``Agent``
    in their type hints but only access duck-typed attributes (``model``, ``name``,
    etc.) that ``Team`` also provides, so passing a ``Team`` is safe at runtime.

    Each paused tool execution on the run is resolved by exactly one of four
    cases: confirmation (execute or reject), external execution, agentic
    ``get_user_input``, or explicit user input followed by execution.
    """
    from agno.agent._tools import (
        handle_external_execution_update,
        handle_get_user_input_tool_update,
        handle_user_input_update,
        reject_tool_call,
        run_tool,
    )
    team.model = cast(Model, team.model)
    # Only Function tools can be (re-)executed here; plain dict tool specs are skipped.
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: Handle confirmed tools and execute them
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            # Execute only when confirmed and not already run (result is None).
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                # Drain the run_tool generator; its events are discarded in the non-streaming path.
                deque(run_tool(team, run_response, run_messages, _t, functions=_functions), maxlen=0)  # type: ignore
            else:
                reject_tool_call(team, run_messages, _t, functions=_functions)  # type: ignore
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            _t.requires_confirmation = False
        # Case 2: Handle external execution required tools
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(team, run_messages=run_messages, tool=_t)  # type: ignore
        # Case 3: Agentic user input required
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(team, run_messages=run_messages, tool=_t)  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: Handle user input required tools
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_user_input_update(team, tool=_t)  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
            # Drain the generator to actually execute the tool with the provided input.
            deque(run_tool(team, run_response, run_messages, _t, functions=_functions), maxlen=0)  # type: ignore
def _handle_team_tool_call_updates_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    tools: List[Union[Function, dict]],
    stream_events: bool = False,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
    """Handle tool call updates for team-level tools (sync streaming).

    Mirrors agent's handle_tool_call_updates_stream but operates on team-level tools.
    Yields events during tool execution for streaming responses.

    Args:
        stream_events: Forwarded to run_tool so intermediate tool events are
            emitted as well as results.
    """
    from agno.agent._tools import (
        handle_external_execution_update,
        handle_get_user_input_tool_update,
        handle_user_input_update,
        reject_tool_call,
        run_tool,
    )
    team.model = cast(Model, team.model)
    # Only Function tools can be (re-)executed here; plain dict tool specs are skipped.
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: Handle confirmed tools and execute them
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            # Execute only when confirmed and not already run (result is None).
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                yield from run_tool(
                    team,  # type: ignore[arg-type]
                    run_response,  # type: ignore[arg-type]
                    run_messages,
                    _t,
                    functions=_functions,
                    stream_events=stream_events,  # type: ignore
                )
            else:
                reject_tool_call(team, run_messages, _t, functions=_functions)  # type: ignore
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            _t.requires_confirmation = False
        # Case 2: Handle external execution required tools
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(team, run_messages=run_messages, tool=_t)  # type: ignore
        # Case 3: Agentic user input required
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(team, run_messages=run_messages, tool=_t)  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: Handle user input required tools
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_user_input_update(team, tool=_t)  # type: ignore
            # Execute the tool with the provided input, yielding events as they arrive.
            yield from run_tool(
                team,  # type: ignore[arg-type]
                run_response,  # type: ignore[arg-type]
                run_messages,
                _t,
                functions=_functions,
                stream_events=stream_events,  # type: ignore
            )
            _t.requires_user_input = False
            _t.answered = True
async def _ahandle_team_tool_call_updates(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    tools: List[Union[Function, dict]],
) -> None:
    """Async version of _handle_team_tool_call_updates.

    See _handle_team_tool_call_updates docstring for the Team/Agent duck-typing note.
    Resolves each paused tool execution via one of four cases: confirmation
    (execute or reject), external execution, agentic ``get_user_input``, or
    explicit user input followed by execution.
    """
    from agno.agent._tools import (
        arun_tool,
        handle_external_execution_update,
        handle_get_user_input_tool_update,
        handle_user_input_update,
        reject_tool_call,
    )
    team.model = cast(Model, team.model)
    # Only Function tools can be (re-)executed here; plain dict tool specs are skipped.
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: confirmed tools — execute, otherwise record the rejection.
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                # Drain the async generator; events are discarded in the non-streaming path.
                async for _ in arun_tool(team, run_response, run_messages, _t, functions=_functions):  # type: ignore
                    pass
            else:
                reject_tool_call(team, run_messages, _t, functions=_functions)  # type: ignore
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            _t.requires_confirmation = False
        # Case 2: external execution required tools.
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(team, run_messages=run_messages, tool=_t)  # type: ignore
        # Case 3: agentic user input (the built-in get_user_input tool).
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(team, run_messages=run_messages, tool=_t)  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: user input required tools — apply input, then execute.
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_user_input_update(team, tool=_t)  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
            # Drain the async generator to actually execute the tool.
            async for _ in arun_tool(team, run_response, run_messages, _t, functions=_functions):  # type: ignore
                pass
async def _ahandle_team_tool_call_updates_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    tools: List[Union[Function, dict]],
    stream_events: bool = False,
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent]]:
    """Async streaming version of _handle_team_tool_call_updates.

    Mirrors agent's ahandle_tool_call_updates_stream but operates on team-level tools.
    Yields events during tool execution for async streaming responses.

    Args:
        stream_events: Forwarded to arun_tool so intermediate tool events are
            emitted as well as results.
    """
    from agno.agent._tools import (
        arun_tool,
        handle_external_execution_update,
        handle_get_user_input_tool_update,
        handle_user_input_update,
        reject_tool_call,
    )
    team.model = cast(Model, team.model)
    # Only Function tools can be (re-)executed here; plain dict tool specs are skipped.
    _functions = {tool.name: tool for tool in tools if isinstance(tool, Function)}
    for _t in run_response.tools or []:
        # Case 1: Handle confirmed tools and execute them
        if _t.requires_confirmation is not None and _t.requires_confirmation is True and _functions:
            # Execute only when confirmed and not already run (result is None).
            if _t.confirmed is not None and _t.confirmed is True and _t.result is None:
                async for event in arun_tool(
                    team,  # type: ignore[arg-type]
                    run_response,  # type: ignore[arg-type]
                    run_messages,
                    _t,
                    functions=_functions,
                    stream_events=stream_events,  # type: ignore
                ):
                    yield event  # type: ignore
            else:
                reject_tool_call(team, run_messages, _t, functions=_functions)  # type: ignore
                _t.confirmed = False
                _t.confirmation_note = _t.confirmation_note or "Tool call was rejected"
                _t.tool_call_error = True
            _t.requires_confirmation = False
        # Case 2: Handle external execution required tools
        elif _t.external_execution_required is not None and _t.external_execution_required is True:
            handle_external_execution_update(team, run_messages=run_messages, tool=_t)  # type: ignore
        # Case 3: Agentic user input required
        elif _t.tool_name == "get_user_input" and _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_get_user_input_tool_update(team, run_messages=run_messages, tool=_t)  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
        # Case 4: Handle user input required tools
        elif _t.requires_user_input is not None and _t.requires_user_input is True:
            handle_user_input_update(team, tool=_t)  # type: ignore
            # Execute the tool with the provided input, yielding events as they arrive.
            async for event in arun_tool(
                team,  # type: ignore[arg-type]
                run_response,  # type: ignore[arg-type]
                run_messages,
                _t,
                functions=_functions,
                stream_events=stream_events,  # type: ignore
            ):
                yield event  # type: ignore
            _t.requires_user_input = False
            _t.answered = True
def _normalize_requirements_payload(
    requirements: List[Any],
) -> List[Any]:
    """Return a new list with any dict entries parsed into RunRequirement objects.

    Non-dict entries pass through unchanged and order is preserved.
    """
    from agno.run.requirement import RunRequirement
    return [RunRequirement.from_dict(item) if isinstance(item, dict) else item for item in requirements]
def _has_member_requirements(requirements: List[Any]) -> bool:
"""Check if any requirements are for member agents (have member_agent_id set)."""
return any(getattr(req, "member_agent_id", None) is not None for req in requirements)
def _has_team_level_requirements(requirements: List[Any]) -> bool:
"""Check if any requirements are for team-level tools (no member_agent_id)."""
return any(getattr(req, "member_agent_id", None) is None for req in requirements)
def _route_requirements_to_members(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> List[str]:
    """Route member requirements back to the appropriate member agents (sync).

    Groups requirements by member_agent_id, calls member.continue_run() for each,
    and returns a list of result descriptions for building a continuation message.

    Side effects: a member that pauses again has its pause propagated onto
    ``run_response`` (chained HITL), and every processed requirement's
    ``_member_run_response`` back-reference is cleared.

    Returns:
        List of member result strings.
    """
    from agno.run.requirement import RunRequirement
    from agno.team._tools import _find_member_route_by_id
    # Group requirements by member
    member_reqs: Dict[str, List[RunRequirement]] = {}
    for req in run_response.requirements or []:
        mid = getattr(req, "member_agent_id", None)
        if mid is not None:
            member_reqs.setdefault(mid, []).append(req)
    member_results: List[str] = []
    for member_id, reqs in member_reqs.items():
        route_result = _find_member_route_by_id(team, member_id, run_context=run_context)
        if route_result is None:
            log_warning(f"Could not find member with ID {member_id} for continue_run routing")
            member_results.append(f"[{member_id}]: Could not route requirement — member not found")
            continue
        _, member = route_result
        # Get the member's paused RunOutput from the requirement.
        # This is stored by _propagate_member_pause and avoids needing a
        # session/DB lookup (which fails without a database since
        # initialize_team clears the cached session).
        member_run_output = getattr(reqs[0], "_member_run_response", None)
        if member_run_output is not None:
            # Update requirements and tool executions on the member's run output
            member_run_output.requirements = reqs
            updated_tools = [req.tool_execution for req in reqs if req.tool_execution is not None]
            if updated_tools and member_run_output.tools:
                # Replace paused tool executions with their resolved counterparts, keyed by call id.
                updated_map = {t.tool_call_id: t for t in updated_tools}
                member_run_output.tools = [updated_map.get(t.tool_call_id, t) for t in member_run_output.tools]
            member_response = member.continue_run(
                run_response=member_run_output,
                session_id=session.session_id,
            )
        else:
            # Fallback: use run_id (requires DB or cached session)
            member_run_id = reqs[0].member_run_id if reqs else None
            member_response = member.continue_run(
                run_id=member_run_id,
                requirements=reqs,
                session_id=session.session_id,
            )
        # Check if member is still paused (chained HITL)
        if getattr(member_response, "is_paused", False):
            from agno.team._tools import _propagate_member_pause
            _propagate_member_pause(run_response, member, member_response)
        else:
            content = getattr(member_response, "content", None) or "Task completed"
            member_results.append(f"[{member.name or member_id}]: {content}")
        # Clear _member_run_response references to allow GC of the member RunOutput
        for req in reqs:
            req._member_run_response = None
    return member_results
async def _aroute_requirements_to_members(
    team: "Team",
    run_response: TeamRunOutput,
    session: TeamSession,
    run_context: Optional[RunContext] = None,
) -> List[str]:
    """Route member requirements back to the appropriate member agents (async).

    Runs member continue_run() calls concurrently with asyncio.gather.
    A failing member task is logged and skipped rather than failing the whole
    batch; a member that pauses again has its pause propagated onto
    ``run_response`` (chained HITL) and contributes no result string.

    Returns:
        List of member result strings.
    """
    from agno.run.requirement import RunRequirement
    from agno.team._tools import _find_member_route_by_id
    # Group requirements by member
    member_reqs: Dict[str, List[RunRequirement]] = {}
    for req in run_response.requirements or []:
        mid = getattr(req, "member_agent_id", None)
        if mid is not None:
            member_reqs.setdefault(mid, []).append(req)
    if not member_reqs:
        return []
    async def _continue_member(member_id: str, reqs: List[RunRequirement]) -> Optional[str]:
        # Continue one member's paused run; returns a result string, or None when
        # the member paused again (chained HITL).
        route_result = _find_member_route_by_id(team, member_id, run_context=run_context)
        if route_result is None:
            log_warning(f"Could not find member with ID {member_id} for continue_run routing")
            return f"[{member_id}]: Could not route requirement — member not found"
        _, member = route_result
        # Get the member's paused RunOutput from the requirement
        member_run_output = getattr(reqs[0], "_member_run_response", None)
        if member_run_output is not None:
            member_run_output.requirements = reqs
            updated_tools = [req.tool_execution for req in reqs if req.tool_execution is not None]
            if updated_tools and member_run_output.tools:
                # Replace paused tool executions with their resolved counterparts, keyed by call id.
                updated_map = {t.tool_call_id: t for t in updated_tools}
                member_run_output.tools = [updated_map.get(t.tool_call_id, t) for t in member_run_output.tools]
            member_response = await member.acontinue_run(  # type: ignore[misc]
                run_response=member_run_output,
                session_id=session.session_id,
            )
        else:
            # Fallback: use run_id (requires DB or cached session)
            member_run_id = reqs[0].member_run_id if reqs else None
            member_response = await member.acontinue_run(  # type: ignore[misc]
                run_id=member_run_id,
                requirements=reqs,
                session_id=session.session_id,
            )
        # Clear _member_run_response references to allow GC of the member RunOutput
        for req in reqs:
            req._member_run_response = None
        if getattr(member_response, "is_paused", False):
            from agno.team._tools import _propagate_member_pause
            _propagate_member_pause(run_response, member, member_response)
            return None
        else:
            content = getattr(member_response, "content", None) or "Task completed"
            return f"[{member.name or member_id}]: {content}"
    tasks = [_continue_member(mid, reqs) for mid, reqs in member_reqs.items()]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    member_results: List[str] = []
    for r in results:
        if isinstance(r, BaseException):
            log_warning(f"Member continue_run failed: {r}")
        elif isinstance(r, str):
            member_results.append(r)
    return member_results
def _build_continuation_message(member_results: List[str]) -> str:
"""Build a user message from member results to feed back into the team model."""
if not member_results:
return "The delegated task has been completed."
parts = ["Member results after human-in-the-loop resolution:"]
parts.extend(member_results)
return "\n".join(parts)
def continue_run_dispatch(
    team: "Team",
    run_response: Optional[TeamRunOutput] = None,
    *,
    run_id: Optional[str] = None,
    requirements: Optional[List[Any]] = None,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = False,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    **kwargs: Any,
) -> Union[TeamRunOutput, Iterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]]:
    """Continue a paused team run (sync).

    Handles both team-level tool pauses and member-agent tool pauses.

    Flow: validate inputs -> initialize team/session/run context -> resolve the
    paused run (from ``run_response`` or by ``run_id`` lookup) -> apply incoming
    ``requirements`` -> route member requirements to member agents -> either
    resume the team model for team-level pauses, start a fresh run fed with the
    member results, or (nothing left to do) finalize and return the run.

    Raises:
        ValueError: If neither run_response nor run_id is given, if run_id is
            given without a resolvable session_id, or if run_id is given
            without requirements.
        RuntimeError: If run_id does not match any run in the session.
        Exception: If the team is configured with an async DB (use acontinue_run).
    """
    from agno.team._init import _has_async_db, _initialize_session
    from agno.team._response import get_response_format
    from agno.team._run_options import resolve_run_options
    from agno.team._storage import _load_session_state, _read_or_create_session, _update_metadata
    from agno.team._tools import _determine_tools_for_model
    if run_response is None and run_id is None:
        raise ValueError("Either run_response or run_id must be provided.")
    if run_response is None and (run_id is not None and (session_id is None and team.session_id is None)):
        raise ValueError("Session ID is required to continue a run from a run_id.")
    if _has_async_db(team):
        raise Exception("continue_run() is not supported with an async DB. Please use acontinue_run() instead.")
    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks
        background_tasks: BackgroundTasks = background_tasks  # type: ignore
    # A provided run_response is authoritative for session and run identifiers.
    session_id = run_response.session_id if run_response else session_id
    run_id_resolved: str = run_response.run_id if run_response else run_id  # type: ignore
    session_id, user_id = _initialize_session(team, session_id=session_id, user_id=user_id)
    # Initialize the Team
    team.initialize_team(debug_mode=debug_mode)
    # Read existing session from storage
    team_session = _read_or_create_session(team, session_id=session_id, user_id=user_id)
    _update_metadata(team, session=team_session)
    # Load session state
    session_state = _load_session_state(team, session=team_session, session_state={})
    # Resolve run options
    opts = resolve_run_options(
        team,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
    )
    # Initialize run context
    run_context = run_context or RunContext(
        run_id=run_id_resolved,
        session_id=session_id,
        user_id=user_id,
        session_state=session_state,
        dependencies=opts.dependencies,
        knowledge_filters=opts.knowledge_filters,
        metadata=opts.metadata,
    )
    # Explicit call arguments override a pre-existing run_context; otherwise
    # only fill fields the caller's run_context left unset.
    if dependencies is not None:
        run_context.dependencies = opts.dependencies
    elif run_context.dependencies is None:
        run_context.dependencies = opts.dependencies
    if knowledge_filters is not None:
        run_context.knowledge_filters = opts.knowledge_filters
    elif run_context.knowledge_filters is None:
        run_context.knowledge_filters = opts.knowledge_filters
    if metadata is not None:
        run_context.metadata = opts.metadata
    elif run_context.metadata is None:
        run_context.metadata = opts.metadata
    # Resolve dependencies
    if run_context.dependencies is not None:
        _resolve_run_dependencies(team, run_context=run_context)
    # Resolve run_response from run_id if needed
    if run_response is None and run_id is not None:
        if requirements is None:
            raise ValueError("To continue a run from a given run_id, the requirements parameter must be provided.")
        runs = team_session.runs or []
        run_response = next((r for r in runs if r.run_id == run_id), None)  # type: ignore
        if run_response is None:
            raise RuntimeError(f"No runs found for run ID {run_id}")
    run_response = cast(TeamRunOutput, run_response)
    # Normalize and apply requirements
    if requirements is not None:
        requirements = _normalize_requirements_payload(requirements)
        run_response.requirements = requirements
        # Update tools from requirements
        updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
        if updated_tools and run_response.tools:
            updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
            run_response.tools = [updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools]
        elif updated_tools:
            run_response.tools = updated_tools
    # Determine what kind of pause we're continuing from
    has_member = _has_member_requirements(run_response.requirements or [])
    has_team_level = _has_team_level_requirements(run_response.requirements or [])
    # Route member requirements to member agents
    member_results: List[str] = []
    if has_member:
        member_reqs = [r for r in (run_response.requirements or []) if getattr(r, "member_agent_id", None) is not None]
        team_level_reqs = [r for r in (run_response.requirements or []) if getattr(r, "member_agent_id", None) is None]
        # Set only member reqs for routing; _route_requirements_to_members
        # may append newly propagated reqs via _propagate_member_pause (chained HITL).
        original_member_req_ids = {id(r) for r in member_reqs}
        run_response.requirements = member_reqs
        member_results = _route_requirements_to_members(
            team, run_response=run_response, session=team_session, run_context=run_context
        )
        # Merge: keep team-level reqs + any newly propagated member reqs (chained HITL)
        newly_propagated = [r for r in (run_response.requirements or []) if id(r) not in original_member_req_ids]
        run_response.requirements = team_level_reqs + newly_propagated
        # Check if any members are still paused
        if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
            from agno.team import _hooks
            if opts.stream:
                return _hooks.handle_team_run_paused_stream(
                    team, run_response=run_response, session=team_session, run_context=run_context
                )  # type: ignore
            else:
                return _hooks.handle_team_run_paused(
                    team, run_response=run_response, session=team_session, run_context=run_context
                )
    # Handle team-level tool resolution
    if has_team_level:
        # Guard: if team-level requirements are unresolved, re-pause instead of auto-rejecting
        unresolved_team = [
            r
            for r in (run_response.requirements or [])
            if getattr(r, "member_agent_id", None) is None and not r.is_resolved()
        ]
        if unresolved_team:
            from agno.team import _hooks
            if opts.stream:
                return _hooks.handle_team_run_paused_stream(
                    team, run_response=run_response, session=team_session, run_context=run_context
                )  # type: ignore
            else:
                return _hooks.handle_team_run_paused(
                    team, run_response=run_response, session=team_session, run_context=run_context
                )
        # A parser model renders the response format later, so skip it here.
        response_format = get_response_format(team, run_context=run_context) if team.parser_model is None else None
        team.model = cast(Model, team.model)
        # Prepare tools
        team_run_context: Dict[str, Any] = {}
        _tools = _determine_tools_for_model(
            team,
            model=team.model,
            run_response=run_response,
            run_context=run_context,
            team_run_context=team_run_context,
            session=team_session,
            user_id=user_id,
            async_mode=False,
            stream=opts.stream or False,
            stream_events=opts.stream_events or False,
        )
        # Get continue run messages from existing conversation
        input_messages = run_response.messages or []
        run_messages = _get_continue_run_messages(team, input=input_messages)
        # Handle tool call updates (execute confirmed tools, etc.)
        _handle_team_tool_call_updates(team, run_response=run_response, run_messages=run_messages, tools=_tools)
        # Reset run state for continuation
        run_response.status = RunStatus.running
        # Reset content before re-running the model; _update_run_response appends
        # to existing content, so stale content from the paused run must be cleared.
        run_response.content = None
        log_debug(f"Team Continue Run Start: {run_response.run_id}", center=True)
        if opts.stream:
            return _continue_run_stream(
                team,
                run_response=run_response,
                run_messages=run_messages,
                run_context=run_context,
                tools=_tools,
                session=team_session,
                user_id=user_id,
                response_format=response_format,
                stream_events=opts.stream_events,
                yield_run_output=opts.yield_run_output,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
        else:
            return _continue_run(
                team,
                run_response=run_response,
                run_messages=run_messages,
                run_context=run_context,
                tools=_tools,
                session=team_session,
                user_id=user_id,
                response_format=response_format,
                debug_mode=debug_mode,
                background_tasks=background_tasks,
                **kwargs,
            )
    # Member-only case: re-run team model with member results
    if member_results and not has_team_level:
        continuation_message = _build_continuation_message(member_results)
        # Mark original paused run as completed before starting a fresh run
        run_response.status = RunStatus.completed
        _cleanup_and_store(team, run_response=run_response, session=team_session)
        if opts.stream:
            return team.run(  # type: ignore
                input=continuation_message,
                stream=True,
                stream_events=opts.stream_events,
                session_id=session_id,
                user_id=user_id,
                knowledge_filters=knowledge_filters,
                dependencies=dependencies,
                metadata=metadata,
                debug_mode=debug_mode,
                **kwargs,
            )
        else:
            return team.run(
                input=continuation_message,
                stream=False,
                session_id=session_id,
                user_id=user_id,
                knowledge_filters=knowledge_filters,
                dependencies=dependencies,
                metadata=metadata,
                debug_mode=debug_mode,
                **kwargs,
            )
    # Fallback: nothing to do
    run_response.status = RunStatus.completed
    _cleanup_and_store(team, run_response=run_response, session=team_session)
    return run_response
def _continue_run(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: RunContext,
    tools: List[Union[Function, dict]],
    session: TeamSession,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Continue a paused team run (sync, non-streaming).

    Steps:
        1. Generate response from model (includes running tool calls)
        2. Update TeamRunOutput with model response
        3. Check for new pauses
        4. Convert response to structured format
        5. Create session summary
        6. Cleanup and store

    Args:
        team: The team whose paused run is being continued.
        run_response: The paused run output; mutated in place and returned.
        run_messages: The messages to send to the model (prepared by the caller).
        run_context: Per-run context (session state, dependencies, metadata).
        tools: Tools exposed to the model for this continuation.
        session: The team session this run belongs to.
        user_id: Optional user identifier, forwarded to post-hooks.
        response_format: Optional structured-output format passed to the model.
        debug_mode: Optional debug flag, forwarded to post-hooks.
        background_tasks: Optional task container, forwarded to post-hooks.
        **kwargs: Extra keyword arguments forwarded to post-hooks.

    Returns:
        The updated TeamRunOutput with status completed, paused (new
        unresolved requirements appeared), cancelled, or error.
    """
    from agno.team._hooks import _execute_post_hooks
    from agno.team._init import _disconnect_connectable_tools
    from agno.team._response import (
        _convert_response_to_structured_format,
        _update_run_response,
        parse_response_with_output_model,
        parse_response_with_parser_model,
    )
    from agno.team._telemetry import log_team_telemetry
    from agno.utils.events import create_team_run_continued_event

    # Track the run so cancellation requests can reach it (paired with
    # cleanup_run in the finally block below).
    register_run(run_response.run_id)  # type: ignore
    # Emit RunContinued event (matching streaming variant behaviour)
    handle_event(
        create_team_run_continued_event(run_response),
        run_response,
        events_to_skip=team.events_to_skip,
        store_events=team.store_events,
    )
    team.model = cast(Model, team.model)
    try:
        # team.retries extra attempts on generic failures; each retry re-runs
        # the whole model call from the same run_messages.
        num_attempts = team.retries + 1
        for attempt in range(num_attempts):
            try:
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # Generate model response
                model_response: ModelResponse = team.model.response(
                    messages=run_messages.messages,
                    response_format=response_format,
                    tools=tools,
                    tool_choice=team.tool_choice,
                    tool_call_limit=team.tool_call_limit,
                    run_response=run_response,
                    send_media_to_model=team.send_media_to_model,
                    compression_manager=team.compression_manager if team.compress_tool_results else None,
                )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # Parse with output/parser models if needed
                parse_response_with_output_model(team, model_response, run_messages)
                parse_response_with_parser_model(team, model_response, run_messages, run_context=run_context)
                # Update run response
                _update_run_response(
                    team,
                    model_response=model_response,
                    run_response=run_response,
                    run_messages=run_messages,
                    run_context=run_context,
                )
                # Check for new pauses (team-level tools or member propagation):
                # any unresolved requirement re-pauses the run instead of completing it.
                if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                    from agno.team import _hooks

                    return _hooks.handle_team_run_paused(
                        team, run_response=run_response, session=session, run_context=run_context
                    )
                # Convert to structured format
                _convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
                # Store media
                if team.store_media:
                    store_media_util(run_response, model_response)
                # Execute post-hooks
                if team.post_hooks is not None:
                    iterator = _execute_post_hooks(
                        team,
                        hooks=team.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    # Drain the hook generator for side effects; events are
                    # discarded in the non-streaming path.
                    deque(iterator, maxlen=0)
                # Create session summary
                if team.session_summary_manager is not None:
                    # Persist the run on the session first so the summary sees it.
                    session.upsert_run(run_response=run_response)
                    try:
                        team.session_summary_manager.create_session_summary(
                            session=session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary failure is non-fatal; the run still completes.
                        log_warning(f"Error in session summary creation: {str(e)}")
                # Complete
                run_response.status = RunStatus.completed
                _cleanup_and_store(team, run_response=run_response, session=session)
                log_team_telemetry(team, session_id=session.session_id, run_id=run_response.run_id)
                log_debug(f"Team Continue Run End: {run_response.run_id}", center=True, symbol="*")
                return run_response
            except RunCancelledException as e:
                log_info(f"Team run {run_response.run_id} was cancelled")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                _cleanup_and_store(team, run_response=run_response, session=session)
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                # Guardrail failures are terminal: record an error event, no retry.
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                return run_response
            except KeyboardInterrupt:
                # NOTE(review): unlike the other terminal paths, this one does not
                # call _cleanup_and_store before returning — confirm the run is
                # intentionally left un-persisted on Ctrl-C.
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                return run_response
            except Exception as e:
                # Generic failures are retried with (optionally exponential) backoff.
                if attempt < num_attempts - 1:
                    import time as _time

                    if team.exponential_backoff:
                        delay = team.delay_between_retries * (2**attempt)
                    else:
                        delay = team.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    _time.sleep(delay)
                    continue
                # Last attempt failed: record the error on the run and return it.
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(run_response, error=str(e))
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Team continue_run: {str(e)}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                return run_response
    finally:
        # Always release tool connections and deregister the run, regardless
        # of which path (success, pause, cancel, error) was taken.
        _disconnect_connectable_tools(team)
        cleanup_run(run_response.run_id)  # type: ignore
    # Defensive fallback; every loop iteration returns, so this is normally unreachable.
    return run_response
def _continue_run_stream(
    team: "Team",
    run_response: TeamRunOutput,
    run_messages: RunMessages,
    run_context: RunContext,
    tools: List[Union[Function, dict]],
    session: TeamSession,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    stream_events: bool = False,
    yield_run_output: bool = False,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> Iterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
    """Continue a paused team run (sync, streaming).

    Generator counterpart of ``_continue_run``: executes updated tool calls,
    streams the model response, and emits lifecycle events. If new unresolved
    requirements appear the run is re-paused and the generator ends early.

    Args:
        team: The team whose paused run is being continued.
        run_response: The paused run output; mutated in place as events arrive.
        run_messages: The messages to send to the model.
        run_context: Per-run context (session state, dependencies, metadata).
        tools: Tools exposed to the model for this continuation.
        session: The team session this run belongs to.
        user_id: Optional user identifier, forwarded to post-hooks.
        response_format: Optional structured-output format passed to the model.
        stream_events: When True, lifecycle events (continued, content completed,
            session summary, completed, cancelled) are yielded as well.
        yield_run_output: When True, the final TeamRunOutput is yielded as the
            last item of the stream.
        debug_mode: Optional debug flag, forwarded to post-hooks.
        background_tasks: Optional task container, forwarded to post-hooks.
        **kwargs: Extra keyword arguments forwarded to post-hooks.

    Yields:
        Run events (and optionally the final TeamRunOutput).
    """
    from agno.team._hooks import _execute_post_hooks
    from agno.team._init import _disconnect_connectable_tools
    from agno.team._response import (
        _handle_model_response_stream,
        generate_response_with_output_model_stream,
        parse_response_with_parser_model_stream,
    )
    from agno.team._telemetry import log_team_telemetry
    from agno.utils.events import create_team_run_continued_event

    # Track the run so cancellation requests can reach it (paired with
    # cleanup_run in the finally block below).
    register_run(run_response.run_id)  # type: ignore
    try:
        # team.retries extra attempts on generic failures.
        num_attempts = team.retries + 1
        for attempt in range(num_attempts):
            try:
                # Yield RunContinued event
                if stream_events:
                    yield handle_event(
                        create_team_run_continued_event(run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # Handle the updated tools (execute confirmed tools, etc.) with streaming
                yield from _handle_team_tool_call_updates_stream(
                    team,
                    run_response=run_response,
                    run_messages=run_messages,
                    tools=tools,
                    stream_events=stream_events,
                )
                # Stream model response
                if team.output_model is None:
                    for event in _handle_model_response_stream(
                        team,
                        session=session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                else:
                    from agno.run.team import IntermediateRunContentEvent, RunContentEvent

                    # With an output model, raw model content is only an
                    # intermediate artifact: re-emit it as
                    # IntermediateRunContentEvent (when streaming events) so
                    # consumers can distinguish it from the final content.
                    for event in _handle_model_response_stream(
                        team,
                        session=session,
                        run_response=run_response,
                        run_messages=run_messages,
                        tools=tools,
                        response_format=response_format,
                        stream_events=stream_events,
                        session_state=run_context.session_state,
                        run_context=run_context,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        if isinstance(event, RunContentEvent):
                            if stream_events:
                                yield IntermediateRunContentEvent(
                                    content=event.content,
                                    content_type=event.content_type,
                                )
                        else:
                            yield event
                    # The output model then produces the final content stream.
                    for event in generate_response_with_output_model_stream(
                        team,
                        session=session,
                        run_response=run_response,
                        run_messages=run_messages,
                        stream_events=stream_events,
                    ):
                        raise_if_cancelled(run_response.run_id)  # type: ignore
                        yield event
                raise_if_cancelled(run_response.run_id)  # type: ignore
                # Check for new pauses: any unresolved requirement re-pauses the
                # run and ends this generator early.
                if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                    from agno.team import _hooks

                    yield from _hooks.handle_team_run_paused_stream(
                        team, run_response=run_response, session=session, run_context=run_context
                    )
                    if yield_run_output:
                        yield run_response
                    return
                # Parse response with parser model
                yield from parse_response_with_parser_model_stream(
                    team,
                    session=session,
                    run_response=run_response,
                    stream_events=stream_events,
                    run_context=run_context,
                )
                # Content completed event
                if stream_events:
                    yield handle_event(
                        create_team_run_content_completed_event(from_run_response=run_response),
                        run_response,
                        events_to_skip=team.events_to_skip,
                        store_events=team.store_events,
                    )
                # Post-hooks: unlike the non-streaming path, hook events are
                # forwarded to the consumer instead of being drained.
                if team.post_hooks is not None:
                    iterator = _execute_post_hooks(
                        team,
                        hooks=team.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    )
                    for hook_event in iterator:
                        yield hook_event
                # Session summary
                if team.session_summary_manager is not None:
                    # Persist the run on the session first so the summary sees it.
                    session.upsert_run(run_response=run_response)
                    if stream_events:
                        yield handle_event(
                            create_team_session_summary_started_event(from_run_response=run_response),
                            run_response,
                            events_to_skip=team.events_to_skip,
                            store_events=team.store_events,
                        )
                    try:
                        team.session_summary_manager.create_session_summary(
                            session=session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary failure is non-fatal; the run still completes.
                        log_warning(f"Error in session summary creation: {str(e)}")
                    if stream_events:
                        yield handle_event(
                            create_team_session_summary_completed_event(
                                from_run_response=run_response, session_summary=session.summary
                            ),
                            run_response,
                            events_to_skip=team.events_to_skip,
                            store_events=team.store_events,
                        )
                # Completed event is created (and stored on the run) before the
                # status flip and persistence, but only yielded when streaming events.
                completed_event = handle_event(
                    create_team_run_completed_event(run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                run_response.status = RunStatus.completed
                _cleanup_and_store(team, run_response=run_response, session=session)
                if stream_events:
                    yield completed_event
                if yield_run_output:
                    yield run_response
                log_team_telemetry(team, session_id=session.session_id, run_id=run_response.run_id)
                log_debug(f"Team Continue Run End: {run_response.run_id}", center=True, symbol="*")
                # Success: leave the retry loop.
                break
            except RunCancelledException as e:
                log_info(f"Team run {run_response.run_id} was cancelled")
                run_response.status = RunStatus.cancelled
                if not run_response.content:
                    run_response.content = str(e)
                yield handle_event(
                    create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                _cleanup_and_store(team, run_response=run_response, session=session)
                break
            except (InputCheckError, OutputCheckError) as e:
                # Guardrail failures are terminal: record and yield the error event, no retry.
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(
                    run_response,
                    error=str(e),
                    error_id=e.error_id,
                    error_type=e.type,
                    additional_data=e.additional_data,
                )
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                yield run_error
                break
            except KeyboardInterrupt:
                # NOTE(review): the cancelled event is yielded but neither the
                # status flip nor _cleanup_and_store happens here — confirm the
                # run is intentionally left un-persisted on Ctrl-C.
                yield handle_event(
                    create_team_run_cancelled_event(
                        from_run_response=run_response, reason="Operation cancelled by user"
                    ),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                break
            except Exception as e:
                # Generic failures are retried with (optionally exponential) backoff.
                if attempt < num_attempts - 1:
                    import time as _time

                    if team.exponential_backoff:
                        delay = team.delay_between_retries * (2**attempt)
                    else:
                        delay = team.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    _time.sleep(delay)
                    continue
                # Last attempt failed: record and yield the error event.
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(run_response, error=str(e))
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Team continue_run stream: {str(e)}")
                _cleanup_and_store(team, run_response=run_response, session=session)
                yield run_error
    finally:
        # Always release tool connections and deregister the run, even if the
        # consumer abandons the generator mid-stream.
        _disconnect_connectable_tools(team)
        cleanup_run(run_response.run_id)  # type: ignore
def acontinue_run_dispatch(  # type: ignore
    team: "Team",
    run_response: Optional[TeamRunOutput] = None,
    *,
    run_id: Optional[str] = None,
    requirements: Optional[List[Any]] = None,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = False,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    yield_run_output: bool = False,
    **kwargs: Any,
) -> Union[TeamRunOutput, AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]]:
    """Continue a paused team run (async entry point).

    Validates the identifying arguments, initializes the team and run
    context, then routes to ``_acontinue_run`` or ``_acontinue_run_stream``
    depending on the resolved ``stream`` option.
    """
    from agno.team._init import _initialize_session
    from agno.team._response import get_response_format
    from agno.team._run_options import resolve_run_options

    # The run must be identified either by the paused TeamRunOutput itself or
    # by a run_id (which additionally needs a session id from somewhere).
    if run_response is None and run_id is None:
        raise ValueError("Either run_response or run_id must be provided.")
    if run_response is None and run_id is not None and session_id is None and team.session_id is None:
        raise ValueError("Session ID is required to continue a run from a run_id.")

    background_tasks = kwargs.pop("background_tasks", None)
    if background_tasks is not None:
        from fastapi import BackgroundTasks

        background_tasks: BackgroundTasks = background_tasks  # type: ignore

    # Identifiers carried by the run_response win over the explicit arguments.
    if run_response:
        resolved_session_id = run_response.session_id
        resolved_run_id: str = run_response.run_id  # type: ignore
    else:
        resolved_session_id = session_id
        resolved_run_id = run_id  # type: ignore
    resolved_session_id, user_id = _initialize_session(team, session_id=resolved_session_id, user_id=user_id)

    # Initialize the Team
    team.initialize_team(debug_mode=debug_mode)

    # Resolve run options (merging call arguments with team defaults)
    opts = resolve_run_options(
        team,
        stream=stream,
        stream_events=stream_events,
        yield_run_output=yield_run_output,
        dependencies=dependencies,
        knowledge_filters=knowledge_filters,
        metadata=metadata,
    )

    # Build a fresh run context unless the caller supplied one.
    if not run_context:
        run_context = RunContext(
            run_id=resolved_run_id,
            session_id=resolved_session_id,
            user_id=user_id,
            session_state={},
            dependencies=opts.dependencies,
            knowledge_filters=opts.knowledge_filters,
            metadata=opts.metadata,
        )
    # Explicit call arguments override the context; otherwise only fill slots
    # the provided context left empty.
    if dependencies is not None or run_context.dependencies is None:
        run_context.dependencies = opts.dependencies
    if knowledge_filters is not None or run_context.knowledge_filters is None:
        run_context.knowledge_filters = opts.knowledge_filters
    if metadata is not None or run_context.metadata is None:
        run_context.metadata = opts.metadata

    # A parser model supersedes a native response format.
    if team.parser_model is None:
        response_format = get_response_format(team, run_context=run_context)
    else:
        response_format = None

    # Arguments shared by both continuation variants.
    common_args: Dict[str, Any] = dict(
        run_response=run_response,
        run_context=run_context,
        requirements=requirements,
        run_id=resolved_run_id,
        user_id=user_id,
        session_id=resolved_session_id,
        response_format=response_format,
        debug_mode=debug_mode,
        background_tasks=background_tasks,
    )
    if opts.stream:
        return _acontinue_run_stream(
            team,
            stream_events=opts.stream_events,
            yield_run_output=opts.yield_run_output,
            **common_args,
            **kwargs,
        )
    return _acontinue_run(team, **common_args, **kwargs)  # type: ignore
async def _acontinue_run(
    team: "Team",
    session_id: str,
    run_context: RunContext,
    run_response: Optional[TeamRunOutput] = None,
    requirements: Optional[List[Any]] = None,
    run_id: Optional[str] = None,
    user_id: Optional[str] = None,
    response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    debug_mode: Optional[bool] = None,
    background_tasks: Optional[Any] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Continue a paused team run (async, non-streaming).

    Unlike the sync variant, this resolves the run itself: it loads the
    session, optionally looks up the paused run by ``run_id``, applies the
    caller-supplied ``requirements`` (resolved HITL decisions), routes
    member-level requirements to the member agents, and then either
    re-pauses, re-runs the team model with the updated tools, or re-runs
    the whole team with a continuation message built from member results.

    Args:
        team: The team whose paused run is being continued.
        session_id: Session the paused run belongs to.
        run_context: Per-run context (session state, dependencies, metadata).
        run_response: The paused run output; when None it is looked up in the
            session by ``run_id`` (in which case ``requirements`` is mandatory).
        requirements: Resolved HITL requirements to apply to the run.
        run_id: Identifier of the paused run (used when run_response is None).
        user_id: Optional user identifier, forwarded to tools and post-hooks.
        response_format: Optional structured-output format passed to the model.
        debug_mode: Optional debug flag, forwarded to post-hooks and re-runs.
        background_tasks: Optional task container, forwarded to post-hooks.
        **kwargs: Extra keyword arguments forwarded to post-hooks / team re-run.

    Returns:
        The updated TeamRunOutput with status completed, paused, cancelled,
        or error.

    Raises:
        ValueError: If continuing from a run_id without requirements.
        RuntimeError: If no run with ``run_id`` exists in the session.
    """
    from agno.team._hooks import _aexecute_post_hooks
    from agno.team._init import _disconnect_connectable_tools, _disconnect_mcp_tools
    from agno.team._response import (
        _convert_response_to_structured_format,
        _update_run_response,
        agenerate_response_with_output_model,
        aparse_response_with_parser_model,
    )
    from agno.team._telemetry import alog_team_telemetry
    from agno.team._tools import _check_and_refresh_mcp_tools, _determine_tools_for_model

    log_debug(f"Team Continue Run: {run_response.run_id if run_response else run_id}", center=True)
    team_session: Optional[TeamSession] = None
    try:
        # team.retries extra attempts on generic failures; each retry re-runs
        # the whole setup + model call.
        num_attempts = team.retries + 1
        for attempt in range(num_attempts):
            try:
                # Setup session
                team_session = await _asetup_session(
                    team=team,
                    run_context=run_context,
                    session_id=session_id,
                    user_id=user_id,
                    run_id=run_id,
                )
                # Resolve run_response from run_id if needed
                if run_response is None and run_id is not None:
                    if requirements is None:
                        raise ValueError("Requirements are required to continue a run from a run_id.")
                    runs = team_session.runs or []
                    run_response = next((r for r in runs if r.run_id == run_id), None)  # type: ignore
                    if run_response is None:
                        raise RuntimeError(f"No runs found for run ID {run_id}")
                run_response = cast(TeamRunOutput, run_response)
                # Normalize and apply requirements: replace the run's stored
                # tool executions with the (possibly updated) ones carried by
                # the resolved requirements, matched by tool_call_id.
                if requirements is not None:
                    requirements = _normalize_requirements_payload(requirements)
                    run_response.requirements = requirements
                    updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
                    if updated_tools and run_response.tools:
                        updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
                        run_response.tools = [
                            updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools
                        ]
                    elif updated_tools:
                        run_response.tools = updated_tools
                # Track the run so cancellation requests can reach it (paired
                # with acleanup_run in the finally block below).
                await aregister_run(run_response.run_id)  # type: ignore
                # Emit RunContinued event (matching streaming variant behaviour).
                # NOTE(review): emitted inside the retry loop, so a retried
                # attempt re-emits RunContinued.
                from agno.utils.events import create_team_run_continued_event

                handle_event(
                    create_team_run_continued_event(run_response),
                    run_response,
                    events_to_skip=team.events_to_skip,
                    store_events=team.store_events,
                )
                has_member = _has_member_requirements(run_response.requirements or [])
                has_team_level = _has_team_level_requirements(run_response.requirements or [])
                # Route member requirements
                member_results: List[str] = []
                if has_member:
                    # Split requirements by origin: member-level carry a
                    # member_agent_id, team-level do not.
                    member_reqs = [
                        r for r in (run_response.requirements or []) if getattr(r, "member_agent_id", None) is not None
                    ]
                    team_level_reqs = [
                        r for r in (run_response.requirements or []) if getattr(r, "member_agent_id", None) is None
                    ]
                    # Identity (not equality) tracking so requirements that the
                    # members newly propagate can be told apart afterwards.
                    original_member_req_ids = {id(r) for r in member_reqs}
                    run_response.requirements = member_reqs
                    member_results = await _aroute_requirements_to_members(
                        team, run_response=run_response, session=team_session, run_context=run_context
                    )
                    # Merge: keep team-level reqs + any newly propagated member reqs (chained HITL)
                    newly_propagated = [
                        r for r in (run_response.requirements or []) if id(r) not in original_member_req_ids
                    ]
                    run_response.requirements = team_level_reqs + newly_propagated
                # Check if still paused: any unresolved requirement re-pauses the run.
                if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                    from agno.team import _hooks

                    return await _hooks.ahandle_team_run_paused(
                        team, run_response=run_response, session=team_session, run_context=run_context
                    )
                # Handle team-level tool resolution
                if has_team_level:
                    # Guard: if team-level requirements are unresolved, re-pause instead of auto-rejecting
                    unresolved_team = [
                        r
                        for r in (run_response.requirements or [])
                        if getattr(r, "member_agent_id", None) is None and not r.is_resolved()
                    ]
                    if unresolved_team:
                        from agno.team import _hooks

                        return await _hooks.ahandle_team_run_paused(
                            team, run_response=run_response, session=team_session, run_context=run_context
                        )
                    team.model = cast(Model, team.model)
                    await _check_and_refresh_mcp_tools(team)
                    team_run_context: Dict[str, Any] = {}
                    _tools = _determine_tools_for_model(
                        team,
                        model=team.model,
                        run_response=run_response,
                        run_context=run_context,
                        team_run_context=team_run_context,
                        session=team_session,
                        user_id=user_id,
                        async_mode=True,
                    )
                    # Rebuild run messages from the paused run's own message history.
                    input_messages = run_response.messages or []
                    run_messages = _get_continue_run_messages(team, input=input_messages)
                    # Execute the confirmed/updated tool calls before re-running the model.
                    await _ahandle_team_tool_call_updates(
                        team, run_response=run_response, run_messages=run_messages, tools=_tools
                    )
                    # Reset run state; stale content from the paused run must be
                    # cleared because _update_run_response appends to it.
                    run_response.status = RunStatus.running
                    run_response.content = None
                    # Get model response
                    model_response: ModelResponse = await team.model.aresponse(
                        messages=run_messages.messages,
                        response_format=response_format,
                        tools=_tools,
                        tool_choice=team.tool_choice,
                        tool_call_limit=team.tool_call_limit,
                        run_response=run_response,
                        send_media_to_model=team.send_media_to_model,
                        compression_manager=team.compression_manager if team.compress_tool_results else None,
                    )
                    await araise_if_cancelled(run_response.run_id)  # type: ignore
                    await agenerate_response_with_output_model(team, model_response, run_messages)
                    await aparse_response_with_parser_model(team, model_response, run_messages, run_context=run_context)
                    _update_run_response(
                        team,
                        model_response=model_response,
                        run_response=run_response,
                        run_messages=run_messages,
                        run_context=run_context,
                    )
                    # Check for new pauses
                    if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
                        from agno.team import _hooks

                        return await _hooks.ahandle_team_run_paused(
                            team, run_response=run_response, session=team_session, run_context=run_context
                        )
                    _convert_response_to_structured_format(team, run_response=run_response, run_context=run_context)
                    if team.store_media:
                        store_media_util(run_response, model_response)
                elif member_results:
                    # Member-only: re-run team with results
                    continuation_message = _build_continuation_message(member_results)
                    # Mark original paused run as completed before starting a fresh run
                    run_response.status = RunStatus.completed
                    if team_session is not None:
                        await _acleanup_and_store(team, run_response=run_response, session=team_session)
                    # The fresh run produces its own TeamRunOutput, which is what
                    # we return (post-hooks/summary below are skipped for this path).
                    result = await team.arun(  # type: ignore[misc]
                        input=continuation_message,
                        stream=False,
                        session_id=session_id,
                        user_id=user_id,
                        knowledge_filters=run_context.knowledge_filters,
                        dependencies=run_context.dependencies,
                        metadata=run_context.metadata,
                        debug_mode=debug_mode,
                        **kwargs,
                    )
                    return result  # type: ignore
                # Post-hooks (drained for side effects; events are discarded here)
                if team.post_hooks is not None:
                    async for _ in _aexecute_post_hooks(
                        team,
                        hooks=team.post_hooks,  # type: ignore
                        run_output=run_response,
                        run_context=run_context,
                        session=team_session,
                        user_id=user_id,
                        debug_mode=debug_mode,
                        background_tasks=background_tasks,
                        **kwargs,
                    ):
                        pass
                # Session summary
                if team.session_summary_manager is not None:
                    # Persist the run on the session first so the summary sees it.
                    team_session.upsert_run(run_response=run_response)
                    try:
                        await team.session_summary_manager.acreate_session_summary(
                            session=team_session, run_metrics=run_response.metrics
                        )
                    except Exception as e:
                        # Summary failure is non-fatal; the run still completes.
                        log_warning(f"Error in session summary creation: {str(e)}")
                run_response.status = RunStatus.completed
                await _acleanup_and_store(team, run_response=run_response, session=team_session)
                await alog_team_telemetry(team, session_id=team_session.session_id, run_id=run_response.run_id)
                log_debug(f"Team Continue Run End: {run_response.run_id}", center=True, symbol="*")
                return run_response
            except RunCancelledException as e:
                # run_response may still be None if cancellation hit before lookup.
                if run_response is None:
                    run_response = TeamRunOutput(run_id=run_id)
                run_response = cast(TeamRunOutput, run_response)
                log_info(f"Team run {run_response.run_id} was cancelled")
                run_response.status = RunStatus.cancelled
                run_response.content = str(e)
                if team_session is not None:
                    await _acleanup_and_store(team, run_response=run_response, session=team_session)
                return run_response
            except (InputCheckError, OutputCheckError) as e:
                # Guardrail failures are terminal, no retry.
                # NOTE(review): unlike the sync variant, no error event is
                # created/recorded on the run here — confirm this asymmetry.
                run_response = cast(TeamRunOutput, run_response)
                run_response.status = RunStatus.error
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
                if team_session is not None:
                    await _acleanup_and_store(team, run_response=run_response, session=team_session)
                return run_response
            except KeyboardInterrupt:
                # NOTE(review): no _acleanup_and_store here — the run is
                # returned un-persisted on Ctrl-C; confirm this is intentional.
                run_response = cast(TeamRunOutput, run_response)
                run_response.status = RunStatus.cancelled
                run_response.content = "Operation cancelled by user"
                return run_response
            except Exception as e:
                run_response = cast(TeamRunOutput, run_response)
                # Generic failures are retried with (optionally exponential) backoff.
                if attempt < num_attempts - 1:
                    if team.exponential_backoff:
                        delay = team.delay_between_retries * (2**attempt)
                    else:
                        delay = team.delay_between_retries
                    log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
                    await asyncio.sleep(delay)
                    continue
                # Last attempt failed: record the error on the run and return it.
                run_response.status = RunStatus.error
                run_error = create_team_run_error_event(run_response, error=str(e))
                run_response.events = add_team_error_event(error=run_error, events=run_response.events)
                if run_response.content is None:
                    run_response.content = str(e)
                log_error(f"Error in Team acontinue_run: {str(e)}")
                if team_session is not None:
                    await _acleanup_and_store(team, run_response=run_response, session=team_session)
                return run_response
    finally:
        # Always release tool connections and deregister the run, regardless
        # of which path (success, pause, cancel, error) was taken.
        _disconnect_connectable_tools(team)
        await _disconnect_mcp_tools(team)  # type: ignore
        if run_response and run_response.run_id:
            await acleanup_run(run_response.run_id)
    # Defensive fallback; every loop iteration returns, so this is normally unreachable.
    return run_response  # type: ignore
async def _acontinue_run_stream(
team: "Team",
session_id: str,
run_context: RunContext,
run_response: Optional[TeamRunOutput] = None,
requirements: Optional[List[Any]] = None,
run_id: Optional[str] = None,
user_id: Optional[str] = None,
response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
stream_events: bool = False,
yield_run_output: bool = False,
debug_mode: Optional[bool] = None,
background_tasks: Optional[Any] = None,
**kwargs: Any,
) -> AsyncIterator[Union[TeamRunOutputEvent, RunOutputEvent, TeamRunOutput]]:
"""Continue a paused team run (async, streaming)."""
from agno.team._hooks import _aexecute_post_hooks
from agno.team._init import _disconnect_connectable_tools, _disconnect_mcp_tools
from agno.team._response import (
_ahandle_model_response_stream,
agenerate_response_with_output_model_stream,
aparse_response_with_parser_model_stream,
)
from agno.team._telemetry import alog_team_telemetry
from agno.team._tools import _check_and_refresh_mcp_tools, _determine_tools_for_model
from agno.utils.events import create_team_run_continued_event
log_debug(f"Team Continue Run Stream: {run_response.run_id if run_response else run_id}", center=True)
team_session: Optional[TeamSession] = None
try:
num_attempts = team.retries + 1
for attempt in range(num_attempts):
try:
# Setup session
team_session = await _asetup_session(
team=team,
run_context=run_context,
session_id=session_id,
user_id=user_id,
run_id=run_id,
)
# Resolve run_response from run_id if needed
if run_response is None and run_id is not None:
if requirements is None:
raise ValueError("Requirements are required to continue a run from a run_id.")
runs = team_session.runs or []
run_response = next((r for r in runs if r.run_id == run_id), None) # type: ignore
if run_response is None:
raise RuntimeError(f"No runs found for run ID {run_id}")
run_response = cast(TeamRunOutput, run_response)
# Normalize and apply requirements
if requirements is not None:
requirements = _normalize_requirements_payload(requirements)
run_response.requirements = requirements
updated_tools = [req.tool_execution for req in requirements if req.tool_execution is not None]
if updated_tools and run_response.tools:
updated_tools_map = {tool.tool_call_id: tool for tool in updated_tools}
run_response.tools = [
updated_tools_map.get(tool.tool_call_id, tool) for tool in run_response.tools
]
elif updated_tools:
run_response.tools = updated_tools
await aregister_run(run_response.run_id) # type: ignore
has_member = _has_member_requirements(run_response.requirements or [])
has_team_level = _has_team_level_requirements(run_response.requirements or [])
# Route member requirements
member_results: List[str] = []
if has_member:
member_reqs = [
r for r in (run_response.requirements or []) if getattr(r, "member_agent_id", None) is not None
]
team_level_reqs = [
r for r in (run_response.requirements or []) if getattr(r, "member_agent_id", None) is None
]
original_member_req_ids = {id(r) for r in member_reqs}
run_response.requirements = member_reqs
member_results = await _aroute_requirements_to_members(
team, run_response=run_response, session=team_session, run_context=run_context
)
# Merge: keep team-level reqs + any newly propagated member reqs (chained HITL)
newly_propagated = [
r for r in (run_response.requirements or []) if id(r) not in original_member_req_ids
]
run_response.requirements = team_level_reqs + newly_propagated
if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
from agno.team import _hooks
async for item in _hooks.ahandle_team_run_paused_stream(
team, run_response=run_response, session=team_session, run_context=run_context
):
yield item
if yield_run_output:
yield run_response
return
if has_team_level:
# Guard: if team-level requirements are unresolved, re-pause instead of auto-rejecting
unresolved_team = [
r
for r in (run_response.requirements or [])
if getattr(r, "member_agent_id", None) is None and not r.is_resolved()
]
if unresolved_team:
from agno.team import _hooks
async for item in _hooks.ahandle_team_run_paused_stream(
team, run_response=run_response, session=team_session, run_context=run_context
):
yield item
if yield_run_output:
yield run_response
return
team.model = cast(Model, team.model)
await _check_and_refresh_mcp_tools(team)
team_run_context: Dict[str, Any] = {}
_tools = _determine_tools_for_model(
team,
model=team.model,
run_response=run_response,
run_context=run_context,
team_run_context=team_run_context,
session=team_session,
user_id=user_id,
async_mode=True,
stream=True,
stream_events=stream_events,
)
input_messages = run_response.messages or []
run_messages = _get_continue_run_messages(team, input=input_messages)
run_response.status = RunStatus.running
run_response.content = None
# Yield RunContinued event
if stream_events:
yield handle_event(
create_team_run_continued_event(run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Handle the updated tools (execute confirmed tools, etc.) with streaming
async for event in _ahandle_team_tool_call_updates_stream(
team,
run_response=run_response,
run_messages=run_messages,
tools=_tools,
stream_events=stream_events,
):
await araise_if_cancelled(run_response.run_id) # type: ignore
yield event
# Stream model response
if team.output_model is None:
async for event in _ahandle_model_response_stream(
team,
session=team_session,
run_response=run_response,
run_messages=run_messages,
tools=_tools,
response_format=response_format,
stream_events=stream_events,
session_state=run_context.session_state,
run_context=run_context,
):
await araise_if_cancelled(run_response.run_id) # type: ignore
yield event
else:
from agno.run.team import IntermediateRunContentEvent, RunContentEvent
async for event in _ahandle_model_response_stream(
team,
session=team_session,
run_response=run_response,
run_messages=run_messages,
tools=_tools,
response_format=response_format,
stream_events=stream_events,
session_state=run_context.session_state,
run_context=run_context,
):
await araise_if_cancelled(run_response.run_id) # type: ignore
if isinstance(event, RunContentEvent):
if stream_events:
yield IntermediateRunContentEvent(
content=event.content,
content_type=event.content_type,
)
else:
yield event
async for event in agenerate_response_with_output_model_stream(
team,
session=team_session,
run_response=run_response,
run_messages=run_messages,
stream_events=stream_events,
):
await araise_if_cancelled(run_response.run_id) # type: ignore
yield event
await araise_if_cancelled(run_response.run_id) # type: ignore
# Check for new pauses
if run_response.requirements and any(not req.is_resolved() for req in run_response.requirements):
from agno.team import _hooks
async for item in _hooks.ahandle_team_run_paused_stream(
team, run_response=run_response, session=team_session, run_context=run_context
):
yield item
if yield_run_output:
yield run_response
return
# Parse response with parser model
async for event in aparse_response_with_parser_model_stream(
team,
session=team_session,
run_response=run_response,
stream_events=stream_events,
run_context=run_context,
):
yield event
elif member_results:
# Member-only: mark original run as completed, then re-run team
continuation_message = _build_continuation_message(member_results)
run_response.status = RunStatus.completed
if team_session is not None:
await _acleanup_and_store(team, run_response=run_response, session=team_session)
async for item in team.arun( # type: ignore
input=continuation_message,
stream=True,
stream_events=stream_events,
session_id=session_id,
user_id=user_id,
knowledge_filters=run_context.knowledge_filters,
dependencies=run_context.dependencies,
metadata=run_context.metadata,
debug_mode=debug_mode,
**kwargs,
):
yield item
return
# Content completed
if stream_events:
yield handle_event(
create_team_run_content_completed_event(from_run_response=run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Post-hooks
if team.post_hooks is not None:
async for event in _aexecute_post_hooks(
team,
hooks=team.post_hooks, # type: ignore
run_output=run_response,
run_context=run_context,
session=team_session,
user_id=user_id,
debug_mode=debug_mode,
stream_events=stream_events,
background_tasks=background_tasks,
**kwargs,
):
yield event
# Session summary
if team.session_summary_manager is not None:
team_session.upsert_run(run_response=run_response)
if stream_events:
yield handle_event(
create_team_session_summary_started_event(from_run_response=run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
try:
await team.session_summary_manager.acreate_session_summary(
session=team_session, run_metrics=run_response.metrics
)
except Exception as e:
log_warning(f"Error in session summary creation: {str(e)}")
if stream_events:
yield handle_event(
create_team_session_summary_completed_event(
from_run_response=run_response, session_summary=team_session.summary
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
# Completed
completed_event = handle_event(
create_team_run_completed_event(run_response),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
run_response.status = RunStatus.completed
await _acleanup_and_store(team, run_response=run_response, session=team_session)
if stream_events:
yield completed_event
if yield_run_output:
yield run_response
await alog_team_telemetry(team, session_id=team_session.session_id, run_id=run_response.run_id)
log_debug(f"Team Continue Run End: {run_response.run_id}", center=True, symbol="*")
break
except RunCancelledException as e:
if run_response is None:
run_response = TeamRunOutput(run_id=run_id)
run_response = cast(TeamRunOutput, run_response)
log_info(f"Team run {run_response.run_id} was cancelled")
run_response.status = RunStatus.cancelled
if not run_response.content:
run_response.content = str(e)
yield handle_event(
create_team_run_cancelled_event(from_run_response=run_response, reason=str(e)),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
if team_session is not None:
await _acleanup_and_store(team, run_response=run_response, session=team_session)
break
except (InputCheckError, OutputCheckError) as e:
run_response = cast(TeamRunOutput, run_response)
run_response.status = RunStatus.error
run_error = create_team_run_error_event(
run_response,
error=str(e),
error_id=e.error_id,
error_type=e.type,
additional_data=e.additional_data,
)
run_response.events = add_team_error_event(error=run_error, events=run_response.events)
if run_response.content is None:
run_response.content = str(e)
log_error(f"Validation failed: {str(e)} | Check: {e.check_trigger}")
if team_session is not None:
await _acleanup_and_store(team, run_response=run_response, session=team_session)
yield run_error
break
except KeyboardInterrupt:
if run_response is None:
run_response = TeamRunOutput(run_id=run_id)
run_response = cast(TeamRunOutput, run_response)
yield handle_event(
create_team_run_cancelled_event(
from_run_response=run_response, reason="Operation cancelled by user"
),
run_response,
events_to_skip=team.events_to_skip,
store_events=team.store_events,
)
break
except Exception as e:
if run_response is None:
run_response = TeamRunOutput(run_id=run_id)
run_response = cast(TeamRunOutput, run_response)
if attempt < num_attempts - 1:
if team.exponential_backoff:
delay = team.delay_between_retries * (2**attempt)
else:
delay = team.delay_between_retries
log_warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}. Retrying in {delay}s...")
await asyncio.sleep(delay)
continue
run_response.status = RunStatus.error
run_error = create_team_run_error_event(run_response, error=str(e))
run_response.events = add_team_error_event(error=run_error, events=run_response.events)
if run_response.content is None:
run_response.content = str(e)
log_error(f"Error in Team acontinue_run stream: {str(e)}")
if team_session is not None:
await _acleanup_and_store(team, run_response=run_response, session=team_session)
yield run_error
finally:
_disconnect_connectable_tools(team)
await _disconnect_mcp_tools(team) # type: ignore
if run_response and run_response.run_id:
await acleanup_run(run_response.run_id)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_run.py",
"license": "Apache License 2.0",
"lines": 5327,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_run_options.py | """Centralized run option resolution for team dispatch functions."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.filters import FilterExpr
if TYPE_CHECKING:
from agno.run import RunContext
from agno.team.team import Team
@dataclass(frozen=True)
class ResolvedRunOptions:
    """Immutable snapshot of resolved run options.

    All values are fully resolved (call-site > team default > fallback)
    at construction time, except metadata where team-level values take
    precedence on conflicting keys.
    """

    # Streaming behavior
    stream: bool
    stream_events: bool
    yield_run_output: bool
    # Context-building flags
    add_history_to_context: bool
    add_dependencies_to_context: bool
    add_session_state_to_context: bool
    # Resolved payloads
    dependencies: Optional[Dict[str, Any]]
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]]
    metadata: Optional[Dict[str, Any]]
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]]

    def apply_to_context(
        self,
        run_context: "RunContext",
        *,
        dependencies_provided: bool = False,
        knowledge_filters_provided: bool = False,
        metadata_provided: bool = False,
    ) -> None:
        """Apply resolved options to run_context with precedence:
        explicit args > existing run_context > resolved defaults.

        Args:
            run_context: The context to mutate in place.
            dependencies_provided: True when dependencies were passed explicitly
                at the call site (forces overwrite of any existing value).
            knowledge_filters_provided: Same, for knowledge filters.
            metadata_provided: Same, for metadata.
        """
        # Overwrite when explicitly provided at the call site, or fill in when
        # the context has no value yet; otherwise keep the existing value.
        # (The original if/elif pairs assigned the same value in both branches.)
        if dependencies_provided or run_context.dependencies is None:
            run_context.dependencies = self.dependencies
        if knowledge_filters_provided or run_context.knowledge_filters is None:
            run_context.knowledge_filters = self.knowledge_filters
        if metadata_provided or run_context.metadata is None:
            run_context.metadata = self.metadata
        # Always set output_schema from resolved options.
        # Unlike other fields, output_schema must always be updated because the same run_context
        # may be reused across workflow steps with different teams, each with their own output_schema.
        run_context.output_schema = self.output_schema
def resolve_run_options(
    team: Team,
    *,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    yield_run_output: Optional[bool] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None,
) -> ResolvedRunOptions:
    """Resolve all run options from call-site values and team defaults.

    Precedence is call-site > team default > fallback, except for metadata
    where team-level values win on conflicting keys. Reads from ``team``
    but does not mutate it.
    """
    from agno.team._utils import _get_effective_filters
    from agno.utils.merge_dict import merge_dictionaries

    # stream: call-site > team.stream > False
    if stream is not None:
        effective_stream = stream
    else:
        effective_stream = team.stream if team.stream is not None else False

    # stream_events: forced off when not streaming;
    # otherwise call-site > team.stream_events > False
    if not effective_stream:
        effective_stream_events = False
    elif stream_events is not None:
        effective_stream_events = stream_events
    else:
        effective_stream_events = team.stream_events if team.stream_events is not None else False

    # yield_run_output: call-site > False
    effective_yield = yield_run_output if yield_run_output is not None else False

    # Context flags: call-site > team.<field>
    effective_add_history = team.add_history_to_context if add_history_to_context is None else add_history_to_context
    effective_add_deps = (
        team.add_dependencies_to_context if add_dependencies_to_context is None else add_dependencies_to_context
    )
    effective_add_state = (
        team.add_session_state_to_context if add_session_state_to_context is None else add_session_state_to_context
    )

    # dependencies: call-site > team.dependencies, copied so later
    # dependency resolution cannot mutate the team's defaults
    dep_source = dependencies if dependencies is not None else team.dependencies
    effective_deps = dep_source.copy() if dep_source is not None else None

    # knowledge_filters: delegate to the existing helper when any are set
    effective_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
    if team.knowledge_filters or knowledge_filters:
        effective_filters = _get_effective_filters(team, knowledge_filters=knowledge_filters)

    # metadata: merge call-site + team.metadata (team values take precedence)
    effective_metadata: Optional[Dict[str, Any]] = None
    if metadata is not None:
        effective_metadata = metadata.copy()
        if team.metadata is not None:
            merge_dictionaries(effective_metadata, team.metadata)
    elif team.metadata is not None:
        effective_metadata = team.metadata.copy()

    # output_schema: call-site > team.output_schema
    effective_schema = output_schema if output_schema is not None else team.output_schema

    return ResolvedRunOptions(
        stream=effective_stream,
        stream_events=effective_stream_events,
        yield_run_output=effective_yield,
        add_history_to_context=effective_add_history,
        add_dependencies_to_context=effective_add_deps,
        add_session_state_to_context=effective_add_state,
        dependencies=effective_deps,
        knowledge_filters=effective_filters,
        metadata=effective_metadata,
        output_schema=effective_schema,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_run_options.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_session.py | """Public session accessors and management for Team."""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
cast,
)
if TYPE_CHECKING:
from agno.team.team import Team
from agno.db.base import SessionType
from agno.metrics import SessionMetrics
from agno.models.message import Message
from agno.run import RunStatus
from agno.run.team import TeamRunOutput
from agno.session import TeamSession, WorkflowSession
from agno.session.summary import SessionSummary
from agno.utils.agent import (
aget_session_metrics_util,
aget_session_name_util,
aget_session_state_util,
aset_session_name_util,
aupdate_session_state_util,
get_session_metrics_util,
get_session_name_util,
get_session_state_util,
set_session_name_util,
update_session_state_util,
)
from agno.utils.log import log_debug, log_warning
# ---------------------------------------------------------------------------
# Session read / write
# ---------------------------------------------------------------------------
def get_session(
    team: "Team",
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> Optional[TeamSession]:
    """Load a TeamSession from the cache or database (synchronous).

    Args:
        session_id: The session_id to load; falls back to ``team.session_id``.
        user_id: The user_id for tenant isolation; a cached session is only
            reused when its user_id matches (or no user_id is given).

    Returns:
        Optional[TeamSession]: The session, or None when no db is configured
        or the session is not found.

    Raises:
        Exception: If neither ``session_id`` nor ``team.session_id`` is set.
        ValueError: If the team is configured with an async database.
    """
    from agno.team._init import _has_async_db
    from agno.team._storage import _read_session

    if not session_id and not team.session_id:
        raise Exception("No session_id provided")
    target_id: str = session_id or team.session_id  # type: ignore[assignment]

    # Serve from the per-team cache when enabled and the ids match
    cached = getattr(team, "_cached_session", None) if team.cache_session else None
    if cached is not None and cached.session_id == target_id and (user_id is None or cached.user_id == user_id):
        return cached

    if _has_async_db(team):
        raise ValueError("Cannot use sync get_session() with an async database. Use aget_session() instead.")

    if team.db is not None:
        if team.workflow_id is None:
            # Standalone team -> read a TeamSession
            loaded = cast(TeamSession, _read_session(team, session_id=target_id, user_id=user_id))
        else:
            # Team embedded in a workflow -> read a WorkflowSession
            loaded = cast(  # type: ignore[assignment]
                WorkflowSession,
                _read_session(
                    team,
                    session_id=target_id,
                    session_type=SessionType.WORKFLOW,
                    user_id=user_id,
                ),
            )
        # Refresh the cache when enabled
        if loaded is not None and team.cache_session:
            team._cached_session = loaded
        return loaded  # type: ignore[return-value]

    log_debug(f"TeamSession {target_id} not found in db")
    return None
async def aget_session(
    team: "Team",
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
) -> Optional[TeamSession]:
    """Load a TeamSession from the cache or database, preferring the async driver.

    Args:
        session_id: The session_id to load from storage. Falls back to
            ``team.session_id`` when not provided.
        user_id: The user_id for tenant isolation; a cached session is only
            reused when its user_id matches (or no user_id is given).

    Returns:
        Optional[TeamSession]: The session from cache or database, or None
        when no database is configured or the session is not found.

    Raises:
        Exception: If neither ``session_id`` nor ``team.session_id`` is set.
    """
    from agno.team._init import _has_async_db
    from agno.team._storage import _aread_session, _read_session
    if not session_id and not team.session_id:
        raise Exception("No session_id provided")
    session_id_to_load: str = session_id or team.session_id  # type: ignore[assignment]
    # If there is a cached session, return it
    if team.cache_session and hasattr(team, "_cached_session") and team._cached_session is not None:
        if team._cached_session.session_id == session_id_to_load and (
            user_id is None or team._cached_session.user_id == user_id
        ):
            return team._cached_session
    # Load and return the session from the database
    if team.db is not None:
        loaded_session = None
        # We have a standalone team, so we are loading a TeamSession
        if team.workflow_id is None:
            # Use the async read path only when the configured db supports it
            if _has_async_db(team):
                loaded_session = cast(
                    TeamSession, await _aread_session(team, session_id=session_id_to_load, user_id=user_id)
                )  # type: ignore[arg-type]
            else:
                loaded_session = cast(TeamSession, _read_session(team, session_id=session_id_to_load, user_id=user_id))
        # We have a workflow team, so we are loading a WorkflowSession
        else:
            if _has_async_db(team):
                loaded_session = cast(  # type: ignore[assignment]
                    WorkflowSession,
                    await _aread_session(
                        team,
                        session_id=session_id_to_load,
                        session_type=SessionType.WORKFLOW,
                        user_id=user_id,
                    ),
                )
            else:
                loaded_session = cast(  # type: ignore[assignment]
                    WorkflowSession,
                    _read_session(
                        team,
                        session_id=session_id_to_load,
                        session_type=SessionType.WORKFLOW,
                        user_id=user_id,
                    ),
                )
        # Cache the session if relevant
        if loaded_session is not None and team.cache_session:
            team._cached_session = loaded_session
        return loaded_session  # type: ignore[return-value]
    log_debug(f"TeamSession {session_id_to_load} not found in db")
    return None
def save_session(team: "Team", session: TeamSession) -> None:
    """Persist the TeamSession to storage (synchronous).

    Only the top-level, non-workflow team writes the session; transient
    per-run state keys are dropped and member responses are scrubbed
    according to the team's storage settings before the upsert.

    Args:
        session: The TeamSession to save.

    Raises:
        ValueError: If the team is configured with an async database.
    """
    from agno.team._init import _has_async_db
    from agno.team._run import _scrub_member_responses
    from agno.team._storage import _upsert_session

    if _has_async_db(team):
        raise ValueError("Cannot use sync save_session() with an async database. Use asave_session() instead.")

    # Only the top-level, non-workflow team owns persistence of this session
    if team.db is None or team.parent_team_id is not None or team.workflow_id is not None:
        return

    state = session.session_data.get("session_state") if session.session_data is not None else None
    if isinstance(state, dict):
        # Drop per-run bookkeeping keys before persisting
        for transient_key in ("current_session_id", "current_user_id", "current_run_id"):
            state.pop(transient_key, None)

    # Scrub member responses based on storage settings
    for run in session.runs or []:
        if hasattr(run, "member_responses"):
            if team.store_member_responses:
                # Scrub individual member responses based on their storage flags
                _scrub_member_responses(team, run.member_responses)
            else:
                # Remove all member responses
                run.member_responses = []

    _upsert_session(team, session=session)
    log_debug(f"Created or updated TeamSession record: {session.session_id}")
async def asave_session(team: "Team", session: TeamSession) -> None:
    """Persist the TeamSession to storage, using the async db when available.

    Only the top-level, non-workflow team writes the session; transient
    per-run state keys are dropped and member responses are scrubbed
    according to the team's storage settings before the upsert.

    Args:
        session: The TeamSession to save.
    """
    from agno.team._init import _has_async_db
    from agno.team._run import _scrub_member_responses
    from agno.team._storage import _aupsert_session, _upsert_session

    # Only the top-level, non-workflow team owns persistence of this session
    if team.db is None or team.parent_team_id is not None or team.workflow_id is not None:
        return

    state = session.session_data.get("session_state") if session.session_data is not None else None
    if isinstance(state, dict):
        # Drop per-run bookkeeping keys before persisting
        for transient_key in ("current_session_id", "current_user_id", "current_run_id"):
            state.pop(transient_key, None)

    # Scrub member responses based on storage settings
    for run in session.runs or []:
        if hasattr(run, "member_responses"):
            if team.store_member_responses:
                # Scrub individual member responses based on their storage flags
                _scrub_member_responses(team, run.member_responses)
            else:
                # Remove all member responses
                run.member_responses = []

    # Route to the async or sync storage layer depending on the configured db
    if _has_async_db(team):
        await _aupsert_session(team, session=session)
    else:
        _upsert_session(team, session=session)
    log_debug(f"Created or updated TeamSession record: {session.session_id}")
# ---------------------------------------------------------------------------
# Session name
# ---------------------------------------------------------------------------
def generate_session_name(team: "Team", session: TeamSession, _retries: int = 0) -> str:
    """Generate a short name for the team session using the team's model.

    Args:
        session: The TeamSession to generate a name for.
        _retries: Internal retry counter (do not set manually).

    Returns:
        str: The generated session name, or the fallback "Team Session"
        after max retries.

    Raises:
        Exception: If the team has no model configured.
    """
    # Hoisted: the original re-imported log_error in four separate branches
    from agno.utils.log import log_error

    max_retries = 3
    if team.model is None:
        raise Exception("Model not set")
    gen_session_name_prompt = "Team Conversation\n"
    # Get team session messages for generating the name
    messages_for_generating_session_name = session.get_messages()
    for message in messages_for_generating_session_name:
        gen_session_name_prompt += f"{message.role.upper()}: {message.content}\n"
    gen_session_name_prompt += "\n\nTeam Session Name: "
    system_message = Message(
        role=team.system_message_role,
        content="Please provide a suitable name for this conversation in maximum 5 words. "
        "Remember, do not exceed 5 words.",
    )
    user_message = Message(role="user", content=gen_session_name_prompt)
    generate_name_messages = [system_message, user_message]
    # Generate name
    generated_name = team.model.response(messages=generate_name_messages)
    content = generated_name.content
    if content is None:
        if _retries < max_retries:
            log_error("Generated name is None. Trying again.")
            return generate_session_name(team, session=session, _retries=_retries + 1)
        log_error("Generated name is None after max retries. Using fallback.")
        return "Team Session"
    # NOTE(review): threshold of 15 words allows slack beyond the 5-word ask
    if len(content.split()) > 15:
        if _retries < max_retries:
            log_error("Generated name is too long. Trying again.")
            return generate_session_name(team, session=session, _retries=_retries + 1)
        log_error("Generated name is too long after max retries. Using fallback.")
        return "Team Session"
    return content.replace('"', "").strip()
def set_session_name(
    team: "Team", session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
) -> TeamSession:
    """Set (or autogenerate) the name of a session and save it to storage.

    Args:
        session_id: Target session ID; defaults to the team's current session ID.
        autogenerate: Whether to autogenerate the session name.
        session_name: Explicit name to set; if not provided, the name is autogenerated.

    Returns:
        TeamSession: The updated session.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    updated = set_session_name_util(
        cast(Any, team),
        session_id=resolved_id,
        autogenerate=autogenerate,
        session_name=session_name,
    )
    return cast(TeamSession, updated)
async def aset_session_name(
    team: "Team", session_id: Optional[str] = None, autogenerate: bool = False, session_name: Optional[str] = None
) -> TeamSession:
    """Set (or autogenerate) the name of a session and save it to storage (async).

    Args:
        session_id: Target session ID; defaults to the team's current session ID.
        autogenerate: Whether to autogenerate the session name.
        session_name: Explicit name to set; if not provided, the name is autogenerated.

    Returns:
        TeamSession: The updated session.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    updated = await aset_session_name_util(
        cast(Any, team),
        session_id=resolved_id,
        autogenerate=autogenerate,
        session_name=session_name,
    )
    return cast(TeamSession, updated)
def get_session_name(team: "Team", session_id: Optional[str] = None) -> str:
    """Return the stored name of a session.

    Args:
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        str: The session name.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return get_session_name_util(cast(Any, team), session_id=resolved_id)
async def aget_session_name(team: "Team", session_id: Optional[str] = None) -> str:
    """Return the stored name of a session (async).

    Args:
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        str: The session name.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_name_util(cast(Any, team), session_id=resolved_id)
# ---------------------------------------------------------------------------
# Session state
# ---------------------------------------------------------------------------
def get_session_state(team: "Team", session_id: Optional[str] = None) -> Dict[str, Any]:
    """Return the stored session state for a session.

    Args:
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        Dict[str, Any]: The session state.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return get_session_state_util(cast(Any, team), session_id=resolved_id)
async def aget_session_state(team: "Team", session_id: Optional[str] = None) -> Dict[str, Any]:
    """Return the stored session state for a session (async).

    Args:
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        Dict[str, Any]: The session state.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_state_util(cast(Any, team), session_id=resolved_id)
def update_session_state(team: "Team", session_state_updates: Dict[str, Any], session_id: Optional[str] = None) -> str:
    """Apply key/value updates to a session's state and persist it.

    Args:
        session_state_updates: Key-value pairs to merge into the session state.
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        The updated session state as returned by the underlying util
        (annotated as ``str`` upstream — TODO confirm actual type).

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return update_session_state_util(
        cast(Any, team), session_state_updates=session_state_updates, session_id=resolved_id
    )
async def aupdate_session_state(
    team: "Team", session_state_updates: Dict[str, Any], session_id: Optional[str] = None
) -> str:
    """Apply key/value updates to a session's state and persist it (async).

    Args:
        session_state_updates: Key-value pairs to merge into the session state.
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        The updated session state as returned by the underlying util
        (annotated as ``str`` upstream — TODO confirm actual type).

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aupdate_session_state_util(
        entity=cast(Any, team),
        session_state_updates=session_state_updates,
        session_id=resolved_id,
    )
# ---------------------------------------------------------------------------
# Session metrics
# ---------------------------------------------------------------------------
def get_session_metrics(team: "Team", session_id: Optional[str] = None) -> Optional[SessionMetrics]:
    """Return the aggregated metrics for a session, if any.

    Args:
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        Optional[SessionMetrics]: The session metrics, or None when unavailable.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return get_session_metrics_util(cast(Any, team), session_id=resolved_id)
async def aget_session_metrics(team: "Team", session_id: Optional[str] = None) -> Optional[SessionMetrics]:
    """Return the aggregated metrics for a session, if any (async).

    Args:
        session_id: Target session ID; defaults to the team's current session ID.

    Returns:
        Optional[SessionMetrics]: The session metrics, or None when unavailable.

    Raises:
        Exception: If no session ID can be resolved.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_metrics_util(cast(Any, team), session_id=resolved_id)
def update_session_metrics(team: "Team", session: TeamSession, run_response: TeamRunOutput) -> None:
    """Recompute session-level metrics and write them to session_data.

    Converts run-level Metrics (details: Dict[str, List[ModelMetrics]]) to
    session-level SessionMetrics (details: List[ModelMetrics]) using
    SessionMetrics.accumulate_from_run().

    Accumulates metrics from the team leader's own model calls as well as
    all member agent/team responses (recursively for nested teams).
    """
    from agno.team._storage import get_session_metrics_internal

    metrics = get_session_metrics_internal(team, session=session)
    if metrics is None:
        return
    if run_response.metrics is not None:
        metrics.accumulate_from_run(run_response.metrics)
    # Fold in member (and nested-team) run metrics
    _accumulate_member_metrics(metrics, run_response.member_responses)
    if session.session_data is not None:
        session.session_data["session_metrics"] = metrics.to_dict()
def _accumulate_member_metrics(
    session_metrics: SessionMetrics,
    member_responses: "List",
) -> None:
    """Recursively accumulate metrics from member responses into session metrics."""
    for response in member_responses:
        if response.metrics is not None:
            session_metrics.accumulate_from_run(response.metrics)
        # Nested teams carry their own member responses: recurse into them
        if isinstance(response, TeamRunOutput) and response.member_responses:
            _accumulate_member_metrics(session_metrics, response.member_responses)
# ---------------------------------------------------------------------------
# Session delete
# ---------------------------------------------------------------------------
def delete_session(team: "Team", session_id: str, user_id: Optional[str] = None):
    """Delete the given session from storage; no-op when no db is configured."""
    db = team.db
    if db is not None:
        db.delete_session(session_id=session_id, user_id=user_id)
async def adelete_session(team: "Team", session_id: str, user_id: Optional[str] = None):
    """Delete the given session from storage, using the async db when available."""
    from agno.team._init import _has_async_db

    db = team.db
    if db is None:
        return
    # Await only when the configured db exposes an async delete
    if _has_async_db(team):
        await db.delete_session(session_id=session_id, user_id=user_id)  # type: ignore
    else:
        db.delete_session(session_id=session_id, user_id=user_id)
# ---------------------------------------------------------------------------
# Session messages / chat history
# ---------------------------------------------------------------------------
def get_session_messages(
    team: "Team",
    session_id: Optional[str] = None,
    member_ids: Optional[List[str]] = None,
    last_n_runs: Optional[int] = None,
    limit: Optional[int] = None,
    skip_roles: Optional[List[str]] = None,
    skip_statuses: Optional[List[RunStatus]] = None,
    skip_history_messages: bool = True,
    skip_member_messages: bool = True,
) -> List[Message]:
    """Return all messages belonging to the given session.

    Args:
        session_id: Target session ID; defaults to the team's current session ID.
        member_ids: The ids of the members to get the messages from.
        last_n_runs: Number of runs (from the latest) to pull messages from; all by default.
        limit: Number of messages (from the latest) to return; all by default.
        skip_roles: Skip messages with these roles.
        skip_statuses: Skip messages with these statuses.
        skip_history_messages: Skip messages tagged as history in previous runs.
        skip_member_messages: Skip messages created by members of the team.

    Returns:
        List[Message]: The messages for the session (empty when no session ID is set).

    Raises:
        Exception: If the session cannot be found.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        log_warning("Session ID is not set, cannot get messages for session")
        return []
    session = get_session(team, session_id=resolved_id)
    if session is None:
        raise Exception("Session not found")
    message_filters = dict(
        member_ids=member_ids,
        last_n_runs=last_n_runs,
        limit=limit,
        skip_roles=skip_roles,
        skip_statuses=skip_statuses,
        skip_history_messages=skip_history_messages,
        skip_member_messages=skip_member_messages,
    )
    return session.get_messages(team_id=team.id, **message_filters)
async def aget_session_messages(
    team: "Team",
    session_id: Optional[str] = None,
    member_ids: Optional[List[str]] = None,
    last_n_runs: Optional[int] = None,
    limit: Optional[int] = None,
    skip_roles: Optional[List[str]] = None,
    skip_statuses: Optional[List[RunStatus]] = None,
    skip_history_messages: bool = True,
    skip_member_messages: bool = True,
) -> List[Message]:
    """Async variant: return every message stored for the given session, with filtering.

    Args:
        session_id: Session to read. Falls back to the team's cached session ID.
        member_ids: Only include messages from these team members.
        last_n_runs: Only include messages from the latest N runs. Defaults to all runs.
        limit: Maximum number of messages to return, counted from the latest.
        skip_roles: Exclude messages with any of these roles.
        skip_statuses: Exclude messages with any of these run statuses.
        skip_history_messages: Exclude messages tagged as history in earlier runs.
        skip_member_messages: Exclude messages produced by members of the team.

    Returns:
        List[Message]: The filtered messages for the session.
    """
    resolved_id = session_id or team.session_id
    if resolved_id is None:
        log_warning("Session ID is not set, cannot get messages for session")
        return []
    session = await aget_session(team, session_id=resolved_id)
    if session is None:
        raise Exception("Session not found")
    return session.get_messages(
        team_id=team.id,
        member_ids=member_ids,
        last_n_runs=last_n_runs,
        limit=limit,
        skip_roles=skip_roles,
        skip_statuses=skip_statuses,
        skip_history_messages=skip_history_messages,
        skip_member_messages=skip_member_messages,
    )
def get_chat_history(
    team: "Team", session_id: Optional[str] = None, last_n_runs: Optional[int] = None
) -> List[Message]:
    """Return only the conversational history for a session.

    System and tool messages, as well as member-produced messages, are filtered
    out. Use get_session_messages() for finer-grained filtering.

    Args:
        session_id: Session to read. Falls back to the team's cached session ID.
        last_n_runs: Only include messages from the latest N runs.

    Returns:
        List[Message]: The chat history from the session.
    """
    return get_session_messages(
        team,
        session_id=session_id,
        last_n_runs=last_n_runs,
        skip_roles=["system", "tool"],
        skip_member_messages=True,
    )
async def aget_chat_history(
    team: "Team", session_id: Optional[str] = None, last_n_runs: Optional[int] = None
) -> List[Message]:
    """Async variant: return only the conversational history for a session.

    System and tool messages, as well as member-produced messages, are filtered out.

    Args:
        session_id: Session to read. Falls back to the team's cached session ID.
        last_n_runs: Only include messages from the latest N runs.

    Returns:
        List[Message]: The chat history from the session.
    """
    return await aget_session_messages(
        team,
        session_id=session_id,
        last_n_runs=last_n_runs,
        skip_roles=["system", "tool"],
        skip_member_messages=True,
    )
# ---------------------------------------------------------------------------
# Session summary
# ---------------------------------------------------------------------------
def get_session_summary(team: "Team", session_id: Optional[str] = None) -> Optional[SessionSummary]:
    """Return the stored summary for a session.

    Args:
        session_id: Session to read. Falls back to the team's cached session ID.

    Raises:
        ValueError: If no session ID could be resolved.
        Exception: If the session does not exist in storage.

    Returns:
        SessionSummary: The session summary, or None if the session has none.
    """
    target_id = team.session_id if session_id is None else session_id
    if target_id is None:
        raise ValueError("Session ID is required")
    session = get_session(team, session_id=target_id)
    if session is None:
        raise Exception(f"Session {target_id} not found")
    return session.get_session_summary()  # type: ignore
async def aget_session_summary(team: "Team", session_id: Optional[str] = None) -> Optional[SessionSummary]:
    """Async variant: return the stored summary for a session.

    Args:
        session_id: Session to read. Falls back to the team's cached session ID.

    Raises:
        ValueError: If no session ID could be resolved.
        Exception: If the session does not exist in storage.

    Returns:
        SessionSummary: The session summary, or None if the session has none.
    """
    target_id = team.session_id if session_id is None else session_id
    if target_id is None:
        raise ValueError("Session ID is required")
    session = await aget_session(team, session_id=target_id)
    if session is None:
        raise Exception(f"Session {target_id} not found")
    return session.get_session_summary()  # type: ignore
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_session.py",
"license": "Apache License 2.0",
"lines": 593,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_storage.py | """Session persistence and serialization helpers for Team."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agno.team.mode import TeamMode
from agno.team.team import Team
from typing import (
Any,
Dict,
List,
Optional,
Union,
cast,
)
from pydantic import BaseModel
from agno.agent import Agent
from agno.db.base import AsyncBaseDb, BaseDb, ComponentType, SessionType
from agno.db.utils import db_from_dict
from agno.metrics import RunMetrics, SessionMetrics
from agno.models.base import Model
from agno.models.message import Message
from agno.models.utils import get_model
from agno.registry.registry import Registry
from agno.run.agent import RunOutput
from agno.run.team import (
TeamRunOutput,
)
from agno.session import TeamSession, WorkflowSession
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.agent import (
aget_last_run_output_util,
aget_run_output_util,
aget_session_metrics_util,
get_last_run_output_util,
get_run_output_util,
get_session_metrics_util,
)
from agno.utils.log import (
log_debug,
log_error,
log_warning,
)
from agno.utils.merge_dict import merge_dictionaries
from agno.utils.string import generate_id_from_name
# ---------------------------------------------------------------------------
# Run output accessors
# ---------------------------------------------------------------------------
def get_run_output(
    team: "Team", run_id: str, session_id: Optional[str] = None
) -> Optional[Union[TeamRunOutput, RunOutput]]:
    """Load a single RunOutput/TeamRunOutput from storage (cached sessions included).

    Args:
        run_id (str): The run_id to load from storage.
        session_id (Optional[str]): Session to read; falls back to the team's cached session ID.

    Raises:
        Exception: If neither a session_id argument nor a cached session ID is available.
    """
    if not (session_id or team.session_id):
        raise Exception("No session_id provided")
    target_session = session_id or team.session_id
    return get_run_output_util(cast(Any, team), run_id=run_id, session_id=target_session)
async def aget_run_output(
    team: "Team", run_id: str, session_id: Optional[str] = None
) -> Optional[Union[TeamRunOutput, RunOutput]]:
    """Async variant: load a single RunOutput/TeamRunOutput from storage.

    Args:
        run_id (str): The run_id to load from storage.
        session_id (Optional[str]): Session to read; falls back to the team's cached session ID.

    Raises:
        Exception: If neither a session_id argument nor a cached session ID is available.
    """
    if not (session_id or team.session_id):
        raise Exception("No session_id provided")
    target_session = session_id or team.session_id
    return await aget_run_output_util(cast(Any, team), run_id=run_id, session_id=target_session)
def get_last_run_output(team: "Team", session_id: Optional[str] = None) -> Optional[TeamRunOutput]:
    """Load the most recent run response for a session from storage.

    Args:
        session_id (Optional[str]): Session to read; falls back to the team's cached session ID.

    Raises:
        Exception: If neither a session_id argument nor a cached session ID is available.

    Returns:
        TeamRunOutput: The last run response from the database.
    """
    if not (session_id or team.session_id):
        raise Exception("No session_id provided")
    target_session = session_id or team.session_id
    return cast(TeamRunOutput, get_last_run_output_util(cast(Any, team), session_id=target_session))
async def aget_last_run_output(team: "Team", session_id: Optional[str] = None) -> Optional[TeamRunOutput]:
    """Async variant: load the most recent run response for a session from storage.

    Args:
        session_id (Optional[str]): Session to read; falls back to the team's cached session ID.

    Raises:
        Exception: If neither a session_id argument nor a cached session ID is available.

    Returns:
        TeamRunOutput: The last run response from the database.
    """
    if not (session_id or team.session_id):
        raise Exception("No session_id provided")
    target_session = session_id or team.session_id
    return cast(TeamRunOutput, await aget_last_run_output_util(cast(Any, team), session_id=target_session))
# ---------------------------------------------------------------------------
# Session metrics (internal)
# ---------------------------------------------------------------------------
def get_session_metrics_internal(team: "Team", session: TeamSession) -> SessionMetrics:
    """Resolve the SessionMetrics stored on a session.

    Accepts the value in any of its historical storage forms (dict, SessionMetrics,
    or legacy RunMetrics) and normalizes it to a SessionMetrics instance. Falls
    back to an empty SessionMetrics when nothing usable is stored.
    """
    data = session.session_data
    stored = data.get("session_metrics") if data is not None and "session_metrics" in data else None
    if isinstance(stored, dict):
        return SessionMetrics.from_dict(stored)
    if isinstance(stored, SessionMetrics):
        return stored
    if isinstance(stored, RunMetrics):
        # Legacy format: copy the token/cost fields across to SessionMetrics
        return SessionMetrics(
            input_tokens=stored.input_tokens,
            output_tokens=stored.output_tokens,
            total_tokens=stored.total_tokens,
            audio_input_tokens=stored.audio_input_tokens,
            audio_output_tokens=stored.audio_output_tokens,
            audio_total_tokens=stored.audio_total_tokens,
            cache_read_tokens=stored.cache_read_tokens,
            cache_write_tokens=stored.cache_write_tokens,
            reasoning_tokens=stored.reasoning_tokens,
            cost=stored.cost,
        )
    return SessionMetrics()
# ---------------------------------------------------------------------------
# Session read / write
# ---------------------------------------------------------------------------
def _read_session(
    team: "Team", session_id: str, session_type: SessionType = SessionType.TEAM, user_id: Optional[str] = None
) -> Optional[Union[TeamSession, WorkflowSession]]:
    """Fetch a session record from the team's database.

    Returns None (after logging a warning) when the db is missing or the read fails.
    """
    try:
        db = team.db
        if not db:
            raise ValueError("Db not initialized")
        return db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)  # type: ignore
    except Exception as e:
        log_warning(f"Error getting session from db: {e}")
        return None
async def _aread_session(
    team: "Team", session_id: str, session_type: SessionType = SessionType.TEAM, user_id: Optional[str] = None
) -> Optional[Union[TeamSession, WorkflowSession]]:
    """Fetch a session record from the database, awaiting when the backend is async.

    Returns None (after logging a warning) when the db is missing or the read fails.
    """
    from agno.team._init import _has_async_db

    try:
        if not team.db:
            raise ValueError("Db not initialized")
        if _has_async_db(team):
            team.db = cast(AsyncBaseDb, team.db)
            result = await team.db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)
        else:
            result = team.db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)  # type: ignore[assignment]
        return result  # type: ignore
    except Exception as e:
        log_warning(f"Error getting session from db: {e}")
        return None
def _upsert_session(team: "Team", session: TeamSession) -> Optional[TeamSession]:
    """Insert or update a session record in the database.

    Returns None (after logging a warning) when the db is missing or the write fails.
    """
    try:
        if not team.db:
            raise ValueError("Db not initialized")
        return team.db.upsert_session(session=session)  # type: ignore
    except Exception as e:
        log_warning(f"Error upserting session into db: {e}")
        return None
async def _aupsert_session(team: "Team", session: TeamSession) -> Optional[TeamSession]:
    """Insert or update a session record, awaiting when the db backend is async.

    Returns None (after logging a warning) when the db is missing or the write fails.
    """
    from agno.team._init import _has_async_db

    try:
        if not team.db:
            raise ValueError("Db not initialized")
        if _has_async_db(team):
            return await team.db.upsert_session(session=session)  # type: ignore
        return team.db.upsert_session(session=session)  # type: ignore
    except Exception as e:
        log_warning(f"Error upserting session into db: {e}")
        return None
def _read_or_create_session(team: "Team", session_id: str, user_id: Optional[str] = None) -> TeamSession:
    """Return the TeamSession for session_id, creating a new one when none exists.

    Resolution order: the team's cached session, then the database (only for
    top-level teams), then a freshly constructed session.

    Returns:
        TeamSession: The cached, loaded, or newly created session.
    """
    from time import time

    from agno.session.team import TeamSession
    from agno.team._telemetry import get_team_data

    # Serve from cache when it matches the requested session (and user, if given)
    cached = team._cached_session
    if cached is not None and cached.session_id == session_id and (user_id is None or cached.user_id == user_id):
        return cached

    # Only top-level teams (not nested in a parent team or workflow) read from the db
    team_session: Optional[TeamSession] = None
    if team.db is not None and team.parent_team_id is None and team.workflow_id is None:
        team_session = cast(TeamSession, _read_session(team, session_id=session_id, user_id=user_id))

    if team_session is None:
        log_debug(f"Creating new TeamSession: {session_id}")
        session_data: Dict[str, Any] = {}
        if team.session_state is not None:
            from copy import deepcopy

            session_data["session_state"] = deepcopy(team.session_state)
        team_session = TeamSession(
            session_id=session_id,
            team_id=team.id,
            user_id=user_id,
            team_data=get_team_data(team),
            session_data=session_data,
            metadata=team.metadata,
            created_at=int(time()),
        )
        # Seed the new session with an introduction run when one is configured
        if team.introduction is not None:
            from uuid import uuid4

            intro_run = TeamRunOutput(
                run_id=str(uuid4()),
                team_id=team.id,
                session_id=session_id,
                user_id=user_id,
                team_name=team.name,
                content=team.introduction,
                messages=[Message(role=team.model.assistant_message_role, content=team.introduction)],  # type: ignore
            )
            team_session.upsert_run(intro_run)

    # Cache the session if relevant (team_session is always non-None here)
    if team.cache_session:
        team._cached_session = team_session
    return team_session
async def _aread_or_create_session(team: "Team", session_id: str, user_id: Optional[str] = None) -> TeamSession:
    """Async variant: return the TeamSession for session_id, creating one when none exists.

    Resolution order: the team's cached session, then the database (only for
    top-level teams; awaited when the db backend is async), then a freshly
    constructed session.

    Returns:
        TeamSession: The cached, loaded, or newly created session.
    """
    from time import time

    from agno.session.team import TeamSession
    from agno.team._init import _has_async_db
    from agno.team._telemetry import get_team_data

    # Serve from cache when it matches the requested session (and user, if given)
    cached = team._cached_session
    if cached is not None and cached.session_id == session_id and (user_id is None or cached.user_id == user_id):
        return cached

    # Only top-level teams (not nested in a parent team or workflow) read from the db
    team_session: Optional[TeamSession] = None
    if team.db is not None and team.parent_team_id is None and team.workflow_id is None:
        if _has_async_db(team):
            team_session = cast(TeamSession, await _aread_session(team, session_id=session_id, user_id=user_id))
        else:
            team_session = cast(TeamSession, _read_session(team, session_id=session_id, user_id=user_id))

    if team_session is None:
        log_debug(f"Creating new TeamSession: {session_id}")
        session_data: Dict[str, Any] = {}
        if team.session_state is not None:
            from copy import deepcopy

            session_data["session_state"] = deepcopy(team.session_state)
        team_session = TeamSession(
            session_id=session_id,
            team_id=team.id,
            user_id=user_id,
            team_data=get_team_data(team),
            session_data=session_data,
            metadata=team.metadata,
            created_at=int(time()),
        )
        # Seed the new session with an introduction run when one is configured
        if team.introduction is not None:
            from uuid import uuid4

            intro_run = TeamRunOutput(
                run_id=str(uuid4()),
                team_id=team.id,
                session_id=session_id,
                user_id=user_id,
                team_name=team.name,
                content=team.introduction,
                messages=[Message(role=team.model.assistant_message_role, content=team.introduction)],  # type: ignore
            )
            team_session.upsert_run(intro_run)

    # Cache the session if relevant (team_session is always non-None here)
    if team.cache_session:
        team._cached_session = team_session
    return team_session
def _load_session_state(team: "Team", session: TeamSession, session_state: Dict[str, Any]) -> Dict[str, Any]:
"""Load and return the stored session_state from the database, optionally merging it with the given one"""
# Get the session_state from the database and merge with proper precedence
# At this point session_state contains: agent_defaults + run_params
if session.session_data is not None and "session_state" in session.session_data:
session_state_from_db = session.session_data.get("session_state")
if (
session_state_from_db is not None
and isinstance(session_state_from_db, dict)
and len(session_state_from_db) > 0
and not team.overwrite_db_session_state
):
# This preserves precedence: run_params > db_state > agent_defaults
merged_state = session_state_from_db.copy()
merge_dictionaries(merged_state, session_state)
session_state.clear()
session_state.update(merged_state)
# Update the session_state in the session
if session.session_data is not None:
session.session_data["session_state"] = session_state
return session_state
def _update_metadata(team: "Team", session: TeamSession):
"""Update the extra_data in the session"""
# Read metadata from the database
if session.metadata is not None:
# If metadata is set in the agent, update the database metadata with the agent's metadata
if team.metadata is not None:
# Updates agent's session metadata in place
merge_dictionaries(session.metadata, team.metadata)
# Update the current metadata with the metadata from the database which is updated in place
team.metadata = session.metadata
def to_dict(team: "Team") -> Dict[str, Any]:
    """
    Convert the Team to a dictionary.

    Only non-default values are included so the output stays minimal and can be
    round-tripped through ``from_dict``.

    Returns:
        Dict[str, Any]: Dictionary representation of the team configuration
    """
    from agno.team.team import Team

    config: Dict[str, Any] = {}
    # --- Team Settings ---
    if team.id is not None:
        config["id"] = team.id
    if team.name is not None:
        config["name"] = team.name
    if team.role is not None:
        config["role"] = team.role
    if team.description is not None:
        config["description"] = team.description
    # --- Model ---
    if team.model is not None:
        config["model"] = team.model.to_dict() if isinstance(team.model, Model) else str(team.model)
    # --- Members ---
    # Members are serialized by reference (id only); from_dict rehydrates them from the db
    if team.members and isinstance(team.members, list):
        serialized_members = []
        for member in team.members:
            if isinstance(member, Agent):
                serialized_members.append({"type": "agent", "agent_id": member.id})
            elif isinstance(member, Team):
                serialized_members.append({"type": "team", "team_id": member.id})
        if serialized_members:
            config["members"] = serialized_members
    # --- Mode ---
    if team.mode is not None:
        config["mode"] = team.mode.value if hasattr(team.mode, "value") else str(team.mode)
    if team.max_iterations != 10:
        config["max_iterations"] = team.max_iterations
    # --- Execution settings (only if non-default) ---
    if team.respond_directly:
        config["respond_directly"] = team.respond_directly
    if team.delegate_to_all_members:
        config["delegate_to_all_members"] = team.delegate_to_all_members
    if not team.determine_input_for_members:  # default is True
        config["determine_input_for_members"] = team.determine_input_for_members
    # --- User settings ---
    if team.user_id is not None:
        config["user_id"] = team.user_id
    # --- Session settings ---
    if team.session_id is not None:
        config["session_id"] = team.session_id
    if team.session_state is not None:
        config["session_state"] = team.session_state
    if team.add_session_state_to_context:
        config["add_session_state_to_context"] = team.add_session_state_to_context
    if team.enable_agentic_state:
        config["enable_agentic_state"] = team.enable_agentic_state
    if team.overwrite_db_session_state:
        config["overwrite_db_session_state"] = team.overwrite_db_session_state
    if team.cache_session:
        config["cache_session"] = team.cache_session
    # --- Team history settings ---
    if team.add_team_history_to_members:
        config["add_team_history_to_members"] = team.add_team_history_to_members
    if team.num_team_history_runs != 3:  # default is 3
        config["num_team_history_runs"] = team.num_team_history_runs
    if team.share_member_interactions:
        config["share_member_interactions"] = team.share_member_interactions
    if team.search_session_history:
        config["search_session_history"] = team.search_session_history
    if team.num_history_sessions is not None:
        config["num_history_sessions"] = team.num_history_sessions
    if team.read_chat_history:
        config["read_chat_history"] = team.read_chat_history
    # --- System message settings ---
    if team.system_message is not None and isinstance(team.system_message, str):
        config["system_message"] = team.system_message
    if team.system_message_role != "system":  # default is "system"
        config["system_message_role"] = team.system_message_role
    if team.introduction is not None:
        config["introduction"] = team.introduction
    if team.instructions is not None and not callable(team.instructions):
        config["instructions"] = team.instructions
    if team.expected_output is not None:
        config["expected_output"] = team.expected_output
    if team.additional_context is not None:
        config["additional_context"] = team.additional_context
    # --- Context settings ---
    if team.markdown:
        config["markdown"] = team.markdown
    if team.add_datetime_to_context:
        config["add_datetime_to_context"] = team.add_datetime_to_context
    if team.add_location_to_context:
        config["add_location_to_context"] = team.add_location_to_context
    if team.timezone_identifier is not None:
        config["timezone_identifier"] = team.timezone_identifier
    if team.add_name_to_context:
        config["add_name_to_context"] = team.add_name_to_context
    if team.add_member_tools_to_context:
        config["add_member_tools_to_context"] = team.add_member_tools_to_context
    if not team.resolve_in_context:  # default is True
        config["resolve_in_context"] = team.resolve_in_context
    # --- Database settings ---
    if team.db is not None and hasattr(team.db, "to_dict"):
        config["db"] = team.db.to_dict()
    # --- Dependencies ---
    if team.dependencies is not None:
        config["dependencies"] = team.dependencies
    if team.add_dependencies_to_context:
        config["add_dependencies_to_context"] = team.add_dependencies_to_context
    # --- Knowledge settings ---
    # TODO: implement knowledge serialization
    # if team.knowledge is not None:
    #     config["knowledge"] = team.knowledge.to_dict()
    if team.knowledge_filters is not None:
        config["knowledge_filters"] = team.knowledge_filters
    if team.enable_agentic_knowledge_filters:
        config["enable_agentic_knowledge_filters"] = team.enable_agentic_knowledge_filters
    if team.update_knowledge:
        config["update_knowledge"] = team.update_knowledge
    if team.add_knowledge_to_context:
        config["add_knowledge_to_context"] = team.add_knowledge_to_context
    if not team.search_knowledge:  # default is True
        config["search_knowledge"] = team.search_knowledge
    if not team.add_search_knowledge_instructions:  # default is True
        config["add_search_knowledge_instructions"] = team.add_search_knowledge_instructions
    if team.references_format != "json":  # default is "json"
        config["references_format"] = team.references_format
    # --- Tools ---
    if team.tools and isinstance(team.tools, list):
        serialized_tools = []
        for tool in team.tools:
            try:
                if isinstance(tool, Function):
                    serialized_tools.append(tool.to_dict())
                elif isinstance(tool, Toolkit):
                    for func in tool.functions.values():
                        serialized_tools.append(func.to_dict())
                elif callable(tool):
                    func = Function.from_callable(tool)
                    serialized_tools.append(func.to_dict())
            except Exception as e:
                log_warning(f"Could not serialize tool {tool}: {e}")
        if serialized_tools:
            config["tools"] = serialized_tools
    if team.tool_choice is not None:
        config["tool_choice"] = team.tool_choice
    if team.tool_call_limit is not None:
        config["tool_call_limit"] = team.tool_call_limit
    if team.get_member_information_tool:
        config["get_member_information_tool"] = team.get_member_information_tool
    # --- Schema settings ---
    if team.input_schema is not None:
        # Guard with isinstance(..., type) before issubclass: issubclass raises
        # TypeError for a non-class argument (e.g. a dict schema), which would make
        # the dict branch unreachable. Mirrors the output_schema handling below.
        if isinstance(team.input_schema, type) and issubclass(team.input_schema, BaseModel):
            config["input_schema"] = team.input_schema.__name__
        elif isinstance(team.input_schema, dict):
            config["input_schema"] = team.input_schema
    if team.output_schema is not None:
        if isinstance(team.output_schema, type) and issubclass(team.output_schema, BaseModel):
            config["output_schema"] = team.output_schema.__name__
        elif isinstance(team.output_schema, dict):
            config["output_schema"] = team.output_schema
    # --- Parser and output settings ---
    if team.parser_model is not None:
        if isinstance(team.parser_model, Model):
            config["parser_model"] = team.parser_model.to_dict()
        else:
            config["parser_model"] = str(team.parser_model)
    if team.parser_model_prompt is not None:
        config["parser_model_prompt"] = team.parser_model_prompt
    if team.output_model is not None:
        if isinstance(team.output_model, Model):
            config["output_model"] = team.output_model.to_dict()
        else:
            config["output_model"] = str(team.output_model)
    if team.output_model_prompt is not None:
        config["output_model_prompt"] = team.output_model_prompt
    if team.use_json_mode:
        config["use_json_mode"] = team.use_json_mode
    if not team.parse_response:  # default is True
        config["parse_response"] = team.parse_response
    # --- Memory settings ---
    # TODO: implement memory manager serialization
    # if team.memory_manager is not None:
    #     config["memory_manager"] = team.memory_manager.to_dict()
    if team.enable_agentic_memory:
        config["enable_agentic_memory"] = team.enable_agentic_memory
    if team.enable_user_memories:
        config["enable_user_memories"] = team.enable_user_memories
    if team.add_memories_to_context is not None:
        config["add_memories_to_context"] = team.add_memories_to_context
    if team.enable_session_summaries:
        config["enable_session_summaries"] = team.enable_session_summaries
    if team.add_session_summary_to_context is not None:
        config["add_session_summary_to_context"] = team.add_session_summary_to_context
    # TODO: implement session summary manager serialization
    # if team.session_summary_manager is not None:
    #     config["session_summary_manager"] = team.session_summary_manager.to_dict()
    # --- Learning settings ---
    # Learning may be a bool flag or a LearningMachine-like object with to_dict()
    if team.learning is not None:
        if team.learning is True:
            config["learning"] = True
        elif team.learning is False:
            config["learning"] = False
        elif hasattr(team.learning, "to_dict"):
            config["learning"] = team.learning.to_dict()
        else:
            config["learning"] = True if team.learning else False
    if not team.add_learnings_to_context:  # default is True
        config["add_learnings_to_context"] = team.add_learnings_to_context
    # --- History settings ---
    if team.add_history_to_context:
        config["add_history_to_context"] = team.add_history_to_context
    if team.num_history_runs is not None:
        config["num_history_runs"] = team.num_history_runs
    if team.num_history_messages is not None:
        config["num_history_messages"] = team.num_history_messages
    if team.max_tool_calls_from_history is not None:
        config["max_tool_calls_from_history"] = team.max_tool_calls_from_history
    # --- Media/storage settings ---
    if not team.send_media_to_model:  # default is True
        config["send_media_to_model"] = team.send_media_to_model
    if not team.store_media:  # default is True
        config["store_media"] = team.store_media
    if not team.store_tool_messages:  # default is True
        config["store_tool_messages"] = team.store_tool_messages
    if team.store_history_messages:  # default is False
        config["store_history_messages"] = team.store_history_messages
    # --- Compression settings ---
    if team.compress_tool_results:
        config["compress_tool_results"] = team.compress_tool_results
    # TODO: implement compression manager serialization
    # if team.compression_manager is not None:
    #     config["compression_manager"] = team.compression_manager.to_dict()
    # --- Reasoning settings ---
    if team.reasoning:
        config["reasoning"] = team.reasoning
    # TODO: implement reasoning model serialization
    # if team.reasoning_model is not None:
    #     config["reasoning_model"] = team.reasoning_model.to_dict() if isinstance(team.reasoning_model, Model) else str(team.reasoning_model)
    if team.reasoning_min_steps != 1:  # default is 1
        config["reasoning_min_steps"] = team.reasoning_min_steps
    if team.reasoning_max_steps != 10:  # default is 10
        config["reasoning_max_steps"] = team.reasoning_max_steps
    # --- Streaming settings ---
    if team.stream is not None:
        config["stream"] = team.stream
    if team.stream_events is not None:
        config["stream_events"] = team.stream_events
    if not team.stream_member_events:  # default is True
        config["stream_member_events"] = team.stream_member_events
    if team.store_events:
        config["store_events"] = team.store_events
    if team.store_member_responses:
        config["store_member_responses"] = team.store_member_responses
    # --- Retry settings ---
    if team.retries > 0:
        config["retries"] = team.retries
    if team.delay_between_retries != 1:  # default is 1
        config["delay_between_retries"] = team.delay_between_retries
    if team.exponential_backoff:
        config["exponential_backoff"] = team.exponential_backoff
    # --- Metadata ---
    if team.metadata is not None:
        config["metadata"] = team.metadata
    # --- Debug and telemetry settings ---
    if team.debug_mode:
        config["debug_mode"] = team.debug_mode
    if team.debug_level != 1:  # default is 1
        config["debug_level"] = team.debug_level
    if team.show_members_responses:
        config["show_members_responses"] = team.show_members_responses
    if not team.telemetry:  # default is True
        config["telemetry"] = team.telemetry
    return config
def _deserialize_learning(value: Any) -> Any:
"""Deserialize a learning config value from to_dict output.
Returns True, False, None, or a LearningMachine instance.
"""
if value is None or value is True or value is False:
return value
if isinstance(value, dict):
from agno.learn.machine import LearningMachine
return LearningMachine.from_dict(value)
return value
def _parse_team_mode(value: Optional[str]) -> Optional["TeamMode"]:
"""Parse a string into a TeamMode enum, returning None if not provided."""
if value is None:
return None
from agno.team.mode import TeamMode
return TeamMode(value)
def from_dict(
    cls,
    data: Dict[str, Any],
    db: Optional["BaseDb"] = None,
    registry: Optional["Registry"] = None,
) -> "Team":
    """
    Create a Team from a dictionary (the inverse of to_dict).

    Args:
        data: Dictionary containing team configuration
        db: Optional database for loading agents in members; members are
            skipped with a warning when no db is available
        registry: Optional registry for rehydrating tools, dbs, and schemas

    Returns:
        Team: Reconstructed team instance
    """
    # Work on a copy so the caller's dict is never mutated.
    config = data.copy()

    # --- Handle Model reconstruction ---
    if "model" in config:
        model_data = config["model"]
        if isinstance(model_data, dict) and "id" in model_data:
            config["model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
        elif isinstance(model_data, str):
            config["model"] = get_model(model_data)

    # --- Handle Members reconstruction ---
    # Members are stored as {"type": ..., "agent_id"/"team_id": ...} references
    # and are reloaded from the db; unloadable members are skipped with a warning.
    members: Optional[List[Union[Agent, "Team"]]] = None
    from agno.agent import get_agent_by_id
    from agno.team import get_team_by_id

    if "members" in config and config["members"]:
        members = []
        for member_data in config["members"]:
            member_type = member_data.get("type")
            if member_type == "agent":
                # TODO: Make sure to pass the correct version to get_agent_by_id. Right now its returning the latest version.
                if db is None:
                    log_warning(f"Cannot load member agent {member_data['agent_id']}: db is None")
                    continue
                agent = get_agent_by_id(id=member_data["agent_id"], db=db, registry=registry)
                if agent:
                    members.append(agent)
                else:
                    log_warning(f"Agent not found: {member_data['agent_id']}")
            elif member_type == "team":
                # Handle nested teams as members
                if db is None:
                    log_warning(f"Cannot load member team {member_data['team_id']}: db is None")
                    continue
                nested_team = get_team_by_id(id=member_data["team_id"], db=db, registry=registry)
                if nested_team:
                    members.append(nested_team)
                else:
                    log_warning(f"Team not found: {member_data['team_id']}")

    # --- Handle reasoning_model reconstruction ---
    # TODO: implement reasoning model deserialization
    # if "reasoning_model" in config:
    #     model_data = config["reasoning_model"]
    #     if isinstance(model_data, dict) and "id" in model_data:
    #         config["reasoning_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
    #     elif isinstance(model_data, str):
    #         config["reasoning_model"] = get_model(model_data)

    # --- Handle parser_model reconstruction ---
    # TODO: implement parser model deserialization
    # if "parser_model" in config:
    #     model_data = config["parser_model"]
    #     if isinstance(model_data, dict) and "id" in model_data:
    #         config["parser_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
    #     elif isinstance(model_data, str):
    #         config["parser_model"] = get_model(model_data)

    # --- Handle output_model reconstruction ---
    # TODO: implement output model deserialization
    # if "output_model" in config:
    #     model_data = config["output_model"]
    #     if isinstance(model_data, dict) and "id" in model_data:
    #         config["output_model"] = get_model(f"{model_data['provider']}:{model_data['id']}")
    #     elif isinstance(model_data, str):
    #         config["output_model"] = get_model(model_data)

    # --- Handle tools reconstruction ---
    # Serialized tools can only be turned back into callables via the registry.
    if "tools" in config and config["tools"]:
        if registry:
            config["tools"] = [registry.rehydrate_function(t) for t in config["tools"]]
        else:
            log_warning("No registry provided, tools will not be rehydrated.")
            del config["tools"]

    # --- Handle DB reconstruction ---
    if "db" in config and isinstance(config["db"], dict):
        db_data = config["db"]
        db_id = db_data.get("id")
        # First try to get the db from the registry (preferred - reuses existing connection)
        if registry and db_id:
            registry_db = registry.get_db(db_id)
            if registry_db is not None:
                config["db"] = registry_db
            else:
                del config["db"]
        else:
            # No registry or no db_id, fall back to creating from dict
            config["db"] = db_from_dict(db_data)
            if config["db"] is None:
                del config["db"]

    # --- Handle Schema reconstruction ---
    # Schemas are stored by name; they can only be resolved through the registry.
    if "input_schema" in config and isinstance(config["input_schema"], str):
        schema_cls = registry.get_schema(config["input_schema"]) if registry else None
        if schema_cls:
            config["input_schema"] = schema_cls
        else:
            log_warning(f"Input schema {config['input_schema']} not found in registry, skipping.")
            del config["input_schema"]
    if "output_schema" in config and isinstance(config["output_schema"], str):
        schema_cls = registry.get_schema(config["output_schema"]) if registry else None
        if schema_cls:
            config["output_schema"] = schema_cls
        else:
            log_warning(f"Output schema {config['output_schema']} not found in registry, skipping.")
            del config["output_schema"]

    # --- Handle MemoryManager reconstruction ---
    # TODO: implement memory manager deserialization
    # if "memory_manager" in config and isinstance(config["memory_manager"], dict):
    #     from agno.memory import MemoryManager
    #     config["memory_manager"] = MemoryManager.from_dict(config["memory_manager"])

    # --- Handle SessionSummaryManager reconstruction ---
    # TODO: implement session summary manager deserialization
    # if "session_summary_manager" in config and isinstance(config["session_summary_manager"], dict):
    #     from agno.session import SessionSummaryManager
    #     config["session_summary_manager"] = SessionSummaryManager.from_dict(config["session_summary_manager"])

    # --- Handle Knowledge reconstruction ---
    # TODO: implement knowledge deserialization
    # if "knowledge" in config and isinstance(config["knowledge"], dict):
    #     from agno.knowledge import Knowledge
    #     config["knowledge"] = Knowledge.from_dict(config["knowledge"])

    # --- Handle CompressionManager reconstruction ---
    # TODO: implement compression manager deserialization
    # if "compression_manager" in config and isinstance(config["compression_manager"], dict):
    #     from agno.compression.manager import CompressionManager
    #     config["compression_manager"] = CompressionManager.from_dict(config["compression_manager"])

    # Build the Team; defaults below mirror the constructor so that keys
    # omitted by to_dict (because they held default values) round-trip.
    team = cast(
        "Team",
        cls(
            # --- Team settings ---
            id=config.get("id"),
            name=config.get("name"),
            role=config.get("role"),
            description=config.get("description"),
            # --- Model ---
            model=config.get("model"),
            # --- Members ---
            members=members or [],
            # --- Mode ---
            mode=_parse_team_mode(config.get("mode")),
            max_iterations=config.get("max_iterations", 10),
            # --- Execution settings ---
            respond_directly=config.get("respond_directly", False),
            delegate_to_all_members=config.get("delegate_to_all_members", False),
            determine_input_for_members=config.get("determine_input_for_members", True),
            # --- User settings ---
            user_id=config.get("user_id"),
            # --- Session settings ---
            session_id=config.get("session_id"),
            session_state=config.get("session_state"),
            add_session_state_to_context=config.get("add_session_state_to_context", False),
            enable_agentic_state=config.get("enable_agentic_state", False),
            overwrite_db_session_state=config.get("overwrite_db_session_state", False),
            cache_session=config.get("cache_session", False),
            add_team_history_to_members=config.get("add_team_history_to_members", False),
            num_team_history_runs=config.get("num_team_history_runs", 3),
            share_member_interactions=config.get("share_member_interactions", False),
            search_session_history=config.get("search_session_history", False),
            num_history_sessions=config.get("num_history_sessions"),
            read_chat_history=config.get("read_chat_history", False),
            # --- System message settings ---
            system_message=config.get("system_message"),
            system_message_role=config.get("system_message_role", "system"),
            introduction=config.get("introduction"),
            instructions=config.get("instructions"),
            expected_output=config.get("expected_output"),
            additional_context=config.get("additional_context"),
            markdown=config.get("markdown", False),
            add_datetime_to_context=config.get("add_datetime_to_context", False),
            add_location_to_context=config.get("add_location_to_context", False),
            timezone_identifier=config.get("timezone_identifier"),
            add_name_to_context=config.get("add_name_to_context", False),
            add_member_tools_to_context=config.get("add_member_tools_to_context", False),
            resolve_in_context=config.get("resolve_in_context", True),
            # --- Database settings ---
            db=config.get("db"),
            # --- Dependencies ---
            dependencies=config.get("dependencies"),
            add_dependencies_to_context=config.get("add_dependencies_to_context", False),
            # --- Knowledge settings ---
            # knowledge=config.get("knowledge"),  # TODO
            knowledge_filters=config.get("knowledge_filters"),
            enable_agentic_knowledge_filters=config.get("enable_agentic_knowledge_filters", False),
            add_knowledge_to_context=config.get("add_knowledge_to_context", False),
            update_knowledge=config.get("update_knowledge", False),
            search_knowledge=config.get("search_knowledge", True),
            add_search_knowledge_instructions=config.get("add_search_knowledge_instructions", True),
            references_format=config.get("references_format", "json"),
            # --- Tools ---
            tools=config.get("tools"),
            tool_call_limit=config.get("tool_call_limit"),
            tool_choice=config.get("tool_choice"),
            get_member_information_tool=config.get("get_member_information_tool", False),
            # --- Schema settings ---
            input_schema=config.get("input_schema"),
            output_schema=config.get("output_schema"),
            # --- Parser and output settings ---
            # parser_model=config.get("parser_model"),  # TODO
            parser_model_prompt=config.get("parser_model_prompt"),
            # output_model=config.get("output_model"),  # TODO
            output_model_prompt=config.get("output_model_prompt"),
            use_json_mode=config.get("use_json_mode", False),
            parse_response=config.get("parse_response", True),
            # --- Memory settings ---
            # memory_manager=config.get("memory_manager"),  # TODO
            enable_agentic_memory=config.get("enable_agentic_memory", False),
            enable_user_memories=config.get("enable_user_memories"),
            add_memories_to_context=config.get("add_memories_to_context"),
            enable_session_summaries=config.get("enable_session_summaries", False),
            add_session_summary_to_context=config.get("add_session_summary_to_context"),
            # session_summary_manager=config.get("session_summary_manager"),  # TODO
            # --- Learning settings ---
            learning=_deserialize_learning(config.get("learning")),
            add_learnings_to_context=config.get("add_learnings_to_context", True),
            # --- History settings ---
            add_history_to_context=config.get("add_history_to_context", False),
            num_history_runs=config.get("num_history_runs"),
            num_history_messages=config.get("num_history_messages"),
            max_tool_calls_from_history=config.get("max_tool_calls_from_history"),
            # --- Compression settings ---
            compress_tool_results=config.get("compress_tool_results", False),
            # compression_manager=config.get("compression_manager"),  # TODO
            # --- Reasoning settings ---
            reasoning=config.get("reasoning", False),
            # reasoning_model=config.get("reasoning_model"),  # TODO
            reasoning_min_steps=config.get("reasoning_min_steps", 1),
            reasoning_max_steps=config.get("reasoning_max_steps", 10),
            # --- Streaming settings ---
            stream=config.get("stream"),
            stream_events=config.get("stream_events"),
            stream_member_events=config.get("stream_member_events", True),
            store_events=config.get("store_events", False),
            store_member_responses=config.get("store_member_responses", False),
            # --- Media settings ---
            send_media_to_model=config.get("send_media_to_model", True),
            store_media=config.get("store_media", True),
            store_tool_messages=config.get("store_tool_messages", True),
            store_history_messages=config.get("store_history_messages", False),
            # --- Retry settings ---
            retries=config.get("retries", 0),
            delay_between_retries=config.get("delay_between_retries", 1),
            exponential_backoff=config.get("exponential_backoff", False),
            # --- Metadata ---
            metadata=config.get("metadata"),
            # --- Debug and telemetry settings ---
            debug_mode=config.get("debug_mode", False),
            debug_level=config.get("debug_level", 1),
            show_members_responses=config.get("show_members_responses", False),
            telemetry=config.get("telemetry", True),
        ),
    )
    return team
def save(
    team: "Team",
    *,
    db: Optional["BaseDb"] = None,
    stage: str = "published",
    label: Optional[str] = None,
    notes: Optional[str] = None,
) -> Optional[int]:
    """
    Save the team component and config to the database, including member agents/teams.

    Members are saved first so their resulting versions can be recorded as
    links on the team's own config row.

    Args:
        db: The database to save the component and config to. Falls back to team.db.
        stage: The stage of the component. Defaults to "published".
        label: The label of the component.
        notes: The notes of the component.

    Returns:
        Optional[int]: The version number of the saved config.

    Raises:
        ValueError: If no database is available or the database is async.
    """
    from agno.agent.agent import Agent

    db_ = db or team.db
    if not db_:
        raise ValueError("Db not initialized or provided")
    if not isinstance(db_, BaseDb):
        raise ValueError("Async databases not yet supported for save(). Use a sync database.")

    # Derive a stable id from the name when none was set explicitly.
    if team.id is None:
        team.id = generate_id_from_name(team.name)

    try:
        # Collect all links for members
        all_links: List[Dict[str, Any]] = []

        # Save each member (Agent or nested Team) and collect links
        # Only iterate if members is a static list (not a callable factory)
        members_list = team.members if isinstance(team.members, list) else []
        for position, member in enumerate(members_list):
            # Save member first - returns version
            member_version = member.save(db=db_, stage=stage, label=label, notes=notes)

            # Add link
            all_links.append(
                {
                    "link_kind": "member",
                    "link_key": f"member_{position}",
                    "child_component_id": member.id,
                    "child_version": member_version,
                    "position": position,
                    "meta": {"type": "agent" if isinstance(member, Agent) else "team"},
                }
            )

        # Create or update component
        db_.upsert_component(
            component_id=team.id,
            component_type=ComponentType.TEAM,
            name=getattr(team, "name", team.id),
            description=getattr(team, "description", None),
            metadata=getattr(team, "metadata", None),
        )

        # Create or update config with links
        config = db_.upsert_config(
            component_id=team.id,
            config=team.to_dict(),
            links=all_links if all_links else None,
            label=label,
            stage=stage,
            notes=notes,
        )
        return config["version"]
    except Exception as e:
        log_error(f"Error saving Team to database: {e}")
        raise
def _hydrate_from_graph(
    cls,
    graph: Dict[str, Any],
    *,
    db: "BaseDb",
    registry: Optional["Registry"] = None,
) -> Optional["Team"]:
    """
    Rebuild a team and its members from a component graph that is already in memory.

    Working from the graph avoids extra DB queries for nested teams whose
    data was fetched alongside the parent.
    """
    from agno.agent.agent import Agent

    team_config = graph["config"].get("config")
    if team_config is None:
        return None

    team = cls.from_dict(team_config, db=db, registry=registry)
    team.id = graph["component"]["component_id"]
    team.db = db

    # Rebuild the member list from the graph's child entries.
    team.members = []
    for child in graph.get("children", []):
        subgraph = child.get("graph")
        if subgraph is None:
            continue
        member_config = subgraph["config"].get("config")
        if member_config is None:
            continue

        member_kind = child["link"].get("meta", {}).get("type")
        if member_kind == "agent":
            member = Agent.from_dict(member_config)
            member.id = subgraph["component"]["component_id"]
            member.db = db
            team.members.append(member)
        elif member_kind == "team":
            # Recursively hydrate nested teams from the already-loaded child graph
            hydrated = _hydrate_from_graph(cls, subgraph, db=db, registry=registry)
            if hydrated:
                team.members.append(hydrated)
    return team
def load(
    cls,
    id: str,
    *,
    db: "BaseDb",
    registry: Optional["Registry"] = None,
    label: Optional[str] = None,
    version: Optional[int] = None,
) -> Optional["Team"]:
    """
    Load a team by id, with hydrated members.

    Args:
        id: The id of the team to load.
        db: The database to load the team from.
        registry: Optional registry for rehydrating tools.
        label: The label of the team to load.
        version: Optional specific config version to load.

    Returns:
        The hydrated team, or None if no matching component exists.
    """
    # A single graph query fetches the team together with all of its members.
    graph = db.load_component_graph(id, version=version, label=label)
    if graph is None:
        return None
    return _hydrate_from_graph(cls, graph, db=db, registry=registry)
def delete(
    team: "Team",
    *,
    db: Optional["BaseDb"] = None,
    hard_delete: bool = False,
) -> bool:
    """
    Delete the team's component record from the database.

    Args:
        db: Database to delete from; falls back to the team's own db.
        hard_delete: Whether to hard delete the component.

    Returns:
        True if the component was deleted, False otherwise.
    """
    target_db = db or team.db
    if not target_db:
        raise ValueError("Db not initialized or provided")
    if not isinstance(target_db, BaseDb):
        raise ValueError("Async databases not yet supported for delete(). Use a sync database.")
    if team.id is None:
        raise ValueError("Cannot delete team without an id")
    return target_db.delete_component(component_id=team.id, hard_delete=hard_delete)
def get_session_metrics(team: "Team", session_id: Optional[str] = None):
    """Return metrics for the given session, defaulting to the team's current session."""
    resolved_session_id = session_id or team.session_id
    if resolved_session_id is None:
        raise Exception("Session ID is not set")
    return get_session_metrics_util(team, session_id=resolved_session_id)
async def aget_session_metrics(team: "Team", session_id: Optional[str] = None):
    """Async variant: return metrics for the given session, defaulting to the team's current session."""
    resolved_session_id = session_id or team.session_id
    if resolved_session_id is None:
        raise Exception("Session ID is not set")
    return await aget_session_metrics_util(team, session_id=resolved_session_id)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_storage.py",
"license": "Apache License 2.0",
"lines": 1027,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_telemetry.py | """Telemetry logging helpers for Team."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Optional
from agno.utils.log import log_debug
if TYPE_CHECKING:
from agno.team.team import Team
def get_team_data(team: "Team") -> Dict[str, Any]:
    """Collect the team's identifying fields for telemetry payloads.

    Only fields that are actually set on the team are included.
    """
    payload: Dict[str, Any] = {}
    if team.name is not None:
        payload["name"] = team.name
    if team.id is not None:
        payload["team_id"] = team.id
    if team.model is not None:
        payload["model"] = team.model.to_dict()
    return payload
def get_telemetry_data(team: "Team") -> Dict[str, Any]:
    """Build the telemetry payload describing this team's configuration."""
    model = team.model
    return {
        "team_id": team.id,
        "db_type": type(team.db).__name__ if team.db else None,
        "model_provider": model.provider if model else None,
        "model_name": model.name if model else None,
        "model_id": model.id if model else None,
        "parser_model": team.parser_model.to_dict() if team.parser_model else None,
        "output_model": team.output_model.to_dict() if team.output_model else None,
        # A callable member factory cannot be counted, hence the list check.
        "member_count": len(team.members) if isinstance(team.members, list) else 0,
        "has_knowledge": team.knowledge is not None,
        "has_tools": team.tools is not None,
        "has_learnings": team._learning is not None,
    }
def log_team_telemetry(team: "Team", session_id: str, run_id: Optional[str] = None) -> None:
    """Send a telemetry event to the API for a created Team run"""
    from agno.team._init import _set_telemetry

    _set_telemetry(team)
    if not team.telemetry:
        return

    from agno.api.team import TeamRunCreate, create_team_run

    try:
        payload = TeamRunCreate(session_id=session_id, run_id=run_id, data=get_telemetry_data(team))
        create_team_run(run=payload)
    except Exception as e:
        # Telemetry must never break a run; just log the failure.
        log_debug(f"Could not create Team run telemetry event: {e}")
async def alog_team_telemetry(team: "Team", session_id: str, run_id: Optional[str] = None) -> None:
    """Send a telemetry event to the API for a created Team async run"""
    from agno.team._init import _set_telemetry

    _set_telemetry(team)
    if not team.telemetry:
        return

    from agno.api.team import TeamRunCreate, acreate_team_run

    try:
        payload = TeamRunCreate(session_id=session_id, run_id=run_id, data=get_telemetry_data(team))
        await acreate_team_run(run=payload)
    except Exception as e:
        # Telemetry must never break a run; just log the failure.
        log_debug(f"Could not create Team run telemetry event: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_telemetry.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_tools.py | """Tool selection and resolution for Team."""
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agno.team.team import Team
from copy import copy, deepcopy
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
)
from pydantic import BaseModel
from agno.agent import Agent
from agno.media import Audio, File, Image, Video
from agno.models.base import Model
from agno.models.message import Message
from agno.run import RunContext
from agno.run.agent import RunOutput
from agno.run.team import (
TeamRunOutput,
)
from agno.session import TeamSession
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.agent import (
collect_joint_audios,
collect_joint_files,
collect_joint_images,
collect_joint_videos,
)
from agno.utils.log import (
log_debug,
log_warning,
)
from agno.utils.team import (
get_member_id,
get_team_member_interactions_str,
get_team_run_context_audio,
get_team_run_context_files,
get_team_run_context_images,
get_team_run_context_videos,
)
async def _aresolve_callable_resources(team: "Team", run_context: "RunContext") -> None:
    """Resolve all callable factories (tools, knowledge, members) asynchronously."""
    from agno.utils.callables import aresolve_callable_knowledge, aresolve_callable_members, aresolve_callable_tools

    # Tools first, then knowledge, then members — same order as the sync path.
    for resolver in (aresolve_callable_tools, aresolve_callable_knowledge, aresolve_callable_members):
        await resolver(team, run_context)
async def _check_and_refresh_mcp_tools(team: "Team") -> None:
    """Reconnect stale MCP tool connections and rebuild their tool lists.

    Only inspects a static ``team.tools`` list; callable tool factories are
    not resolved here.
    """
    # Connect MCP tools
    from agno.team._init import _connect_mcp_tools

    await _connect_mcp_tools(
        team,
    )

    # Add provided tools - only if tools is a static list
    if team.tools is not None and isinstance(team.tools, list):
        for tool in team.tools:
            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
            if hasattr(type(tool), "__mro__") and any(
                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
            ):
                if tool.refresh_connection:  # type: ignore
                    try:
                        is_alive = await tool.is_alive()  # type: ignore
                        if not is_alive:
                            # Force a reconnect when the connection is dead.
                            await tool.connect(force=True)  # type: ignore
                    except (RuntimeError, BaseException) as e:
                        # NOTE(review): BaseException also catches CancelledError/KeyboardInterrupt here — confirm intent.
                        log_warning(f"Failed to check if MCP tool is alive: {e}")
                        continue
                    try:
                        await tool.build_tools()  # type: ignore
                    except (RuntimeError, BaseException) as e:
                        log_warning(f"Failed to build tools for {str(tool)}: {e}")
                        continue
def _determine_tools_for_model(
    team: "Team",
    model: Model,
    run_response: TeamRunOutput,
    run_context: RunContext,
    team_run_context: Dict[str, Any],
    session: TeamSession,
    user_id: Optional[str] = None,
    async_mode: bool = False,
    input_message: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    audio: Optional[Sequence[Audio]] = None,
    files: Optional[Sequence[File]] = None,
    debug_mode: Optional[bool] = None,
    add_history_to_context: Optional[bool] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    stream: Optional[bool] = None,
    stream_events: Optional[bool] = None,
    check_mcp_tools: bool = True,
) -> List[Union[Function, dict]]:
    """Assemble and process the full list of functions passed to the model.

    Gathers user-provided tools, built-in team tools (chat history, memory,
    learning, session state, knowledge search), and either task-management
    tools (tasks mode) or the delegate-task function (member delegation),
    then converts everything into processed Function objects.
    """
    # Connect tools that require connection management
    from functools import partial

    from agno.team._default_tools import (
        _get_chat_history_function,
        _get_delegate_task_function,
        _get_previous_sessions_messages_function,
        _get_update_user_memory_function,
        _update_session_state_tool,
        create_knowledge_search_tool,
    )
    from agno.team._init import _connect_connectable_tools
    from agno.team._messages import _get_user_message
    from agno.utils.callables import (
        get_resolved_knowledge,
        get_resolved_members,
        get_resolved_tools,
        resolve_callable_knowledge,
        resolve_callable_members,
        resolve_callable_tools,
    )

    # In sync mode, resolve callable factories now
    if not async_mode:
        resolve_callable_tools(team, run_context)
        resolve_callable_knowledge(team, run_context)
        resolve_callable_members(team, run_context)

    resolved_tools = get_resolved_tools(team, run_context)
    resolved_knowledge = get_resolved_knowledge(team, run_context)
    resolved_members = get_resolved_members(team, run_context)

    _connect_connectable_tools(
        team,
    )

    # Prepare tools
    _tools: List[Union[Toolkit, Callable, Function, Dict]] = []

    # Add provided tools
    if resolved_tools is not None:
        for tool in resolved_tools:
            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
            if hasattr(type(tool), "__mro__") and any(
                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
            ):
                # Only add the tool if it successfully connected and built its tools
                if check_mcp_tools and not tool.initialized:  # type: ignore
                    continue
            _tools.append(tool)

    if team.read_chat_history:
        _tools.append(_get_chat_history_function(team, session=session, async_mode=async_mode))
    if team.memory_manager is not None and team.enable_agentic_memory:
        _tools.append(_get_update_user_memory_function(team, user_id=user_id, async_mode=async_mode))

    # Add learning machine tools
    if team._learning is not None:
        learning_tools = team._learning.get_tools(
            user_id=user_id,
            session_id=session.session_id if session else None,
            team_id=team.id,
        )
        _tools.extend(learning_tools)

    if team.enable_agentic_state:
        _tools.append(Function(name="update_session_state", entrypoint=partial(_update_session_state_tool, team)))
    if team.search_session_history:
        _tools.append(
            _get_previous_sessions_messages_function(
                team, num_history_sessions=team.num_history_sessions, user_id=user_id, async_mode=async_mode
            )
        )

    # Add tools for accessing knowledge
    # Single unified path through get_relevant_docs_from_knowledge(),
    # which checks knowledge_retriever first, then falls back to knowledge.search().
    if (resolved_knowledge is not None or team.knowledge_retriever is not None) and team.search_knowledge:
        _tools.append(
            create_knowledge_search_tool(
                team,
                run_response=run_response,
                run_context=run_context,
                knowledge_filters=run_context.knowledge_filters,
                enable_agentic_filters=team.enable_agentic_knowledge_filters,
                async_mode=async_mode,
            )
        )
    if resolved_knowledge is not None and team.update_knowledge:
        _tools.append(team.add_to_knowledge)

    from agno.team.mode import TeamMode

    if team.mode == TeamMode.tasks:
        # Tasks mode: provide task management tools instead of delegation tools
        from agno.team._task_tools import _get_task_management_tools
        from agno.team.task import load_task_list

        _task_list = load_task_list(run_context.session_state)
        task_tools = _get_task_management_tools(
            team=team,
            task_list=_task_list,
            run_response=run_response,
            run_context=run_context,
            session=session,
            team_run_context=team_run_context,
            user_id=user_id,
            stream=stream or False,
            stream_events=stream_events or False,
            async_mode=async_mode,
            images=images,  # type: ignore
            videos=videos,  # type: ignore
            audio=audio,  # type: ignore
            files=files,  # type: ignore
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            debug_mode=debug_mode,
        )
        _tools.extend(task_tools)
    elif resolved_members:
        # Get the user message if we are using the input directly
        user_message_content = None
        if team.determine_input_for_members is False:
            user_message = _get_user_message(
                team,
                run_response=run_response,
                run_context=run_context,
                input_message=input_message,
                user_id=user_id,
                audio=audio,
                images=images,
                videos=videos,
                files=files,
                add_dependencies_to_context=add_dependencies_to_context,
            )
            user_message_content = user_message.content if user_message is not None else None

        delegate_task_func = _get_delegate_task_function(
            team,
            run_response=run_response,
            run_context=run_context,
            session=session,
            team_run_context=team_run_context,
            input=user_message_content,
            user_id=user_id,
            stream=stream or False,
            stream_events=stream_events or False,
            async_mode=async_mode,
            images=images,  # type: ignore
            videos=videos,  # type: ignore
            audio=audio,  # type: ignore
            files=files,  # type: ignore
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            debug_mode=debug_mode,
        )
        _tools.append(delegate_task_func)

    if team.get_member_information_tool:
        _tools.append(team.get_member_information)

    # Get Agent tools
    if len(_tools) > 0:
        log_debug("Processing tools for model")

    _function_names = []
    _functions: List[Union[Function, dict]] = []
    team._tool_instructions = []

    # Get output_schema from run_context
    output_schema = run_context.output_schema if run_context else None

    # Check if we need strict mode for the model
    strict = False
    if output_schema is not None and not team.use_json_mode and model.supports_native_structured_outputs:
        strict = True

    for tool in _tools:
        if isinstance(tool, Dict):
            # If a dict is passed, it is a builtin tool
            # that is run by the model provider and not the Agent
            _functions.append(tool)
            log_debug(f"Included builtin tool {tool}")
        elif isinstance(tool, Toolkit):
            # For each function in the toolkit and process entrypoint
            toolkit_functions = tool.get_async_functions() if async_mode else tool.get_functions()
            for name, _func in toolkit_functions.items():
                # Skip duplicates by name across all tool sources.
                if name in _function_names:
                    continue

                _function_names.append(name)
                # Deep copy so per-run state never leaks into the shared toolkit.
                _func = _func.model_copy(deep=True)
                _func._team = team
                # Respect the function's explicit strict setting if set
                effective_strict = strict if _func.strict is None else _func.strict
                _func.process_entrypoint(strict=effective_strict)
                if strict and _func.strict is None:
                    _func.strict = True
                if team.tool_hooks:
                    _func.tool_hooks = team.tool_hooks
                _functions.append(_func)
                log_debug(f"Added tool {_func.name} from {tool.name}")

            # Add instructions from the toolkit
            if tool.add_instructions and tool.instructions is not None:
                if team._tool_instructions is None:
                    team._tool_instructions = []
                team._tool_instructions.append(tool.instructions)
        elif isinstance(tool, Function):
            if tool.name in _function_names:
                continue

            _function_names.append(tool.name)
            tool = tool.model_copy(deep=True)
            tool._team = team
            # Respect the function's explicit strict setting if set
            effective_strict = strict if tool.strict is None else tool.strict
            tool.process_entrypoint(strict=effective_strict)
            if strict and tool.strict is None:
                tool.strict = True
            if team.tool_hooks:
                tool.tool_hooks = team.tool_hooks
            _functions.append(tool)
            log_debug(f"Added tool {tool.name}")

            # Add instructions from the Function
            if tool.add_instructions and tool.instructions is not None:
                if team._tool_instructions is None:
                    team._tool_instructions = []
                team._tool_instructions.append(tool.instructions)
        elif callable(tool):
            # We add the tools, which are callable functions
            try:
                _func = Function.from_callable(tool, strict=strict)
                _func = _func.model_copy(deep=True)
                if _func.name in _function_names:
                    continue

                _function_names.append(_func.name)
                _func._team = team
                if strict:
                    _func.strict = True
                if team.tool_hooks:
                    _func.tool_hooks = team.tool_hooks
                _functions.append(_func)
                log_debug(f"Added tool {_func.name}")
            except Exception as e:
                log_warning(f"Could not add tool {tool}: {e}")

    if _functions:
        from inspect import signature

        # Check if any functions need media before collecting
        needs_media = any(
            any(param in signature(func.entrypoint).parameters for param in ["images", "videos", "audios", "files"])
            for func in _functions
            if isinstance(func, Function) and func.entrypoint is not None
        )

        # Only collect media if functions actually need them
        joint_images = collect_joint_images(run_response.input, session) if needs_media else None  # type: ignore
        joint_files = collect_joint_files(run_response.input) if needs_media else None  # type: ignore
        joint_audios = collect_joint_audios(run_response.input, session) if needs_media else None  # type: ignore
        joint_videos = collect_joint_videos(run_response.input, session) if needs_media else None  # type: ignore

        # Attach run context and collected media to every processed Function.
        for func in _functions:  # type: ignore
            if isinstance(func, Function):
                func._run_context = run_context
                func._images = joint_images
                func._files = joint_files
                func._audios = joint_audios
                func._videos = joint_videos

    return _functions
def get_member_information(team: "Team", run_context: Optional["RunContext"] = None) -> str:
    """Get information about the members of the team, including their IDs, names, and roles."""
    # Reuses the system-message member listing, rendered without indentation.
    member_overview = team.get_members_system_message_content(indent=0, run_context=run_context)
    return member_overview
def _get_history_for_member_agent(
    team: "Team", session: TeamSession, member_agent: Union[Agent, "Team"]
) -> List[Message]:
    """Fetch this member's prior messages from the session, tagged as history."""
    from agno.team.team import Team

    log_debug(f"Adding messages from history for {member_agent.name}")

    agent_id = member_agent.id if isinstance(member_agent, Agent) else None
    team_id = member_agent.id if isinstance(member_agent, Team) else None
    if not agent_id and not team_id:
        return []

    # Only skip messages from history when system_message_role is NOT a standard conversation role.
    # Standard conversation roles ("user", "assistant", "tool") should never be filtered
    # to preserve conversation continuity.
    skip_role = team.system_message_role if team.system_message_role not in ["user", "assistant", "tool"] else None

    history = session.get_messages(
        last_n_runs=member_agent.num_history_runs or team.num_history_runs,
        limit=member_agent.num_history_messages,
        skip_roles=[skip_role] if skip_role else None,
        member_ids=[agent_id] if agent_id else None,
        team_id=team_id,
    )
    if not history:
        return []

    # Deep-copy so tagging does not mutate the session's stored messages.
    tagged_history = [deepcopy(msg) for msg in history]
    for message in tagged_history:
        message.from_history = True
    return tagged_history
def _determine_team_member_interactions(
    team: "Team",
    team_run_context: Dict[str, Any],
    images: List[Image],
    videos: List[Video],
    audio: List[Audio],
    files: List[File],
) -> Optional[str]:
    """Gather shared member interactions and fold member-produced media into the given lists.

    When ``team.share_member_interactions`` is disabled, nothing happens and None
    is returned. Otherwise the textual interaction log is built and any media
    produced by members is appended to the caller-owned lists in place.
    """
    if not team.share_member_interactions:
        return None
    interactions_str = get_team_member_interactions_str(team_run_context=team_run_context)  # type: ignore
    # Append member-produced media into the caller's lists (mutated in place).
    for bucket, getter in (
        (images, get_team_run_context_images),
        (videos, get_team_run_context_videos),
        (audio, get_team_run_context_audio),
        (files, get_team_run_context_files),
    ):
        context_media = getter(team_run_context=team_run_context)  # type: ignore
        if context_media:
            bucket.extend(context_media)
    return interactions_str
def _find_member_by_id(
    team: "Team", member_id: str, run_context: Optional["RunContext"] = None
) -> Optional[Tuple[int, Union[Agent, "Team"]]]:
    """Recursively locate a member (agent or sub-team) by its URL-safe ID.

    Args:
        team: The team whose members are searched.
        member_id (str): URL-safe ID of the member to find.
        run_context: Optional RunContext for resolving callable members.

    Returns:
        Optional[Tuple[int, Union[Agent, "Team"]]]: The member's index within its
        immediate parent's members list and the member itself, or None when no
        member matches.
    """
    from agno.team.team import Team
    from agno.utils.callables import get_resolved_members

    members = get_resolved_members(team, run_context)
    if members is None:
        return None
    for index, candidate in enumerate(members):
        # Direct match at this level wins.
        if get_member_id(candidate) == member_id:
            return index, candidate
        # Otherwise descend into sub-teams looking for a nested match.
        if isinstance(candidate, Team):
            nested = candidate._find_member_by_id(member_id, run_context=run_context)
            if nested is not None:
                return nested
    return None
def _find_member_route_by_id(
    team: "Team", member_id: str, run_context: Optional[RunContext] = None
) -> Optional[Tuple[int, Union[Agent, "Team"]]]:
    """Find the routable member for continue_run dispatching.

    Unlike ``_find_member_by_id``, a match nested inside a sub-team resolves to
    the top-level sub-team itself, so callers can route through that sub-team's
    own continue_run path.

    Args:
        team: The team to search in.
        member_id (str): URL-safe ID of the member to find.
        run_context: Optional RunContext for resolving callable members.

    Returns:
        Optional[Tuple[int, Union[Agent, "Team"]]]: Index of the direct member in
        this team's members list and the direct member (the parent sub-team for
        nested matches), or None when nothing matches.
    """
    from agno.team.team import Team
    from agno.utils.callables import get_resolved_members

    members = get_resolved_members(team, run_context)
    if members is None:
        return None
    for index, candidate in enumerate(members):
        if get_member_id(candidate) == member_id:
            return index, candidate
        # A nested hit inside a sub-team routes to the sub-team itself.
        if isinstance(candidate, Team) and candidate._find_member_by_id(member_id, run_context=run_context) is not None:
            return index, candidate
    return None
def _propagate_member_pause(
    run_response: TeamRunOutput,
    member_agent: Union[Agent, "Team"],
    member_run_response: Union[RunOutput, TeamRunOutput],
) -> None:
    """Copy HITL requirements from a paused member run onto the team run response.

    Each requirement is shallow-copied, with its mutable sub-objects deep-copied
    so later edits do not leak back into the member's run. Member identity
    fields are filled in only when not already set.
    """
    if not member_run_response.requirements:
        return
    if run_response.requirements is None:
        run_response.requirements = []
    member_id = get_member_id(member_agent)
    for requirement in member_run_response.requirements:
        propagated = copy(requirement)
        # Deep-copy mutable fields to avoid shared state with the original.
        if propagated.tool_execution is not None:
            propagated.tool_execution = deepcopy(propagated.tool_execution)
        if propagated.user_input_schema is not None:
            propagated.user_input_schema = deepcopy(propagated.user_input_schema)
        # Attribute the requirement to this member unless already attributed.
        if propagated.member_agent_id is None:
            propagated.member_agent_id = member_id
        if propagated.member_agent_name is None:
            propagated.member_agent_name = member_agent.name
        if propagated.member_run_id is None:
            propagated.member_run_id = member_run_response.run_id
        # Keep a reference to the member's paused RunOutput so continue_run
        # can pass it directly without needing a session/DB lookup.
        propagated._member_run_response = member_run_response
        run_response.requirements.append(propagated)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_tools.py",
"license": "Apache License 2.0",
"lines": 484,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/_utils.py | """Shared utility helpers for Team."""
from __future__ import annotations
import json
from copy import copy
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Union,
cast,
)
if TYPE_CHECKING:
from agno.team.team import Team
from agno.filters import FilterExpr
from agno.utils.log import log_debug, log_error, log_warning
def _get_effective_filters(
team: Team, knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> Optional[Any]:
"""
Determine effective filters for the team, considering:
1. Team-level filters (team.knowledge_filters)
2. Run-time filters (knowledge_filters)
Priority: Run-time filters > Team filters
"""
effective_filters = None
# Start with team-level filters if they exist
if team.knowledge_filters:
effective_filters = team.knowledge_filters.copy()
# Apply run-time filters if they exist
if knowledge_filters:
if effective_filters:
if isinstance(effective_filters, dict):
if isinstance(knowledge_filters, dict):
effective_filters.update(cast(Dict[str, Any], knowledge_filters))
else:
# If knowledge_filters is not a dict (e.g., list of FilterExpr), combine as list if effective_filters is dict
# Convert the dict to a list and concatenate
effective_filters = cast(Any, [effective_filters, *knowledge_filters])
else:
effective_filters = [*effective_filters, *knowledge_filters]
else:
effective_filters = knowledge_filters
return effective_filters
def _convert_documents_to_string(team: Team, docs: List[Union[Dict[str, Any], str]]) -> str:
if docs is None or len(docs) == 0:
return ""
if team.references_format == "yaml":
import yaml
return yaml.dump(docs)
return json.dumps(docs, indent=2)
def _convert_dependencies_to_string(team: Team, context: Dict[str, Any]) -> str:
"""Convert the context dictionary to a string representation.
Args:
context: Dictionary containing context data
Returns:
String representation of the context, or empty string if conversion fails
"""
if context is None:
return ""
try:
return json.dumps(context, indent=2, default=str)
except (TypeError, ValueError, OverflowError) as e:
log_warning(f"Failed to convert context to JSON: {e}")
# Attempt a fallback conversion for non-serializable objects
sanitized_context = {}
for key, value in context.items():
try:
# Try to serialize each value individually
json.dumps({key: value}, default=str)
sanitized_context[key] = value
except Exception as e:
log_error(f"Failed to serialize to JSON: {e}")
# If serialization fails, convert to string representation
sanitized_context[key] = str(value)
try:
return json.dumps(sanitized_context, indent=2)
except Exception as e:
log_error(f"Failed to convert sanitized context to JSON: {e}")
return str(context)
# ---------------------------------------------------------------------------
# Deep copy
# ---------------------------------------------------------------------------
def deep_copy(team: Team, *, update: Optional[Dict[str, Any]] = None) -> Team:
    """Create and return a deep copy of this Team, optionally updating fields.

    This creates a fresh Team instance with isolated mutable state while sharing
    heavy resources like database connections and models. Member agents are also
    deep copied to ensure complete isolation.

    Args:
        update: Optional dictionary of fields to override in the new Team.

    Returns:
        Team: A new Team instance with copied state.

    Raises:
        Exception: Re-raised if constructing the new Team instance fails.
    """
    from dataclasses import fields
    from inspect import signature
    # Get the set of valid __init__ parameter names
    init_params = set(signature(team.__class__.__init__).parameters.keys()) - {"self"}
    # Extract the fields to set for the new Team
    fields_for_new_team: Dict[str, Any] = {}
    for f in fields(cast(Any, team)):
        # Skip private fields and fields not accepted by __init__
        if f.name.startswith("_") or f.name not in init_params:
            continue
        field_value = getattr(team, f.name)
        if field_value is not None:
            try:
                # Per-field copy strategy lives in _deep_copy_field
                fields_for_new_team[f.name] = _deep_copy_field(team, f.name, field_value)
            except Exception as e:
                # Best-effort: on copy failure, share the original value rather than abort
                log_warning(f"Failed to deep copy field '{f.name}': {e}. Using original value.")
                fields_for_new_team[f.name] = field_value
    # Update fields if provided
    if update:
        fields_for_new_team.update(update)
    # Create a new Team
    try:
        new_team = team.__class__(**fields_for_new_team)
        log_debug(f"Created new {team.__class__.__name__}")
        return new_team
    except Exception as e:
        # Construction failure is fatal: log and propagate to the caller
        log_error(f"Failed to create deep copy of {team.__class__.__name__}: {e}")
        raise
def _deep_copy_field(team: Team, field_name: str, field_value: Any) -> Any:
    """Helper method to deep copy a field based on its type.

    Strategy, in priority order: members get member-level deep copies, tools are
    deep-copied except MCP tools (shared), known heavy resources are shared by
    reference, containers/pydantic models get type-appropriate copies, and
    everything else falls back to a shallow copy. Never raises: on any copy
    failure the original value is returned by reference.
    """
    from copy import deepcopy
    from pydantic import BaseModel
    # For members, return callable factories by reference; deep copy static lists
    if field_name == "members" and field_value is not None:
        if callable(field_value) and not isinstance(field_value, list):
            return field_value
        copied_members = []
        for member in field_value:
            # Members providing their own deep_copy (Agent/Team) are copied; others shared
            if hasattr(member, "deep_copy"):
                copied_members.append(member.deep_copy())
            else:
                copied_members.append(member)
        return copied_members
    # For tools, return callable factories by reference; share MCP tools but copy others
    if field_name == "tools" and field_value is not None:
        if callable(field_value) and not isinstance(field_value, list):
            return field_value
        try:
            copied_tools = []
            for tool in field_value:
                try:
                    # Share MCP tools (they maintain server connections)
                    is_mcp_tool = hasattr(type(tool), "__mro__") and any(
                        c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
                    )
                    if is_mcp_tool:
                        copied_tools.append(tool)
                    else:
                        try:
                            copied_tools.append(deepcopy(tool))
                        except Exception:
                            # Tool can't be deep copied, share by reference
                            copied_tools.append(tool)
                except Exception:
                    # MCP detection failed, share tool by reference to be safe
                    copied_tools.append(tool)
            return copied_tools
        except Exception as e:
            # If entire tools processing fails, log and return original list
            log_warning(f"Failed to process tools for deep copy: {e}")
            return field_value
    # Share heavy resources - these maintain connections/pools that shouldn't be duplicated
    if field_name in (
        "db",
        "model",
        "reasoning_model",
        "knowledge",
        "memory_manager",
        "parser_model",
        "output_model",
        "session_summary_manager",
        "compression_manager",
        "learning",
    ):
        return field_value
    # For compound types, attempt a deep copy
    if isinstance(field_value, (list, dict, set)):
        try:
            return deepcopy(field_value)
        except Exception:
            # Deep copy failed (e.g. unpicklable element); degrade to shallow copy
            try:
                return copy(field_value)
            except Exception as e:
                log_warning(f"Failed to copy field: {field_name} - {e}")
                return field_value
    # For pydantic models, attempt a model_copy
    if isinstance(field_value, BaseModel):
        try:
            return field_value.model_copy(deep=True)
        except Exception:
            # Deep model_copy failed; degrade to a shallow model_copy
            try:
                return field_value.model_copy(deep=False)
            except Exception as e:
                log_warning(f"Failed to copy field: {field_name} - {e}")
                return field_value
    # For other types, attempt a shallow copy first
    try:
        return copy(field_value)
    except Exception:
        # If copy fails, return as is
        return field_value
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/_utils.py",
"license": "Apache License 2.0",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_session_isolation.py | """Integration tests for session user_id isolation in PostgresDb.
Verifies that delete_session, delete_sessions, and rename_session
correctly enforce user_id ownership when the parameter is provided.
"""
import time
import pytest
from agno.db.base import SessionType
from agno.db.postgres.postgres import PostgresDb
from agno.session.agent import AgentSession
@pytest.fixture(autouse=True)
def cleanup_sessions(postgres_db_real: PostgresDb):
    """Truncate the sessions table after every test so tests stay independent."""
    yield
    with postgres_db_real.Session() as session:
        try:
            sessions_table = postgres_db_real._get_table("sessions", create_table_if_not_found=True)
            if sessions_table is not None:
                session.execute(sessions_table.delete())
                session.commit()
        except Exception:
            # Best-effort cleanup: roll back rather than fail the test run.
            session.rollback()
def _make_session(session_id: str, user_id: str) -> AgentSession:
    """Build a minimal AgentSession owned by ``user_id`` for the isolation tests."""
    return AgentSession(
        session_id=session_id,
        agent_id="test_agent",
        user_id=user_id,
        # Default name is asserted on by the rename tests below.
        session_data={"session_name": f"Session {session_id}"},
        created_at=int(time.time()),
    )
# -- delete_session isolation --
def test_delete_session_correct_user(postgres_db_real: PostgresDb):
    """delete_session with matching user_id succeeds."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.delete_session("s1", user_id="alice")
    assert result is True
    # The session must be gone for any subsequent reader.
    session = postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is None
def test_delete_session_wrong_user(postgres_db_real: PostgresDb):
    """delete_session with wrong user_id is blocked — session survives."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.delete_session("s1", user_id="bob")
    assert result is False
    # Ownership intact: the session is still alice's.
    session = postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is not None
    assert session.user_id == "alice"
def test_delete_session_no_user_id_wildcard(postgres_db_real: PostgresDb):
    """delete_session with user_id=None is a wildcard — deletes any user's session."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.delete_session("s1", user_id=None)
    assert result is True
    session = postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is None
def test_delete_session_empty_string_user_id(postgres_db_real: PostgresDb):
    """delete_session with user_id='' should NOT act as wildcard (empty != None)."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.delete_session("s1", user_id="")
    assert result is False
    session = postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is not None
# -- delete_sessions isolation --
def test_delete_sessions_correct_user(postgres_db_real: PostgresDb):
    """delete_sessions with matching user_id only deletes that user's sessions."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    postgres_db_real.upsert_session(_make_session("s2", "alice"))
    postgres_db_real.upsert_session(_make_session("s3", "bob"))
    # Bulk delete names all three, but only alice's should match.
    postgres_db_real.delete_sessions(["s1", "s2", "s3"], user_id="alice")
    # Alice's sessions deleted
    assert postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is None
    assert postgres_db_real.get_session(session_id="s2", session_type=SessionType.AGENT) is None
    # Bob's session survives
    bob_session = postgres_db_real.get_session(session_id="s3", session_type=SessionType.AGENT)
    assert bob_session is not None
    assert bob_session.user_id == "bob"
def test_delete_sessions_wrong_user(postgres_db_real: PostgresDb):
    """delete_sessions with wrong user_id deletes nothing."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    postgres_db_real.upsert_session(_make_session("s2", "alice"))
    postgres_db_real.delete_sessions(["s1", "s2"], user_id="eve")
    assert postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is not None
    assert postgres_db_real.get_session(session_id="s2", session_type=SessionType.AGENT) is not None
def test_delete_sessions_wildcard(postgres_db_real: PostgresDb):
    """delete_sessions with user_id=None deletes all specified sessions regardless of owner."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    postgres_db_real.upsert_session(_make_session("s2", "bob"))
    postgres_db_real.delete_sessions(["s1", "s2"], user_id=None)
    assert postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is None
    assert postgres_db_real.get_session(session_id="s2", session_type=SessionType.AGENT) is None
# -- rename_session isolation --
def test_rename_session_correct_user(postgres_db_real: PostgresDb):
    """rename_session with matching user_id succeeds."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Renamed by Alice",
        user_id="alice",
    )
    # A successful rename returns the updated session object.
    assert result is not None
    assert result.session_data["session_name"] == "Renamed by Alice"
def test_rename_session_wrong_user(postgres_db_real: PostgresDb):
    """rename_session with wrong user_id is blocked — name unchanged."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Hacked by Bob",
        user_id="bob",
    )
    # A blocked rename returns None.
    assert result is None
    # Verify original name unchanged
    session = postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is not None
    assert session.session_data["session_name"] == "Session s1"
def test_rename_session_wildcard(postgres_db_real: PostgresDb):
    """rename_session with user_id=None succeeds (backward compat wildcard)."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Renamed without user",
        user_id=None,
    )
    assert result is not None
    assert result.session_data["session_name"] == "Renamed without user"
def test_rename_session_empty_string_user_id(postgres_db_real: PostgresDb):
    """rename_session with user_id='' should NOT act as wildcard."""
    postgres_db_real.upsert_session(_make_session("s1", "alice"))
    result = postgres_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Hacked by empty",
        user_id="",
    )
    assert result is None
    session = postgres_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is not None
    assert session.session_data["session_name"] == "Session s1"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_session_isolation.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/sqlite/test_session_isolation.py | """Integration tests for session user_id isolation in SqliteDb.
Mirrors the Postgres isolation tests to verify the same security
boundaries hold across different SQL backends.
"""
import time
import pytest
from agno.db.base import SessionType
from agno.db.sqlite.sqlite import SqliteDb
from agno.session.agent import AgentSession
@pytest.fixture(autouse=True)
def cleanup_sessions(sqlite_db_real: SqliteDb):
    """Truncate the sessions table after every test so tests stay independent."""
    yield
    with sqlite_db_real.Session() as session:
        try:
            sessions_table = sqlite_db_real._get_table("sessions", create_table_if_not_found=True)
            if sessions_table is not None:
                session.execute(sessions_table.delete())
                session.commit()
        except Exception:
            # Best-effort cleanup: roll back rather than fail the test run.
            session.rollback()
def _make_session(session_id: str, user_id: str) -> AgentSession:
    """Build a minimal AgentSession owned by ``user_id`` for the isolation tests."""
    return AgentSession(
        session_id=session_id,
        agent_id="test_agent",
        user_id=user_id,
        # Default name is asserted on by the rename tests below.
        session_data={"session_name": f"Session {session_id}"},
        created_at=int(time.time()),
    )
# -- delete_session isolation --
def test_delete_session_correct_user(sqlite_db_real: SqliteDb):
    """delete_session with matching user_id succeeds."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.delete_session("s1", user_id="alice")
    assert result is True
    session = sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is None
def test_delete_session_wrong_user(sqlite_db_real: SqliteDb):
    """delete_session with wrong user_id is blocked — session survives."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.delete_session("s1", user_id="bob")
    assert result is False
    session = sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is not None
    assert session.user_id == "alice"
def test_delete_session_wildcard(sqlite_db_real: SqliteDb):
    """delete_session with user_id=None is a wildcard — deletes any user's session."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.delete_session("s1", user_id=None)
    assert result is True
    assert sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is None
def test_delete_session_empty_string(sqlite_db_real: SqliteDb):
    """delete_session with user_id='' should NOT act as wildcard (empty != None)."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.delete_session("s1", user_id="")
    assert result is False
    assert sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is not None
# -- delete_sessions isolation --
def test_delete_sessions_correct_user(sqlite_db_real: SqliteDb):
    """delete_sessions with matching user_id only deletes that user's sessions."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    sqlite_db_real.upsert_session(_make_session("s2", "alice"))
    sqlite_db_real.upsert_session(_make_session("s3", "bob"))
    sqlite_db_real.delete_sessions(["s1", "s2", "s3"], user_id="alice")
    # Alice's sessions are gone; bob's survives.
    assert sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is None
    assert sqlite_db_real.get_session(session_id="s2", session_type=SessionType.AGENT) is None
    bob_session = sqlite_db_real.get_session(session_id="s3", session_type=SessionType.AGENT)
    assert bob_session is not None
    assert bob_session.user_id == "bob"
def test_delete_sessions_wrong_user(sqlite_db_real: SqliteDb):
    """delete_sessions with wrong user_id deletes nothing."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    sqlite_db_real.delete_sessions(["s1"], user_id="eve")
    assert sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is not None
def test_delete_sessions_wildcard(sqlite_db_real: SqliteDb):
    """delete_sessions with user_id=None deletes all specified sessions regardless of owner."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    sqlite_db_real.upsert_session(_make_session("s2", "bob"))
    sqlite_db_real.delete_sessions(["s1", "s2"], user_id=None)
    assert sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT) is None
    assert sqlite_db_real.get_session(session_id="s2", session_type=SessionType.AGENT) is None
# -- rename_session isolation --
def test_rename_session_correct_user(sqlite_db_real: SqliteDb):
    """rename_session with matching user_id succeeds."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Renamed by Alice",
        user_id="alice",
    )
    # A successful rename returns the updated session object.
    assert result is not None
    assert result.session_data["session_name"] == "Renamed by Alice"
def test_rename_session_wrong_user(sqlite_db_real: SqliteDb):
    """rename_session with wrong user_id is blocked — name unchanged."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Hacked",
        user_id="bob",
    )
    assert result is None
    # Verify original name unchanged. Guard against None first so a missing
    # session fails with a clear assertion rather than an AttributeError
    # (matches the Postgres twin of this test).
    session = sqlite_db_real.get_session(session_id="s1", session_type=SessionType.AGENT)
    assert session is not None
    assert session.session_data["session_name"] == "Session s1"
def test_rename_session_wildcard(sqlite_db_real: SqliteDb):
    """rename_session with user_id=None succeeds (backward compat wildcard)."""
    sqlite_db_real.upsert_session(_make_session("s1", "alice"))
    result = sqlite_db_real.rename_session(
        session_id="s1",
        session_type=SessionType.AGENT,
        session_name="Renamed without user",
        user_id=None,
    )
    assert result is not None
    assert result.session_data["session_name"] == "Renamed without user"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/sqlite/test_session_isolation.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_apply_to_context.py | """Tests for ResolvedRunOptions.apply_to_context() — agent version.
NOTE: apply_to_context() always sets output_schema from resolved options.
This is intentional because the same run_context may be reused across workflow
steps with different agents, each with their own output_schema.
"""
from pydantic import BaseModel
from agno.agent._run_options import ResolvedRunOptions
from agno.run import RunContext
def _make_opts(**overrides) -> ResolvedRunOptions:
    """Build a ResolvedRunOptions with test defaults; keyword args override per test."""
    base = {
        "stream": False,
        "stream_events": False,
        "yield_run_output": False,
        "add_history_to_context": False,
        "add_dependencies_to_context": False,
        "add_session_state_to_context": False,
        "dependencies": {"resolved": "deps"},
        "knowledge_filters": {"resolved": "filters"},
        "metadata": {"resolved": "meta"},
        "output_schema": None,
    }
    return ResolvedRunOptions(**{**base, **overrides})
def _make_context(**overrides) -> RunContext:
    """Build a RunContext with default ids; keyword args override per test."""
    return RunContext(**{**{"run_id": "r1", "session_id": "s1"}, **overrides})
class TestApplyWhenProvided:
    """When *_provided=True, the resolved value is always applied."""
    def test_dependencies_provided_overwrites(self):
        ctx = _make_context(dependencies={"existing": "value"})
        opts = _make_opts(dependencies={"new": "value"})
        opts.apply_to_context(ctx, dependencies_provided=True)
        # Caller-provided value wins over whatever the context already had.
        assert ctx.dependencies == {"new": "value"}
    def test_knowledge_filters_provided_overwrites(self):
        ctx = _make_context(knowledge_filters={"existing": "f"})
        opts = _make_opts(knowledge_filters={"new": "f"})
        opts.apply_to_context(ctx, knowledge_filters_provided=True)
        assert ctx.knowledge_filters == {"new": "f"}
    def test_metadata_provided_overwrites(self):
        ctx = _make_context(metadata={"existing": "m"})
        opts = _make_opts(metadata={"new": "m"})
        opts.apply_to_context(ctx, metadata_provided=True)
        assert ctx.metadata == {"new": "m"}
    def test_output_schema_always_set_from_opts(self):
        """Agent always sets output_schema from resolved options for workflow reuse."""
        class Schema(BaseModel):
            x: int
        ctx = _make_context(output_schema={"old": "schema"})
        opts = _make_opts(output_schema=Schema)
        # No *_provided flag needed: output_schema is unconditionally applied.
        opts.apply_to_context(ctx)
        assert ctx.output_schema is Schema
class TestApplyFallbackWhenNone:
    """When *_provided=False and context field is None, fill from resolved defaults."""
    def test_dependencies_none_gets_filled(self):
        ctx = _make_context(dependencies=None)
        opts = _make_opts(dependencies={"default": "deps"})
        # Not provided at the call site, so the resolved default fills the gap.
        opts.apply_to_context(ctx)
        assert ctx.dependencies == {"default": "deps"}
    def test_knowledge_filters_none_gets_filled(self):
        ctx = _make_context(knowledge_filters=None)
        opts = _make_opts(knowledge_filters={"default": "f"})
        opts.apply_to_context(ctx)
        assert ctx.knowledge_filters == {"default": "f"}
    def test_metadata_none_gets_filled(self):
        ctx = _make_context(metadata=None)
        opts = _make_opts(metadata={"default": "m"})
        opts.apply_to_context(ctx)
        assert ctx.metadata == {"default": "m"}
    def test_output_schema_none_gets_filled(self):
        class Schema(BaseModel):
            y: str
        ctx = _make_context(output_schema=None)
        opts = _make_opts(output_schema=Schema)
        opts.apply_to_context(ctx)
        assert ctx.output_schema is Schema
class TestExistingContextPreserved:
    """When *_provided=False and context field is already set, leave it alone.

    NOTE: output_schema is an exception - it is always set from resolved options.
    """
    def test_dependencies_kept(self):
        ctx = _make_context(dependencies={"keep": "me"})
        opts = _make_opts(dependencies={"ignored": "value"})
        # Not provided and already set on the context: resolved value is ignored.
        opts.apply_to_context(ctx)
        assert ctx.dependencies == {"keep": "me"}
    def test_knowledge_filters_kept(self):
        ctx = _make_context(knowledge_filters={"keep": "f"})
        opts = _make_opts(knowledge_filters={"ignored": "f"})
        opts.apply_to_context(ctx)
        assert ctx.knowledge_filters == {"keep": "f"}
    def test_metadata_kept(self):
        ctx = _make_context(metadata={"keep": "m"})
        opts = _make_opts(metadata={"ignored": "m"})
        opts.apply_to_context(ctx)
        assert ctx.metadata == {"keep": "m"}
    def test_output_schema_always_overwritten(self):
        """Agent always overwrites output_schema for workflow reuse."""
        class Existing(BaseModel):
            a: int
        class NewSchema(BaseModel):
            b: int
        ctx = _make_context(output_schema=Existing)
        opts = _make_opts(output_schema=NewSchema)
        opts.apply_to_context(ctx)
        # Agent always sets output_schema from opts, even if context had one
        assert ctx.output_schema is NewSchema
class TestAllFieldsTogether:
    """Apply all fields simultaneously."""
    def test_mixed_provided_and_fallback(self):
        # One field per rule: provided override, preserved existing, None fallback,
        # and the always-applied output_schema.
        ctx = _make_context(
            dependencies=None,
            knowledge_filters={"existing": "f"},
            metadata=None,
            output_schema={"existing": "schema"},
        )
        opts = _make_opts(
            dependencies={"new": "d"},
            knowledge_filters={"new": "f"},
            metadata={"new": "m"},
            output_schema=None,
        )
        opts.apply_to_context(
            ctx,
            dependencies_provided=True,
            knowledge_filters_provided=False,
            metadata_provided=False,
        )
        # dependencies: provided=True, so overwritten
        assert ctx.dependencies == {"new": "d"}
        # knowledge_filters: provided=False, existing not None, kept
        assert ctx.knowledge_filters == {"existing": "f"}
        # metadata: provided=False, was None, filled from opts
        assert ctx.metadata == {"new": "m"}
        # output_schema: always set from opts (agent behavior for workflow reuse)
        assert ctx.output_schema is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_apply_to_context.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_default_tools_json_history.py | import json
from typing import Any, Optional
import pytest
from agno.agent import _default_tools
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.session import AgentSession
class _EmptySessionsDb:
    """Minimal stand-in for a Db that always reports zero stored sessions."""
    def get_sessions(
        self,
        session_type: SessionType,
        limit: Optional[int] = None,
        user_id: Optional[str] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
    ) -> list[Any]:
        # Signature mirrors the real Db API; presumably matches agno.db.base — verify there.
        return []
@pytest.mark.parametrize("db", [None, _EmptySessionsDb()])
def test_get_previous_sessions_messages_returns_valid_json_when_empty(db):
    """With no db or an empty db, the tool must still emit parseable JSON ([])."""
    agent = Agent(name="test-agent", db=db)
    get_previous_session_messages = _default_tools.get_previous_sessions_messages_function(agent)
    result = get_previous_session_messages()
    assert json.loads(result) == []
@pytest.mark.asyncio
@pytest.mark.parametrize("db", [None, _EmptySessionsDb()])
async def test_aget_previous_sessions_messages_returns_valid_json_when_empty(db):
    """Async variant: the awaited tool's entrypoint must also emit JSON ([])."""
    agent = Agent(name="test-agent", db=db)
    get_previous_session_messages_function = await _default_tools.aget_previous_sessions_messages_function(agent)
    result = await get_previous_session_messages_function.entrypoint()  # type: ignore[misc]
    assert json.loads(result) == []
def test_get_chat_history_returns_valid_json_when_empty():
    """A fresh session has no chat history; the tool must return JSON ([])."""
    agent = Agent(name="test-agent")
    session = AgentSession(session_id="session-1")
    get_chat_history = _default_tools.get_chat_history_function(agent, session)
    result = get_chat_history()
    assert json.loads(result) == []
def test_get_tool_call_history_returns_valid_json_when_empty():
    """A fresh session has no tool calls; the tool must return JSON ([])."""
    agent = Agent(name="test-agent")
    session = AgentSession(session_id="session-1")
    get_tool_call_history = _default_tools.get_tool_call_history_function(agent, session)
    result = get_tool_call_history()
    assert json.loads(result) == []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_default_tools_json_history.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_run_options.py | """Tests for centralized run option resolution."""
import dataclasses
import pytest
from agno.agent._run_options import ResolvedRunOptions, resolve_run_options
from agno.agent.agent import Agent
def _make_agent(**kwargs) -> Agent:
    """Create a minimal Agent instance for testing."""
    # Thin pass-through so each test sets only the options it cares about.
    return Agent(**kwargs)
class TestResolvedRunOptionsImmutable:
    """ResolvedRunOptions is a frozen dataclass; attribute assignment must raise."""
    def test_frozen_raises_on_assignment(self):
        opts = ResolvedRunOptions(
            stream=True,
            stream_events=False,
            yield_run_output=False,
            add_history_to_context=False,
            add_dependencies_to_context=False,
            add_session_state_to_context=False,
            dependencies=None,
            knowledge_filters=None,
            metadata=None,
            output_schema=None,
        )
        # Frozen dataclasses raise FrozenInstanceError on any field assignment.
        with pytest.raises(dataclasses.FrozenInstanceError):
            opts.stream = False  # type: ignore[misc]
class TestDefaultResolution:
    """With no call-site arguments, resolution falls back to the agent's own settings."""

    def test_all_defaults_from_agent(self):
        agent = _make_agent(
            stream=True,
            stream_events=True,
            add_history_to_context=True,
            add_dependencies_to_context=True,
            add_session_state_to_context=True,
            dependencies={"db": "postgres"},
            knowledge_filters={"topic": "test"},
            metadata={"env": "test"},
        )
        resolved = resolve_run_options(agent)
        # Every value the agent was configured with surfaces unchanged.
        assert resolved.stream is True
        assert resolved.stream_events is True
        assert resolved.add_history_to_context is True
        assert resolved.add_dependencies_to_context is True
        assert resolved.add_session_state_to_context is True
        assert resolved.dependencies == {"db": "postgres"}
        assert resolved.knowledge_filters == {"topic": "test"}
        assert resolved.metadata == {"env": "test"}

    def test_bare_agent_defaults(self):
        resolved = resolve_run_options(_make_agent())
        # A bare agent resolves to all-false flags...
        for flag in (
            resolved.stream,
            resolved.stream_events,
            resolved.yield_run_output,
            resolved.add_history_to_context,
            resolved.add_dependencies_to_context,
            resolved.add_session_state_to_context,
        ):
            assert flag is False
        # ...and all-None payloads.
        for payload in (resolved.dependencies, resolved.knowledge_filters, resolved.metadata, resolved.output_schema):
            assert payload is None
class TestCallSiteOverrides:
    """Arguments passed at the call site win over the agent's own configuration."""

    def test_stream_override(self):
        resolved = resolve_run_options(_make_agent(stream=False), stream=True)
        assert resolved.stream is True

    def test_stream_events_override(self):
        resolved = resolve_run_options(_make_agent(stream=True, stream_events=False), stream_events=True)
        assert resolved.stream_events is True

    def test_yield_run_output_override(self):
        resolved = resolve_run_options(_make_agent(), yield_run_output=True)
        assert resolved.yield_run_output is True

    def test_context_flags_override(self):
        agent = _make_agent(
            add_history_to_context=False,
            add_dependencies_to_context=False,
            add_session_state_to_context=False,
        )
        overrides = {
            "add_history_to_context": True,
            "add_dependencies_to_context": True,
            "add_session_state_to_context": True,
        }
        resolved = resolve_run_options(agent, **overrides)
        # All three context flags must reflect the call-site values.
        for flag_name in overrides:
            assert getattr(resolved, flag_name) is True

    def test_dependencies_override(self):
        resolved = resolve_run_options(_make_agent(dependencies={"a": 1}), dependencies={"b": 2})
        assert resolved.dependencies == {"b": 2}

    def test_output_schema_override(self):
        from pydantic import BaseModel

        class MySchema(BaseModel):
            name: str

        resolved = resolve_run_options(_make_agent(), output_schema=MySchema)
        # Identity, not equality: the very class passed in must come back out.
        assert resolved.output_schema is MySchema
class TestStreamEventsCoupling:
    """stream_events can only be True when stream itself resolves to True."""

    def test_stream_false_forces_stream_events_false(self):
        # Even with stream_events requested both on the agent and at the call site,
        # an explicit stream=False forces stream_events off.
        resolved = resolve_run_options(_make_agent(stream_events=True), stream=False, stream_events=True)
        assert resolved.stream is False
        assert resolved.stream_events is False

    def test_stream_none_agent_none_defaults_both_false(self):
        resolved = resolve_run_options(_make_agent())
        assert resolved.stream is False
        assert resolved.stream_events is False

    def test_stream_true_allows_stream_events(self):
        resolved = resolve_run_options(_make_agent(), stream=True, stream_events=True)
        assert resolved.stream is True
        assert resolved.stream_events is True
class TestMetadataMerge:
    """Agent metadata and call-site metadata merge; agent keys win on conflict."""

    def test_both_none(self):
        assert resolve_run_options(_make_agent()).metadata is None

    def test_only_callsite(self):
        resolved = resolve_run_options(_make_agent(), metadata={"run": "value"})
        assert resolved.metadata == {"run": "value"}

    def test_only_agent(self):
        resolved = resolve_run_options(_make_agent(metadata={"agent": "value"}))
        assert resolved.metadata == {"agent": "value"}

    def test_merge_agent_takes_precedence(self):
        agent = _make_agent(metadata={"shared": "agent_wins", "agent_only": "a"})
        resolved = resolve_run_options(agent, metadata={"shared": "run_value", "run_only": "r"})
        merged = resolved.metadata
        # For conflicting keys the agent-level value wins; unique keys from both sides survive.
        assert merged["shared"] == "agent_wins"
        assert merged["agent_only"] == "a"
        assert merged["run_only"] == "r"

    def test_merge_does_not_mutate_callsite(self):
        caller_metadata = {"b": 2}
        resolve_run_options(_make_agent(metadata={"a": 1}), metadata=caller_metadata)
        # The caller's dict must be left untouched by the merge.
        assert caller_metadata == {"b": 2}
class TestKnowledgeFilterMerge:
    """Knowledge filters merge agent- and run-level values; run-level wins for dict keys."""

    def test_no_filters(self):
        assert resolve_run_options(_make_agent()).knowledge_filters is None

    def test_only_agent_filters(self):
        resolved = resolve_run_options(_make_agent(knowledge_filters={"topic": "test"}))
        assert resolved.knowledge_filters == {"topic": "test"}

    def test_only_callsite_filters(self):
        resolved = resolve_run_options(_make_agent(), knowledge_filters={"topic": "run"})
        assert resolved.knowledge_filters == {"topic": "run"}

    def test_dict_merge_callsite_takes_precedence(self):
        agent = _make_agent(knowledge_filters={"topic": "agent", "agent_key": "a"})
        resolved = resolve_run_options(agent, knowledge_filters={"topic": "run", "run_key": "r"})
        merged = resolved.knowledge_filters
        # Note the asymmetry with metadata: for dict filters the run-level value wins.
        assert merged["topic"] == "run"
        assert merged["agent_key"] == "a"
        assert merged["run_key"] == "r"

    def test_list_merge(self):
        from agno.filters import EQ

        agent = _make_agent(knowledge_filters=[EQ("a", "1")])
        resolved = resolve_run_options(agent, knowledge_filters=[EQ("b", "2")])
        # List filters are combined rather than keyed-merged: both entries survive.
        assert len(resolved.knowledge_filters) == 2
class TestAgentNotMutated:
    """Resolution must never write back to the Agent or alias caller-owned dicts."""

    def test_resolve_does_not_mutate_agent(self):
        agent = _make_agent(
            stream=True,
            metadata={"a": 1},
            dependencies={"db": "test"},
            knowledge_filters={"topic": "test"},
        )
        # Snapshot agent state, then resolve with conflicting call-site values.
        snapshot = (agent.stream, agent.metadata.copy(), agent.dependencies.copy())
        resolve_run_options(
            agent,
            stream=False,
            metadata={"b": 2},
            dependencies={"other": "value"},
            knowledge_filters={"other_topic": "run"},
        )
        assert agent.stream == snapshot[0]
        assert agent.metadata == snapshot[1]
        assert agent.dependencies == snapshot[2]

    def test_dependencies_defensive_copy(self):
        agent = _make_agent(dependencies={"key": "original"})
        resolved = resolve_run_options(agent)
        # Mutating the resolved copy must not leak back into the agent.
        resolved.dependencies["key"] = "mutated"  # type: ignore[index]
        assert agent.dependencies == {"key": "original"}

    def test_callsite_dependencies_defensive_copy(self):
        caller_deps = {"key": "original"}
        resolved = resolve_run_options(_make_agent(), dependencies=caller_deps)
        # Same guarantee for a caller-supplied dict.
        resolved.dependencies["key"] = "mutated"  # type: ignore[index]
        assert caller_deps == {"key": "original"}
# ---------------------------------------------------------------------------
# Functions exist and are importable
# ---------------------------------------------------------------------------
class TestFunctionsImportable:
    """Smoke tests: the internal run entry points exist in agno.agent._run and are callable."""

    def test_run_dispatch_importable(self):
        from agno.agent import _run as run_module

        assert callable(run_module.run_dispatch)

    def test_run_importable(self):
        from agno.agent import _run as run_module

        assert callable(run_module._run)

    def test_run_stream_importable(self):
        from agno.agent import _run as run_module

        assert callable(run_module._run_stream)

    def test_arun_dispatch_importable(self):
        from agno.agent import _run as run_module

        assert callable(run_module.arun_dispatch)

    def test_arun_importable(self):
        from agno.agent import _run as run_module

        assert callable(run_module._arun)

    def test_arun_stream_importable(self):
        from agno.agent import _run as run_module

        assert callable(run_module._arun_stream)
# ---------------------------------------------------------------------------
# Agent.run / Agent.arun dispatch to the correct names
# ---------------------------------------------------------------------------
class TestAgentWrappersDelegateCorrectly:
    """Agent.run / Agent.arun are thin wrappers over the module-level dispatch functions."""
    def test_agent_run_delegates_to_run_dispatch(self, monkeypatch):
        """Agent.run must forward its input to _run.run_dispatch."""
        from agno.agent import _run as run_module
        captured = {}
        def fake_dispatch(agent, input, **kwargs):
            # Record the call instead of executing the real dispatch pipeline.
            captured["called"] = True
            captured["input"] = input
            return None
        monkeypatch.setattr(run_module, "run_dispatch", fake_dispatch)
        agent = _make_agent()
        agent.run(input="hello")
        assert captured["called"] is True
        assert captured["input"] == "hello"
    def test_agent_arun_delegates_to_arun_dispatch(self, monkeypatch):
        """Agent.arun must forward its input to _run.arun_dispatch."""
        from agno.agent import _run as run_module
        captured = {}
        def fake_dispatch(agent, input, **kwargs):
            captured["called"] = True
            captured["input"] = input
            return None
        monkeypatch.setattr(run_module, "arun_dispatch", fake_dispatch)
        agent = _make_agent()
        # NOTE(review): arun is called without await in a sync test; this relies on
        # Agent.arun invoking arun_dispatch eagerly (the patched fake is synchronous).
        # Confirm against Agent.arun's implementation.
        agent.arun(input="hello")
        assert captured["called"] is True
        assert captured["input"] == "hello"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_run_options.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_run_regressions.py | import inspect
from typing import Any, Optional
import pytest
from agno.agent import _init, _messages, _response, _run, _session, _storage, _tools
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.run import RunContext
from agno.run.agent import RunErrorEvent, RunOutput
from agno.run.base import RunStatus
from agno.run.cancel import (
cancel_run,
cleanup_run,
get_active_runs,
get_cancellation_manager,
is_cancelled,
register_run,
set_cancellation_manager,
)
from agno.run.cancellation_management.in_memory_cancellation_manager import InMemoryRunCancellationManager
from agno.run.messages import RunMessages
from agno.session import AgentSession
@pytest.fixture(autouse=True)
def reset_cancellation_manager():
    """Give every test a fresh in-memory cancellation manager, restoring the prior one afterwards."""
    previous_manager = get_cancellation_manager()
    set_cancellation_manager(InMemoryRunCancellationManager())
    try:
        yield
    finally:
        # Always restore, even if the test body raised.
        set_cancellation_manager(previous_manager)
def _patch_sync_dispatch_dependencies(
    agent: Agent,
    monkeypatch: pytest.MonkeyPatch,
    runs: Optional[list[Any]] = None,
) -> None:
    """Stub the storage/init collaborators so run_dispatch can execute without a real db.

    ``runs`` seeds the AgentSession that the patched read_or_create_session returns.
    """
    monkeypatch.setattr(_init, "has_async_db", lambda agent: False)
    monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
    # load_session_state is patched to a pass-through of the supplied session_state.
    monkeypatch.setattr(_storage, "load_session_state", lambda agent, session=None, session_state=None: session_state)
    monkeypatch.setattr(_run, "resolve_run_dependencies", lambda agent, run_context: None)
    monkeypatch.setattr(_response, "get_response_format", lambda agent, run_context=None: None)
    monkeypatch.setattr(
        _storage,
        "read_or_create_session",
        lambda agent, session_id=None, user_id=None: AgentSession(session_id=session_id, user_id=user_id, runs=runs),
    )
def test_run_dispatch_cleans_up_registered_run_on_setup_failure(monkeypatch: pytest.MonkeyPatch):
    """If setup (initialize_agent) raises, run_dispatch must not leave the run id registered."""
    agent = Agent(name="test-agent")
    _patch_sync_dispatch_dependencies(agent, monkeypatch, runs=[])
    def failing_initialize_agent(debug_mode=None):
        raise RuntimeError("initialize failed")
    monkeypatch.setattr(agent, "initialize_agent", failing_initialize_agent)
    run_id = "run-setup-fail"
    # The original error must propagate unchanged...
    with pytest.raises(RuntimeError, match="initialize failed"):
        _run.run_dispatch(agent=agent, input="hello", run_id=run_id, stream=False)
    # ...and the failed run must be removed from the active-run registry.
    assert run_id not in get_active_runs()
def test_run_dispatch_does_not_reset_cancellation_before_impl(monkeypatch: pytest.MonkeyPatch):
    """A cancellation raised during setup must still be visible when _run starts."""
    agent = Agent(name="test-agent")
    _patch_sync_dispatch_dependencies(agent, monkeypatch, runs=[])
    run_id = "run-preserve-cancelled-state"
    def initialize_and_cancel(debug_mode=None):
        # register_run now happens inside _run, so we register here to test cancellation
        register_run(run_id)
        assert cancel_run(run_id) is True
    monkeypatch.setattr(agent, "initialize_agent", initialize_and_cancel)
    observed: dict[str, bool] = {}
    def fake_run_impl(
        agent: Agent,
        run_response,
        run_context,
        session_id: str = "",
        user_id: Optional[str] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        response_format: Optional[Any] = None,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ):
        # Capture whether the cancellation set during setup survived until here.
        observed["cancelled_before_model"] = is_cancelled(run_response.run_id) # type: ignore[arg-type]
        cleanup_run(run_response.run_id) # type: ignore[arg-type]
        return run_response
    monkeypatch.setattr(_run, "_run", fake_run_impl)
    _run.run_dispatch(agent=agent, input="hello", run_id=run_id, stream=False)
    assert observed["cancelled_before_model"] is True
    assert run_id not in get_active_runs()
def test_continue_run_dispatch_handles_none_session_runs(monkeypatch: pytest.MonkeyPatch):
    """continue_run_dispatch raises a clear RuntimeError when the session has runs=None."""
    agent = Agent(name="test-agent")
    monkeypatch.setattr(_init, "has_async_db", lambda agent: False)
    monkeypatch.setattr(agent, "initialize_agent", lambda debug_mode=None: None)
    monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
    monkeypatch.setattr(_storage, "load_session_state", lambda agent, session=None, session_state=None: session_state)
    # read_or_create_session returns a session whose runs list is None (not just empty).
    monkeypatch.setattr(
        _storage,
        "read_or_create_session",
        lambda agent, session_id=None, user_id=None: AgentSession(session_id=session_id, user_id=user_id, runs=None),
    )
    with pytest.raises(RuntimeError, match="No runs found for run ID missing-run"):
        _run.continue_run_dispatch(
            agent=agent,
            run_id="missing-run",
            requirements=[],
            session_id="session-1",
        )
@pytest.mark.asyncio
async def test_acontinue_run_dispatch_handles_none_session_runs(monkeypatch: pytest.MonkeyPatch):
    """Async variant: instead of raising, acontinue_run_dispatch returns an error RunOutput."""
    agent = Agent(name="test-agent")
    monkeypatch.setattr(agent, "initialize_agent", lambda debug_mode=None: None)
    monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
    monkeypatch.setattr(_storage, "load_session_state", lambda agent, session=None, session_state=None: session_state)
    async def fake_aread_or_create_session(agent, session_id: str, user_id: Optional[str] = None):
        # Session exists but carries runs=None, so the requested run cannot be found.
        return AgentSession(session_id=session_id, user_id=user_id, runs=None)
    async def fake_acleanup_and_store(agent, **kwargs: Any):
        return None
    async def fake_disconnect_mcp_tools(agent):
        return None
    monkeypatch.setattr(_storage, "aread_or_create_session", fake_aread_or_create_session)
    monkeypatch.setattr(_run, "acleanup_and_store", fake_acleanup_and_store)
    monkeypatch.setattr(_init, "disconnect_connectable_tools", lambda agent: None)
    monkeypatch.setattr(_init, "disconnect_mcp_tools", fake_disconnect_mcp_tools)
    response = await _run.acontinue_run_dispatch(
        agent=agent,
        run_id="missing-run",
        requirements=[],
        session_id="session-1",
        stream=False,
    )
    # Error surfaces as a RunOutput in error status with the message in content.
    assert response.status == RunStatus.error
    assert isinstance(response.content, str)
    assert "No runs found for run ID missing-run" in response.content
@pytest.mark.asyncio
async def test_acontinue_run_stream_yields_error_event_without_attribute_error(
    monkeypatch: pytest.MonkeyPatch,
):
    """The streaming continue path must yield a single RunErrorEvent when the run is missing."""
    agent = Agent(name="test-agent")
    run_id = "missing-stream-run"
    async def fake_aread_or_create_session(agent, session_id: str, user_id: Optional[str] = None):
        # runs=None triggers the "no runs found" error inside the stream.
        return AgentSession(session_id=session_id, user_id=user_id, runs=None)
    async def fake_disconnect_mcp_tools(agent):
        return None
    monkeypatch.setattr(_storage, "aread_or_create_session", fake_aread_or_create_session)
    monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
    monkeypatch.setattr(_storage, "load_session_state", lambda agent, session=None, session_state=None: session_state)
    monkeypatch.setattr(_init, "disconnect_connectable_tools", lambda agent: None)
    monkeypatch.setattr(_init, "disconnect_mcp_tools", fake_disconnect_mcp_tools)
    run_context = RunContext(
        run_id=run_id,
        session_id="session-1",
        user_id=None,
        session_state={},
    )
    events = []
    async for event in _run._acontinue_run_stream(
        agent=agent,
        session_id="session-1",
        run_context=run_context,
        run_id=run_id,
        requirements=[],
    ):
        events.append(event)
    # Exactly one error event, carrying the run id and the failure message (no AttributeError).
    assert len(events) == 1
    assert isinstance(events[0], RunErrorEvent)
    assert events[0].run_id == run_id
    assert events[0].content is not None
    assert "No runs found for run ID missing-stream-run" in events[0].content
@pytest.mark.asyncio
async def test_arun_stream_impl_cleans_up_registered_run_on_session_read_failure(monkeypatch: pytest.MonkeyPatch):
    """If the async session read fails, _arun_stream yields an error event and deregisters the run."""
    agent = Agent(name="test-agent")
    run_id = "arun-stream-session-fail"
    async def fail_aread_or_create_session(agent, session_id: str, user_id: Optional[str] = None):
        raise RuntimeError("session read failed")
    async def fake_disconnect_mcp_tools(agent):
        return None
    monkeypatch.setattr(_storage, "aread_or_create_session", fail_aread_or_create_session)
    monkeypatch.setattr(_init, "disconnect_connectable_tools", lambda agent: None)
    monkeypatch.setattr(_init, "disconnect_mcp_tools", fake_disconnect_mcp_tools)
    run_context = RunContext(run_id=run_id, session_id="session-1", session_state={})
    run_response = RunOutput(run_id=run_id)
    response_stream = _run._arun_stream(
        agent=agent,
        run_response=run_response,
        run_context=run_context,
        session_id="session-1",
    )
    # Consume the error event yielded by the stream
    events = []
    async for event in response_stream:
        events.append(event)
    # Verify an error event was yielded with the session read failure
    assert len(events) == 1
    assert isinstance(events[0], RunErrorEvent)
    assert "session read failed" in events[0].content
    # The run must not linger in the active-run registry after the failure.
    assert run_id not in get_active_runs()
@pytest.mark.asyncio
async def test_arun_impl_preserves_original_error_when_session_read_fails(monkeypatch: pytest.MonkeyPatch):
    """_arun reports the original session-read error and skips cleanup-and-store entirely."""
    agent = Agent(name="test-agent")
    run_id = "arun-session-fail"
    cleanup_calls = []
    async def fail_aread_or_create_session(agent, session_id: str, user_id: Optional[str] = None):
        raise RuntimeError("session read failed")
    async def fake_acleanup_and_store(agent, **kwargs: Any):
        # Recorded so we can assert it was never invoked.
        cleanup_calls.append(kwargs)
        return None
    async def fake_disconnect_mcp_tools(agent):
        return None
    monkeypatch.setattr(_storage, "aread_or_create_session", fail_aread_or_create_session)
    monkeypatch.setattr(_run, "acleanup_and_store", fake_acleanup_and_store)
    monkeypatch.setattr(_init, "disconnect_connectable_tools", lambda agent: None)
    monkeypatch.setattr(_init, "disconnect_mcp_tools", fake_disconnect_mcp_tools)
    run_context = RunContext(run_id=run_id, session_id="session-1", session_state={})
    run_response = RunOutput(run_id=run_id)
    response = await _run._arun(
        agent=agent,
        run_response=run_response,
        run_context=run_context,
        session_id="session-1",
    )
    # The original message must surface verbatim, not a secondary error from cleanup.
    assert response.status == RunStatus.error
    assert response.content == "session read failed"
    assert cleanup_calls == []
    assert run_id not in get_active_runs()
@pytest.mark.asyncio
async def test_acontinue_run_preserves_original_error_when_session_read_fails(monkeypatch: pytest.MonkeyPatch):
    """Same guarantee as _arun, but for the async continue path (_acontinue_run)."""
    agent = Agent(name="test-agent")
    run_id = "acontinue-session-fail"
    cleanup_calls = []
    async def fail_aread_or_create_session(agent, session_id: str, user_id: Optional[str] = None):
        raise RuntimeError("session read failed")
    async def fake_acleanup_and_store(agent, **kwargs: Any):
        # Recorded so we can assert it was never invoked.
        cleanup_calls.append(kwargs)
        return None
    async def fake_disconnect_mcp_tools(agent):
        return None
    monkeypatch.setattr(_storage, "aread_or_create_session", fail_aread_or_create_session)
    monkeypatch.setattr(_run, "acleanup_and_store", fake_acleanup_and_store)
    monkeypatch.setattr(_init, "disconnect_connectable_tools", lambda agent: None)
    monkeypatch.setattr(_init, "disconnect_mcp_tools", fake_disconnect_mcp_tools)
    run_context = RunContext(run_id=run_id, session_id="session-1", session_state={})
    response = await _run._acontinue_run(
        agent=agent,
        session_id="session-1",
        run_context=run_context,
        run_id=run_id,
        requirements=[],
    )
    # Original error is preserved; no cleanup-and-store call; run deregistered.
    assert response.status == RunStatus.error
    assert response.content == "session read failed"
    assert cleanup_calls == []
    assert run_id not in get_active_runs()
def test_continue_run_stream_registers_run_for_cancellation(monkeypatch=None):
    """_continue_run_stream registers its run id on first yield and deregisters on generator close."""
    agent = Agent(name="test-agent")
    run_id = "continue-stream-register"
    run_response = RunOutput(run_id=run_id)
    run_messages = RunMessages(messages=[])
    run_context = RunContext(run_id=run_id, session_id="session-1", session_state={})
    session = AgentSession(session_id="session-1")
    response_stream = _run._continue_run_stream(
        agent=agent,
        run_response=run_response,
        run_messages=run_messages,
        run_context=run_context,
        session=session,
        tools=[],
        stream_events=True,
    )
    # Advancing the generator once is what triggers run registration.
    next(response_stream)
    assert run_id in get_active_runs()
    assert cancel_run(run_id) is True
    # Closing the generator must run its cleanup and deregister the run.
    response_stream.close()
    assert run_id not in get_active_runs()
def test_session_read_wrappers_default_to_agent_session_type():
    """Both sync and async session readers must default session_type to SessionType.AGENT."""
    for reader in (_storage.read_session, _storage.aread_session):
        default = inspect.signature(reader).parameters["session_type"].default
        assert default == SessionType.AGENT
def _make_precedence_test_agent() -> Agent:
    """Build an Agent with defaults set on every field involved in run-context precedence tests."""
    agent_defaults = {
        "name": "precedence-agent",
        "dependencies": {"agent_dep": "default"},
        "knowledge_filters": {"agent_filter": "default"},
        "metadata": {"agent_meta": "default"},
        "output_schema": {"type": "object", "properties": {"agent": {"type": "string"}}},
    }
    return Agent(**agent_defaults)
def _patch_continue_dispatch_dependencies(agent: Agent, monkeypatch: pytest.MonkeyPatch) -> None:
    """Stub everything continue_run_dispatch touches so it can run without a db or model."""
    monkeypatch.setattr(_init, "has_async_db", lambda agent: False)
    monkeypatch.setattr(agent, "initialize_agent", lambda debug_mode=None: None)
    monkeypatch.setattr(_storage, "update_metadata", lambda agent, session=None: None)
    monkeypatch.setattr(_storage, "load_session_state", lambda agent, session=None, session_state=None: session_state)
    # Session is returned with an empty (but non-None) runs list.
    monkeypatch.setattr(
        _storage,
        "read_or_create_session",
        lambda agent, session_id=None, user_id=None: AgentSession(session_id=session_id, user_id=user_id, runs=[]),
    )
    monkeypatch.setattr(_init, "set_default_model", lambda agent: None)
    monkeypatch.setattr(_response, "get_response_format", lambda agent, run_context=None: None)
    monkeypatch.setattr(agent, "get_tools", lambda **kwargs: [])
    monkeypatch.setattr(_tools, "determine_tools_for_model", lambda agent, **kwargs: [])
    monkeypatch.setattr(_messages, "get_continue_run_messages", lambda agent, input=None: RunMessages(messages=[]))
def test_run_dispatch_respects_run_context_precedence(monkeypatch: pytest.MonkeyPatch):
    """run_dispatch precedence: call-site args > existing RunContext values > agent defaults."""
    agent = _make_precedence_test_agent()
    _patch_sync_dispatch_dependencies(agent, monkeypatch, runs=[])
    monkeypatch.setattr(agent, "initialize_agent", lambda debug_mode=None: None)
    def fake_run_impl(
        agent: Agent,
        run_response,
        run_context,
        session_id: str = "",
        user_id: Optional[str] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        response_format: Optional[Any] = None,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ):
        # Deregister so the run does not linger in the active-run registry.
        cleanup_run(run_response.run_id) # type: ignore[arg-type]
        return run_response
    monkeypatch.setattr(_run, "_run", fake_run_impl)
    # Case 1: a pre-populated context with no call-site overrides keeps its own values.
    preserved_context = RunContext(
        run_id="ctx-preserve",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    _run.run_dispatch(
        agent=agent,
        input="hello",
        run_id="run-preserve",
        stream=False,
        run_context=preserved_context,
    )
    assert preserved_context.dependencies == {"ctx_dep": "keep"}
    assert preserved_context.knowledge_filters == {"ctx_filter": "keep"}
    assert preserved_context.metadata == {"ctx_meta": "keep"}
    # output_schema is always set from resolved options (for workflow reuse)
    assert preserved_context.output_schema == {"type": "object", "properties": {"agent": {"type": "string"}}}
    # Case 2: call-site arguments override the context's existing values (merged with agent defaults).
    override_context = RunContext(
        run_id="ctx-override",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    _run.run_dispatch(
        agent=agent,
        input="hello",
        run_id="run-override",
        stream=False,
        run_context=override_context,
        dependencies={"call_dep": "override"},
        knowledge_filters={"call_filter": "override"},
        metadata={"call_meta": "override"},
        output_schema={"call_schema": "override"},
    )
    assert override_context.dependencies == {"call_dep": "override"}
    assert override_context.knowledge_filters == {"agent_filter": "default", "call_filter": "override"}
    assert override_context.metadata == {"call_meta": "override", "agent_meta": "default"}
    assert override_context.output_schema == {"call_schema": "override"}
    # Case 3: an empty context with no call-site args falls back to the agent's defaults.
    empty_context = RunContext(
        run_id="ctx-empty",
        session_id="session-1",
        session_state={},
        dependencies=None,
        knowledge_filters=None,
        metadata=None,
        output_schema=None,
    )
    _run.run_dispatch(
        agent=agent,
        input="hello",
        run_id="run-empty",
        stream=False,
        run_context=empty_context,
    )
    assert empty_context.dependencies == {"agent_dep": "default"}
    assert empty_context.knowledge_filters == {"agent_filter": "default"}
    assert empty_context.metadata == {"agent_meta": "default"}
    assert empty_context.output_schema == {"type": "object", "properties": {"agent": {"type": "string"}}}
@pytest.mark.asyncio
async def test_arun_dispatch_respects_run_context_precedence(monkeypatch: pytest.MonkeyPatch):
    """Async mirror of the sync precedence test: call-site > RunContext > agent defaults."""
    agent = _make_precedence_test_agent()
    monkeypatch.setattr(agent, "initialize_agent", lambda debug_mode=None: None)
    monkeypatch.setattr(_response, "get_response_format", lambda agent, run_context=None: None)
    async def fake_arun_impl(
        agent: Agent,
        run_response,
        run_context,
        user_id: Optional[str] = None,
        response_format: Optional[Any] = None,
        session_id: Optional[str] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ):
        return run_response
    monkeypatch.setattr(_run, "_arun", fake_arun_impl)
    # Case 1: pre-populated context, no call-site overrides -> context values preserved.
    preserved_context = RunContext(
        run_id="actx-preserve",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    await _run.arun_dispatch(
        agent=agent,
        input="hello",
        run_id="arun-preserve",
        stream=False,
        run_context=preserved_context,
    )
    assert preserved_context.dependencies == {"ctx_dep": "keep"}
    assert preserved_context.knowledge_filters == {"ctx_filter": "keep"}
    assert preserved_context.metadata == {"ctx_meta": "keep"}
    # output_schema is always set from resolved options (for workflow reuse)
    assert preserved_context.output_schema == {"type": "object", "properties": {"agent": {"type": "string"}}}
    # Case 2: call-site arguments override, merged with agent defaults.
    override_context = RunContext(
        run_id="actx-override",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    await _run.arun_dispatch(
        agent=agent,
        input="hello",
        run_id="arun-override",
        stream=False,
        run_context=override_context,
        dependencies={"call_dep": "override"},
        knowledge_filters={"call_filter": "override"},
        metadata={"call_meta": "override"},
        output_schema={"call_schema": "override"},
    )
    assert override_context.dependencies == {"call_dep": "override"}
    assert override_context.knowledge_filters == {"agent_filter": "default", "call_filter": "override"}
    assert override_context.metadata == {"call_meta": "override", "agent_meta": "default"}
    assert override_context.output_schema == {"call_schema": "override"}
    # Case 3: empty context falls back to the agent's defaults.
    empty_context = RunContext(
        run_id="actx-empty",
        session_id="session-1",
        session_state={},
        dependencies=None,
        knowledge_filters=None,
        metadata=None,
        output_schema=None,
    )
    await _run.arun_dispatch(
        agent=agent,
        input="hello",
        run_id="arun-empty",
        stream=False,
        run_context=empty_context,
    )
    assert empty_context.dependencies == {"agent_dep": "default"}
    assert empty_context.knowledge_filters == {"agent_filter": "default"}
    assert empty_context.metadata == {"agent_meta": "default"}
    assert empty_context.output_schema == {"type": "object", "properties": {"agent": {"type": "string"}}}
def test_continue_run_dispatch_respects_run_context_precedence(monkeypatch: pytest.MonkeyPatch):
    """continue_run_dispatch precedence: call-site > RunContext > agent defaults (no output_schema here)."""
    agent = _make_precedence_test_agent()
    _patch_continue_dispatch_dependencies(agent, monkeypatch)
    def fake_continue_run(
        agent: Agent,
        run_response: RunOutput,
        run_messages: RunMessages,
        run_context: RunContext,
        session: AgentSession,
        tools,
        user_id: Optional[str] = None,
        response_format: Optional[Any] = None,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ) -> RunOutput:
        return run_response
    monkeypatch.setattr(_run, "_continue_run", fake_continue_run)
    # Case 1: pre-populated context, no call-site overrides -> context values preserved.
    preserved_context = RunContext(
        run_id="continue-preserve",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
    )
    _run.continue_run_dispatch(
        agent=agent,
        run_response=RunOutput(run_id="continue-run-1", session_id="session-1", messages=[]),
        stream=False,
        run_context=preserved_context,
    )
    assert preserved_context.dependencies == {"ctx_dep": "keep"}
    assert preserved_context.knowledge_filters == {"ctx_filter": "keep"}
    assert preserved_context.metadata == {"ctx_meta": "keep"}
    # Case 2: call-site arguments override, merged with agent defaults.
    override_context = RunContext(
        run_id="continue-override",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
    )
    _run.continue_run_dispatch(
        agent=agent,
        run_response=RunOutput(run_id="continue-run-2", session_id="session-1", messages=[]),
        stream=False,
        run_context=override_context,
        dependencies={"call_dep": "override"},
        knowledge_filters={"call_filter": "override"},
        metadata={"call_meta": "override"},
    )
    assert override_context.dependencies == {"call_dep": "override"}
    assert override_context.knowledge_filters == {"agent_filter": "default", "call_filter": "override"}
    assert override_context.metadata == {"call_meta": "override", "agent_meta": "default"}
    # Case 3: empty context falls back to the agent's defaults.
    empty_context = RunContext(
        run_id="continue-empty",
        session_id="session-1",
        session_state={},
        dependencies=None,
        knowledge_filters=None,
        metadata=None,
    )
    _run.continue_run_dispatch(
        agent=agent,
        run_response=RunOutput(run_id="continue-run-3", session_id="session-1", messages=[]),
        stream=False,
        run_context=empty_context,
    )
    assert empty_context.dependencies == {"agent_dep": "default"}
    assert empty_context.knowledge_filters == {"agent_filter": "default"}
    assert empty_context.metadata == {"agent_meta": "default"}
@pytest.mark.asyncio
async def test_acontinue_run_dispatch_respects_run_context_precedence(monkeypatch: pytest.MonkeyPatch):
    """Async continue-dispatch precedence: call-site > RunContext > agent defaults."""
    agent = _make_precedence_test_agent()
    monkeypatch.setattr(agent, "initialize_agent", lambda debug_mode=None: None)
    monkeypatch.setattr(_response, "get_response_format", lambda agent, run_context=None: None)
    async def fake_acontinue_run(
        agent: Agent,
        session_id: str,
        run_context: RunContext,
        run_response: Optional[RunOutput] = None,
        updated_tools=None,
        requirements=None,
        run_id: Optional[str] = None,
        user_id: Optional[str] = None,
        response_format: Optional[Any] = None,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ) -> RunOutput:
        return run_response if run_response is not None else RunOutput(run_id=run_id, session_id=session_id) # type: ignore[arg-type]
    monkeypatch.setattr(_run, "_acontinue_run", fake_acontinue_run)
    # Case 1: pre-populated context, no call-site overrides -> context values preserved.
    preserved_context = RunContext(
        run_id="acontinue-preserve",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
    )
    await _run.acontinue_run_dispatch(
        agent=agent,
        run_response=RunOutput(run_id="acontinue-run-1", session_id="session-1", messages=[]),
        stream=False,
        run_context=preserved_context,
    )
    assert preserved_context.dependencies == {"ctx_dep": "keep"}
    assert preserved_context.knowledge_filters == {"ctx_filter": "keep"}
    assert preserved_context.metadata == {"ctx_meta": "keep"}
    # Case 2: call-site arguments override, merged with agent defaults.
    override_context = RunContext(
        run_id="acontinue-override",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
    )
    await _run.acontinue_run_dispatch(
        agent=agent,
        run_response=RunOutput(run_id="acontinue-run-2", session_id="session-1", messages=[]),
        stream=False,
        run_context=override_context,
        dependencies={"call_dep": "override"},
        knowledge_filters={"call_filter": "override"},
        metadata={"call_meta": "override"},
    )
    assert override_context.dependencies == {"call_dep": "override"}
    assert override_context.knowledge_filters == {"agent_filter": "default", "call_filter": "override"}
    assert override_context.metadata == {"call_meta": "override", "agent_meta": "default"}
    # Case 3: empty context falls back to the agent's defaults.
    empty_context = RunContext(
        run_id="acontinue-empty",
        session_id="session-1",
        session_state={},
        dependencies=None,
        knowledge_filters=None,
        metadata=None,
    )
    await _run.acontinue_run_dispatch(
        agent=agent,
        run_response=RunOutput(run_id="acontinue-run-3", session_id="session-1", messages=[]),
        stream=False,
        run_context=empty_context,
    )
    assert empty_context.dependencies == {"agent_dep": "default"}
    assert empty_context.knowledge_filters == {"agent_filter": "default"}
    assert empty_context.metadata == {"agent_meta": "default"}
def test_all_pause_handlers_accept_run_context():
    """Every pause handler (sync/async, stream/non-stream) must take run_context."""
    handlers = (
        _run.handle_agent_run_paused,
        _run.handle_agent_run_paused_stream,
        _run.ahandle_agent_run_paused,
        _run.ahandle_agent_run_paused_stream,
    )
    for handler in handlers:
        signature = inspect.signature(handler)
        assert "run_context" in signature.parameters, f"{handler.__name__} missing run_context param"
def test_handle_agent_run_paused_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """The sync pause handler must hand its run_context through to cleanup_and_store."""
    seen: dict[str, Any] = {}

    def record_cleanup(agent, run_response, session, run_context=None, user_id=None):
        seen["run_context"] = run_context

    monkeypatch.setattr(_run, "create_approval_from_pause", lambda **kwargs: None)
    monkeypatch.setattr(_run, "cleanup_and_store", record_cleanup)
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    _run.handle_agent_run_paused(
        agent=Agent(name="test-hitl"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=AgentSession(session_id="s1"),
        user_id="u1",
        run_context=ctx,
    )
    # Identity check: the very same object must be forwarded, not a copy.
    assert seen["run_context"] is ctx
@pytest.mark.asyncio
async def test_ahandle_agent_run_paused_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """The async pause handler must hand its run_context through to acleanup_and_store."""
    seen: dict[str, Any] = {}

    async def record_acleanup(agent, run_response, session, run_context=None, user_id=None):
        seen["run_context"] = run_context

    async def approval_stub(**kwargs):
        return None

    monkeypatch.setattr(_run, "acleanup_and_store", record_acleanup)
    monkeypatch.setattr(_run, "acreate_approval_from_pause", approval_stub)
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    await _run.ahandle_agent_run_paused(
        agent=Agent(name="test-hitl-async"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=AgentSession(session_id="s1"),
        user_id="u1",
        run_context=ctx,
    )
    # Identity check: the very same object must be forwarded, not a copy.
    assert seen["run_context"] is ctx
def test_handle_agent_run_paused_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Pausing a run must copy the run_context session_state into the stored session."""
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("create_approval_from_pause", lambda **kwargs: None),
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data={})
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"watchlist": ["AAPL"]})
    result = _run.handle_agent_run_paused(
        agent=Agent(name="test-hitl"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=session,
        user_id="u1",
        run_context=ctx,
    )
    assert result.status == RunStatus.paused
    assert session.session_data["session_state"] == {"watchlist": ["AAPL"]}
    assert result.session_state == {"watchlist": ["AAPL"]}
def test_handle_agent_run_paused_without_run_context_does_not_set_state(monkeypatch: pytest.MonkeyPatch):
    """Without a run_context, the pause handler must not write session_state."""
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("create_approval_from_pause", lambda **kwargs: None),
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data={})
    result = _run.handle_agent_run_paused(
        agent=Agent(name="test-hitl"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=session,
        user_id="u1",
    )
    assert result.status == RunStatus.paused
    assert "session_state" not in session.session_data
def test_handle_agent_run_paused_persists_state_when_session_data_is_none(monkeypatch: pytest.MonkeyPatch):
    """session_data=None must be lazily initialized when persisting session_state."""
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("create_approval_from_pause", lambda **kwargs: None),
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data=None)
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"watchlist": ["AAPL"]})
    result = _run.handle_agent_run_paused(
        agent=Agent(name="test-hitl"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=session,
        user_id="u1",
        run_context=ctx,
    )
    assert result.status == RunStatus.paused
    assert result.session_state == {"watchlist": ["AAPL"]}
    assert session.session_data == {"session_state": {"watchlist": ["AAPL"]}}
@pytest.mark.asyncio
async def test_ahandle_agent_run_paused_persists_state_when_session_data_is_none(monkeypatch: pytest.MonkeyPatch):
    """Async variant: session_data=None must be lazily initialized on pause."""

    async def approval_stub(**kwargs):
        return None

    monkeypatch.setattr(_run, "acreate_approval_from_pause", approval_stub)
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data=None)
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"cart": ["item-1"]})
    result = await _run.ahandle_agent_run_paused(
        agent=Agent(name="test-hitl-async"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=session,
        user_id="u1",
        run_context=ctx,
    )
    assert result.status == RunStatus.paused
    assert result.session_state == {"cart": ["item-1"]}
    assert session.session_data == {"session_state": {"cart": ["item-1"]}}
@pytest.mark.asyncio
async def test_ahandle_agent_run_paused_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Async pause must copy run_context session_state into the stored session."""

    async def approval_stub(**kwargs):
        return None

    monkeypatch.setattr(_run, "acreate_approval_from_pause", approval_stub)
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data={})
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"cart": ["item-1"]})
    result = await _run.ahandle_agent_run_paused(
        agent=Agent(name="test-hitl-async"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=session,
        user_id="u1",
        run_context=ctx,
    )
    assert result.status == RunStatus.paused
    assert session.session_data["session_state"] == {"cart": ["item-1"]}
    assert result.session_state == {"cart": ["item-1"]}
def test_handle_agent_run_paused_stream_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """The streaming pause handler must forward run_context to cleanup_and_store."""
    seen: dict[str, Any] = {}

    def record_cleanup(agent, run_response, session, run_context=None, user_id=None):
        seen["run_context"] = run_context

    monkeypatch.setattr(_run, "create_approval_from_pause", lambda **kwargs: None)
    monkeypatch.setattr(_run, "cleanup_and_store", record_cleanup)
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    stream = _run.handle_agent_run_paused_stream(
        agent=Agent(name="test-hitl-stream"),
        run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
        session=AgentSession(session_id="s1"),
        user_id="u1",
        run_context=ctx,
    )
    events = list(stream)  # drain the generator so cleanup actually runs
    assert seen["run_context"] is ctx
    assert len(events) >= 1
@pytest.mark.asyncio
async def test_ahandle_agent_run_paused_stream_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """The async streaming pause handler must forward run_context to acleanup_and_store."""
    seen: dict[str, Any] = {}

    async def record_acleanup(agent, run_response, session, run_context=None, user_id=None):
        seen["run_context"] = run_context

    async def approval_stub(**kwargs):
        return None

    monkeypatch.setattr(_run, "acleanup_and_store", record_acleanup)
    monkeypatch.setattr(_run, "acreate_approval_from_pause", approval_stub)
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    # Drain the async generator so cleanup actually runs.
    events = [
        event
        async for event in _run.ahandle_agent_run_paused_stream(
            agent=Agent(name="test-hitl-stream-async"),
            run_response=RunOutput(run_id="r1", session_id="s1", messages=[]),
            session=AgentSession(session_id="s1"),
            user_id="u1",
            run_context=ctx,
        )
    ]
    assert seen["run_context"] is ctx
    assert len(events) >= 1
def test_handle_agent_run_paused_stream_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Streaming pause must persist run_context session_state to session and output."""
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("create_approval_from_pause", lambda **kwargs: None),
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data={})
    run_response = RunOutput(run_id="r1", session_id="s1", messages=[])
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"watchlist": ["AAPL"]})
    events = list(
        _run.handle_agent_run_paused_stream(
            agent=Agent(name="test-hitl-stream"),
            run_response=run_response,
            session=session,
            user_id="u1",
            run_context=ctx,
        )
    )
    assert len(events) >= 1
    assert session.session_data["session_state"] == {"watchlist": ["AAPL"]}
    assert run_response.session_state == {"watchlist": ["AAPL"]}
@pytest.mark.asyncio
async def test_ahandle_agent_run_paused_stream_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Async streaming pause must persist run_context session_state to session and output."""

    async def approval_stub(**kwargs):
        return None

    monkeypatch.setattr(_run, "acreate_approval_from_pause", approval_stub)
    monkeypatch.setattr(_session, "save_session", lambda agent, session: None)
    # Stub out all side-effecting collaborators of the pause handler.
    for name, stub in (
        ("scrub_run_output_for_storage", lambda agent, run_response: None),
        ("save_run_response_to_file", lambda agent, **kwargs: None),
        ("update_session_metrics", lambda agent, session, run_response: None),
    ):
        monkeypatch.setattr(_run, name, stub)
    session = AgentSession(session_id="s1", session_data={})
    run_response = RunOutput(run_id="r1", session_id="s1", messages=[])
    ctx = RunContext(run_id="r1", session_id="s1", session_state={"cart": ["item-1"]})
    # Drain the async generator so persistence actually runs.
    events = [
        event
        async for event in _run.ahandle_agent_run_paused_stream(
            agent=Agent(name="test-hitl-stream-async"),
            run_response=run_response,
            session=session,
            user_id="u1",
            run_context=ctx,
        )
    ]
    assert len(events) >= 1
    assert session.session_data["session_state"] == {"cart": ["item-1"]}
    assert run_response.session_state == {"cart": ["item-1"]}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_run_regressions.py",
"license": "Apache License 2.0",
"lines": 850,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_migration_v2_5_0_dispatch.py | """Tests for v2.5.0 migration dispatch — verifies all DB type branches."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
sqlalchemy = pytest.importorskip("sqlalchemy")
from agno.db.migrations.versions import v2_5_0 # noqa: E402
def _make_sync_db(class_name: str):
"""Create a mock sync DB whose type().__name__ returns the given class_name.
We create an actual class instance (not MagicMock) so that type(db).__name__
returns the correct class name. The class uses MagicMock for attribute access.
"""
# Create a class that delegates attribute access to a MagicMock
mock = MagicMock()
class FakeDb:
def __getattr__(self, name):
return getattr(mock, name)
# Rename the class to match the expected DB type
FakeDb.__name__ = class_name
FakeDb.__qualname__ = class_name
return FakeDb()
def _make_async_db(class_name: str):
"""Create a mock async DB whose type().__name__ returns the given class_name.
We create an actual class instance (not MagicMock) so that type(db).__name__
returns the correct class name. The class uses MagicMock for attribute access.
"""
# Create a class that delegates attribute access to a MagicMock
mock = MagicMock()
class FakeDb:
def __getattr__(self, name):
return getattr(mock, name)
# Rename the class to match the expected DB type
FakeDb.__name__ = class_name
FakeDb.__qualname__ = class_name
return FakeDb()
# ---------------------------------------------------------------------------
# Sync up() dispatch
# ---------------------------------------------------------------------------
class TestSyncUpDispatch:
    """up() must route sessions-table migrations to the right per-DB helper."""

    def test_non_sessions_table_returns_false(self):
        assert v2_5_0.up(_make_sync_db("PostgresDb"), "non_sessions", "some_table") is False

    def test_postgres_dispatches(self):
        db = _make_sync_db("PostgresDb")
        with patch.object(v2_5_0, "_migrate_postgres", return_value=True) as fake:
            assert v2_5_0.up(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    def test_mysql_dispatches(self):
        db = _make_sync_db("MySQLDb")
        with patch.object(v2_5_0, "_migrate_mysql", return_value=True) as fake:
            assert v2_5_0.up(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    def test_singlestore_dispatches(self):
        db = _make_sync_db("SingleStoreDb")
        with patch.object(v2_5_0, "_migrate_singlestore", return_value=True) as fake:
            assert v2_5_0.up(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    def test_sqlite_returns_false(self):
        assert v2_5_0.up(_make_sync_db("SqliteDb"), "sessions", "my_sessions") is False

    def test_unknown_db_returns_false(self):
        assert v2_5_0.up(_make_sync_db("UnknownDb"), "sessions", "my_sessions") is False
# ---------------------------------------------------------------------------
# Async up() dispatch
# ---------------------------------------------------------------------------
class TestAsyncUpDispatch:
    """async_up() must route sessions-table migrations to the right async helper."""

    @pytest.mark.asyncio
    async def test_non_sessions_table_returns_false(self):
        assert await v2_5_0.async_up(_make_async_db("AsyncPostgresDb"), "non_sessions", "t") is False

    @pytest.mark.asyncio
    async def test_async_postgres_dispatches(self):
        db = _make_async_db("AsyncPostgresDb")
        with patch.object(v2_5_0, "_migrate_async_postgres", new_callable=AsyncMock, return_value=True) as fake:
            assert await v2_5_0.async_up(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    @pytest.mark.asyncio
    async def test_async_mysql_dispatches(self):
        db = _make_async_db("AsyncMySQLDb")
        with patch.object(v2_5_0, "_migrate_async_mysql", new_callable=AsyncMock, return_value=True) as fake:
            assert await v2_5_0.async_up(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    @pytest.mark.asyncio
    async def test_async_sqlite_returns_false(self):
        assert await v2_5_0.async_up(_make_async_db("AsyncSqliteDb"), "sessions", "t") is False

    @pytest.mark.asyncio
    async def test_unknown_async_db_returns_false(self):
        assert await v2_5_0.async_up(_make_async_db("AsyncUnknownDb"), "sessions", "t") is False
# ---------------------------------------------------------------------------
# Sync down() dispatch
# ---------------------------------------------------------------------------
class TestSyncDownDispatch:
    """down() must route sessions-table reverts to the right per-DB helper."""

    def test_non_sessions_table_returns_false(self):
        assert v2_5_0.down(_make_sync_db("PostgresDb"), "non_sessions", "t") is False

    def test_postgres_dispatches(self):
        db = _make_sync_db("PostgresDb")
        with patch.object(v2_5_0, "_revert_postgres", return_value=True) as fake:
            assert v2_5_0.down(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    def test_mysql_dispatches(self):
        db = _make_sync_db("MySQLDb")
        with patch.object(v2_5_0, "_revert_mysql", return_value=True) as fake:
            assert v2_5_0.down(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    def test_singlestore_dispatches(self):
        db = _make_sync_db("SingleStoreDb")
        with patch.object(v2_5_0, "_revert_singlestore", return_value=True) as fake:
            assert v2_5_0.down(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    def test_sqlite_returns_false(self):
        assert v2_5_0.down(_make_sync_db("SqliteDb"), "sessions", "t") is False
# ---------------------------------------------------------------------------
# Async down() dispatch
# ---------------------------------------------------------------------------
class TestAsyncDownDispatch:
    """async_down() must route sessions-table reverts to the right async helper."""

    @pytest.mark.asyncio
    async def test_non_sessions_table_returns_false(self):
        assert await v2_5_0.async_down(_make_async_db("AsyncPostgresDb"), "non_sessions", "t") is False

    @pytest.mark.asyncio
    async def test_async_postgres_dispatches(self):
        db = _make_async_db("AsyncPostgresDb")
        with patch.object(v2_5_0, "_revert_async_postgres", new_callable=AsyncMock, return_value=True) as fake:
            assert await v2_5_0.async_down(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    @pytest.mark.asyncio
    async def test_async_mysql_dispatches(self):
        db = _make_async_db("AsyncMySQLDb")
        with patch.object(v2_5_0, "_revert_async_mysql", new_callable=AsyncMock, return_value=True) as fake:
            assert await v2_5_0.async_down(db, "sessions", "my_sessions") is True
        fake.assert_called_once_with(db, "my_sessions")

    @pytest.mark.asyncio
    async def test_async_sqlite_returns_false(self):
        assert await v2_5_0.async_down(_make_async_db("AsyncSqliteDb"), "sessions", "t") is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_migration_v2_5_0_dispatch.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_session_isolation.py | """Tests for session-level user_id isolation (IDOR prevention).
Verifies that delete_session, delete_sessions, and rename_session
properly filter by user_id when provided, preventing cross-user access.
"""
import pytest
from agno.db.base import SessionType
from agno.db.in_memory.in_memory_db import InMemoryDb
@pytest.fixture
def db():
    """InMemoryDb pre-seeded with two alice sessions (s1, s3) and one bob session (s2)."""

    def _row(session_id, user_id, session_name):
        return {
            "session_id": session_id,
            "user_id": user_id,
            "session_type": "agent",
            "session_data": {"session_name": session_name},
        }

    database = InMemoryDb()
    database._sessions = [
        _row("s1", "alice", "Alice Session"),
        _row("s2", "bob", "Bob Session"),
        _row("s3", "alice", "Alice Session 2"),
    ]
    return database
class TestDeleteSessionIsolation:
    """delete_session must only delete sessions owned by the given user_id."""

    def test_delete_own_session(self, db):
        assert db.delete_session("s1", user_id="alice") is True
        assert {s["session_id"] for s in db._sessions} == {"s2", "s3"}

    def test_delete_other_users_session_blocked(self, db):
        # bob must not be able to remove alice's session
        assert db.delete_session("s1", user_id="bob") is False
        assert len(db._sessions) == 3

    def test_delete_without_user_id_wildcard(self, db):
        # user_id=None is the admin/wildcard path
        assert db.delete_session("s1", user_id=None) is True
        assert len(db._sessions) == 2

    def test_delete_nonexistent_session(self, db):
        assert db.delete_session("s999", user_id="alice") is False
        assert len(db._sessions) == 3
class TestDeleteSessionsIsolation:
    """Bulk delete_sessions must only remove sessions owned by the given user_id."""

    def test_delete_own_sessions(self, db):
        db.delete_sessions(["s1", "s3"], user_id="alice")
        assert [s["session_id"] for s in db._sessions] == ["s2"]

    def test_delete_mixed_ownership_only_deletes_own(self, db):
        # s2 belongs to bob, so only s1 is removed
        db.delete_sessions(["s1", "s2"], user_id="alice")
        assert {s["session_id"] for s in db._sessions} == {"s2", "s3"}

    def test_delete_without_user_id_wildcard(self, db):
        db.delete_sessions(["s1", "s2"], user_id=None)
        assert [s["session_id"] for s in db._sessions] == ["s3"]

    def test_delete_other_users_sessions_blocked(self, db):
        db.delete_sessions(["s1", "s3"], user_id="bob")
        assert len(db._sessions) == 3
class TestRenameSessionIsolation:
    """rename_session must only touch sessions owned by the given user_id."""

    @staticmethod
    def _rename(db, session_id, new_name, user_id):
        """Invoke rename_session with the fixed AGENT type and raw (dict) output."""
        return db.rename_session(
            session_id=session_id,
            session_type=SessionType.AGENT,
            session_name=new_name,
            user_id=user_id,
            deserialize=False,
        )

    def test_rename_own_session(self, db):
        renamed = self._rename(db, "s1", "New Name", "alice")
        assert renamed is not None
        assert renamed["session_data"]["session_name"] == "New Name"

    def test_rename_other_users_session_blocked(self, db):
        assert self._rename(db, "s1", "Hacked Name", "bob") is None
        # The original name must be untouched.
        assert db._sessions[0]["session_data"]["session_name"] == "Alice Session"

    def test_rename_without_user_id_wildcard(self, db):
        renamed = self._rename(db, "s1", "Wildcard Name", None)
        assert renamed is not None
        assert renamed["session_data"]["session_name"] == "Wildcard Name"

    def test_rename_nonexistent_session(self, db):
        assert self._rename(db, "s999", "Ghost", "alice") is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_session_isolation.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_router_jwt_override.py | """Tests for JWT user_id override in session, traces, and memory routers.
Verifies that request.state.user_id (set by JWT middleware) overrides
any client-supplied user_id, preventing IDOR attacks.
"""
class FakeState:
    """Minimal stand-in for Starlette's request.state carrying an optional user_id."""

    def __init__(self, user_id=None):
        self.user_id = user_id


class FakeRequest:
    """Minimal stand-in for a request object whose state may hold a JWT user_id."""

    def __init__(self, user_id=None):
        self.state = FakeState(user_id=user_id)
class TestSessionRouterJwtOverride:
    """Verify session router endpoints extract JWT user_id."""

    @staticmethod
    def _effective_user_id(request, client_value):
        # Mirrors the router logic: a JWT-derived user_id on request.state
        # (present and not None) always wins over the client-supplied value.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            return request.state.user_id
        return client_value

    def test_delete_session_jwt_overrides_client_user_id(self):
        request = FakeRequest(user_id="jwt-user-123")
        assert self._effective_user_id(request, "attacker-user-456") == "jwt-user-123"

    def test_delete_session_no_jwt_preserves_client_user_id(self):
        request = FakeRequest(user_id=None)
        assert self._effective_user_id(request, "client-user-789") == "client-user-789"

    def test_delete_sessions_jwt_overrides(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), None) == "jwt-user"

    def test_rename_session_jwt_overrides(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), "attacker") == "jwt-user"
class TestTracesRouterJwtOverride:
    """Verify traces router endpoints extract JWT user_id."""

    @staticmethod
    def _effective_user_id(request, client_value):
        # Mirrors the router logic: a JWT-derived user_id on request.state
        # (present and not None) always wins over the client-supplied value.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            return request.state.user_id
        return client_value

    def test_get_traces_jwt_override(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), "attacker") == "jwt-user"

    def test_get_trace_jwt_override(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), None) == "jwt-user"

    def test_get_trace_stats_jwt_override(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), None) == "jwt-user"
class TestMemoryRouterJwtOverride:
    """Verify memory router endpoints extract JWT user_id."""

    @staticmethod
    def _effective_user_id(request, client_value):
        # Mirrors the router logic: a JWT-derived user_id on request.state
        # (present and not None) always wins over the client-supplied value.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            return request.state.user_id
        return client_value

    def test_delete_memory_jwt_override(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), "attacker") == "jwt-user"

    def test_delete_memories_jwt_override(self):
        request = FakeRequest(user_id="jwt-user")

        class BodyRequest:
            user_id = "attacker"

        body = BodyRequest()
        body.user_id = self._effective_user_id(request, body.user_id)
        assert body.user_id == "jwt-user"

    def test_optimize_memories_jwt_override(self):
        request = FakeRequest(user_id="jwt-user")

        class BodyRequest:
            user_id = "attacker"

        body = BodyRequest()
        body.user_id = self._effective_user_id(request, body.user_id)
        assert body.user_id == "jwt-user"

    def test_get_topics_jwt_override(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), None) == "jwt-user"

    def test_get_user_memory_stats_jwt_override(self):
        assert self._effective_user_id(FakeRequest(user_id="jwt-user"), None) == "jwt-user"
class TestJwtOverrideEdgeCases:
    """Edge cases for the JWT override pattern."""

    @staticmethod
    def _effective_user_id(request, client_value):
        # Mirrors the router logic: a JWT-derived user_id on request.state
        # (present and not None) always wins over the client-supplied value.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            return request.state.user_id
        return client_value

    def test_no_state_attribute(self):
        class BareRequest:
            pass

        request = BareRequest()
        request.state = object()  # state without a user_id attribute at all
        assert self._effective_user_id(request, "client-value") == "client-value"

    def test_state_user_id_is_none(self):
        assert self._effective_user_id(FakeRequest(user_id=None), "client-value") == "client-value"

    def test_state_user_id_empty_string(self):
        # Empty string is not None, so it still overrides the client value.
        assert self._effective_user_id(FakeRequest(user_id=""), "client-value") == ""

    def test_jwt_always_wins_over_attacker(self):
        request = FakeRequest(user_id="legitimate-user")
        for attacker_value in ["admin", "root", "other-user", "", None]:
            assert self._effective_user_id(request, attacker_value) == "legitimate-user"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_router_jwt_override.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_validate_knowledge_names.py | """Tests for AgentOS._validate_knowledge_instance_names()."""
from unittest.mock import MagicMock
import pytest
from agno.os.app import AgentOS
def _make_knowledge(name, db_id="db1", table_name="knowledge"):
"""Create a mock Knowledge instance with the given name, contents_db, and table."""
kb = MagicMock()
kb.name = name
kb.contents_db = MagicMock()
kb.contents_db.id = db_id
kb.contents_db.knowledge_table_name = table_name
return kb
def _make_knowledge_no_db(name=None):
"""Create a mock Knowledge instance with no contents_db."""
kb = MagicMock()
kb.name = name
kb.contents_db = None
return kb
class TestValidateKnowledgeInstanceNames:
    """Validation keys on the (name, db_id, table) tuple; only exact duplicates raise."""

    @staticmethod
    def _agent_os(instances):
        """Build an AgentOS with only knowledge_instances set (bypasses __init__)."""
        agent_os = AgentOS.__new__(AgentOS)
        agent_os.knowledge_instances = instances
        return agent_os

    def test_unique_names_pass(self):
        agent_os = self._agent_os(
            [
                _make_knowledge("kb_alpha", "db1"),
                _make_knowledge("kb_beta", "db2"),
                _make_knowledge("kb_gamma", "db3"),
            ]
        )
        agent_os._validate_knowledge_instance_names()  # should not raise

    def test_same_name_different_db_passes(self):
        """Same name with different db_id is allowed (different tuple key)."""
        agent_os = self._agent_os(
            [
                _make_knowledge("shared_name", "db1"),
                _make_knowledge("shared_name", "db2"),
            ]
        )
        agent_os._validate_knowledge_instance_names()

    def test_same_name_different_table_passes(self):
        """Same name + same db but different table is allowed."""
        agent_os = self._agent_os(
            [
                _make_knowledge("shared_name", "db1", "table_a"),
                _make_knowledge("shared_name", "db1", "table_b"),
            ]
        )
        agent_os._validate_knowledge_instance_names()

    def test_duplicate_name_db_table_raises(self):
        """Same (name, db_id, table) tuple should raise."""
        agent_os = self._agent_os(
            [
                _make_knowledge("shared_name", "db1", "knowledge"),
                _make_knowledge("shared_name", "db1", "knowledge"),
            ]
        )
        with pytest.raises(ValueError, match="Duplicate knowledge instances"):
            agent_os._validate_knowledge_instance_names()

    def test_empty_list_passes(self):
        self._agent_os([])._validate_knowledge_instance_names()

    def test_no_contents_db_skipped(self):
        agent_os = self._agent_os(
            [
                _make_knowledge_no_db("orphan"),
                _make_knowledge("valid", "db1"),
            ]
        )
        # The instance without contents_db is skipped; no duplicate => passes
        agent_os._validate_knowledge_instance_names()

    def test_fallback_name_from_db_id(self):
        """When knowledge.name is None, the fallback is 'knowledge_{db.id}'.

        Two with same db_id and same table => duplicate tuple."""
        agent_os = self._agent_os(
            [
                _make_knowledge(None, "same_db", "knowledge"),
                _make_knowledge(None, "same_db", "knowledge"),
            ]
        )
        with pytest.raises(ValueError, match="Duplicate knowledge instances"):
            agent_os._validate_knowledge_instance_names()

    def test_fallback_name_unique_db_ids(self):
        """Different db.id values produce different fallback names and different tuple keys."""
        self._agent_os(
            [_make_knowledge(None, "db_a"), _make_knowledge(None, "db_b")]
        )._validate_knowledge_instance_names()

    def test_error_message_contains_duplicate_name(self):
        agent_os = self._agent_os(
            [
                _make_knowledge("dup_name", "db1", "knowledge"),
                _make_knowledge("dup_name", "db1", "knowledge"),
                _make_knowledge("unique", "db3"),
            ]
        )
        with pytest.raises(ValueError, match="dup_name"):
            agent_os._validate_knowledge_instance_names()

    def test_multiple_different_duplicates(self):
        agent_os = self._agent_os(
            [
                _make_knowledge("name_a", "db1", "tbl"),
                _make_knowledge("name_a", "db1", "tbl"),
                _make_knowledge("name_b", "db3", "tbl"),
                _make_knowledge("name_b", "db3", "tbl"),
            ]
        )
        with pytest.raises(ValueError, match="Duplicate knowledge instances"):
            agent_os._validate_knowledge_instance_names()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_validate_knowledge_names.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_apply_to_context.py | """Tests for ResolvedRunOptions.apply_to_context() — team version.
NOTE: apply_to_context() always sets output_schema from resolved options.
This is intentional because the same run_context may be reused across workflow
steps with different teams, each with their own output_schema.
"""
from pydantic import BaseModel
from agno.run import RunContext
from agno.team._run_options import ResolvedRunOptions
def _make_opts(**overrides) -> ResolvedRunOptions:
    """Build a ResolvedRunOptions from test defaults merged with *overrides*."""
    base = {
        "stream": False,
        "stream_events": False,
        "yield_run_output": False,
        "add_history_to_context": False,
        "add_dependencies_to_context": False,
        "add_session_state_to_context": False,
        "dependencies": {"resolved": "deps"},
        "knowledge_filters": {"resolved": "filters"},
        "metadata": {"resolved": "meta"},
        "output_schema": None,
    }
    return ResolvedRunOptions(**{**base, **overrides})
def _make_context(**overrides) -> RunContext:
    """Build a RunContext with default ids, letting *overrides* win."""
    return RunContext(**{"run_id": "r1", "session_id": "s1", **overrides})
class TestApplyWhenProvided:
    """When a *_provided flag is True, apply_to_context overwrites the context value."""

    def test_dependencies_provided_overwrites(self):
        run_ctx = _make_context(dependencies={"existing": "value"})
        options = _make_opts(dependencies={"new": "value"})
        options.apply_to_context(run_ctx, dependencies_provided=True)
        assert run_ctx.dependencies == {"new": "value"}

    def test_knowledge_filters_provided_overwrites(self):
        run_ctx = _make_context(knowledge_filters={"existing": "f"})
        options = _make_opts(knowledge_filters={"new": "f"})
        options.apply_to_context(run_ctx, knowledge_filters_provided=True)
        assert run_ctx.knowledge_filters == {"new": "f"}

    def test_metadata_provided_overwrites(self):
        run_ctx = _make_context(metadata={"existing": "m"})
        options = _make_opts(metadata={"new": "m"})
        options.apply_to_context(run_ctx, metadata_provided=True)
        assert run_ctx.metadata == {"new": "m"}

    def test_output_schema_always_set_from_opts(self):
        """Team always sets output_schema from resolved options for workflow reuse."""

        class Schema(BaseModel):
            x: int

        run_ctx = _make_context(output_schema={"old": "schema"})
        options = _make_opts(output_schema=Schema)
        options.apply_to_context(run_ctx)
        assert run_ctx.output_schema is Schema
class TestApplyFallbackWhenNone:
    """Context fields that are None get back-filled from the resolved options."""

    def test_dependencies_none_gets_filled(self):
        run_ctx = _make_context(dependencies=None)
        options = _make_opts(dependencies={"default": "deps"})
        options.apply_to_context(run_ctx)
        assert run_ctx.dependencies == {"default": "deps"}

    def test_knowledge_filters_none_gets_filled(self):
        run_ctx = _make_context(knowledge_filters=None)
        options = _make_opts(knowledge_filters={"default": "f"})
        options.apply_to_context(run_ctx)
        assert run_ctx.knowledge_filters == {"default": "f"}

    def test_metadata_none_gets_filled(self):
        run_ctx = _make_context(metadata=None)
        options = _make_opts(metadata={"default": "m"})
        options.apply_to_context(run_ctx)
        assert run_ctx.metadata == {"default": "m"}

    def test_output_schema_none_gets_filled(self):
        class Schema(BaseModel):
            y: str

        run_ctx = _make_context(output_schema=None)
        options = _make_opts(output_schema=Schema)
        options.apply_to_context(run_ctx)
        assert run_ctx.output_schema is Schema
class TestExistingContextPreserved:
    """Non-None context values survive when no *_provided flag is set (except output_schema)."""

    def test_dependencies_kept(self):
        run_ctx = _make_context(dependencies={"keep": "me"})
        options = _make_opts(dependencies={"ignored": "value"})
        options.apply_to_context(run_ctx)
        assert run_ctx.dependencies == {"keep": "me"}

    def test_knowledge_filters_kept(self):
        run_ctx = _make_context(knowledge_filters={"keep": "f"})
        options = _make_opts(knowledge_filters={"ignored": "f"})
        options.apply_to_context(run_ctx)
        assert run_ctx.knowledge_filters == {"keep": "f"}

    def test_metadata_kept(self):
        run_ctx = _make_context(metadata={"keep": "m"})
        options = _make_opts(metadata={"ignored": "m"})
        options.apply_to_context(run_ctx)
        assert run_ctx.metadata == {"keep": "m"}

    def test_output_schema_always_overwritten(self):
        """Team always overwrites output_schema (unlike agent) for workflow reuse."""

        class Existing(BaseModel):
            a: int

        class NewSchema(BaseModel):
            b: int

        run_ctx = _make_context(output_schema=Existing)
        options = _make_opts(output_schema=NewSchema)
        options.apply_to_context(run_ctx)
        # Team always sets output_schema from opts, even if context had one
        assert run_ctx.output_schema is NewSchema
class TestAllFieldsTogether:
    """One pass exercising provided, preserved, and back-filled fields together."""

    def test_mixed_provided_and_fallback(self):
        run_ctx = _make_context(
            dependencies=None,
            knowledge_filters={"existing": "f"},
            metadata=None,
            output_schema={"existing": "schema"},
        )
        options = _make_opts(
            dependencies={"new": "d"},
            knowledge_filters={"new": "f"},
            metadata={"new": "m"},
            output_schema=None,
        )
        options.apply_to_context(
            run_ctx,
            dependencies_provided=True,
            knowledge_filters_provided=False,
            metadata_provided=False,
        )
        # dependencies: provided=True, so overwritten
        assert run_ctx.dependencies == {"new": "d"}
        # knowledge_filters: provided=False, existing not None, kept
        assert run_ctx.knowledge_filters == {"existing": "f"}
        # metadata: provided=False, was None, filled from opts
        assert run_ctx.metadata == {"new": "m"}
        # output_schema: always set from opts (team behavior for workflow reuse)
        assert run_ctx.output_schema is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_apply_to_context.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_run_context_precedence.py | from typing import Any, Optional
import pytest
from agno.run import RunContext
from agno.run.cancel import cleanup_run
from agno.run.team import TeamRunOutput
from agno.session.team import TeamSession
from agno.team import _init, _response, _run, _run_options, _storage, _utils
from agno.team.team import Team
def _make_precedence_test_team() -> Team:
    """Build a Team carrying a default value for every precedence-relevant field."""
    return Team(
        name="precedence-team",
        members=[],
        dependencies={"team_dep": "default"},
        knowledge_filters={"team_filter": "default"},
        metadata={"team_meta": "default"},
        output_schema={"type": "object", "properties": {"team": {"type": "string"}}},
    )
def _patch_team_dispatch_dependencies(team: Team, monkeypatch: pytest.MonkeyPatch) -> None:
    """Stub out every collaborator that team dispatch touches.

    Each setattr replaces one dependency of run_dispatch/arun_dispatch with a
    minimal fake that preserves the data-flow shape (session ids, filters,
    metadata) without any storage, model, or member I/O.
    """
    # Force the sync code path: no async DB present.
    monkeypatch.setattr(_init, "_has_async_db", lambda team: False)
    # Team initialization becomes a no-op.
    monkeypatch.setattr(team, "initialize_team", lambda debug_mode=None: None)
    # Session init just echoes the ids it was given.
    monkeypatch.setattr(_init, "_initialize_session", lambda team, session_id=None, user_id=None: (session_id, user_id))
    # Sessions are fabricated in memory rather than read from storage.
    monkeypatch.setattr(
        _storage,
        "_read_or_create_session",
        lambda team, session_id=None, user_id=None: TeamSession(session_id=session_id, user_id=user_id),
    )
    monkeypatch.setattr(_storage, "_update_metadata", lambda team, session=None: None)
    # Session state passes straight through untouched.
    monkeypatch.setattr(_init, "_initialize_session_state", lambda team, session_state=None, **kwargs: session_state)
    monkeypatch.setattr(_storage, "_load_session_state", lambda team, session=None, session_state=None: session_state)
    monkeypatch.setattr(_run, "_resolve_run_dependencies", lambda team, run_context: None)
    monkeypatch.setattr(_response, "get_response_format", lambda team, run_context=None: None)
    # Effective filters: team default merged under any call-site filters
    # (call-site keys win on conflict, matching the assertions below).
    monkeypatch.setattr(
        _utils,
        "_get_effective_filters",
        lambda team, knowledge_filters=None: {"team_filter": "default", **(knowledge_filters or {})},
    )
    # Also patch in _run_options since resolve_run_options imports from _utils at call time
    # The fake resolver mirrors the real precedence rules: call-site kwargs win
    # over team attributes; knowledge filters merge with call-site values taking
    # precedence; metadata merges with TEAM values taking precedence.
    monkeypatch.setattr(
        _run_options,
        "resolve_run_options",
        lambda team, **kwargs: _run_options.ResolvedRunOptions(
            stream=kwargs.get("stream") if kwargs.get("stream") is not None else (team.stream or False),
            stream_events=kwargs.get("stream_events")
            if kwargs.get("stream_events") is not None
            else (team.stream_events or False),
            yield_run_output=kwargs.get("yield_run_output") or False,
            add_history_to_context=kwargs.get("add_history_to_context")
            if kwargs.get("add_history_to_context") is not None
            else team.add_history_to_context,
            add_dependencies_to_context=kwargs.get("add_dependencies_to_context")
            if kwargs.get("add_dependencies_to_context") is not None
            else team.add_dependencies_to_context,
            add_session_state_to_context=kwargs.get("add_session_state_to_context")
            if kwargs.get("add_session_state_to_context") is not None
            else team.add_session_state_to_context,
            dependencies=kwargs.get("dependencies") if kwargs.get("dependencies") is not None else team.dependencies,
            knowledge_filters=({"team_filter": "default", **(kwargs.get("knowledge_filters") or {})})
            if (team.knowledge_filters or kwargs.get("knowledge_filters"))
            else None,
            metadata=({**(kwargs.get("metadata") or {}), **(team.metadata or {})})
            if (kwargs.get("metadata") is not None or team.metadata is not None)
            else None,
            output_schema=kwargs.get("output_schema")
            if kwargs.get("output_schema") is not None
            else team.output_schema,
        ),
    )
def test_run_respects_run_context_precedence(monkeypatch: pytest.MonkeyPatch):
    """Sync dispatch honors run_context precedence in three scenarios.

    1. A pre-populated run_context is preserved (except output_schema, which the
       team always sets from resolved options).
    2. Explicit call-site kwargs override the run_context values.
    3. An empty run_context is back-filled from team defaults.
    """
    team = _make_precedence_test_team()
    _patch_team_dispatch_dependencies(team, monkeypatch)

    def fake_run(
        team,
        run_response: TeamRunOutput,
        run_context: RunContext,
        session: TeamSession,
        user_id: Optional[str] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        response_format: Optional[Any] = None,
        stream_events: bool = False,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ) -> TeamRunOutput:
        # Deregister the run so the cancellation registry stays clean between scenarios.
        cleanup_run(run_response.run_id)  # type: ignore[arg-type]
        return run_response

    monkeypatch.setattr(_run, "_run", fake_run)

    # Scenario 1: values already on the context must survive dispatch.
    preserved_context = RunContext(
        run_id="team-preserve",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    _run.run_dispatch(
        team=team,
        input="hello",
        run_id="run-preserve",
        session_id="session-1",
        stream=False,
        run_context=preserved_context,
    )
    assert preserved_context.dependencies == {"ctx_dep": "keep"}
    assert preserved_context.knowledge_filters == {"ctx_filter": "keep"}
    assert preserved_context.metadata == {"ctx_meta": "keep"}
    # Team always sets output_schema from resolved options (for workflow reuse)
    assert preserved_context.output_schema == {"type": "object", "properties": {"team": {"type": "string"}}}

    # Scenario 2: explicit call-site kwargs override the context's values.
    override_context = RunContext(
        run_id="team-override",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    _run.run_dispatch(
        team=team,
        input="hello",
        run_id="run-override",
        session_id="session-1",
        stream=False,
        run_context=override_context,
        dependencies={"call_dep": "override"},
        knowledge_filters={"call_filter": "override"},
        metadata={"call_meta": "override"},
        output_schema={"call_schema": "override"},
    )
    assert override_context.dependencies == {"call_dep": "override"}
    # Filters merge: call-site keys win, team defaults remain.
    assert override_context.knowledge_filters == {"team_filter": "default", "call_filter": "override"}
    # Metadata merges the other way: team defaults win on conflict.
    assert override_context.metadata == {"call_meta": "override", "team_meta": "default"}
    assert override_context.output_schema == {"call_schema": "override"}

    # Scenario 3: an all-None context is back-filled from team defaults.
    empty_context = RunContext(
        run_id="team-empty",
        session_id="session-1",
        session_state={},
        dependencies=None,
        knowledge_filters=None,
        metadata=None,
        output_schema=None,
    )
    _run.run_dispatch(
        team=team,
        input="hello",
        run_id="run-empty",
        session_id="session-1",
        stream=False,
        run_context=empty_context,
    )
    assert empty_context.dependencies == {"team_dep": "default"}
    assert empty_context.knowledge_filters == {"team_filter": "default"}
    assert empty_context.metadata == {"team_meta": "default"}
    assert empty_context.output_schema == {"type": "object", "properties": {"team": {"type": "string"}}}
@pytest.mark.asyncio
async def test_arun_respects_run_context_precedence(monkeypatch: pytest.MonkeyPatch):
    """Async dispatch honors run_context precedence; mirrors the sync test.

    Same three scenarios: preserve a populated context, override via call-site
    kwargs, and back-fill an empty context from team defaults.
    """
    team = _make_precedence_test_team()
    _patch_team_dispatch_dependencies(team, monkeypatch)

    async def fake_arun(
        team,
        run_response: TeamRunOutput,
        run_context: RunContext,
        session_id: str,
        user_id: Optional[str] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        response_format: Optional[Any] = None,
        stream_events: bool = False,
        debug_mode: Optional[bool] = None,
        background_tasks: Optional[Any] = None,
        **kwargs: Any,
    ) -> TeamRunOutput:
        return run_response

    monkeypatch.setattr(_run, "_arun", fake_arun)

    # Scenario 1: values already on the context must survive dispatch.
    preserved_context = RunContext(
        run_id="ateam-preserve",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    await _run.arun_dispatch(
        team=team,
        input="hello",
        run_id="arun-preserve",
        session_id="session-1",
        stream=False,
        run_context=preserved_context,
    )
    assert preserved_context.dependencies == {"ctx_dep": "keep"}
    assert preserved_context.knowledge_filters == {"ctx_filter": "keep"}
    assert preserved_context.metadata == {"ctx_meta": "keep"}
    # Team always sets output_schema from resolved options (for workflow reuse)
    assert preserved_context.output_schema == {"type": "object", "properties": {"team": {"type": "string"}}}

    # Scenario 2: explicit call-site kwargs override the context's values.
    override_context = RunContext(
        run_id="ateam-override",
        session_id="session-1",
        session_state={},
        dependencies={"ctx_dep": "keep"},
        knowledge_filters={"ctx_filter": "keep"},
        metadata={"ctx_meta": "keep"},
        output_schema={"ctx_schema": "keep"},
    )
    await _run.arun_dispatch(
        team=team,
        input="hello",
        run_id="arun-override",
        session_id="session-1",
        stream=False,
        run_context=override_context,
        dependencies={"call_dep": "override"},
        knowledge_filters={"call_filter": "override"},
        metadata={"call_meta": "override"},
        output_schema={"call_schema": "override"},
    )
    assert override_context.dependencies == {"call_dep": "override"}
    # Filters merge: call-site keys win, team defaults remain.
    assert override_context.knowledge_filters == {"team_filter": "default", "call_filter": "override"}
    # Metadata merges the other way: team defaults win on conflict.
    assert override_context.metadata == {"call_meta": "override", "team_meta": "default"}
    assert override_context.output_schema == {"call_schema": "override"}

    # Scenario 3: an all-None context is back-filled from team defaults.
    empty_context = RunContext(
        run_id="ateam-empty",
        session_id="session-1",
        session_state={},
        dependencies=None,
        knowledge_filters=None,
        metadata=None,
        output_schema=None,
    )
    await _run.arun_dispatch(
        team=team,
        input="hello",
        run_id="arun-empty",
        session_id="session-1",
        stream=False,
        run_context=empty_context,
    )
    assert empty_context.dependencies == {"team_dep": "default"}
    assert empty_context.knowledge_filters == {"team_filter": "default"}
    assert empty_context.metadata == {"team_meta": "default"}
    assert empty_context.output_schema == {"type": "object", "properties": {"team": {"type": "string"}}}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_run_context_precedence.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_run_options.py | """Tests for centralized team run option resolution and renamed run functions."""
import dataclasses
import pytest
from agno.team._run_options import ResolvedRunOptions, resolve_run_options
from agno.team.team import Team
def _make_team(**kwargs) -> Team:
    """Return a member-less Team built with the given constructor kwargs."""
    return Team(members=[], **kwargs)
# ---------------------------------------------------------------------------
# ResolvedRunOptions immutability
# ---------------------------------------------------------------------------
class TestResolvedRunOptionsImmutable:
    """ResolvedRunOptions is a frozen dataclass: attribute assignment must fail."""

    def test_frozen_raises_on_assignment(self):
        options = ResolvedRunOptions(
            stream=True,
            stream_events=False,
            yield_run_output=False,
            add_history_to_context=False,
            add_dependencies_to_context=False,
            add_session_state_to_context=False,
            dependencies=None,
            knowledge_filters=None,
            metadata=None,
            output_schema=None,
        )
        with pytest.raises(dataclasses.FrozenInstanceError):
            options.stream = False  # type: ignore[misc]
# ---------------------------------------------------------------------------
# Default resolution
# ---------------------------------------------------------------------------
class TestDefaultResolution:
    """With no call-site arguments, resolution falls back to team attributes."""

    def test_all_defaults_from_team(self):
        team = _make_team(
            stream=True,
            stream_events=True,
            add_history_to_context=True,
            add_dependencies_to_context=True,
            add_session_state_to_context=True,
            dependencies={"db": "postgres"},
            knowledge_filters={"topic": "test"},
            metadata={"env": "test"},
        )
        resolved = resolve_run_options(team)
        assert resolved.stream is True
        assert resolved.stream_events is True
        assert resolved.add_history_to_context is True
        assert resolved.add_dependencies_to_context is True
        assert resolved.add_session_state_to_context is True
        assert resolved.dependencies == {"db": "postgres"}
        assert resolved.knowledge_filters == {"topic": "test"}
        assert resolved.metadata == {"env": "test"}

    def test_bare_team_defaults(self):
        """A team configured with nothing resolves to all-falsy / all-None options."""
        resolved = resolve_run_options(_make_team())
        assert resolved.stream is False
        assert resolved.stream_events is False
        assert resolved.yield_run_output is False
        assert resolved.add_history_to_context is False
        assert resolved.add_dependencies_to_context is False
        assert resolved.add_session_state_to_context is False
        assert resolved.dependencies is None
        assert resolved.knowledge_filters is None
        assert resolved.metadata is None
        assert resolved.output_schema is None
# ---------------------------------------------------------------------------
# Call-site overrides
# ---------------------------------------------------------------------------
class TestCallSiteOverrides:
    """Explicit call-site arguments win over team-level defaults."""

    def test_stream_override(self):
        resolved = resolve_run_options(_make_team(stream=False), stream=True)
        assert resolved.stream is True

    def test_stream_events_override(self):
        resolved = resolve_run_options(_make_team(stream=True, stream_events=False), stream_events=True)
        assert resolved.stream_events is True

    def test_yield_run_output_override(self):
        resolved = resolve_run_options(_make_team(), yield_run_output=True)
        assert resolved.yield_run_output is True

    def test_context_flags_override(self):
        team = _make_team(
            add_history_to_context=False,
            add_dependencies_to_context=False,
            add_session_state_to_context=False,
        )
        resolved = resolve_run_options(
            team,
            add_history_to_context=True,
            add_dependencies_to_context=True,
            add_session_state_to_context=True,
        )
        assert resolved.add_history_to_context is True
        assert resolved.add_dependencies_to_context is True
        assert resolved.add_session_state_to_context is True

    def test_dependencies_override(self):
        resolved = resolve_run_options(_make_team(dependencies={"a": 1}), dependencies={"b": 2})
        assert resolved.dependencies == {"b": 2}

    def test_output_schema_override(self):
        from pydantic import BaseModel

        class MySchema(BaseModel):
            name: str

        resolved = resolve_run_options(_make_team(), output_schema=MySchema)
        assert resolved.output_schema is MySchema
# ---------------------------------------------------------------------------
# Stream + stream_events coupling
# ---------------------------------------------------------------------------
class TestStreamEventsCoupling:
    """stream_events can only be True when stream itself resolves to True."""

    def test_stream_false_forces_stream_events_false(self):
        team = _make_team(stream_events=True)
        resolved = resolve_run_options(team, stream=False, stream_events=True)
        assert resolved.stream is False
        assert resolved.stream_events is False

    def test_stream_none_team_none_defaults_both_false(self):
        resolved = resolve_run_options(_make_team())
        assert resolved.stream is False
        assert resolved.stream_events is False

    def test_stream_true_allows_stream_events(self):
        resolved = resolve_run_options(_make_team(), stream=True, stream_events=True)
        assert resolved.stream is True
        assert resolved.stream_events is True
# ---------------------------------------------------------------------------
# Metadata merge
# ---------------------------------------------------------------------------
class TestMetadataMerge:
    """Metadata merges team and call-site dicts; team keys win on conflict."""

    def test_both_none(self):
        resolved = resolve_run_options(_make_team())
        assert resolved.metadata is None

    def test_only_callsite(self):
        resolved = resolve_run_options(_make_team(), metadata={"run": "value"})
        assert resolved.metadata == {"run": "value"}

    def test_only_team(self):
        resolved = resolve_run_options(_make_team(metadata={"team": "value"}))
        assert resolved.metadata == {"team": "value"}

    def test_merge_team_takes_precedence(self):
        team = _make_team(metadata={"shared": "team_wins", "team_only": "t"})
        resolved = resolve_run_options(team, metadata={"shared": "run_value", "run_only": "r"})
        assert resolved.metadata["shared"] == "team_wins"
        assert resolved.metadata["team_only"] == "t"
        assert resolved.metadata["run_only"] == "r"

    def test_merge_does_not_mutate_callsite(self):
        """The caller's metadata dict must be left untouched by the merge."""
        team = _make_team(metadata={"a": 1})
        caller_metadata = {"b": 2}
        resolve_run_options(team, metadata=caller_metadata)
        assert caller_metadata == {"b": 2}
# ---------------------------------------------------------------------------
# Knowledge filter merge
# ---------------------------------------------------------------------------
class TestKnowledgeFilterMerge:
    """Knowledge filters merge team and call-site values; call-site keys win."""

    def test_no_filters(self):
        resolved = resolve_run_options(_make_team())
        assert resolved.knowledge_filters is None

    def test_only_team_filters(self):
        resolved = resolve_run_options(_make_team(knowledge_filters={"topic": "test"}))
        assert resolved.knowledge_filters == {"topic": "test"}

    def test_only_callsite_filters(self):
        resolved = resolve_run_options(_make_team(), knowledge_filters={"topic": "run"})
        assert resolved.knowledge_filters == {"topic": "run"}

    def test_dict_merge_callsite_takes_precedence(self):
        team = _make_team(knowledge_filters={"topic": "team", "team_key": "t"})
        resolved = resolve_run_options(team, knowledge_filters={"topic": "run", "run_key": "r"})
        assert resolved.knowledge_filters["topic"] == "run"
        assert resolved.knowledge_filters["team_key"] == "t"
        assert resolved.knowledge_filters["run_key"] == "r"

    def test_list_merge(self):
        """List-form FilterExpr filters from team and call-site are concatenated."""
        from agno.filters import EQ

        team = _make_team(knowledge_filters=[EQ("a", "1")])
        resolved = resolve_run_options(team, knowledge_filters=[EQ("b", "2")])
        assert len(resolved.knowledge_filters) == 2
# ---------------------------------------------------------------------------
# Defensive copy (dependencies not mutated on team)
# ---------------------------------------------------------------------------
class TestTeamNotMutated:
    """Resolution takes defensive copies: neither team nor caller data is mutated."""

    def test_resolve_does_not_mutate_team(self):
        team = _make_team(
            stream=True,
            metadata={"a": 1},
            dependencies={"db": "test"},
            knowledge_filters={"topic": "test"},
        )
        stream_before = team.stream
        metadata_before = team.metadata.copy()
        deps_before = team.dependencies.copy()
        resolve_run_options(
            team,
            stream=False,
            metadata={"b": 2},
            dependencies={"other": "value"},
            knowledge_filters={"other_topic": "run"},
        )
        assert team.stream == stream_before
        assert team.metadata == metadata_before
        assert team.dependencies == deps_before

    def test_dependencies_defensive_copy(self):
        team = _make_team(dependencies={"key": "original"})
        resolved = resolve_run_options(team)
        # Mutating the resolved copy must leave the team's dict intact.
        resolved.dependencies["key"] = "mutated"  # type: ignore[index]
        assert team.dependencies == {"key": "original"}

    def test_callsite_dependencies_defensive_copy(self):
        caller_deps = {"key": "original"}
        resolved = resolve_run_options(_make_team(), dependencies=caller_deps)
        # Mutating the resolved copy must leave the caller's dict intact.
        resolved.dependencies["key"] = "mutated"  # type: ignore[index]
        assert caller_deps == {"key": "original"}
# ---------------------------------------------------------------------------
# Renamed functions exist and are importable
# ---------------------------------------------------------------------------
class TestRenamedFunctionsImportable:
    """The renamed dispatch/impl functions exist; the old names are gone."""

    def test_run_dispatch_importable(self):
        from agno.team._run import run_dispatch

        assert callable(run_dispatch)

    def test_run_importable(self):
        from agno.team._run import _run

        assert callable(_run)

    def test_run_stream_importable(self):
        from agno.team._run import _run_stream

        assert callable(_run_stream)

    def test_arun_dispatch_importable(self):
        from agno.team._run import arun_dispatch

        assert callable(arun_dispatch)

    def test_arun_importable(self):
        from agno.team._run import _arun

        assert callable(_arun)

    def test_arun_stream_importable(self):
        from agno.team._run import _arun_stream

        assert callable(_arun_stream)

    def test_asetup_session_importable(self):
        from agno.team._run import _asetup_session

        assert callable(_asetup_session)

    def test_old_names_not_present(self):
        """Old _impl-suffixed names should not exist on the module."""
        from agno.team import _run

        for stale_name in (
            "run_impl",
            "run_stream_impl",
            "arun_impl",
            "arun_stream_impl",
            "asetup_session",
            "run",
            "arun",
        ):
            assert not hasattr(_run, stale_name)
# ---------------------------------------------------------------------------
# Team.run / Team.arun dispatch to the new names
# ---------------------------------------------------------------------------
class TestTeamWrappersDelegateCorrectly:
    """Team.run()/Team.arun() must forward to the dispatch functions in _run."""

    def test_team_run_delegates_to_run_dispatch(self, monkeypatch):
        """Verify Team.run() calls _run.run_dispatch under the hood."""
        from agno.team import _run as run_module

        seen = {}

        def spy_dispatch(team, *, input, **kwargs):
            seen["called"] = True
            seen["input"] = input
            return None

        monkeypatch.setattr(run_module, "run_dispatch", spy_dispatch)
        _make_team().run(input="hello")
        assert seen["called"] is True
        assert seen["input"] == "hello"

    def test_team_arun_delegates_to_arun_dispatch(self, monkeypatch):
        """Verify Team.arun() calls _run.arun_dispatch under the hood."""
        from agno.team import _run as run_module

        seen = {}

        def spy_dispatch(team, *, input, **kwargs):
            seen["called"] = True
            seen["input"] = input
            return None

        monkeypatch.setattr(run_module, "arun_dispatch", spy_dispatch)
        _make_team().arun(input="hello")
        assert seen["called"] is True
        assert seen["input"] == "hello"
# ---------------------------------------------------------------------------
# Parity: team and agent ResolvedRunOptions have the same fields
# ---------------------------------------------------------------------------
class TestParityWithAgent:
    """Team's ResolvedRunOptions must stay field-for-field identical to the agent's."""

    def test_same_fields_as_agent_run_options(self):
        from agno.agent._run_options import ResolvedRunOptions as AgentOpts
        from agno.team._run_options import ResolvedRunOptions as TeamOpts

        names_of = lambda cls: {f.name for f in dataclasses.fields(cls)}  # noqa: E731
        assert names_of(AgentOpts) == names_of(TeamOpts)

    def test_same_field_types_as_agent_run_options(self):
        from agno.agent._run_options import ResolvedRunOptions as AgentOpts
        from agno.team._run_options import ResolvedRunOptions as TeamOpts

        types_of = lambda cls: {f.name: f.type for f in dataclasses.fields(cls)}  # noqa: E731
        assert types_of(AgentOpts) == types_of(TeamOpts)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_run_options.py",
"license": "Apache License 2.0",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_isolation.py | """Tests for knowledge instance isolation features.
Tests that knowledge instances with isolate_vector_search=True filter by linked_to.
"""
from typing import Any, Dict, List
import pytest
from agno.knowledge.document import Document
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """In-memory VectorDb stand-in recording search calls and inserted documents."""

    def __init__(self):
        # Each entry: {"query": ..., "limit": ..., "filters": ...} per search call.
        self.search_calls: List[Dict[str, Any]] = []
        self.inserted_documents: List[Document] = []

    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    def name_exists(self, name: str) -> bool:
        return False

    async def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return False

    def insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)

    async def async_insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)

    def upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    async def async_upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    def upsert_available(self) -> bool:
        return True

    def search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        # Record the call so tests can assert on the filters it received.
        self.search_calls.append(dict(query=query, limit=limit, filters=filters))
        return [Document(name="test", content="test content")]

    async def async_search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        # Record the call so tests can assert on the filters it received.
        self.search_calls.append(dict(query=query, limit=limit, filters=filters))
        return [Document(name="test", content="test content")]

    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self) -> List[str]:
        return ["vector"]
class TestKnowledgeIsolation:
"""Tests for knowledge isolation based on isolate_vector_search flag."""
def test_search_with_isolation_enabled_injects_filter(self):
"""Test that search with isolate_vector_search=True injects linked_to filter."""
mock_db = MockVectorDb()
knowledge = Knowledge(
name="Test KB",
vector_db=mock_db,
isolate_vector_search=True,
)
knowledge.search("test query")
assert len(mock_db.search_calls) == 1
assert mock_db.search_calls[0]["filters"] == {"linked_to": "Test KB"}
def test_search_without_isolation_no_filter(self):
"""Test that search without isolate_vector_search does not inject filter (backwards compatible)."""
mock_db = MockVectorDb()
knowledge = Knowledge(
name="Test KB",
vector_db=mock_db,
# isolate_vector_search defaults to False
)
knowledge.search("test query")
assert len(mock_db.search_calls) == 1
assert mock_db.search_calls[0]["filters"] is None
def test_search_without_name_no_filter(self):
"""Test that search without name does not inject filter even with isolation enabled."""
mock_db = MockVectorDb()
knowledge = Knowledge(
vector_db=mock_db,
isolate_vector_search=True,
)
knowledge.search("test query")
assert len(mock_db.search_calls) == 1
assert mock_db.search_calls[0]["filters"] is None
def test_search_with_isolation_merges_existing_dict_filters(self):
"""Test that linked_to filter merges with existing dict filters when isolation enabled."""
mock_db = MockVectorDb()
knowledge = Knowledge(
name="Test KB",
vector_db=mock_db,
isolate_vector_search=True,
)
knowledge.search("test query", filters={"category": "docs"})
assert len(mock_db.search_calls) == 1
assert mock_db.search_calls[0]["filters"] == {"category": "docs", "linked_to": "Test KB"}
def test_search_with_isolation_list_filters_injects_linked_to(self):
"""Test that linked_to filter is auto-injected for list-based FilterExpr filters."""
from agno.filters import EQ
mock_db = MockVectorDb()
knowledge = Knowledge(
name="Test KB",
vector_db=mock_db,
isolate_vector_search=True,
)
list_filters = [EQ("category", "docs")]
knowledge.search("test query", filters=list_filters)
assert len(mock_db.search_calls) == 1
result_filters = mock_db.search_calls[0]["filters"]
assert len(result_filters) == 2
assert result_filters[0].key == "linked_to"
assert result_filters[0].value == "Test KB"
assert result_filters[1].key == "category"
assert result_filters[1].value == "docs"
@pytest.mark.asyncio
async def test_async_search_with_isolation_list_filters_injects_linked_to(self):
"""Test that async search auto-injects linked_to for list-based FilterExpr filters."""
from agno.filters import EQ
mock_db = MockVectorDb()
knowledge = Knowledge(
name="Async Test KB",
vector_db=mock_db,
isolate_vector_search=True,
)
list_filters = [EQ("department", "legal")]
await knowledge.asearch("test query", filters=list_filters)
assert len(mock_db.search_calls) == 1
result_filters = mock_db.search_calls[0]["filters"]
assert len(result_filters) == 2
assert result_filters[0].key == "linked_to"
assert result_filters[0].value == "Async Test KB"
assert result_filters[1].key == "department"
assert result_filters[1].value == "legal"
@pytest.mark.asyncio
async def test_async_search_with_isolation_injects_filter(self):
    """Async search with isolation enabled injects the linked_to dict filter."""
    vector_db = MockVectorDb()
    kb = Knowledge(name="Async Test KB", vector_db=vector_db, isolate_vector_search=True)
    await kb.asearch("test query")
    assert len(vector_db.search_calls) == 1
    assert vector_db.search_calls[0]["filters"] == {"linked_to": "Async Test KB"}
@pytest.mark.asyncio
async def test_async_search_without_isolation_no_filter(self):
    """Async search leaves filters as None when isolation is disabled (the default)."""
    vector_db = MockVectorDb()
    kb = Knowledge(name="Async Test KB", vector_db=vector_db)
    await kb.asearch("test query")
    assert len(vector_db.search_calls) == 1
    assert vector_db.search_calls[0]["filters"] is None
class TestLinkedToMetadata:
    """Tests for linked_to metadata being added to documents when isolation is enabled."""

    @staticmethod
    def _prepare(kb):
        """Run a single fresh document through the insert-preparation step."""
        return kb._prepare_documents_for_insert([Document(name="doc1", content="content")], "content-id")

    def test_prepare_documents_adds_linked_to_with_isolation(self):
        """linked_to is set to the knowledge name when isolation is enabled."""
        kb = Knowledge(name="My Knowledge Base", vector_db=MockVectorDb(), isolate_vector_search=True)
        prepared = self._prepare(kb)
        assert prepared[0].meta_data["linked_to"] == "My Knowledge Base"

    def test_prepare_documents_adds_linked_to_without_isolation(self):
        """linked_to is always stamped, even with isolate_vector_search left at its False default."""
        kb = Knowledge(name="My Knowledge Base", vector_db=MockVectorDb())
        prepared = self._prepare(kb)
        assert prepared[0].meta_data["linked_to"] == "My Knowledge Base"

    def test_prepare_documents_adds_empty_linked_to_no_name_with_isolation(self):
        """An unnamed knowledge base stamps an empty linked_to even with isolation on."""
        kb = Knowledge(vector_db=MockVectorDb(), isolate_vector_search=True)
        prepared = self._prepare(kb)
        assert prepared[0].meta_data["linked_to"] == ""

    def test_linked_to_always_uses_knowledge_name(self):
        """A caller-supplied linked_to in document metadata is overwritten by the knowledge name."""
        kb = Knowledge(name="New KB", vector_db=MockVectorDb(), isolate_vector_search=True)
        # Document arrives with a stale linked_to; preparation must replace it.
        documents = [Document(name="doc1", content="content", meta_data={"linked_to": "Old KB"})]
        prepared = kb._prepare_documents_for_insert(documents, "content-id")
        assert prepared[0].meta_data["linked_to"] == "New KB"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_isolation.py",
"license": "Apache License 2.0",
"lines": 216,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/05_agent_os/interfaces/slack/channel_summarizer.py | """
Channel Summarizer
==================
An agent that reads channel history and produces structured summaries.
Supports follow-up questions in the same thread via session history.
Key concepts:
- ``SlackTools`` with ``enable_get_thread`` and ``enable_search_messages``
lets the agent read Slack data as tool calls.
- ``add_history_to_context=True`` + ``db`` enables follow-up questions
within the same Slack thread — the agent remembers previous exchanges.
- ``num_history_runs=5`` includes the last 5 exchanges for context.
Slack scopes: app_mentions:read, assistant:write, chat:write, im:history,
channels:history, search:read, users:read
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os.app import AgentOS
from agno.os.interfaces.slack import Slack
from agno.tools.slack import SlackTools
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
agent_db = SqliteDb(session_table="agent_sessions", db_file="tmp/summarizer.db")
summarizer = Agent(
name="Channel Summarizer",
model=OpenAIChat(id="gpt-4o"),
db=agent_db,
tools=[
SlackTools(
enable_get_thread=True,
enable_search_messages=True,
enable_list_users=True,
)
],
instructions=[
"You summarize Slack channel activity.",
"When asked about a channel:",
"1. Get recent message history",
"2. Identify active threads and expand them",
"3. Group messages by topic/theme",
"4. Highlight decisions, action items, and blockers",
"Format summaries with clear sections:",
"- Key Discussions",
"- Decisions Made",
"- Action Items",
"- Questions/Blockers",
"Use bullet points and keep summaries concise.",
],
# Session history — enables follow-up questions in the same Slack thread
add_history_to_context=True,
num_history_runs=5,
add_datetime_to_context=True,
markdown=True,
)
agent_os = AgentOS(
agents=[summarizer],
interfaces=[
Slack(
agent=summarizer,
reply_to_mentions_only=True,
)
],
)
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
agent_os.serve(app="channel_summarizer:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/slack/channel_summarizer.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/interfaces/slack/file_analyst.py | """
File Analyst
============
An agent that downloads files shared in Slack, analyzes their content,
and can upload results back to the channel.
Key concepts:
- ``SlackTools`` with ``enable_download_file`` and ``enable_upload_file``
gives the agent access to Slack's file APIs.
- Works with CSV, code, text, and other file types.
- Uses Claude for strong document comprehension.
Slack scopes: app_mentions:read, assistant:write, chat:write, im:history,
files:read, files:write, channels:history
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.anthropic import Claude
from agno.os.app import AgentOS
from agno.os.interfaces.slack import Slack
from agno.tools.slack import SlackTools
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
agent_db = SqliteDb(session_table="agent_sessions", db_file="tmp/file_analyst.db")
file_analyst = Agent(
name="File Analyst",
model=Claude(id="claude-sonnet-4-20250514"),
db=agent_db,
tools=[
SlackTools(
enable_download_file=True,
enable_get_channel_history=True,
enable_upload_file=True,
output_directory="/tmp/slack_analysis",
)
],
instructions=[
"You are a file analysis assistant.",
"When users share files or mention file IDs (F12345...), download and analyze them.",
"For CSV/data files: identify patterns, outliers, and key statistics.",
"For code files: explain what the code does, suggest improvements.",
"For text/docs: summarize key points.",
"You can upload analysis results back to Slack as new files.",
"Always explain your analysis in plain language.",
],
add_history_to_context=True,
num_history_runs=5,
markdown=True,
)
agent_os = AgentOS(
agents=[file_analyst],
interfaces=[
Slack(
agent=file_analyst,
reply_to_mentions_only=True,
)
],
)
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
agent_os.serve(app="file_analyst:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/slack/file_analyst.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/interfaces/slack/research_assistant.py | """
Research Assistant
==================
An agent that combines Slack message search with web search to answer
research questions. Searches internal Slack history first, then gathers
external context from the web.
Key concepts:
- ``SlackTools`` search supports Slack query syntax
(``from:@user``, ``in:#channel``, ``has:link``, ``before:/after:``).
- ``WebSearchTools`` provides external web search.
- The agent synthesizes internal and external findings into one summary.
Slack scopes: app_mentions:read, assistant:write, chat:write, im:history,
search:read, channels:history, users:read
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os.app import AgentOS
from agno.os.interfaces.slack import Slack
from agno.tools.slack import SlackTools
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
agent_db = SqliteDb(session_table="agent_sessions", db_file="tmp/research_assistant.db")
research_assistant = Agent(
name="Research Assistant",
model=OpenAIChat(id="gpt-4o"),
db=agent_db,
tools=[
SlackTools(
enable_search_messages=True,
enable_get_thread=True,
enable_list_users=True,
enable_get_user_info=True,
),
WebSearchTools(),
],
instructions=[
"You are a research assistant that helps find information.",
"You can search Slack messages using: from:@user, in:#channel, has:link, before:/after:date",
"You can also search the web for current information.",
"When asked to research something:",
"1. Search Slack for internal discussions",
"2. Search the web for external context",
"3. Synthesize findings into a clear summary",
"Identify relevant experts by looking at who contributed to discussions.",
],
add_history_to_context=True,
num_history_runs=3,
add_datetime_to_context=True,
markdown=True,
)
agent_os = AgentOS(
agents=[research_assistant],
interfaces=[
Slack(
agent=research_assistant,
reply_to_mentions_only=True,
)
],
)
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
agent_os.serve(app="research_assistant:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/slack/research_assistant.py",
"license": "Apache License 2.0",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/interfaces/slack/support_team.py | """
Support Team
============
A multi-agent team that routes support questions to the right specialist.
Technical Support handles code and API questions; Documentation Specialist
searches Slack history and the web for existing answers.
Key concepts:
- ``Team`` with a coordinator model routes questions to the best member.
- One member uses ``SlackTools`` to find past answers in Slack threads.
- Both members use ``WebSearchTools`` for external documentation.
Slack scopes: app_mentions:read, assistant:write, chat:write, im:history,
search:read, channels:history
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os.app import AgentOS
from agno.os.interfaces.slack import Slack
from agno.team import Team
from agno.tools.slack import SlackTools
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
team_db = SqliteDb(session_table="team_sessions", db_file="tmp/support_team.db")
# Technical Support Agent
tech_support = Agent(
name="Technical Support",
role="Code and technical troubleshooting",
model=OpenAIChat(id="gpt-4o"),
tools=[WebSearchTools()],
instructions=[
"You handle technical questions about code, APIs, and implementation.",
"Provide code examples when helpful.",
"Search for current documentation and best practices.",
],
markdown=True,
)
# Documentation Agent
docs_agent = Agent(
name="Documentation Specialist",
role="Finding and explaining documentation",
model=OpenAIChat(id="gpt-4o"),
tools=[
SlackTools(
enable_search_messages=True,
enable_get_thread=True,
),
WebSearchTools(),
],
instructions=[
"You find relevant documentation and past discussions.",
"Search Slack for previous answers to similar questions.",
"Search the web for official documentation.",
"Explain documentation in simple terms.",
],
markdown=True,
)
# The Team with a coordinator
support_team = Team(
name="Support Team",
model=OpenAIChat(id="gpt-4o"),
members=[tech_support, docs_agent],
description="A support team that routes questions to the right specialist.",
instructions=[
"You coordinate support requests.",
"Route technical/code questions to Technical Support.",
"Route 'how do I' or 'where is' questions to Documentation Specialist.",
"For complex questions, consult both agents.",
],
db=team_db,
add_history_to_context=True,
num_history_runs=3,
markdown=True,
)
agent_os = AgentOS(
teams=[support_team],
interfaces=[
Slack(
team=support_team,
reply_to_mentions_only=True,
)
],
)
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
agent_os.serve(app="support_team:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/slack/support_team.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_slack_router.py | import asyncio
import json
import time
from unittest.mock import AsyncMock, Mock, patch
import pytest
from fastapi import APIRouter, FastAPI
from agno.agent import RunEvent
from .conftest import (
build_app,
content_chunk,
make_agent_mock,
make_async_client_mock,
make_httpx_mock,
make_signed_request,
make_slack_mock,
make_stream_mock,
make_streaming_agent,
make_streaming_body,
slack_event_with_files,
wait_for_call,
)
# -- Non-streaming path --
@pytest.mark.asyncio
async def test_session_id_namespaced_with_entity_id():
    """Session ids are prefixed with the agent id so threads don't collide across agents."""
    mock_agent = make_agent_mock()
    mock_agent.name = "Research Bot"
    mock_agent.id = "researcher"
    slack_tools = make_slack_mock()
    thread_ts = "1708123456.000100"
    event = {
        "type": "message",
        "channel_type": "im",
        "text": "hello",
        "user": "U123",
        "channel": "C123",
        "ts": "1708123456.000200",
        "thread_ts": thread_ts,
    }
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
        patch("slack_sdk.web.async_client.AsyncWebClient", return_value=make_async_client_mock()),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent, reply_to_mentions_only=False))
        response = make_signed_request(test_client, {"type": "event_callback", "event": event})
        assert response.status_code == 200
        await wait_for_call(mock_agent.arun)
        call = mock_agent.arun.call_args
        session_id = call.kwargs.get("session_id") or call[1].get("session_id")
        assert session_id == f"researcher:{thread_ts}"
@pytest.mark.asyncio
async def test_user_id_is_none_for_shared_thread():
    """No per-user id is forwarded to the agent for a shared DM thread."""
    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    event = {
        "type": "message",
        "channel_type": "im",
        "text": "hello",
        "user": "U456",
        "channel": "C123",
        "ts": str(time.time()),
    }
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
        patch("slack_sdk.web.async_client.AsyncWebClient", return_value=make_async_client_mock()),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent, reply_to_mentions_only=False))
        response = make_signed_request(test_client, {"type": "event_callback", "event": event})
        assert response.status_code == 200
        await wait_for_call(mock_agent.arun)
        call = mock_agent.arun.call_args
        forwarded_user = call.kwargs.get("user_id") or call[1].get("user_id")
        assert forwarded_user is None
@pytest.mark.asyncio
async def test_mixed_files_categorized_correctly():
    """CSV/zip attachments arrive as files, images as images; non-whitelisted types lose their mime."""
    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    downloads = make_httpx_mock([b"csv-data", b"img-data", b"zip-data"])
    attachments = [
        {"id": "F5", "name": "data.csv", "mimetype": "text/csv"},
        {"id": "F6", "name": "pic.jpg", "mimetype": "image/jpeg"},
        {"id": "F7", "name": "bundle.zip", "mimetype": "application/zip"},
    ]
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
        patch("slack_sdk.web.async_client.AsyncWebClient", return_value=make_async_client_mock()),
        patch("agno.os.interfaces.slack.helpers.httpx.AsyncClient", return_value=downloads),
        patch.dict("os.environ", {"SLACK_TOKEN": "test"}),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent))
        response = make_signed_request(test_client, slack_event_with_files(attachments))
        assert response.status_code == 200
        await wait_for_call(mock_agent.arun)
        call = mock_agent.arun.call_args
        files = call.kwargs.get("files") or call[1].get("files")
        images = call.kwargs.get("images") or call[1].get("images")
    assert len(files) == 2
    assert files[0].mime_type == "text/csv"
    # The zip is not on the mime whitelist, so it is passed through with mime_type=None.
    assert files[1].mime_type is None
    assert len(images) == 1
@pytest.mark.asyncio
async def test_non_whitelisted_mime_type_passes_none():
    """Attachments with a non-whitelisted mime type keep their bytes but drop the mime."""
    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
        patch("slack_sdk.web.async_client.AsyncWebClient", return_value=make_async_client_mock()),
        patch("agno.os.interfaces.slack.helpers.httpx.AsyncClient", return_value=make_httpx_mock(b"zipdata")),
        patch.dict("os.environ", {"SLACK_TOKEN": "test"}),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent))
        payload = slack_event_with_files([{"id": "F1", "name": "archive.zip", "mimetype": "application/zip"}])
        response = make_signed_request(test_client, payload)
        assert response.status_code == 200
        await wait_for_call(mock_agent.arun)
        call = mock_agent.arun.call_args
        files = call.kwargs.get("files") or call[1].get("files")
    assert files[0].mime_type is None
    assert files[0].content == b"zipdata"
def test_explicit_token_passed_to_slack_tools():
    """An explicitly supplied bot token is forwarded verbatim to the SlackTools constructor."""
    mock_agent = make_agent_mock()
    with (
        patch("agno.os.interfaces.slack.router.SlackTools") as slack_tools_cls,
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
    ):
        slack_tools_cls.return_value = make_slack_mock()
        build_app(mock_agent, token="xoxb-explicit-token")
    slack_tools_cls.assert_called_once_with(token="xoxb-explicit-token", ssl=None, max_file_size=1_073_741_824)
def test_explicit_signing_secret_used():
    """A signing secret passed at construction is handed to the signature verifier."""
    mock_agent = make_agent_mock()
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True) as verify,
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=make_slack_mock()),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent, signing_secret="my-secret"))
        payload = json.dumps({"type": "url_verification", "challenge": "test"}).encode()
        timestamp = str(int(time.time()))
        test_client.post(
            "/events",
            content=payload,
            headers={
                "Content-Type": "application/json",
                "X-Slack-Request-Timestamp": timestamp,
                "X-Slack-Signature": "v0=f",
            },
        )
        _, verify_kwargs = verify.call_args
    assert verify_kwargs.get("signing_secret") == "my-secret"
def test_operation_id_unique_across_instances():
    """Two Slack routers mounted in one app must not produce duplicate OpenAPI operation ids."""
    from agno.os.interfaces.slack.router import attach_routes

    agent_one = make_agent_mock()
    agent_one.name = "Research Agent"
    agent_two = make_agent_mock()
    agent_two.name = "Analyst Agent"
    with (
        patch("agno.os.interfaces.slack.router.SlackTools"),
        patch.dict("os.environ", {"SLACK_TOKEN": "test"}),
    ):
        app = FastAPI()
        for prefix, agent in (("/research", agent_one), ("/analyst", agent_two)):
            router = APIRouter(prefix=prefix)
            attach_routes(router, agent=agent)
            app.include_router(router)
        spec = app.openapi()
    operation_ids = [
        operation.get("operationId") for path_item in spec["paths"].values() for operation in path_item.values()
    ]
    assert len(operation_ids) == len(set(operation_ids))
@pytest.mark.asyncio
async def test_bot_subtype_blocked():
    """Messages with the bot_message subtype are ignored, preventing bot reply loops.

    Made async with a short sleep before the assertion, mirroring
    test_thread_reply_blocked_when_mentions_only: if the handler (incorrectly)
    scheduled a background task for this event, asserting immediately after the
    request could pass before that task ran, hiding the regression.
    """
    agent_mock = make_agent_mock()
    mock_slack = make_slack_mock()
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
    ):
        app = build_app(agent_mock, reply_to_mentions_only=False)
        from fastapi.testclient import TestClient

        client = TestClient(app)
        body = {
            "type": "event_callback",
            "event": {
                "type": "message",
                "subtype": "bot_message",
                "channel_type": "im",
                "text": "bot loop",
                "user": "U456",
                "channel": "C123",
                "ts": str(time.time()),
            },
        }
        resp = make_signed_request(client, body)
        assert resp.status_code == 200
        # Give any (incorrectly scheduled) background task a chance to run.
        await asyncio.sleep(0.5)
        agent_mock.arun.assert_not_called()
@pytest.mark.asyncio
async def test_file_share_subtype_not_blocked():
    """file_share is a message subtype that must still reach the agent."""
    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    event = {
        "type": "message",
        "subtype": "file_share",
        "channel_type": "im",
        "text": "check this",
        "user": "U456",
        "channel": "C123",
        "ts": str(time.time()),
        "files": [
            {
                "id": "F1",
                "name": "doc.txt",
                "mimetype": "text/plain",
                "url_private": "https://files.slack.com/F1",
                "size": 100,
            }
        ],
    }
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
        patch("slack_sdk.web.async_client.AsyncWebClient", return_value=make_async_client_mock()),
        patch("agno.os.interfaces.slack.helpers.httpx.AsyncClient", return_value=make_httpx_mock(b"file-data")),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent, reply_to_mentions_only=False))
        response = make_signed_request(test_client, {"type": "event_callback", "event": event})
        assert response.status_code == 200
        await wait_for_call(mock_agent.arun)
    mock_agent.arun.assert_called_once()
@pytest.mark.asyncio
async def test_thread_reply_blocked_when_mentions_only():
    """In mentions-only mode, plain thread replies in a channel never reach the agent."""
    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    event = {
        "type": "message",
        "channel_type": "channel",
        "text": "reply in thread",
        "user": "U456",
        "channel": "C123",
        "ts": "1234567890.000002",
        "thread_ts": "1234567890.000001",
    }
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent, reply_to_mentions_only=True))
        response = make_signed_request(test_client, {"type": "event_callback", "event": event})
        assert response.status_code == 200
        # Give any (incorrectly scheduled) background task a chance to run.
        await asyncio.sleep(0.5)
    mock_agent.arun.assert_not_called()
@pytest.mark.asyncio
async def test_non_streaming_clears_status_after_response():
    """After a non-streaming reply the assistant status is reset to an empty string."""
    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    web_client = make_async_client_mock()
    event = {
        "type": "message",
        "channel_type": "im",
        "text": "hello",
        "user": "U123",
        "channel": "C123",
        "ts": str(time.time()),
    }
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
        patch("slack_sdk.web.async_client.AsyncWebClient", return_value=web_client),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent, reply_to_mentions_only=False))
        response = make_signed_request(test_client, {"type": "event_callback", "event": event})
        assert response.status_code == 200
        await wait_for_call(mock_agent.arun)
    status_calls = web_client.assistant_threads_setStatus.call_args_list
    # At least one "Thinking"-style set plus the final clear.
    assert len(status_calls) >= 2
    assert status_calls[-1].kwargs.get("status") == ""
def test_retry_header_skips_processing():
    """Slack retry deliveries (X-Slack-Retry-Num) are acknowledged but not reprocessed."""
    import hashlib
    import hmac

    mock_agent = make_agent_mock()
    slack_tools = make_slack_mock()
    with (
        patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
        patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools),
    ):
        from fastapi.testclient import TestClient

        test_client = TestClient(build_app(mock_agent))
        payload = json.dumps(
            {
                "type": "event_callback",
                "event": {
                    "type": "message",
                    "channel_type": "im",
                    "text": "retry",
                    "user": "U456",
                    "channel": "C123",
                    "ts": str(time.time()),
                },
            }
        ).encode()
        timestamp = str(int(time.time()))
        # Build a real signature so the skip is attributable to the retry header, not auth.
        base_string = f"v0:{timestamp}:{payload.decode()}"
        signature = "v0=" + hmac.new(b"test-secret", base_string.encode(), hashlib.sha256).hexdigest()
        response = test_client.post(
            "/events",
            content=payload,
            headers={
                "Content-Type": "application/json",
                "X-Slack-Request-Timestamp": timestamp,
                "X-Slack-Signature": signature,
                "X-Slack-Retry-Num": "1",
                "X-Slack-Retry-Reason": "http_timeout",
            },
        )
    assert response.status_code == 200
    assert response.json()["status"] == "ok"
    mock_agent.arun.assert_not_called()
# -- Streaming path --
class TestStreamingHappyPath:
    """Happy-path checks for the streaming response flow."""

    @staticmethod
    def _stream_client(stream):
        """Build an AsyncWebClient mock wired to hand out *stream* from chat_stream."""
        web_client = AsyncMock()
        web_client.assistant_threads_setStatus = AsyncMock()
        web_client.assistant_threads_setTitle = AsyncMock()
        web_client.chat_stream = AsyncMock(return_value=stream)
        return web_client

    @pytest.mark.asyncio
    async def test_status_set_and_stream_created(self):
        """The handler sets a Thinking status first and opens exactly one chat stream."""
        agent = make_streaming_agent(chunks=[])
        stream = make_stream_mock()
        web_client = self._stream_client(stream)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=make_slack_mock(token="xoxb-test")),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=web_client),
        ):
            from fastapi.testclient import TestClient

            test_client = TestClient(build_app(agent, streaming=True, reply_to_mentions_only=False))
            response = make_signed_request(test_client, make_streaming_body())
            assert response.status_code == 200
            await wait_for_call(stream.stop)
        status_calls = web_client.assistant_threads_setStatus.call_args_list
        assert len(status_calls) >= 1
        assert status_calls[0].kwargs.get("status") == "Thinking..."
        web_client.chat_stream.assert_called_once()

    @pytest.mark.asyncio
    async def test_content_appended_to_stream(self):
        """Agent content chunks are appended as markdown and the stream is stopped once."""
        agent = make_streaming_agent(chunks=[content_chunk("Hello "), content_chunk("world")])
        stream = make_stream_mock()
        web_client = self._stream_client(stream)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=make_slack_mock(token="xoxb-test")),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=web_client),
        ):
            from fastapi.testclient import TestClient

            test_client = TestClient(build_app(agent, streaming=True, reply_to_mentions_only=False))
            response = make_signed_request(test_client, make_streaming_body())
            assert response.status_code == 200
            await wait_for_call(stream.stop)
        markdown_appends = [c for c in stream.append.call_args_list if c.kwargs.get("markdown_text")]
        assert len(markdown_appends) >= 1
        stream.stop.assert_called_once()
class TestRecipientUserId:
    """The streaming call must address the human user, never the bot."""

    @pytest.mark.asyncio
    async def test_human_user_not_bot(self):
        agent = make_streaming_agent(chunks=[content_chunk("hi")])
        stream = make_stream_mock()
        web_client = AsyncMock()
        web_client.assistant_threads_setStatus = AsyncMock()
        web_client.assistant_threads_setTitle = AsyncMock()
        web_client.chat_stream = AsyncMock(return_value=stream)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=make_slack_mock(token="xoxb-test")),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=web_client),
        ):
            from fastapi.testclient import TestClient

            test_client = TestClient(build_app(agent, streaming=True, reply_to_mentions_only=False))
            response = make_signed_request(test_client, make_streaming_body(user="U_HUMAN"))
            assert response.status_code == 200
            await wait_for_call(stream.stop)
        stream_kwargs = web_client.chat_stream.call_args.kwargs
        assert stream_kwargs["recipient_user_id"] == "U_HUMAN"
        assert stream_kwargs["recipient_team_id"] == "T123"
class TestStreamingFallbacks:
    """Degraded-path behavior of the streaming flow: missing thread_ts, a broken
    agent, and mid-stream exceptions must all clean up (status cleared, stream
    stopped) instead of leaving the Slack thread hanging."""

    @pytest.mark.asyncio
    async def test_no_thread_ts_still_streams_using_event_ts(self):
        """An event without thread_ts must still be processed (falling back to the event ts)."""
        agent = AsyncMock()
        # Non-streaming style response object; only the attributes the router reads are set.
        agent.arun = AsyncMock(
            return_value=Mock(
                status="OK",
                content="fallback",
                reasoning_content=None,
                images=None,
                files=None,
                videos=None,
                audio=None,
            )
        )
        agent.name = "Test Agent"
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_client = AsyncMock()
        mock_client.assistant_threads_setStatus = AsyncMock()
        mock_client.chat_stream = AsyncMock(return_value=make_stream_mock())
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            ts = str(time.time())
            # NOTE: no "thread_ts" key in the event — that is the case under test.
            body = {
                "type": "event_callback",
                "team_id": "T123",
                "authorizations": [{"user_id": "B_BOT"}],
                "event": {
                    "type": "message",
                    "channel_type": "im",
                    "text": "no thread",
                    "user": "U123",
                    "channel": "C123",
                    "ts": ts,
                },
            }
            resp = make_signed_request(client, body)
            assert resp.status_code == 200
            await wait_for_call(agent.arun)
            agent.arun.assert_called_once()

    @pytest.mark.asyncio
    async def test_null_response_stream_clears_status(self):
        """Even when the agent is unusable (arun is None), the status must be cleared."""
        agent = AsyncMock()
        # Deliberately broken agent: calling arun raises TypeError inside the handler.
        agent.arun = None
        agent.name = "Test Agent"
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_client = AsyncMock()
        mock_client.assistant_threads_setStatus = AsyncMock()
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            resp = make_signed_request(client, make_streaming_body())
            assert resp.status_code == 200
            # Fixed sleep (no call to await on) to let the background handler fail and clean up.
            await asyncio.sleep(1.0)
            status_calls = mock_client.assistant_threads_setStatus.call_args_list
            # At least one call must reset the status to the empty string.
            clear_calls = [c for c in status_calls if c.kwargs.get("status") == ""]
            assert len(clear_calls) >= 1

    @pytest.mark.asyncio
    async def test_exception_cleanup(self):
        """A mid-stream crash must stop the stream, clear status, and post an error message."""
        agent = AsyncMock()
        agent.name = "Test Agent"

        # Async generator that yields one chunk, then raises — simulates a crash
        # after streaming has already started.
        async def _exploding_stream(*args, **kwargs):
            yield content_chunk("partial")
            raise RuntimeError("mid-stream crash")

        agent.arun = _exploding_stream
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_stream = make_stream_mock()
        mock_client = AsyncMock()
        mock_client.assistant_threads_setStatus = AsyncMock()
        mock_client.chat_stream = AsyncMock(return_value=mock_stream)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            resp = make_signed_request(client, make_streaming_body())
            assert resp.status_code == 200
            # Longer sleep: stream must start, yield, crash, and run cleanup.
            await asyncio.sleep(2.0)
            mock_stream.stop.assert_called()
            status_calls = mock_client.assistant_threads_setStatus.call_args_list
            clear_calls = [c for c in status_calls if c.kwargs.get("status") == ""]
            assert len(clear_calls) >= 1
            # The user must be told something went wrong via a regular message.
            mock_client.chat_postMessage.assert_called()
class TestErrorResolvesTaskCards:
    """A mid-stream crash must not leave tool-call task cards stuck running."""

    @pytest.mark.asyncio
    async def test_exception_resolves_pending_task_cards(self):
        """After a tool_call_started event with no matching completion, a
        crash must resolve the pending card via an error-status chunk in the
        final stream stop() call.
        """

        async def _stream_with_tool_then_crash(*args, **kwargs):
            # Emit a tool-call-started event so a task card is created...
            yield Mock(
                event=RunEvent.tool_call_started.value,
                tool=Mock(tool_name="search_web", tool_call_id="tc_1"),
                content=None,
                images=None,
                videos=None,
                audio=None,
                files=None,
            )
            # ...then fail before the matching completion event arrives.
            raise RuntimeError("mid-stream crash after tool start")

        agent = AsyncMock()
        agent.name = "Test Agent"
        agent.arun = _stream_with_tool_then_crash
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_stream = make_stream_mock()
        mock_client = AsyncMock()
        mock_client.assistant_threads_setStatus = AsyncMock()
        mock_client.chat_stream = AsyncMock(return_value=mock_stream)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            resp = make_signed_request(client, make_streaming_body())
            assert resp.status_code == 200
            # Fixed sleep: stop() must have received its final kwargs, so
            # waiting only for the first stop() call could race.
            await asyncio.sleep(2.0)
            mock_stream.stop.assert_called()
            # The final stop carries the chunks; at least one must mark the
            # unresolved tool call as errored.
            stop_kwargs = mock_stream.stop.call_args.kwargs
            assert "chunks" in stop_kwargs
            assert any(c.get("status") == "error" for c in stop_kwargs["chunks"])
class TestStreamingTitle:
    """Assistant-thread title behaviour while streaming responses."""

    @pytest.mark.asyncio
    async def test_title_set_on_first_content(self):
        """The thread title is set once the first content chunk arrives."""
        streaming_agent = make_streaming_agent(chunks=[content_chunk("Hello")])
        slack_tools_mock = make_slack_mock(token="xoxb-test")
        stream_mock = make_stream_mock()
        web_client = AsyncMock()
        web_client.assistant_threads_setStatus = AsyncMock()
        web_client.assistant_threads_setTitle = AsyncMock()
        web_client.chat_stream = AsyncMock(return_value=stream_mock)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools_mock),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=web_client),
        ):
            app = build_app(streaming_agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            http = TestClient(app)
            response = make_signed_request(http, make_streaming_body())
            assert response.status_code == 200
            await wait_for_call(stream_mock.stop)
            web_client.assistant_threads_setTitle.assert_called_once()

    @pytest.mark.asyncio
    async def test_title_not_set_twice(self):
        """Subsequent content chunks must not re-set the thread title."""
        streaming_agent = make_streaming_agent(chunks=[content_chunk("Hello "), content_chunk("world")])
        slack_tools_mock = make_slack_mock(token="xoxb-test")
        stream_mock = make_stream_mock()
        web_client = AsyncMock()
        web_client.assistant_threads_setStatus = AsyncMock()
        web_client.assistant_threads_setTitle = AsyncMock()
        web_client.chat_stream = AsyncMock(return_value=stream_mock)
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=slack_tools_mock),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=web_client),
        ):
            app = build_app(streaming_agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            http = TestClient(app)
            response = make_signed_request(http, make_streaming_body())
            assert response.status_code == 200
            await wait_for_call(stream_mock.stop)
            assert web_client.assistant_threads_setTitle.call_count == 1
class TestThreadStarted:
    """Handling of Slack ``assistant_thread_started`` events (suggested prompts)."""

    @pytest.mark.asyncio
    async def test_default_prompts(self):
        """With no custom prompts configured, two default prompts are suggested."""
        agent = make_streaming_agent()
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_client = AsyncMock()
        mock_client.assistant_threads_setSuggestedPrompts = AsyncMock()
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            body = {
                "type": "event_callback",
                "event": {
                    "type": "assistant_thread_started",
                    "assistant_thread": {"channel_id": "C123", "thread_ts": "1234.5678"},
                },
            }
            resp = make_signed_request(client, body)
            assert resp.status_code == 200
            # Wait for the background handler instead of a fixed sleep — this
            # matches the suite's wait_for_call pattern and avoids flakiness.
            await wait_for_call(mock_client.assistant_threads_setSuggestedPrompts)
            mock_client.assistant_threads_setSuggestedPrompts.assert_called_once()
            call_kwargs = mock_client.assistant_threads_setSuggestedPrompts.call_args.kwargs
            assert len(call_kwargs["prompts"]) == 2

    @pytest.mark.asyncio
    async def test_custom_prompts(self):
        """Custom suggested_prompts are forwarded verbatim to Slack."""
        agent = make_streaming_agent()
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_client = AsyncMock()
        mock_client.assistant_threads_setSuggestedPrompts = AsyncMock()
        custom = [{"title": "Custom", "message": "Do X"}]
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False, suggested_prompts=custom)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            body = {
                "type": "event_callback",
                "event": {
                    "type": "assistant_thread_started",
                    "assistant_thread": {"channel_id": "C123", "thread_ts": "1234.5678"},
                },
            }
            resp = make_signed_request(client, body)
            assert resp.status_code == 200
            # Deterministic wait (see test_default_prompts) instead of sleep.
            await wait_for_call(mock_client.assistant_threads_setSuggestedPrompts)
            call_kwargs = mock_client.assistant_threads_setSuggestedPrompts.call_args.kwargs
            assert call_kwargs["prompts"] == custom

    @pytest.mark.asyncio
    async def test_missing_channel_returns_early(self):
        """An event without channel_id/thread_ts must be ignored."""
        agent = make_streaming_agent()
        mock_slack = make_slack_mock(token="xoxb-test")
        mock_client = AsyncMock()
        mock_client.assistant_threads_setSuggestedPrompts = AsyncMock()
        with (
            patch("agno.os.interfaces.slack.router.verify_slack_signature", return_value=True),
            patch("agno.os.interfaces.slack.router.SlackTools", return_value=mock_slack),
            patch("slack_sdk.web.async_client.AsyncWebClient", return_value=mock_client),
        ):
            app = build_app(agent, streaming=True, reply_to_mentions_only=False)
            from fastapi.testclient import TestClient

            client = TestClient(app)
            body = {
                "type": "event_callback",
                "event": {
                    "type": "assistant_thread_started",
                    "assistant_thread": {},
                },
            }
            resp = make_signed_request(client, body)
            assert resp.status_code == 200
            # A negative assertion cannot be waited on; keep a short sleep.
            await asyncio.sleep(0.5)
            mock_client.assistant_threads_setSuggestedPrompts.assert_not_called()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_slack_router.py",
"license": "Apache License 2.0",
"lines": 680,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_slack_tools.py | import json
from unittest.mock import Mock, patch
import pytest
from slack_sdk.errors import SlackApiError
from agno.tools.slack import SlackTools
@pytest.fixture
def slack_tools():
    """Yield a SlackTools instance wired to a mocked WebClient (no network)."""
    fake_env = {"SLACK_TOKEN": "test-token"}
    with patch.dict("os.environ", fake_env), patch("agno.tools.slack.WebClient") as client_cls:
        fake_client = Mock()
        client_cls.return_value = fake_client
        toolkit = SlackTools()
    # Re-point at the mock explicitly so tests can configure/inspect it.
    toolkit.client = fake_client
    return toolkit
# === Initialization ===
def test_init_requires_token():
    """Constructing SlackTools without SLACK_TOKEN in the env must fail."""
    with patch.dict("os.environ", clear=True), pytest.raises(ValueError, match="SLACK_TOKEN"):
        SlackTools()
def test_init_registers_default_tools():
    """The default toolkit registers exactly six core tools."""
    with patch.dict("os.environ", {"SLACK_TOKEN": "test"}), patch("agno.tools.slack.WebClient"):
        toolkit = SlackTools()
    registered = [fn.name for fn in toolkit.functions.values()]
    assert "send_message" in registered
    assert "send_message_thread" in registered
    assert len(registered) == 6
def test_init_all_flag_enables_all():
    """`all=True` registers the extended tool set (ten tools)."""
    with patch.dict("os.environ", {"SLACK_TOKEN": "test"}), patch("agno.tools.slack.WebClient"):
        assert len(SlackTools(all=True).functions) == 10
# === Core Tools ===
def test_send_message(slack_tools):
    """A successful post returns the Slack API payload as a JSON string."""
    slack_tools.client.chat_postMessage.return_value = Mock(data={"ok": True})
    payload = json.loads(slack_tools.send_message("#general", "Hello"))
    assert payload["ok"] is True
def test_send_message_error(slack_tools):
    """Slack API failures are reported via an 'error' key, not raised."""
    slack_tools.client.chat_postMessage.side_effect = SlackApiError("error", response=Mock())
    payload = json.loads(slack_tools.send_message("#general", "Hello"))
    assert "error" in payload
def test_send_message_thread(slack_tools):
    """Thread replies pass thread_ts through to chat_postMessage."""
    slack_tools.client.chat_postMessage.return_value = Mock(data={"ok": True, "thread_ts": "1.0"})
    reply = slack_tools.send_message_thread("C1", "reply", thread_ts="1.0")
    assert json.loads(reply)["ok"] is True
    slack_tools.client.chat_postMessage.assert_called_with(
        channel="C1", text="reply", thread_ts="1.0", mrkdwn=True
    )
def test_list_channels(slack_tools):
    """list_channels returns the raw channel list serialized as JSON."""
    channels = [{"id": "C1", "name": "general"}]
    slack_tools.client.conversations_list.return_value = {"channels": channels}
    assert json.loads(slack_tools.list_channels()) == channels
def test_get_channel_history(slack_tools):
    """Channel history preserves the message text from the API response."""
    slack_tools.client.conversations_history.return_value = {
        "messages": [{"text": "hi", "user": "U1", "ts": "1.0"}]
    }
    history = json.loads(slack_tools.get_channel_history("C1"))
    assert history[0]["text"] == "hi"
def test_upload_file(slack_tools):
    """String-content uploads succeed and echo the API response."""
    slack_tools.client.files_upload_v2.return_value = Mock(data={"ok": True})
    uploaded = json.loads(slack_tools.upload_file("C1", "content", "file.txt"))
    assert uploaded["ok"] is True
def test_upload_file_bytes(slack_tools):
    """Binary content is forwarded to files_upload_v2 unchanged."""
    slack_tools.client.files_upload_v2.return_value = Mock(data={"ok": True})
    slack_tools.upload_file("C1", b"bytes", "file.bin")
    slack_tools.client.files_upload_v2.assert_called_once()
    # `.kwargs` (rather than the positional `call_args[1]` index) matches the
    # style used throughout the rest of this suite and is self-describing.
    assert slack_tools.client.files_upload_v2.call_args.kwargs["content"] == b"bytes"
def test_download_file_base64(slack_tools):
    """Downloaded file bytes come back base64-encoded in the JSON payload."""
    file_meta = {"id": "F1", "name": "f.txt", "size": 10, "url_private": "https://files.slack.com/f.txt"}
    slack_tools.client.files_info.return_value = {"file": file_meta}
    with patch("agno.tools.slack.httpx.get") as http_get:
        http_get.return_value.content = b"data"
        http_get.return_value.raise_for_status = Mock()
        downloaded = json.loads(slack_tools.download_file("F1"))
    assert "content_base64" in downloaded
# === Extended Tools ===
def test_search_messages(slack_tools):
    """Search results are summarised with a match count."""
    matches = [{"text": "found", "user": "U1", "channel": {}, "ts": "1"}]
    slack_tools.client.search_messages.return_value = {"messages": {"matches": matches}}
    assert json.loads(slack_tools.search_messages("query"))["count"] == 1
def test_get_thread(slack_tools):
    """A thread containing only the parent message reports zero replies."""
    slack_tools.client.conversations_replies.return_value = {
        "messages": [{"text": "parent", "user": "U1", "ts": "1"}]
    }
    assert json.loads(slack_tools.get_thread("C1", "1"))["reply_count"] == 0
def test_list_users(slack_tools):
    """An active, non-bot member is counted in the response."""
    member = {"id": "U1", "name": "user", "deleted": False, "is_bot": False, "profile": {}}
    slack_tools.client.users_list.return_value = {"members": [member]}
    assert json.loads(slack_tools.list_users())["count"] == 1
def test_get_user_info(slack_tools):
    """User lookup surfaces the name from the users_info response."""
    slack_tools.client.users_info.return_value = {"user": {"id": "U1", "name": "user", "profile": {}}}
    assert json.loads(slack_tools.get_user_info("U1"))["name"] == "user"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_slack_tools.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.