sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/agno/os/routers/components/components.py | import logging
import time
from typing import Any, Dict, List, Optional, Union
from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query
from agno.db.base import AsyncBaseDb, BaseDb
from agno.db.base import ComponentType as DbComponentType
from agno.os.auth import get_authentication_dependency
from agno.os.schema import (
BadRequestResponse,
ComponentConfigResponse,
ComponentCreate,
ComponentResponse,
ComponentType,
ComponentUpdate,
ConfigCreate,
ConfigUpdate,
InternalServerErrorResponse,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.registry import Registry
from agno.utils.log import log_error, log_warning
from agno.utils.string import generate_id_from_name
logger = logging.getLogger(__name__)
def _resolve_db_in_config(
    config: Dict[str, Any],
    os_db: BaseDb,
    registry: Optional[Registry] = None,
) -> Dict[str, Any]:
    """Replace a ``{"db": {"id": ...}}`` reference in *config* with the full db dict.

    Resolution order: the OS db (matched by id) first, then the registry.
    On success the reference dict is swapped for ``resolved.to_dict()``;
    on failure an error is logged and the reference is left untouched.
    A ``"db"`` key explicitly set to ``None`` is removed. The input dict
    is mutated in place and also returned for convenience.

    Args:
        config: Component config that may carry a db reference.
        os_db: The OS database instance.
        registry: Optional registry holding additional registered databases.

    Returns:
        The (mutated) config dict with the db reference resolved.
    """
    db_ref = config.get("db")
    if db_ref is not None and isinstance(db_ref, dict):
        ref_id = db_ref.get("id")
        if ref_id is not None:
            # Prefer the OS db, then fall back to the registry lookup.
            resolved = os_db if ref_id == os_db.id else None
            if resolved is None and registry is not None:
                resolved = registry.get_db(ref_id)
            if resolved is None:
                log_error(f"Could not resolve db with id: {ref_id}")
            else:
                # Store the full db dict for serialization
                config["db"] = resolved.to_dict()
    elif db_ref is None and "db" in config:
        # Explicitly set to None, remove the key
        config.pop("db", None)
    return config
def get_components_router(
    os_db: Union[BaseDb, AsyncBaseDb],
    settings: AgnoAPISettings = AgnoAPISettings(),
    registry: Optional[Registry] = None,
) -> APIRouter:
    """Create components router."""
    # Shared error-response documentation for every route on this router.
    error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    # All component routes sit behind the configured authentication dependency.
    components_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Components"],
        responses=error_responses,
    )
    return attach_routes(router=components_router, os_db=os_db, registry=registry)
def attach_routes(
    router: APIRouter, os_db: Union[BaseDb, AsyncBaseDb], registry: Optional[Registry] = None
) -> APIRouter:
    """Attach component and config CRUD routes to *router*.

    Routes: list/create/get/update/delete components, list/create/update/get/
    delete config versions, and set-current (rollback). Every handler maps
    ``ValueError`` to HTTP 400, missing rows to 404, and any other exception
    to a logged 500.

    Args:
        router: Router the path operations are registered on.
        os_db: Backing database; must be a sync ``BaseDb``.
        registry: Optional registry used to resolve db references inside
            configs and to exclude registry-owned component IDs from listings.

    Returns:
        The same router with all routes attached.

    Raises:
        ValueError: If *os_db* is an async database.
    """
    # Component routes require sync database
    if not isinstance(os_db, BaseDb):
        raise ValueError("Component routes require a sync database (BaseDb), not an async database.")
    db: BaseDb = os_db  # Type narrowed after isinstance check

    @router.get(
        "/components",
        response_model=PaginatedResponse[ComponentResponse],
        response_model_exclude_none=True,
        status_code=200,
        operation_id="list_components",
        summary="List Components",
        description="Retrieve a paginated list of components with optional filtering by type.",
    )
    async def list_components(
        component_type: Optional[ComponentType] = Query(None, description="Filter by type: agent, team, workflow"),
        page: int = Query(1, ge=1, description="Page number"),
        limit: int = Query(20, ge=1, le=100, description="Items per page"),
    ) -> PaginatedResponse[ComponentResponse]:
        """Paginated component listing; registry-owned components are excluded."""
        try:
            # Wall-clock start, in milliseconds, for the search_time_ms metric.
            start_time_ms = time.time() * 1000
            offset = (page - 1) * limit
            # Exclude components whose IDs are owned by the registry
            exclude_ids = registry.get_all_component_ids() if registry else None
            components, total_count = db.list_components(
                component_type=DbComponentType(component_type.value) if component_type else None,
                limit=limit,
                offset=offset,
                exclude_component_ids=exclude_ids or None,
            )
            # Ceiling division; limit is guaranteed >= 1 by the Query validator.
            total_pages = (total_count + limit - 1) // limit if limit > 0 else 0
            return PaginatedResponse(
                data=[ComponentResponse(**c) for c in components],
                meta=PaginationInfo(
                    page=page,
                    limit=limit,
                    total_pages=total_pages,
                    total_count=total_count,
                    search_time_ms=round(time.time() * 1000 - start_time_ms, 2),
                ),
            )
        except Exception as e:
            log_error(f"Error listing components: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.post(
        "/components",
        response_model=ComponentResponse,
        response_model_exclude_none=True,
        status_code=201,
        operation_id="create_component",
        summary="Create Component",
        description="Create a new component (agent, team, or workflow) with initial config.",
    )
    async def create_component(
        body: ComponentCreate,
    ) -> ComponentResponse:
        """Create a component plus its initial config version."""
        try:
            # Derive an ID from the name when the caller did not supply one.
            component_id = body.component_id
            if component_id is None:
                component_id = generate_id_from_name(body.name)
            # TODO: Create links from config
            # Prepare config - ensure it's a dict and resolve db reference
            config = body.config or {}
            config = _resolve_db_in_config(config, db, registry)
            # Warn if creating a team without members
            if body.component_type == ComponentType.TEAM:
                members = config.get("members")
                if not members or len(members) == 0:
                    log_warning(
                        f"Creating team '{body.name}' without members. "
                        "If this is unintended, add members to the config."
                    )
            component, _config = db.create_component_with_config(
                component_id=component_id,
                component_type=DbComponentType(body.component_type.value),
                name=body.name,
                description=body.description,
                metadata=body.metadata,
                config=config,
                label=body.label,
                stage=body.stage or "draft",
                notes=body.notes,
            )
            return ComponentResponse(**component)
        except ValueError as e:
            # Validation errors from the db layer surface as 400s.
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            log_error(f"Error creating component: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get(
        "/components/{component_id}",
        response_model=ComponentResponse,
        response_model_exclude_none=True,
        status_code=200,
        operation_id="get_component",
        summary="Get Component",
        description="Retrieve a component by ID.",
    )
    async def get_component(
        component_id: str = Path(description="Component ID"),
    ) -> ComponentResponse:
        """Fetch a single component; 404 when absent."""
        try:
            component = db.get_component(component_id)
            if component is None:
                raise HTTPException(status_code=404, detail=f"Component {component_id} not found")
            return ComponentResponse(**component)
        except HTTPException:
            # Re-raise our own 404 untouched so it isn't converted to a 500.
            raise
        except Exception as e:
            log_error(f"Error getting component: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.patch(
        "/components/{component_id}",
        response_model=ComponentResponse,
        response_model_exclude_none=True,
        status_code=200,
        operation_id="update_component",
        summary="Update Component",
        description="Partially update a component by ID.",
    )
    async def update_component(
        component_id: str = Path(description="Component ID"),
        body: ComponentUpdate = Body(description="Component fields to update"),
    ) -> ComponentResponse:
        """Partial update: only fields present in the body are forwarded to upsert."""
        try:
            existing = db.get_component(component_id)
            if existing is None:
                raise HTTPException(status_code=404, detail=f"Component {component_id} not found")
            # Build kwargs from only the fields the caller actually set.
            update_kwargs: Dict[str, Any] = {"component_id": component_id}
            if body.name is not None:
                update_kwargs["name"] = body.name
            if body.description is not None:
                update_kwargs["description"] = body.description
            if body.metadata is not None:
                update_kwargs["metadata"] = body.metadata
            if body.current_version is not None:
                update_kwargs["current_version"] = body.current_version
            if body.component_type is not None:
                # NOTE(review): other handlers pass `.value` into DbComponentType;
                # this passes the enum member itself — confirm ComponentType is a
                # str-valued enum so the value lookup still succeeds.
                update_kwargs["component_type"] = DbComponentType(body.component_type)
            component = db.upsert_component(**update_kwargs)
            return ComponentResponse(**component)
        except HTTPException:
            raise
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            log_error(f"Error updating component: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.delete(
        "/components/{component_id}",
        status_code=204,
        operation_id="delete_component",
        summary="Delete Component",
        description="Delete a component by ID.",
    )
    async def delete_component(
        component_id: str = Path(description="Component ID"),
    ) -> None:
        """Delete a component; 204 on success, 404 when it did not exist."""
        try:
            deleted = db.delete_component(component_id)
            if not deleted:
                raise HTTPException(status_code=404, detail=f"Component {component_id} not found")
        except HTTPException:
            raise
        except Exception as e:
            log_error(f"Error deleting component: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get(
        "/components/{component_id}/configs",
        response_model=List[ComponentConfigResponse],
        response_model_exclude_none=True,
        status_code=200,
        operation_id="list_configs",
        summary="List Configs",
        description="List all configs for a component.",
    )
    async def list_configs(
        component_id: str = Path(description="Component ID"),
        include_config: bool = Query(True, description="Include full config blob"),
    ) -> List[ComponentConfigResponse]:
        """List every config version for a component, optionally with full blobs."""
        try:
            configs = db.list_configs(component_id, include_config=include_config)
            return [ComponentConfigResponse(**c) for c in configs]
        except Exception as e:
            log_error(f"Error listing configs: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.post(
        "/components/{component_id}/configs",
        response_model=ComponentConfigResponse,
        response_model_exclude_none=True,
        status_code=201,
        operation_id="create_config",
        summary="Create Config Version",
        description="Create a new config version for a component.",
    )
    async def create_config(
        component_id: str = Path(description="Component ID"),
        body: ConfigCreate = Body(description="Config data"),
    ) -> ComponentConfigResponse:
        """Create a new config version (version=None signals the db to allocate one)."""
        try:
            # Resolve db from config if present
            config_data = body.config or {}
            config_data = _resolve_db_in_config(config_data, db, registry)
            config = db.upsert_config(
                component_id=component_id,
                version=None,  # Always create new
                config=config_data,
                label=body.label,
                stage=body.stage,
                notes=body.notes,
                links=body.links,
            )
            return ComponentConfigResponse(**config)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            log_error(f"Error creating config: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.patch(
        "/components/{component_id}/configs/{version}",
        response_model=ComponentConfigResponse,
        response_model_exclude_none=True,
        status_code=200,
        operation_id="update_config",
        summary="Update Draft Config",
        description="Update an existing draft config. Cannot update published configs.",
    )
    async def update_config(
        component_id: str = Path(description="Component ID"),
        version: int = Path(description="Version number"),
        body: ConfigUpdate = Body(description="Config fields to update"),
    ) -> ComponentConfigResponse:
        """Update an existing draft config version in place."""
        try:
            # Resolve db from config if present
            config_data = body.config
            if config_data is not None:
                config_data = _resolve_db_in_config(config_data, db, registry)
            config = db.upsert_config(
                component_id=component_id,
                version=version,  # Always update existing
                config=config_data,
                label=body.label,
                stage=body.stage,
                notes=body.notes,
                links=body.links,
            )
            return ComponentConfigResponse(**config)
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            log_error(f"Error updating config: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    # NOTE: this route must be registered before GET .../configs/{version} so
    # the literal "current" segment is not captured by the int path parameter.
    @router.get(
        "/components/{component_id}/configs/current",
        response_model=ComponentConfigResponse,
        response_model_exclude_none=True,
        status_code=200,
        operation_id="get_current_config",
        summary="Get Current Config",
        description="Get the current config version for a component.",
    )
    async def get_current_config(
        component_id: str = Path(description="Component ID"),
    ) -> ComponentConfigResponse:
        """Return the component's current config version; 404 if none is set."""
        try:
            config = db.get_config(component_id)
            if config is None:
                raise HTTPException(status_code=404, detail=f"No current config for {component_id}")
            return ComponentConfigResponse(**config)
        except HTTPException:
            raise
        except Exception as e:
            log_error(f"Error getting config: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.get(
        "/components/{component_id}/configs/{version}",
        response_model=ComponentConfigResponse,
        response_model_exclude_none=True,
        status_code=200,
        operation_id="get_config",
        summary="Get Config Version",
        description="Get a specific config version by number.",
    )
    async def get_config_version(
        component_id: str = Path(description="Component ID"),
        version: int = Path(description="Version number"),
    ) -> ComponentConfigResponse:
        """Return a specific config version; 404 when it does not exist."""
        try:
            config = db.get_config(component_id, version=version)
            if config is None:
                raise HTTPException(status_code=404, detail=f"Config {component_id} v{version} not found")
            return ComponentConfigResponse(**config)
        except HTTPException:
            raise
        except Exception as e:
            log_error(f"Error getting config: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.delete(
        "/components/{component_id}/configs/{version}",
        status_code=204,
        operation_id="delete_config",
        summary="Delete Config Version",
        description="Delete a specific draft config version. Cannot delete published or current configs.",
    )
    async def delete_config_version(
        component_id: str = Path(description="Component ID"),
        version: int = Path(description="Version number"),
    ) -> None:
        """Delete a draft config version; the db layer rejects protected versions via ValueError."""
        try:
            # Resolve version number
            deleted = db.delete_config(component_id, version=version)
            if not deleted:
                raise HTTPException(status_code=404, detail=f"Config {component_id} v{version} not found")
        except HTTPException:
            raise
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            log_error(f"Error deleting config: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    @router.post(
        "/components/{component_id}/configs/{version}/set-current",
        response_model=ComponentResponse,
        response_model_exclude_none=True,
        status_code=200,
        operation_id="set_current_config",
        summary="Set Current Config Version",
        description="Set a published config version as current (for rollback).",
    )
    async def set_current_config(
        component_id: str = Path(description="Component ID"),
        version: int = Path(description="Version number"),
    ) -> ComponentResponse:
        """Point the component at an existing config version and return the updated component."""
        try:
            success = db.set_current_version(component_id, version=version)
            if not success:
                raise HTTPException(
                    status_code=404, detail=f"Component {component_id} or config version {version} not found"
                )
            # Fetch and return updated component
            component = db.get_component(component_id)
            if component is None:
                raise HTTPException(status_code=404, detail=f"Component {component_id} not found")
            return ComponentResponse(**component)
        except HTTPException:
            raise
        except ValueError as e:
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            log_error(f"Error setting current config: {e}")
            raise HTTPException(status_code=500, detail="Internal server error")

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/components/components.py",
"license": "Apache License 2.0",
"lines": 438,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/registry/registry.py | import inspect
import time
from typing import Any, Dict, List, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from agno.os.auth import get_authentication_dependency
from agno.os.schema import (
BadRequestResponse,
CallableMetadata,
DbMetadata,
FunctionMetadata,
InternalServerErrorResponse,
ModelMetadata,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
RegistryContentResponse,
RegistryResourceType,
SchemaMetadata,
ToolMetadata,
UnauthenticatedResponse,
ValidationErrorResponse,
VectorDbMetadata,
)
from agno.os.settings import AgnoAPISettings
from agno.registry import Registry
from agno.tools.function import Function
from agno.tools.toolkit import Toolkit
from agno.utils.log import log_error
def get_registry_router(registry: Registry, settings: AgnoAPISettings = AgnoAPISettings()) -> APIRouter:
    """Create the registry APIRouter (behind authentication) and attach its routes."""
    # Shared error-response documentation for every route on this router.
    error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    registry_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Registry"],
        responses=error_responses,
    )
    return attach_routes(router=registry_router, registry=registry)
def attach_routes(router: APIRouter, registry: Registry) -> APIRouter:
    """Attach the registry listing route to *router*.

    Builds JSON-serializable metadata snapshots for every resource held by the
    registry (tools, models, dbs, vector dbs, schemas, functions, agents,
    teams) and exposes them through a paginated GET /registry endpoint.

    Args:
        router: Router the path operation is registered on.
        registry: Registry whose resources are introspected.

    Returns:
        The same router with the route attached.
    """

    def _safe_str(v: Any) -> Optional[str]:
        """Coerce to a non-empty stripped string, or None."""
        if v is None:
            return None
        if isinstance(v, str):
            s = v.strip()
            return s or None
        return str(v)

    def _safe_name(obj: Any, fallback: str) -> str:
        """Return obj.name when it is a non-empty string, else *fallback*."""
        n = getattr(obj, "name", None)
        n = _safe_str(n)
        return n or fallback

    def _class_path(obj: Any) -> str:
        """Dotted module.ClassName path for an instance's class."""
        cls = obj.__class__
        return f"{cls.__module__}.{cls.__name__}"

    def _maybe_jsonable(value: Any) -> Any:
        """Recursively reduce *value* to JSON-safe primitives, stringifying anything else."""
        # Keep only data that is likely JSON serializable
        if value is None:
            return None
        if isinstance(value, (str, int, float, bool)):
            return value
        if isinstance(value, list):
            return [_maybe_jsonable(x) for x in value]
        if isinstance(value, dict):
            out: Dict[str, Any] = {}
            for k, v in value.items():
                out[str(k)] = _maybe_jsonable(v)
            return out
        # Fallback to string to avoid serialization errors
        return str(value)

    def _extract_entrypoint_metadata(
        entrypoint: Any,
    ) -> tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
        """Extract module, qualname, signature, and return annotation from an entrypoint callable."""
        ep_module: Optional[str] = getattr(entrypoint, "__module__", None)
        ep_qualname: Optional[str] = getattr(entrypoint, "__qualname__", None)
        ep_signature: Optional[str] = None
        ep_return_annotation: Optional[str] = None
        try:
            sig = inspect.signature(entrypoint)
            ep_signature = str(sig)
            if sig.return_annotation is not inspect.Signature.empty:
                ep_return_annotation = str(sig.return_annotation)
        except (ValueError, TypeError):
            # Builtins/C callables may not expose a signature; leave fields None.
            pass
        return ep_module, ep_qualname, ep_signature, ep_return_annotation

    def _get_callable_params(func: Any) -> Dict[str, Any]:
        """Extract JSON schema-like parameters from a callable using inspect."""
        try:
            sig = inspect.signature(func)
            properties: Dict[str, Any] = {}
            required: List[str] = []
            for param_name, param in sig.parameters.items():
                # Bound-method receivers carry no caller-facing meaning.
                if param_name in ("self", "cls"):
                    continue
                prop: Dict[str, Any] = {}
                # Try to map annotation to JSON schema type
                if param.annotation is not inspect.Parameter.empty:
                    ann = param.annotation
                    # String comparisons handle postponed (string) annotations.
                    if ann is str or ann == "str":
                        prop["type"] = "string"
                    elif ann is int or ann == "int":
                        prop["type"] = "integer"
                    elif ann is float or ann == "float":
                        prop["type"] = "number"
                    elif ann is bool or ann == "bool":
                        prop["type"] = "boolean"
                    elif ann is list or ann == "list":
                        prop["type"] = "array"
                    elif ann is dict or ann == "dict":
                        prop["type"] = "object"
                    else:
                        # Unknown annotation: default to string but keep the raw text.
                        prop["type"] = "string"
                        prop["annotation"] = str(ann)
                else:
                    prop["type"] = "string"
                if param.default is not inspect.Parameter.empty:
                    # Keep the default only when it is JSON-safe; otherwise stringify.
                    prop["default"] = (
                        param.default if _maybe_jsonable(param.default) == param.default else str(param.default)
                    )
                else:
                    required.append(param_name)
                properties[param_name] = prop
            return {"type": "object", "properties": properties, "required": required}
        except (ValueError, TypeError):
            # Signature not introspectable: return an empty object schema.
            return {"type": "object", "properties": {}, "required": []}

    def _get_resources(resource_type: Optional[RegistryResourceType] = None) -> List[RegistryContentResponse]:
        """Snapshot registry contents as responses, optionally filtered by *resource_type*."""
        resources: List[RegistryContentResponse] = []
        # Tools
        if resource_type is None or resource_type == RegistryResourceType.TOOL:
            for tool in getattr(registry, "tools", []) or []:
                if isinstance(tool, Toolkit):
                    toolkit_name = _safe_name(tool, fallback=tool.__class__.__name__)
                    functions = getattr(tool, "functions", {}) or {}
                    # Build full function details for each function in the toolkit
                    function_details: List[CallableMetadata] = []
                    for func in functions.values():
                        func_name = _safe_name(func, fallback=func.__class__.__name__)
                        # Check if function requires confirmation or external execution
                        requires_confirmation = getattr(func, "requires_confirmation", None)
                        external_execution = getattr(func, "external_execution", None)
                        # If not set on function, check toolkit settings
                        if requires_confirmation is None and hasattr(tool, "requires_confirmation_tools"):
                            requires_confirmation = func_name in (tool.requires_confirmation_tools or [])
                        if external_execution is None and hasattr(tool, "external_execution_required_tools"):
                            external_execution = func_name in (tool.external_execution_required_tools or [])
                        # Get parameters - ensure they're processed if needed
                        func_params = func.parameters
                        default_params = {"type": "object", "properties": {}, "required": []}
                        if func_params == default_params and func.entrypoint and not func.skip_entrypoint_processing:
                            try:
                                # Process a shallow copy so the registered Function is untouched.
                                func_copy = func.model_copy(deep=False)
                                func_copy.process_entrypoint(strict=False)
                                func_params = func_copy.parameters
                            except Exception:
                                pass
                        # Extract callable metadata from entrypoint
                        func_module: Optional[str] = None
                        func_qualname: Optional[str] = None
                        func_signature: Optional[str] = None
                        func_return_annotation: Optional[str] = None
                        if func.entrypoint:
                            func_module, func_qualname, func_signature, func_return_annotation = (
                                _extract_entrypoint_metadata(func.entrypoint)
                            )
                        # Fall back to the entrypoint docstring when no description was set.
                        func_description = getattr(func, "description", None)
                        if func_description is None and func.entrypoint:
                            func_description = inspect.getdoc(func.entrypoint)
                        function_details.append(
                            CallableMetadata(
                                name=func_name,
                                description=_safe_str(func_description),
                                class_path=_class_path(func),
                                module=func_module,
                                qualname=func_qualname,
                                has_entrypoint=bool(getattr(func, "entrypoint", None)),
                                parameters=_maybe_jsonable(func_params),
                                requires_confirmation=requires_confirmation,
                                external_execution=external_execution,
                                signature=func_signature,
                                return_annotation=func_return_annotation,
                            )
                        )
                    toolkit_metadata = ToolMetadata(
                        class_path=_class_path(tool),
                        is_toolkit=True,
                        functions=function_details,
                    )
                    resources.append(
                        RegistryContentResponse(
                            name=toolkit_name,
                            type=RegistryResourceType.TOOL,
                            description=_safe_str(getattr(tool, "description", None)),
                            metadata=toolkit_metadata.model_dump(exclude_none=True),
                        )
                    )
                elif isinstance(tool, Function):
                    # A standalone Function registered directly as a tool.
                    func_name = _safe_name(tool, fallback=tool.__class__.__name__)
                    requires_confirmation = getattr(tool, "requires_confirmation", None)
                    external_execution = getattr(tool, "external_execution", None)
                    # Get parameters - ensure they're processed if needed
                    func_params = tool.parameters
                    # If parameters are empty/default and function has entrypoint, try to process it
                    default_params = {"type": "object", "properties": {}, "required": []}
                    if func_params == default_params and tool.entrypoint and not tool.skip_entrypoint_processing:
                        try:
                            # Create a copy to avoid modifying the original
                            tool_copy = tool.model_copy(deep=False)
                            tool_copy.process_entrypoint(strict=False)
                            func_params = tool_copy.parameters
                        except Exception:
                            # If processing fails, use original parameters
                            pass
                    # Extract callable metadata from entrypoint
                    tool_module: Optional[str] = None
                    tool_qualname: Optional[str] = None
                    tool_signature: Optional[str] = None
                    tool_return_annotation: Optional[str] = None
                    if tool.entrypoint:
                        tool_module, tool_qualname, tool_signature, tool_return_annotation = (
                            _extract_entrypoint_metadata(tool.entrypoint)
                        )
                    func_tool_metadata = ToolMetadata(
                        class_path=_class_path(tool),
                        module=tool_module,
                        qualname=tool_qualname,
                        has_entrypoint=bool(getattr(tool, "entrypoint", None)),
                        parameters=_maybe_jsonable(func_params),
                        requires_confirmation=requires_confirmation,
                        external_execution=external_execution,
                        signature=tool_signature,
                        return_annotation=tool_return_annotation,
                    )
                    resources.append(
                        RegistryContentResponse(
                            name=func_name,
                            type=RegistryResourceType.TOOL,
                            description=_safe_str(getattr(tool, "description", None)),
                            metadata=func_tool_metadata.model_dump(exclude_none=True),
                        )
                    )
                elif callable(tool):
                    # A bare callable registered as a tool (no Toolkit/Function wrapper).
                    call_name = getattr(tool, "__name__", None) or tool.__class__.__name__
                    tool_module = getattr(tool, "__module__", "unknown")
                    # Extract signature
                    callable_signature: Optional[str] = None
                    callable_return_annotation: Optional[str] = None
                    try:
                        sig = inspect.signature(tool)
                        callable_signature = str(sig)
                        if sig.return_annotation is not inspect.Signature.empty:
                            callable_return_annotation = str(sig.return_annotation)
                    except (ValueError, TypeError):
                        pass
                    callable_metadata = ToolMetadata(
                        class_path=f"{tool_module}.{call_name}",
                        module=tool_module,
                        qualname=getattr(tool, "__qualname__", None),
                        has_entrypoint=True,
                        parameters=_get_callable_params(tool),
                        requires_confirmation=None,
                        external_execution=None,
                        signature=callable_signature,
                        return_annotation=callable_return_annotation,
                    )
                    resources.append(
                        RegistryContentResponse(
                            name=str(call_name),
                            type=RegistryResourceType.TOOL,
                            description=_safe_str(getattr(tool, "__doc__", None)),
                            metadata=callable_metadata.model_dump(exclude_none=True),
                        )
                    )
        # Models
        if resource_type is None or resource_type == RegistryResourceType.MODEL:
            for model in getattr(registry, "models", []) or []:
                # Prefer the model id over its display name for the resource name.
                model_name = (
                    _safe_str(getattr(model, "id", None))
                    or _safe_str(getattr(model, "name", None))
                    or model.__class__.__name__
                )
                model_metadata = ModelMetadata(
                    class_path=_class_path(model),
                    provider=_safe_str(getattr(model, "provider", None)),
                    model_id=_safe_str(getattr(model, "id", None)),
                )
                resources.append(
                    RegistryContentResponse(
                        name=model_name,
                        type=RegistryResourceType.MODEL,
                        description=_safe_str(getattr(model, "description", None)),
                        metadata=model_metadata.model_dump(exclude_none=True),
                    )
                )
        # Databases
        if resource_type is None or resource_type == RegistryResourceType.DB:
            for db in getattr(registry, "dbs", []) or []:
                db_name = (
                    _safe_str(getattr(db, "name", None)) or _safe_str(getattr(db, "id", None)) or db.__class__.__name__
                )
                db_metadata = DbMetadata(
                    class_path=_class_path(db),
                    db_id=_safe_str(getattr(db, "id", None)),
                )
                resources.append(
                    RegistryContentResponse(
                        name=db_name,
                        type=RegistryResourceType.DB,
                        description=_safe_str(getattr(db, "description", None)),
                        metadata=db_metadata.model_dump(exclude_none=True),
                    )
                )
        # Vector databases
        if resource_type is None or resource_type == RegistryResourceType.VECTOR_DB:
            for vdb in getattr(registry, "vector_dbs", []) or []:
                # Try name, id, collection, then table name before the class name.
                vdb_name = (
                    _safe_str(getattr(vdb, "name", None))
                    or _safe_str(getattr(vdb, "id", None))
                    or _safe_str(getattr(vdb, "collection", None))
                    or _safe_str(getattr(vdb, "table_name", None))
                    or vdb.__class__.__name__
                )
                vdb_metadata = VectorDbMetadata(
                    class_path=_class_path(vdb),
                    vector_db_id=_safe_str(getattr(vdb, "id", None)),
                    collection=_safe_str(getattr(vdb, "collection", None)),
                    table_name=_safe_str(getattr(vdb, "table_name", None)),
                )
                resources.append(
                    RegistryContentResponse(
                        name=vdb_name,
                        type=RegistryResourceType.VECTOR_DB,
                        description=_safe_str(getattr(vdb, "description", None)),
                        metadata=vdb_metadata.model_dump(exclude_none=True),
                    )
                )
        # Schemas
        if resource_type is None or resource_type == RegistryResourceType.SCHEMA:
            for schema in getattr(registry, "schemas", []) or []:
                schema_name = schema.__name__
                schema_json: Optional[Dict[str, Any]] = None
                schema_error: Optional[str] = None
                try:
                    # Pydantic models expose model_json_schema; anything else gets {}.
                    schema_json = schema.model_json_schema() if hasattr(schema, "model_json_schema") else {}
                except Exception as e:
                    # Surface generation failures in the response rather than failing the request.
                    schema_error = str(e)
                schema_metadata = SchemaMetadata(
                    class_path=_class_path(schema),
                    schema=schema_json,
                    schema_error=schema_error,
                )
                resources.append(
                    RegistryContentResponse(
                        name=schema_name,
                        type=RegistryResourceType.SCHEMA,
                        metadata=schema_metadata.model_dump(exclude_none=True, by_alias=True),
                    )
                )
        # Functions (raw callables used for workflow conditions, selectors, etc.)
        if resource_type is None or resource_type == RegistryResourceType.FUNCTION:
            for func in getattr(registry, "functions", []) or []:
                func_name = getattr(func, "__name__", None) or "anonymous"
                func_module = getattr(func, "__module__", "unknown")
                # Extract signature
                reg_func_signature: Optional[str] = None
                reg_func_return_annotation: Optional[str] = None
                try:
                    sig = inspect.signature(func)
                    reg_func_signature = str(sig)
                    if sig.return_annotation is not inspect.Signature.empty:
                        reg_func_return_annotation = str(sig.return_annotation)
                except (ValueError, TypeError):
                    pass
                func_description = _safe_str(getattr(func, "__doc__", None))
                reg_func_metadata = FunctionMetadata(
                    name=func_name,
                    description=func_description,
                    class_path=f"{func_module}.{func_name}",
                    module=func_module,
                    qualname=getattr(func, "__qualname__", None),
                    has_entrypoint=True,
                    parameters=_get_callable_params(func),
                    requires_confirmation=None,
                    external_execution=None,
                    signature=reg_func_signature,
                    return_annotation=reg_func_return_annotation,
                )
                resources.append(
                    RegistryContentResponse(
                        name=func_name,
                        type=RegistryResourceType.FUNCTION,
                        description=func_description,
                        metadata=reg_func_metadata.model_dump(exclude_none=True),
                    )
                )
        # Agents (code-defined agents for workflow rehydration)
        if resource_type is None or resource_type == RegistryResourceType.AGENT:
            for agent in getattr(registry, "agents", []) or []:
                agent_id = getattr(agent, "id", None)
                # NOTE(review): name falls back to id; if both are None the
                # response name is None — confirm RegistryContentResponse allows that.
                agent_name = getattr(agent, "name", None) or agent_id
                resources.append(
                    RegistryContentResponse(
                        name=agent_name,
                        id=agent_id,
                        type=RegistryResourceType.AGENT,
                        description=_safe_str(getattr(agent, "description", None)),
                        metadata={
                            "id": agent_id,
                            "class_path": _class_path(agent),
                        },
                    )
                )
        # Teams (code-defined teams for workflow rehydration)
        if resource_type is None or resource_type == RegistryResourceType.TEAM:
            for team in getattr(registry, "teams", []) or []:
                team_id = getattr(team, "id", None)
                team_name = getattr(team, "name", None) or team_id
                resources.append(
                    RegistryContentResponse(
                        name=team_name,
                        id=team_id,
                        type=RegistryResourceType.TEAM,
                        description=_safe_str(getattr(team, "description", None)),
                        metadata={
                            "id": team_id,
                            "class_path": _class_path(team),
                        },
                    )
                )
        # Stable ordering helps pagination
        resources.sort(key=lambda r: (r.type, r.name))
        return resources

    @router.get(
        "/registry",
        response_model=PaginatedResponse[RegistryContentResponse],
        response_model_exclude_none=True,
        status_code=200,
        operation_id="list_registry",
        summary="List Registry",
        description="List all resources in the registry with optional filtering.",
    )
    async def list_registry(
        resource_type: Optional[RegistryResourceType] = Query(None, description="Filter by resource type"),
        name: Optional[str] = Query(None, description="Filter by name (partial match)"),
        page: int = Query(1, ge=1, description="Page number"),
        limit: int = Query(20, ge=1, le=100, description="Items per page"),
    ) -> PaginatedResponse[RegistryContentResponse]:
        """Paginated registry listing with optional type and substring-name filters."""
        try:
            # Wall-clock start, in milliseconds, for the search_time_ms metric.
            start_time_ms = time.time() * 1000
            resources = _get_resources(resource_type)
            if name:
                # Case-insensitive substring match on the resource name.
                needle = name.lower().strip()
                resources = [r for r in resources if needle in r.name.lower()]
            total_count = len(resources)
            # Ceiling division; limit is guaranteed >= 1 by the Query validator.
            total_pages = (total_count + limit - 1) // limit if limit > 0 else 0
            start_idx = (page - 1) * limit
            paginated = resources[start_idx : start_idx + limit]
            return PaginatedResponse(
                data=paginated,
                meta=PaginationInfo(
                    page=page,
                    limit=limit,
                    total_pages=total_pages,
                    total_count=total_count,
                    search_time_ms=round(time.time() * 1000 - start_time_ms, 2),
                ),
            )
        except Exception as e:
            log_error(f"Error listing registry resources: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/registry/registry.py",
"license": "Apache License 2.0",
"lines": 472,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/registry/registry.py | from __future__ import annotations
from dataclasses import dataclass, field
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Type
from uuid import uuid4
from pydantic import BaseModel
from agno.db.base import BaseDb
from agno.models.base import Model
from agno.tools.function import Function
from agno.tools.toolkit import Toolkit
from agno.vectordb.base import VectorDb
if TYPE_CHECKING:
from agno.agent import Agent
from agno.team import Team
@dataclass
class Registry:
    """
    Registry is used to manage non serializable objects like tools, models, databases, vector databases,
    agents, and teams.

    All ``get_*`` lookups return ``None`` (or an empty set) when the requested
    object is not registered.
    """

    name: Optional[str] = None
    description: Optional[str] = None
    id: str = field(default_factory=lambda: str(uuid4()))
    tools: List[Any] = field(default_factory=list)
    models: List[Model] = field(default_factory=list)
    dbs: List[BaseDb] = field(default_factory=list)
    vector_dbs: List[VectorDb] = field(default_factory=list)
    schemas: List[Type[BaseModel]] = field(default_factory=list)
    functions: List[Callable] = field(default_factory=list)
    # Code-defined agents and teams (for workflow rehydration)
    agents: List[Agent] = field(default_factory=list)
    teams: List[Team] = field(default_factory=list)

    @cached_property
    def _entrypoint_lookup(self) -> Dict[str, Callable]:
        """Map function name -> entrypoint for all registered tools.

        NOTE: computed once on first access (cached_property), so tools added
        to ``self.tools`` after that will not be seen by rehydrate_function.
        """
        lookup: Dict[str, Callable] = {}
        for tool in self.tools:
            if isinstance(tool, Toolkit):
                # A Toolkit exposes several named Functions
                for func in tool.functions.values():
                    if func.entrypoint is not None:
                        lookup[func.name] = func.entrypoint
            elif isinstance(tool, Function):
                if tool.entrypoint is not None:
                    lookup[tool.name] = tool.entrypoint
            elif callable(tool):
                # Plain callables are registered under their __name__
                lookup[tool.__name__] = tool
        return lookup

    def rehydrate_function(self, func_dict: Dict[str, Any]) -> Function:
        """Reconstruct a Function from dict, reattaching its entrypoint.

        The entrypoint is looked up by function name; it stays None when no
        registered tool provides a matching entrypoint.
        """
        func = Function.from_dict(func_dict)
        func.entrypoint = self._entrypoint_lookup.get(func.name)
        return func

    def get_schema(self, name: str) -> Optional[Type[BaseModel]]:
        """Get a schema by class name."""
        if self.schemas:
            return next((s for s in self.schemas if s.__name__ == name), None)
        return None

    def get_db(self, db_id: str) -> Optional[BaseDb]:
        """Get a database by id from the registry.

        Args:
            db_id: The database id to look up

        Returns:
            The database instance if found, None otherwise
        """
        if self.dbs:
            return next((db for db in self.dbs if db.id == db_id), None)
        return None

    def get_function(self, name: str) -> Optional[Callable]:
        """Get a registered plain function by its __name__."""
        # Guard against `functions` being explicitly set to None, consistent
        # with the other get_* lookups on this class (previously this raised
        # a TypeError instead of returning None).
        if self.functions:
            return next((f for f in self.functions if f.__name__ == name), None)
        return None

    def get_agent(self, agent_id: str) -> Optional[Agent]:
        """Get an agent by id from the registry."""
        if self.agents:
            return next((a for a in self.agents if getattr(a, "id", None) == agent_id), None)
        return None

    def get_team(self, team_id: str) -> Optional[Team]:
        """Get a team by id from the registry."""
        if self.teams:
            return next((t for t in self.teams if getattr(t, "id", None) == team_id), None)
        return None

    def get_agent_ids(self) -> Set[str]:
        """Get the set of all agent IDs in this registry."""
        if self.agents:
            return {aid for a in self.agents if (aid := getattr(a, "id", None)) is not None}
        return set()

    def get_team_ids(self) -> Set[str]:
        """Get the set of all team IDs in this registry."""
        if self.teams:
            return {tid for t in self.teams if (tid := getattr(t, "id", None)) is not None}
        return set()

    def get_all_component_ids(self) -> Set[str]:
        """Get the set of all agent and team IDs in this registry."""
        return self.get_agent_ids() | self.get_team_ids()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/registry/registry.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/tools/websearch.py | import json
from typing import Any, List, Literal, Optional
from agno.tools import Toolkit
from agno.utils.log import log_debug
try:
from ddgs import DDGS
except ImportError:
raise ImportError("`ddgs` not installed. Please install using `pip install ddgs`")
# Valid timelimit values for search filtering
VALID_TIMELIMITS = frozenset({"d", "w", "m", "y"})
class WebSearchTools(Toolkit):
    """
    Toolkit for searching the web. Uses the meta-search library DDGS.
    Multiple search backends (e.g. google, bing, duckduckgo) are available.

    Args:
        enable_search (bool): Enable web search function.
        enable_news (bool): Enable news search function.
        backend (str): The backend to use for searching. Defaults to "auto" which
            automatically selects available backends. Other options include:
            "duckduckgo", "google", "bing", "brave", "yandex", "yahoo", etc.
        modifier (Optional[str]): A modifier to be prepended to search queries.
        fixed_max_results (Optional[int]): A fixed number of maximum results.
        proxy (Optional[str]): Proxy to be used for requests.
        timeout (Optional[int]): The maximum number of seconds to wait for a response.
        verify_ssl (bool): Whether to verify SSL certificates.
        timelimit (Optional[str]): Time limit for search results. Valid values:
            "d" (day), "w" (week), "m" (month), "y" (year).
        region (Optional[str]): Region for search results (e.g., "us-en", "uk-en", "ru-ru").
    """

    def __init__(
        self,
        enable_search: bool = True,
        enable_news: bool = True,
        backend: str = "auto",
        modifier: Optional[str] = None,
        fixed_max_results: Optional[int] = None,
        proxy: Optional[str] = None,
        timeout: Optional[int] = 10,
        verify_ssl: bool = True,
        timelimit: Optional[Literal["d", "w", "m", "y"]] = None,
        region: Optional[str] = None,
        **kwargs,
    ):
        # Validate timelimit at runtime: the Literal annotation is not enforced
        # when callers pass a plain string.
        if timelimit is not None and timelimit not in VALID_TIMELIMITS:
            raise ValueError(
                f"Invalid timelimit '{timelimit}'. Must be one of: 'd' (day), 'w' (week), 'm' (month), 'y' (year)."
            )
        self.proxy: Optional[str] = proxy
        self.timeout: Optional[int] = timeout
        self.fixed_max_results: Optional[int] = fixed_max_results
        self.modifier: Optional[str] = modifier
        self.verify_ssl: bool = verify_ssl
        self.backend: str = backend
        self.timelimit: Optional[Literal["d", "w", "m", "y"]] = timelimit
        self.region: Optional[str] = region
        tools: List[Any] = []
        if enable_search:
            tools.append(self.web_search)
        if enable_news:
            tools.append(self.search_news)
        super().__init__(name="websearch", tools=tools, **kwargs)

    def _run_search(self, query: str, max_results: int, news: bool = False) -> str:
        """Execute a DDGS text or news search and return the results as JSON.

        Shared by web_search and search_news so the kwargs construction
        (fixed_max_results override, backend, optional timelimit/region)
        lives in one place.
        """
        search_kwargs: dict = {
            "query": query,
            # A configured fixed_max_results always wins over the caller value
            "max_results": self.fixed_max_results or max_results,
            "backend": self.backend,
        }
        if self.timelimit is not None:
            search_kwargs["timelimit"] = self.timelimit
        if self.region is not None:
            search_kwargs["region"] = self.region
        with DDGS(proxy=self.proxy, timeout=self.timeout, verify=self.verify_ssl) as ddgs:
            results = ddgs.news(**search_kwargs) if news else ddgs.text(**search_kwargs)
        return json.dumps(results, indent=2)

    def web_search(self, query: str, max_results: int = 5) -> str:
        """Use this function to search the web for a query.

        Args:
            query(str): The query to search for.
            max_results (optional, default=5): The maximum number of results to return.

        Returns:
            The search results from the web.
        """
        search_query = f"{self.modifier} {query}" if self.modifier else query
        log_debug(f"Searching web for: {search_query} using backend: {self.backend}")
        return self._run_search(search_query, max_results)

    def search_news(self, query: str, max_results: int = 5) -> str:
        """Use this function to get the latest news from the web.

        Args:
            query(str): The query to search for.
            max_results (optional, default=5): The maximum number of results to return.

        Returns:
            The latest news from the web.
        """
        log_debug(f"Searching web news for: {query} using backend: {self.backend}")
        return self._run_search(query, max_results, news=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/websearch.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/openrouter/test_gemini_reasoning.py | """
Integration tests for Gemini reasoning_details preservation via OpenRouter.
These tests verify that reasoning_details are properly extracted from Gemini
responses and preserved across multi-turn conversations.
"""
from agno.agent import Agent
from agno.db.in_memory import InMemoryDb
from agno.models.openrouter import OpenRouter
def test_gemini_multi_turn_preserves_provider_data():
    """Test that provider_data (including reasoning_details) persists across turns."""
    agent = Agent(
        model=OpenRouter(id="google/gemini-2.5-flash"),
        db=InMemoryDb(),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    # Turn 1: seed the conversation
    first = agent.run("What is 15 + 27?")
    assert first.content is not None, "First response should have content"
    assert first.model_provider_data is not None, "First response should have model_provider_data"
    # Turn 2: depends on turn 1's context
    second = agent.run("Multiply that by 3")
    assert second.content is not None, "Second response should have content"
    assert second.model_provider_data is not None, "Second response should have model_provider_data"
    # Every assistant message in the session must carry provider_data
    assistant_messages = [msg for msg in agent.get_session_messages() if msg.role == "assistant"]
    assert len(assistant_messages) >= 2, "Should have at least 2 assistant responses"
    for msg in assistant_messages:
        assert msg.provider_data is not None, f"Message {msg.id} missing provider_data"
def test_gemini_with_tools():
    """Test Gemini works correctly with tools enabled."""
    from agno.tools.duckduckgo import DuckDuckGoTools

    gemini = OpenRouter(id="google/gemini-2.5-flash")
    agent = Agent(
        model=gemini,
        tools=[DuckDuckGoTools()],
        db=InMemoryDb(),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is 10 divided by 2?")
    assert result is not None
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/openrouter/test_gemini_reasoning.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_agent_config.py | """
Unit tests for Agent configuration serialization and persistence.
Tests cover:
- to_dict(): Serialization of agent to dictionary
- from_dict(): Deserialization of agent from dictionary
- save(): Saving agent to database
- load(): Loading agent from database
- delete(): Deleting agent from database
- get_agent_by_id(): Helper function to get agent by ID
- get_agents(): Helper function to get all agents
"""
from typing import Any, Dict
from unittest.mock import MagicMock, patch
import pytest
from agno.agent.agent import Agent, get_agent_by_id, get_agents
from agno.db.base import BaseDb, ComponentType
from agno.registry import Registry
# =============================================================================
# Fixtures
# =============================================================================
def _create_mock_db_class():
    """Create a concrete BaseDb subclass with all abstract methods stubbed."""
    # Stub every abstract method with its own MagicMock so the subclass
    # is instantiable and passes isinstance checks against BaseDb.
    stubs = {
        attr_name: MagicMock()
        for attr_name in dir(BaseDb)
        if getattr(getattr(BaseDb, attr_name, None), "__isabstractmethod__", False)
    }
    return type("MockDb", (BaseDb,), stubs)
@pytest.fixture
def mock_db():
    """Create a mock database instance that passes isinstance(db, BaseDb)."""
    db = _create_mock_db_class()()
    # Configure common mock methods
    db.to_dict = MagicMock(return_value={"type": "postgres", "id": "test-db"})
    db.upsert_component = MagicMock()
    db.upsert_config = MagicMock(return_value={"version": 1})
    db.get_config = MagicMock()
    db.delete_component = MagicMock(return_value=True)
    db.list_components = MagicMock()
    return db
@pytest.fixture
def basic_agent():
    """Create a basic agent for testing."""
    agent = Agent(
        id="test-agent",
        name="Test Agent",
        description="A test agent for unit testing",
    )
    return agent
@pytest.fixture
def agent_with_model():
    """Create an agent with a real model for testing to_dict."""
    # Use a real model class with mocked internals for serialization testing
    from agno.models.openai import OpenAIChat

    return Agent(
        id="model-agent",
        name="Model Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
@pytest.fixture
def agent_with_settings():
    """Create an agent with various settings configured."""
    settings = dict(
        id="settings-agent",
        name="Settings Agent",
        description="Agent with many settings",
        instructions="Be helpful and concise",
        markdown=True,
        debug_mode=True,
        retries=3,
        tool_call_limit=10,
        num_history_runs=5,
        add_history_to_context=True,
        add_datetime_to_context=True,
    )
    return Agent(**settings)
@pytest.fixture
def sample_agent_config() -> Dict[str, Any]:
    """Sample agent configuration dictionary."""
    return dict(
        id="sample-agent",
        name="Sample Agent",
        description="A sample agent",
        instructions="Be helpful",
        markdown=True,
        model={"provider": "openai", "id": "gpt-4o-mini"},
    )
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestAgentToDict:
    """Tests for Agent.to_dict() method.

    to_dict() is expected to be sparse: only non-default settings appear in
    the resulting dict (see test_to_dict_excludes_default_values).
    """
    def test_to_dict_basic_agent(self, basic_agent):
        """Test to_dict with a basic agent."""
        config = basic_agent.to_dict()
        assert config["id"] == "test-agent"
        assert config["name"] == "Test Agent"
        assert config["description"] == "A test agent for unit testing"
    def test_to_dict_with_model(self, agent_with_model):
        """Test to_dict includes model configuration."""
        config = agent_with_model.to_dict()
        assert "model" in config
        # NOTE(review): serialization emits the display name "OpenAI" while
        # from_dict accepts the lowercase key "openai" -- confirm asymmetry is intended.
        assert config["model"]["provider"] == "OpenAI"
        assert config["model"]["id"] == "gpt-4o-mini"
    def test_to_dict_with_settings(self, agent_with_settings):
        """Test to_dict preserves all settings."""
        config = agent_with_settings.to_dict()
        assert config["id"] == "settings-agent"
        assert config["name"] == "Settings Agent"
        assert config["description"] == "Agent with many settings"
        assert config["instructions"] == "Be helpful and concise"
        assert config["markdown"] is True
        assert config["debug_mode"] is True
        assert config["retries"] == 3
        assert config["tool_call_limit"] == 10
        assert config["num_history_runs"] == 5
        assert config["add_history_to_context"] is True
        assert config["add_datetime_to_context"] is True
    def test_to_dict_excludes_default_values(self):
        """Test that default values are not included in the config."""
        agent = Agent(id="minimal-agent")
        config = agent.to_dict()
        # Default values should not be present
        assert "markdown" not in config  # defaults to False
        assert "debug_mode" not in config  # defaults to False
        assert "retries" not in config  # defaults to 0
        assert "add_history_to_context" not in config  # defaults to False
        assert "store_history_messages" not in config  # defaults to False
    def test_to_dict_includes_store_history_messages_when_true(self):
        """Test that store_history_messages=True is serialized."""
        agent = Agent(id="history-agent", store_history_messages=True)
        config = agent.to_dict()
        assert "store_history_messages" in config
        assert config["store_history_messages"] is True
    def test_to_dict_with_db(self, basic_agent, mock_db):
        """Test to_dict includes database configuration."""
        basic_agent.db = mock_db
        config = basic_agent.to_dict()
        # The fixture's db.to_dict mock returns this exact dict
        assert "db" in config
        assert config["db"] == {"type": "postgres", "id": "test-db"}
    def test_to_dict_with_instructions_list(self):
        """Test to_dict handles instructions as a list."""
        agent = Agent(
            id="list-instructions-agent",
            instructions=["Step 1: Do this", "Step 2: Do that"],
        )
        config = agent.to_dict()
        assert config["instructions"] == ["Step 1: Do this", "Step 2: Do that"]
    def test_to_dict_with_system_message(self):
        """Test to_dict includes system message when it's a string."""
        agent = Agent(
            id="system-message-agent",
            system_message="You are a helpful assistant.",
        )
        config = agent.to_dict()
        assert config["system_message"] == "You are a helpful assistant."
    def test_to_dict_with_metadata(self):
        """Test to_dict includes metadata."""
        agent = Agent(
            id="metadata-agent",
            metadata={"version": "1.0", "author": "test"},
        )
        config = agent.to_dict()
        assert config["metadata"] == {"version": "1.0", "author": "test"}
    def test_to_dict_with_user_and_session(self):
        """Test to_dict includes user and session settings."""
        agent = Agent(
            id="session-agent",
            user_id="user-123",
            session_id="session-456",
        )
        config = agent.to_dict()
        assert config["user_id"] == "user-123"
        assert config["session_id"] == "session-456"
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestAgentFromDict:
    """Tests for Agent.from_dict() method.

    Covers model/db reconstruction, registry-driven tool rehydration, and
    to_dict -> from_dict round-trips.
    """
    def test_from_dict_basic(self, sample_agent_config):
        """Test from_dict creates agent with basic config."""
        # Remove model to avoid model lookup
        config = sample_agent_config.copy()
        del config["model"]
        agent = Agent.from_dict(config)
        assert agent.id == "sample-agent"
        assert agent.name == "Sample Agent"
        assert agent.description == "A sample agent"
        assert agent.instructions == "Be helpful"
        assert agent.markdown is True
    def test_from_dict_with_model(self):
        """Test from_dict reconstructs model from config."""
        from agno.models.openai import OpenAIChat
        config = {
            "id": "model-agent",
            "name": "Model Agent",
            "model": {"provider": "openai", "id": "gpt-4o-mini"},
        }
        # from_dict should reconstruct the model from the config
        agent = Agent.from_dict(config)
        # Model should be reconstructed
        assert agent.model is not None
        assert isinstance(agent.model, OpenAIChat)
        assert agent.model.id == "gpt-4o-mini"
    def test_from_dict_preserves_settings(self):
        """Test from_dict preserves all settings."""
        config = {
            "id": "full-agent",
            "name": "Full Agent",
            "debug_mode": True,
            "retries": 3,
            "tool_call_limit": 10,
            "num_history_runs": 5,
            "add_history_to_context": True,
            "add_datetime_to_context": True,
        }
        agent = Agent.from_dict(config)
        assert agent.debug_mode is True
        assert agent.retries == 3
        assert agent.tool_call_limit == 10
        assert agent.num_history_runs == 5
        assert agent.add_history_to_context is True
        assert agent.add_datetime_to_context is True
    def test_from_dict_with_db_postgres(self):
        """Test from_dict reconstructs PostgresDb."""
        config = {
            "id": "db-agent",
            "db": {"type": "postgres", "db_url": "postgresql://localhost/test"},
        }
        # Patch the class-level constructor so no real connection is attempted
        with patch("agno.db.postgres.PostgresDb.from_dict") as mock_from_dict:
            mock_db = MagicMock()
            mock_from_dict.return_value = mock_db
            agent = Agent.from_dict(config)
            mock_from_dict.assert_called_once()
            assert agent.db == mock_db
    def test_from_dict_with_db_sqlite(self):
        """Test from_dict reconstructs SqliteDb."""
        config = {
            "id": "sqlite-agent",
            "db": {"type": "sqlite", "db_file": "/tmp/test.db"},
        }
        with patch("agno.db.sqlite.SqliteDb.from_dict") as mock_from_dict:
            mock_db = MagicMock()
            mock_from_dict.return_value = mock_db
            agent = Agent.from_dict(config)
            mock_from_dict.assert_called_once()
            assert agent.db == mock_db
    def test_from_dict_with_registry_tools(self):
        """Test from_dict uses registry to rehydrate tools."""
        config = {
            "id": "tools-agent",
            "tools": [{"name": "search", "description": "Search the web"}],
        }
        mock_registry = MagicMock()
        mock_tool = MagicMock()
        mock_registry.rehydrate_function.return_value = mock_tool
        agent = Agent.from_dict(config, registry=mock_registry)
        mock_registry.rehydrate_function.assert_called_once()
        assert agent.tools == [mock_tool]
    def test_from_dict_without_registry_removes_tools(self):
        """Test from_dict removes tools when no registry is provided."""
        config = {
            "id": "no-registry-agent",
            "tools": [{"name": "search"}],
        }
        agent = Agent.from_dict(config)
        # Tools should be None/empty since no registry was provided
        # (either representation is accepted -- the contract is "no tools")
        assert agent.tools is None or agent.tools == []
    def test_from_dict_roundtrip(self, agent_with_settings):
        """Test that to_dict -> from_dict preserves agent configuration."""
        config = agent_with_settings.to_dict()
        reconstructed = Agent.from_dict(config)
        assert reconstructed.id == agent_with_settings.id
        assert reconstructed.name == agent_with_settings.name
        assert reconstructed.description == agent_with_settings.description
        assert reconstructed.markdown == agent_with_settings.markdown
        assert reconstructed.debug_mode == agent_with_settings.debug_mode
        assert reconstructed.retries == agent_with_settings.retries
    def test_from_dict_roundtrip_store_history_messages_true(self):
        """Test that store_history_messages=True survives to_dict/from_dict round-trip."""
        agent = Agent(id="roundtrip-agent", store_history_messages=True)
        config = agent.to_dict()
        reconstructed = Agent.from_dict(config)
        assert reconstructed.store_history_messages is True
    def test_from_dict_roundtrip_store_history_messages_false(self):
        """Test that store_history_messages=False (default) survives round-trip."""
        agent = Agent(id="roundtrip-agent-default", store_history_messages=False)
        config = agent.to_dict()
        reconstructed = Agent.from_dict(config)
        assert reconstructed.store_history_messages is False
# =============================================================================
# save() Tests
# =============================================================================
class TestAgentSave:
    """Tests for Agent.save() method.

    save() is expected to call db.upsert_component (identity/metadata) and
    db.upsert_config (versioned config), returning the new config version.
    """
    def test_save_calls_upsert_component(self, basic_agent, mock_db):
        """Test save calls upsert_component with correct parameters."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_agent.db = mock_db
        version = basic_agent.save()
        mock_db.upsert_component.assert_called_once_with(
            component_id="test-agent",
            component_type=ComponentType.AGENT,
            name="Test Agent",
            description="A test agent for unit testing",
            metadata=None,
        )
        assert version == 1
    def test_save_calls_upsert_config(self, basic_agent, mock_db):
        """Test save calls upsert_config with agent config."""
        mock_db.upsert_config.return_value = {"version": 2}
        basic_agent.db = mock_db
        version = basic_agent.save()
        mock_db.upsert_config.assert_called_once()
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["component_id"] == "test-agent"
        assert "config" in call_args.kwargs
        # The version returned by save() comes from upsert_config's result
        assert version == 2
    def test_save_with_explicit_db(self, basic_agent, mock_db):
        """Test save uses explicitly provided db."""
        mock_db.upsert_config.return_value = {"version": 1}
        version = basic_agent.save(db=mock_db)
        mock_db.upsert_component.assert_called_once()
        mock_db.upsert_config.assert_called_once()
        assert version == 1
    def test_save_with_label(self, basic_agent, mock_db):
        """Test save passes label to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_agent.db = mock_db
        basic_agent.save(label="production")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["label"] == "production"
    def test_save_with_stage(self, basic_agent, mock_db):
        """Test save passes stage to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_agent.db = mock_db
        basic_agent.save(stage="draft")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["stage"] == "draft"
    def test_save_with_notes(self, basic_agent, mock_db):
        """Test save passes notes to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_agent.db = mock_db
        basic_agent.save(notes="Initial version")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["notes"] == "Initial version"
    def test_save_without_db_raises_error(self, basic_agent):
        """Test save raises error when no db is available."""
        with pytest.raises(ValueError, match="Db not initialized or provided"):
            basic_agent.save()
    def test_save_generates_id_from_name(self, mock_db):
        """Test save generates id from name if not provided."""
        # Only asserts an id exists -- presumably derived via
        # generate_id_from_name; exact format is not pinned here.
        mock_db.upsert_config.return_value = {"version": 1}
        agent = Agent(name="My Test Agent", db=mock_db)
        agent.save()
        # ID should be generated from name
        assert agent.id is not None
        call_args = mock_db.upsert_component.call_args
        assert call_args.kwargs["component_id"] is not None
    def test_save_handles_db_error(self, basic_agent, mock_db):
        """Test save raises error when database operation fails."""
        mock_db.upsert_component.side_effect = Exception("Database error")
        basic_agent.db = mock_db
        with pytest.raises(Exception, match="Database error"):
            basic_agent.save()
# =============================================================================
# load() Tests
# =============================================================================
class TestAgentLoad:
    """Tests for Agent.load() class method.

    load() fetches a stored config via db.get_config and rebuilds the agent
    with from_dict; it returns None rather than raising when nothing is found.
    """
    def test_load_returns_agent(self, mock_db, sample_agent_config):
        """Test load returns an agent from database."""
        # Remove model to avoid model lookup issues
        config = sample_agent_config.copy()
        del config["model"]
        mock_db.get_config.return_value = {"config": config}
        agent = Agent.load(id="sample-agent", db=mock_db)
        assert agent is not None
        assert agent.id == "sample-agent"
        assert agent.name == "Sample Agent"
    def test_load_with_version(self, mock_db):
        """Test load retrieves specific version."""
        mock_db.get_config.return_value = {"config": {"id": "versioned-agent", "name": "V2 Agent"}}
        Agent.load(id="versioned-agent", db=mock_db, version=2)
        mock_db.get_config.assert_called_once_with(component_id="versioned-agent", label=None, version=2)
    def test_load_with_label(self, mock_db):
        """Test load retrieves labeled version."""
        mock_db.get_config.return_value = {"config": {"id": "labeled-agent", "name": "Production Agent"}}
        Agent.load(id="labeled-agent", db=mock_db, label="production")
        mock_db.get_config.assert_called_once_with(component_id="labeled-agent", label="production", version=None)
    def test_load_with_registry(self, mock_db):
        """Test load passes registry to from_dict."""
        mock_db.get_config.return_value = {"config": {"id": "registry-agent", "tools": [{"name": "search"}]}}
        mock_registry = MagicMock()
        mock_registry.rehydrate_function.return_value = MagicMock()
        agent = Agent.load(id="registry-agent", db=mock_db, registry=mock_registry)
        assert agent is not None
        mock_registry.rehydrate_function.assert_called()
    def test_load_returns_none_when_not_found(self, mock_db):
        """Test load returns None when agent not found."""
        mock_db.get_config.return_value = None
        agent = Agent.load(id="nonexistent-agent", db=mock_db)
        assert agent is None
    def test_load_returns_none_when_config_missing(self, mock_db):
        """Test load returns None when config is missing."""
        # Row exists but has no usable config payload
        mock_db.get_config.return_value = {"config": None}
        agent = Agent.load(id="empty-config-agent", db=mock_db)
        assert agent is None
    def test_load_sets_db_on_agent(self, mock_db):
        """Test load sets db attribute on returned agent."""
        mock_db.get_config.return_value = {"config": {"id": "db-agent", "name": "DB Agent"}}
        agent = Agent.load(id="db-agent", db=mock_db)
        assert agent is not None
        assert agent.db == mock_db
    def test_save_load_preserves_store_history_messages(self, mock_db):
        """Test that store_history_messages=True survives save/load round-trip."""
        agent = Agent(id="persist-agent", name="Persist Agent", store_history_messages=True, db=mock_db)
        # Capture the config passed to upsert_config during save
        saved_config = {}
        def capture_config(**kwargs):
            # side_effect closure: records the serialized config for later replay
            saved_config.update(kwargs.get("config", {}))
            return {"version": 1}
        mock_db.upsert_config.side_effect = capture_config
        agent.save()
        assert saved_config.get("store_history_messages") is True
        # Simulate load returning the saved config
        mock_db.get_config.return_value = {"config": saved_config}
        loaded = Agent.load(id="persist-agent", db=mock_db)
        assert loaded is not None
        assert loaded.store_history_messages is True
# =============================================================================
# delete() Tests
# =============================================================================
class TestAgentDelete:
    """Tests for Agent.delete() method."""
    def test_delete_calls_delete_component(self, basic_agent, mock_db):
        """Test delete calls delete_component."""
        basic_agent.db = mock_db
        mock_db.delete_component.return_value = True
        deleted = basic_agent.delete()
        assert deleted is True
        mock_db.delete_component.assert_called_once_with(component_id="test-agent", hard_delete=False)
    def test_delete_with_hard_delete(self, basic_agent, mock_db):
        """Test delete with hard_delete flag."""
        basic_agent.db = mock_db
        mock_db.delete_component.return_value = True
        deleted = basic_agent.delete(hard_delete=True)
        assert deleted is True
        mock_db.delete_component.assert_called_once_with(component_id="test-agent", hard_delete=True)
    def test_delete_with_explicit_db(self, basic_agent, mock_db):
        """Test delete uses explicitly provided db."""
        mock_db.delete_component.return_value = True
        deleted = basic_agent.delete(db=mock_db)
        assert deleted is True
        mock_db.delete_component.assert_called_once()
    def test_delete_without_db_raises_error(self, basic_agent):
        """Test delete raises error when no db is available."""
        with pytest.raises(ValueError, match="Db not initialized or provided"):
            basic_agent.delete()
    def test_delete_returns_false_on_failure(self, basic_agent, mock_db):
        """Test delete returns False when operation fails."""
        basic_agent.db = mock_db
        mock_db.delete_component.return_value = False
        assert basic_agent.delete() is False
# =============================================================================
# get_agent_by_id() Tests
# =============================================================================
class TestGetAgentById:
    """Tests for get_agent_by_id() helper function.

    Behaves like Agent.load but as a module-level helper; returns None both
    when the agent is missing and when the db raises.
    """
    def test_get_agent_by_id_returns_agent(self, mock_db):
        """Test get_agent_by_id returns agent from database."""
        mock_db.get_config.return_value = {"config": {"id": "found-agent", "name": "Found Agent"}}
        agent = get_agent_by_id(db=mock_db, id="found-agent")
        assert agent is not None
        assert agent.id == "found-agent"
        assert agent.name == "Found Agent"
    def test_get_agent_by_id_with_version(self, mock_db):
        """Test get_agent_by_id retrieves specific version."""
        mock_db.get_config.return_value = {"config": {"id": "versioned", "name": "V3"}}
        get_agent_by_id(db=mock_db, id="versioned", version=3)
        mock_db.get_config.assert_called_once_with(component_id="versioned", label=None, version=3)
    def test_get_agent_by_id_with_label(self, mock_db):
        """Test get_agent_by_id retrieves labeled version."""
        mock_db.get_config.return_value = {"config": {"id": "labeled", "name": "Staging"}}
        get_agent_by_id(db=mock_db, id="labeled", label="staging")
        mock_db.get_config.assert_called_once_with(component_id="labeled", label="staging", version=None)
    def test_get_agent_by_id_with_registry(self, mock_db):
        """Test get_agent_by_id passes registry."""
        mock_db.get_config.return_value = {"config": {"id": "registry-agent", "tools": [{"name": "calc"}]}}
        mock_registry = MagicMock()
        mock_registry.rehydrate_function.return_value = MagicMock()
        agent = get_agent_by_id(db=mock_db, id="registry-agent", registry=mock_registry)
        assert agent is not None
    def test_get_agent_by_id_returns_none_when_not_found(self, mock_db):
        """Test get_agent_by_id returns None when not found."""
        mock_db.get_config.return_value = None
        agent = get_agent_by_id(db=mock_db, id="missing")
        assert agent is None
    def test_get_agent_by_id_sets_db(self, mock_db):
        """Test get_agent_by_id sets db on returned agent via registry."""
        # The db is set via registry lookup when config contains a serialized db reference
        mock_db.id = "test-db"
        mock_db.get_config.return_value = {
            "config": {
                "id": "db-agent",
                "name": "DB Agent",
                "db": {"type": "postgres", "id": "test-db"},
            }
        }
        # Create registry with the mock db registered
        registry = Registry(dbs=[mock_db])
        agent = get_agent_by_id(db=mock_db, id="db-agent", registry=registry)
        assert agent is not None
        assert agent.db == mock_db
    def test_get_agent_by_id_handles_error(self, mock_db):
        """Test get_agent_by_id returns None on error."""
        # Errors are swallowed and surfaced as None rather than propagated
        mock_db.get_config.side_effect = Exception("DB error")
        agent = get_agent_by_id(db=mock_db, id="error-agent")
        assert agent is None
# =============================================================================
# get_agents() Tests
# =============================================================================
class TestGetAgents:
    """Tests for the get_agents() helper function."""

    def test_get_agents_returns_list(self, mock_db):
        """Every listed component is loaded into an agent, in order."""
        mock_db.list_components.return_value = (
            [
                {"component_id": "agent-1"},
                {"component_id": "agent-2"},
            ],
            None,
        )
        mock_db.get_config.side_effect = [
            {"config": {"id": "agent-1", "name": "Agent 1"}},
            {"config": {"id": "agent-2", "name": "Agent 2"}},
        ]
        loaded = get_agents(db=mock_db)
        assert len(loaded) == 2
        assert loaded[0].id == "agent-1"
        assert loaded[1].id == "agent-2"

    def test_get_agents_filters_by_type(self, mock_db):
        """Listing is scoped to the AGENT component type."""
        mock_db.list_components.return_value = ([], None)
        get_agents(db=mock_db)
        mock_db.list_components.assert_called_once_with(
            component_type=ComponentType.AGENT, exclude_component_ids=None
        )

    def test_get_agents_with_registry(self, mock_db):
        """A registry is forwarded when configs contain tool references."""
        mock_db.list_components.return_value = ([{"component_id": "tools-agent"}], None)
        mock_db.get_config.return_value = {"config": {"id": "tools-agent", "tools": [{"name": "search"}]}}
        registry = MagicMock()
        registry.rehydrate_function.return_value = MagicMock()
        loaded = get_agents(db=mock_db, registry=registry)
        assert len(loaded) == 1

    def test_get_agents_returns_empty_list_on_error(self, mock_db):
        """Database failures produce an empty result, not an exception."""
        mock_db.list_components.side_effect = Exception("DB error")
        assert get_agents(db=mock_db) == []

    def test_get_agents_skips_invalid_configs(self, mock_db):
        """Components whose stored config is missing are dropped."""
        mock_db.list_components.return_value = (
            [
                {"component_id": "valid-agent"},
                {"component_id": "invalid-agent"},
            ],
            None,
        )
        mock_db.get_config.side_effect = [
            {"config": {"id": "valid-agent", "name": "Valid"}},
            {"config": None},  # second component has no usable config
        ]
        loaded = get_agents(db=mock_db)
        assert len(loaded) == 1
        assert loaded[0].id == "valid-agent"

    def test_get_agents_sets_db_on_all_agents(self, mock_db):
        """A serialized db reference resolves through the registry for each agent."""
        mock_db.id = "test-db"
        mock_db.list_components.return_value = ([{"component_id": "agent-1"}], None)
        mock_db.get_config.return_value = {
            "config": {
                "id": "agent-1",
                "name": "Agent 1",
                "db": {"type": "postgres", "id": "test-db"},
            }
        }
        # Registering the mock db lets the loader resolve the reference above.
        registry = Registry(dbs=[mock_db])
        loaded = get_agents(db=mock_db, registry=registry)
        assert len(loaded) == 1
        assert loaded[0].db == mock_db
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_agent_config.py",
"license": "Apache License 2.0",
"lines": 600,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_filesystem_knowledge.py | """Unit tests for FileSystemKnowledge implementation."""
from pathlib import Path
import pytest
from agno.knowledge.document import Document
from agno.knowledge.filesystem import FileSystemKnowledge
# Initialization tests
def test_init_with_valid_directory(tmp_path):
    """A valid directory is accepted and defaults are applied."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert kb.base_path == tmp_path.resolve()
    assert kb.max_results == 50
    assert kb.include_patterns == []
    assert ".git" in kb.exclude_patterns


def test_init_with_custom_config(tmp_path):
    """Custom limits and patterns override the defaults."""
    kb = FileSystemKnowledge(
        base_dir=str(tmp_path),
        max_results=100,
        include_patterns=["*.py", "*.md"],
        exclude_patterns=["test_*"],
    )
    assert kb.max_results == 100
    assert "*.py" in kb.include_patterns
    assert "test_*" in kb.exclude_patterns


def test_init_with_nonexistent_directory():
    """A path that does not exist is rejected at construction time."""
    with pytest.raises(ValueError, match="Directory does not exist"):
        FileSystemKnowledge(base_dir="/nonexistent/path/12345")


def test_init_with_file_not_directory(tmp_path):
    """A plain file is rejected as a base directory."""
    not_a_dir = tmp_path / "test.txt"
    not_a_dir.write_text("content")
    with pytest.raises(ValueError, match="Path is not a directory"):
        FileSystemKnowledge(base_dir=str(not_a_dir))
# Internal helper method tests
def test_should_include_file_default(tmp_path):
    """Default patterns keep regular files and drop VCS/cache directories."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    # A plain source file passes the default filter.
    assert kb._should_include_file(tmp_path / "test.py")
    # Anything under .git or __pycache__ is filtered out by default.
    assert not kb._should_include_file(tmp_path / ".git" / "config")
    assert not kb._should_include_file(tmp_path / "__pycache__" / "module.pyc")


def test_should_include_file_with_include_patterns(tmp_path):
    """Only files matching an include pattern are kept."""
    kb = FileSystemKnowledge(
        base_dir=str(tmp_path),
        include_patterns=["*.py", "*.md"],
    )
    assert kb._should_include_file(tmp_path / "test.py")
    assert kb._should_include_file(tmp_path / "README.md")
    # A file matching no include pattern is rejected.
    assert not kb._should_include_file(tmp_path / "data.txt")


def test_should_include_file_with_exclude_patterns(tmp_path):
    """Files matching a custom exclude pattern are dropped."""
    kb = FileSystemKnowledge(
        base_dir=str(tmp_path),
        exclude_patterns=["excluded_", "skip_"],
    )
    assert not kb._should_include_file(tmp_path / "excluded_module.py")
    assert not kb._should_include_file(tmp_path / "skip_data.txt")
    # A name matching no exclude pattern survives.
    assert kb._should_include_file(tmp_path / "module.py")
# List files tests
def test_list_files_all(tmp_path):
    """A wildcard listing returns one file_listing Document per file."""
    for name, body in (("file1.py", "content1"), ("file2.md", "content2"), ("file3.txt", "content3")):
        (tmp_path / name).write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._list_files("*")
    assert len(docs) == 3
    for doc in docs:
        assert isinstance(doc, Document)
        assert doc.meta_data["type"] == "file_listing"


def test_list_files_with_pattern(tmp_path):
    """A glob pattern restricts the listing to matching files."""
    (tmp_path / "test1.py").write_text("content1")
    (tmp_path / "test2.py").write_text("content2")
    (tmp_path / "readme.md").write_text("content3")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._list_files("*.py")
    assert len(docs) == 2
    assert all(doc.name.endswith(".py") for doc in docs)


def test_list_files_with_max_results(tmp_path):
    """The max_results cap truncates the listing."""
    for i in range(10):
        (tmp_path / f"file{i}.txt").write_text(f"content{i}")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._list_files("*", max_results=5)) == 5


def test_list_files_nested_directories(tmp_path):
    """Files in subdirectories are listed with their relative paths."""
    subdir = tmp_path / "subdir"
    subdir.mkdir()
    (tmp_path / "root.txt").write_text("root content")
    (subdir / "nested.txt").write_text("nested content")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._list_files("*")
    assert len(docs) == 2
    names = [doc.name for doc in docs]
    assert "root.txt" in names
    # Nested files are named by a platform-appropriate relative path.
    assert str(Path("subdir") / "nested.txt") in names


def test_list_files_respects_exclude_patterns(tmp_path):
    """Excluded directories such as node_modules are skipped entirely."""
    excluded_dir = tmp_path / "node_modules"
    excluded_dir.mkdir()
    (excluded_dir / "package.js").write_text("excluded")
    (tmp_path / "app.py").write_text("included")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._list_files("*")
    assert len(docs) == 1
    assert docs[0].name == "app.py"
# Get file tests
def test_get_file_relative_path(tmp_path):
    """A path relative to the base dir is resolved and read."""
    body = "Hello, World!"
    (tmp_path / "test.txt").write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._get_file("test.txt")
    assert len(docs) == 1
    assert docs[0].content == body
    assert docs[0].name == "test.txt"
    assert docs[0].meta_data["type"] == "file_content"


def test_get_file_absolute_path(tmp_path):
    """An absolute path inside the base dir is accepted."""
    target = tmp_path / "test.txt"
    body = "Absolute path test"
    target.write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._get_file(str(target))
    assert len(docs) == 1
    assert docs[0].content == body


def test_get_file_nested_path(tmp_path):
    """A relative path into a subdirectory is resolved."""
    subdir = tmp_path / "subdir"
    subdir.mkdir()
    body = "Nested file content"
    (subdir / "nested.txt").write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._get_file(str(Path("subdir") / "nested.txt"))
    assert len(docs) == 1
    assert docs[0].content == body


def test_get_file_nonexistent(tmp_path):
    """A missing file yields no documents."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._get_file("nonexistent.txt")) == 0


def test_get_file_is_directory(tmp_path):
    """A directory path yields no documents."""
    (tmp_path / "subdir").mkdir()
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._get_file("subdir")) == 0


def test_get_file_metadata(tmp_path):
    """Extension, byte size, and line count are recorded in metadata."""
    body = "print('hello')\n"
    (tmp_path / "test.py").write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    meta = kb._get_file("test.py")[0].meta_data
    assert meta["extension"] == ".py"
    assert meta["size"] == len(body)
    # A trailing newline counts as starting a second (empty) line.
    assert meta["lines"] == 2
# Grep tests
def test_grep_literal_string(tmp_path):
    """A literal query matches exactly the files containing it."""
    (tmp_path / "file1.txt").write_text("Hello, World!")
    (tmp_path / "file2.txt").write_text("Goodbye, World!")
    (tmp_path / "file3.txt").write_text("No match here")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._grep("World")
    assert len(docs) == 2
    matched = [doc.name for doc in docs]
    assert "file1.txt" in matched
    assert "file2.txt" in matched


def test_grep_case_insensitive(tmp_path):
    """Matching ignores letter case."""
    (tmp_path / "test.txt").write_text("Hello, WORLD!")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._grep("world")) == 1


def test_grep_regex_pattern(tmp_path):
    """A regular-expression query is honoured."""
    (tmp_path / "file1.txt").write_text("email@example.com")
    (tmp_path / "file2.txt").write_text("no email here")
    (tmp_path / "file3.txt").write_text("test@domain.org")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._grep(r"\w+@\w+\.\w+")) == 2


def test_grep_invalid_regex_fallback_to_literal(tmp_path):
    """A query that is invalid regex is retried as a literal string."""
    (tmp_path / "test.txt").write_text("Test [bracket] text")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    # "[" alone is not valid regex; the match proves the literal fallback ran.
    assert len(kb._grep("[")) == 1


def test_grep_with_context(tmp_path):
    """Matches carry the surrounding context lines."""
    body = """line 1
line 2
target line
line 4
line 5"""
    (tmp_path / "test.txt").write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._grep("target")
    assert len(docs) == 1
    # The line before and after the hit should both be present.
    for expected in ("line 2", "target line", "line 4"):
        assert expected in docs[0].content


def test_grep_max_results(tmp_path):
    """The max_results cap truncates the match list."""
    for i in range(10):
        (tmp_path / f"file{i}.txt").write_text("match")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._grep("match", max_results=5)) == 5


def test_grep_metadata(tmp_path):
    """Grep results record their type and match statistics."""
    (tmp_path / "test.txt").write_text("find this match here")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    meta = kb._grep("match")[0].meta_data
    assert meta["type"] == "grep_result"
    assert meta["match_count"] > 0
    assert "matches" in meta


def test_grep_no_matches(tmp_path):
    """A query with no hits yields an empty list."""
    (tmp_path / "test.txt").write_text("some content")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._grep("nonexistent")) == 0
# Protocol implementation tests
def test_build_context(tmp_path):
    """The context string names every tool and the base path."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    context = kb.build_context()
    assert isinstance(context, str)
    assert len(context) > 0
    for expected in ("grep_file", "list_files", "get_file", str(tmp_path)):
        assert expected in context


def test_get_tools_returns_three_tools(tmp_path):
    """Exactly three tools are exposed, one per operation."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    tools = kb.get_tools()
    assert len(tools) == 3
    names = [tool.name for tool in tools]
    for expected in ("grep_file", "list_files", "get_file"):
        assert expected in names


@pytest.mark.asyncio
async def test_aget_tools(tmp_path):
    """The async tool accessor returns the same three tools."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    tools = await kb.aget_tools()
    assert len(tools) == 3


def test_retrieve_uses_grep(tmp_path):
    """retrieve() surfaces grep matches as Documents."""
    (tmp_path / "test.txt").write_text("retrievable content")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb.retrieve("retrievable", max_results=10)
    assert len(docs) == 1
    assert isinstance(docs[0], Document)


@pytest.mark.asyncio
async def test_aretrieve(tmp_path):
    """Async retrieval finds matching content."""
    (tmp_path / "test.txt").write_text("async retrievable content")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = await kb.aretrieve("retrievable")
    assert len(docs) == 1
# Tool execution tests
def _tool_by_name(kb, name):
    """Return the tool with the given name from kb's toolset."""
    return next(t for t in kb.get_tools() if t.name == name)


def test_grep_file_tool(tmp_path):
    """The grep_file tool returns a readable report of matches."""
    (tmp_path / "test.txt").write_text("Find this text")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "grep_file").entrypoint("Find")
    assert isinstance(report, str)
    assert "test.txt" in report
    assert "Find this text" in report


def test_grep_file_tool_no_matches(tmp_path):
    """The grep_file tool reports when nothing matched."""
    (tmp_path / "test.txt").write_text("some content")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "grep_file").entrypoint("nonexistent")
    assert "No matches found" in report


def test_list_files_tool(tmp_path):
    """The list_files tool honours glob patterns."""
    (tmp_path / "file1.py").write_text("content1")
    (tmp_path / "file2.py").write_text("content2")
    (tmp_path / "readme.md").write_text("content3")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "list_files").entrypoint("*.py")
    assert isinstance(report, str)
    assert "file1.py" in report
    assert "file2.py" in report
    assert "readme.md" not in report


def test_list_files_tool_all_files(tmp_path):
    """The list_files tool lists everything for a bare wildcard."""
    (tmp_path / "file1.txt").write_text("content1")
    (tmp_path / "file2.txt").write_text("content2")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "list_files").entrypoint("*")
    assert "file1.txt" in report
    assert "file2.txt" in report


def test_list_files_tool_no_matches(tmp_path):
    """The list_files tool reports when no file matched."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "list_files").entrypoint("*.nonexistent")
    assert "No files found" in report


def test_get_file_tool(tmp_path):
    """The get_file tool returns the file body and its name."""
    body = "File content here"
    (tmp_path / "test.txt").write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "get_file").entrypoint("test.txt")
    assert isinstance(report, str)
    assert body in report
    assert "test.txt" in report


def test_get_file_tool_not_found(tmp_path):
    """The get_file tool reports a missing file."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    report = _tool_by_name(kb, "get_file").entrypoint("nonexistent.txt")
    assert "File not found" in report
# Edge cases and error conditions
def test_empty_directory(tmp_path):
    """All operations return empty results on an empty base directory."""
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    assert len(kb._list_files("*")) == 0
    assert len(kb._grep("anything")) == 0
    assert len(kb._get_file("nonexistent.txt")) == 0


def test_binary_files_skipped_in_grep(tmp_path):
    """Binary files are skipped rather than crashing grep."""
    (tmp_path / "binary.bin").write_bytes(b"\x00\x01\x02\x03\x04")
    (tmp_path / "text.txt").write_text("searchable text")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._grep("text")
    # Only the text file is matched; the binary one is ignored.
    assert len(docs) == 1
    assert docs[0].name == "text.txt"


def test_unicode_content(tmp_path):
    """Non-ASCII content round-trips intact."""
    body = "Hello 世界 🌍"
    (tmp_path / "unicode.txt").write_text(body, encoding="utf-8")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._get_file("unicode.txt")
    assert len(docs) == 1
    assert docs[0].content == body


def test_large_file_handling(tmp_path):
    """A thousand-line file is read back in full."""
    body = "\n".join(f"Line {i}" for i in range(1000))
    (tmp_path / "large.txt").write_text(body)
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._get_file("large.txt")
    assert len(docs) == 1
    assert len(docs[0].content.split("\n")) == 1000


def test_special_characters_in_filename(tmp_path):
    """File names containing spaces are listed verbatim."""
    (tmp_path / "file with spaces.txt").write_text("content")
    kb = FileSystemKnowledge(base_dir=str(tmp_path))
    docs = kb._list_files("*")
    assert len(docs) == 1
    assert docs[0].name == "file with spaces.txt"


def test_symlinks_are_handled(tmp_path):
    """Symlinks are tolerated; the test is skipped where unsupported."""
    real_file = tmp_path / "real.txt"
    real_file.write_text("real content")
    link_file = tmp_path / "link.txt"
    try:
        link_file.symlink_to(real_file)
        kb = FileSystemKnowledge(base_dir=str(tmp_path))
        # Both the real file and the link should appear in the listing.
        assert len(kb._list_files("*")) >= 1
    except OSError:
        # Symlink creation may be unavailable (e.g. Windows without privileges).
        pytest.skip("Symlinks not supported on this system")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_filesystem_knowledge.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_insert_many_auth.py | """Tests for insert_many() and ainsert_many() auth parameter passing."""
from unittest.mock import AsyncMock, patch
import pytest
from agno.knowledge.content import ContentAuth
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """Minimal VectorDb stub for testing.

    Every method is a no-op returning a benign constant. All ``async_*``
    variants are declared ``async def`` so callers that ``await`` them
    behave as they would against a real implementation.
    """

    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    def name_exists(self, name: str) -> bool:
        return False

    # Fix: previously a plain ``def`` — the only async_* stub that was not
    # awaitable, inconsistent with async_create/async_insert/etc. below.
    async def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return False

    def insert(self, content_hash: str, documents, filters=None) -> None:
        pass

    async def async_insert(self, content_hash: str, documents, filters=None) -> None:
        pass

    def upsert(self, content_hash: str, documents, filters=None) -> None:
        pass

    async def async_upsert(self, content_hash: str, documents, filters=None) -> None:
        pass

    def search(self, query: str, limit: int = 5, filters=None):
        return []

    async def async_search(self, query: str, limit: int = 5, filters=None):
        return []

    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self):
        return ["vector"]
def test_insert_many_passes_auth_to_insert():
    """insert_many() forwards each item's auth to the underlying insert()."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    first_auth = ContentAuth(password="secret1")
    second_auth = ContentAuth(password="secret2")
    with patch.object(knowledge, "insert") as mock_insert:
        knowledge.insert_many(
            [
                {"text_content": "doc1", "auth": first_auth},
                {"text_content": "doc2", "auth": second_auth},
            ]
        )
        assert mock_insert.call_count == 2
        # Each delegated call carries the auth supplied for its item, in order.
        assert mock_insert.call_args_list[0][1].get("auth") == first_auth
        assert mock_insert.call_args_list[1][1].get("auth") == second_auth


def test_insert_many_passes_none_auth_when_not_provided():
    """insert_many() defaults auth to None for items without one."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    with patch.object(knowledge, "insert") as mock_insert:
        knowledge.insert_many(
            [
                {"text_content": "doc1"},
                {"text_content": "doc2"},
            ]
        )
        assert mock_insert.call_count == 2
        assert all(call[1].get("auth") is None for call in mock_insert.call_args_list)
@pytest.mark.asyncio
async def test_ainsert_many_passes_auth_to_ainsert():
    """ainsert_many() forwards each item's auth to the underlying ainsert()."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    first_auth = ContentAuth(password="secret1")
    second_auth = ContentAuth(password="secret2")
    with patch.object(knowledge, "ainsert", new_callable=AsyncMock) as mock_ainsert:
        await knowledge.ainsert_many(
            [
                {"text_content": "doc1", "auth": first_auth},
                {"text_content": "doc2", "auth": second_auth},
            ]
        )
        assert mock_ainsert.call_count == 2
        # Each delegated call carries the auth supplied for its item, in order.
        assert mock_ainsert.call_args_list[0][1].get("auth") == first_auth
        assert mock_ainsert.call_args_list[1][1].get("auth") == second_auth


@pytest.mark.asyncio
async def test_ainsert_many_passes_none_auth_when_not_provided():
    """ainsert_many() defaults auth to None for items without one."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    with patch.object(knowledge, "ainsert", new_callable=AsyncMock) as mock_ainsert:
        await knowledge.ainsert_many(
            [
                {"text_content": "doc1"},
                {"text_content": "doc2"},
            ]
        )
        assert mock_ainsert.call_count == 2
        assert all(call[1].get("auth") is None for call in mock_ainsert.call_args_list)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_insert_many_auth.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_components_router.py | """
Unit tests for the Components router.
Tests cover:
- GET /components - List components
- POST /components - Create component
- GET /components/{component_id} - Get component
- PATCH /components/{component_id} - Update component
- DELETE /components/{component_id} - Delete component
- GET /components/{component_id}/configs - List configs
- POST /components/{component_id}/configs - Create config
- GET /components/{component_id}/configs/current - Get current config
- GET /components/{component_id}/configs/{version} - Get config version
- PATCH /components/{component_id}/configs/{version} - Update config
- DELETE /components/{component_id}/configs/{version} - Delete config
- POST /components/{component_id}/configs/{version}/set-current - Set current version
"""
from unittest.mock import MagicMock
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from agno.db.base import BaseDb, ComponentType
from agno.os.routers.components import get_components_router
from agno.os.settings import AgnoAPISettings
# =============================================================================
# Fixtures
# =============================================================================
def _create_mock_db_class():
    """Create a concrete BaseDb subclass with all abstract methods stubbed."""
    stubs = {
        name: MagicMock()
        for name in dir(BaseDb)
        # Only abstract members need stubbing for the class to be instantiable.
        if getattr(getattr(BaseDb, name, None), "__isabstractmethod__", False)
    }
    return type("MockDb", (BaseDb,), stubs)
@pytest.fixture
def mock_db():
    """A concrete BaseDb stand-in whose component/config methods are MagicMocks."""
    db = _create_mock_db_class()()
    db.id = "test-db"
    for method_name in (
        "list_components",
        "get_component",
        "upsert_component",
        "delete_component",
        "create_component_with_config",
        "list_configs",
        "get_config",
        "upsert_config",
        "delete_config",
        "set_current_version",
    ):
        setattr(db, method_name, MagicMock())
    db.to_dict = MagicMock(return_value={"type": "postgres", "id": "test-db"})
    return db


@pytest.fixture
def settings():
    """Default settings; without a security key, authentication is disabled."""
    return AgnoAPISettings()


@pytest.fixture
def client(mock_db, settings):
    """A TestClient serving only the components router."""
    app = FastAPI()
    app.include_router(get_components_router(os_db=mock_db, settings=settings))
    return TestClient(app)
# =============================================================================
# List Components Tests
# =============================================================================
class TestListComponents:
    """Tests for GET /components."""

    def test_list_components_returns_paginated_response(self, client, mock_db):
        """Rows and totals are wrapped in a paginated envelope."""
        rows = [
            {"component_id": "agent-1", "name": "Agent 1", "component_type": "agent", "created_at": 1234567890},
            {"component_id": "agent-2", "name": "Agent 2", "component_type": "agent", "created_at": 1234567890},
        ]
        mock_db.list_components.return_value = (rows, 2)
        response = client.get("/components")
        assert response.status_code == 200
        payload = response.json()
        assert len(payload["data"]) == 2
        assert payload["meta"]["total_count"] == 2
        assert payload["meta"]["page"] == 1

    def test_list_components_with_type_filter(self, client, mock_db):
        """The component_type query param is translated to the enum."""
        mock_db.list_components.return_value = ([], 0)
        response = client.get("/components?component_type=agent")
        assert response.status_code == 200
        mock_db.list_components.assert_called_once()
        assert mock_db.list_components.call_args.kwargs["component_type"] == ComponentType.AGENT

    def test_list_components_with_pagination(self, client, mock_db):
        """page/limit query params become limit/offset for the db layer."""
        mock_db.list_components.return_value = ([], 100)
        response = client.get("/components?page=3&limit=10")
        assert response.status_code == 200
        mock_db.list_components.assert_called_once()
        kwargs = mock_db.list_components.call_args.kwargs
        assert kwargs["limit"] == 10
        assert kwargs["offset"] == 20  # pages are 1-based: (3 - 1) * 10

    def test_list_components_handles_error(self, client, mock_db):
        """Database failures surface as HTTP 500."""
        mock_db.list_components.side_effect = Exception("DB error")
        assert client.get("/components").status_code == 500
# =============================================================================
# Create Component Tests
# =============================================================================
class TestCreateComponent:
    """Tests for POST /components."""

    def test_create_component_success(self, client, mock_db):
        """A valid payload creates the component and returns 201."""
        mock_db.create_component_with_config.return_value = (
            {
                "component_id": "test-agent",
                "name": "Test Agent",
                "component_type": "agent",
                "created_at": 1234567890,
            },
            {"version": 1},
        )
        payload = {
            "name": "Test Agent",
            "component_type": "agent",
            "config": {"id": "test-agent"},
        }
        response = client.post("/components", json=payload)
        assert response.status_code == 201
        body = response.json()
        assert body["component_id"] == "test-agent"
        assert body["name"] == "Test Agent"

    def test_create_component_generates_id_from_name(self, client, mock_db):
        """When no id is supplied, one is derived from the name."""
        mock_db.create_component_with_config.return_value = (
            {"component_id": "my-agent", "name": "My Agent", "component_type": "agent", "created_at": 1234567890},
            {"version": 1},
        )
        response = client.post(
            "/components",
            json={"name": "My Agent", "component_type": "agent"},
        )
        assert response.status_code == 201
        # The generated id should have been handed to the db layer.
        assert mock_db.create_component_with_config.call_args.kwargs["component_id"] == "my-agent"

    def test_create_component_with_explicit_id(self, client, mock_db):
        """An explicit component_id is used verbatim."""
        mock_db.create_component_with_config.return_value = (
            {"component_id": "custom-id", "name": "Test", "component_type": "agent", "created_at": 1234567890},
            {"version": 1},
        )
        payload = {
            "name": "Test",
            "component_type": "agent",
            "component_id": "custom-id",
        }
        response = client.post("/components", json=payload)
        assert response.status_code == 201
        assert mock_db.create_component_with_config.call_args.kwargs["component_id"] == "custom-id"

    def test_create_component_handles_value_error(self, client, mock_db):
        """A ValueError from the db layer maps to HTTP 400."""
        mock_db.create_component_with_config.side_effect = ValueError("Invalid config")
        response = client.post(
            "/components",
            json={"name": "Test", "component_type": "agent"},
        )
        assert response.status_code == 400
# =============================================================================
# Get Component Tests
# =============================================================================
class TestGetComponent:
    """Tests for the GET /components/{component_id} endpoint."""

    def test_get_component_success(self, client, mock_db):
        """A known component is returned with its stored fields."""
        mock_db.get_component.return_value = {
            "component_id": "agent-1",
            "name": "Agent 1",
            "component_type": "agent",
            "created_at": 1234567890,
        }
        resp = client.get("/components/agent-1")
        assert resp.status_code == 200
        assert resp.json()["component_id"] == "agent-1"

    def test_get_component_not_found(self, client, mock_db):
        """An unknown component yields HTTP 404."""
        mock_db.get_component.return_value = None
        assert client.get("/components/nonexistent").status_code == 404
# =============================================================================
# Update Component Tests
# =============================================================================
class TestUpdateComponent:
    """Tests for the PATCH /components/{component_id} endpoint."""

    def test_update_component_success(self, client, mock_db):
        """Patching an existing component returns the updated record."""
        mock_db.get_component.return_value = {
            "component_id": "agent-1",
            "name": "Old Name",
            "component_type": "agent",
            "created_at": 1234567890,
        }
        mock_db.upsert_component.return_value = {
            "component_id": "agent-1",
            "name": "New Name",
            "component_type": "agent",
            "created_at": 1234567890,
        }
        resp = client.patch("/components/agent-1", json={"name": "New Name"})
        assert resp.status_code == 200
        assert resp.json()["name"] == "New Name"

    def test_update_component_not_found(self, client, mock_db):
        """Patching a missing component yields HTTP 404."""
        mock_db.get_component.return_value = None
        resp = client.patch("/components/nonexistent", json={"name": "New Name"})
        assert resp.status_code == 404
# =============================================================================
# Delete Component Tests
# =============================================================================
class TestDeleteComponent:
    """Tests for the DELETE /components/{component_id} endpoint."""

    def test_delete_component_success(self, client, mock_db):
        """A successful delete returns HTTP 204 with no body."""
        mock_db.delete_component.return_value = True
        assert client.delete("/components/agent-1").status_code == 204

    def test_delete_component_not_found(self, client, mock_db):
        """Deleting a missing component yields HTTP 404."""
        mock_db.delete_component.return_value = False
        assert client.delete("/components/nonexistent").status_code == 404
# =============================================================================
# List Configs Tests
# =============================================================================
class TestListConfigs:
    """Tests for the GET /components/{component_id}/configs endpoint."""

    def test_list_configs_success(self, client, mock_db):
        """All stored config versions are returned as a list."""
        mock_db.list_configs.return_value = [
            {"component_id": "agent-1", "version": 1, "stage": "draft", "config": {}, "created_at": 1234567890},
            {"component_id": "agent-1", "version": 2, "stage": "published", "config": {}, "created_at": 1234567890},
        ]
        resp = client.get("/components/agent-1/configs")
        assert resp.status_code == 200
        assert len(resp.json()) == 2

    def test_list_configs_with_include_config(self, client, mock_db):
        """The include_config query flag is forwarded to the db layer."""
        mock_db.list_configs.return_value = []
        resp = client.get("/components/agent-1/configs?include_config=false")
        assert resp.status_code == 200
        mock_db.list_configs.assert_called_once_with("agent-1", include_config=False)
# =============================================================================
# Create Config Tests
# =============================================================================
class TestCreateConfig:
    """Tests for the POST /components/{component_id}/configs endpoint."""

    def test_create_config_success(self, client, mock_db):
        """Posting a config creates a new version and returns HTTP 201."""
        mock_db.upsert_config.return_value = {
            "component_id": "agent-1",
            "version": 1,
            "config": {"name": "Agent"},
            "stage": "draft",
            "created_at": 1234567890,
        }
        resp = client.post(
            "/components/agent-1/configs",
            json={"config": {"name": "Agent"}, "stage": "draft"},
        )
        assert resp.status_code == 201
        assert resp.json()["version"] == 1

    def test_create_config_handles_value_error(self, client, mock_db):
        """A ValueError from the db layer is surfaced as HTTP 400."""
        mock_db.upsert_config.side_effect = ValueError("Invalid config")
        resp = client.post("/components/agent-1/configs", json={"config": {}})
        assert resp.status_code == 400
# =============================================================================
# Get Current Config Tests
# =============================================================================
class TestGetCurrentConfig:
    """Tests for the GET /components/{component_id}/configs/current endpoint."""

    def test_get_current_config_success(self, client, mock_db):
        """The currently active config version is returned."""
        mock_db.get_config.return_value = {
            "component_id": "agent-1",
            "version": 2,
            "config": {"name": "Agent"},
            "stage": "published",
            "created_at": 1234567890,
        }
        resp = client.get("/components/agent-1/configs/current")
        assert resp.status_code == 200
        assert resp.json()["version"] == 2

    def test_get_current_config_not_found(self, client, mock_db):
        """HTTP 404 when the component has no current config."""
        mock_db.get_config.return_value = None
        assert client.get("/components/agent-1/configs/current").status_code == 404
# =============================================================================
# Get Config Version Tests
# =============================================================================
class TestGetConfigVersion:
    """Tests for the GET /components/{component_id}/configs/{version} endpoint."""

    def test_get_config_version_success(self, client, mock_db):
        """A specific config version is returned by number."""
        mock_db.get_config.return_value = {
            "component_id": "agent-1",
            "version": 3,
            "config": {"name": "Agent v3"},
            "stage": "published",
            "created_at": 1234567890,
        }
        resp = client.get("/components/agent-1/configs/3")
        assert resp.status_code == 200
        assert resp.json()["version"] == 3

    def test_get_config_version_not_found(self, client, mock_db):
        """Requesting an unknown version yields HTTP 404."""
        mock_db.get_config.return_value = None
        assert client.get("/components/agent-1/configs/999").status_code == 404
# =============================================================================
# Update Config Tests
# =============================================================================
class TestUpdateConfig:
    """Tests for the PATCH /components/{component_id}/configs/{version} endpoint."""

    def test_update_config_success(self, client, mock_db):
        """Patching a config version returns the updated config payload."""
        mock_db.upsert_config.return_value = {
            "component_id": "agent-1",
            "version": 1,
            "config": {"name": "Updated Agent"},
            "stage": "draft",
            "created_at": 1234567890,
        }
        resp = client.patch(
            "/components/agent-1/configs/1",
            json={"config": {"name": "Updated Agent"}},
        )
        assert resp.status_code == 200
        assert resp.json()["config"]["name"] == "Updated Agent"

    def test_update_config_handles_value_error(self, client, mock_db):
        """A ValueError (e.g. editing a published config) maps to HTTP 400."""
        mock_db.upsert_config.side_effect = ValueError("Cannot update published config")
        resp = client.patch("/components/agent-1/configs/1", json={"stage": "published"})
        assert resp.status_code == 400
# =============================================================================
# Delete Config Tests
# =============================================================================
class TestDeleteConfig:
    """Tests for the DELETE /components/{component_id}/configs/{version} endpoint."""

    def test_delete_config_success(self, client, mock_db):
        """Deleting an existing config version returns HTTP 204."""
        mock_db.delete_config.return_value = True
        assert client.delete("/components/agent-1/configs/1").status_code == 204

    def test_delete_config_not_found(self, client, mock_db):
        """Deleting an unknown version yields HTTP 404."""
        mock_db.delete_config.return_value = False
        assert client.delete("/components/agent-1/configs/999").status_code == 404

    def test_delete_config_handles_value_error(self, client, mock_db):
        """A ValueError (e.g. deleting the current config) maps to HTTP 400."""
        mock_db.delete_config.side_effect = ValueError("Cannot delete current config")
        assert client.delete("/components/agent-1/configs/1").status_code == 400
# =============================================================================
# Set Current Config Tests
# =============================================================================
class TestSetCurrentConfig:
    """Tests for the POST /components/{component_id}/configs/{version}/set-current endpoint."""

    def test_set_current_config_success(self, client, mock_db):
        """Promoting a version returns the component with its new current_version."""
        mock_db.set_current_version.return_value = True
        mock_db.get_component.return_value = {
            "component_id": "agent-1",
            "name": "Agent 1",
            "component_type": "agent",
            "current_version": 3,
            "created_at": 1234567890,
        }
        resp = client.post("/components/agent-1/configs/3/set-current")
        assert resp.status_code == 200
        assert resp.json()["current_version"] == 3

    def test_set_current_config_not_found(self, client, mock_db):
        """Promoting an unknown version yields HTTP 404."""
        mock_db.set_current_version.return_value = False
        assert client.post("/components/agent-1/configs/999/set-current").status_code == 404

    def test_set_current_config_handles_value_error(self, client, mock_db):
        """A ValueError (e.g. version not published) maps to HTTP 400."""
        mock_db.set_current_version.side_effect = ValueError("Version not published")
        assert client.post("/components/agent-1/configs/1/set-current").status_code == 400
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_components_router.py",
"license": "Apache License 2.0",
"lines": 411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_registry_router.py | """
Unit tests for the Registry router.
Tests cover:
- GET /registry - List registry components (tools, models, dbs, vector_dbs, schemas, functions)
"""
from unittest.mock import MagicMock
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel
from agno.db.base import BaseDb
from agno.models.base import Model
from agno.os.routers.registry import get_registry_router
from agno.os.settings import AgnoAPISettings
from agno.registry import Registry
from agno.tools.function import Function
from agno.tools.toolkit import Toolkit
from agno.vectordb.base import VectorDb
# =============================================================================
# Fixtures
# =============================================================================
def _create_mock_db_class():
    """Build a concrete BaseDb subclass whose abstract methods are all MagicMocks."""
    stubs = {
        name: MagicMock()
        for name in dir(BaseDb)
        if getattr(getattr(BaseDb, name, None), "__isabstractmethod__", False)
    }
    return type("MockDb", (BaseDb,), stubs)
def _create_mock_vectordb_class():
    """Build a concrete VectorDb subclass whose abstract methods are all MagicMocks."""
    stubs = {
        name: MagicMock()
        for name in dir(VectorDb)
        if getattr(getattr(VectorDb, name, None), "__isabstractmethod__", False)
    }
    return type("MockVectorDb", (VectorDb,), stubs)
def _create_mock_model_class():
    """Build a concrete Model subclass whose abstract methods are all MagicMocks."""
    stubs = {
        name: MagicMock()
        for name in dir(Model)
        if getattr(getattr(Model, name, None), "__isabstractmethod__", False)
    }
    return type("MockModel", (Model,), stubs)
@pytest.fixture
def settings():
    """Return API settings with auth disabled (no security key is configured)."""
    return AgnoAPISettings()
@pytest.fixture
def empty_registry():
    """Return a registry with no registered components."""
    return Registry()
@pytest.fixture
def client_with_empty_registry(empty_registry, settings):
    """Test client for an app that mounts the registry router over an empty registry."""
    application = FastAPI()
    application.include_router(get_registry_router(registry=empty_registry, settings=settings))
    return TestClient(application)
# =============================================================================
# List Registry Tests - Empty Registry
# =============================================================================
class TestListRegistryEmpty:
    """GET /registry behaviour when nothing is registered."""

    def test_list_registry_empty(self, client_with_empty_registry):
        """An empty registry yields an empty data list and zero total count."""
        resp = client_with_empty_registry.get("/registry")
        assert resp.status_code == 200
        body = resp.json()
        assert body["data"] == []
        assert body["meta"]["total_count"] == 0

    def test_list_registry_pagination_info(self, client_with_empty_registry):
        """Pagination metadata echoes the requested page/limit."""
        resp = client_with_empty_registry.get("/registry?page=1&limit=10")
        assert resp.status_code == 200
        meta = resp.json()["meta"]
        assert meta["page"] == 1
        assert meta["limit"] == 10
        assert meta["total_pages"] == 0
# =============================================================================
# List Registry Tests - With Tools
# =============================================================================
class TestListRegistryWithTools:
    """GET /registry behaviour when tools are registered."""

    @staticmethod
    def _client_for(registry, settings):
        """Mount the registry router on a fresh app and return a test client."""
        app = FastAPI()
        app.include_router(get_registry_router(registry=registry, settings=settings))
        return TestClient(app)

    def test_list_registry_with_function(self, settings):
        """A Function tool appears in the listing as type 'tool'."""

        def my_tool(x: int) -> int:
            """A test tool."""
            return x * 2

        func = Function(name="my_tool", description="A test tool", entrypoint=my_tool)
        client = self._client_for(Registry(tools=[func]), settings)
        resp = client.get("/registry")
        assert resp.status_code == 200
        body = resp.json()
        assert body["meta"]["total_count"] >= 1
        tools = [item for item in body["data"] if item["type"] == "tool"]
        assert len(tools) >= 1
        assert "my_tool" in [t["name"] for t in tools]

    def test_list_registry_with_toolkit(self, settings):
        """A Toolkit is listed once, flagged is_toolkit, with its functions embedded."""

        class MyToolkit(Toolkit):
            def __init__(self):
                super().__init__(name="my_toolkit")
                self.description = "A test toolkit"
                self.register(self.tool_one)
                self.register(self.tool_two)

            def tool_one(self, x: int) -> int:
                """First tool."""
                return x + 1

            def tool_two(self, y: str) -> str:
                """Second tool."""
                return y.upper()

        client = self._client_for(Registry(tools=[MyToolkit()]), settings)
        resp = client.get("/registry")
        assert resp.status_code == 200
        tools = [item for item in resp.json()["data"] if item["type"] == "tool"]
        assert len(tools) == 1
        assert tools[0]["name"] == "my_toolkit"
        assert tools[0]["metadata"]["is_toolkit"] is True
        # Member functions are surfaced inside the toolkit's metadata.
        embedded = tools[0]["metadata"]["functions"]
        assert len(embedded) == 2
        embedded_names = [f["name"] for f in embedded]
        assert "tool_one" in embedded_names
        assert "tool_two" in embedded_names

    def test_list_registry_with_callable(self, settings):
        """A plain callable is exposed as a tool under its function name."""

        def simple_function(x: int) -> int:
            """A simple function."""
            return x * 2

        client = self._client_for(Registry(tools=[simple_function]), settings)
        resp = client.get("/registry")
        assert resp.status_code == 200
        tools = [item for item in resp.json()["data"] if item["type"] == "tool"]
        assert len(tools) >= 1
        assert "simple_function" in [t["name"] for t in tools]
# =============================================================================
# List Registry Tests - With Models
# =============================================================================
class TestListRegistryWithModels:
    """GET /registry behaviour when models are registered."""

    def test_list_registry_with_model(self, settings):
        """A model is listed as type 'model' with its provider in metadata."""
        model = _create_mock_model_class()(id="gpt-4")
        model.name = "GPT-4"
        model.provider = "OpenAI"
        app = FastAPI()
        app.include_router(get_registry_router(registry=Registry(models=[model]), settings=settings))
        client = TestClient(app)
        resp = client.get("/registry")
        assert resp.status_code == 200
        models = [item for item in resp.json()["data"] if item["type"] == "model"]
        assert len(models) == 1
        # The listing uses the model id, not the display name.
        assert models[0]["name"] == "gpt-4"
        assert models[0]["metadata"]["provider"] == "OpenAI"
# =============================================================================
# List Registry Tests - With Databases
# =============================================================================
class TestListRegistryWithDatabases:
    """GET /registry behaviour when databases are registered."""

    @staticmethod
    def _client_for(registry, settings):
        """Mount the registry router on a fresh app and return a test client."""
        app = FastAPI()
        app.include_router(get_registry_router(registry=registry, settings=settings))
        return TestClient(app)

    def test_list_registry_with_db(self, settings):
        """A database is listed as type 'db' with its id in metadata."""
        db = _create_mock_db_class()()
        db.id = "main-db"
        db.name = "Main Database"
        client = self._client_for(Registry(dbs=[db]), settings)
        resp = client.get("/registry")
        assert resp.status_code == 200
        dbs = [item for item in resp.json()["data"] if item["type"] == "db"]
        assert len(dbs) == 1
        assert dbs[0]["name"] == "Main Database"
        assert dbs[0]["metadata"]["db_id"] == "main-db"

    def test_list_registry_with_vector_db(self, settings):
        """A vector db is listed as type 'vector_db' with its collection in metadata."""
        vdb = _create_mock_vectordb_class()()
        vdb.id = "vectors-db"
        vdb.name = "Vectors"
        vdb.collection = "embeddings"
        client = self._client_for(Registry(vector_dbs=[vdb]), settings)
        resp = client.get("/registry")
        assert resp.status_code == 200
        vdbs = [item for item in resp.json()["data"] if item["type"] == "vector_db"]
        assert len(vdbs) == 1
        assert vdbs[0]["name"] == "Vectors"
        assert vdbs[0]["metadata"]["collection"] == "embeddings"
# =============================================================================
# List Registry Tests - With Schemas
# =============================================================================
class TestListRegistryWithSchemas:
    """GET /registry behaviour when Pydantic schemas are registered."""

    @staticmethod
    def _client_for(registry, settings):
        """Mount the registry router on a fresh app and return a test client."""
        app = FastAPI()
        app.include_router(get_registry_router(registry=registry, settings=settings))
        return TestClient(app)

    def test_list_registry_with_schema(self, settings):
        """A schema is listed as type 'schema' under its class name."""

        class UserInput(BaseModel):
            name: str
            age: int

        client = self._client_for(Registry(schemas=[UserInput]), settings)
        resp = client.get("/registry")
        assert resp.status_code == 200
        schemas = [item for item in resp.json()["data"] if item["type"] == "schema"]
        assert len(schemas) == 1
        assert schemas[0]["name"] == "UserInput"

    def test_list_registry_with_schema_includes_json_schema(self, settings):
        """With include_schema=true the JSON schema is embedded in metadata."""

        class UserInput(BaseModel):
            name: str
            age: int

        client = self._client_for(Registry(schemas=[UserInput]), settings)
        resp = client.get("/registry?include_schema=true")
        assert resp.status_code == 200
        schemas = [item for item in resp.json()["data"] if item["type"] == "schema"]
        assert len(schemas) == 1
        assert "schema" in schemas[0]["metadata"]
        assert "properties" in schemas[0]["metadata"]["schema"]
# =============================================================================
# List Registry Tests - Filtering
# =============================================================================
class TestListRegistryFiltering:
    """GET /registry filter parameters."""

    @staticmethod
    def _client_for(registry, settings):
        """Mount the registry router on a fresh app and return a test client."""
        app = FastAPI()
        app.include_router(get_registry_router(registry=registry, settings=settings))
        return TestClient(app)

    def test_list_registry_filter_by_type(self, settings):
        """resource_type restricts results to that component type."""

        def my_tool():
            pass

        db = _create_mock_db_class()()
        db.id = "test-db"
        db.name = "Test DB"
        client = self._client_for(Registry(tools=[my_tool], dbs=[db]), settings)
        resp = client.get("/registry?resource_type=db")
        assert resp.status_code == 200
        body = resp.json()
        # Only db entries survive the filter; the tool is excluded.
        assert all(item["type"] == "db" for item in body["data"])
        assert body["meta"]["total_count"] == 1

    def test_list_registry_filter_by_name(self, settings):
        """name performs a partial match against component names."""

        def search_tool():
            """Search tool."""
            pass

        def fetch_tool():
            """Fetch tool."""
            pass

        client = self._client_for(Registry(tools=[search_tool, fetch_tool]), settings)
        resp = client.get("/registry?name=search")
        assert resp.status_code == 200
        body = resp.json()
        assert body["meta"]["total_count"] == 1
        assert "search" in body["data"][0]["name"].lower()
# =============================================================================
# List Registry Tests - Pagination
# =============================================================================
class TestListRegistryPagination:
    """GET /registry pagination."""

    def test_list_registry_pagination(self, settings):
        """Results are split across pages; the last page holds the remainder."""
        tools = [Function(name=f"tool_{i:02d}", description=f"Tool {i}") for i in range(25)]
        app = FastAPI()
        app.include_router(get_registry_router(registry=Registry(tools=tools), settings=settings))
        client = TestClient(app)
        # Page 1: a full page of 10 out of 25.
        resp = client.get("/registry?page=1&limit=10")
        assert resp.status_code == 200
        body = resp.json()
        assert len(body["data"]) == 10
        assert body["meta"]["page"] == 1
        assert body["meta"]["total_count"] == 25
        assert body["meta"]["total_pages"] == 3
        # Page 2: another full page.
        resp = client.get("/registry?page=2&limit=10")
        assert resp.status_code == 200
        body = resp.json()
        assert len(body["data"]) == 10
        assert body["meta"]["page"] == 2
        # Page 3: only the 5 remaining entries.
        resp = client.get("/registry?page=3&limit=10")
        assert resp.status_code == 200
        body = resp.json()
        assert len(body["data"]) == 5
        assert body["meta"]["page"] == 3
# =============================================================================
# List Registry Tests - Mixed Components
# =============================================================================
class TestListRegistryMixed:
    """GET /registry with multiple component types registered at once."""

    def test_list_registry_mixed_components(self, settings):
        """All five component types appear in a combined listing."""

        def my_tool():
            """A tool."""
            pass

        class MySchema(BaseModel):
            field: str

        db = _create_mock_db_class()()
        db.id = "test-db"
        db.name = "Test DB"
        vdb = _create_mock_vectordb_class()()
        vdb.id = "test-vdb"
        vdb.name = "Test VDB"
        vdb.collection = "test"
        model = _create_mock_model_class()(id="test-model")
        model.name = "Test Model"
        model.provider = "Test"
        registry = Registry(
            tools=[my_tool],
            models=[model],
            dbs=[db],
            vector_dbs=[vdb],
            schemas=[MySchema],
        )
        app = FastAPI()
        app.include_router(get_registry_router(registry=registry, settings=settings))
        client = TestClient(app)
        resp = client.get("/registry")
        assert resp.status_code == 200
        seen_types = {item["type"] for item in resp.json()["data"]}
        # Every registered kind must be represented in the listing.
        assert {"tool", "model", "db", "vector_db", "schema"} <= seen_types

    def test_list_registry_sorted_by_type_and_name(self, settings):
        """Listing order is stable: sorted by component type, then name."""

        def z_tool():
            pass

        def a_tool():
            pass

        db = _create_mock_db_class()()
        db.id = "a-db"
        db.name = "A DB"
        registry = Registry(tools=[z_tool, a_tool], dbs=[db])
        app = FastAPI()
        app.include_router(get_registry_router(registry=registry, settings=settings))
        client = TestClient(app)
        resp = client.get("/registry")
        assert resp.status_code == 200
        # "db" sorts before "tool", so the type sequence must be non-decreasing.
        types = [item["type"] for item in resp.json()["data"]]
        assert types == sorted(types)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_registry_router.py",
"license": "Apache License 2.0",
"lines": 411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_agentos_db.py | """Unit tests for AgentOS db parameter propagation."""
from unittest.mock import patch
import pytest
from agno.agent.agent import Agent
from agno.db.in_memory import InMemoryDb
from agno.os import AgentOS
from agno.team.team import Team
from agno.workflow.workflow import Workflow
@pytest.fixture
def default_db():
    """In-memory db acting as the AgentOS-level default."""
    return InMemoryDb()
@pytest.fixture
def secondary_db():
    """Independent in-memory db assigned directly to individual components."""
    return InMemoryDb()
def test_db_propagates_to_agent_team_workflow_without_db(default_db):
    """AgentOS assigns its db to agents/teams/workflows that have none of their own."""
    agent = Agent(name="test-agent", id="test-agent-id")
    team = Team(name="test-team", id="test-team-id", members=[agent])
    workflow = Workflow(name="test-workflow", id="test-workflow-id")
    # Precondition: nothing has a db before AgentOS wiring runs.
    for component in (agent, team, workflow):
        assert component.db is None
    AgentOS(agents=[agent], teams=[team], workflows=[workflow], db=default_db)
    for component in (agent, team, workflow):
        assert component.db is default_db
def test_db_does_not_override_agent_team_workflow_db(default_db, secondary_db):
    """Components keep their own db; the AgentOS default does not overwrite it."""
    agent = Agent(name="test-agent", id="test-agent-id", db=secondary_db)
    team = Team(name="test-team", id="test-team-id", members=[agent], db=secondary_db)
    workflow = Workflow(name="test-workflow", id="test-workflow-id", db=secondary_db)
    # Precondition: each component starts with its own db.
    for component in (agent, team, workflow):
        assert component.db is secondary_db
    agent_os = AgentOS(agents=[agent], teams=[team], workflows=[workflow], db=default_db)
    assert agent_os.db is default_db
    for component in (agent, team, workflow):
        assert component.db is secondary_db
@patch("agno.os.app.setup_tracing_for_os")
def test_tracing_uses_default_db(mock_setup_tracing, default_db):
    """Enabling tracing wires it to the AgentOS default db exactly once."""
    lone_agent = Agent(name="test-agent", id="test-agent-id")
    AgentOS(agents=[lone_agent], db=default_db, tracing=True)
    mock_setup_tracing.assert_called_once_with(db=default_db)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_agentos_db.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_utils.py | """Unit tests for OS utility functions."""
from datetime import datetime, timezone
from agno.os.utils import to_utc_datetime
def test_returns_none_for_none_input():
    """None passes through unchanged."""
    result = to_utc_datetime(None)
    assert result is None
def test_converts_int_timestamp():
    """An integer Unix timestamp becomes an aware UTC datetime."""
    result = to_utc_datetime(1704067200)  # 2024-01-01 00:00:00 UTC
    assert isinstance(result, datetime)
    assert result.tzinfo == timezone.utc
    assert (result.year, result.month, result.day) == (2024, 1, 1)
def test_converts_float_timestamp():
    """A float timestamp keeps its fractional seconds as microseconds."""
    result = to_utc_datetime(1704067200.123456)
    assert isinstance(result, datetime)
    assert result.tzinfo == timezone.utc
    # The fractional part must survive conversion.
    assert result.microsecond > 0
def test_preserves_utc_datetime():
    """An already-UTC datetime is returned as the same object."""
    aware = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
    assert to_utc_datetime(aware) is aware
def test_adds_utc_to_naive_datetime():
    """A naive datetime is tagged as UTC without shifting the wall-clock time."""
    naive = datetime(2024, 1, 1, 12, 0, 0)
    result = to_utc_datetime(naive)
    assert result is not None
    assert result.tzinfo == timezone.utc
    assert (result.year, result.month, result.day, result.hour) == (2024, 1, 1, 12)
def test_preserves_non_utc_timezone():
    """A datetime carrying a non-UTC offset compares equal after conversion."""
    from datetime import timedelta

    ist = timezone(timedelta(hours=5, minutes=30))  # +05:30 offset (IST)
    aware = datetime(2024, 1, 1, 12, 0, 0, tzinfo=ist)
    # Equality holds whether the offset is preserved or normalized to UTC.
    assert to_utc_datetime(aware) == aware
def test_handles_zero_timestamp():
    """Timestamp 0 maps to the Unix epoch in UTC (not treated as falsy/None)."""
    result = to_utc_datetime(0)
    assert isinstance(result, datetime)
    assert result.tzinfo == timezone.utc
    assert (result.year, result.month, result.day) == (1970, 1, 1)
def test_handles_negative_timestamp():
    """Negative timestamps (before the epoch) convert correctly."""
    result = to_utc_datetime(-86400)  # one day before the Unix epoch
    assert isinstance(result, datetime)
    assert result.tzinfo == timezone.utc
    assert (result.year, result.month, result.day) == (1969, 12, 31)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_utils.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/registry/test_registry.py | """
Unit tests for Registry class.
Tests cover:
- Registry initialization with various components
- _entrypoint_lookup property for tools
- rehydrate_function() for reconstructing Functions
- get_schema() for retrieving schemas by name
"""
from typing import Optional
from unittest.mock import MagicMock
import pytest
from pydantic import BaseModel
from agno.registry.registry import Registry
from agno.tools.function import Function
from agno.tools.toolkit import Toolkit
# =============================================================================
# Test Schemas
# =============================================================================
class SampleInputSchema(BaseModel):
    """Sample input schema used to exercise registry schema registration/lookup."""

    query: str
    limit: int = 10
class SampleOutputSchema(BaseModel):
    """Sample output schema used to exercise registry schema registration/lookup."""

    result: str
    count: int
class AnotherSchema(BaseModel):
    """Additional schema with an optional field, for multi-schema lookups."""

    name: str
    value: Optional[float] = None
# =============================================================================
# Test Functions
# =============================================================================
def sample_function(x: int, y: int) -> int:
    """A sample function for testing."""
    total = x + y
    return total
def another_function(text: str) -> str:
    """Another sample function."""
    upper_text = text.upper()
    return upper_text
def search_function(query: str) -> str:
    """A search function for testing."""
    # Equivalent to the f-string form: prefix the query with a fixed label.
    return "Results for: " + query
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def basic_registry():
    """Create a basic registry with no components."""
    # No tools/models/dbs/schemas: used to verify empty-registry behavior.
    return Registry(
        name="Basic Registry",
        description="A basic test registry",
    )
@pytest.fixture
def mock_model():
    """Create a mock model."""
    # Only the .id attribute is relied on by the tests below.
    model = MagicMock()
    model.id = "gpt-4o-mini"
    return model
@pytest.fixture
def mock_db():
    """Create a mock database."""
    # Registry db lookup is by .id, so only that attribute is set.
    db = MagicMock()
    db.id = "test-db"
    return db
@pytest.fixture
def mock_vector_db():
    """Create a mock vector database."""
    vdb = MagicMock()
    vdb.id = "test-vectordb"
    return vdb
@pytest.fixture
def function_tool():
    """Create a Function tool."""
    # Wraps the module-level sample_function into a Function object.
    return Function.from_callable(sample_function)
@pytest.fixture
def mock_toolkit():
    """Create a mock Toolkit with functions."""
    toolkit = MagicMock(spec=Toolkit)
    functions = {}
    # Build the two mock functions in a loop; bind the loop variable as a
    # default argument so each lambda keeps its own index.
    for index in (1, 2):
        func_name = f"toolkit_func_{index}"
        mock_func = MagicMock(spec=Function)
        mock_func.name = func_name
        mock_func.entrypoint = lambda index=index: f"func{index} result"
        functions[func_name] = mock_func
    toolkit.functions = functions
    return toolkit
@pytest.fixture
def registry_with_tools(function_tool, mock_toolkit):
    """Create a registry with various tools."""
    # Mixes all three accepted tool shapes: Function, Toolkit, raw callable.
    return Registry(
        name="Tools Registry",
        tools=[function_tool, mock_toolkit, another_function],
    )
@pytest.fixture
def registry_with_schemas():
    """Create a registry with schemas."""
    return Registry(
        name="Schema Registry",
        schemas=[SampleInputSchema, SampleOutputSchema, AnotherSchema],
    )
@pytest.fixture
def full_registry(mock_model, mock_db, mock_vector_db, function_tool):
    """Create a registry with all component types."""
    # One of every registerable component kind, for end-to-end lookups.
    return Registry(
        name="Full Registry",
        description="A registry with all components",
        tools=[function_tool, search_function],
        models=[mock_model],
        dbs=[mock_db],
        vector_dbs=[mock_vector_db],
        schemas=[SampleInputSchema, SampleOutputSchema],
    )
# =============================================================================
# Initialization Tests
# =============================================================================
class TestRegistryInit:
    """Tests for Registry initialization."""

    def test_init_basic(self, basic_registry):
        """Test basic registry initialization."""
        assert basic_registry.name == "Basic Registry"
        assert basic_registry.description == "A basic test registry"
        # An id is auto-generated when none is supplied.
        assert basic_registry.id is not None
        # All component collections default to empty lists, not None.
        assert basic_registry.tools == []
        assert basic_registry.models == []
        assert basic_registry.dbs == []
        assert basic_registry.vector_dbs == []
        assert basic_registry.schemas == []
        assert basic_registry.functions == []

    def test_init_generates_unique_id(self):
        """Test that each registry gets a unique ID."""
        reg1 = Registry()
        reg2 = Registry()
        assert reg1.id != reg2.id

    def test_init_with_custom_id(self):
        """Test registry with custom ID."""
        reg = Registry(id="custom-id-123")
        assert reg.id == "custom-id-123"

    def test_init_with_tools(self, function_tool):
        """Test registry initialization with tools."""
        # Tools may be Function objects or raw callables.
        reg = Registry(tools=[function_tool, sample_function])
        assert len(reg.tools) == 2

    def test_init_with_models(self, mock_model):
        """Test registry initialization with models."""
        reg = Registry(models=[mock_model])
        assert len(reg.models) == 1
        assert reg.models[0] == mock_model

    def test_init_with_dbs(self, mock_db):
        """Test registry initialization with databases."""
        reg = Registry(dbs=[mock_db])
        assert len(reg.dbs) == 1
        assert reg.dbs[0] == mock_db

    def test_init_with_vector_dbs(self, mock_vector_db):
        """Test registry initialization with vector databases."""
        reg = Registry(vector_dbs=[mock_vector_db])
        assert len(reg.vector_dbs) == 1
        assert reg.vector_dbs[0] == mock_vector_db

    def test_init_with_schemas(self):
        """Test registry initialization with schemas."""
        reg = Registry(schemas=[SampleInputSchema, SampleOutputSchema])
        assert len(reg.schemas) == 2
        assert SampleInputSchema in reg.schemas
        assert SampleOutputSchema in reg.schemas

    def test_init_with_functions(self):
        """Test registry initialization with functions."""
        reg = Registry(functions=[sample_function, another_function])
        assert len(reg.functions) == 2
        assert sample_function in reg.functions
        assert another_function in reg.functions

    def test_init_full_registry(self, full_registry):
        """Test registry with all component types."""
        assert full_registry.name == "Full Registry"
        assert full_registry.description == "A registry with all components"
        assert len(full_registry.tools) == 2
        assert len(full_registry.models) == 1
        assert len(full_registry.dbs) == 1
        assert len(full_registry.vector_dbs) == 1
        assert len(full_registry.schemas) == 2
# =============================================================================
# _entrypoint_lookup Tests
# =============================================================================
class TestEntrypointLookup:
    """Tests for Registry._entrypoint_lookup property."""

    def test_entrypoint_lookup_with_function(self, function_tool):
        """Test entrypoint lookup with Function tool."""
        reg = Registry(tools=[function_tool])
        lookup = reg._entrypoint_lookup
        # Function tools are keyed by their function name.
        assert "sample_function" in lookup
        assert lookup["sample_function"] == function_tool.entrypoint

    def test_entrypoint_lookup_with_callable(self):
        """Test entrypoint lookup with raw callable."""
        reg = Registry(tools=[sample_function, another_function])
        lookup = reg._entrypoint_lookup
        # Raw callables map to themselves.
        assert "sample_function" in lookup
        assert lookup["sample_function"] == sample_function
        assert "another_function" in lookup
        assert lookup["another_function"] == another_function

    def test_entrypoint_lookup_with_toolkit(self, mock_toolkit):
        """Test entrypoint lookup with Toolkit."""
        reg = Registry(tools=[mock_toolkit])
        lookup = reg._entrypoint_lookup
        # Toolkits are flattened: each contained function gets its own entry.
        assert "toolkit_func_1" in lookup
        assert "toolkit_func_2" in lookup

    def test_entrypoint_lookup_mixed_tools(self, registry_with_tools):
        """Test entrypoint lookup with mixed tool types."""
        lookup = registry_with_tools._entrypoint_lookup
        # Function tool
        assert "sample_function" in lookup
        # Toolkit functions
        assert "toolkit_func_1" in lookup
        assert "toolkit_func_2" in lookup
        # Raw callable
        assert "another_function" in lookup

    def test_entrypoint_lookup_empty_registry(self, basic_registry):
        """Test entrypoint lookup with no tools."""
        lookup = basic_registry._entrypoint_lookup
        assert lookup == {}

    def test_entrypoint_lookup_is_cached(self, function_tool):
        """Test that entrypoint lookup is cached."""
        reg = Registry(tools=[function_tool])
        lookup1 = reg._entrypoint_lookup
        lookup2 = reg._entrypoint_lookup
        # Should return the same cached object
        assert lookup1 is lookup2
# =============================================================================
# rehydrate_function() Tests
# =============================================================================
class TestRehydrateFunction:
    """Tests for Registry.rehydrate_function() method.

    Rehydration round-trips a Function through its serialized dict form and
    re-attaches the entrypoint by name from the registry's tools.
    """

    def test_rehydrate_function_basic(self, function_tool):
        """Test basic function rehydration."""
        reg = Registry(tools=[function_tool])
        # Serialize the function
        func_dict = function_tool.to_dict()
        # Rehydrate
        rehydrated = reg.rehydrate_function(func_dict)
        assert rehydrated.name == "sample_function"
        assert rehydrated.entrypoint is not None

    def test_rehydrate_function_restores_entrypoint(self, function_tool):
        """Test that rehydration restores the entrypoint."""
        reg = Registry(tools=[function_tool])
        func_dict = function_tool.to_dict()
        rehydrated = reg.rehydrate_function(func_dict)
        # Entrypoint should be the same as original
        assert rehydrated.entrypoint == function_tool.entrypoint

    def test_rehydrate_function_from_callable(self):
        """Test rehydrating a function registered as callable."""
        reg = Registry(tools=[sample_function])
        # Create a Function from the callable and serialize it
        func = Function.from_callable(sample_function)
        func_dict = func.to_dict()
        # Rehydrate
        rehydrated = reg.rehydrate_function(func_dict)
        assert rehydrated.name == "sample_function"
        assert rehydrated.entrypoint == sample_function

    def test_rehydrate_function_not_in_registry(self, basic_registry):
        """Test rehydrating a function not in registry."""
        func = Function.from_callable(sample_function)
        func_dict = func.to_dict()
        # Rehydrate with empty registry
        rehydrated = basic_registry.rehydrate_function(func_dict)
        # Function is created but entrypoint is None
        assert rehydrated.name == "sample_function"
        assert rehydrated.entrypoint is None

    def test_rehydrate_function_preserves_metadata(self, function_tool):
        """Test that rehydration preserves function metadata."""
        reg = Registry(tools=[function_tool])
        func_dict = function_tool.to_dict()
        rehydrated = reg.rehydrate_function(func_dict)
        assert rehydrated.name == function_tool.name
        assert rehydrated.description == function_tool.description

    def test_rehydrate_multiple_functions(self):
        """Test rehydrating multiple functions."""
        reg = Registry(tools=[sample_function, another_function, search_function])
        # Rehydrate each
        funcs = [
            Function.from_callable(sample_function),
            Function.from_callable(another_function),
            Function.from_callable(search_function),
        ]
        for func in funcs:
            rehydrated = reg.rehydrate_function(func.to_dict())
            assert rehydrated.entrypoint is not None
# =============================================================================
# get_schema() Tests
# =============================================================================
class TestGetSchema:
    """Tests for Registry.get_schema() method.

    Schemas are looked up by their class name, exactly as registered.
    """

    def test_get_schema_found(self, registry_with_schemas):
        """Test getting a schema that exists."""
        schema = registry_with_schemas.get_schema("SampleInputSchema")
        assert schema is SampleInputSchema

    def test_get_schema_multiple(self, registry_with_schemas):
        """Test getting different schemas."""
        input_schema = registry_with_schemas.get_schema("SampleInputSchema")
        output_schema = registry_with_schemas.get_schema("SampleOutputSchema")
        another = registry_with_schemas.get_schema("AnotherSchema")
        assert input_schema is SampleInputSchema
        assert output_schema is SampleOutputSchema
        assert another is AnotherSchema

    def test_get_schema_not_found(self, registry_with_schemas):
        """Test getting a schema that doesn't exist."""
        schema = registry_with_schemas.get_schema("NonExistentSchema")
        assert schema is None

    def test_get_schema_empty_registry(self, basic_registry):
        """Test getting schema from empty registry."""
        schema = basic_registry.get_schema("SampleInputSchema")
        assert schema is None

    def test_get_schema_case_sensitive(self, registry_with_schemas):
        """Test that schema lookup is case sensitive."""
        # Correct case
        found = registry_with_schemas.get_schema("SampleInputSchema")
        assert found is SampleInputSchema
        # Bug fix: the lookups previously used "testinputschema" /
        # "TESTINPUTSCHEMA", which differ from the registered name by more
        # than just case, so the test passed without exercising case
        # sensitivity. Use case-variants of the actual schema name.
        not_found = registry_with_schemas.get_schema("sampleinputschema")
        assert not_found is None
        not_found2 = registry_with_schemas.get_schema("SAMPLEINPUTSCHEMA")
        assert not_found2 is None
# =============================================================================
# Integration Tests
# =============================================================================
class TestRegistryIntegration:
    """Integration tests for Registry with Agent/Team/Workflow."""

    def test_registry_with_agent_from_dict(self, full_registry):
        """Test using registry with Agent.from_dict."""
        from agno.agent.agent import Agent

        # Create agent config with tools - include parameters to match Function requirements
        config = {
            "id": "test-agent",
            "name": "Test Agent",
            "tools": [
                {
                    "name": "sample_function",
                    "description": "A sample function",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "x": {"type": "integer"},
                            "y": {"type": "integer"},
                        },
                    },
                }
            ],
        }
        # Create agent using registry
        agent = Agent.from_dict(config, registry=full_registry)
        assert agent.id == "test-agent"
        # Tools should be rehydrated
        if agent.tools:
            assert len(agent.tools) == 1

    def test_registry_schema_with_agent(self):
        """Test registry schema lookup with agent config."""
        reg = Registry(schemas=[SampleInputSchema, SampleOutputSchema])
        # Simulate what from_dict does for schemas
        schema_name = "SampleInputSchema"
        schema = reg.get_schema(schema_name)
        assert schema is SampleInputSchema
        assert issubclass(schema, BaseModel)

    def test_empty_registry_handles_gracefully(self):
        """Test that empty registry handles operations gracefully."""
        reg = Registry()
        # Should not raise errors
        lookup = reg._entrypoint_lookup
        assert lookup == {}
        schema = reg.get_schema("SomeSchema")
        assert schema is None
        # Rehydrate with no matching entrypoint - need valid parameters
        func_dict = {
            "name": "unknown_func",
            "description": "Unknown",
            "parameters": {"type": "object", "properties": {}},
        }
        rehydrated = reg.rehydrate_function(func_dict)
        assert rehydrated.entrypoint is None
# =============================================================================
# get_function() Tests
# =============================================================================
class TestGetFunction:
    """Tests for Registry.get_function() method.

    Functions are looked up by their __name__, exactly as registered.
    """

    def test_get_function_found(self):
        """Test getting a function that exists."""
        reg = Registry(functions=[sample_function, another_function])
        func = reg.get_function("sample_function")
        assert func is sample_function

    def test_get_function_multiple(self):
        """Test getting different functions."""
        reg = Registry(functions=[sample_function, another_function, search_function])
        func1 = reg.get_function("sample_function")
        func2 = reg.get_function("another_function")
        func3 = reg.get_function("search_function")
        assert func1 is sample_function
        assert func2 is another_function
        assert func3 is search_function

    def test_get_function_not_found(self):
        """Test getting a function that doesn't exist."""
        reg = Registry(functions=[sample_function])
        func = reg.get_function("nonexistent_function")
        assert func is None

    def test_get_function_empty_registry(self, basic_registry):
        """Test getting function from empty registry."""
        func = basic_registry.get_function("sample_function")
        assert func is None

    def test_get_function_case_sensitive(self):
        """Test that function lookup is case sensitive."""
        reg = Registry(functions=[sample_function])
        # Correct name
        found = reg.get_function("sample_function")
        assert found is sample_function
        # Wrong case
        not_found = reg.get_function("Sample_Function")
        assert not_found is None
        not_found2 = reg.get_function("SAMPLE_FUNCTION")
        assert not_found2 is None

    def test_get_function_with_lambda(self):
        """Test that lambdas work (they have __name__ = '<lambda>')."""
        my_lambda = lambda x: x * 2  # noqa: E731
        reg = Registry(functions=[my_lambda])
        # Lambda functions have __name__ = '<lambda>'
        func = reg.get_function("<lambda>")
        assert func is my_lambda
# =============================================================================
# get_db() Tests
# =============================================================================
class TestGetDb:
    """Tests for Registry.get_db() method.

    Databases are looked up by their .id attribute, exactly as registered.
    """

    def test_get_db_found(self, mock_db):
        """Test getting a database that exists."""
        reg = Registry(dbs=[mock_db])
        db = reg.get_db("test-db")
        assert db is mock_db

    def test_get_db_multiple(self):
        """Test getting different databases."""
        db1 = MagicMock()
        db1.id = "db-1"
        db2 = MagicMock()
        db2.id = "db-2"
        db3 = MagicMock()
        db3.id = "db-3"
        reg = Registry(dbs=[db1, db2, db3])
        assert reg.get_db("db-1") is db1
        assert reg.get_db("db-2") is db2
        assert reg.get_db("db-3") is db3

    def test_get_db_not_found(self, mock_db):
        """Test getting a database that doesn't exist."""
        reg = Registry(dbs=[mock_db])
        db = reg.get_db("nonexistent-db")
        assert db is None

    def test_get_db_empty_registry(self, basic_registry):
        """Test getting database from empty registry."""
        db = basic_registry.get_db("some-db")
        assert db is None

    def test_get_db_case_sensitive(self, mock_db):
        """Test that database lookup is case sensitive."""
        reg = Registry(dbs=[mock_db])
        # Correct id
        found = reg.get_db("test-db")
        assert found is mock_db
        # Wrong case
        not_found = reg.get_db("Test-Db")
        assert not_found is None
        not_found2 = reg.get_db("TEST-DB")
        assert not_found2 is None
# =============================================================================
# get_agent() / get_team() Tests
# =============================================================================
class TestGetAgent:
    """Tests for Registry.get_agent() method."""

    def test_get_agent_found(self):
        """Test getting an agent that exists."""
        agent = MagicMock()
        agent.id = "agent-1"
        reg = Registry(agents=[agent])
        result = reg.get_agent("agent-1")
        assert result is agent

    def test_get_agent_multiple(self):
        """Test getting different agents."""
        a1 = MagicMock()
        a1.id = "a1"
        a2 = MagicMock()
        a2.id = "a2"
        reg = Registry(agents=[a1, a2])
        assert reg.get_agent("a1") is a1
        assert reg.get_agent("a2") is a2

    def test_get_agent_not_found(self):
        """Test getting an agent that doesn't exist."""
        agent = MagicMock()
        agent.id = "agent-1"
        reg = Registry(agents=[agent])
        assert reg.get_agent("nonexistent") is None

    def test_get_agent_empty_registry(self, basic_registry):
        """Test getting agent from registry with no agents."""
        assert basic_registry.get_agent("any-id") is None

    def test_get_agent_no_id_attribute(self):
        """Test agent without id attribute is skipped."""
        # spec=[] makes the mock raise AttributeError for any attribute,
        # simulating an agent object with no .id.
        agent = MagicMock(spec=[])  # no attributes
        reg = Registry(agents=[agent])
        assert reg.get_agent("anything") is None
class TestGetTeam:
    """Tests for Registry.get_team() method."""

    def test_get_team_found(self):
        """Test getting a team that exists."""
        team = MagicMock()
        team.id = "team-1"
        reg = Registry(teams=[team])
        result = reg.get_team("team-1")
        assert result is team

    def test_get_team_not_found(self):
        """Test getting a team that doesn't exist."""
        team = MagicMock()
        team.id = "team-1"
        reg = Registry(teams=[team])
        assert reg.get_team("nonexistent") is None

    def test_get_team_empty_registry(self, basic_registry):
        """Test getting team from registry with no teams."""
        assert basic_registry.get_team("any-id") is None
# =============================================================================
# get_agent_ids() / get_team_ids() / get_all_component_ids() Tests
# =============================================================================
class TestGetComponentIds:
    """Tests for Registry ID set methods.

    Covers get_agent_ids(), get_team_ids(), and get_all_component_ids(),
    all of which return sets of string IDs.
    """

    def test_get_agent_ids(self):
        """Test getting all agent IDs."""
        a1 = MagicMock()
        a1.id = "agent-1"
        a2 = MagicMock()
        a2.id = "agent-2"
        reg = Registry(agents=[a1, a2])
        assert reg.get_agent_ids() == {"agent-1", "agent-2"}

    def test_get_agent_ids_empty(self, basic_registry):
        """Test agent IDs from empty registry."""
        assert basic_registry.get_agent_ids() == set()

    def test_get_agent_ids_skips_none(self):
        """Test that agents without id are excluded."""
        a1 = MagicMock()
        a1.id = "agent-1"
        a2 = MagicMock(spec=[])  # no id attribute
        reg = Registry(agents=[a1, a2])
        assert reg.get_agent_ids() == {"agent-1"}

    def test_get_team_ids(self):
        """Test getting all team IDs."""
        t1 = MagicMock()
        t1.id = "team-1"
        t2 = MagicMock()
        t2.id = "team-2"
        reg = Registry(teams=[t1, t2])
        assert reg.get_team_ids() == {"team-1", "team-2"}

    def test_get_team_ids_empty(self, basic_registry):
        """Test team IDs from empty registry."""
        assert basic_registry.get_team_ids() == set()

    def test_get_all_component_ids(self):
        """Test getting combined agent + team IDs."""
        a1 = MagicMock()
        a1.id = "agent-1"
        t1 = MagicMock()
        t1.id = "team-1"
        reg = Registry(agents=[a1], teams=[t1])
        assert reg.get_all_component_ids() == {"agent-1", "team-1"}

    def test_get_all_component_ids_no_overlap(self):
        """Test that agent and team IDs are unioned, not intersected."""
        a1 = MagicMock()
        a1.id = "shared-id"
        t1 = MagicMock()
        t1.id = "shared-id"
        reg = Registry(agents=[a1], teams=[t1])
        # Same ID from both should appear once
        assert reg.get_all_component_ids() == {"shared-id"}

    def test_get_all_component_ids_empty(self, basic_registry):
        """Test combined IDs from empty registry."""
        assert basic_registry.get_all_component_ids() == set()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/registry/test_registry.py",
"license": "Apache License 2.0",
"lines": 571,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_team_config.py | """
Unit tests for Team configuration serialization and persistence.
Tests cover:
- to_dict(): Serialization of team to dictionary
- from_dict(): Deserialization of team from dictionary
- save(): Saving team to database (including members)
- load(): Loading team from database (with hydrated members)
- delete(): Deleting team from database
- get_team_by_id(): Helper function to get team by ID
- get_teams(): Helper function to get all teams
"""
from typing import Any, Dict
from unittest.mock import MagicMock, patch
import pytest
from agno.agent.agent import Agent
from agno.db.base import BaseDb, ComponentType
from agno.registry import Registry
from agno.session import TeamSession
from agno.team.team import Team, get_team_by_id, get_teams
# =============================================================================
# Fixtures
# =============================================================================
def _create_mock_db_class():
    """Create a concrete BaseDb subclass with all abstract methods stubbed."""
    # Find every abstract method declared on BaseDb and replace each one
    # with a MagicMock so the generated class can be instantiated.
    stubs = {
        name: MagicMock()
        for name in dir(BaseDb)
        if getattr(getattr(BaseDb, name, None), "__isabstractmethod__", False)
    }
    return type("MockDb", (BaseDb,), stubs)
@pytest.fixture
def mock_db():
    """Create a mock database instance that passes isinstance(db, BaseDb)."""
    MockDbClass = _create_mock_db_class()
    db = MockDbClass()
    # Configure common mock methods
    # Return values mirror the shapes the Team persistence code consumes.
    db.upsert_component = MagicMock()
    db.upsert_config = MagicMock(return_value={"version": 1})
    db.delete_component = MagicMock(return_value=True)
    db.get_config = MagicMock()
    db.list_components = MagicMock()
    db.upsert_component_link = MagicMock()
    db.load_component_graph = MagicMock()
    db.to_dict = MagicMock(return_value={"type": "postgres", "id": "test-db"})
    return db
@pytest.fixture
def basic_team():
    """Create a basic team for testing."""
    # Empty member list: these tests focus on the team's own config.
    return Team(
        id="test-team",
        name="Test Team",
        description="A test team for unit testing",
        members=[],
    )
@pytest.fixture
def member_agent():
    """Create a member agent for team testing."""
    return Agent(
        id="member-agent",
        name="Member Agent",
        role="A member agent",
    )
@pytest.fixture
def team_with_members(member_agent):
    """Create a team with member agents."""
    # Second member defined inline; first comes from the member_agent fixture.
    agent2 = Agent(
        id="agent-2",
        name="Agent 2",
        role="Second agent",
    )
    return Team(
        id="team-with-members",
        name="Team With Members",
        members=[member_agent, agent2],
    )
@pytest.fixture
def team_with_model():
    """Create a team with a real model."""
    # Import kept local so the module can be collected without the OpenAI extra.
    from agno.models.openai import OpenAIChat

    model = OpenAIChat(id="gpt-4o-mini")
    return Team(
        id="model-team",
        name="Model Team",
        model=model,
        members=[],
    )
@pytest.fixture
def team_with_settings():
    """Create a team with various settings configured."""
    # Every value here is non-default so serialization tests can assert
    # the settings are actually written out.
    return Team(
        id="settings-team",
        name="Settings Team",
        description="Team with many settings",
        instructions="Work together efficiently",
        markdown=True,
        debug_mode=True,
        retries=3,
        respond_directly=True,
        delegate_to_all_members=True,
        add_datetime_to_context=True,
        members=[],
    )
@pytest.fixture
def sample_team_config() -> Dict[str, Any]:
    """Sample team configuration dictionary."""
    # Shape matches what Team.to_dict() produces for a simple team.
    return {
        "id": "sample-team",
        "name": "Sample Team",
        "description": "A sample team",
        "instructions": "Be helpful",
        "markdown": True,
    }
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestTeamToDict:
    """Tests for Team.to_dict() method.

    Verifies that serialization includes configured values, stores member
    agents/teams as id references, and omits default values.
    """

    def test_to_dict_basic_team(self, basic_team):
        """Test to_dict with a basic team."""
        config = basic_team.to_dict()
        assert config["id"] == "test-team"
        assert config["name"] == "Test Team"
        assert config["description"] == "A test team for unit testing"

    def test_to_dict_with_model(self, team_with_model):
        """Test to_dict includes model configuration."""
        config = team_with_model.to_dict()
        assert "model" in config
        assert config["model"]["provider"] == "OpenAI"
        assert config["model"]["id"] == "gpt-4o-mini"

    def test_to_dict_with_members(self, team_with_members):
        """Test to_dict serializes members as references."""
        config = team_with_members.to_dict()
        # Members are stored by reference, not inlined.
        assert "members" in config
        assert len(config["members"]) == 2
        assert config["members"][0] == {"type": "agent", "agent_id": "member-agent"}
        assert config["members"][1] == {"type": "agent", "agent_id": "agent-2"}

    def test_to_dict_with_settings(self, team_with_settings):
        """Test to_dict preserves all settings."""
        config = team_with_settings.to_dict()
        assert config["id"] == "settings-team"
        assert config["name"] == "Settings Team"
        assert config["description"] == "Team with many settings"
        assert config["instructions"] == "Work together efficiently"
        assert config["markdown"] is True
        assert config["debug_mode"] is True
        assert config["retries"] == 3
        assert config["respond_directly"] is True
        assert config["delegate_to_all_members"] is True
        assert config["add_datetime_to_context"] is True

    def test_to_dict_excludes_default_values(self):
        """Test that default values are not included in the config."""
        team = Team(id="minimal-team", members=[])
        config = team.to_dict()
        # Default values should not be present
        assert "markdown" not in config  # defaults to False
        assert "debug_mode" not in config  # defaults to False
        assert "retries" not in config  # defaults to 0
        assert "respond_directly" not in config  # defaults to False

    def test_store_history_messages_default_is_false(self):
        """Test store_history_messages defaults to False and is omitted from config."""
        team = Team(id="history-default-team", members=[])
        assert team.store_history_messages is False
        assert "store_history_messages" not in team.to_dict()

    def test_add_search_knowledge_instructions_default_omitted(self):
        """Test add_search_knowledge_instructions default is omitted from config."""
        team = Team(id="search-default-team", members=[])
        assert "add_search_knowledge_instructions" not in team.to_dict()

    def test_add_search_knowledge_instructions_false_is_serialized(self):
        """Test add_search_knowledge_instructions=False is serialized in config."""
        # Explicit False differs from the default, so it must be written out.
        team = Team(id="search-false-team", members=[], add_search_knowledge_instructions=False)
        config = team.to_dict()
        assert config["add_search_knowledge_instructions"] is False

    def test_to_dict_with_db(self, basic_team, mock_db):
        """Test to_dict includes database configuration."""
        basic_team.db = mock_db
        config = basic_team.to_dict()
        assert "db" in config
        assert config["db"] == {"type": "postgres", "id": "test-db"}

    def test_to_dict_with_instructions_list(self):
        """Test to_dict handles instructions as a list."""
        team = Team(
            id="list-instructions-team",
            instructions=["Step 1: Coordinate", "Step 2: Execute"],
            members=[],
        )
        config = team.to_dict()
        assert config["instructions"] == ["Step 1: Coordinate", "Step 2: Execute"]

    def test_to_dict_with_system_message(self):
        """Test to_dict includes system message when it's a string."""
        team = Team(
            id="system-message-team",
            system_message="You are a coordinating team leader.",
            members=[],
        )
        config = team.to_dict()
        assert config["system_message"] == "You are a coordinating team leader."

    def test_to_dict_with_metadata(self):
        """Test to_dict includes metadata."""
        team = Team(
            id="metadata-team",
            metadata={"version": "1.0", "team_type": "research"},
            members=[],
        )
        config = team.to_dict()
        assert config["metadata"] == {"version": "1.0", "team_type": "research"}

    def test_to_dict_with_nested_team(self):
        """Test to_dict serializes nested team as reference."""
        inner_team = Team(id="inner-team", name="Inner Team", members=[])
        outer_team = Team(
            id="outer-team",
            name="Outer Team",
            members=[inner_team],
        )
        config = outer_team.to_dict()
        # Team members use a "team" type tag and team_id key.
        assert "members" in config
        assert len(config["members"]) == 1
        assert config["members"][0] == {"type": "team", "team_id": "inner-team"}

    def test_to_dict_with_mode(self):
        """Test to_dict includes mode and max_iterations."""
        from agno.team.mode import TeamMode

        team = Team(id="task-team", members=[], mode=TeamMode.tasks, max_iterations=20)
        config = team.to_dict()
        assert config["mode"] == "tasks"
        assert config["max_iterations"] == 20

    def test_to_dict_mode_default_not_serialized(self):
        """Test that default max_iterations is not serialized."""
        from agno.team.mode import TeamMode

        team = Team(id="coord-team", members=[], mode=TeamMode.coordinate)
        config = team.to_dict()
        assert config["mode"] == "coordinate"
        assert "max_iterations" not in config  # default=10 should not be serialized
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestTeamFromDict:
"""Tests for Team.from_dict() method."""
def test_from_dict_basic(self, sample_team_config):
    """Test from_dict creates team with basic config."""
    team = Team.from_dict(sample_team_config)
    # All scalar fields from the config dict map straight onto attributes.
    assert team.id == "sample-team"
    assert team.name == "Sample Team"
    assert team.description == "A sample team"
    assert team.instructions == "Be helpful"
    assert team.markdown is True
def test_from_dict_with_model(self):
    """Test from_dict reconstructs model from config."""
    from agno.models.openai import OpenAIChat

    # The provider/id pair in the config selects the concrete model class.
    config = {
        "id": "model-team",
        "name": "Model Team",
        "model": {"provider": "openai", "id": "gpt-4o-mini"},
    }
    team = Team.from_dict(config)
    assert team.model is not None
    assert isinstance(team.model, OpenAIChat)
    assert team.model.id == "gpt-4o-mini"
def test_from_dict_preserves_settings(self):
    """Test from_dict preserves all settings."""
    config = {
        "id": "full-team",
        "name": "Full Team",
        "debug_mode": True,
        "retries": 3,
        "respond_directly": True,
        "delegate_to_all_members": True,
        "add_datetime_to_context": True,
    }
    team = Team.from_dict(config)
    assert team.debug_mode is True
    assert team.retries == 3
    assert team.respond_directly is True
    assert team.delegate_to_all_members is True
    assert team.add_datetime_to_context is True
def test_from_dict_with_db_postgres(self):
    """Test from_dict reconstructs PostgresDb."""
    config = {
        "id": "db-team",
        "db": {"type": "postgres", "db_url": "postgresql://localhost/test"},
    }
    # Patch the class factory so no real database connection is attempted.
    with patch("agno.db.postgres.PostgresDb.from_dict") as mock_from_dict:
        mock_db = MagicMock()
        mock_from_dict.return_value = mock_db
        team = Team.from_dict(config)
        mock_from_dict.assert_called_once()
        assert team.db == mock_db
def test_from_dict_with_db_sqlite(self):
    """Test from_dict reconstructs SqliteDb."""
    config = {
        "id": "sqlite-team",
        "db": {"type": "sqlite", "db_file": "/tmp/test.db"},
    }
    # Patch the class factory so no file-backed database is created.
    with patch("agno.db.sqlite.SqliteDb.from_dict") as mock_from_dict:
        mock_db = MagicMock()
        mock_from_dict.return_value = mock_db
        team = Team.from_dict(config)
        mock_from_dict.assert_called_once()
        assert team.db == mock_db
def test_from_dict_with_registry_tools(self):
    """Test from_dict uses registry to rehydrate tools."""
    config = {
        "id": "tools-team",
        "tools": [{"name": "search", "description": "Search the web"}],
    }
    # The registry's rehydrate_function is how serialized tools regain
    # their callable entrypoints.
    mock_registry = MagicMock()
    mock_tool = MagicMock()
    mock_registry.rehydrate_function.return_value = mock_tool
    team = Team.from_dict(config, registry=mock_registry)
    mock_registry.rehydrate_function.assert_called_once()
    assert team.tools == [mock_tool]
def test_from_dict_without_registry_removes_tools(self):
    """Test from_dict removes tools when no registry is provided."""
    config = {
        "id": "no-registry-team",
        "tools": [{"name": "search"}],
    }
    team = Team.from_dict(config)
    # Tools should be None/empty since no registry was provided
    assert team.tools is None or team.tools == []
def test_from_dict_with_members_loads_from_db(self, mock_db) -> None:
    """Test from_dict loads member agents from database."""
    config = {
        "id": "members-team",
        "members": [{"type": "agent", "agent_id": "agent-1"}],
    }
    # get_agent_by_id is imported inside from_dict from agno.agent
    with patch("agno.agent.get_agent_by_id") as mock_get_agent:
        mock_agent = MagicMock()
        mock_get_agent.return_value = mock_agent
        team = Team.from_dict(config, db=mock_db)
        # The member reference should be resolved by id against the db that
        # was passed to from_dict, with no registry in play.
        mock_get_agent.assert_called_once_with(id="agent-1", db=mock_db, registry=None)
        assert team.members == [mock_agent]
def test_from_dict_roundtrip(self, team_with_settings):
    """Test that to_dict -> from_dict preserves team configuration."""
    rebuilt = Team.from_dict(team_with_settings.to_dict())
    # Every attribute that survived serialization must match the original.
    for attr in ("id", "name", "description", "markdown", "debug_mode", "retries"):
        assert getattr(rebuilt, attr) == getattr(team_with_settings, attr)
def test_from_dict_with_mode(self):
    """Test from_dict reconstructs mode and max_iterations."""
    from agno.team.mode import TeamMode

    rebuilt = Team.from_dict(
        {"id": "task-team", "mode": "tasks", "max_iterations": 25}
    )
    assert rebuilt.mode == TeamMode.tasks
    assert rebuilt.max_iterations == 25
def test_from_dict_mode_roundtrip(self):
    """Test to_dict -> from_dict roundtrip preserves mode."""
    from agno.team.mode import TeamMode

    original = Team(id="rt-team", members=[], mode=TeamMode.route, max_iterations=15)
    rebuilt = Team.from_dict(original.to_dict())
    assert rebuilt.mode == TeamMode.route
    assert rebuilt.max_iterations == 15
def test_from_dict_no_mode_defaults(self) -> None:
    """Test from_dict with no mode field defaults correctly."""
    config = {"id": "no-mode-team"}
    team = Team.from_dict(config)
    # Mode should be inferred as coordinate (default)
    from agno.team.mode import TeamMode

    assert team.mode == TeamMode.coordinate
    # max_iterations also falls back to its default when absent from config.
    assert team.max_iterations == 10
# =============================================================================
# save() Tests
# =============================================================================
class TestTeamSave:
    """Tests for Team.save() method."""

    def test_save_calls_upsert_component(self, basic_team, mock_db) -> None:
        """Test save calls upsert_component with correct parameters."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_team.db = mock_db
        version = basic_team.save()
        # save() is expected to register the component row itself before
        # persisting its config.
        mock_db.upsert_component.assert_called_once_with(
            component_id="test-team",
            component_type=ComponentType.TEAM,
            name="Test Team",
            description="A test team for unit testing",
            metadata=None,
        )
        # The returned version comes from the upsert_config result dict.
        assert version == 1

    def test_save_calls_upsert_config(self, basic_team, mock_db) -> None:
        """Test save calls upsert_config with team config."""
        mock_db.upsert_config.return_value = {"version": 2}
        basic_team.db = mock_db
        version = basic_team.save()
        mock_db.upsert_config.assert_called_once()
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["component_id"] == "test-team"
        assert "config" in call_args.kwargs
        assert version == 2

    def test_save_with_members_saves_each_member(self, mock_db, member_agent) -> None:
        """Test save saves each member agent."""
        mock_db.upsert_config.return_value = {"version": 1}
        # Create a spy on member.save
        member_agent.save = MagicMock(return_value=1)
        team = Team(
            id="team-with-member",
            name="Team",
            members=[member_agent],
            db=mock_db,
        )
        team.save()
        # Member should be saved
        member_agent.save.assert_called_once()

    def test_save_creates_member_links(self, mock_db, member_agent) -> None:
        """Test save creates links for members."""
        mock_db.upsert_config.return_value = {"version": 1}
        # The member's save() reports version 5; the link must pin that version.
        member_agent.save = MagicMock(return_value=5)
        team = Team(
            id="linked-team",
            name="Linked Team",
            members=[member_agent],
            db=mock_db,
        )
        team.save()
        # Check that links were passed to upsert_config
        call_args = mock_db.upsert_config.call_args
        links = call_args.kwargs.get("links")
        assert links is not None
        assert len(links) == 1
        assert links[0]["link_kind"] == "member"
        assert links[0]["child_component_id"] == "member-agent"
        assert links[0]["child_version"] == 5
        assert links[0]["meta"]["type"] == "agent"

    def test_save_with_explicit_db(self, basic_team, mock_db) -> None:
        """Test save uses explicitly provided db."""
        mock_db.upsert_config.return_value = {"version": 1}
        # Note: basic_team.db is never set here; the db argument alone suffices.
        version = basic_team.save(db=mock_db)
        mock_db.upsert_component.assert_called_once()
        mock_db.upsert_config.assert_called_once()
        assert version == 1

    def test_save_with_label(self, basic_team, mock_db) -> None:
        """Test save passes label to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_team.db = mock_db
        basic_team.save(label="production")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["label"] == "production"

    def test_save_with_stage(self, basic_team, mock_db) -> None:
        """Test save passes stage to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_team.db = mock_db
        basic_team.save(stage="draft")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["stage"] == "draft"

    def test_save_without_db_raises_error(self, basic_team) -> None:
        """Test save raises error when no db is available."""
        with pytest.raises(ValueError, match="Db not initialized or provided"):
            basic_team.save()

    def test_save_handles_db_error(self, basic_team, mock_db) -> None:
        """Test save raises error when database operation fails."""
        # Errors from the db layer should propagate, not be swallowed.
        mock_db.upsert_component.side_effect = Exception("Database error")
        basic_team.db = mock_db
        with pytest.raises(Exception, match="Database error"):
            basic_team.save()
# =============================================================================
# load() Tests
# =============================================================================
class TestTeamLoad:
    """Tests for Team.load() class method."""

    def test_load_returns_team(self, mock_db, sample_team_config) -> None:
        """Test load returns a team from database."""
        mock_db.load_component_graph.return_value = {
            "component": {"component_id": "sample-team"},
            "config": {"config": sample_team_config},
            "children": [],
        }
        team = Team.load(id="sample-team", db=mock_db)
        assert team is not None
        assert team.id == "sample-team"
        # Name comes from the sample_team_config fixture.
        assert team.name == "Sample Team"

    def test_load_with_version(self, mock_db) -> None:
        """Test load retrieves specific version."""
        mock_db.load_component_graph.return_value = {
            "component": {"component_id": "versioned-team"},
            "config": {"config": {"id": "versioned-team", "name": "V2 Team"}},
            "children": [],
        }
        Team.load(id="versioned-team", db=mock_db, version=2)
        # version is forwarded to the db; label stays None.
        mock_db.load_component_graph.assert_called_once_with("versioned-team", version=2, label=None)

    def test_load_with_label(self, mock_db) -> None:
        """Test load retrieves labeled version."""
        mock_db.load_component_graph.return_value = {
            "component": {"component_id": "labeled-team"},
            "config": {"config": {"id": "labeled-team", "name": "Production Team"}},
            "children": [],
        }
        Team.load(id="labeled-team", db=mock_db, label="production")
        # label is forwarded to the db; version stays None.
        mock_db.load_component_graph.assert_called_once_with("labeled-team", version=None, label="production")

    def test_load_hydrates_member_agents(self, mock_db) -> None:
        """Test load hydrates member agents from graph."""
        # The graph contains one child link typed as an agent; load() must
        # turn it into a concrete member on the returned team.
        mock_db.load_component_graph.return_value = {
            "component": {"component_id": "team-with-members"},
            "config": {"config": {"id": "team-with-members", "name": "Team"}},
            "children": [
                {
                    "link": {"meta": {"type": "agent"}},
                    "graph": {
                        "component": {"component_id": "agent-1"},
                        "config": {"config": {"id": "agent-1", "name": "Agent 1"}},
                    },
                }
            ],
        }
        team = Team.load(id="team-with-members", db=mock_db)
        assert team is not None
        assert len(team.members) == 1
        assert team.members[0].id == "agent-1"

    def test_load_returns_none_when_not_found(self, mock_db) -> None:
        """Test load returns None when team not found."""
        mock_db.load_component_graph.return_value = None
        team = Team.load(id="nonexistent-team", db=mock_db)
        assert team is None

    def test_load_returns_none_when_config_missing(self, mock_db) -> None:
        """Test load returns None when config is missing."""
        # The component row exists but its config payload is None.
        mock_db.load_component_graph.return_value = {
            "component": {"component_id": "empty-config"},
            "config": {"config": None},
            "children": [],
        }
        team = Team.load(id="empty-config-team", db=mock_db)
        assert team is None

    def test_load_sets_db_on_team(self, mock_db) -> None:
        """Test load sets db attribute on returned team."""
        mock_db.load_component_graph.return_value = {
            "component": {"component_id": "db-team"},
            "config": {"config": {"id": "db-team", "name": "DB Team"}},
            "children": [],
        }
        team = Team.load(id="db-team", db=mock_db)
        assert team is not None
        assert team.db == mock_db
# =============================================================================
# delete() Tests
# =============================================================================
class TestTeamDelete:
    """Tests for Team.delete() method."""

    def test_delete_calls_delete_component(self, basic_team, mock_db):
        """delete() forwards to db.delete_component with soft delete by default."""
        mock_db.delete_component.return_value = True
        basic_team.db = mock_db
        outcome = basic_team.delete()
        mock_db.delete_component.assert_called_once_with(component_id="test-team", hard_delete=False)
        assert outcome is True

    def test_delete_with_hard_delete(self, basic_team, mock_db):
        """delete(hard_delete=True) forwards the hard-delete flag."""
        mock_db.delete_component.return_value = True
        basic_team.db = mock_db
        outcome = basic_team.delete(hard_delete=True)
        mock_db.delete_component.assert_called_once_with(component_id="test-team", hard_delete=True)
        assert outcome is True

    def test_delete_with_explicit_db(self, basic_team, mock_db):
        """delete(db=...) works without the team having a db attribute set."""
        mock_db.delete_component.return_value = True
        outcome = basic_team.delete(db=mock_db)
        mock_db.delete_component.assert_called_once()
        assert outcome is True

    def test_delete_without_db_raises_error(self, basic_team):
        """delete() raises ValueError when no db is available."""
        with pytest.raises(ValueError, match="Db not initialized or provided"):
            basic_team.delete()

    def test_delete_returns_false_on_failure(self, basic_team, mock_db):
        """delete() surfaces a False result from the db layer unchanged."""
        mock_db.delete_component.return_value = False
        basic_team.db = mock_db
        assert basic_team.delete() is False
class TestTeamSessionNaming:
    """Tests for Team session-name generation."""

    def test_generate_session_name_fallback_after_max_retries(self):
        """generate_session_name falls back after repeated invalid model output."""
        team = Team(id="session-name-team", members=[])
        # Model always yields an empty response, forcing every retry to fail.
        team.model = MagicMock()
        team.model.response = MagicMock(return_value=MagicMock(content=None))
        name = team.generate_session_name(session=TeamSession(session_id="session-1", runs=[]))
        assert name == "Team Session"
        # One initial attempt plus three retries before falling back.
        assert team.model.response.call_count == 4
# =============================================================================
# get_team_by_id() Tests
# =============================================================================
class TestGetTeamById:
    """Tests for get_team_by_id() helper function."""

    def test_get_team_by_id_returns_team(self, mock_db) -> None:
        """Test get_team_by_id returns team from database."""
        mock_db.get_config.return_value = {"config": {"id": "found-team", "name": "Found Team"}}
        team = get_team_by_id(db=mock_db, id="found-team")
        assert team is not None
        assert team.id == "found-team"
        assert team.name == "Found Team"

    def test_get_team_by_id_with_version(self, mock_db) -> None:
        """Test get_team_by_id retrieves specific version."""
        mock_db.get_config.return_value = {"config": {"id": "versioned", "name": "V3"}}
        get_team_by_id(db=mock_db, id="versioned", version=3)
        # version is forwarded; label stays None.
        mock_db.get_config.assert_called_once_with(component_id="versioned", version=3, label=None)

    def test_get_team_by_id_with_label(self, mock_db) -> None:
        """Test get_team_by_id retrieves labeled version."""
        mock_db.get_config.return_value = {"config": {"id": "labeled", "name": "Staging"}}
        get_team_by_id(db=mock_db, id="labeled", label="staging")
        # label is forwarded; version stays None.
        mock_db.get_config.assert_called_once_with(component_id="labeled", version=None, label="staging")

    def test_get_team_by_id_with_registry(self, mock_db) -> None:
        """Test get_team_by_id passes registry."""
        # Config contains serialized tools, which require a registry to
        # rehydrate; the call must succeed when one is supplied.
        mock_db.get_config.return_value = {"config": {"id": "registry-team", "tools": [{"name": "calc"}]}}
        mock_registry = MagicMock()
        mock_registry.rehydrate_function.return_value = MagicMock()
        team = get_team_by_id(db=mock_db, id="registry-team", registry=mock_registry)
        assert team is not None

    def test_get_team_by_id_returns_none_when_not_found(self, mock_db) -> None:
        """Test get_team_by_id returns None when not found."""
        mock_db.get_config.return_value = None
        team = get_team_by_id(db=mock_db, id="missing")
        assert team is None

    def test_get_team_by_id_sets_db(self, mock_db) -> None:
        """Test get_team_by_id sets db on returned team via registry."""
        # The db is set via registry lookup when config contains a serialized db reference
        mock_db.id = "test-db"
        mock_db.get_config.return_value = {
            "config": {
                "id": "db-team",
                "name": "DB Team",
                "db": {"type": "postgres", "id": "test-db"},
            }
        }
        # Create registry with the mock db registered
        registry = Registry(dbs=[mock_db])
        team = get_team_by_id(db=mock_db, id="db-team", registry=registry)
        assert team is not None
        assert team.db == mock_db

    def test_get_team_by_id_handles_error(self, mock_db) -> None:
        """Test get_team_by_id returns None on error."""
        # Db failures are expected to be swallowed and reported as "not found".
        mock_db.get_config.side_effect = Exception("DB error")
        team = get_team_by_id(db=mock_db, id="error-team")
        assert team is None
# =============================================================================
# get_teams() Tests
# =============================================================================
class TestGetTeams:
    """Tests for get_teams() helper function."""

    def test_get_teams_returns_list(self, mock_db) -> None:
        """Test get_teams returns list of teams."""
        # list_components returns (rows, pagination); config is then fetched
        # per component id, hence the side_effect sequence below.
        mock_db.list_components.return_value = (
            [
                {"component_id": "team-1"},
                {"component_id": "team-2"},
            ],
            None,
        )
        mock_db.get_config.side_effect = [
            {"config": {"id": "team-1", "name": "Team 1"}},
            {"config": {"id": "team-2", "name": "Team 2"}},
        ]
        teams = get_teams(db=mock_db)
        assert len(teams) == 2
        assert teams[0].id == "team-1"
        assert teams[1].id == "team-2"

    def test_get_teams_filters_by_type(self, mock_db) -> None:
        """Test get_teams filters by TEAM component type."""
        mock_db.list_components.return_value = ([], None)
        get_teams(db=mock_db)
        mock_db.list_components.assert_called_once_with(component_type=ComponentType.TEAM, exclude_component_ids=None)

    def test_get_teams_with_registry(self, mock_db) -> None:
        """Test get_teams passes registry to from_dict."""
        mock_db.list_components.return_value = (
            [{"component_id": "tools-team"}],
            None,
        )
        mock_db.get_config.return_value = {"config": {"id": "tools-team", "tools": [{"name": "search"}]}}
        mock_registry = MagicMock()
        mock_registry.rehydrate_function.return_value = MagicMock()
        teams = get_teams(db=mock_db, registry=mock_registry)
        assert len(teams) == 1

    def test_get_teams_returns_empty_list_on_error(self, mock_db) -> None:
        """Test get_teams returns empty list on error."""
        mock_db.list_components.side_effect = Exception("DB error")
        teams = get_teams(db=mock_db)
        assert teams == []

    def test_get_teams_skips_invalid_configs(self, mock_db) -> None:
        """Test get_teams skips teams with invalid configs."""
        mock_db.list_components.return_value = (
            [
                {"component_id": "valid-team"},
                {"component_id": "invalid-team"},
            ],
            None,
        )
        mock_db.get_config.side_effect = [
            {"config": {"id": "valid-team", "name": "Valid"}},
            {"config": None},  # Invalid config
        ]
        teams = get_teams(db=mock_db)
        # Only the team with a usable config payload survives.
        assert len(teams) == 1
        assert teams[0].id == "valid-team"

    def test_get_teams_sets_db_on_all_teams(self, mock_db) -> None:
        """Test get_teams sets db on all returned teams via registry."""
        # The db is set via registry lookup when config contains a serialized db reference
        mock_db.id = "test-db"
        mock_db.list_components.return_value = (
            [{"component_id": "team-1"}],
            None,
        )
        mock_db.get_config.return_value = {
            "config": {
                "id": "team-1",
                "name": "Team 1",
                "db": {"type": "postgres", "id": "test-db"},
            }
        }
        # Create registry with the mock db registered
        registry = Registry(dbs=[mock_db])
        teams = get_teams(db=mock_db, registry=registry)
        assert len(teams) == 1
        assert teams[0].db == mock_db
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_team_config.py",
"license": "Apache License 2.0",
"lines": 717,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
# libs/agno/tests/unit/tools/test_websearch.py
"""Unit tests for WebSearchTools class."""
import json
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.websearch import VALID_TIMELIMITS, WebSearchTools
@pytest.fixture
def mock_ddgs():
    """Patch the DDGS class and yield (mock instance, mock class)."""
    with patch("agno.tools.websearch.DDGS") as ddgs_cls:
        ddgs_instance = MagicMock()
        # DDGS is used as a context manager; wire __enter__/__exit__ so the
        # `with DDGS(...) as ddgs:` body receives our mock instance.
        ddgs_cls.return_value.__enter__ = MagicMock(return_value=ddgs_instance)
        ddgs_cls.return_value.__exit__ = MagicMock(return_value=False)
        yield ddgs_instance, ddgs_cls
# ============================================================================
# INITIALIZATION TESTS
# ============================================================================
def test_init_defaults() -> None:
    """Test initialization with default parameters."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools()
        # Every configuration knob should fall back to its documented default.
        assert tools.backend == "auto"
        assert tools.proxy is None
        assert tools.timeout == 10
        assert tools.fixed_max_results is None
        assert tools.modifier is None
        assert tools.verify_ssl is True
        assert tools.timelimit is None
        assert tools.region is None


def test_init_with_timelimit() -> None:
    """Test initialization with timelimit parameter."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(timelimit="d")
        assert tools.timelimit == "d"


def test_init_with_region() -> None:
    """Test initialization with region parameter."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(region="us-en")
        assert tools.region == "us-en"


def test_init_with_backend() -> None:
    """Test initialization with custom backend parameter."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(backend="google")
        assert tools.backend == "google"


def test_init_with_all_params() -> None:
    """Test initialization with all parameters."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(
            enable_search=True,
            enable_news=True,
            backend="bing",
            modifier="site:example.com",
            fixed_max_results=20,
            proxy="http://proxy:8080",
            timeout=60,
            verify_ssl=False,
            timelimit="m",
            region="ru-ru",
        )
        # Each constructor argument should be stored verbatim on the toolkit.
        assert tools.backend == "bing"
        assert tools.proxy == "http://proxy:8080"
        assert tools.timeout == 60
        assert tools.fixed_max_results == 20
        assert tools.modifier == "site:example.com"
        assert tools.verify_ssl is False
        assert tools.timelimit == "m"
        assert tools.region == "ru-ru"
# ============================================================================
# TIMELIMIT VALIDATION TESTS
# ============================================================================
def test_valid_timelimit_day() -> None:
    """Test that 'd' is a valid timelimit."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(timelimit="d")
        assert tools.timelimit == "d"


def test_valid_timelimit_week() -> None:
    """Test that 'w' is a valid timelimit."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(timelimit="w")
        assert tools.timelimit == "w"


def test_valid_timelimit_month() -> None:
    """Test that 'm' is a valid timelimit."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(timelimit="m")
        assert tools.timelimit == "m"


def test_valid_timelimit_year() -> None:
    """Test that 'y' is a valid timelimit."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(timelimit="y")
        assert tools.timelimit == "y"


def test_valid_timelimit_none() -> None:
    """Test that None is a valid timelimit."""
    with patch("agno.tools.websearch.DDGS"):
        tools = WebSearchTools(timelimit=None)
        assert tools.timelimit is None


def test_invalid_timelimit_raises_error() -> None:
    """Test that invalid timelimit raises ValueError."""
    with patch("agno.tools.websearch.DDGS"):
        with pytest.raises(ValueError) as exc_info:
            WebSearchTools(timelimit="invalid")
        # The error message should enumerate every accepted value.
        assert "Invalid timelimit 'invalid'" in str(exc_info.value)
        assert "'d' (day)" in str(exc_info.value)
        assert "'w' (week)" in str(exc_info.value)
        assert "'m' (month)" in str(exc_info.value)
        assert "'y' (year)" in str(exc_info.value)


def test_invalid_timelimit_empty_string() -> None:
    """Test that empty string timelimit raises ValueError."""
    with patch("agno.tools.websearch.DDGS"):
        with pytest.raises(ValueError) as exc_info:
            WebSearchTools(timelimit="")
        assert "Invalid timelimit ''" in str(exc_info.value)


def test_invalid_timelimit_uppercase() -> None:
    """Test that uppercase timelimit raises ValueError (case-sensitive)."""
    with patch("agno.tools.websearch.DDGS"):
        with pytest.raises(ValueError) as exc_info:
            WebSearchTools(timelimit="D")
        assert "Invalid timelimit 'D'" in str(exc_info.value)


def test_invalid_timelimit_full_word() -> None:
    """Test that full word timelimit raises ValueError."""
    with patch("agno.tools.websearch.DDGS"):
        with pytest.raises(ValueError) as exc_info:
            WebSearchTools(timelimit="day")
        assert "Invalid timelimit 'day'" in str(exc_info.value)


def test_valid_timelimits_constant() -> None:
    """Test that VALID_TIMELIMITS contains expected values."""
    assert VALID_TIMELIMITS == frozenset({"d", "w", "m", "y"})
# ============================================================================
# TOOL REGISTRATION TESTS
# ============================================================================
def test_enable_search_only():
    """Only the web_search function is registered when news is disabled."""
    with patch("agno.tools.websearch.DDGS"):
        toolkit = WebSearchTools(enable_search=True, enable_news=False)
        registered = [fn.__name__ for fn in toolkit.tools]
        assert "web_search" in registered
        assert "search_news" not in registered


def test_enable_news_only():
    """Only the search_news function is registered when search is disabled."""
    with patch("agno.tools.websearch.DDGS"):
        toolkit = WebSearchTools(enable_search=False, enable_news=True)
        registered = [fn.__name__ for fn in toolkit.tools]
        assert "web_search" not in registered
        assert "search_news" in registered


def test_enable_both():
    """Both functions are registered when both flags are on."""
    with patch("agno.tools.websearch.DDGS"):
        toolkit = WebSearchTools(enable_search=True, enable_news=True)
        registered = [fn.__name__ for fn in toolkit.tools]
        assert "web_search" in registered
        assert "search_news" in registered


def test_disable_both():
    """No functions are registered when both flags are off."""
    with patch("agno.tools.websearch.DDGS"):
        toolkit = WebSearchTools(enable_search=False, enable_news=False)
        assert len(toolkit.tools) == 0
# ============================================================================
# WEB SEARCH TESTS
# ============================================================================
def test_web_search_basic(mock_ddgs) -> None:
    """Test basic web search."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = [
        {"title": "Result 1", "href": "https://example.com", "body": "Description 1"},
    ]
    tools = WebSearchTools()
    result = tools.web_search("test query")
    # web_search returns a JSON-encoded list of result dicts.
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["title"] == "Result 1"
    mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="auto")


def test_web_search_with_timelimit(mock_ddgs) -> None:
    """Test that timelimit is passed to ddgs.text()."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(timelimit="d")
    tools.web_search("test query")
    mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="auto", timelimit="d")


def test_web_search_with_region(mock_ddgs) -> None:
    """Test that region is passed to ddgs.text()."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(region="us-en")
    tools.web_search("test query")
    mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="auto", region="us-en")


def test_web_search_with_modifier(mock_ddgs) -> None:
    """Test that modifier is prepended to query."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(modifier="site:github.com")
    tools.web_search("python")
    # The modifier and the user query are joined with a single space.
    mock_instance.text.assert_called_once_with(query="site:github.com python", max_results=5, backend="auto")


def test_web_search_with_fixed_max_results(mock_ddgs) -> None:
    """Test that fixed_max_results overrides max_results parameter."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(fixed_max_results=10)
    tools.web_search("test", max_results=5)  # Should use 10, not 5
    mock_instance.text.assert_called_once_with(query="test", max_results=10, backend="auto")


def test_web_search_with_custom_max_results(mock_ddgs) -> None:
    """Test web search with custom max_results parameter."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools()
    tools.web_search("test", max_results=20)
    mock_instance.text.assert_called_once_with(query="test", max_results=20, backend="auto")


def test_web_search_without_optional_params(mock_ddgs) -> None:
    """Test that optional params are not passed when None."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools()
    tools.web_search("test query")
    # Should not include timelimit or region
    mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="auto")


def test_web_search_with_all_params(mock_ddgs) -> None:
    """Test web search with all parameters."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = [
        {"title": "Result 1", "href": "https://example.com", "body": "Description 1"},
    ]
    tools = WebSearchTools(
        backend="google",
        timelimit="w",
        region="uk-en",
        modifier="site:docs.python.org",
        fixed_max_results=15,
    )
    result = tools.web_search("asyncio")
    result_data = json.loads(result)
    assert len(result_data) == 1
    # All configured options must reach the underlying ddgs.text() call.
    mock_instance.text.assert_called_once_with(
        query="site:docs.python.org asyncio",
        max_results=15,
        backend="google",
        timelimit="w",
        region="uk-en",
    )
# ============================================================================
# NEWS SEARCH TESTS
# ============================================================================
def test_search_news_basic(mock_ddgs) -> None:
    """Test basic news search."""
    mock_instance, _ = mock_ddgs
    mock_instance.news.return_value = [
        {"title": "News 1", "url": "https://news.com", "body": "News body 1"},
    ]
    tools = WebSearchTools()
    result = tools.search_news("breaking news")
    # search_news returns a JSON-encoded list of news result dicts.
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["title"] == "News 1"
    mock_instance.news.assert_called_once_with(query="breaking news", max_results=5, backend="auto")


def test_search_news_with_timelimit(mock_ddgs) -> None:
    """Test that timelimit is passed to ddgs.news()."""
    mock_instance, _ = mock_ddgs
    mock_instance.news.return_value = []
    tools = WebSearchTools(timelimit="d")
    tools.search_news("test news")
    mock_instance.news.assert_called_once_with(query="test news", max_results=5, backend="auto", timelimit="d")


def test_search_news_with_region(mock_ddgs) -> None:
    """Test that region is passed to ddgs.news()."""
    mock_instance, _ = mock_ddgs
    mock_instance.news.return_value = []
    tools = WebSearchTools(region="de-de")
    tools.search_news("test news")
    mock_instance.news.assert_called_once_with(query="test news", max_results=5, backend="auto", region="de-de")


def test_search_news_with_fixed_max_results(mock_ddgs) -> None:
    """Test that fixed_max_results overrides max_results parameter."""
    mock_instance, _ = mock_ddgs
    mock_instance.news.return_value = []
    tools = WebSearchTools(fixed_max_results=3)
    tools.search_news("test", max_results=10)  # Should use 3, not 10
    mock_instance.news.assert_called_once_with(query="test", max_results=3, backend="auto")


def test_search_news_with_all_params(mock_ddgs) -> None:
    """Test news search with all parameters."""
    mock_instance, _ = mock_ddgs
    mock_instance.news.return_value = [
        {"title": "News 1", "url": "https://news.com", "body": "News body 1"},
    ]
    tools = WebSearchTools(
        backend="bing",
        timelimit="m",
        region="fr-fr",
        fixed_max_results=8,
    )
    result = tools.search_news("technology")
    result_data = json.loads(result)
    assert len(result_data) == 1
    # All configured options must reach the underlying ddgs.news() call.
    mock_instance.news.assert_called_once_with(
        query="technology",
        max_results=8,
        backend="bing",
        timelimit="m",
        region="fr-fr",
    )
# ============================================================================
# BACKEND TESTS
# ============================================================================
# Each test below verifies that the configured backend name is forwarded
# verbatim in the keyword arguments of ddgs.text().
def test_backend_auto(mock_ddgs) -> None:
    """Test auto backend selection."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="auto")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "auto"


def test_backend_duckduckgo(mock_ddgs) -> None:
    """Test DuckDuckGo backend."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="duckduckgo")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "duckduckgo"


def test_backend_google(mock_ddgs) -> None:
    """Test Google backend."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="google")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "google"


def test_backend_bing(mock_ddgs) -> None:
    """Test Bing backend."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="bing")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "bing"


def test_backend_brave(mock_ddgs) -> None:
    """Test Brave backend."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="brave")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "brave"


def test_backend_yandex(mock_ddgs) -> None:
    """Test Yandex backend."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="yandex")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "yandex"


def test_backend_yahoo(mock_ddgs) -> None:
    """Test Yahoo backend."""
    mock_instance, _ = mock_ddgs
    mock_instance.text.return_value = []
    tools = WebSearchTools(backend="yahoo")
    tools.web_search("test")
    call_kwargs = mock_instance.text.call_args[1]
    assert call_kwargs["backend"] == "yahoo"
# ============================================================================
# DDGS CLIENT CONFIGURATION TESTS
# ============================================================================
# NOTE: the mock_ddgs fixture already patches agno.tools.websearch.DDGS for the
# whole test, so the previous inner `with patch("agno.tools.websearch.DDGS")`
# in each of these tests was redundant — it only shadowed the fixture's patch.
# The fixture request is kept so DDGS stays mocked during construction.
def test_ddgs_client_with_proxy(mock_ddgs):
    """Test that proxy is stored correctly."""
    tools = WebSearchTools(proxy="socks5://localhost:9050")
    assert tools.proxy == "socks5://localhost:9050"


def test_ddgs_client_with_timeout(mock_ddgs):
    """Test that timeout is stored correctly."""
    tools = WebSearchTools(timeout=30)
    assert tools.timeout == 30


def test_ddgs_client_with_verify_ssl_false(mock_ddgs):
    """Test that verify_ssl=False is stored correctly."""
    tools = WebSearchTools(verify_ssl=False)
    assert tools.verify_ssl is False


def test_ddgs_client_with_all_config(mock_ddgs):
    """Test DDGS client with all configuration options."""
    tools = WebSearchTools(
        proxy="http://proxy:8080",
        timeout=60,
        verify_ssl=False,
    )
    # Every connection option must be stored verbatim on the toolkit.
    assert tools.proxy == "http://proxy:8080"
    assert tools.timeout == 60
    assert tools.verify_ssl is False
# ============================================================================
# REGION TESTS
# ============================================================================
def test_region_us_en(mock_ddgs):
"""Test US English region."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = WebSearchTools(region="us-en")
tools.web_search("test")
call_kwargs = mock_instance.text.call_args[1]
assert call_kwargs["region"] == "us-en"
def test_region_uk_en(mock_ddgs):
    """The 'uk-en' region is passed through to DDGS.text()."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.text.return_value = []
    WebSearchTools(region="uk-en").web_search("test")
    kwargs = ddgs_instance.text.call_args[1]
    assert kwargs["region"] == "uk-en"
def test_region_de_de(mock_ddgs):
    """The 'de-de' region is passed through to DDGS.text()."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.text.return_value = []
    WebSearchTools(region="de-de").web_search("test")
    kwargs = ddgs_instance.text.call_args[1]
    assert kwargs["region"] == "de-de"
def test_region_fr_fr(mock_ddgs):
    """The 'fr-fr' region is passed through to DDGS.text()."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.text.return_value = []
    WebSearchTools(region="fr-fr").web_search("test")
    kwargs = ddgs_instance.text.call_args[1]
    assert kwargs["region"] == "fr-fr"
def test_region_ru_ru(mock_ddgs):
    """The 'ru-ru' region is passed through to DDGS.text()."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.text.return_value = []
    WebSearchTools(region="ru-ru").web_search("test")
    kwargs = ddgs_instance.text.call_args[1]
    assert kwargs["region"] == "ru-ru"
# ============================================================================
# JSON OUTPUT TESTS
# ============================================================================
def test_web_search_returns_json(mock_ddgs):
    """web_search output must decode as a JSON list of results."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.text.return_value = [
        {"title": "Test", "href": "https://test.com", "body": "Test body"},
    ]
    decoded = json.loads(WebSearchTools().web_search("test"))
    assert isinstance(decoded, list)
    assert len(decoded) == 1
def test_search_news_returns_json(mock_ddgs):
    """search_news output must decode as a JSON list of results."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.news.return_value = [
        {"title": "News", "url": "https://news.com", "body": "News body"},
    ]
    decoded = json.loads(WebSearchTools().search_news("test"))
    assert isinstance(decoded, list)
    assert len(decoded) == 1
def test_web_search_empty_results(mock_ddgs):
    """No hits should serialize to an empty JSON array."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.text.return_value = []
    decoded = json.loads(WebSearchTools().web_search("nonexistent query"))
    assert decoded == []
def test_search_news_empty_results(mock_ddgs):
    """No news hits should serialize to an empty JSON array."""
    ddgs_instance, _ = mock_ddgs
    ddgs_instance.news.return_value = []
    decoded = json.loads(WebSearchTools().search_news("nonexistent news"))
    assert decoded == []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_websearch.py",
"license": "Apache License 2.0",
"lines": 467,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_workflow_config.py | """
Unit tests for Workflow configuration serialization and persistence.
Tests cover:
- to_dict(): Serialization of workflow to dictionary
- from_dict(): Deserialization of workflow from dictionary
- save(): Saving workflow to database (including steps with agents/teams)
- load(): Loading workflow from database
- delete(): Deleting workflow from database
- get_workflow_by_id(): Helper function to get workflow by ID
- get_workflows(): Helper function to get all workflows
"""
from typing import Any, Dict
from unittest.mock import MagicMock, patch
import pytest
from agno.db.base import BaseDb, ComponentType
from agno.registry import Registry
from agno.workflow.workflow import Workflow, get_workflow_by_id, get_workflows
# =============================================================================
# Fixtures
# =============================================================================
def _create_mock_db_class():
    """Build a concrete BaseDb subclass whose abstract methods are all MagicMocks.

    This lets instances pass isinstance(db, BaseDb) checks without a real backend.
    """
    stubs = {
        name: MagicMock()
        for name in dir(BaseDb)
        if getattr(getattr(BaseDb, name, None), "__isabstractmethod__", False)
    }
    return type("MockDb", (BaseDb,), stubs)
@pytest.fixture
def mock_db():
    """Yield a BaseDb-compatible mock with the commonly used methods stubbed."""
    db_cls = _create_mock_db_class()
    db = db_cls()
    # Stub out the methods the workflow persistence layer relies on.
    db.upsert_component = MagicMock()
    db.upsert_config = MagicMock(return_value={"version": 1})
    db.delete_component = MagicMock(return_value=True)
    db.get_config = MagicMock()
    db.list_components = MagicMock()
    db.get_links = MagicMock()
    db.to_dict = MagicMock(return_value={"type": "postgres", "id": "test-db"})
    return db
@pytest.fixture
def basic_workflow():
    """Yield a minimal workflow with just id, name and description set."""
    return Workflow(
        id="test-workflow",
        name="Test Workflow",
        description="A test workflow for unit testing",
    )
@pytest.fixture
def workflow_with_settings():
    """Yield a workflow exercising the optional boolean/int settings."""
    return Workflow(
        id="settings-workflow",
        name="Settings Workflow",
        description="Workflow with many settings",
        debug_mode=True,
        stream_events=True,
        store_events=True,
        add_workflow_history_to_steps=True,
        num_history_runs=5,
    )
@pytest.fixture
def sample_workflow_config() -> Dict[str, Any]:
    """Yield a plain dict shaped like a serialized workflow config."""
    return {
        "id": "sample-workflow",
        "name": "Sample Workflow",
        "description": "A sample workflow",
        "debug_mode": False,
        "telemetry": True,
    }
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestWorkflowToDict:
    """Serialization tests for Workflow.to_dict()."""

    def test_to_dict_basic_workflow(self, basic_workflow):
        """A minimal workflow serializes its identity fields."""
        data = basic_workflow.to_dict()
        assert data["id"] == "test-workflow"
        assert data["name"] == "Test Workflow"
        assert data["description"] == "A test workflow for unit testing"

    def test_to_dict_with_settings(self, workflow_with_settings):
        """Every configured setting survives serialization."""
        data = workflow_with_settings.to_dict()
        assert data["id"] == "settings-workflow"
        assert data["name"] == "Settings Workflow"
        assert data["description"] == "Workflow with many settings"
        for flag in ("debug_mode", "stream_events", "store_events", "add_workflow_history_to_steps"):
            assert data[flag] is True
        assert data["num_history_runs"] == 5

    def test_to_dict_with_db(self, basic_workflow, mock_db):
        """The attached database is embedded as its own serialized dict."""
        basic_workflow.db = mock_db
        data = basic_workflow.to_dict()
        assert "db" in data
        assert data["db"] == {"type": "postgres", "id": "test-db"}

    def test_to_dict_with_metadata(self):
        """User-supplied metadata is carried through unchanged."""
        wf = Workflow(
            id="metadata-workflow",
            metadata={"version": "1.0", "workflow_type": "etl"},
        )
        assert wf.to_dict()["metadata"] == {"version": "1.0", "workflow_type": "etl"}

    def test_to_dict_with_user_and_session(self):
        """user_id and session_id are included when set."""
        wf = Workflow(
            id="session-workflow",
            user_id="user-123",
            session_id="session-456",
        )
        data = wf.to_dict()
        assert data["user_id"] == "user-123"
        assert data["session_id"] == "session-456"

    def test_to_dict_includes_default_settings(self):
        """A few settings are always emitted, even at their default values."""
        data = Workflow(id="defaults-workflow").to_dict()
        for always_present in ("debug_mode", "telemetry", "add_workflow_history_to_steps", "num_history_runs"):
            assert always_present in data

    def test_to_dict_with_steps(self):
        """Steps serialize via their own to_dict() and land under 'steps'."""
        from agno.workflow.workflow import Step

        fake_step = MagicMock(spec=Step)
        fake_step.to_dict.return_value = {"name": "step-1", "executor_id": "agent-1"}
        wf = Workflow(
            id="steps-workflow",
            steps=[fake_step],
        )
        data = wf.to_dict()
        assert "steps" in data
        assert len(data["steps"]) == 1
        assert data["steps"][0] == {"name": "step-1", "executor_id": "agent-1"}
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestWorkflowFromDict:
    """Deserialization tests for Workflow.from_dict()."""

    def test_from_dict_basic(self, sample_workflow_config):
        """Basic identity and flag fields are restored from a config dict."""
        wf = Workflow.from_dict(sample_workflow_config)
        assert wf.id == "sample-workflow"
        assert wf.name == "Sample Workflow"
        assert wf.description == "A sample workflow"
        assert wf.debug_mode is False
        assert wf.telemetry is True

    def test_from_dict_preserves_settings(self):
        """All optional settings round-trip through from_dict."""
        cfg = {
            "id": "full-workflow",
            "name": "Full Workflow",
            "debug_mode": True,
            "stream_events": True,
            "store_events": True,
            "add_workflow_history_to_steps": True,
            "num_history_runs": 10,
        }
        wf = Workflow.from_dict(cfg)
        assert wf.debug_mode is True
        assert wf.stream_events is True
        assert wf.store_events is True
        assert wf.add_workflow_history_to_steps is True
        assert wf.num_history_runs == 10

    def test_from_dict_with_db_postgres(self):
        """A 'postgres' db entry is rebuilt through PostgresDb.from_dict."""
        cfg = {
            "id": "db-workflow",
            "db": {"type": "postgres", "db_url": "postgresql://localhost/test"},
        }
        with patch("agno.db.postgres.PostgresDb.from_dict") as fake_from_dict:
            rebuilt_db = MagicMock()
            fake_from_dict.return_value = rebuilt_db
            wf = Workflow.from_dict(cfg)
            fake_from_dict.assert_called_once()
            assert wf.db == rebuilt_db

    def test_from_dict_with_db_sqlite(self):
        """A 'sqlite' db entry is rebuilt through SqliteDb.from_dict."""
        cfg = {
            "id": "sqlite-workflow",
            "db": {"type": "sqlite", "db_file": "/tmp/test.db"},
        }
        with patch("agno.db.sqlite.SqliteDb.from_dict") as fake_from_dict:
            rebuilt_db = MagicMock()
            fake_from_dict.return_value = rebuilt_db
            wf = Workflow.from_dict(cfg)
            fake_from_dict.assert_called_once()
            assert wf.db == rebuilt_db

    def test_from_dict_with_steps(self):
        """Serialized steps are rebuilt through Step.from_dict."""
        cfg = {
            "id": "steps-workflow",
            "steps": [{"name": "step-1"}],
        }
        with patch("agno.workflow.workflow.Step.from_dict") as fake_step_from_dict:
            rebuilt_step = MagicMock()
            fake_step_from_dict.return_value = rebuilt_step
            wf = Workflow.from_dict(cfg)
            fake_step_from_dict.assert_called_once()
            assert wf.steps == [rebuilt_step]

    def test_from_dict_roundtrip(self, workflow_with_settings):
        """to_dict followed by from_dict yields an equivalent workflow."""
        rebuilt = Workflow.from_dict(workflow_with_settings.to_dict())
        assert rebuilt.id == workflow_with_settings.id
        assert rebuilt.name == workflow_with_settings.name
        assert rebuilt.description == workflow_with_settings.description
        assert rebuilt.debug_mode == workflow_with_settings.debug_mode
        assert rebuilt.stream_events == workflow_with_settings.stream_events
# =============================================================================
# save() Tests
# =============================================================================
class TestWorkflowSave:
    """Tests for Workflow.save() method."""
    def test_save_calls_upsert_component(self, basic_workflow, mock_db):
        """Test save calls upsert_component with correct parameters."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_workflow.db = mock_db
        version = basic_workflow.save()
        # The component row must carry the workflow's identity fields verbatim.
        mock_db.upsert_component.assert_called_once_with(
            component_id="test-workflow",
            component_type=ComponentType.WORKFLOW,
            name="Test Workflow",
            description="A test workflow for unit testing",
            metadata=None,
        )
        # save() returns the version reported by upsert_config.
        assert version == 1
    def test_save_calls_upsert_config(self, basic_workflow, mock_db):
        """Test save calls upsert_config with workflow config."""
        mock_db.upsert_config.return_value = {"version": 2}
        basic_workflow.db = mock_db
        version = basic_workflow.save()
        mock_db.upsert_config.assert_called_once()
        call_args = mock_db.upsert_config.call_args
        # The serialized config is keyed by the workflow id.
        assert call_args.kwargs["component_id"] == "test-workflow"
        assert "config" in call_args.kwargs
        assert version == 2
    def test_save_with_explicit_db(self, basic_workflow, mock_db):
        """Test save uses explicitly provided db."""
        mock_db.upsert_config.return_value = {"version": 1}
        # db is passed directly instead of being set on the workflow.
        version = basic_workflow.save(db=mock_db)
        mock_db.upsert_component.assert_called_once()
        mock_db.upsert_config.assert_called_once()
        assert version == 1
    def test_save_with_label(self, basic_workflow, mock_db):
        """Test save passes label to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_workflow.db = mock_db
        basic_workflow.save(label="production")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["label"] == "production"
    def test_save_with_stage(self, basic_workflow, mock_db):
        """Test save passes stage to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_workflow.db = mock_db
        basic_workflow.save(stage="draft")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["stage"] == "draft"
    def test_save_with_notes(self, basic_workflow, mock_db):
        """Test save passes notes to upsert_config."""
        mock_db.upsert_config.return_value = {"version": 1}
        basic_workflow.db = mock_db
        basic_workflow.save(notes="Initial version")
        call_args = mock_db.upsert_config.call_args
        assert call_args.kwargs["notes"] == "Initial version"
    def test_save_without_db_raises_error(self, basic_workflow):
        """Test save raises error when no db is available."""
        with pytest.raises(ValueError, match="Db not initialized or provided"):
            basic_workflow.save()
    def test_save_with_steps_saves_agents(self, mock_db):
        """Test save saves agent executors in steps."""
        from agno.agent.agent import Agent
        from agno.workflow.workflow import Step
        mock_db.upsert_config.return_value = {"version": 1}
        # Mock an agent step executor so we can observe the cascaded save call.
        mock_agent = MagicMock(spec=Agent)
        mock_agent.id = "step-agent"
        mock_agent.save.return_value = 3
        mock_step = MagicMock(spec=Step)
        mock_step.agent = mock_agent
        mock_step.team = None
        mock_step.to_dict.return_value = {"name": "step-1"}
        mock_step.get_links.return_value = [{"child_component_id": "step-agent", "link_kind": "executor"}]
        workflow = Workflow(
            id="agent-workflow",
            name="Agent Workflow",
            steps=[mock_step],
            db=mock_db,
        )
        workflow.save()
        # Agent should be saved
        mock_agent.save.assert_called_once()
    def test_save_returns_none_on_error(self, basic_workflow, mock_db):
        """Test save returns None when database operation fails."""
        # save() is expected to swallow db errors and signal failure with None.
        mock_db.upsert_component.side_effect = Exception("Database error")
        basic_workflow.db = mock_db
        version = basic_workflow.save()
        assert version is None
# =============================================================================
# load() Tests
# =============================================================================
class TestWorkflowLoad:
    """Tests for the Workflow.load() class method."""

    def test_load_returns_workflow(self, mock_db, sample_workflow_config):
        """A stored config is rehydrated into a Workflow."""
        mock_db.get_config.return_value = {"config": sample_workflow_config}
        loaded = Workflow.load(id="sample-workflow", db=mock_db)
        assert loaded is not None
        assert loaded.id == "sample-workflow"
        assert loaded.name == "Sample Workflow"

    def test_load_with_version(self, mock_db):
        """An explicit version is forwarded to get_config."""
        mock_db.get_config.return_value = {"config": {"id": "versioned-workflow", "name": "V2 Workflow"}}
        Workflow.load(id="versioned-workflow", db=mock_db, version=2)
        mock_db.get_config.assert_called_once_with(component_id="versioned-workflow", label=None, version=2)

    def test_load_with_label(self, mock_db):
        """An explicit label is forwarded to get_config."""
        mock_db.get_config.return_value = {"config": {"id": "labeled-workflow", "name": "Production Workflow"}}
        Workflow.load(id="labeled-workflow", db=mock_db, label="production")
        mock_db.get_config.assert_called_once_with(component_id="labeled-workflow", label="production", version=None)

    def test_load_returns_none_when_not_found(self, mock_db):
        """Missing component -> None rather than an exception."""
        mock_db.get_config.return_value = None
        assert Workflow.load(id="nonexistent-workflow", db=mock_db) is None

    def test_load_returns_none_when_config_missing(self, mock_db):
        """A row with a null config payload also yields None."""
        mock_db.get_config.return_value = {"config": None}
        assert Workflow.load(id="empty-config-workflow", db=mock_db) is None

    def test_load_sets_db_on_workflow(self, mock_db):
        """The db handle used for loading is attached to the result."""
        mock_db.get_config.return_value = {"config": {"id": "db-workflow", "name": "DB Workflow"}}
        loaded = Workflow.load(id="db-workflow", db=mock_db)
        assert loaded is not None
        assert loaded.db == mock_db
# =============================================================================
# delete() Tests
# =============================================================================
class TestWorkflowDelete:
    """Tests for the Workflow.delete() method."""

    def test_delete_calls_delete_component(self, basic_workflow, mock_db):
        """Default delete is a soft delete keyed on the workflow id."""
        mock_db.delete_component.return_value = True
        basic_workflow.db = mock_db
        outcome = basic_workflow.delete()
        mock_db.delete_component.assert_called_once_with(component_id="test-workflow", hard_delete=False)
        assert outcome is True

    def test_delete_with_hard_delete(self, basic_workflow, mock_db):
        """hard_delete=True is forwarded to the database layer."""
        mock_db.delete_component.return_value = True
        basic_workflow.db = mock_db
        outcome = basic_workflow.delete(hard_delete=True)
        mock_db.delete_component.assert_called_once_with(component_id="test-workflow", hard_delete=True)
        assert outcome is True

    def test_delete_with_explicit_db(self, basic_workflow, mock_db):
        """A db passed at call time is used instead of workflow.db."""
        mock_db.delete_component.return_value = True
        outcome = basic_workflow.delete(db=mock_db)
        mock_db.delete_component.assert_called_once()
        assert outcome is True

    def test_delete_without_db_raises_error(self, basic_workflow):
        """Deleting with no db configured raises a descriptive ValueError."""
        with pytest.raises(ValueError, match="Db not initialized or provided"):
            basic_workflow.delete()

    def test_delete_returns_false_on_failure(self, basic_workflow, mock_db):
        """A failed delete is reported as False, not raised."""
        mock_db.delete_component.return_value = False
        basic_workflow.db = mock_db
        assert basic_workflow.delete() is False
# =============================================================================
# get_workflow_by_id() Tests
# =============================================================================
class TestGetWorkflowById:
    """Tests for get_workflow_by_id() helper function."""
    def test_get_workflow_by_id_returns_workflow(self, mock_db):
        """Test get_workflow_by_id returns workflow from database."""
        mock_db.get_config.return_value = {
            "config": {"id": "found-workflow", "name": "Found Workflow"},
            "version": 1,
        }
        mock_db.get_links.return_value = []
        workflow = get_workflow_by_id(db=mock_db, id="found-workflow")
        assert workflow is not None
        assert workflow.id == "found-workflow"
        assert workflow.name == "Found Workflow"
    def test_get_workflow_by_id_with_version(self, mock_db):
        """Test get_workflow_by_id retrieves specific version."""
        mock_db.get_config.return_value = {
            "config": {"id": "versioned", "name": "V3"},
            "version": 3,
        }
        mock_db.get_links.return_value = []
        get_workflow_by_id(db=mock_db, id="versioned", version=3)
        # Note the kwarg order differs from Workflow.load(): version before label.
        mock_db.get_config.assert_called_once_with(component_id="versioned", version=3, label=None)
    def test_get_workflow_by_id_with_label(self, mock_db):
        """Test get_workflow_by_id retrieves labeled version."""
        mock_db.get_config.return_value = {
            "config": {"id": "labeled", "name": "Staging"},
            "version": 2,
        }
        mock_db.get_links.return_value = []
        get_workflow_by_id(db=mock_db, id="labeled", label="staging")
        mock_db.get_config.assert_called_once_with(component_id="labeled", version=None, label="staging")
    def test_get_workflow_by_id_fetches_links(self, mock_db):
        """Test get_workflow_by_id fetches links for the workflow version."""
        mock_db.get_config.return_value = {
            "config": {"id": "linked-workflow", "name": "Linked"},
            "version": 5,
        }
        mock_db.get_links.return_value = [{"child_component_id": "agent-1"}]
        get_workflow_by_id(db=mock_db, id="linked-workflow")
        # Links are fetched for the exact version returned by get_config.
        mock_db.get_links.assert_called_once_with(component_id="linked-workflow", version=5)
    def test_get_workflow_by_id_returns_none_when_not_found(self, mock_db):
        """Test get_workflow_by_id returns None when not found."""
        mock_db.get_config.return_value = None
        workflow = get_workflow_by_id(db=mock_db, id="missing")
        assert workflow is None
    def test_get_workflow_by_id_sets_db(self, mock_db):
        """Test get_workflow_by_id sets db on returned workflow via registry."""
        # The db is set via registry lookup when config contains a serialized db reference
        mock_db.id = "test-db"
        mock_db.get_config.return_value = {
            "config": {
                "id": "db-workflow",
                "name": "DB Workflow",
                "db": {"type": "postgres", "id": "test-db"},
            },
            "version": 1,
        }
        mock_db.get_links.return_value = []
        # Create registry with the mock db registered
        registry = Registry(dbs=[mock_db])
        workflow = get_workflow_by_id(db=mock_db, id="db-workflow", registry=registry)
        assert workflow is not None
        # The serialized db reference resolves back to the registered instance.
        assert workflow.db == mock_db
    def test_get_workflow_by_id_handles_error(self, mock_db):
        """Test get_workflow_by_id returns None on error."""
        # Db exceptions are swallowed; the helper signals failure with None.
        mock_db.get_config.side_effect = Exception("DB error")
        workflow = get_workflow_by_id(db=mock_db, id="error-workflow")
        assert workflow is None
# =============================================================================
# get_workflows() Tests
# =============================================================================
class TestGetWorkflows:
    """Tests for get_workflows() helper function."""
    def test_get_workflows_returns_list(self, mock_db):
        """Test get_workflows returns list of workflows."""
        # list_components returns (rows, pagination) — pagination unused here.
        mock_db.list_components.return_value = (
            [
                {"component_id": "workflow-1"},
                {"component_id": "workflow-2"},
            ],
            None,
        )
        # One get_config call per listed component, in order.
        mock_db.get_config.side_effect = [
            {"config": {"id": "workflow-1", "name": "Workflow 1"}},
            {"config": {"id": "workflow-2", "name": "Workflow 2"}},
        ]
        workflows = get_workflows(db=mock_db)
        assert len(workflows) == 2
        assert workflows[0].id == "workflow-1"
        assert workflows[1].id == "workflow-2"
    def test_get_workflows_filters_by_type(self, mock_db):
        """Test get_workflows filters by WORKFLOW component type."""
        mock_db.list_components.return_value = ([], None)
        get_workflows(db=mock_db)
        mock_db.list_components.assert_called_once_with(component_type=ComponentType.WORKFLOW)
    def test_get_workflows_returns_empty_list_on_error(self, mock_db):
        """Test get_workflows returns empty list on error."""
        # Db exceptions are swallowed; the helper signals failure with [].
        mock_db.list_components.side_effect = Exception("DB error")
        workflows = get_workflows(db=mock_db)
        assert workflows == []
    def test_get_workflows_skips_invalid_configs(self, mock_db):
        """Test get_workflows skips workflows with invalid configs."""
        mock_db.list_components.return_value = (
            [
                {"component_id": "valid-workflow"},
                {"component_id": "invalid-workflow"},
            ],
            None,
        )
        mock_db.get_config.side_effect = [
            {"config": {"id": "valid-workflow", "name": "Valid"}},
            {"config": None},  # Invalid config
        ]
        workflows = get_workflows(db=mock_db)
        # The null-config entry is dropped instead of raising.
        assert len(workflows) == 1
        assert workflows[0].id == "valid-workflow"
    def test_get_workflows_sets_db_on_all_workflows(self, mock_db):
        """Test get_workflows sets db on all returned workflows via registry."""
        # The db is set via registry lookup when config contains a serialized db reference
        mock_db.id = "test-db"
        mock_db.list_components.return_value = (
            [{"component_id": "workflow-1"}],
            None,
        )
        mock_db.get_config.return_value = {
            "config": {
                "id": "workflow-1",
                "name": "Workflow 1",
                "db": {"type": "postgres", "id": "test-db"},
            }
        }
        # Create registry with the mock db registered
        registry = Registry(dbs=[mock_db])
        workflows = get_workflows(db=mock_db, registry=registry)
        assert len(workflows) == 1
        assert workflows[0].db == mock_db
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_workflow_config.py",
"license": "Apache License 2.0",
"lines": 516,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/os/middleware/trailing_slash.py | from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response
class TrailingSlashMiddleware(BaseHTTPMiddleware):
    """
    Normalizes request paths by removing trailing slashes.

    With this middleware both /agents and /agents/ hit the same route with
    no redirect round-trip. Both 'path' and 'raw_path' in the ASGI scope
    are rewritten so downstream routing sees the normalized value.
    """

    async def dispatch(self, request: Request, call_next) -> Response:
        # Read the raw path out of the ASGI scope.
        incoming = request.scope.get("path", "")
        # Leave the root path "/" alone; normalize everything else.
        if incoming.endswith("/") and incoming != "/":
            stripped = incoming.rstrip("/")
            # rstrip removes every trailing slash; guard against an empty result.
            if stripped:
                request.scope["path"] = stripped
                # Keep raw_path in sync per the ASGI spec.
                request.scope["raw_path"] = stripped.encode("utf-8")
        return await call_next(request)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/middleware/trailing_slash.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/os/test_trailing_slash_middleware.py | """Unit tests for TrailingSlashMiddleware."""
import pytest
from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route
from starlette.testclient import TestClient
from agno.os.middleware.trailing_slash import TrailingSlashMiddleware
def homepage(request):
    """Handler used for the root and health routes; echoes the request path."""
    body = f"Path: {request.url.path}"
    return PlainTextResponse(body)
def agents_handler(request):
    """Handler for the /agents route; echoes the request path."""
    body = f"Agents - Path: {request.url.path}"
    return PlainTextResponse(body)
def agent_runs_handler(request):
    """Handler for the per-agent runs route; echoes the agent_id path param."""
    which_agent = request.path_params.get("agent_id", "unknown")
    return PlainTextResponse(f"Agent {which_agent} runs - Path: {request.url.path}")
@pytest.fixture
def app():
    """Build a Starlette app wired with TrailingSlashMiddleware for testing."""
    route_table = [
        Route("/", homepage),
        Route("/agents", agents_handler),
        Route("/agents/{agent_id}/runs", agent_runs_handler),
        Route("/health", homepage),
    ]
    test_app = Starlette(routes=route_table)
    test_app.add_middleware(TrailingSlashMiddleware)
    return test_app
@pytest.fixture
def client(app):
    """Wrap the fixture app in a TestClient that surfaces HTTP error codes."""
    return TestClient(app, raise_server_exceptions=False)
def test_root_path_not_modified(client):
    """The bare root path '/' must pass through untouched."""
    res = client.get("/")
    assert res.status_code == 200
    assert "Path: /" in res.text
def test_path_without_trailing_slash(client):
    """A slash-free path is served normally."""
    res = client.get("/agents")
    assert res.status_code == 200
    assert "Path: /agents" in res.text
def test_path_with_trailing_slash_stripped(client):
    """A trailing slash is stripped so the request still matches /agents."""
    res = client.get("/agents/")
    assert res.status_code == 200
    # The handler observes the normalized path, without the trailing slash.
    assert "Path: /agents" in res.text
def test_health_endpoint_with_trailing_slash(client):
    """/health/ is normalized and served by the /health route."""
    res = client.get("/health/")
    assert res.status_code == 200
    assert "Path: /health" in res.text
def test_path_params_with_trailing_slash(client):
    """Routes with path parameters still match after slash stripping."""
    res = client.get("/agents/test-agent-123/runs/")
    assert res.status_code == 200
    assert "Agent test-agent-123 runs" in res.text
    assert "Path: /agents/test-agent-123/runs" in res.text
def test_path_params_without_trailing_slash(client):
    """Routes with path parameters work unchanged without a trailing slash."""
    res = client.get("/agents/test-agent-123/runs")
    assert res.status_code == 200
    assert "Agent test-agent-123 runs" in res.text
def test_post_request_with_trailing_slash(client):
    """POST /agents/ reaches the route (405 for the verb), not a redirect."""
    res = client.post("/agents/")
    # Only GET is registered, so 405 proves routing matched without a 307.
    assert res.status_code == 405  # Method not allowed, not 307 redirect
def test_no_redirect_status_code(client):
    """Trailing-slash requests are served directly, never 307-redirected."""
    res = client.get("/agents/")
    assert res.status_code == 200
    # Real handler output, not a redirect body.
    assert "Agents" in res.text
def test_multiple_trailing_slashes(client):
    """rstrip-based normalization removes every trailing slash, not just one."""
    res = client.get("/agents///")
    assert res.status_code == 200
    assert "Path: /agents" in res.text
def test_query_string_preserved(app):
    """Stripping the trailing slash must not drop the query string."""

    def query_handler(request):
        return PlainTextResponse(f"Query: {request.query_params.get('foo', 'none')}")

    app.routes.append(Route("/search", query_handler))
    res = TestClient(app, raise_server_exceptions=False).get("/search/?foo=bar")
    assert res.status_code == 200
    assert "Query: bar" in res.text
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_trailing_slash_middleware.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/os/test_per_request_isolation.py | """Integration tests for per-request isolation feature.
Per-request isolation is the default behavior in AgentOS. Each request
gets a fresh instance of the agent/team/workflow to prevent state
contamination between concurrent requests.
"""
import asyncio
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List
from unittest.mock import AsyncMock, patch
import pytest
from fastapi.testclient import TestClient
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.os.app import AgentOS
from agno.team import Team
from agno.workflow import Workflow
from agno.workflow.step import Step
@pytest.fixture
def test_agent():
    """Yield a bare agent with a fixed id for route construction."""
    return Agent(
        name="test-agent",
        id="test-agent-id",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
@pytest.fixture
def test_agent_with_metadata():
    """Yield an agent seeded with mutable metadata for isolation checks."""
    return Agent(
        name="test-agent-metadata",
        id="test-agent-metadata-id",
        model=OpenAIChat(id="gpt-4o-mini"),
        metadata={"request_count": 0, "user_id": None},
    )
@pytest.fixture
def test_team(test_agent):
    """Yield a single-member team built around the test_agent fixture."""
    return Team(
        name="test-team",
        id="test-team-id",
        members=[test_agent],
        model=OpenAIChat(id="gpt-4o-mini"),
    )
@pytest.fixture
def test_workflow():
    """Yield a one-step workflow whose step executor is an agent with metadata."""
    step_agent = Agent(
        name="workflow-agent",
        id="workflow-agent-id",
        model=OpenAIChat(id="gpt-4o-mini"),
        metadata={"step_executions": 0},
    )
    return Workflow(
        name="test-workflow",
        id="test-workflow-id",
        steps=[Step(name="agent-step", agent=step_agent)],
    )
# ============================================================================
# Basic AgentOS Per-Request Isolation Tests
# ============================================================================
class TestAgentOSPerRequestIsolation:
    """AgentOS should isolate agent state per request (default behavior)."""

    def test_agent_run_creates_fresh_instance(self, test_agent):
        """Two sequential run requests must each produce their own run_id."""
        agent_os = AgentOS(agents=[test_agent])
        client = TestClient(agent_os.get_app())

        class FakeRunOutput:
            def to_dict(self):
                return {"run_id": str(uuid.uuid4())}

        with patch.object(Agent, "arun", new_callable=AsyncMock) as mocked_arun:
            mocked_arun.return_value = FakeRunOutput()
            first = client.post(
                f"/agents/{test_agent.id}/runs",
                data={"message": "Request 1", "stream": "false"},
            )
            second = client.post(
                f"/agents/{test_agent.id}/runs",
                data={"message": "Request 2", "stream": "false"},
            )
        assert first.status_code == 200
        assert second.status_code == 200
        # Distinct run ids show each request got its own execution.
        assert first.json()["run_id"] != second.json()["run_id"]
# ============================================================================
# Metadata Isolation Tests
# ============================================================================
class TestMetadataIsolation:
    """Tests for metadata isolation between requests."""

    def test_metadata_not_shared_between_requests(self, test_agent):
        """Metadata changes in one request should not affect others."""
        test_agent.metadata = {"initial": "value"}
        client = TestClient(AgentOS(agents=[test_agent]).get_app())

        class MockRunOutput:
            def to_dict(self):
                return {"run_id": str(uuid.uuid4())}

        with patch.object(Agent, "arun", new_callable=AsyncMock) as mock_arun:
            mock_arun.return_value = MockRunOutput()
            resp = client.post(
                f"/agents/{test_agent.id}/runs",
                data={"message": "Hello", "stream": "false"},
            )
            assert resp.status_code == 200
        # The template agent's metadata must survive the request untouched.
        assert test_agent.metadata == {"initial": "value"}

    def test_metadata_mutation_during_request_isolated(self, test_agent_with_metadata):
        """Metadata mutated during a request should not leak to other requests."""
        client = TestClient(AgentOS(agents=[test_agent_with_metadata]).get_app())
        # Snapshot of the metadata each per-request instance started with.
        observed: List[Dict[str, Any]] = []

        class MockRunOutput:
            def to_dict(self):
                return {"run_id": str(uuid.uuid4())}

        async def mock_arun_with_mutation(self, *args, **kwargs):
            observed.append(dict(self.metadata) if self.metadata else {})
            # Simulate a real run mutating its own instance metadata.
            if self.metadata:
                self.metadata["request_count"] = self.metadata.get("request_count", 0) + 1
                self.metadata["user_id"] = kwargs.get("user_id", "unknown")
            return MockRunOutput()

        with patch.object(Agent, "arun", mock_arun_with_mutation):
            for i in range(3):
                client.post(
                    f"/agents/{test_agent_with_metadata.id}/runs",
                    data={"message": f"Request {i}", "stream": "false", "user_id": f"user_{i}"},
                )

        # The template keeps its baseline values...
        assert test_agent_with_metadata.metadata["request_count"] == 0
        assert test_agent_with_metadata.metadata["user_id"] is None
        # ...and every request began from a fresh request_count of 0.
        assert all(snapshot.get("request_count") == 0 for snapshot in observed)
# ============================================================================
# Team Isolation Tests
# ============================================================================
class TestTeamIsolation:
    """Tests for Team per-request isolation."""

    def test_team_creates_fresh_instance(self, test_team):
        """Each request should use a fresh team instance."""
        client = TestClient(AgentOS(teams=[test_team]).get_app())

        class MockRunOutput:
            def to_dict(self):
                return {"run_id": str(uuid.uuid4())}

        with patch.object(Team, "arun", new_callable=AsyncMock) as mock_arun:
            mock_arun.return_value = MockRunOutput()
            resp = client.post(
                f"/teams/{test_team.id}/runs",
                data={"message": "Hello", "stream": "false"},
            )
            assert resp.status_code == 200

    def test_team_member_metadata_isolated(self):
        """Team member metadata should be isolated between requests."""
        member = Agent(
            id="member-id",
            name="member",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"tasks_completed": 0},
        )
        team = Team(
            id="team-id",
            name="team",
            model=OpenAIChat(id="gpt-4o-mini"),
            members=[member],
        )
        client = TestClient(AgentOS(teams=[team]).get_app())

        class MockRunOutput:
            def to_dict(self):
                return {"run_id": str(uuid.uuid4())}

        with patch.object(Team, "arun", new_callable=AsyncMock) as mock_arun:
            mock_arun.return_value = MockRunOutput()
            for _ in range(3):
                client.post(
                    f"/teams/{team.id}/runs",
                    data={"message": "Hello", "stream": "false"},
                )
        # The template member never sees any mutation from the requests.
        assert member.metadata["tasks_completed"] == 0
# ============================================================================
# Workflow Isolation Tests
# ============================================================================
class TestWorkflowIsolation:
    """Tests for Workflow per-request isolation."""

    def test_workflow_agent_step_isolated(self, test_workflow):
        """Agents inside workflow steps should be isolated."""
        client = TestClient(AgentOS(workflows=[test_workflow]).get_app())

        class MockRunOutput:
            content = "Test response"
            run_id = str(uuid.uuid4())
            status = "completed"

            def to_dict(self):
                return {"run_id": self.run_id, "content": self.content}

        with patch.object(Workflow, "arun", new_callable=AsyncMock) as mock_arun:
            mock_arun.return_value = MockRunOutput()
            resp = client.post(
                f"/workflows/{test_workflow.id}/runs",
                data={"message": "Hello", "stream": "false"},
            )
            assert resp.status_code == 200
        # The template workflow's agent must be untouched by the request.
        assert test_workflow.steps[0].agent.metadata["step_executions"] == 0
# ============================================================================
# Concurrent Request Simulation Tests
# ============================================================================
class TestConcurrentRequestIsolation:
    """Tests simulating concurrent requests to verify isolation."""

    def test_concurrent_agent_requests_isolated(self, test_agent_with_metadata):
        """Concurrent requests should not interfere with each other."""
        os = AgentOS(agents=[test_agent_with_metadata])
        app = os.get_app()
        # Cross-thread collectors; list.append is atomic under CPython's GIL.
        results: List[Dict[str, Any]] = []
        errors: List[Exception] = []

        class MockRunOutput:
            def __init__(self, user_id):
                self.user_id = user_id
                self.run_id = str(uuid.uuid4())

            def to_dict(self):
                return {"run_id": self.run_id, "user_id": self.user_id}

        # Async replacement for Agent.arun; `self` is the per-request agent copy.
        async def mock_arun_with_delay(self, *args, **kwargs):
            user_id = kwargs.get("user_id", "unknown")
            # Simulate some processing time
            await asyncio.sleep(0.01)
            # Mutate metadata
            if self.metadata:
                self.metadata["user_id"] = user_id
            return MockRunOutput(user_id)

        # Each worker thread uses its own TestClient against the shared app.
        def make_request(user_id: str):
            try:
                with TestClient(app) as client:
                    response = client.post(
                        f"/agents/{test_agent_with_metadata.id}/runs",
                        data={"message": f"Hello from {user_id}", "stream": "false", "user_id": user_id},
                    )
                    results.append({"user_id": user_id, "status": response.status_code})
            except Exception as e:
                errors.append(e)

        with patch.object(Agent, "arun", mock_arun_with_delay):
            # Simulate concurrent requests using threads
            with ThreadPoolExecutor(max_workers=5) as executor:
                futures = [executor.submit(make_request, f"user_{i}") for i in range(5)]
                for future in futures:
                    # Propagate any worker exception not caught by make_request.
                    future.result()
        # All requests should succeed
        assert len(errors) == 0
        assert all(r["status"] == 200 for r in results)
        # Original agent metadata should be unchanged
        assert test_agent_with_metadata.metadata["user_id"] is None
# ============================================================================
# Shared Resources Tests
# ============================================================================
class TestSharedResources:
    """Tests to verify heavy resources are shared, not copied."""

    def test_model_configuration_preserved(self):
        """Model configuration should be preserved in copies."""
        original = Agent(name="test-agent", id="test-id", model=OpenAIChat(id="gpt-4o-mini"))
        clone = original.deep_copy()
        assert clone.model is not None
        assert clone.model.id == "gpt-4o-mini"

    def test_agent_db_shared_in_copy(self):
        """Database should be shared (not copied) between agent instances."""

        class MockDb:
            def __init__(self):
                # Unique token so identity vs. copy can be distinguished.
                self.instance_id = uuid.uuid4()

        shared_db = MockDb()
        clone = Agent(name="test-agent", id="test-id", db=shared_db).deep_copy()
        # Identity check: the copy must point at the very same DB object.
        assert clone.db is shared_db
        assert clone.db.instance_id == shared_db.instance_id

    def test_workflow_db_shared_in_copy(self):
        """Database should be shared (not copied) between workflow instances."""

        class MockDb:
            def __init__(self):
                self.instance_id = uuid.uuid4()

        shared_db = MockDb()
        clone = Workflow(name="test-workflow", id="test-id", db=shared_db).deep_copy()
        assert clone.db is shared_db
        assert clone.db.instance_id == shared_db.instance_id
# ============================================================================
# Internal State Reset Tests
# ============================================================================
class TestInternalStateReset:
    """Tests to verify internal mutable state is properly reset."""

    def test_cached_session_reset_on_copy(self):
        """_cached_session should be None in copied agent."""
        source = Agent(name="test-agent", id="test-id")
        source._cached_session = "cached_session_data"  # type: ignore
        clone = source.deep_copy()
        # The copy starts clean; the template keeps its cached value.
        assert clone._cached_session is None
        assert source._cached_session == "cached_session_data"

    def test_mcp_tools_list_reset_on_copy(self):
        """_mcp_tools_initialized_on_run should be empty in copied agent."""
        source = Agent(name="test-agent", id="test-id")
        source._mcp_tools_initialized_on_run = ["tool1", "tool2"]
        clone = source.deep_copy()
        assert clone._mcp_tools_initialized_on_run == []
        assert source._mcp_tools_initialized_on_run == ["tool1", "tool2"]

    def test_connectable_tools_list_reset_on_copy(self):
        """_connectable_tools_initialized_on_run should be empty in copied agent."""
        source = Agent(name="test-agent", id="test-id")
        source._connectable_tools_initialized_on_run = ["conn1", "conn2"]
        clone = source.deep_copy()
        assert clone._connectable_tools_initialized_on_run == []
        assert source._connectable_tools_initialized_on_run == ["conn1", "conn2"]
# ============================================================================
# Workflow Deep Copy Integration Tests
# ============================================================================
class TestWorkflowDeepCopyIntegration:
    """Integration tests for workflow deep copying."""

    def test_workflow_with_nested_steps_isolation(self):
        """Workflow with nested steps should have all agents isolated."""
        from agno.workflow.loop import Loop
        from agno.workflow.parallel import Parallel

        agent1 = Agent(
            name="agent-1",
            id="agent-1-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"counter": 0},
        )
        agent2 = Agent(
            name="agent-2",
            id="agent-2-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"counter": 0},
        )
        # Structure under test: Workflow > Loop > Parallel > two agent Steps.
        workflow = Workflow(
            name="nested-workflow",
            id="nested-workflow-id",
            steps=[
                Loop(
                    name="loop",
                    max_iterations=2,
                    steps=[
                        Parallel(
                            Step(name="step-1", agent=agent1),
                            Step(name="step-2", agent=agent2),
                            name="parallel",
                        )
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        # Navigate to nested agents
        loop_copy = copy.steps[0]
        parallel_copy = loop_copy.steps[0]
        agent1_copy = parallel_copy.steps[0].agent
        agent2_copy = parallel_copy.steps[1].agent
        # Verify agents are different instances
        assert agent1_copy is not agent1
        assert agent2_copy is not agent2
        # Modify copied agents' metadata
        agent1_copy.metadata["counter"] = 100
        agent2_copy.metadata["counter"] = 200
        # Original agents should be unchanged
        assert agent1.metadata["counter"] == 0
        assert agent2.metadata["counter"] == 0

    def test_workflow_multiple_copies_independent(self):
        """Multiple workflow copies should be completely independent."""
        agent = Agent(
            name="shared-agent",
            id="shared-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"copy_id": None},
        )
        workflow = Workflow(
            name="template-workflow",
            id="template-workflow-id",
            steps=[Step(name="step", agent=agent)],
        )
        # Create multiple copies
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        copy3 = workflow.deep_copy()
        # Modify each copy's agent
        copy1.steps[0].agent.metadata["copy_id"] = "copy-1"
        copy2.steps[0].agent.metadata["copy_id"] = "copy-2"
        copy3.steps[0].agent.metadata["copy_id"] = "copy-3"
        # All copies should have different metadata
        assert copy1.steps[0].agent.metadata["copy_id"] == "copy-1"
        assert copy2.steps[0].agent.metadata["copy_id"] == "copy-2"
        assert copy3.steps[0].agent.metadata["copy_id"] == "copy-3"
        # Original should be unchanged
        assert agent.metadata["copy_id"] is None
        # All agents should be different instances
        assert copy1.steps[0].agent is not copy2.steps[0].agent
        assert copy2.steps[0].agent is not copy3.steps[0].agent
        assert copy1.steps[0].agent is not agent
# ============================================================================
# Custom Executor Step Tests
# ============================================================================
class TestCustomExecutorStepIsolation:
    """Tests for workflows with custom executor (function) steps.

    Contract under test: deep_copy() clones agents/teams referenced by Steps,
    but plain-function executors, evaluators, and selectors are shared by
    reference across copies.
    """

    def test_function_executor_step_preserved(self):
        """Function executors should be preserved (same reference) in copies."""
        from agno.workflow.types import StepInput, StepOutput

        def my_custom_executor(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Custom result")

        workflow = Workflow(
            name="function-workflow",
            id="function-workflow-id",
            steps=[Step(name="custom-step", executor=my_custom_executor)],
        )
        copy = workflow.deep_copy()
        # Function reference should be preserved (functions are shared, not copied)
        assert copy.steps[0].executor is my_custom_executor

    def test_mixed_agent_and_function_steps(self):
        """Workflow with both agent and function steps should copy correctly."""
        from agno.workflow.types import StepInput, StepOutput

        def preprocessing_step(step_input: StepInput) -> StepOutput:
            return StepOutput(content=f"Preprocessed: {step_input.input}")

        def postprocessing_step(step_input: StepInput) -> StepOutput:
            return StepOutput(content=f"Postprocessed: {step_input.previous_step_content}")

        agent = Agent(
            name="middle-agent",
            id="middle-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"processed": False},
        )
        workflow = Workflow(
            name="mixed-workflow",
            id="mixed-workflow-id",
            steps=[
                Step(name="preprocess", executor=preprocessing_step),
                Step(name="agent-step", agent=agent),
                Step(name="postprocess", executor=postprocessing_step),
            ],
        )
        copy = workflow.deep_copy()
        # Function executors should be preserved
        assert copy.steps[0].executor is preprocessing_step
        assert copy.steps[2].executor is postprocessing_step
        # Agent should be copied
        assert copy.steps[1].agent is not agent
        assert copy.steps[1].agent.id == agent.id
        # Modify copy's agent
        copy.steps[1].agent.metadata["processed"] = True
        # Original should be unchanged
        assert agent.metadata["processed"] is False

    def test_function_step_with_closure_state(self):
        """Function executors with closure state - functions are shared."""
        from agno.workflow.types import StepInput, StepOutput

        # Create a function with closure state
        external_counter = {"count": 0}

        def counter_step(step_input: StepInput) -> StepOutput:
            external_counter["count"] += 1
            return StepOutput(content=f"Count: {external_counter['count']}")

        workflow = Workflow(
            name="closure-workflow",
            id="closure-workflow-id",
            steps=[Step(name="counter", executor=counter_step)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Both copies share the same function reference
        # (so closure state like external_counter is shared too).
        assert copy1.steps[0].executor is counter_step
        assert copy2.steps[0].executor is counter_step
        assert copy1.steps[0].executor is copy2.steps[0].executor

    def test_workflow_callable_steps_preserved(self):
        """Workflow with callable steps (Workflows 1.0 style) should preserve function."""
        from agno.workflow.types import WorkflowExecutionInput

        def my_workflow_function(workflow: Workflow, execution_input: WorkflowExecutionInput):
            return f"Executed with: {execution_input.input}"

        # Here `steps` is a single callable rather than a list of Steps.
        workflow = Workflow(
            name="callable-workflow",
            id="callable-workflow-id",
            steps=my_workflow_function,
        )
        copy = workflow.deep_copy()
        # Callable steps should be preserved (same reference)
        assert copy.steps is my_workflow_function

    def test_function_executor_in_parallel(self):
        """Function executors in Parallel steps should be preserved."""
        from agno.workflow.parallel import Parallel
        from agno.workflow.types import StepInput, StepOutput

        def func_a(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Result A")

        def func_b(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Result B")

        workflow = Workflow(
            name="parallel-func-workflow",
            id="parallel-func-workflow-id",
            steps=[
                Parallel(
                    Step(name="func-a", executor=func_a),
                    Step(name="func-b", executor=func_b),
                    name="parallel-funcs",
                )
            ],
        )
        copy = workflow.deep_copy()
        parallel_copy = copy.steps[0]
        assert parallel_copy.steps[0].executor is func_a
        assert parallel_copy.steps[1].executor is func_b

    def test_function_executor_in_loop(self):
        """Function executors in Loop steps should be preserved."""
        from agno.workflow.loop import Loop
        from agno.workflow.types import StepInput, StepOutput

        iteration_tracker = {"iterations": 0}

        def loop_body(step_input: StepInput) -> StepOutput:
            iteration_tracker["iterations"] += 1
            return StepOutput(content=f"Iteration {iteration_tracker['iterations']}")

        def end_condition(outputs):
            return len(outputs) >= 3

        workflow = Workflow(
            name="loop-func-workflow",
            id="loop-func-workflow-id",
            steps=[
                Loop(
                    name="loop",
                    max_iterations=5,
                    end_condition=end_condition,
                    steps=[Step(name="loop-body", executor=loop_body)],
                )
            ],
        )
        copy = workflow.deep_copy()
        loop_copy = copy.steps[0]
        # Both the body executor and the end_condition callable are shared;
        # plain config values are carried over.
        assert loop_copy.steps[0].executor is loop_body
        assert loop_copy.end_condition is end_condition
        assert loop_copy.max_iterations == 5

    def test_function_executor_in_condition(self):
        """Function executors in Condition steps should be preserved."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import StepInput, StepOutput

        def evaluator(step_input: StepInput) -> bool:
            return step_input.input == "proceed"

        def conditional_step(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Condition met!")

        workflow = Workflow(
            name="condition-func-workflow",
            id="condition-func-workflow-id",
            steps=[
                Condition(
                    name="condition",
                    evaluator=evaluator,
                    steps=[Step(name="conditional", executor=conditional_step)],
                )
            ],
        )
        copy = workflow.deep_copy()
        condition_copy = copy.steps[0]
        assert condition_copy.evaluator is evaluator
        assert condition_copy.steps[0].executor is conditional_step

    def test_function_executor_in_router(self):
        """Function executors in Router steps should be preserved."""
        from agno.workflow.router import Router
        from agno.workflow.types import StepInput, StepOutput

        def route_a(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Route A")

        def route_b(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Route B")

        def selector(step_input: StepInput):
            if "a" in str(step_input.input).lower():
                return [Step(name="route-a", executor=route_a)]
            return [Step(name="route-b", executor=route_b)]

        workflow = Workflow(
            name="router-func-workflow",
            id="router-func-workflow-id",
            steps=[
                Router(
                    name="router",
                    selector=selector,
                    choices=[
                        Step(name="choice-a", executor=route_a),
                        Step(name="choice-b", executor=route_b),
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        router_copy = copy.steps[0]
        # Selector and the executors inside the choice Steps stay shared.
        assert router_copy.selector is selector
        assert router_copy.choices[0].executor is route_a
        assert router_copy.choices[1].executor is route_b

    def test_mixed_function_agent_team_in_parallel(self):
        """Parallel with function, agent, and team steps should copy correctly."""
        from agno.workflow.parallel import Parallel
        from agno.workflow.types import StepInput, StepOutput

        def func_step(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Function result")

        agent = Agent(
            name="parallel-agent",
            id="parallel-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"type": "agent"},
        )
        member = Agent(
            name="team-member",
            id="team-member-id",
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        team = Team(
            name="parallel-team",
            id="parallel-team-id",
            members=[member],
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        workflow = Workflow(
            name="mixed-parallel-workflow",
            id="mixed-parallel-workflow-id",
            steps=[
                Parallel(
                    Step(name="func", executor=func_step),
                    Step(name="agent", agent=agent),
                    Step(name="team", team=team),
                    name="mixed-parallel",
                )
            ],
        )
        copy = workflow.deep_copy()
        parallel_copy = copy.steps[0]
        # Function should be same reference
        assert parallel_copy.steps[0].executor is func_step
        # Agent should be different instance
        assert parallel_copy.steps[1].agent is not agent
        assert parallel_copy.steps[1].agent.id == agent.id
        # Team should be different instance
        assert parallel_copy.steps[2].team is not team
        assert parallel_copy.steps[2].team.id == team.id
        # Team member should also be different instance
        assert parallel_copy.steps[2].team.members[0] is not member

    def test_deeply_nested_function_executors(self):
        """Deeply nested function executors should all be preserved."""
        from agno.workflow.condition import Condition
        from agno.workflow.loop import Loop
        from agno.workflow.parallel import Parallel
        from agno.workflow.types import StepInput, StepOutput

        def level1_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Level 1")

        def level2_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Level 2")

        def level3_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Level 3")

        def evaluator(step_input: StepInput) -> bool:
            return True

        # Loop > Parallel > Condition > Function
        workflow = Workflow(
            name="deeply-nested-func-workflow",
            id="deeply-nested-func-workflow-id",
            steps=[
                Loop(
                    name="loop",
                    max_iterations=2,
                    steps=[
                        Parallel(
                            Condition(
                                name="condition",
                                evaluator=evaluator,
                                steps=[Step(name="deep-func", executor=level3_func)],
                            ),
                            Step(name="parallel-func", executor=level2_func),
                            name="parallel",
                        ),
                        Step(name="loop-func", executor=level1_func),
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        # Navigate through nesting
        loop_copy = copy.steps[0]
        parallel_copy = loop_copy.steps[0]
        condition_copy = parallel_copy.steps[0]
        # All functions should be same references
        assert condition_copy.steps[0].executor is level3_func
        assert parallel_copy.steps[1].executor is level2_func
        assert loop_copy.steps[1].executor is level1_func
        assert condition_copy.evaluator is evaluator

    def test_step_attributes_preserved_with_function_executor(self):
        """Step configuration attributes should be preserved for function executor steps."""
        from agno.workflow.types import StepInput, StepOutput

        def my_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Result")

        workflow = Workflow(
            name="attr-workflow",
            id="attr-workflow-id",
            steps=[
                Step(
                    name="configured-step",
                    description="A configured function step",
                    executor=my_func,
                    max_retries=3,
                    skip_on_failure=True,
                )
            ],
        )
        copy = workflow.deep_copy()
        step_copy = copy.steps[0]
        # Config values travel with the copy; the executor stays shared.
        assert step_copy.name == "configured-step"
        assert step_copy.description == "A configured function step"
        assert step_copy.executor is my_func
        assert step_copy.max_retries == 3
        assert step_copy.skip_on_failure is True
# ============================================================================
# Custom Executor with Internal Agent/Team Tests
# ============================================================================
class TestCustomExecutorWithInternalAgentTeam:
"""Tests for custom executor functions that internally use agents/teams.
This is a critical edge case: when a function executor captures an agent/team
in its closure, that agent/team is NOT copied during workflow.deep_copy().
This can lead to state contamination if not handled carefully.
"""
    def test_function_with_closure_agent_shares_reference(self):
        """Function capturing agent in closure - agent is NOT copied (shared reference).

        This documents the LIMITATION: functions with closure-captured agents
        will share that agent across workflow copies.
        """
        from agno.workflow.types import StepInput, StepOutput

        # Agent captured in closure
        closure_agent = Agent(
            name="closure-agent",
            id="closure-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"call_count": 0},
        )

        def agent_wrapper_step(step_input: StepInput) -> StepOutput:
            # This function captures closure_agent - it will be shared!
            closure_agent.metadata["call_count"] += 1
            return StepOutput(content=f"Agent called {closure_agent.metadata['call_count']} times")

        workflow = Workflow(
            name="closure-agent-workflow",
            id="closure-agent-workflow-id",
            steps=[Step(name="agent-wrapper", executor=agent_wrapper_step)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # The function is shared (same reference)
        assert copy1.steps[0].executor is agent_wrapper_step
        assert copy2.steps[0].executor is agent_wrapper_step
        # IMPORTANT: The closure_agent is NOT part of the workflow's step structure,
        # so it won't be copied. This is a known limitation.
        # Both copies share the same closure_agent reference.
    def test_function_with_closure_team_shares_reference(self):
        """Function capturing team in closure - team is NOT copied (shared reference)."""
        from agno.workflow.types import StepInput, StepOutput

        member = Agent(
            name="team-member",
            id="team-member-id",
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        closure_team = Team(
            name="closure-team",
            id="closure-team-id",
            members=[member],
            model=OpenAIChat(id="gpt-4o-mini"),
        )

        def team_wrapper_step(step_input: StepInput) -> StepOutput:
            # This function captures closure_team - it will be shared!
            return StepOutput(content=f"Team {closure_team.name} executed")

        workflow = Workflow(
            name="closure-team-workflow",
            id="closure-team-workflow-id",
            steps=[Step(name="team-wrapper", executor=team_wrapper_step)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Both copies share the same function reference
        # (and therefore the same closure-captured team).
        assert copy1.steps[0].executor is copy2.steps[0].executor
def test_recommended_pattern_agent_in_step(self):
"""RECOMMENDED: Put agent directly in Step for proper isolation."""
agent = Agent(
name="step-agent",
id="step-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"isolated": True},
)
workflow = Workflow(
name="proper-agent-workflow",
id="proper-agent-workflow-id",
steps=[Step(name="agent-step", agent=agent)],
)
copy1 = workflow.deep_copy()
copy2 = workflow.deep_copy()
# Agents ARE properly isolated when placed in Step
assert copy1.steps[0].agent is not agent
assert copy2.steps[0].agent is not agent
assert copy1.steps[0].agent is not copy2.steps[0].agent
# Modify one copy's agent
copy1.steps[0].agent.metadata["isolated"] = False
# Others are unaffected
assert agent.metadata["isolated"] is True
assert copy2.steps[0].agent.metadata["isolated"] is True
def test_recommended_pattern_team_in_step(self):
"""RECOMMENDED: Put team directly in Step for proper isolation."""
member = Agent(
name="member",
id="member-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"task_count": 0},
)
team = Team(
name="step-team",
id="step-team-id",
members=[member],
model=OpenAIChat(id="gpt-4o-mini"),
)
workflow = Workflow(
name="proper-team-workflow",
id="proper-team-workflow-id",
steps=[Step(name="team-step", team=team)],
)
copy1 = workflow.deep_copy()
copy2 = workflow.deep_copy()
# Teams ARE properly isolated when placed in Step
assert copy1.steps[0].team is not team
assert copy2.steps[0].team is not team
assert copy1.steps[0].team is not copy2.steps[0].team
# Members are also isolated
assert copy1.steps[0].team.members[0] is not member
assert copy2.steps[0].team.members[0] is not member
def test_function_with_step_input_agent_pattern(self):
"""Pattern: Pass agent via step_input.additional_data for isolation."""
from agno.workflow.types import StepInput, StepOutput
def dynamic_agent_step(step_input: StepInput) -> StepOutput:
# Get agent from additional_data - this allows for dynamic injection
agent_config = step_input.additional_data.get("agent_config", {}) if step_input.additional_data else {}
return StepOutput(content=f"Processed with config: {agent_config}")
workflow = Workflow(
name="dynamic-agent-workflow",
id="dynamic-agent-workflow-id",
steps=[Step(name="dynamic-step", executor=dynamic_agent_step)],
)
copy = workflow.deep_copy()
# Function is preserved
assert copy.steps[0].executor is dynamic_agent_step
def test_hybrid_pattern_function_then_agent(self):
"""Hybrid pattern: Function preprocessor followed by agent step."""
from agno.workflow.types import StepInput, StepOutput
def preprocessor(step_input: StepInput) -> StepOutput:
# Preprocessing logic - no agent needed here
processed = f"PROCESSED: {step_input.input}"
return StepOutput(content=processed)
agent = Agent(
name="processor-agent",
id="processor-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"processed_count": 0},
)
workflow = Workflow(
name="hybrid-workflow",
id="hybrid-workflow-id",
steps=[
Step(name="preprocess", executor=preprocessor),
Step(name="agent-process", agent=agent),
],
)
copy = workflow.deep_copy()
# Function is shared (same reference)
assert copy.steps[0].executor is preprocessor
# Agent is properly isolated (different instance)
assert copy.steps[1].agent is not agent
assert copy.steps[1].agent.id == agent.id
def test_hybrid_pattern_agent_then_function(self):
"""Hybrid pattern: Agent step followed by function postprocessor."""
from agno.workflow.types import StepInput, StepOutput
agent = Agent(
name="generator-agent",
id="generator-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"generated_count": 0},
)
def postprocessor(step_input: StepInput) -> StepOutput:
# Postprocessing logic - uses previous step's output
result = f"FINAL: {step_input.previous_step_content}"
return StepOutput(content=result)
workflow = Workflow(
name="agent-then-func-workflow",
id="agent-then-func-workflow-id",
steps=[
Step(name="generate", agent=agent),
Step(name="postprocess", executor=postprocessor),
],
)
copy = workflow.deep_copy()
# Agent is properly isolated
assert copy.steps[0].agent is not agent
# Function is shared
assert copy.steps[1].executor is postprocessor
    def test_parallel_mixed_agent_and_function_isolation(self):
        """Parallel execution with both agent steps and function steps."""
        from agno.workflow.parallel import Parallel
        from agno.workflow.types import StepInput, StepOutput

        agent1 = Agent(
            name="parallel-agent-1",
            id="parallel-agent-1-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"branch": "A"},
        )
        agent2 = Agent(
            name="parallel-agent-2",
            id="parallel-agent-2-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"branch": "B"},
        )

        def func_branch(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Function branch result")

        # Three parallel branches: agent / function / agent.
        workflow = Workflow(
            name="parallel-mixed-workflow",
            id="parallel-mixed-workflow-id",
            steps=[
                Parallel(
                    Step(name="agent-branch-1", agent=agent1),
                    Step(name="func-branch", executor=func_branch),
                    Step(name="agent-branch-2", agent=agent2),
                    name="mixed-parallel",
                )
            ],
        )
        copy = workflow.deep_copy()
        parallel_copy = copy.steps[0]
        # Agents are isolated
        assert parallel_copy.steps[0].agent is not agent1
        assert parallel_copy.steps[2].agent is not agent2
        # Function is shared
        assert parallel_copy.steps[1].executor is func_branch
        # Modify copied agents
        parallel_copy.steps[0].agent.metadata["branch"] = "A-modified"
        parallel_copy.steps[2].agent.metadata["branch"] = "B-modified"
        # Originals unchanged
        assert agent1.metadata["branch"] == "A"
        assert agent2.metadata["branch"] == "B"
    def test_loop_with_agent_and_function_steps(self):
        """Loop containing both agent and function steps."""
        from agno.workflow.loop import Loop
        from agno.workflow.types import StepInput, StepOutput

        loop_agent = Agent(
            name="loop-agent",
            id="loop-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"iterations": 0},
        )

        def iteration_check(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Iteration complete")

        def end_condition(outputs):
            return len(outputs) >= 2

        workflow = Workflow(
            name="loop-mixed-workflow",
            id="loop-mixed-workflow-id",
            steps=[
                Loop(
                    name="mixed-loop",
                    max_iterations=5,
                    end_condition=end_condition,
                    steps=[
                        Step(name="agent-iteration", agent=loop_agent),
                        Step(name="check-iteration", executor=iteration_check),
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        loop_copy = copy.steps[0]
        # Agent in loop is isolated
        assert loop_copy.steps[0].agent is not loop_agent
        assert loop_copy.steps[0].agent.id == loop_agent.id
        # Function in loop is shared
        assert loop_copy.steps[1].executor is iteration_check
        # End condition function is shared
        assert loop_copy.end_condition is end_condition
def test_condition_with_agent_and_function_evaluator(self):
"""Condition with function evaluator and agent step inside."""
from agno.workflow.condition import Condition
from agno.workflow.types import StepInput
def should_execute(step_input: StepInput) -> bool:
return "yes" in str(step_input.input).lower()
conditional_agent = Agent(
name="conditional-agent",
id="conditional-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"executed": False},
)
workflow = Workflow(
name="condition-mixed-workflow",
id="condition-mixed-workflow-id",
steps=[
Condition(
name="conditional",
evaluator=should_execute,
steps=[Step(name="conditional-agent-step", agent=conditional_agent)],
)
],
)
copy = workflow.deep_copy()
condition_copy = copy.steps[0]
# Evaluator function is shared
assert condition_copy.evaluator is should_execute
# Agent inside condition is isolated
assert condition_copy.steps[0].agent is not conditional_agent
assert condition_copy.steps[0].agent.id == conditional_agent.id
    def test_router_with_agent_choices_and_function_selector(self):
        """Router with function selector and agent steps as choices.

        Verifies that deep_copy shares the selector callable but isolates
        the agents referenced by the router's `choices` steps.
        """
        from agno.workflow.router import Router
        from agno.workflow.types import StepInput
        route_agent_a = Agent(
            name="route-a-agent",
            id="route-a-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"route": "A"},
        )
        route_agent_b = Agent(
            name="route-b-agent",
            id="route-b-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"route": "B"},
        )
        # The selector captures both agents in its closure (see NOTE at the end).
        def route_selector(step_input: StepInput):
            if "a" in str(step_input.input).lower():
                return [Step(name="route-a", agent=route_agent_a)]
            return [Step(name="route-b", agent=route_agent_b)]
        workflow = Workflow(
            name="router-agent-workflow",
            id="router-agent-workflow-id",
            steps=[
                Router(
                    name="agent-router",
                    selector=route_selector,
                    choices=[
                        Step(name="choice-a", agent=route_agent_a),
                        Step(name="choice-b", agent=route_agent_b),
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        router_copy = copy.steps[0]
        # Selector function is shared (same callable object)
        assert router_copy.selector is route_selector
        # Agent choices are isolated: new instances that keep the same ids
        assert router_copy.choices[0].agent is not route_agent_a
        assert router_copy.choices[1].agent is not route_agent_b
        assert router_copy.choices[0].agent.id == route_agent_a.id
        assert router_copy.choices[1].agent.id == route_agent_b.id
        # NOTE: The agents referenced inside route_selector's closure
        # (route_agent_a, route_agent_b) are NOT copied. This is a limitation.
        # Only the agents in `choices` are properly isolated.
    def test_complex_workflow_with_all_step_types(self):
        """Complex workflow combining all step types with agents and functions.

        Builds a workflow nesting Step, Condition, Parallel, Router and Loop,
        then verifies after deep_copy that plain callables (executor,
        evaluator, selector, end condition) are shared while every agent
        reference — including deeply nested ones — is a fresh instance.
        """
        from agno.workflow.condition import Condition
        from agno.workflow.loop import Loop
        from agno.workflow.parallel import Parallel
        from agno.workflow.router import Router
        from agno.workflow.types import StepInput, StepOutput
        # Agents
        preprocess_agent = Agent(
            name="preprocess-agent",
            id="preprocess-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"stage": "preprocess"},
        )
        main_agent = Agent(
            name="main-agent",
            id="main-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"stage": "main"},
        )
        fallback_agent = Agent(
            name="fallback-agent",
            id="fallback-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"stage": "fallback"},
        )
        # Functions (expected to survive the copy by reference)
        def validate_input(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Validated")
        def should_process(step_input: StepInput) -> bool:
            return True
        def select_route(step_input: StepInput):
            return [Step(name="main", agent=main_agent)]
        def end_loop(outputs):
            return len(outputs) >= 1
        workflow = Workflow(
            name="complex-workflow",
            id="complex-workflow-id",
            steps=[
                # Step 1: Function validation
                Step(name="validate", executor=validate_input),
                # Step 2: Agent preprocessing
                Step(name="preprocess", agent=preprocess_agent),
                # Step 3: Conditional execution
                Condition(
                    name="should-process",
                    evaluator=should_process,
                    steps=[
                        # Nested parallel
                        Parallel(
                            # Router with agent choices
                            Router(
                                name="route-processor",
                                selector=select_route,
                                choices=[
                                    Step(name="main-choice", agent=main_agent),
                                    Step(name="fallback-choice", agent=fallback_agent),
                                ],
                            ),
                            # Loop with agent
                            Loop(
                                name="retry-loop",
                                max_iterations=2,
                                end_condition=end_loop,
                                steps=[Step(name="retry-agent", agent=main_agent)],
                            ),
                            name="parallel-processing",
                        )
                    ],
                ),
            ],
        )
        copy = workflow.deep_copy()
        # Verify structure integrity
        assert len(copy.steps) == 3
        # Step 1: Function preserved
        assert copy.steps[0].executor is validate_input
        # Step 2: Agent isolated, metadata carried over
        assert copy.steps[1].agent is not preprocess_agent
        assert copy.steps[1].agent.metadata["stage"] == "preprocess"
        # Step 3: Condition shares its evaluator
        condition_copy = copy.steps[2]
        assert condition_copy.evaluator is should_process
        # Navigate to parallel
        parallel_copy = condition_copy.steps[0]
        # Router inside parallel: shared selector, isolated choice agents
        router_copy = parallel_copy.steps[0]
        assert router_copy.selector is select_route
        assert router_copy.choices[0].agent is not main_agent
        assert router_copy.choices[1].agent is not fallback_agent
        # Loop inside parallel: shared end condition, isolated agent
        loop_copy = parallel_copy.steps[1]
        assert loop_copy.end_condition is end_loop
        assert loop_copy.steps[0].agent is not main_agent
        # Modify all copied agents
        copy.steps[1].agent.metadata["stage"] = "modified"
        router_copy.choices[0].agent.metadata["stage"] = "modified"
        router_copy.choices[1].agent.metadata["stage"] = "modified"
        loop_copy.steps[0].agent.metadata["stage"] = "modified"
        # All originals unchanged
        assert preprocess_agent.metadata["stage"] == "preprocess"
        assert main_agent.metadata["stage"] == "main"
        assert fallback_agent.metadata["stage"] == "fallback"
def test_step_id_unique_per_copy(self):
"""Each workflow copy should get NEW unique step_ids for per-request isolation."""
from agno.workflow.types import StepInput, StepOutput
def my_func(step_input: StepInput) -> StepOutput:
return StepOutput(content="Result")
agent = Agent(
name="test-agent",
id="test-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
)
# Create steps with explicit step_ids
func_step = Step(
name="func-step",
step_id="original-func-step-id",
executor=my_func,
)
agent_step = Step(
name="agent-step",
step_id="original-agent-step-id",
agent=agent,
)
workflow = Workflow(
name="step-id-workflow",
id="step-id-workflow-id",
steps=[func_step, agent_step],
)
copy1 = workflow.deep_copy()
copy2 = workflow.deep_copy()
# Each copy should have NEW unique step_ids (different from original)
assert copy1.steps[0].step_id != "original-func-step-id"
assert copy1.steps[1].step_id != "original-agent-step-id"
assert copy2.steps[0].step_id != "original-func-step-id"
assert copy2.steps[1].step_id != "original-agent-step-id"
# Each copy should have DIFFERENT step_ids from each other
assert copy1.steps[0].step_id != copy2.steps[0].step_id
assert copy1.steps[1].step_id != copy2.steps[1].step_id
# Step names should be preserved
assert copy1.steps[0].name == "func-step"
assert copy1.steps[1].name == "agent-step"
# Agent should be a different instance
assert copy1.steps[1].agent is not agent
assert copy2.steps[1].agent is not agent
assert copy1.steps[1].agent is not copy2.steps[1].agent
    def test_step_id_unique_in_nested_steps(self):
        """Nested steps should also get unique step_ids per copy.

        Steps nested inside Loop/Parallel containers must receive fresh
        step_ids on every deep_copy while keeping their names, and nested
        agents must be new instances.
        """
        from agno.workflow.loop import Loop
        from agno.workflow.parallel import Parallel
        from agno.workflow.types import StepInput, StepOutput
        def my_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Result")
        agent = Agent(
            name="nested-agent",
            id="nested-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        # Explicit step_ids so id reuse after copying would be detectable.
        inner_step_1 = Step(
            name="inner-1",
            step_id="original-inner-1-id",
            executor=my_func,
        )
        inner_step_2 = Step(
            name="inner-2",
            step_id="original-inner-2-id",
            agent=agent,
        )
        workflow = Workflow(
            name="nested-step-id-workflow",
            id="nested-step-id-workflow-id",
            steps=[
                Loop(
                    name="loop",
                    max_iterations=2,
                    steps=[
                        Parallel(
                            inner_step_1,
                            inner_step_2,
                            name="parallel",
                        )
                    ],
                )
            ],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Navigate to nested steps (Loop -> Parallel -> inner steps)
        loop_copy1 = copy1.steps[0]
        parallel_copy1 = loop_copy1.steps[0]
        loop_copy2 = copy2.steps[0]
        parallel_copy2 = loop_copy2.steps[0]
        # Each copy should have NEW unique step_ids
        assert parallel_copy1.steps[0].step_id != "original-inner-1-id"
        assert parallel_copy1.steps[1].step_id != "original-inner-2-id"
        # Each copy should have DIFFERENT step_ids from each other
        assert parallel_copy1.steps[0].step_id != parallel_copy2.steps[0].step_id
        assert parallel_copy1.steps[1].step_id != parallel_copy2.steps[1].step_id
        # Step names should be preserved
        assert parallel_copy1.steps[0].name == "inner-1"
        assert parallel_copy1.steps[1].name == "inner-2"
        # Agent should be a different instance in each copy
        assert parallel_copy1.steps[1].agent is not agent
        assert parallel_copy2.steps[1].agent is not agent
def test_function_executor_calling_agent_run(self):
"""Function executor that calls agent.run() internally - agent is shared via closure."""
from agno.workflow.types import StepInput, StepOutput
# Agent captured in closure and used via run()
inner_agent = Agent(
name="inner-agent",
id="inner-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"run_count": 0},
)
def executor_with_agent_run(step_input: StepInput) -> StepOutput:
# This captures inner_agent in closure and calls run()
# The agent is SHARED across workflow copies - this is a limitation!
inner_agent.metadata["run_count"] += 1
# In real usage: result = inner_agent.run(step_input.input)
return StepOutput(content=f"Agent run count: {inner_agent.metadata['run_count']}")
workflow = Workflow(
name="agent-run-workflow",
id="agent-run-workflow-id",
steps=[Step(name="agent-run-step", executor=executor_with_agent_run)],
)
copy1 = workflow.deep_copy()
copy2 = workflow.deep_copy()
# Function is shared
assert copy1.steps[0].executor is executor_with_agent_run
assert copy2.steps[0].executor is executor_with_agent_run
# LIMITATION: inner_agent is shared via closure
# Simulating what happens when both copies execute
copy1.steps[0].executor(StepInput(input="test1"))
assert inner_agent.metadata["run_count"] == 1
copy2.steps[0].executor(StepInput(input="test2"))
assert inner_agent.metadata["run_count"] == 2 # Incremented by copy2!
# This demonstrates the state contamination issue
    def test_function_executor_calling_team_run(self):
        """Function executor that calls team.run() internally - team is shared via closure.

        Documents that a Team captured in an executor's closure is shared
        across workflow copies; both copies append to the same call log.
        """
        from agno.workflow.types import StepInput, StepOutput
        member = Agent(
            name="team-member",
            id="team-member-id",
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        closure_team = Team(
            name="inner-team",
            id="inner-team-id",
            members=[member],
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        # Shared list records calls made through every workflow copy.
        call_log: List[str] = []
        def executor_with_team_run(step_input: StepInput) -> StepOutput:
            # This captures closure_team in closure and would call run()
            # The team is SHARED across workflow copies
            call_log.append(f"team_run:{step_input.input}")
            # In real usage: result = closure_team.run(step_input.input)
            _ = closure_team  # Reference to show closure captures this
            return StepOutput(content=f"Team executed for: {step_input.input}")
        workflow = Workflow(
            name="team-run-workflow",
            id="team-run-workflow-id",
            steps=[Step(name="team-run-step", executor=executor_with_team_run)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Execute via both copies
        copy1.steps[0].executor(StepInput(input="request_A"))
        copy2.steps[0].executor(StepInput(input="request_B"))
        # Both calls logged to same shared list (demonstrating shared state)
        assert len(call_log) == 2
        assert "team_run:request_A" in call_log
        assert "team_run:request_B" in call_log
def test_function_executor_with_agent_run_mocked(self):
"""Test function executor calling agent.arun() with mocked response."""
from agno.workflow.types import StepInput, StepOutput
inner_agent = Agent(
name="mocked-agent",
id="mocked-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"user_id": None},
)
async def executor_with_async_agent(step_input: StepInput) -> StepOutput:
# Simulate modifying agent state before run
inner_agent.metadata["user_id"] = (
step_input.additional_data.get("user_id") if step_input.additional_data else None
)
# In real usage: result = await inner_agent.arun(step_input.input)
return StepOutput(content=f"Processed for user: {inner_agent.metadata['user_id']}")
workflow = Workflow(
name="async-agent-run-workflow",
id="async-agent-run-workflow-id",
steps=[Step(name="async-agent-step", executor=executor_with_async_agent)],
)
copy1 = workflow.deep_copy()
copy2 = workflow.deep_copy()
# Both copies share the same inner_agent via closure
# This means user_id can leak between requests!
# Simulate concurrent execution scenario
import asyncio
async def simulate_requests():
# Request 1 sets user_id to "alice"
await copy1.steps[0].executor(StepInput(input="hello", additional_data={"user_id": "alice"}))
alice_user_id = inner_agent.metadata["user_id"]
# Request 2 sets user_id to "bob"
await copy2.steps[0].executor(StepInput(input="hello", additional_data={"user_id": "bob"}))
bob_user_id = inner_agent.metadata["user_id"]
return alice_user_id, bob_user_id
alice_id, bob_id = asyncio.get_event_loop().run_until_complete(simulate_requests())
# After both requests, inner_agent has bob's user_id (last write wins)
assert inner_agent.metadata["user_id"] == "bob"
# This demonstrates the state contamination problem
    def test_safe_pattern_agent_factory_in_function(self):
        """SAFE PATTERN: Create new agent instance inside function to avoid sharing.

        Instead of capturing a shared agent in the closure, the executor
        constructs a brand-new Agent per execution, so each request gets
        fully isolated state.
        """
        from agno.workflow.types import StepInput, StepOutput
        # Records one entry per execution so isolation can be asserted below.
        execution_log: List[Dict[str, Any]] = []
        def safe_executor_with_agent_factory(step_input: StepInput) -> StepOutput:
            # SAFE: Create a NEW agent instance for each execution
            local_agent = Agent(
                name="local-agent",
                id="local-agent-id",
                model=OpenAIChat(id="gpt-4o-mini"),
                metadata={
                    "request_id": step_input.additional_data.get("request_id") if step_input.additional_data else None
                },
            )
            execution_log.append(
                {
                    "request_id": local_agent.metadata["request_id"],
                    "agent_id": id(local_agent),
                }
            )
            # In real usage: result = local_agent.run(step_input.input)
            return StepOutput(content=f"Processed request: {local_agent.metadata['request_id']}")
        workflow = Workflow(
            name="safe-agent-factory-workflow",
            id="safe-agent-factory-workflow-id",
            steps=[Step(name="safe-step", executor=safe_executor_with_agent_factory)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Execute via both copies
        copy1.steps[0].executor(StepInput(input="test", additional_data={"request_id": "req-1"}))
        copy2.steps[0].executor(StepInput(input="test", additional_data={"request_id": "req-2"}))
        # Each execution created a different agent instance (distinct id())
        assert len(execution_log) == 2
        assert execution_log[0]["request_id"] == "req-1"
        assert execution_log[1]["request_id"] == "req-2"
        assert execution_log[0]["agent_id"] != execution_log[1]["agent_id"]
    def test_safe_pattern_agent_deep_copy_in_function(self):
        """SAFE PATTERN: deep_copy the template agent inside function.

        The executor clones a "template" agent per execution, so per-request
        mutations never touch the template or other requests.
        """
        from agno.workflow.types import StepInput, StepOutput
        # Template agent (not used directly, only as template)
        template_agent = Agent(
            name="template-agent",
            id="template-agent-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"processed": False, "user_id": None},
        )
        # Collects the per-execution clones for inspection below.
        execution_agents: List[Agent] = []
        def safe_executor_with_deep_copy(step_input: StepInput) -> StepOutput:
            # SAFE: Create isolated copy from template
            local_agent = template_agent.deep_copy()
            local_agent.metadata["user_id"] = (
                step_input.additional_data.get("user_id") if step_input.additional_data else None
            )
            local_agent.metadata["processed"] = True
            execution_agents.append(local_agent)
            # In real usage: result = local_agent.run(step_input.input)
            return StepOutput(content=f"Processed for: {local_agent.metadata['user_id']}")
        workflow = Workflow(
            name="deep-copy-agent-workflow",
            id="deep-copy-agent-workflow-id",
            steps=[Step(name="deep-copy-step", executor=safe_executor_with_deep_copy)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Execute via both copies
        copy1.steps[0].executor(StepInput(input="test", additional_data={"user_id": "alice"}))
        copy2.steps[0].executor(StepInput(input="test", additional_data={"user_id": "bob"}))
        # Template is unchanged
        assert template_agent.metadata["processed"] is False
        assert template_agent.metadata["user_id"] is None
        # Each execution got an isolated agent with its own user_id
        assert len(execution_agents) == 2
        assert execution_agents[0].metadata["user_id"] == "alice"
        assert execution_agents[1].metadata["user_id"] == "bob"
        assert execution_agents[0] is not execution_agents[1]
        assert execution_agents[0] is not template_agent
    def test_safe_pattern_team_deep_copy_in_function(self):
        """SAFE PATTERN: deep_copy the template team inside function.

        Cloning the template team per execution isolates the team AND its
        members, so member state never leaks across executions.
        """
        from agno.workflow.types import StepInput, StepOutput
        member = Agent(
            name="member",
            id="member-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            metadata={"tasks": 0},
        )
        template_team = Team(
            name="template-team",
            id="template-team-id",
            members=[member],
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        # Collects the per-execution team clones for inspection below.
        execution_teams: List[Team] = []
        def safe_executor_with_team_copy(step_input: StepInput) -> StepOutput:
            # SAFE: Create isolated copy from template
            local_team = template_team.deep_copy()
            local_team.members[0].metadata["tasks"] += 1
            execution_teams.append(local_team)
            # In real usage: result = local_team.run(step_input.input)
            return StepOutput(content="Team executed")
        workflow = Workflow(
            name="deep-copy-team-workflow",
            id="deep-copy-team-workflow-id",
            steps=[Step(name="deep-copy-team-step", executor=safe_executor_with_team_copy)],
        )
        copy1 = workflow.deep_copy()
        copy2 = workflow.deep_copy()
        # Execute via both copies
        copy1.steps[0].executor(StepInput(input="task1"))
        copy2.steps[0].executor(StepInput(input="task2"))
        # Template member unchanged
        assert member.metadata["tasks"] == 0
        # Each execution got isolated team with isolated member
        assert len(execution_teams) == 2
        assert execution_teams[0].members[0].metadata["tasks"] == 1
        assert execution_teams[1].members[0].metadata["tasks"] == 1
        assert execution_teams[0] is not execution_teams[1]
        assert execution_teams[0].members[0] is not execution_teams[1].members[0]
def test_integration_workflow_with_agent_run_in_agentos(self):
"""Integration test: Workflow with function that calls agent inside AgentOS."""
from agno.workflow.types import StepInput, StepOutput
# Shared agent via closure (demonstrating the problem)
shared_agent = Agent(
name="shared-agent",
id="shared-agent-id",
model=OpenAIChat(id="gpt-4o-mini"),
metadata={"last_user": None},
)
def executor_using_shared_agent(step_input: StepInput) -> StepOutput:
user = step_input.additional_data.get("user") if step_input.additional_data else "unknown"
shared_agent.metadata["last_user"] = user
return StepOutput(content=f"Processed for {user}")
workflow = Workflow(
name="shared-agent-workflow",
id="shared-agent-workflow-id",
steps=[Step(name="shared-step", executor=executor_using_shared_agent)],
)
os = AgentOS(workflows=[workflow])
app = os.get_app()
client = TestClient(app)
class MockRunOutput:
def __init__(self):
self.run_id = str(uuid.uuid4())
self.content = "result"
def to_dict(self):
return {"run_id": self.run_id, "content": self.content}
with patch.object(Workflow, "arun", new_callable=AsyncMock) as mock_arun:
mock_arun.return_value = MockRunOutput()
# Make requests
response1 = client.post(
f"/workflows/{workflow.id}/runs",
data={"message": "Hello", "stream": "false"},
)
response2 = client.post(
f"/workflows/{workflow.id}/runs",
data={"message": "World", "stream": "false"},
)
assert response1.status_code == 200
assert response2.status_code == 200
# The workflow itself is copied via deep_copy, but the shared_agent
# inside the closure is NOT copied - it remains shared.
# This test documents this behavior.
def test_integration_safe_workflow_with_agent_factory(self):
"""Integration test: Safe workflow pattern with agent factory inside AgentOS."""
from agno.workflow.types import StepInput, StepOutput
agent_instances_created: List[str] = []
def safe_executor_factory(step_input: StepInput) -> StepOutput:
# SAFE: Create new agent per execution
request_id = str(uuid.uuid4())
local_agent = Agent(
name=f"local-agent-{request_id}",
id=f"local-agent-{request_id}",
model=OpenAIChat(id="gpt-4o-mini"),
)
agent_instances_created.append(local_agent.id)
return StepOutput(content=f"Created agent: {local_agent.id}")
workflow = Workflow(
name="safe-factory-workflow",
id="safe-factory-workflow-id",
steps=[Step(name="safe-factory-step", executor=safe_executor_factory)],
)
os = AgentOS(workflows=[workflow])
app = os.get_app()
client = TestClient(app)
class MockRunOutput:
def __init__(self):
self.run_id = str(uuid.uuid4())
def to_dict(self):
return {"run_id": self.run_id}
with patch.object(Workflow, "arun", new_callable=AsyncMock) as mock_arun:
mock_arun.return_value = MockRunOutput()
# Make multiple requests
for _ in range(3):
response = client.post(
f"/workflows/{workflow.id}/runs",
data={"message": "Test", "stream": "false"},
)
assert response.status_code == 200
# Each workflow run would create its own agent (if actually executed)
# The mock bypasses actual execution, but the pattern is demonstrated
# ============================================================================
# Tools Deep Copy Tests
# ============================================================================
class TestToolsDeepCopy:
    """Tests for tools handling during Agent.deep_copy().

    Expected policy exercised here: regular Toolkit/function tools are
    copied, MCP-style tools and tools that refuse to copy are shared by
    reference.
    """
    def test_regular_tools_are_deep_copied(self):
        """Regular (non-MCP) tools should be deep copied, not shared."""
        from agno.tools.toolkit import Toolkit
        class CustomTool(Toolkit):
            """A simple custom toolkit for testing."""
            def __init__(self):
                super().__init__(name="custom_tool")
                self.counter = 0
                self.register(self.increment)
            def increment(self) -> int:
                """Increment and return counter."""
                self.counter += 1
                return self.counter
        tool = CustomTool()
        agent = Agent(name="test-agent", id="test-id", tools=[tool])
        copy = agent.deep_copy()
        # Tools list should be a different list
        assert copy.tools is not agent.tools
        # The copied tool should be a different instance
        assert len(copy.tools) == 1
        assert copy.tools[0] is not tool
        # Modifying original tool shouldn't affect copy
        tool.counter = 100
        assert copy.tools[0].counter == 0  # Copy should have initial value
    def test_function_tools_are_copied(self):
        """Function-based tools should be copied."""
        def my_tool_func(x: int) -> int:
            """A simple tool function."""
            return x * 2
        agent = Agent(name="test-agent", id="test-id", tools=[my_tool_func])
        copy = agent.deep_copy()
        # Tools list should be a different list
        assert copy.tools is not agent.tools
        # Function reference may be same (functions are immutable)
        assert len(copy.tools) == 1
    def test_mcp_tools_are_shared_not_copied(self):
        """MCP tools should be shared (same instance) to maintain connections."""
        # NOTE(review): MockMCPTools below is defined but never used —
        # candidate for removal; TestMCPTools is the instance under test.
        class MockMCPTools:
            """Mock MCP tools for testing - simulates MCPTools behavior."""
            def __init__(self):
                self.instance_id = uuid.uuid4()
                self.connected = True
        # Local class named MCPTools; detection presumably matches on the
        # class name in the MRO rather than the real import — TODO confirm.
        class MCPTools:
            pass
        # Create a subclass that includes MCPTools in its MRO
        class TestMCPTools(MCPTools):
            def __init__(self):
                self.instance_id = uuid.uuid4()
                self.connected = True
        mcp_tool = TestMCPTools()
        agent = Agent(name="test-agent", id="test-id", tools=[mcp_tool])
        copy = agent.deep_copy()
        # MCP tool should be the SAME instance (shared)
        assert copy.tools[0] is mcp_tool
        assert copy.tools[0].instance_id == mcp_tool.instance_id
    def test_multi_mcp_tools_are_shared(self):
        """MultiMCPTools should also be shared."""
        class MultiMCPTools:
            """Mock MultiMCPTools for testing."""
            def __init__(self):
                self.instance_id = uuid.uuid4()
                self.servers = ["server1", "server2"]
        class TestMultiMCPTools(MultiMCPTools):
            pass
        multi_mcp = TestMultiMCPTools()
        agent = Agent(name="test-agent", id="test-id", tools=[multi_mcp])
        copy = agent.deep_copy()
        # MultiMCPTools should be shared (same instance)
        assert copy.tools[0] is multi_mcp
    def test_mixed_tools_handled_correctly(self):
        """Mix of MCP and regular tools should be handled correctly."""
        from agno.tools.toolkit import Toolkit
        class RegularTool(Toolkit):
            def __init__(self):
                super().__init__(name="regular")
                self.value = "original"
                self.register(self.get_value)
            def get_value(self) -> str:
                return self.value
        class MCPTools:
            pass
        class MockMCP(MCPTools):
            def __init__(self):
                self.instance_id = uuid.uuid4()
        regular = RegularTool()
        mcp = MockMCP()
        agent = Agent(name="test-agent", id="test-id", tools=[regular, mcp])
        copy = agent.deep_copy()
        assert len(copy.tools) == 2
        # Regular tool should be copied (different instance)
        assert copy.tools[0] is not regular
        # MCP tool should be shared (same instance)
        assert copy.tools[1] is mcp
    def test_non_copyable_tool_falls_back_to_sharing(self):
        """Tools that can't be deep copied should be shared by reference."""
        class NonCopyableTool:
            """A tool that raises on deepcopy."""
            def __init__(self):
                self.instance_id = uuid.uuid4()
            def __deepcopy__(self, memo):
                raise TypeError("Cannot deep copy this tool")
            def __copy__(self):
                raise TypeError("Cannot copy this tool")
        non_copyable = NonCopyableTool()
        agent = Agent(name="test-agent", id="test-id", tools=[non_copyable])
        # Should not raise - falls back to sharing
        copy = agent.deep_copy()
        # Non-copyable tool should be shared
        assert copy.tools[0] is non_copyable
        assert copy.tools[0].instance_id == non_copyable.instance_id
    def test_tool_with_failing_mro_check_still_works(self):
        """Tools where MRO check fails should still be processed safely."""
        class WeirdTool:
            """Tool with unusual type() behavior."""
            def __init__(self):
                self.value = "test"
        weird = WeirdTool()
        agent = Agent(name="test-agent", id="test-id", tools=[weird])
        # Should not raise
        copy = agent.deep_copy()
        assert len(copy.tools) == 1
    def test_empty_tools_list_copied(self):
        """Empty tools list should be handled correctly."""
        agent = Agent(name="test-agent", id="test-id", tools=[])
        copy = agent.deep_copy()
        # Still an empty list, but a fresh one (not the original object)
        assert copy.tools == []
        assert copy.tools is not agent.tools
    def test_none_tools_handled(self):
        """None tools gets normalized to empty list by Agent."""
        agent = Agent(name="test-agent", id="test-id", tools=None)
        copy = agent.deep_copy()
        # Agent normalizes None to empty list, so copy should also be empty list
        assert copy.tools == []
    def test_tools_with_state_isolation(self):
        """Tool state should be isolated between copies."""
        from agno.tools.toolkit import Toolkit
        class StatefulTool(Toolkit):
            def __init__(self):
                super().__init__(name="stateful")
                self.call_count = 0
                self.history: List[str] = []
                self.register(self.record)
            def record(self, message: str) -> str:
                self.call_count += 1
                self.history.append(message)
                return f"Recorded: {message}"
        tool = StatefulTool()
        # Seed state before copying so the snapshot is observable.
        tool.call_count = 5
        tool.history = ["msg1", "msg2"]
        agent = Agent(name="test-agent", id="test-id", tools=[tool])
        copy1 = agent.deep_copy()
        copy2 = agent.deep_copy()
        # Each copy should have independent state
        copied_tool1 = copy1.tools[0]
        copied_tool2 = copy2.tools[0]
        # Initial state should be copied
        assert copied_tool1.call_count == 5
        assert copied_tool1.history == ["msg1", "msg2"]
        # Modifications should be independent
        copied_tool1.call_count = 100
        copied_tool1.history.append("new_msg")
        assert copied_tool2.call_count == 5  # Unchanged
        assert copied_tool2.history == ["msg1", "msg2"]  # Unchanged
        assert tool.call_count == 5  # Original unchanged
# ============================================================================
# Knowledge Deep Copy Tests
# ============================================================================
class TestKnowledgeDeepCopy:
    """Tests for knowledge handling during deep_copy().

    Knowledge bases are intentionally shared by reference across copies.
    """

    def test_knowledge_is_shared_not_copied(self):
        """Knowledge base should be shared (not copied) between instances."""

        class MockKnowledge:
            """Mock knowledge base for testing."""

            def __init__(self):
                self.instance_id = uuid.uuid4()
                self.documents = ["doc1", "doc2"]

            def search(self, query: str):
                return self.documents

        kb = MockKnowledge()
        original = Agent(name="test-agent", id="test-id", knowledge=kb)
        duplicate = original.deep_copy()

        # The copy must reference the exact same knowledge object.
        assert duplicate.knowledge is kb
        assert duplicate.knowledge.instance_id == kb.instance_id

    def test_knowledge_sharing_in_team(self):
        """Knowledge in team members should also be shared."""

        class MockKnowledge:
            def __init__(self):
                self.instance_id = uuid.uuid4()

        kb = MockKnowledge()
        squad = Team(
            name="test-team",
            id="test-team-id",
            members=[Agent(name="member", id="member-id", knowledge=kb)],
            model=OpenAIChat(id="gpt-4o-mini"),
        )

        # Copying the team must not clone the member's knowledge base.
        assert squad.deep_copy().members[0].knowledge is kb

    def test_knowledge_none_handled(self):
        """None knowledge should remain None."""
        blank = Agent(name="test-agent", id="test-id", knowledge=None)
        assert blank.deep_copy().knowledge is None
# ============================================================================
# Model Deep Copy Tests
# ============================================================================
class TestModelDeepCopy:
"""Tests for model handling during deep_copy()."""
def test_model_is_shared_not_copied(self):
"""Model should be shared (not copied) between instances."""
model = OpenAIChat(id="gpt-4o-mini")
agent = Agent(name="test-agent", id="test-id", model=model)
copy = agent.deep_copy()
# Model should be the SAME instance (shared)
assert copy.model is model
def test_reasoning_model_is_shared(self):
"""Reasoning model should also be shared."""
model = OpenAIChat(id="gpt-4o-mini")
reasoning_model = OpenAIChat(id="gpt-4o")
agent = Agent(name="test-agent", id="test-id", model=model, reasoning_model=reasoning_model)
copy = agent.deep_copy()
assert copy.model is model
assert copy.reasoning_model is reasoning_model
def test_model_in_team_is_shared(self):
"""Team model and member models should be shared."""
team_model = OpenAIChat(id="gpt-4o-mini")
member_model = OpenAIChat(id="gpt-4o")
member = Agent(name="member", id="member-id", model=member_model)
team = Team(
name="test-team",
id="test-team-id",
members=[member],
model=team_model,
)
copy = team.deep_copy()
assert copy.model is team_model
assert copy.members[0].model is member_model
def test_parser_model_is_shared(self):
"""Parser model should be shared (not copied) between instances."""
model = OpenAIChat(id="gpt-4o-mini")
parser_model = OpenAIChat(id="gpt-4o")
agent = Agent(name="test-agent", id="test-id", model=model, parser_model=parser_model)
copy = agent.deep_copy()
# Parser model should be the SAME instance (shared)
assert copy.parser_model is parser_model
def test_output_model_is_shared(self):
"""Output model should be shared (not copied) between instances."""
model = OpenAIChat(id="gpt-4o-mini")
output_model = OpenAIChat(id="gpt-4o")
agent = Agent(name="test-agent", id="test-id", model=model, output_model=output_model)
copy = agent.deep_copy()
# Output model should be the SAME instance (shared)
assert copy.output_model is output_model
def test_session_summary_manager_is_shared(self):
"""Session summary manager should be shared (not copied) between instances."""
from agno.session.summary import SessionSummaryManager
model = OpenAIChat(id="gpt-4o-mini")
session_summary_manager = SessionSummaryManager(model=model)
agent = Agent(
name="test-agent",
id="test-id",
model=model,
session_summary_manager=session_summary_manager,
)
copy = agent.deep_copy()
# Session summary manager should be the SAME instance (shared)
assert copy.session_summary_manager is session_summary_manager
def test_parser_model_in_team_is_shared(self):
    """A team's parser model survives deep_copy as the same object."""
    shared = OpenAIChat(id="gpt-4o-mini")
    parser = OpenAIChat(id="gpt-4o")
    original = Team(
        name="test-team",
        id="test-team-id",
        members=[Agent(name="member", id="member-id", model=shared)],
        model=shared,
        parser_model=parser,
    )
    assert original.deep_copy().parser_model is parser
def test_output_model_in_team_is_shared(self):
    """A team's output model survives deep_copy as the same object."""
    shared = OpenAIChat(id="gpt-4o-mini")
    producer = OpenAIChat(id="gpt-4o")
    original = Team(
        name="test-team",
        id="test-team-id",
        members=[Agent(name="member", id="member-id", model=shared)],
        model=shared,
        output_model=producer,
    )
    assert original.deep_copy().output_model is producer
# ============================================================================
# Database Deep Copy Tests
# ============================================================================
class TestDatabaseDeepCopy:
    """Assert that deep_copy() shares (never clones) database handles."""

    def test_db_is_shared_not_copied(self):
        """The copied agent's db attribute points at the original database."""

        class MockDatabase:
            def __init__(self):
                self.instance_id = uuid.uuid4()
                self.connection_pool = ["conn1", "conn2"]

        database = MockDatabase()
        clone = Agent(name="test-agent", id="test-id", db=database).deep_copy()
        # Same object, therefore same identity marker.
        assert clone.db is database
        assert clone.db.instance_id == database.instance_id

    def test_db_shared_across_multiple_copies(self):
        """Every copy produced from one agent shares a single database."""

        class MockDatabase:
            def __init__(self):
                self.instance_id = uuid.uuid4()

        database = MockDatabase()
        source = Agent(name="test-agent", id="test-id", db=database)
        for clone in (source.deep_copy() for _ in range(5)):
            assert clone.db is database

    def test_db_shared_in_team_members(self):
        """Databases held by team members are shared into the copied team."""

        class MockDatabase:
            def __init__(self):
                self.instance_id = uuid.uuid4()

        database = MockDatabase()
        squad = Team(
            name="test-team",
            id="test-team-id",
            members=[Agent(name="member", id="member-id", db=database)],
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        assert squad.deep_copy().members[0].db is database

    def test_db_shared_in_workflow_step_agents(self):
        """Databases held by workflow step agents are shared into the copy."""

        class MockDatabase:
            def __init__(self):
                self.instance_id = uuid.uuid4()

        database = MockDatabase()
        runner = Agent(name="step-agent", id="step-agent-id", db=database)
        flow = Workflow(
            name="test-workflow",
            id="test-workflow-id",
            steps=[Step(name="step", agent=runner)],
        )
        assert flow.deep_copy().steps[0].agent.db is database
# ============================================================================
# Memory Manager Deep Copy Tests
# ============================================================================
class TestMemoryManagerDeepCopy:
    """Assert that deep_copy() shares the memory manager by reference."""

    def test_memory_manager_is_shared(self):
        """The copied agent must reuse the original memory manager object."""

        class MockMemoryManager:
            def __init__(self):
                self.instance_id = uuid.uuid4()

        manager = MockMemoryManager()
        holder = Agent(name="test-agent", id="test-id", memory_manager=manager)
        assert holder.deep_copy().memory_manager is manager
# ============================================================================
# Reasoning Agent Deep Copy Tests
# ============================================================================
class TestReasoningAgentDeepCopy:
    """Verify reasoning_agent handling during deep_copy()."""

    def test_reasoning_agent_is_deep_copied(self):
        """The reasoning agent must be cloned, never shared."""
        reasoner = Agent(name="reasoner", id="reasoner-id", model=OpenAIChat(id="gpt-4o"))
        main = Agent(
            name="main-agent",
            id="main-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            reasoning_agent=reasoner,
        )
        clone = main.deep_copy()
        # Distinct instance, identical identity fields.
        assert clone.reasoning_agent is not reasoner
        assert clone.reasoning_agent.id == reasoner.id
        assert clone.reasoning_agent.name == reasoner.name

    def test_reasoning_agent_state_isolated(self):
        """Mutating the original reasoning agent must not leak into the copy."""
        reasoner = Agent(
            name="reasoner",
            id="reasoner-id",
            model=OpenAIChat(id="gpt-4o"),
            metadata={"thoughts": []},
        )
        main = Agent(
            name="main-agent",
            id="main-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            reasoning_agent=reasoner,
        )
        clone = main.deep_copy()
        # Mutate the original after copying.
        reasoner.metadata["thoughts"].append("thought1")
        # The clone's reasoning agent keeps its own (still empty) list.
        assert clone.reasoning_agent.metadata["thoughts"] == []
# ============================================================================
# Error Handling Deep Copy Tests
# ============================================================================
class TestDeepCopyErrorHandling:
    """Tests for error handling during deep_copy()."""

    def test_tool_iteration_error_handled(self):
        """deep_copy() must not raise while copying a plain tools list.

        NOTE(review): a previous version defined a ``FailingIterableTools``
        helper intended to fail mid-iteration, but it was never wired into the
        agent (Agent expects a list for ``tools``), so it was dead code and has
        been removed. The assertion below is unchanged.
        """
        agent = Agent(name="test-agent", id="test-id", tools=["tool1"])
        # Should not raise
        clone = agent.deep_copy()
        assert clone is not None

    def test_deep_copy_with_unusual_tool_types(self):
        """Deep copy should handle unusual tool types gracefully."""
        tools: List[Any] = [
            lambda x: x,  # Lambda function
            42,  # Integer (unusual but shouldn't crash)
            "string_tool",  # String (unusual but shouldn't crash)
            None,  # None in list
        ]
        agent = Agent(name="test-agent", id="test-id", tools=tools)
        # Should not raise - handles gracefully
        clone = agent.deep_copy()
        # All four entries survive the copy.
        assert len(clone.tools) == 4

    def test_concurrent_deep_copy_safety(self):
        """Multiple concurrent deep_copy calls should be safe and independent."""
        from agno.tools.toolkit import Toolkit

        class CounterTool(Toolkit):
            def __init__(self):
                super().__init__(name="counter")
                self.count = 0
                self.register(self.increment)

            def increment(self) -> int:
                self.count += 1
                return self.count

        tool = CounterTool()
        agent = Agent(name="test-agent", id="test-id", tools=[tool])
        # Fan out 50 copies across 10 worker threads.
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(agent.deep_copy) for _ in range(50)]
            copies = [f.result() for f in futures]
        # All copies should be independent
        assert len(copies) == 50
        for clone in copies:
            assert clone is not agent
            assert clone.tools[0] is not tool
        # The shared original tool's state must be untouched.
        assert tool.count == 0
# ============================================================================
# Comprehensive Integration Tests
# ============================================================================
class TestComprehensiveDeepCopyIntegration:
    """Integration tests combining multiple aspects of deep copy."""

    def test_agent_with_all_shared_resources(self):
        """Agent with all shared resources (db, model, knowledge, memory_manager)."""

        # Lightweight stand-ins with unique identities; only object identity
        # is asserted below, so no real behavior is needed.
        class MockDb:
            def __init__(self):
                self.id = uuid.uuid4()

        class MockKnowledge:
            def __init__(self):
                self.id = uuid.uuid4()

        class MockMemoryManager:
            def __init__(self):
                self.id = uuid.uuid4()

        db = MockDb()
        knowledge = MockKnowledge()
        memory_manager = MockMemoryManager()
        model = OpenAIChat(id="gpt-4o-mini")
        agent = Agent(
            name="full-agent",
            id="full-agent-id",
            model=model,
            db=db,
            knowledge=knowledge,
            memory_manager=memory_manager,
            metadata={"user": "test"},
        )
        copy = agent.deep_copy()
        # Shared resources should be same instance
        assert copy.db is db
        assert copy.knowledge is knowledge
        assert copy.memory_manager is memory_manager
        assert copy.model is model
        # Mutable state should be isolated
        assert copy.metadata is not agent.metadata
        assert copy.metadata == {"user": "test"}

    def test_team_with_agents_having_tools_and_knowledge(self):
        """Team with member agents that have tools and knowledge."""
        from agno.tools.toolkit import Toolkit

        class MockKnowledge:
            def __init__(self, name: str):
                self.name = name
                self.id = uuid.uuid4()

        # Toolkit subclass with per-instance mutable state (call_count) used
        # below to prove copies are state-isolated.
        class MockTool(Toolkit):
            def __init__(self, name: str):
                super().__init__(name=name)
                self.call_count = 0
                self.register(self.do_work)

            def do_work(self) -> str:
                self.call_count += 1
                return f"Work done by {self.name}"

        knowledge1 = MockKnowledge("kb1")
        knowledge2 = MockKnowledge("kb2")
        tool1 = MockTool("tool1")
        tool2 = MockTool("tool2")
        member1 = Agent(
            name="member1",
            id="member1-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            knowledge=knowledge1,
            tools=[tool1],
        )
        member2 = Agent(
            name="member2",
            id="member2-id",
            model=OpenAIChat(id="gpt-4o-mini"),
            knowledge=knowledge2,
            tools=[tool2],
        )
        team = Team(
            name="full-team",
            id="full-team-id",
            members=[member1, member2],
            model=OpenAIChat(id="gpt-4o-mini"),
        )
        copy = team.deep_copy()
        # Members should be different instances
        assert copy.members[0] is not member1
        assert copy.members[1] is not member2
        # Knowledge should be shared
        assert copy.members[0].knowledge is knowledge1
        assert copy.members[1].knowledge is knowledge2
        # Tools should be copied (different instances)
        assert copy.members[0].tools[0] is not tool1
        assert copy.members[1].tools[0] is not tool2
        # Tool state should be isolated
        tool1.call_count = 10
        assert copy.members[0].tools[0].call_count == 0

    def test_workflow_with_full_agent_configuration(self):
        """Workflow with step agents having full configuration."""
        from agno.tools.toolkit import Toolkit

        class MockDb:
            def __init__(self):
                self.id = uuid.uuid4()

        class MockTool(Toolkit):
            def __init__(self):
                super().__init__(name="mock")
                self.state = "initial"
                self.register(self.action)

            def action(self) -> str:
                return self.state

        db = MockDb()
        tool = MockTool()
        model = OpenAIChat(id="gpt-4o-mini")
        step_agent = Agent(
            name="step-agent",
            id="step-agent-id",
            model=model,
            db=db,
            tools=[tool],
            metadata={"step": 1},
        )
        # Note: the same db instance is attached to both the workflow and the
        # step agent; both references must survive the copy unduplicated.
        workflow = Workflow(
            name="full-workflow",
            id="full-workflow-id",
            db=db,
            steps=[Step(name="full-step", agent=step_agent)],
        )
        copy = workflow.deep_copy()
        # Workflow DB should be shared
        assert copy.db is db
        # Step agent should be copied
        assert copy.steps[0].agent is not step_agent
        # Step agent's DB should be shared
        assert copy.steps[0].agent.db is db
        # Step agent's model should be shared
        assert copy.steps[0].agent.model is model
        # Step agent's tool should be copied
        assert copy.steps[0].agent.tools[0] is not tool
        # Step agent's metadata should be isolated
        step_agent.metadata["step"] = 2
        assert copy.steps[0].agent.metadata["step"] == 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_per_request_isolation.py",
"license": "Apache License 2.0",
"lines": 2178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
"""Unit tests for per-request isolation feature.
This module tests:
- Factory functions (get_agent_by_id, get_team_by_id, get_workflow_by_id with create_fresh=True)
- Deep copying of Agent, Team, and Workflow classes
- Complex workflow structures including nested step containers
- State isolation between copies
- Edge cases and concurrent request scenarios
"""
import pytest
from agno.agent import Agent
from agno.os.utils import (
get_agent_by_id,
get_team_by_id,
get_workflow_by_id,
)
from agno.team import Team
from agno.workflow import Workflow
from agno.workflow.condition import Condition
from agno.workflow.loop import Loop
from agno.workflow.parallel import Parallel
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.steps import Steps
from agno.workflow.types import StepInput, StepOutput
# ============================================================================
# Fixtures
# ============================================================================
@pytest.fixture
def basic_agent():
    """Provide a minimal agent used by workflow/step tests."""
    agent = Agent(name="basic-agent", id="basic-agent-id")
    return agent
@pytest.fixture
def basic_team():
    """Provide a two-member team used by workflow/step tests."""
    roster = [
        Agent(name="member-1", id="member-1-id"),
        Agent(name="member-2", id="member-2-id"),
    ]
    return Team(name="basic-team", id="basic-team-id", members=roster)
# ============================================================================
# Factory Function Tests
# ============================================================================
class TestGetAgentForRequest:
    """Behavioral tests for the get_agent_by_id factory function."""

    def test_returns_same_instance_when_create_fresh_false(self):
        """Without create_fresh, the registered instance itself comes back."""
        registered = Agent(name="test-agent", id="test-id")
        assert get_agent_by_id("test-id", [registered], create_fresh=False) is registered

    def test_returns_new_instance_when_create_fresh_true(self):
        """With create_fresh, a distinct-but-equivalent agent comes back."""
        registered = Agent(name="test-agent", id="test-id")
        fresh = get_agent_by_id("test-id", [registered], create_fresh=True)
        assert fresh is not registered
        assert fresh.id == registered.id
        assert fresh.name == registered.name

    def test_returns_none_for_unknown_agent(self):
        """An unknown ID yields None rather than raising."""
        registered = Agent(name="test-agent", id="test-id")
        assert get_agent_by_id("unknown-id", [registered], create_fresh=True) is None

    def test_preserves_agent_id_in_copy(self):
        """The fresh copy keeps the original identifier."""
        registered = Agent(name="test-agent", id="test-id")
        fresh = get_agent_by_id("test-id", [registered], create_fresh=True)
        assert fresh.id == "test-id"

    def test_mutable_state_is_isolated(self):
        """Mutations on the copy's metadata never reach the original."""
        registered = Agent(name="test-agent", id="test-id", metadata={"key": "original"})
        fresh = get_agent_by_id("test-id", [registered], create_fresh=True)
        fresh.metadata["key"] = "modified"
        # Deep copy: the registered agent is untouched.
        assert registered.metadata["key"] == "original"

    def test_internal_state_is_reset(self):
        """Cached internal fields are wiped on the fresh copy."""
        registered = Agent(name="test-agent", id="test-id")
        # Simulate accumulated runtime state on the registered instance.
        registered._cached_session = "some_cached_value"  # type: ignore
        fresh = get_agent_by_id("test-id", [registered], create_fresh=True)
        assert fresh._cached_session is None
class TestGetTeamForRequest:
    """Behavioral tests for the get_team_by_id factory function."""

    def test_returns_same_instance_when_create_fresh_false(self):
        """Without create_fresh, the registered team itself comes back."""
        registered = Team(
            name="test-team", id="test-id", members=[Agent(name="member", id="member-id")]
        )
        assert get_team_by_id("test-id", [registered], create_fresh=False) is registered

    def test_returns_new_instance_when_create_fresh_true(self):
        """With create_fresh, a distinct-but-equivalent team comes back."""
        registered = Team(
            name="test-team", id="test-id", members=[Agent(name="member", id="member-id")]
        )
        fresh = get_team_by_id("test-id", [registered], create_fresh=True)
        assert fresh is not registered
        assert fresh.id == registered.id
        assert fresh.name == registered.name

    def test_member_agents_are_also_copied(self):
        """Members inside a fresh team are themselves fresh copies."""
        member = Agent(name="member", id="member-id")
        registered = Team(name="test-team", id="test-id", members=[member])
        fresh = get_team_by_id("test-id", [registered], create_fresh=True)
        # Distinct member instance carrying the same identifier.
        assert fresh.members[0] is not member
        assert fresh.members[0].id == member.id

    def test_returns_none_for_unknown_team(self):
        """An unknown ID yields None rather than raising."""
        registered = Team(
            name="test-team", id="test-id", members=[Agent(name="member", id="member-id")]
        )
        assert get_team_by_id("unknown-id", [registered], create_fresh=True) is None
class TestGetWorkflowForRequest:
    """Behavioral tests for the get_workflow_by_id factory function."""

    def test_returns_same_instance_when_create_fresh_false(self):
        """Without create_fresh, the registered workflow itself comes back."""
        registered = Workflow(name="test-workflow", id="test-id")
        assert get_workflow_by_id("test-id", [registered], create_fresh=False) is registered

    def test_returns_new_instance_when_create_fresh_true(self):
        """With create_fresh, a distinct-but-equivalent workflow comes back."""
        registered = Workflow(name="test-workflow", id="test-id")
        fresh = get_workflow_by_id("test-id", [registered], create_fresh=True)
        assert fresh is not registered
        assert fresh.id == registered.id
        assert fresh.name == registered.name

    def test_returns_none_for_unknown_workflow(self):
        """An unknown ID yields None rather than raising."""
        registered = Workflow(name="test-workflow", id="test-id")
        assert get_workflow_by_id("unknown-id", [registered], create_fresh=True) is None
# ============================================================================
# Agent Deep Copy Tests
# ============================================================================
class TestAgentDeepCopy:
    """Tests for Agent.deep_copy() behavior."""

    def test_deep_copy_creates_new_instance(self):
        """The copy is a distinct object carrying the same ID."""
        original = Agent(name="test-agent", id="test-id")
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id

    def test_deep_copy_preserves_configuration(self):
        """All configuration fields survive the copy unchanged."""
        original = Agent(
            name="test-agent",
            id="test-id",
            description="A test agent",
            instructions=["Do this", "Do that"],
            markdown=True,
        )
        clone = original.deep_copy()
        for field in ("name", "description", "instructions", "markdown"):
            assert getattr(clone, field) == getattr(original, field)

    def test_deep_copy_with_update(self):
        """The update mapping overrides fields on the copy only."""
        original = Agent(name="original", id="test-id")
        clone = original.deep_copy(update={"name": "updated"})
        assert clone.name == "updated"
        assert original.name == "original"

    def test_deep_copy_with_team_id_set(self):
        """Runtime team_id is dropped when copying a team member."""
        original = Agent(name="team-member", id="member-id")
        original.team_id = "parent-team-id"  # Set at runtime when added to team
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id
        # team_id is runtime-only and must not carry over.
        assert clone.team_id is None

    def test_deep_copy_with_workflow_id_set(self):
        """Runtime workflow_id is dropped when copying a workflow agent."""
        original = Agent(name="workflow-agent", id="agent-id")
        original.workflow_id = "parent-workflow-id"
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id
        assert clone.workflow_id is None
# ============================================================================
# Team Deep Copy Tests
# ============================================================================
class TestTeamDeepCopy:
    """Tests for Team.deep_copy() behavior."""

    def test_deep_copy_creates_new_instance(self):
        """The copy is a distinct Team carrying the same ID."""
        original = Team(
            name="test-team", id="test-id", members=[Agent(name="member", id="member-id")]
        )
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id

    def test_deep_copy_copies_members(self):
        """Every member agent is replaced with an equivalent fresh copy."""
        first = Agent(name="member1", id="member1-id")
        second = Agent(name="member2", id="member2-id")
        original = Team(name="test-team", id="test-id", members=[first, second])
        clone = original.deep_copy()
        assert len(clone.members) == 2
        for copied, source in zip(clone.members, (first, second)):
            assert copied is not source
            assert copied.id == source.id

    def test_deep_copy_with_parent_team_id_set(self):
        """Runtime parent_team_id is dropped when copying a nested team."""
        original = Team(
            name="nested-team", id="nested-id", members=[Agent(name="inner", id="inner-id")]
        )
        original.parent_team_id = "outer-team-id"  # Set at runtime
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id
        assert clone.parent_team_id is None

    def test_deep_copy_with_workflow_id_set(self):
        """Runtime workflow_id is dropped when copying a workflow team."""
        original = Team(
            name="workflow-team", id="team-id", members=[Agent(name="member", id="member-id")]
        )
        original.workflow_id = "workflow-id"
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id
        assert clone.workflow_id is None
# ============================================================================
# Nested Team Deep Copy Tests
# ============================================================================
class TestNestedTeamDeepCopy:
    """Tests for deep copying nested team structures.

    These tests specifically verify that deep_copy works correctly when teams
    have runtime-set fields like parent_team_id that are not __init__ parameters.
    """

    def test_outer_team_deep_copy_with_nested_team(self):
        """Outer team can be deep copied when it contains nested teams."""
        inner_agent = Agent(name="inner-agent", id="inner-agent-id")
        inner_team = Team(name="inner-team", id="inner-team-id", members=[inner_agent])
        outer_team = Team(name="outer-team", id="outer-team-id", members=[inner_team])
        # Initialize the outer team (this sets parent_team_id on inner_team)
        outer_team.initialize_team()
        # Verify runtime field was set
        assert inner_team.parent_team_id == "outer-team-id"
        # This should not raise TypeError
        copy = outer_team.deep_copy()
        assert copy is not outer_team
        assert copy.members[0] is not inner_team
        assert copy.members[0].id == inner_team.id

    def test_three_level_nested_teams(self):
        """Deep copy works for three levels of nested teams."""
        agent = Agent(name="agent", id="agent-id")
        level3 = Team(name="level3", id="level3-id", members=[agent])
        level2 = Team(name="level2", id="level2-id", members=[level3])
        level1 = Team(name="level1", id="level1-id", members=[level2])
        level1.initialize_team()
        # Verify runtime fields were set at each level
        assert level2.parent_team_id == "level1-id"
        assert level3.parent_team_id == "level2-id"
        # This should not raise TypeError
        copy = level1.deep_copy()
        # Verify copies are independent instances
        assert copy is not level1
        assert copy.members[0] is not level2
        assert copy.members[0].members[0] is not level3
        # Verify IDs are preserved
        assert copy.id == level1.id
        assert copy.members[0].id == level2.id
        assert copy.members[0].members[0].id == level3.id

    def test_nested_team_runtime_fields_not_propagated(self):
        """Runtime fields should not be propagated to copies."""
        inner_agent = Agent(name="inner-agent", id="inner-agent-id")
        inner_team = Team(name="inner-team", id="inner-team-id", members=[inner_agent])
        outer_team = Team(name="outer-team", id="outer-team-id", members=[inner_team])
        outer_team.initialize_team()
        # Both teams now have runtime fields set
        assert inner_team.parent_team_id == "outer-team-id"
        copy = outer_team.deep_copy()
        # The copied inner team should not have parent_team_id set
        # (it's a runtime field that gets set during initialization)
        assert copy.members[0].parent_team_id is None
# ============================================================================
# Workflow Deep Copy - Basic Tests
# ============================================================================
class TestWorkflowDeepCopy:
    """Tests for Workflow.deep_copy() behavior."""

    def test_deep_copy_creates_new_instance(self):
        """The copy is a distinct Workflow carrying the same ID."""
        original = Workflow(name="test-workflow", id="test-id")
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id

    def test_deep_copy_preserves_configuration(self):
        """Name, description and debug flag all survive the copy."""
        original = Workflow(
            name="test-workflow",
            id="test-id",
            description="A test workflow",
            debug_mode=True,
        )
        clone = original.deep_copy()
        assert clone.name == original.name
        assert clone.description == original.description
        assert clone.debug_mode == original.debug_mode
# ============================================================================
# Workflow Deep Copy - Basic Step Types
# ============================================================================
class TestWorkflowDeepCopyBasicSteps:
    """Workflow.deep_copy() behavior with the basic step flavors."""

    def test_workflow_with_single_agent_step(self, basic_agent):
        """An agent wrapped in a Step is cloned along with the workflow."""
        original = Workflow(
            name="single-agent-workflow",
            id="workflow-id",
            steps=[Step(name="agent-step", agent=basic_agent)],
        )
        clone = original.deep_copy()
        assert clone is not original
        assert clone.id == original.id
        assert len(clone.steps) == 1
        # The wrapped agent is a fresh instance with the same identity.
        assert clone.steps[0].agent is not basic_agent
        assert clone.steps[0].agent.id == basic_agent.id

    def test_workflow_with_single_team_step(self, basic_team):
        """A team wrapped in a Step is cloned, members included."""
        original = Workflow(
            name="single-team-workflow",
            id="workflow-id",
            steps=[Step(name="team-step", team=basic_team)],
        )
        clone = original.deep_copy()
        assert clone is not original
        assert clone.steps[0].team is not basic_team
        assert clone.steps[0].team.id == basic_team.id
        # Members are cloned too, not shared.
        assert clone.steps[0].team.members[0] is not basic_team.members[0]

    def test_workflow_with_function_executor_step(self):
        """A function executor is carried over by reference."""

        def my_function(step_input: StepInput) -> StepOutput:
            return StepOutput(content="result")

        original = Workflow(
            name="function-workflow",
            id="workflow-id",
            steps=[Step(name="function-step", executor=my_function)],
        )
        clone = original.deep_copy()
        assert clone is not original
        # Functions cannot be deep copied, so the reference is preserved.
        assert clone.steps[0].executor is my_function

    def test_workflow_with_direct_agent_step(self, basic_agent):
        """A bare agent placed directly in the steps list is cloned."""
        original = Workflow(
            name="direct-agent-workflow",
            id="workflow-id",
            steps=[basic_agent],
        )
        clone = original.deep_copy()
        assert clone is not original
        assert clone.steps[0] is not basic_agent
        assert clone.steps[0].id == basic_agent.id

    def test_workflow_with_direct_team_step(self, basic_team):
        """A bare team placed directly in the steps list is cloned."""
        original = Workflow(
            name="direct-team-workflow",
            id="workflow-id",
            steps=[basic_team],
        )
        clone = original.deep_copy()
        assert clone is not original
        assert clone.steps[0] is not basic_team
        assert clone.steps[0].id == basic_team.id
# ============================================================================
# Workflow Deep Copy - Step Container Types
# ============================================================================
class TestWorkflowDeepCopyContainerSteps:
    """Tests for Workflow.deep_copy() with container step types."""

    def test_workflow_with_parallel_steps(self):
        """Test deep copying a workflow with Parallel steps."""
        agent1 = Agent(name="parallel-agent-1", id="parallel-agent-1-id")
        agent2 = Agent(name="parallel-agent-2", id="parallel-agent-2-id")
        workflow = Workflow(
            name="parallel-workflow",
            id="workflow-id",
            steps=[
                Parallel(
                    Step(name="parallel-step-1", agent=agent1),
                    Step(name="parallel-step-2", agent=agent2),
                    name="parallel-container",
                    description="Parallel execution",
                )
            ],
        )
        copy = workflow.deep_copy()
        assert copy is not workflow
        parallel_copy = copy.steps[0]
        # Container type and configuration survive the copy.
        assert isinstance(parallel_copy, Parallel)
        assert parallel_copy.name == "parallel-container"
        assert parallel_copy.description == "Parallel execution"
        # Agents inside parallel should be copied
        assert parallel_copy.steps[0].agent is not agent1
        assert parallel_copy.steps[1].agent is not agent2
        assert parallel_copy.steps[0].agent.id == agent1.id
        assert parallel_copy.steps[1].agent.id == agent2.id

    def test_workflow_with_loop_steps(self, basic_agent):
        """Test deep copying a workflow with Loop steps."""

        def end_condition(outputs):
            return len(outputs) >= 2

        workflow = Workflow(
            name="loop-workflow",
            id="workflow-id",
            steps=[
                Loop(
                    name="loop-container",
                    description="Loop execution",
                    steps=[Step(name="loop-step", agent=basic_agent)],
                    max_iterations=5,
                    end_condition=end_condition,
                )
            ],
        )
        copy = workflow.deep_copy()
        assert copy is not workflow
        loop_copy = copy.steps[0]
        assert isinstance(loop_copy, Loop)
        assert loop_copy.name == "loop-container"
        assert loop_copy.description == "Loop execution"
        assert loop_copy.max_iterations == 5
        assert loop_copy.end_condition is end_condition  # Function reference preserved
        # Agent inside loop should be copied
        assert loop_copy.steps[0].agent is not basic_agent
        assert loop_copy.steps[0].agent.id == basic_agent.id

    def test_workflow_with_condition_steps(self, basic_agent):
        """Test deep copying a workflow with Condition steps."""

        def evaluator(step_input):
            return True

        workflow = Workflow(
            name="condition-workflow",
            id="workflow-id",
            steps=[
                Condition(
                    name="condition-container",
                    description="Conditional execution",
                    evaluator=evaluator,
                    steps=[Step(name="condition-step", agent=basic_agent)],
                )
            ],
        )
        copy = workflow.deep_copy()
        assert copy is not workflow
        condition_copy = copy.steps[0]
        assert isinstance(condition_copy, Condition)
        assert condition_copy.name == "condition-container"
        assert condition_copy.description == "Conditional execution"
        assert condition_copy.evaluator is evaluator  # Function reference preserved
        # Agent inside condition should be copied
        assert condition_copy.steps[0].agent is not basic_agent
        assert condition_copy.steps[0].agent.id == basic_agent.id

    def test_workflow_with_router_steps(self):
        """Test deep copying a workflow with Router steps."""
        agent1 = Agent(name="choice-1", id="choice-1-id")
        agent2 = Agent(name="choice-2", id="choice-2-id")

        # Note: selector closes over agent1; the closure itself is carried
        # over by reference (asserted below).
        def selector(step_input):
            return [Step(name="selected", agent=agent1)]

        workflow = Workflow(
            name="router-workflow",
            id="workflow-id",
            steps=[
                Router(
                    name="router-container",
                    description="Router execution",
                    selector=selector,
                    choices=[
                        Step(name="choice-step-1", agent=agent1),
                        Step(name="choice-step-2", agent=agent2),
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        assert copy is not workflow
        router_copy = copy.steps[0]
        assert isinstance(router_copy, Router)
        assert router_copy.name == "router-container"
        assert router_copy.description == "Router execution"
        assert router_copy.selector is selector  # Function reference preserved
        # Choices should be copied
        assert router_copy.choices[0].agent is not agent1
        assert router_copy.choices[1].agent is not agent2
        assert router_copy.choices[0].agent.id == agent1.id
        assert router_copy.choices[1].agent.id == agent2.id

    def test_workflow_with_steps_container(self):
        """Test deep copying a workflow with Steps container."""
        agent1 = Agent(name="steps-agent-1", id="steps-agent-1-id")
        agent2 = Agent(name="steps-agent-2", id="steps-agent-2-id")
        workflow = Workflow(
            name="steps-workflow",
            id="workflow-id",
            steps=[
                Steps(
                    name="steps-container",
                    description="Steps sequence",
                    steps=[
                        Step(name="step-1", agent=agent1),
                        Step(name="step-2", agent=agent2),
                    ],
                )
            ],
        )
        copy = workflow.deep_copy()
        assert copy is not workflow
        steps_copy = copy.steps[0]
        assert isinstance(steps_copy, Steps)
        assert steps_copy.name == "steps-container"
        assert steps_copy.description == "Steps sequence"
        # Agents inside the Steps container should be copied
        assert steps_copy.steps[0].agent is not agent1
        assert steps_copy.steps[1].agent is not agent2
# ============================================================================
# Workflow Deep Copy - Deeply Nested Structures
# ============================================================================
class TestWorkflowDeepCopyDeeplyNested:
    """Deep copy behavior for multi-level nested step containers."""

    def test_parallel_inside_loop(self):
        """A Parallel container nested inside a Loop is fully copied."""
        inner_a = Agent(name="nested-agent-1", id="nested-agent-1-id")
        inner_b = Agent(name="nested-agent-2", id="nested-agent-2-id")
        source = Workflow(
            name="nested-workflow",
            id="workflow-id",
            steps=[
                Loop(
                    name="outer-loop",
                    max_iterations=3,
                    steps=[
                        Parallel(
                            Step(name="inner-parallel-1", agent=inner_a),
                            Step(name="inner-parallel-2", agent=inner_b),
                            name="inner-parallel",
                        )
                    ],
                )
            ],
        )
        duplicate = source.deep_copy()
        # Walk down Loop -> Parallel to reach the nested agents.
        copied_parallel = duplicate.steps[0].steps[0]
        assert copied_parallel.steps[0].agent is not inner_a
        assert copied_parallel.steps[1].agent is not inner_b
        assert copied_parallel.steps[0].agent.id == inner_a.id
        assert copied_parallel.steps[1].agent.id == inner_b.id

    def test_condition_inside_parallel(self):
        """Condition containers nested inside a Parallel are fully copied."""

        def always_true(step_input):
            return True

        agent_a = Agent(name="cond-agent-1", id="cond-agent-1-id")
        agent_b = Agent(name="cond-agent-2", id="cond-agent-2-id")
        source = Workflow(
            name="nested-workflow",
            id="workflow-id",
            steps=[
                Parallel(
                    Condition(
                        name="cond-1",
                        evaluator=always_true,
                        steps=[Step(name="cond-step-1", agent=agent_a)],
                    ),
                    Condition(
                        name="cond-2",
                        evaluator=always_true,
                        steps=[Step(name="cond-step-2", agent=agent_b)],
                    ),
                    name="outer-parallel",
                )
            ],
        )
        duplicate = source.deep_copy()
        outer = duplicate.steps[0]
        assert outer.steps[0].steps[0].agent is not agent_a
        assert outer.steps[1].steps[0].agent is not agent_b

    def test_router_inside_condition(self):
        """A Router nested inside a Condition receives copied choice agents."""

        def always_true(step_input):
            return True

        def choose_nothing(step_input):
            return []

        agent_a = Agent(name="router-choice-1", id="router-choice-1-id")
        agent_b = Agent(name="router-choice-2", id="router-choice-2-id")
        source = Workflow(
            name="nested-workflow",
            id="workflow-id",
            steps=[
                Condition(
                    name="outer-condition",
                    evaluator=always_true,
                    steps=[
                        Router(
                            name="inner-router",
                            selector=choose_nothing,
                            choices=[
                                Step(name="choice-1", agent=agent_a),
                                Step(name="choice-2", agent=agent_b),
                            ],
                        )
                    ],
                )
            ],
        )
        duplicate = source.deep_copy()
        copied_router = duplicate.steps[0].steps[0]
        assert copied_router.choices[0].agent is not agent_a
        assert copied_router.choices[1].agent is not agent_b

    def test_three_levels_nesting(self):
        """Loop > Parallel > Condition > Agent all get fresh copies."""

        def always_true(step_input):
            return True

        buried_agent = Agent(name="deep-agent", id="deep-agent-id")
        source = Workflow(
            name="deeply-nested-workflow",
            id="workflow-id",
            steps=[
                Loop(
                    name="level-1-loop",
                    max_iterations=2,
                    steps=[
                        Parallel(
                            Condition(
                                name="level-3-condition",
                                evaluator=always_true,
                                steps=[Step(name="deep-step", agent=buried_agent)],
                            ),
                            name="level-2-parallel",
                        )
                    ],
                )
            ],
        )
        duplicate = source.deep_copy()
        # Traverse loop -> parallel -> condition to the deepest step.
        deepest = duplicate.steps[0].steps[0].steps[0]
        assert deepest.steps[0].agent is not buried_agent
        assert deepest.steps[0].agent.id == buried_agent.id
# ============================================================================
# Workflow Deep Copy - Step Attribute Preservation
# ============================================================================
class TestWorkflowDeepCopyStepAttributes:
    """Step attributes must survive workflow deep copies."""

    def test_step_name_and_description_preserved(self, basic_agent):
        """Name and description carry over to the copied Step."""
        source = Workflow(
            name="attr-workflow",
            id="workflow-id",
            steps=[
                Step(
                    name="named-step",
                    description="Step description",
                    agent=basic_agent,
                )
            ],
        )
        copied_step = source.deep_copy().steps[0]
        assert copied_step.name == "named-step"
        assert copied_step.description == "Step description"

    def test_step_config_attributes_preserved(self, basic_agent):
        """Retry and failure-handling configuration carries over to the copy."""
        source = Workflow(
            name="config-workflow",
            id="workflow-id",
            steps=[
                Step(
                    name="configured-step",
                    agent=basic_agent,
                    max_retries=5,
                    skip_on_failure=True,
                )
            ],
        )
        copied_step = source.deep_copy().steps[0]
        assert copied_step.max_retries == 5
        assert copied_step.skip_on_failure is True
# ============================================================================
# Agent/Team State Isolation Tests
# ============================================================================
class TestAgentStateIsolation:
    """Per-request agent copies must not leak state back into the template."""

    def test_metadata_changes_isolated(self):
        """Mutating a copy's metadata leaves the original untouched."""
        template = Agent(
            name="isolation-agent",
            id="isolation-id",
            metadata={"counter": 0, "user": "original"},
        )
        fresh = get_agent_by_id("isolation-id", [template], create_fresh=True)
        # Mutate the copy in several ways.
        fresh.metadata["counter"] = 100
        fresh.metadata["user"] = "modified"
        fresh.metadata["new_key"] = "new_value"
        # The template keeps its original metadata.
        assert template.metadata["counter"] == 0
        assert template.metadata["user"] == "original"
        assert "new_key" not in template.metadata

    def test_internal_list_state_isolated(self):
        """Accumulated per-run list state is reset on the copy."""
        template = Agent(name="list-agent", id="list-id")
        # Simulate state accumulated during previous runs.
        template._mcp_tools_initialized_on_run = ["tool1", "tool2"]
        fresh = get_agent_by_id("list-id", [template], create_fresh=True)
        assert fresh._mcp_tools_initialized_on_run == []
        assert len(template._mcp_tools_initialized_on_run) == 2

    def test_cached_session_reset(self):
        """The cached session is cleared on the copy but kept on the original."""
        template = Agent(name="session-agent", id="session-id")
        template._cached_session = "cached_value"  # type: ignore
        fresh = get_agent_by_id("session-id", [template], create_fresh=True)
        assert fresh._cached_session is None
        assert template._cached_session == "cached_value"
class TestTeamStateIsolation:
    """Per-request team copies must isolate member state from the originals."""

    def test_member_modification_isolated(self):
        """Editing a copied member's metadata leaves the source member alone."""
        worker = Agent(name="member", id="member-id", metadata={"role": "worker"})
        source_team = Team(name="team", id="team-id", members=[worker])
        fresh = get_team_by_id("team-id", [source_team], create_fresh=True)
        fresh.members[0].metadata["role"] = "leader"
        assert source_team.members[0].metadata["role"] == "worker"

    def test_nested_team_member_isolation(self):
        """Members of nested teams are copied as well."""
        leaf_agent = Agent(name="inner", id="inner-id")
        nested_team = Team(name="inner-team", id="inner-team-id", members=[leaf_agent])
        root_team = Team(name="outer-team", id="outer-team-id", members=[nested_team])
        fresh = get_team_by_id("outer-team-id", [root_team], create_fresh=True)
        copied_nested = fresh.members[0]
        # Both the nested team and its agent are new instances.
        assert copied_nested is not nested_team
        assert copied_nested.members[0] is not leaf_agent
# ============================================================================
# Workflow with Teams Tests
# ============================================================================
class TestWorkflowWithTeams:
    """Deep copy of workflows whose steps execute teams."""

    def test_workflow_step_with_team(self):
        """A team referenced by a step is copied along with its members."""
        sole_member = Agent(name="team-member", id="team-member-id")
        source_team = Team(name="workflow-team", id="workflow-team-id", members=[sole_member])
        source = Workflow(
            name="team-workflow",
            id="workflow-id",
            steps=[Step(name="team-step", team=source_team)],
        )
        copied_team = source.deep_copy().steps[0].team
        assert copied_team is not source_team
        assert copied_team.id == source_team.id
        assert copied_team.members[0] is not sole_member
        assert copied_team.members[0].id == sole_member.id

    def test_workflow_parallel_with_teams(self):
        """Teams inside a Parallel container are copied too."""
        member_a = Agent(name="member-1", id="member-1-id")
        member_b = Agent(name="member-2", id="member-2-id")
        team_a = Team(name="team-1", id="team-1-id", members=[member_a])
        team_b = Team(name="team-2", id="team-2-id", members=[member_b])
        source = Workflow(
            name="parallel-teams-workflow",
            id="workflow-id",
            steps=[
                Parallel(
                    Step(name="team-step-1", team=team_a),
                    Step(name="team-step-2", team=team_b),
                    name="parallel-teams",
                )
            ],
        )
        copied_parallel = source.deep_copy().steps[0]
        assert copied_parallel.steps[0].team is not team_a
        assert copied_parallel.steps[1].team is not team_b
# ============================================================================
# Edge Cases and Error Handling
# ============================================================================
class TestEdgeCases:
    """Boundary conditions for workflow deep copying."""

    def test_empty_workflow_steps(self):
        """An empty step list copies to a new empty list."""
        source = Workflow(name="empty-workflow", id="workflow-id", steps=[])
        duplicate = source.deep_copy()
        assert duplicate is not source
        assert duplicate.steps == []

    def test_none_workflow_steps(self):
        """steps=None stays None on the copy."""
        source = Workflow(name="none-workflow", id="workflow-id", steps=None)
        duplicate = source.deep_copy()
        assert duplicate is not source
        assert duplicate.steps is None

    def test_workflow_with_empty_parallel(self):
        """A Parallel with no child steps still copies cleanly."""
        source = Workflow(
            name="empty-parallel-workflow",
            id="workflow-id",
            steps=[Parallel(name="empty-parallel")],
        )
        duplicate = source.deep_copy()
        assert duplicate is not source
        assert isinstance(duplicate.steps[0], Parallel)

    def test_workflow_with_empty_loop(self):
        """A Loop with no child steps keeps its configuration."""
        source = Workflow(
            name="empty-loop-workflow",
            id="workflow-id",
            steps=[Loop(name="empty-loop", steps=[], max_iterations=3)],
        )
        duplicate = source.deep_copy()
        assert duplicate is not source
        assert isinstance(duplicate.steps[0], Loop)
        assert duplicate.steps[0].max_iterations == 3

    def test_workflow_update_parameter(self):
        """deep_copy(update=...) overrides fields on the copy only."""
        source = Workflow(
            name="original-name",
            id="original-id",
            description="Original description",
        )
        duplicate = source.deep_copy(update={"name": "updated-name"})
        assert duplicate.name == "updated-name"
        # The id is preserved and the source name is unchanged.
        assert duplicate.id == "original-id"
        assert source.name == "original-name"

    def test_mixed_step_types(self):
        """Agent, team, executor, and container steps copy with correct sharing."""
        agent_a = Agent(name="agent-1", id="agent-1-id")
        agent_b = Agent(name="agent-2", id="agent-2-id")
        sole_member = Agent(name="member", id="member-id")
        source_team = Team(name="team", id="team-id", members=[sole_member])

        def function_executor(step_input):
            return StepOutput(content="result")

        source = Workflow(
            name="mixed-workflow",
            id="workflow-id",
            steps=[
                Step(name="agent-step", agent=agent_a),
                Step(name="team-step", team=source_team),
                Step(name="function-step", executor=function_executor),
                Parallel(
                    Step(name="parallel-agent", agent=agent_b),
                    name="parallel-section",
                ),
            ],
        )
        duplicate = source.deep_copy()
        assert duplicate.steps[0].agent is not agent_a
        assert duplicate.steps[1].team is not source_team
        # Plain function executors are shared by reference, not copied.
        assert duplicate.steps[2].executor is function_executor
        assert duplicate.steps[3].steps[0].agent is not agent_b
# ============================================================================
# Callable Workflow Steps (Workflows 1.0 style)
# ============================================================================
class TestCallableWorkflowSteps:
    """Workflows whose steps are a single callable (Workflows 1.0 style)."""

    def test_callable_steps_preserved(self):
        """The steps callable is shared by reference on the copy."""

        def my_workflow_function(workflow, execution_input):
            return "result"

        source = Workflow(
            name="callable-workflow",
            id="workflow-id",
            steps=my_workflow_function,
        )
        assert source.deep_copy().steps is my_workflow_function
# ============================================================================
# Concurrent Request Simulation
# ============================================================================
class TestConcurrentRequestSimulation:
    """Simulate several requests cloning the same registered template."""

    def test_multiple_copies_independent(self):
        """Each clone of an agent template is a fully independent instance."""
        template = Agent(
            name="template",
            id="template-id",
            metadata={"request_id": None},
        )
        registry = [template]
        # Three concurrent requests, each getting its own clone.
        clones = [get_agent_by_id("template-id", registry, create_fresh=True) for _ in range(3)]
        for position, clone in enumerate(clones, 1):
            clone.metadata["request_id"] = f"request-{position}"
        # The template never sees the per-request mutations.
        assert template.metadata["request_id"] is None
        assert [c.metadata["request_id"] for c in clones] == ["request-1", "request-2", "request-3"]
        # Every clone is a distinct object.
        assert len({id(c) for c in clones}) == 3

    def test_workflow_multiple_copies_independent(self):
        """Each clone of a workflow template owns its own step agents."""
        step_agent = Agent(name="workflow-agent", id="workflow-agent-id")
        template = Workflow(
            name="template-workflow",
            id="template-workflow-id",
            steps=[Step(name="step", agent=step_agent)],
        )
        registry = [template]
        first = get_workflow_by_id("template-workflow-id", registry, create_fresh=True)
        second = get_workflow_by_id("template-workflow-id", registry, create_fresh=True)
        assert first is not second
        assert first.steps[0].agent is not second.steps[0].agent
        assert first.steps[0].agent is not step_agent
        assert second.steps[0].agent is not step_agent
# ============================================================================
# Shared Resources Tests
# ============================================================================
class TestSharedResources:
    """Which workflow resources are copied versus shared."""

    def test_workflow_agent_field_copied(self):
        """The workflow-level orchestration agent is copied, not shared."""
        conductor = Agent(name="orchestrator", id="orchestrator-id")
        source = Workflow(
            name="orchestrated-workflow",
            id="workflow-id",
            agent=conductor,
        )
        duplicate = source.deep_copy()
        assert duplicate.agent is not conductor
        assert duplicate.agent.id == conductor.id
# ============================================================================
# Tools Deep Copy Unit Tests
# ============================================================================
class TestToolsDeepCopyUnit:
    """How Agent.deep_copy() treats each category of tool."""
    # NOTE: the local MCPTools / MultiMCPTools class names below are deliberate —
    # detection appears to be based on the class name, so they must not be renamed.

    def test_regular_toolkit_is_deep_copied(self):
        """Plain Toolkit instances are duplicated with independent state."""
        from agno.tools.toolkit import Toolkit

        class SimpleTool(Toolkit):
            def __init__(self):
                super().__init__(name="simple")
                self.value = 0
                self.register(self.get_value)

            def get_value(self) -> int:
                return self.value

        toolkit = SimpleTool()
        owner = Agent(name="test", id="test-id", tools=[toolkit])
        duplicate = owner.deep_copy()
        assert duplicate.tools[0] is not toolkit
        # Mutating the source toolkit must not affect the copy.
        toolkit.value = 100
        assert duplicate.tools[0].value == 0

    def test_function_tool_in_list(self):
        """Bare function tools survive the copy."""

        def my_func() -> str:
            return "hello"

        owner = Agent(name="test", id="test-id", tools=[my_func])
        duplicate = owner.deep_copy()
        assert len(duplicate.tools) == 1

    def test_mcp_tools_class_detection(self):
        """Anything inheriting from a class named MCPTools is shared, not copied."""

        class MCPTools:
            pass

        class MyMCPTool(MCPTools):
            def __init__(self):
                self.instance_id = "mcp-123"

        connection = MyMCPTool()
        owner = Agent(name="test", id="test-id", tools=[connection])
        duplicate = owner.deep_copy()
        assert duplicate.tools[0] is connection

    def test_multi_mcp_tools_class_detection(self):
        """Anything inheriting from a class named MultiMCPTools is shared."""

        class MultiMCPTools:
            pass

        class MyMultiMCP(MultiMCPTools):
            def __init__(self):
                self.servers = ["s1", "s2"]

        connection = MyMultiMCP()
        owner = Agent(name="test", id="test-id", tools=[connection])
        duplicate = owner.deep_copy()
        assert duplicate.tools[0] is connection

    def test_non_copyable_tool_shared(self):
        """Tools that refuse deep copying fall back to being shared."""

        class UnpicklableTool:
            def __init__(self):
                self.id = "unpicklable"

            def __deepcopy__(self, memo):
                raise TypeError("Cannot copy")

        stubborn = UnpicklableTool()
        owner = Agent(name="test", id="test-id", tools=[stubborn])
        duplicate = owner.deep_copy()  # must not raise
        assert duplicate.tools[0] is stubborn

    def test_empty_tools_list(self):
        """An empty tool list becomes a new empty list on the copy."""
        owner = Agent(name="test", id="test-id", tools=[])
        duplicate = owner.deep_copy()
        assert duplicate.tools == []
        assert duplicate.tools is not owner.tools

    def test_none_tools(self):
        """tools=None is normalized to an empty list by Agent."""
        owner = Agent(name="test", id="test-id", tools=None)
        duplicate = owner.deep_copy()
        assert duplicate.tools == []

    def test_multiple_tools_mixed(self):
        """A mixed tool list copies toolkits but shares MCP connections."""
        from agno.tools.toolkit import Toolkit

        class RegularTool(Toolkit):
            def __init__(self):
                super().__init__(name="regular")
                self.count = 0
                self.register(self.inc)

            def inc(self) -> int:
                self.count += 1
                return self.count

        class MCPTools:
            pass

        class MockMCP(MCPTools):
            def __init__(self):
                self.id = "mcp"

        toolkit = RegularTool()
        connection = MockMCP()
        owner = Agent(name="test", id="test-id", tools=[toolkit, connection])
        duplicate = owner.deep_copy()
        assert len(duplicate.tools) == 2
        assert duplicate.tools[0] is not toolkit  # copied
        assert duplicate.tools[1] is connection  # shared
# ============================================================================
# Heavy Resources Unit Tests
# ============================================================================
class TestHeavyResourcesUnit:
    """Heavy resources (db, knowledge, memory manager, models) are shared by reference."""

    def test_db_is_shared(self):
        """The database handle is not duplicated."""

        class MockDb:
            pass

        database = MockDb()
        owner = Agent(name="test", id="test-id", db=database)
        assert owner.deep_copy().db is database

    def test_knowledge_is_shared(self):
        """The knowledge base is not duplicated."""

        class MockKnowledge:
            pass

        kb = MockKnowledge()
        owner = Agent(name="test", id="test-id", knowledge=kb)
        assert owner.deep_copy().knowledge is kb

    def test_memory_manager_is_shared(self):
        """The memory manager is not duplicated."""

        class MockMM:
            pass

        manager = MockMM()
        owner = Agent(name="test", id="test-id", memory_manager=manager)
        assert owner.deep_copy().memory_manager is manager

    def test_model_is_shared(self):
        """The chat model instance is not duplicated."""
        from agno.models.openai import OpenAIChat

        chat_model = OpenAIChat(id="gpt-4o-mini")
        owner = Agent(name="test", id="test-id", model=chat_model)
        assert owner.deep_copy().model is chat_model

    def test_reasoning_model_is_shared(self):
        """The reasoning model instance is not duplicated."""
        from agno.models.openai import OpenAIChat

        thinking_model = OpenAIChat(id="gpt-4o")
        owner = Agent(name="test", id="test-id", reasoning_model=thinking_model)
        assert owner.deep_copy().reasoning_model is thinking_model
# ============================================================================
# Reasoning Agent Unit Tests
# ============================================================================
class TestReasoningAgentUnit:
    """reasoning_agent handling during deep copy."""

    def test_reasoning_agent_is_copied(self):
        """The reasoning agent is duplicated rather than shared."""
        thinker = Agent(name="reasoner", id="reasoner-id")
        owner = Agent(name="main", id="main-id", reasoning_agent=thinker)
        duplicate = owner.deep_copy()
        assert duplicate.reasoning_agent is not thinker
        assert duplicate.reasoning_agent.id == thinker.id

    def test_reasoning_agent_state_isolated(self):
        """Mutations to the source reasoning agent do not reach the copy."""
        thinker = Agent(name="reasoner", id="reasoner-id", metadata={"count": 0})
        owner = Agent(name="main", id="main-id", reasoning_agent=thinker)
        duplicate = owner.deep_copy()
        thinker.metadata["count"] = 10
        assert duplicate.reasoning_agent.metadata["count"] == 0

    def test_reasoning_agent_none(self):
        """A missing reasoning agent stays None after copying."""
        owner = Agent(name="test", id="test-id", reasoning_agent=None)
        assert owner.deep_copy().reasoning_agent is None
# ============================================================================
# Error Handling Unit Tests
# ============================================================================
class TestDeepCopyErrorHandlingUnit:
    """deep_copy must tolerate odd tool values without crashing."""

    def test_graceful_handling_of_weird_tool_types(self):
        """Non-tool objects in the tools list do not break copying."""
        odd_tools = [
            lambda: "hi",  # bare lambda
            42,  # not a tool at all
            {"key": "value"},  # plain dict
        ]
        owner = Agent(name="test", id="test-id", tools=odd_tools)
        duplicate = owner.deep_copy()  # must not raise
        assert len(duplicate.tools) == 3

    def test_tool_with_complex_state(self):
        """Deeply nested toolkit state is copied independently."""
        from agno.tools.toolkit import Toolkit

        class ComplexTool(Toolkit):
            def __init__(self):
                super().__init__(name="complex")
                self.nested = {"level1": {"level2": [1, 2, 3]}}
                self.items = [{"a": 1}, {"b": 2}]
                self.register(self.get_data)

            def get_data(self) -> dict:
                return self.nested

        toolkit = ComplexTool()
        owner = Agent(name="test", id="test-id", tools=[toolkit])
        duplicate = owner.deep_copy()
        # Mutate the source after copying; the copy must be unaffected.
        toolkit.nested["level1"]["level2"].append(4)
        toolkit.items.append({"c": 3})
        assert duplicate.tools[0].nested["level1"]["level2"] == [1, 2, 3]
        assert len(duplicate.tools[0].items) == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_per_request_isolation.py",
"license": "Apache License 2.0",
"lines": 1163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/knowledge/chunking/code.py | from typing import Any, Dict, List, Literal, Optional, Union
try:
from chonkie import CodeChunker
from chonkie.tokenizer import TokenizerProtocol
except ImportError:
raise ImportError(
"`chonkie` is required for code chunking. "
'Please install it using `pip install "chonkie[code]"` to use CodeChunking.'
)
from agno.knowledge.chunking.strategy import ChunkingStrategy
from agno.knowledge.document.base import Document
class CodeChunking(ChunkingStrategy):
    """Chunk source code along its syntactic structure using Chonkie's AST-based CodeChunker.

    Args:
        tokenizer: Tokenizer name or TokenizerProtocol instance used to size chunks.
        chunk_size: Target maximum size of each chunk.
        language: Tree-sitter language to parse, or "auto" for detection.
        include_nodes: Whether to include AST nodes (Note: Chonkie's base Chunk type
            does not store node information).
        chunker_params: Extra keyword arguments forwarded to Chonkie's CodeChunker.
    """

    def __init__(
        self,
        tokenizer: Union[str, TokenizerProtocol] = "character",
        chunk_size: int = 2048,
        language: Union[Literal["auto"], Any] = "auto",
        include_nodes: bool = False,
        chunker_params: Optional[Dict[str, Any]] = None,
    ):
        self.tokenizer = tokenizer
        self.chunk_size = chunk_size
        self.language = language
        self.include_nodes = include_nodes
        self.chunker_params = chunker_params
        # Built lazily on first use so construction stays cheap.
        self.chunker: Optional[CodeChunker] = None

    def _initialize_chunker(self):
        """Construct the underlying CodeChunker exactly once."""
        if self.chunker is not None:
            return
        params: Dict[str, Any] = {
            "tokenizer": self.tokenizer,
            "chunk_size": self.chunk_size,
            "language": self.language,
            "include_nodes": self.include_nodes,
        }
        if self.chunker_params:
            params.update(self.chunker_params)
        try:
            self.chunker = CodeChunker(**params)
        except ValueError as e:
            # Chonkie reports an unknown tokenizer as ValueError; surface a clearer hint.
            if "Tokenizer not found" in str(e):
                raise ImportError(
                    f"Missing dependencies for tokenizer `{self.tokenizer}`. "
                    f"Please install using `pip install tiktoken`, `pip install transformers`, or `pip install tokenizers`"
                ) from e
            raise

    def chunk(self, document: Document) -> List[Document]:
        """Split `document` into code-aware chunks, returning one Document per chunk.

        Empty documents are returned unchanged as a single-element list.
        """
        if not document.content:
            return [document]
        # Lazy initialization raises ImportError if Chonkie's extras are missing.
        self._initialize_chunker()
        if self.chunker is None:
            raise RuntimeError("Chunker failed to initialize")
        pieces: List[Document] = []
        for index, piece in enumerate(self.chunker.chunk(document.content), start=1):
            metadata = document.meta_data.copy()
            metadata["chunk"] = index
            metadata["chunk_size"] = len(piece.text)
            pieces.append(
                Document(
                    id=self._generate_chunk_id(document, index, piece.text),
                    name=document.name,
                    meta_data=metadata,
                    content=piece.text,
                )
            )
        return pieces
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/chunking/code.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/knowledge/chunking/test_code_chunking.py | """Tests for CodeChunking wrapper around chonkie's CodeChunker."""
from typing import Sequence
import pytest
from agno.knowledge.chunking.code import CodeChunking
from agno.knowledge.document.base import Document
@pytest.fixture
def sample_python_code():
    """Three small Python functions used as chunking input."""
    code = """def function1():
    x = 1
    return x
def function2():
    y = 2
    return y
def function3():
    z = 3
    return z"""
    return code
@pytest.fixture
def sample_javascript_code():
    """Two small JavaScript functions used as chunking input."""
    code = """function hello() {
    return "world";
}
function goodbye() {
    return "moon";
}"""
    return code
def test_code_chunking_basic(sample_python_code):
    """Default-configured chunker yields non-empty Document chunks."""
    strategy = CodeChunking()
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    for piece in pieces:
        assert isinstance(piece, Document)
        assert piece.content
def test_code_chunking_character_tokenizer(sample_python_code):
    """Chunking works with the character-level tokenizer."""
    strategy = CodeChunking(tokenizer="character", chunk_size=50, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    assert all(piece.content for piece in pieces)
def test_code_chunking_word_tokenizer(sample_python_code):
    """Chunking works with the word-level tokenizer."""
    strategy = CodeChunking(tokenizer="word", chunk_size=20, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    assert all(piece.content for piece in pieces)
def test_code_chunking_gpt2_tokenizer(sample_python_code):
    """Chunking works with the gpt2 tokenizer."""
    strategy = CodeChunking(tokenizer="gpt2", chunk_size=30, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    assert all(piece.content for piece in pieces)
def test_code_chunking_cl100k_tokenizer(sample_python_code):
    """Chunking works with the cl100k_base tokenizer."""
    strategy = CodeChunking(tokenizer="cl100k_base", chunk_size=30, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    assert all(piece.content for piece in pieces)
def test_code_chunking_python_language(sample_python_code):
    """Explicit Python language keeps the content byte-for-byte."""
    strategy = CodeChunking(tokenizer="character", chunk_size=100, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    # Reassembling the chunks must reproduce the original source exactly.
    rebuilt = "".join(piece.content for piece in pieces)
    assert rebuilt == sample_python_code
def test_code_chunking_javascript_language(sample_javascript_code):
    """JavaScript sources chunk and round-trip without loss."""
    strategy = CodeChunking(tokenizer="character", chunk_size=50, language="javascript")
    pieces = strategy.chunk(Document(content=sample_javascript_code, name="test.js"))
    assert pieces
    rebuilt = "".join(piece.content for piece in pieces)
    assert rebuilt == sample_javascript_code
def test_code_chunking_auto_language(sample_python_code):
    """Automatic language detection still produces chunks."""
    strategy = CodeChunking(tokenizer="character", chunk_size=100, language="auto")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
def test_code_chunking_include_nodes_false(sample_python_code):
    """include_nodes=False (the default) yields normal content chunks."""
    strategy = CodeChunking(include_nodes=False, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    assert all(piece.content for piece in pieces)
def test_code_chunking_include_nodes_true(sample_python_code):
    """include_nodes=True also yields normal content chunks."""
    strategy = CodeChunking(include_nodes=True, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    assert pieces
    assert all(piece.content for piece in pieces)
def test_code_chunking_preserves_content(sample_python_code):
    """Joining all chunks reproduces the original source exactly."""
    strategy = CodeChunking(tokenizer="character", chunk_size=50, language="python")
    pieces = strategy.chunk(Document(content=sample_python_code, name="test.py"))
    rebuilt = "".join(piece.content for piece in pieces)
    assert rebuilt == sample_python_code
    assert len(rebuilt) == len(sample_python_code)
def test_code_chunking_metadata(sample_python_code):
    """Chunk ids, names, and metadata are derived from the parent document."""
    strategy = CodeChunking(language="python")
    source = Document(id="test-123", name="test.py", content=sample_python_code, meta_data={"author": "test"})
    for position, piece in enumerate(strategy.chunk(source), start=1):
        assert piece.id == f"test-123_{position}"
        assert piece.name == "test.py"
        assert piece.meta_data["chunk"] == position
        # Parent metadata is inherited alongside the chunk bookkeeping.
        assert piece.meta_data["author"] == "test"
        assert "chunk_size" in piece.meta_data
        assert piece.meta_data["chunk_size"] == len(piece.content)
def test_code_chunking_empty_content():
    """An empty document is returned untouched as a single-element list."""
    strategy = CodeChunking()
    empty_doc = Document(content="", name="empty.py")
    pieces = strategy.chunk(empty_doc)
    assert len(pieces) == 1
    assert pieces[0] is empty_doc
def test_code_chunking_whitespace_only():
    """Whitespace-only input yields no chunks (chonkie behavior)."""
    strategy = CodeChunking(language="python")
    pieces = strategy.chunk(Document(content="   \n\n  ", name="whitespace.py"))
    assert len(pieces) == 0
def test_code_chunking_single_line():
    """A one-line document becomes a single identical chunk."""
    strategy = CodeChunking(tokenizer="character", chunk_size=100, language="python")
    pieces = strategy.chunk(Document(content="x = 1", name="single.py"))
    assert len(pieces) == 1
    assert pieces[0].content == "x = 1"
def test_code_chunking_preserves_newlines():
    """Newline characters survive chunking and reassembly."""
    source_code = "def test():\n    pass\n\ndef other():\n    pass\n"
    strategy = CodeChunking(tokenizer="character", chunk_size=30, language="python")
    pieces = strategy.chunk(Document(content=source_code, name="test.py"))
    rebuilt = "".join(piece.content for piece in pieces)
    assert rebuilt == source_code
    assert rebuilt.count("\n") == source_code.count("\n")
def test_code_chunking_preserves_indentation():
    """Test that indentation is preserved."""
    # NOTE: the leading spaces inside this triple-quoted literal are the
    # fixture under test — do not reflow or re-indent this string.
    code = """def hello():
    if True:
        print("nested")
    return True"""
    chunker = CodeChunking(tokenizer="character", chunk_size=50, language="python")
    doc = Document(content=code, name="test.py")
    chunks = chunker.chunk(doc)
    # Round-trip: joining all chunk contents must reproduce the input exactly.
    combined = "".join(chunk.content for chunk in chunks)
    assert combined == code
    # Check that indentation is preserved
    assert "    if True:" in combined
    assert "        print" in combined
def test_code_chunking_unicode_content():
    """Non-ASCII characters (CJK, emoji) survive chunking intact."""
    source = 'def hello():\n    return "Hello 世界 🌍"'
    chunker = CodeChunking(tokenizer="character", chunk_size=50, language="python")
    pieces = chunker.chunk(Document(content=source, name="unicode.py"))
    rebuilt = "".join(piece.content for piece in pieces)
    assert rebuilt == source
    assert "世界" in rebuilt
    assert "🌍" in rebuilt
def test_code_chunking_various_chunk_sizes(sample_python_code):
    """Chunking round-trips losslessly across a range of chunk sizes."""
    for chunk_size in (10, 50, 100, 500, 2000):
        chunker = CodeChunking(tokenizer="character", chunk_size=chunk_size, language="python")
        pieces = chunker.chunk(Document(content=sample_python_code, name="test.py"))
        assert len(pieces) > 0
        # Whatever the size, the concatenation must equal the original input.
        assert "".join(piece.content for piece in pieces) == sample_python_code
def test_code_chunking_custom_tokenizer_subclass(sample_python_code):
    """Test with custom Tokenizer subclass."""
    from chonkie.tokenizer import Tokenizer

    class LineTokenizer(Tokenizer):
        """Custom tokenizer that counts lines of code."""

        def __init__(self):
            # Vocabulary is built lazily during encode(); ids are assigned
            # in first-seen order, so encode/decode are only consistent
            # within a single instance.
            self.vocab = []
            self.token2id = {}

        def __repr__(self) -> str:
            return "LineTokenizer()"

        def tokenize(self, text: str) -> Sequence[str]:
            # One token per line; empty text yields no tokens.
            if not text:
                return []
            return text.split("\n")

        def encode(self, text: str) -> Sequence[int]:
            encoded = []
            for token in self.tokenize(text):
                if token not in self.token2id:
                    # New line content: register it and append to the vocab.
                    self.token2id[token] = len(self.vocab)
                    self.vocab.append(token)
                encoded.append(self.token2id[token])
            return encoded

        def decode(self, tokens: Sequence[int]) -> str:
            try:
                return "\n".join([self.vocab[token] for token in tokens])
            except Exception as e:
                # Out-of-range ids mean the tokens were not produced by this instance.
                raise ValueError(f"Decoding failed. Tokens: {tokens} not found in vocab.") from e

        def count_tokens(self, text: str) -> int:
            if not text:
                return 0
            return len(text.split("\n"))

    # chunk_size=10 here means "at most 10 lines per chunk" under this tokenizer.
    chunker = CodeChunking(tokenizer=LineTokenizer(), chunk_size=10, language="python")
    doc = Document(content=sample_python_code, name="test.py")
    chunks = chunker.chunk(doc)
    assert len(chunks) > 0
    assert all(chunk.content for chunk in chunks)
def test_code_chunking_no_document_id(sample_python_code):
    """Without an id, the document's name is used as the chunk-id prefix."""
    chunker = CodeChunking(language="python")
    nameless_id_doc = Document(content=sample_python_code, name="test.py")  # No id, but has name
    pieces = chunker.chunk(nameless_id_doc)
    assert len(pieces) > 0
    for piece in pieces:
        # Fallback id scheme: "<name>_<index>"
        assert piece.id is not None
        assert piece.id.startswith("test.py_")
def test_code_chunking_lazy_initialization(sample_python_code):
    """The underlying chonkie chunker is only built on first use."""
    chunker = CodeChunking(language="python")
    # Construction alone must not initialize the wrapped chunker.
    assert chunker.chunker is None
    _ = chunker.chunk(Document(content=sample_python_code, name="test.py"))
    # First chunk() call triggers lazy initialization.
    assert chunker.chunker is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/chunking/test_code_chunking.py",
"license": "Apache License 2.0",
"lines": 239,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/learn/config.py | """
LearningMachine Configuration
=============================
Enums and configuration classes for the unified learning system.
Uses dataclasses instead of Pydantic BaseModels to avoid runtime
overhead and validation errors that could break agents mid-run.
Configurations:
    - LearningMode: How learning is extracted (ALWAYS, AGENTIC, PROPOSE, HITL)
    - UserProfileConfig: Config for user profile learning
    - UserMemoryConfig: Config for unstructured user memories
      (MemoriesConfig is kept as a backwards-compatible alias)
    - SessionContextConfig: Config for session context learning
    - LearnedKnowledgeConfig: Config for learned knowledge
    - EntityMemoryConfig: Config for entity memory
    - DecisionLogConfig: Config for decision logs (Phase 2)
    - FeedbackConfig: Config for behavioral feedback (Phase 2)
    - SelfImprovementConfig: Config for self-improvement (Phase 3)
"""
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional, Type, Union
if TYPE_CHECKING:
from agno.db.base import AsyncBaseDb, BaseDb
from agno.models.base import Model
# =============================================================================
# Enums
# =============================================================================
class LearningMode(Enum):
    """How learning is extracted and saved.

    ALWAYS: Automatic extraction after each response.
    AGENTIC: Agent decides when to learn via tools.
    PROPOSE: Agent proposes, human confirms.
    HITL (Human-in-the-Loop): Reserved for future use.
    """

    # Values are lowercase strings so the mode serializes/compares cleanly.
    ALWAYS = "always"
    AGENTIC = "agentic"
    PROPOSE = "propose"
    HITL = "hitl"
# =============================================================================
# Learning Type Configurations
# =============================================================================
@dataclass
class UserProfileConfig:
    """Configuration for User Profile learning type.

    UserProfile stores long-term structured profile fields about users:
    name, preferred_name, and custom fields from extended schemas.
    Updated via `update_profile` tool.

    Note: For unstructured memories, use UserMemoryConfig instead.

    Scope: USER (fixed) - Retrieved and stored by user_id.

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: ALWAYS.
        schema: Custom schema for user profile data. Default: UserProfile.

        # Extraction operations
        enable_update_profile: Allow updating profile fields (name, etc).

        # Agent tools
        enable_agent_tools: Expose tools to the agent.
        agent_can_update_profile: If agent_tools enabled, provide update_user_profile tool.

        # Prompt customization
        instructions: Custom instructions for what to capture.
        additional_instructions: Extra instructions appended to default.
        system_message: Full override for extraction system message.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.ALWAYS
    schema: Optional[Type[Any]] = None  # None -> default schema (UserProfile, per docstring)

    # Extraction operations
    enable_update_profile: bool = True  # Allow updating profile fields

    # Agent tools
    enable_agent_tools: bool = False
    agent_can_update_profile: bool = True

    # Prompt customization
    instructions: Optional[str] = None
    additional_instructions: Optional[str] = None
    system_message: Optional[str] = None

    def __repr__(self) -> str:
        return f"UserProfileConfig(mode={self.mode.value}, enable_agent_tools={self.enable_agent_tools})"
@dataclass
class UserMemoryConfig:
    """Configuration for User Memory learning type.

    User Memory stores unstructured observations about users that don't fit
    into structured profile fields. These are long-term memories that
    persist across sessions.

    Scope: USER (fixed) - Retrieved and stored by user_id.

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: ALWAYS.
        schema: Custom schema for memories data. Default: Memories.

        # Extraction operations
        enable_add_memory: Allow adding new memories during extraction.
        enable_update_memory: Allow updating existing memories.
        enable_delete_memory: Allow deleting memories.
        enable_clear_memories: Allow clearing all memories (dangerous).

        # Agent tools
        enable_agent_tools: Expose tools to the agent.
        agent_can_update_memories: If agent_tools enabled, provide update_user_memory tool.

        # Prompt customization
        instructions: Custom instructions for what to capture.
        additional_instructions: Extra instructions appended to default.
        system_message: Full override for extraction system message.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.ALWAYS
    schema: Optional[Type[Any]] = None  # None -> default schema (Memories, per docstring)

    # Extraction operations
    enable_add_memory: bool = True
    enable_update_memory: bool = True
    enable_delete_memory: bool = True
    enable_clear_memories: bool = False  # Dangerous - disabled by default

    # Agent tools
    enable_agent_tools: bool = False
    agent_can_update_memories: bool = True

    # Prompt customization
    instructions: Optional[str] = None
    additional_instructions: Optional[str] = None
    system_message: Optional[str] = None

    def __repr__(self) -> str:
        return f"UserMemoryConfig(mode={self.mode.value}, enable_agent_tools={self.enable_agent_tools})"
# Backwards compatibility alias: older code imported MemoriesConfig;
# it now resolves to UserMemoryConfig (same class object, not a subclass).
MemoriesConfig = UserMemoryConfig
@dataclass
class SessionContextConfig:
    """Configuration for Session Context learning type.

    Session Context captures state and summary for the current session:
    what's happened, goals, plans, and progress.

    Scope: SESSION (fixed) - Retrieved and stored by session_id.

    Key behavior: Context builds on previous context. Each extraction
    receives the previous context and updates it, rather than creating
    from scratch. This ensures continuity even with truncated message history.

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: ALWAYS.
        schema: Custom schema for session context. Default: SessionContext.

        # Planning mode
        enable_planning: Track goal, plan, and progress (not just summary).

        # Extraction operations
        enable_add_context: Allow creating new context.
        enable_update_context: Allow updating existing context.
        enable_delete_context: Allow deleting context.
        enable_clear_context: Allow clearing context.

        # Prompt customization
        instructions: Custom instructions for extraction.
        additional_instructions: Extra instructions appended to default.
        system_message: Full override for extraction system message.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.ALWAYS
    schema: Optional[Type[Any]] = None  # None -> default schema (SessionContext, per docstring)

    # Planning mode
    enable_planning: bool = False

    # Extraction operations
    enable_add_context: bool = True
    enable_update_context: bool = True
    enable_delete_context: bool = True
    enable_clear_context: bool = False

    # Prompt customization
    instructions: Optional[str] = None
    additional_instructions: Optional[str] = None
    system_message: Optional[str] = None

    def __repr__(self) -> str:
        return f"SessionContextConfig(mode={self.mode.value}, enable_planning={self.enable_planning})"
@dataclass
class LearnedKnowledgeConfig:
    """Configuration for Learned Knowledge learning type.

    Learned Knowledge captures reusable insights and patterns that
    can be shared across users and agents.

    Scope: `namespace` + KNOWLEDGE (fixed):
        - "user": Private learned knowledge per user
        - "global": Shared with everyone (default)
        - Custom string: Explicit grouping (e.g., "engineering", "sales_west")

    IMPORTANT: A knowledge base is required for learnings to work.
    Either provide it here or pass it to LearningMachine directly.

    Attributes:
        knowledge: Knowledge base instance (vector store) for storage.
            REQUIRED - learnings cannot be saved/searched without this.
        model: Model for extraction (if using ALWAYS mode).
        mode: How learning is extracted. Default: AGENTIC.
        schema: Custom schema for learning data. Default: LearnedKnowledge.

        # Sharing boundary
        namespace: Sharing boundary ("user", "global", or custom).

        # Agent tools
        enable_agent_tools: Expose tools to the agent.
        agent_can_save: If agent_tools enabled, provide save_learning tool.
        agent_can_search: If agent_tools enabled, provide search_learnings tool.

        # Prompt customization
        instructions: Custom instructions for what makes a good learning.
        additional_instructions: Extra instructions appended to default.
        system_message: Full override for extraction system message.
    """

    # Knowledge base - required for learnings to work
    knowledge: Optional[Any] = None  # agno.knowledge.Knowledge
    model: Optional["Model"] = None

    # Mode and extraction (AGENTIC by default: the agent decides when to save)
    mode: LearningMode = LearningMode.AGENTIC
    schema: Optional[Type[Any]] = None  # None -> default schema (LearnedKnowledge, per docstring)

    # Sharing boundary
    namespace: str = "global"

    # Agent tools (enabled by default, unlike the other configs)
    enable_agent_tools: bool = True
    agent_can_save: bool = True
    agent_can_search: bool = True

    # Prompt customization
    instructions: Optional[str] = None
    additional_instructions: Optional[str] = None
    system_message: Optional[str] = None

    def __repr__(self) -> str:
        has_knowledge = self.knowledge is not None
        return f"LearnedKnowledgeConfig(mode={self.mode.value}, knowledge={has_knowledge}, enable_agent_tools={self.enable_agent_tools})"
@dataclass
class EntityMemoryConfig:
    """Configuration for EntityMemory learning type.

    EntityMemory stores facts about third-party entities: companies,
    projects, people, systems, products, etc. Think of it as UserProfile
    but for things that aren't the user.

    Entities have:
    - Core properties (name, description, key-value properties)
    - Facts (semantic memory - "Acme uses PostgreSQL")
    - Events (episodic memory - "Acme launched v2 on Jan 15")
    - Relationships (graph edges - "Bob is CEO of Acme")

    Scope is controlled by `namespace`:
        - "user": Private entity graph per user
        - "global": Shared with everyone (default)
        - Custom string: Explicit grouping (e.g., "sales_west")

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: ALWAYS.
        schema: Custom schema for entity memory data. Default: EntityMemory.

        # Sharing boundary
        namespace: Sharing boundary ("user", "global", or custom).

        # Extraction operations
        enable_create_entity: Allow creating new entities.
        enable_update_entity: Allow updating entity properties.
        enable_add_fact: Allow adding facts to entities.
        enable_update_fact: Allow updating existing facts.
        enable_delete_fact: Allow deleting facts.
        enable_add_event: Allow adding events to entities.
        enable_add_relationship: Allow adding relationships.

        # Agent tools
        enable_agent_tools: Expose tools to the agent.
        agent_can_create_entity: If agent_tools enabled, provide create_entity tool.
        agent_can_update_entity: If agent_tools enabled, provide update_entity tool.
        agent_can_search_entities: If agent_tools enabled, provide search_entities tool.

        # Prompt customization
        instructions: Custom instructions for entity extraction.
        additional_instructions: Extra instructions appended to default.
        system_message: Full override for extraction system message.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.ALWAYS
    schema: Optional[Type[Any]] = None  # None -> default schema (EntityMemory, per docstring)

    # Sharing boundary
    namespace: str = "global"

    # Extraction operations
    enable_create_entity: bool = True
    enable_update_entity: bool = True
    enable_add_fact: bool = True
    enable_update_fact: bool = True
    enable_delete_fact: bool = True
    enable_add_event: bool = True
    enable_add_relationship: bool = True

    # Agent tools
    enable_agent_tools: bool = False
    agent_can_create_entity: bool = True
    agent_can_update_entity: bool = True
    agent_can_search_entities: bool = True

    # Prompt customization
    instructions: Optional[str] = None
    additional_instructions: Optional[str] = None
    system_message: Optional[str] = None

    def __repr__(self) -> str:
        return f"EntityMemoryConfig(mode={self.mode.value}, namespace={self.namespace}, enable_agent_tools={self.enable_agent_tools})"
# =============================================================================
# Phase 2 Configurations (Placeholders)
# =============================================================================
@dataclass
class DecisionLogConfig:
    """Configuration for Decision Logs learning type.

    Decision Logs record decisions made by the agent with reasoning
    and context. Useful for auditing and learning from past decisions.

    Scope: AGENT (fixed) - Stored and retrieved by agent_id.

    Note: Deferred to Phase 2.

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: ALWAYS.
        schema: Custom schema for decision log data.
        enable_agent_tools: Expose tools to the agent.
        agent_can_save: If agent_tools enabled, allow saving decisions.
        agent_can_search: If agent_tools enabled, allow searching decisions.
        system_message: Full override for extraction system message.
        instructions: Custom instructions for extraction.
        additional_instructions: Extra instructions appended to default.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.ALWAYS
    schema: Optional[Type[Any]] = None

    # Agent tools
    enable_agent_tools: bool = True
    agent_can_save: bool = True
    agent_can_search: bool = True

    # Prompt customization
    system_message: Optional[str] = None
    instructions: Optional[str] = None
    additional_instructions: Optional[str] = None

    def __repr__(self) -> str:
        return f"DecisionLogConfig(mode={self.mode.value})"
@dataclass
class FeedbackConfig:
    """Configuration for Behavioral Feedback learning type.

    Behavioral Feedback captures signals about what worked and what
    didn't: thumbs up/down, corrections, regeneration requests.

    Scope: AGENT (fixed) - Stored and retrieved by agent_id.

    Note: Deferred to Phase 2.

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: ALWAYS.
        schema: Custom schema for feedback data.
        instructions: Custom instructions for extraction.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.ALWAYS
    schema: Optional[Type[Any]] = None

    # Prompt customization
    instructions: Optional[str] = None

    def __repr__(self) -> str:
        # Reflect the actual configured mode (the old hard-coded "ALWAYS"
        # was wrong whenever a caller overrode `mode`); this also matches
        # the __repr__ style of the sibling config classes.
        return f"FeedbackConfig(mode={self.mode.value})"
@dataclass
class SelfImprovementConfig:
    """Configuration for Self-Improvement learning type.

    Self-Improvement proposes updates to agent instructions based
    on feedback patterns and successful interactions.

    Scope: AGENT (fixed) - Stored and retrieved by agent_id.

    Note: Deferred to Phase 3.

    Attributes:
        db: Database backend for storage.
        model: Model for extraction (required for ALWAYS mode).
        mode: How learning is extracted. Default: HITL.
        schema: Custom schema for self-improvement data.
        instructions: Custom instructions for extraction.
    """

    # Required fields (None here; filled in from machine-level defaults downstream)
    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None

    # Mode and extraction
    mode: LearningMode = LearningMode.HITL
    schema: Optional[Type[Any]] = None

    # Prompt customization
    instructions: Optional[str] = None

    def __repr__(self) -> str:
        # Reflect the actual configured mode (the old hard-coded "HITL"
        # was wrong whenever a caller overrode `mode`); this also matches
        # the __repr__ style of the sibling config classes.
        return f"SelfImprovementConfig(mode={self.mode.value})"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/config.py",
"license": "Apache License 2.0",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/learn/curate.py | """
Curator
=======
Memory maintenance for LearningMachine.
Keeps memories tidy through:
- Pruning: Remove old memories
- Deduplication: Remove exact/near-exact duplicates
Usage:
>>> learning = LearningMachine(db=db, model=model, user_profile=True)
>>>
>>> # Remove memories older than 90 days, keep max 100
>>> removed = learning.curator.prune(user_id="alice", max_age_days=90, max_count=100)
>>>
>>> # Remove duplicate memories
>>> deduped = learning.curator.deduplicate(user_id="alice")
"""
import re
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import Any, List

from agno.utils.log import log_debug
@dataclass
class Curator:
    """Memory maintenance. Keeps things tidy.

    Currently supports the user_profile store only: both `prune` and
    `deduplicate` operate on `machine.stores["user_profile"]` and return 0
    when that store is absent or has no memories.
    """

    machine: Any  # LearningMachine (untyped to avoid a circular import)

    # Pre-compiled patterns used by _normalize (compiled once, not per call).
    _PUNCT_RE = re.compile(r"[^\w\s]")
    _WS_RE = re.compile(r"\s+")

    def prune(
        self,
        user_id: str,
        max_age_days: int = 0,
        max_count: int = 0,
    ) -> int:
        """Remove old memories from user profile.

        Args:
            user_id: User to prune memories for.
            max_age_days: Remove memories older than this (0 = disabled).
            max_count: Keep at most this many memories (0 = disabled).

        Returns:
            Number of memories removed.
        """
        store = self.machine.stores.get("user_profile")
        if not store:
            return 0

        profile = store.get(user_id=user_id)
        if not profile or not hasattr(profile, "memories"):
            return 0

        memories = profile.memories
        if not memories:  # Covers both None and empty list
            return 0

        original_count = len(memories)

        # Age filter
        if max_age_days > 0:
            cutoff = datetime.now(timezone.utc) - timedelta(days=max_age_days)
            memories = self._filter_by_age(memories=memories, cutoff=cutoff)

        # Count filter (keep newest)
        if max_count > 0 and len(memories) > max_count:
            memories = self._keep_newest(memories=memories, count=max_count)

        removed = original_count - len(memories)
        if removed > 0:
            # Persist only when something actually changed.
            profile.memories = memories
            store.save(user_id=user_id, profile=profile)
            log_debug(f"Curator.prune: removed {removed} memories for user_id={user_id}")
        return removed

    def deduplicate(
        self,
        user_id: str,
    ) -> int:
        """Remove duplicate memories from user profile.

        Uses exact and near-exact string matching.

        Args:
            user_id: User to deduplicate memories for.

        Returns:
            Number of duplicate memories removed.
        """
        store = self.machine.stores.get("user_profile")
        if not store:
            return 0

        profile = store.get(user_id=user_id)
        if not profile or not hasattr(profile, "memories"):
            return 0

        memories = profile.memories
        # Guard against memories=None (matches prune's guard); previously
        # len(None) raised TypeError here.
        if not memories or len(memories) < 2:
            return 0

        original_count = len(memories)
        unique_memories = self._remove_duplicates(memories=memories)

        removed = original_count - len(unique_memories)
        if removed > 0:
            profile.memories = unique_memories
            store.save(user_id=user_id, profile=profile)
            log_debug(f"Curator.deduplicate: removed {removed} duplicates for user_id={user_id}")
        return removed

    # =========================================================================
    # Helpers
    # =========================================================================

    def _filter_by_age(
        self,
        memories: List[dict],
        cutoff: datetime,
    ) -> List[dict]:
        """Keep memories newer than cutoff.

        Memories without a parseable ISO-8601 `created_at` are kept —
        curation never silently discards data it cannot date.
        """
        result = []
        for m in memories:
            created_at = m.get("created_at")
            if not created_at:
                result.append(m)  # Keep if no timestamp
                continue
            try:
                # Accept a trailing "Z" (fromisoformat pre-3.11 rejects it).
                created = datetime.fromisoformat(created_at.replace("Z", "+00:00"))
                if created >= cutoff:
                    result.append(m)
            except (ValueError, TypeError):
                result.append(m)  # Keep if unparseable
        return result

    def _keep_newest(
        self,
        memories: List[dict],
        count: int,
    ) -> List[dict]:
        """Keep the N newest memories.

        Sorts lexically on the ISO `created_at` string (missing timestamps
        sort oldest), which orders correctly for uniform ISO formats.
        """
        sorted_memories = sorted(
            memories,
            key=lambda m: m.get("created_at", ""),
            reverse=True,
        )
        return sorted_memories[:count]

    def _remove_duplicates(
        self,
        memories: List[dict],
    ) -> List[dict]:
        """Remove exact and near-exact duplicate memories.

        The first occurrence wins; later memories whose normalized content
        matches an earlier one are dropped.
        """
        seen = set()
        unique = []
        for m in memories:
            content = m.get("content", "")
            normalized = self._normalize(content)
            if normalized not in seen:
                seen.add(normalized)
                unique.append(m)
        return unique

    def _normalize(self, text: str) -> str:
        """Normalize text for comparison: lowercase, strip punctuation,
        collapse runs of whitespace to a single space."""
        text = text.lower().strip()
        text = self._PUNCT_RE.sub("", text)
        text = self._WS_RE.sub(" ", text)
        return text
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/curate.py",
"license": "Apache License 2.0",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/machine.py | """
Learning Machine
================
Unified learning system for agents.
Coordinates multiple learning stores to give agents:
- User memory (who they're talking to)
- Session context (what's happened so far)
- Entity memory (knowledge about external things)
- Learned knowledge (reusable insights)
Plus maintenance via the Curator for keeping memories healthy.
"""
from dataclasses import dataclass, field
from os import getenv
from typing import Any, Callable, Dict, List, Optional, Union
from agno.learn.config import (
DecisionLogConfig,
EntityMemoryConfig,
LearnedKnowledgeConfig,
LearningMode,
SessionContextConfig,
UserMemoryConfig,
UserProfileConfig,
)
from agno.learn.curate import Curator
from agno.learn.stores.protocol import LearningStore
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
try:
from agno.db.base import AsyncBaseDb, BaseDb
from agno.models.base import Model
except ImportError:
pass
# Type aliases for cleaner signatures
UserProfileInput = Union[bool, UserProfileConfig, LearningStore, None]
UserMemoryInput = Union[bool, UserMemoryConfig, LearningStore, None]
EntityMemoryInput = Union[bool, EntityMemoryConfig, LearningStore, None]
SessionContextInput = Union[bool, SessionContextConfig, LearningStore, None]
LearnedKnowledgeInput = Union[bool, LearnedKnowledgeConfig, LearningStore, None]
DecisionLogInput = Union[bool, DecisionLogConfig, LearningStore, None]
@dataclass
class LearningMachine:
    """Central orchestrator for agent learning.

    Coordinates all learning stores and provides unified interface
    for recall, processing, tool generation, and maintenance.

    Args:
        db: Database backend for persistence.
        model: Model for learning extraction.
        knowledge: Knowledge base for learned knowledge store.
        user_profile: Enable user profile. Accepts bool, Config, or Store.
        user_memory: Enable user memory. Accepts bool, Config, or Store.
        session_context: Enable session context. Accepts bool, Config, or Store.
        entity_memory: Enable entity memory. Accepts bool, Config, or Store.
        learned_knowledge: Enable learned knowledge. Auto-enabled when knowledge provided.
        namespace: Default namespace for entity_memory and learned_knowledge.
        custom_stores: Additional stores implementing LearningStore protocol.
        debug_mode: Enable debug logging.
    """

    db: Optional[Union["BaseDb", "AsyncBaseDb"]] = None
    model: Optional["Model"] = None
    knowledge: Optional[Any] = None

    # Store configurations (accepts bool, Config, or Store instance)
    user_profile: UserProfileInput = False
    user_memory: UserMemoryInput = False
    session_context: SessionContextInput = False
    entity_memory: EntityMemoryInput = False
    learned_knowledge: LearnedKnowledgeInput = False
    decision_log: DecisionLogInput = False  # Phase 2

    # Namespace for entity_memory and learned_knowledge
    namespace: str = "global"

    # Custom stores
    custom_stores: Optional[Dict[str, LearningStore]] = None

    # Debug mode
    debug_mode: bool = False

    # Internal state (lazy initialization); populated on first access of the
    # `stores` property — excluded from __init__ via init=False.
    _stores: Optional[Dict[str, LearningStore]] = field(default=None, init=False)
    _curator: Optional[Any] = field(default=None, init=False)
# =========================================================================
# Initialization (Lazy)
# =========================================================================
    @property
    def stores(self) -> Dict[str, LearningStore]:
        """All registered stores, keyed by name. Lazily initialized."""
        # First access triggers store construction so __init__ stays cheap.
        if self._stores is None:
            self._initialize_stores()
        return self._stores  # type: ignore
def _initialize_stores(self) -> None:
"""Initialize all configured stores."""
self._stores = {}
# User Profile
if self.user_profile:
self._stores["user_profile"] = self._resolve_store(
input_value=self.user_profile,
store_type="user_profile",
)
# User Memory
if self.user_memory:
self._stores["user_memory"] = self._resolve_store(
input_value=self.user_memory,
store_type="user_memory",
)
# Session Context
if self.session_context:
self._stores["session_context"] = self._resolve_store(
input_value=self.session_context,
store_type="session_context",
)
# Entity Memory
if self.entity_memory:
self._stores["entity_memory"] = self._resolve_store(
input_value=self.entity_memory,
store_type="entity_memory",
)
# Learned Knowledge (auto-enable if knowledge provided)
if self.learned_knowledge or self.knowledge is not None:
self._stores["learned_knowledge"] = self._resolve_store(
input_value=self.learned_knowledge if self.learned_knowledge else True,
store_type="learned_knowledge",
)
# Decision Log (Phase 2)
if self.decision_log:
self._stores["decision_log"] = self._resolve_store(
input_value=self.decision_log,
store_type="decision_log",
)
# Custom stores
if self.custom_stores:
for name, store in self.custom_stores.items():
self._stores[name] = store
log_debug(f"LearningMachine initialized with stores: {list(self._stores.keys())}")
def _resolve_store(
self,
input_value: Any,
store_type: str,
) -> LearningStore:
"""Resolve input to a store instance.
Args:
input_value: bool, Config, or Store instance
store_type: One of "user_profile", "user_memory", "session_context", "entity_memory", "learned_knowledge"
Returns:
Initialized store instance.
"""
# Already a store instance
if isinstance(input_value, LearningStore):
return input_value
# Create store based on type
if store_type == "user_profile":
return self._create_user_profile_store(config=input_value)
elif store_type == "user_memory":
return self._create_user_memory_store(config=input_value)
elif store_type == "session_context":
return self._create_session_context_store(config=input_value)
elif store_type == "entity_memory":
return self._create_entity_memory_store(config=input_value)
elif store_type == "learned_knowledge":
return self._create_learned_knowledge_store(config=input_value)
elif store_type == "decision_log":
return self._create_decision_log_store(config=input_value)
else:
raise ValueError(f"Unknown store type: {store_type}")
def _create_user_profile_store(self, config: Any) -> LearningStore:
"""Create UserProfileStore with resolved config."""
from agno.learn.stores import UserProfileStore
if isinstance(config, UserProfileConfig):
if config.db is None:
config.db = self.db
if config.model is None:
config.model = self.model
else:
config = UserProfileConfig(
db=self.db,
model=self.model,
mode=LearningMode.ALWAYS,
)
return UserProfileStore(config=config, debug_mode=self.debug_mode)
def _create_user_memory_store(self, config: Any) -> LearningStore:
"""Create UserMemoryStore with resolved config."""
from agno.learn.stores import UserMemoryStore
if isinstance(config, UserMemoryConfig):
if config.db is None:
config.db = self.db
if config.model is None:
config.model = self.model
else:
config = UserMemoryConfig(
db=self.db,
model=self.model,
mode=LearningMode.ALWAYS,
)
return UserMemoryStore(config=config, debug_mode=self.debug_mode)
def _create_session_context_store(self, config: Any) -> LearningStore:
"""Create SessionContextStore with resolved config."""
from agno.learn.stores import SessionContextStore
if isinstance(config, SessionContextConfig):
if config.db is None:
config.db = self.db
if config.model is None:
config.model = self.model
else:
config = SessionContextConfig(
db=self.db,
model=self.model,
enable_planning=False,
)
return SessionContextStore(config=config, debug_mode=self.debug_mode)
def _create_entity_memory_store(self, config: Any) -> LearningStore:
"""Create EntityMemoryStore with resolved config."""
from agno.learn.stores import EntityMemoryStore
if isinstance(config, EntityMemoryConfig):
if config.db is None:
config.db = self.db
if config.model is None:
config.model = self.model
else:
config = EntityMemoryConfig(
db=self.db,
model=self.model,
namespace=self.namespace,
mode=LearningMode.ALWAYS,
)
return EntityMemoryStore(config=config, debug_mode=self.debug_mode)
def _create_learned_knowledge_store(self, config: Any) -> LearningStore:
"""Create LearnedKnowledgeStore with resolved config."""
from agno.learn.stores import LearnedKnowledgeStore
if isinstance(config, LearnedKnowledgeConfig):
if config.model is None:
config.model = self.model
if config.knowledge is None and self.knowledge is not None:
config.knowledge = self.knowledge
else:
config = LearnedKnowledgeConfig(
model=self.model,
knowledge=self.knowledge,
mode=LearningMode.AGENTIC,
)
return LearnedKnowledgeStore(config=config, debug_mode=self.debug_mode)
def _create_decision_log_store(self, config: Any) -> LearningStore:
"""Create DecisionLogStore with resolved config."""
from agno.learn.stores import DecisionLogStore
if isinstance(config, DecisionLogConfig):
if config.db is None:
config.db = self.db
if config.model is None:
config.model = self.model
else:
config = DecisionLogConfig(
db=self.db,
model=self.model,
mode=LearningMode.AGENTIC, # Default to AGENTIC for explicit logging
)
return DecisionLogStore(config=config, debug_mode=self.debug_mode)
# =========================================================================
# Store Accessors (Type-Safe)
# =========================================================================
    @property
    def user_profile_store(self) -> Optional[LearningStore]:
        """Get user profile store if enabled, else None."""
        return self.stores.get("user_profile")

    @property
    def user_memory_store(self) -> Optional[LearningStore]:
        """Get user memory store if enabled, else None."""
        return self.stores.get("user_memory")

    @property
    def session_context_store(self) -> Optional[LearningStore]:
        """Get session context store if enabled, else None."""
        return self.stores.get("session_context")

    @property
    def entity_memory_store(self) -> Optional[LearningStore]:
        """Get entity memory store if enabled, else None."""
        return self.stores.get("entity_memory")

    @property
    def learned_knowledge_store(self) -> Optional[LearningStore]:
        """Get learned knowledge store if enabled, else None."""
        return self.stores.get("learned_knowledge")

    @property
    def decision_log_store(self) -> Optional[LearningStore]:
        """Get decision log store if enabled, else None."""
        return self.stores.get("decision_log")

    @property
    def was_updated(self) -> bool:
        """True if any store was updated in the last operation.

        Stores without a `was_updated` attribute are treated as not updated.
        """
        return any(getattr(store, "was_updated", False) for store in self.stores.values())
# =========================================================================
# Main API
# =========================================================================
def build_context(
    self,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    message: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> str:
    """Build memory context for the agent's system prompt.

    Call before generating a response to give the agent relevant context.

    Args:
        user_id: User identifier (for user profile lookup).
        session_id: Session identifier (for session context lookup).
        message: Current message (for semantic search of learnings).
        entity_id: Entity to retrieve (for entity memory).
        entity_type: Type of entity to retrieve.
        namespace: Namespace filter; falls back to this machine's namespace.
        agent_id: Optional agent context.
        team_id: Optional team context.

    Returns:
        Context string to inject into the agent's system prompt.
    """
    recall_kwargs = dict(
        user_id=user_id,
        session_id=session_id,
        message=message,
        entity_id=entity_id,
        entity_type=entity_type,
        namespace=namespace or self.namespace,
        agent_id=agent_id,
        team_id=team_id,
        **kwargs,
    )
    recalled = self.recall(**recall_kwargs)
    return self._format_results(results=recalled)
async def abuild_context(
    self,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    message: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> str:
    """Async version of build_context."""
    recall_kwargs = dict(
        user_id=user_id,
        session_id=session_id,
        message=message,
        entity_id=entity_id,
        entity_type=entity_type,
        namespace=namespace or self.namespace,
        agent_id=agent_id,
        team_id=team_id,
        **kwargs,
    )
    recalled = await self.arecall(**recall_kwargs)
    return self._format_results(results=recalled)
def get_tools(
    self,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> List[Callable]:
    """Get learning tools to expose to the agent.

    Returns tools based on which stores are enabled:
    - user_profile: update_user_memory
    - entity_memory: search_entities, create_entity, update_entity, add_fact, etc.
    - learned_knowledge: search_learnings, save_learning

    Args:
        user_id: User identifier (required for user profile tools).
        session_id: Session identifier.
        namespace: Default namespace for entity/learning operations.
        agent_id: Optional agent context.
        team_id: Optional team context.

    Returns:
        List of callable tools collected from every enabled store.
    """
    context = {
        "user_id": user_id,
        "session_id": session_id,
        "namespace": namespace or self.namespace,
        "agent_id": agent_id,
        "team_id": team_id,
        **kwargs,
    }
    collected: List[Callable] = []
    for store_name, store in self.stores.items():
        # A failing store only loses its own tools; the rest still load.
        try:
            found = store.get_tools(**context)
            if found:
                collected.extend(found)
                log_debug(f"Got {len(found)} tools from {store_name}")
        except Exception as exc:
            log_warning(f"Error getting tools from {store_name}: {exc}")
    return collected
async def aget_tools(
    self,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> List[Callable]:
    """Async version of get_tools."""
    context = {
        "user_id": user_id,
        "session_id": session_id,
        "namespace": namespace or self.namespace,
        "agent_id": agent_id,
        "team_id": team_id,
        **kwargs,
    }
    collected: List[Callable] = []
    for store_name, store in self.stores.items():
        # A failing store only loses its own tools; the rest still load.
        try:
            found = await store.aget_tools(**context)
            if found:
                collected.extend(found)
                log_debug(f"Got {len(found)} tools from {store_name}")
        except Exception as exc:
            log_warning(f"Error getting tools from {store_name}: {exc}")
    return collected
def process(
    self,
    messages: List[Any],
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> None:
    """Extract and save learnings from a conversation.

    Call after a conversation to extract learnings. Each store
    processes based on its mode (ALWAYS stores extract automatically).

    Args:
        messages: Conversation messages to analyze.
        user_id: User identifier (for user profile extraction).
        session_id: Session identifier (for session context extraction).
        namespace: Namespace for entity/learning saves.
        agent_id: Optional agent context.
        team_id: Optional team context.
    """
    shared = {
        "messages": messages,
        "user_id": user_id,
        "session_id": session_id,
        "namespace": namespace or self.namespace,
        "agent_id": agent_id,
        "team_id": team_id,
        **kwargs,
    }
    for store_name, store in self.stores.items():
        # One failing store must not block extraction in the others.
        try:
            store.process(**shared)
            if getattr(store, "was_updated", False):
                log_debug(f"Store {store_name} was updated")
        except Exception as exc:
            log_warning(f"Error processing through {store_name}: {exc}")
async def aprocess(
    self,
    messages: List[Any],
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> None:
    """Async version of process."""
    shared = {
        "messages": messages,
        "user_id": user_id,
        "session_id": session_id,
        "namespace": namespace or self.namespace,
        "agent_id": agent_id,
        "team_id": team_id,
        **kwargs,
    }
    for store_name, store in self.stores.items():
        # One failing store must not block extraction in the others.
        try:
            await store.aprocess(**shared)
            if getattr(store, "was_updated", False):
                log_debug(f"Store {store_name} was updated")
        except Exception as exc:
            log_warning(f"Error processing through {store_name}: {exc}")
# =========================================================================
# Lower-Level API
# =========================================================================
def recall(
    self,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    message: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> Dict[str, Any]:
    """Retrieve raw data from all stores.

    Most users should use `build_context()` instead.

    Returns:
        Dict mapping store names to their recalled data; stores that
        raise are skipped (with a warning) rather than failing the call.
    """
    shared = {
        "user_id": user_id,
        "session_id": session_id,
        "message": message,
        "query": message,  # For learned_knowledge
        "entity_id": entity_id,
        "entity_type": entity_type,
        "namespace": namespace or self.namespace,
        "agent_id": agent_id,
        "team_id": team_id,
        **kwargs,
    }
    gathered: Dict[str, Any] = {}
    for store_name, store in self.stores.items():
        try:
            gathered[store_name] = store.recall(**shared)
            try:
                log_debug(f"Recalled from {store_name}: {gathered[store_name]}")
            except Exception:
                # Best-effort logging only; some payloads may not stringify.
                pass
        except Exception as exc:
            log_warning(f"Error recalling from {store_name}: {exc}")
    return gathered
async def arecall(
    self,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    message: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
    namespace: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    **kwargs,
) -> Dict[str, Any]:
    """Async version of recall."""
    shared = {
        "user_id": user_id,
        "session_id": session_id,
        "message": message,
        "query": message,
        "entity_id": entity_id,
        "entity_type": entity_type,
        "namespace": namespace or self.namespace,
        "agent_id": agent_id,
        "team_id": team_id,
        **kwargs,
    }
    gathered: Dict[str, Any] = {}
    for store_name, store in self.stores.items():
        try:
            gathered[store_name] = await store.arecall(**shared)
            try:
                log_debug(f"Recalled from {store_name}: {gathered[store_name]}")
            except Exception:
                # Best-effort logging only; some payloads may not stringify.
                pass
        except Exception as exc:
            log_warning(f"Error recalling from {store_name}: {exc}")
    return gathered
def _format_results(self, results: Dict[str, Any]) -> str:
    """Format recalled data into a single context string.

    Each store formats its own payload via build_context(); empty or
    failing sections are dropped and the rest are joined with blank lines.
    """
    sections: List[str] = []
    for store_name, payload in results.items():
        store = self.stores.get(store_name)
        if not store:
            continue
        try:
            text = store.build_context(data=payload)
            if text:
                sections.append(text)
        except Exception as exc:
            log_warning(f"Error building context from {store_name}: {exc}")
    return "\n\n".join(sections)
# =========================================================================
# Curation
# =========================================================================
@property
def curator(self) -> "Curator":
    """Get the curator for memory maintenance.

    Lazily creates the curator on first access.

    Example:
        >>> learning.curator.prune(user_id="alice", max_age_days=90)
        >>> learning.curator.deduplicate(user_id="alice")
    """
    if self._curator is not None:
        return self._curator
    # Imported lazily so curation machinery is only loaded when used.
    from agno.learn.curate import Curator

    self._curator = Curator(machine=self)
    return self._curator
# =========================================================================
# Debug
# =========================================================================
def set_log_level(self) -> None:
    """Set log level based on debug_mode or the AGNO_DEBUG env var.

    Either source of truth flips the machine into debug mode; otherwise
    logging is reset to info level.
    """
    env_debug = getenv("AGNO_DEBUG", "false").lower() == "true"
    if self.debug_mode or env_debug:
        self.debug_mode = True
        set_log_level_to_debug()
        return
    set_log_level_to_info()
# =========================================================================
# Serialization
# =========================================================================
def to_dict(self) -> Dict[str, Any]:
    """Serialize the LearningMachine configuration to a dictionary.

    Preserves which stores are enabled and the namespace so that
    from_dict() can reconstruct an equivalent instance. Does not
    serialize db, model, or knowledge (those are injected at init).
    """
    serialized: Dict[str, Any] = {}
    # Only enabled store flags are recorded; disabled ones are omitted
    # entirely, matching from_dict()'s False defaults.
    for flag in (
        "user_profile",
        "user_memory",
        "session_context",
        "entity_memory",
        "learned_knowledge",
        "decision_log",
    ):
        if getattr(self, flag):
            serialized[flag] = True
    if self.namespace != "global":
        serialized["namespace"] = self.namespace
    if self.debug_mode:
        serialized["debug_mode"] = True
    return serialized
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "LearningMachine":
    """Reconstruct a LearningMachine from a serialized dictionary.

    db and model must be injected separately (e.g. during agent/team init).
    """
    init_kwargs: Dict[str, Any] = {
        flag: data.get(flag, False)
        for flag in (
            "user_profile",
            "user_memory",
            "session_context",
            "entity_memory",
            "learned_knowledge",
            "decision_log",
        )
    }
    init_kwargs["namespace"] = data.get("namespace", "global")
    init_kwargs["debug_mode"] = data.get("debug_mode", False)
    return cls(**init_kwargs)
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
    """Debug-friendly summary: stores, db, model, knowledge, namespace."""
    if self._stores is not None:
        stores_repr = list(self.stores.keys())
    else:
        stores_repr = "[not initialized]"
    db_repr = self.db.__class__.__name__ if self.db else None
    model_repr = self.model.id if self.model and hasattr(self.model, "id") else None
    return (
        f"LearningMachine(stores={stores_repr}, db={db_repr}, "
        f"model={model_repr}, knowledge={self.knowledge is not None}, "
        f"namespace={self.namespace!r})"
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/machine.py",
"license": "Apache License 2.0",
"lines": 662,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
"""
LearningMachine Schemas
=======================
Dataclasses for each learning type.
Uses pure dataclasses to avoid runtime overhead.
All parsing is done via from_dict() which never raises.
Classes are designed to be extended - from_dict() and to_dict()
automatically handle subclass fields via dataclasses.fields().
Field Descriptions
When extending schemas, use field metadata to provide descriptions
that will be shown to the LLM:
@dataclass
class MyUserProfile(UserProfile):
company: Optional[str] = field(
default=None,
metadata={"description": "Where they work"}
)
The LLM will see this description when deciding how to update fields.
Schemas:
- UserProfile: Long-term user memory
- SessionContext: Current session state
- LearnedKnowledge: Reusable knowledge/insights
- EntityMemory: Third-party entity facts
- Decision: Decision logs (Phase 2)
- Feedback: Behavioral feedback (Phase 2)
- InstructionUpdate: Self-improvement (Phase 3)
"""
from dataclasses import asdict, dataclass, field, fields
from typing import Any, Dict, List, Optional
from agno.learn.utils import _parse_json, _safe_get
from agno.utils.log import log_debug
# =============================================================================
# Helper for debug logging
# =============================================================================
def _truncate_for_log(data: Any, max_len: int = 100) -> str:
"""Truncate data for logging to avoid massive log entries."""
s = str(data)
if len(s) > max_len:
return s[:max_len] + "..."
return s
# =============================================================================
# User Profile Schema
# =============================================================================
@dataclass
class UserProfile:
    """Schema for User Profile learning type.

    Captures long-term structured profile information about a user that persists
    across sessions. Designed to be extended with custom fields.

    ## Extending with Custom Fields

    Use field metadata to provide descriptions for the LLM:

        @dataclass
        class MyUserProfile(UserProfile):
            company: Optional[str] = field(
                default=None,
                metadata={"description": "Company or organization they work for"}
            )
            role: Optional[str] = field(
                default=None,
                metadata={"description": "Job title or role"}
            )

    Attributes:
        user_id: Required unique identifier for the user.
        name: User's full name.
        preferred_name: How they prefer to be addressed (nickname, first name, etc).
        agent_id: Which agent created this profile.
        team_id: Which team created this profile.
        created_at: When the profile was created (ISO format).
        updated_at: When the profile was last updated (ISO format).
    """

    user_id: str
    name: Optional[str] = field(default=None, metadata={"description": "User's full name"})
    preferred_name: Optional[str] = field(
        default=None, metadata={"description": "How they prefer to be addressed (nickname, first name, etc)"}
    )
    agent_id: Optional[str] = field(default=None, metadata={"internal": True})
    team_id: Optional[str] = field(default=None, metadata={"internal": True})
    created_at: Optional[str] = field(default=None, metadata={"internal": True})
    updated_at: Optional[str] = field(default=None, metadata={"internal": True})

    @classmethod
    def from_dict(cls, data: Any) -> Optional["UserProfile"]:
        """Parse from dict/JSON, returning None on any failure.

        Works with subclasses - automatically handles additional fields.
        """
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            parsed = _parse_json(data)
            if not parsed:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # user_id is required
            if not parsed.get("user_id"):
                log_debug(f"{cls.__name__}.from_dict: missing required field 'user_id'")
                return None
            # Get field names for this class (includes subclass fields)
            field_names = {f.name for f in fields(cls)}
            kwargs = {k: v for k, v in parsed.items() if k in field_names}
            return cls(**kwargs)
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict. Works with subclasses."""
        try:
            return asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}

    @classmethod
    def get_updateable_fields(cls) -> Dict[str, Dict[str, Any]]:
        """Get fields that can be updated via update_profile tool.

        Returns:
            Dict mapping field name to field info including description.
            Excludes internal fields (user_id, timestamps, etc).
        """
        skip = {"user_id", "created_at", "updated_at", "agent_id", "team_id"}
        result = {}
        for f in fields(cls):
            if f.name in skip:
                continue
            # Skip fields marked as internal
            if f.metadata.get("internal"):
                continue
            result[f.name] = {
                "type": f.type,
                "description": f.metadata.get("description", f"User's {f.name.replace('_', ' ')}"),
            }
        return result

    def __repr__(self) -> str:
        # Use the runtime class name so subclasses (an explicitly supported
        # extension point of this schema) do not misreport as "UserProfile".
        return f"{type(self).__name__}(user_id={self.user_id})"
@dataclass
class Memories:
    """Schema for Memories learning type.

    Captures unstructured observations about a user that don't fit
    into structured profile fields. These are long-term memories
    that persist across sessions.

    Attributes:
        user_id: Required unique identifier for the user.
        memories: List of memory entries, each with 'id' and 'content'.
        agent_id: Which agent created these memories.
        team_id: Which team created these memories.
        created_at: When the memories were created (ISO format).
        updated_at: When the memories were last updated (ISO format).
    """

    user_id: str
    memories: List[Dict[str, Any]] = field(default_factory=list)
    agent_id: Optional[str] = field(default=None, metadata={"internal": True})
    team_id: Optional[str] = field(default=None, metadata={"internal": True})
    created_at: Optional[str] = field(default=None, metadata={"internal": True})
    updated_at: Optional[str] = field(default=None, metadata={"internal": True})

    @classmethod
    def from_dict(cls, data: Any) -> Optional["Memories"]:
        """Parse from dict/JSON, returning None on any failure.

        Works with subclasses - automatically handles additional fields.
        """
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            parsed = _parse_json(data)
            if not parsed:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # user_id is required
            if not parsed.get("user_id"):
                log_debug(f"{cls.__name__}.from_dict: missing required field 'user_id'")
                return None
            # Get field names for this class (includes subclass fields)
            field_names = {f.name for f in fields(cls)}
            kwargs = {k: v for k, v in parsed.items() if k in field_names}
            return cls(**kwargs)
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict. Works with subclasses."""
        try:
            return asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}

    def add_memory(self, content: str, **kwargs) -> str:
        """Add a new memory.

        Args:
            content: The memory text to add.
            **kwargs: Additional fields (source, timestamp, etc.)

        Returns:
            The generated memory ID. NOTE: an ID is returned even when
            *content* is blank and nothing was actually stored.
        """
        import uuid

        memory_id = str(uuid.uuid4())[:8]
        if content and content.strip():
            self.memories.append({"id": memory_id, "content": content.strip(), **kwargs})
        return memory_id

    def get_memory(self, memory_id: str) -> Optional[Dict[str, Any]]:
        """Get a specific memory by ID, or None if not found."""
        for mem in self.memories:
            if isinstance(mem, dict) and mem.get("id") == memory_id:
                return mem
        return None

    def update_memory(self, memory_id: str, content: str, **kwargs) -> bool:
        """Update an existing memory.

        Returns:
            True if memory was found and updated, False otherwise.
        """
        for mem in self.memories:
            if isinstance(mem, dict) and mem.get("id") == memory_id:
                mem["content"] = content.strip()
                mem.update(kwargs)
                return True
        return False

    def delete_memory(self, memory_id: str) -> bool:
        """Delete a memory by ID.

        Returns:
            True if memory was found and deleted, False otherwise.
        """
        original_len = len(self.memories)
        self.memories = [mem for mem in self.memories if not (isinstance(mem, dict) and mem.get("id") == memory_id)]
        return len(self.memories) < original_len

    def get_memories_text(self) -> str:
        """Get all memories as a formatted bullet list for prompts."""
        if not self.memories:
            return ""
        lines = []
        for m in self.memories:
            content = m.get("content") if isinstance(m, dict) else str(m)
            if content:
                lines.append(f"- {content}")
        return "\n".join(lines)

    def __repr__(self) -> str:
        # Use the runtime class name so subclasses report themselves
        # correctly instead of always claiming to be "Memories".
        return f"{type(self).__name__}(user_id={self.user_id})"
# =============================================================================
# Session Context Schema
# =============================================================================
@dataclass
class SessionContext:
    """Schema for Session Context learning type.

    Captures state and summary for the current session.
    Unlike UserProfile which accumulates, this is REPLACED on each update.

    Key behavior: Extraction receives the previous context and updates it,
    ensuring continuity even when message history is truncated.

    Attributes:
        session_id: Required unique identifier for the session.
        user_id: Which user this session belongs to.
        summary: What's happened in this session.
        goal: What the user is trying to accomplish.
        plan: Steps to achieve the goal.
        progress: Which steps have been completed.
        agent_id: Which agent is running this session.
        team_id: Which team is running this session.
        created_at: When the session started (ISO format).
        updated_at: When the context was last updated (ISO format).

    Example - Extending with custom fields:

        @dataclass
        class MySessionContext(SessionContext):
            mood: Optional[str] = field(
                default=None,
                metadata={"description": "User's current mood or emotional state"}
            )
    """

    session_id: str
    user_id: Optional[str] = None
    summary: Optional[str] = field(
        default=None, metadata={"description": "Summary of what's been discussed in this session"}
    )
    goal: Optional[str] = field(default=None, metadata={"description": "What the user is trying to accomplish"})
    plan: Optional[List[str]] = field(default=None, metadata={"description": "Steps to achieve the goal"})
    progress: Optional[List[str]] = field(default=None, metadata={"description": "Which steps have been completed"})
    agent_id: Optional[str] = field(default=None, metadata={"internal": True})
    team_id: Optional[str] = field(default=None, metadata={"internal": True})
    created_at: Optional[str] = field(default=None, metadata={"internal": True})
    updated_at: Optional[str] = field(default=None, metadata={"internal": True})

    @classmethod
    def from_dict(cls, data: Any) -> Optional["SessionContext"]:
        """Parse from dict/JSON, returning None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            parsed = _parse_json(data)
            if not parsed:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # session_id is required
            if not parsed.get("session_id"):
                log_debug(f"{cls.__name__}.from_dict: missing required field 'session_id'")
                return None
            field_names = {f.name for f in fields(cls)}
            kwargs = {k: v for k, v in parsed.items() if k in field_names}
            return cls(**kwargs)
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict."""
        try:
            return asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}

    def get_context_text(self) -> str:
        """Get session context as a formatted string for prompts."""
        parts = []
        if self.summary:
            parts.append(f"Summary: {self.summary}")
        if self.goal:
            parts.append(f"Goal: {self.goal}")
        if self.plan:
            plan_text = "\n".join(f"  {i + 1}. {step}" for i, step in enumerate(self.plan))
            parts.append(f"Plan:\n{plan_text}")
        if self.progress:
            progress_text = "\n".join(f"  ✓ {step}" for step in self.progress)
            parts.append(f"Completed:\n{progress_text}")
        return "\n\n".join(parts)

    def __repr__(self) -> str:
        # Use the runtime class name so subclasses (explicitly supported,
        # see class docstring) report themselves correctly.
        return f"{type(self).__name__}(session_id={self.session_id})"
# =============================================================================
# Learned Knowledge Schema
# =============================================================================
@dataclass
class LearnedKnowledge:
    """Schema for Learned Knowledge learning type.

    Captures reusable insights that apply across users and agents.

    - title: Short, descriptive title for the learning.
    - learning: The actual insight or pattern.
    - context: When/where this learning applies.
    - tags: Categories for organization.
    - namespace: Sharing boundary for this learning.

    Example:
        LearnedKnowledge(
            title="Python async best practices",
            learning="Always use asyncio.gather() for concurrent I/O tasks",
            context="When optimizing I/O-bound Python applications",
            tags=["python", "async", "performance"]
        )
    """

    title: str
    learning: str
    context: Optional[str] = None
    tags: Optional[List[str]] = None
    user_id: Optional[str] = field(default=None, metadata={"internal": True})
    namespace: Optional[str] = field(default=None, metadata={"internal": True})
    agent_id: Optional[str] = field(default=None, metadata={"internal": True})
    team_id: Optional[str] = field(default=None, metadata={"internal": True})
    created_at: Optional[str] = field(default=None, metadata={"internal": True})
    updated_at: Optional[str] = field(default=None, metadata={"internal": True})

    @classmethod
    def from_dict(cls, data: Any) -> Optional["LearnedKnowledge"]:
        """Parse from dict/JSON, returning None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            parsed = _parse_json(data)
            if not parsed:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # title and learning are required
            if not parsed.get("title") or not parsed.get("learning"):
                log_debug(f"{cls.__name__}.from_dict: missing required fields 'title' or 'learning'")
                return None
            field_names = {f.name for f in fields(cls)}
            kwargs = {k: v for k, v in parsed.items() if k in field_names}
            return cls(**kwargs)
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict."""
        try:
            return asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}

    def to_text(self) -> str:
        """Convert learning to searchable text format for vector storage."""
        parts = [f"Title: {self.title}", f"Learning: {self.learning}"]
        if self.context:
            parts.append(f"Context: {self.context}")
        if self.tags:
            parts.append(f"Tags: {', '.join(self.tags)}")
        return "\n".join(parts)

    def __repr__(self) -> str:
        # Use the runtime class name so subclasses report themselves
        # correctly instead of always claiming to be "LearnedKnowledge".
        return f"{type(self).__name__}(title={self.title})"
# =============================================================================
# Entity Memory Schema
# =============================================================================
@dataclass
class EntityMemory:
"""Schema for Entity Memory learning type.
Captures facts about third-party entities: companies, projects,
people, systems, products. Like UserProfile but for non-users.
Structure:
- **Core**: name, description, properties (key-value pairs)
- **Facts**: Semantic memory ("Acme uses PostgreSQL")
- **Events**: Episodic memory ("Acme launched v2 on Jan 15")
- **Relationships**: Graph edges ("Bob is CEO of Acme")
Common Entity Types:
- "company", "project", "person", "system", "product"
- Any string is valid.
Example:
EntityMemory(
entity_id="acme_corp",
entity_type="company",
name="Acme Corporation",
description="Enterprise software company",
properties={"industry": "fintech", "size": "startup"},
facts=[
{"id": "f1", "content": "Uses PostgreSQL for main database"},
{"id": "f2", "content": "API uses OAuth2 authentication"},
],
events=[
{"id": "e1", "content": "Launched v2.0", "date": "2024-01-15"},
],
relationships=[
{"entity_id": "bob_smith", "relation": "CEO"},
],
)
Attributes:
entity_id: Unique identifier (lowercase, underscores: "acme_corp").
entity_type: Type of entity ("company", "project", "person", etc).
name: Display name for the entity.
description: Brief description of what this entity is.
properties: Key-value properties (industry, tech_stack, etc).
facts: Semantic memories - timeless facts about the entity.
events: Episodic memories - time-bound occurrences.
relationships: Connections to other entities.
namespace: Sharing boundary for this entity.
user_id: Owner user (if namespace="user").
agent_id: Which agent created this.
team_id: Which team context.
created_at: When first created.
updated_at: When last modified.
"""
entity_id: str
entity_type: str = field(metadata={"description": "Type: company, project, person, system, product, etc"})
# Core properties
name: Optional[str] = field(default=None, metadata={"description": "Display name for the entity"})
description: Optional[str] = field(
default=None, metadata={"description": "Brief description of what this entity is"}
)
properties: Dict[str, str] = field(
default_factory=dict, metadata={"description": "Key-value properties (industry, tech_stack, etc)"}
)
# Semantic memory (facts)
facts: List[Dict[str, Any]] = field(default_factory=list)
# [{"id": "abc", "content": "Uses PostgreSQL", "confidence": 0.9, "source": "..."}]
# Episodic memory (events)
events: List[Dict[str, Any]] = field(default_factory=list)
# [{"id": "xyz", "content": "Had outage on 2024-01-15", "date": "2024-01-15"}]
# Relationships (graph edges)
relationships: List[Dict[str, Any]] = field(default_factory=list)
# [{"entity_id": "bob", "relation": "CEO", "direction": "incoming"}]
# Scope
namespace: Optional[str] = field(default=None, metadata={"internal": True})
user_id: Optional[str] = field(default=None, metadata={"internal": True})
agent_id: Optional[str] = field(default=None, metadata={"internal": True})
team_id: Optional[str] = field(default=None, metadata={"internal": True})
created_at: Optional[str] = field(default=None, metadata={"internal": True})
updated_at: Optional[str] = field(default=None, metadata={"internal": True})
@classmethod
def from_dict(cls, data: Any) -> Optional["EntityMemory"]:
    """Parse from dict/JSON, returning None on any failure."""
    if data is None:
        return None
    if isinstance(data, cls):
        return data
    try:
        parsed = _parse_json(data)
        if not parsed:
            log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
            return None
        # entity_id and entity_type are both mandatory.
        if not (parsed.get("entity_id") and parsed.get("entity_type")):
            log_debug(f"{cls.__name__}.from_dict: missing required fields 'entity_id' or 'entity_type'")
            return None
        # Keep only keys this (sub)class declares as dataclass fields.
        known = {f.name for f in fields(cls)}
        return cls(**{key: val for key, val in parsed.items() if key in known})
    except Exception as e:
        log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
        return None

def to_dict(self) -> Dict[str, Any]:
    """Convert to a plain dict; returns {} if serialization fails."""
    try:
        return asdict(self)
    except Exception as e:
        log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
        return {}
def add_fact(self, content: str, **kwargs) -> str:
    """Add a new fact to the entity.

    Args:
        content: The fact text.
        **kwargs: Additional fields (confidence, source, etc).

    Returns:
        The generated fact ID. NOTE: an ID is returned even when
        *content* is blank and nothing was actually stored.
    """
    import uuid

    new_id = str(uuid.uuid4())[:8]
    text = content.strip() if content else ""
    if text:
        self.facts.append({"id": new_id, "content": text, **kwargs})
    return new_id
def add_event(self, content: str, date: Optional[str] = None, **kwargs) -> str:
    """Add a new event to the entity.

    Args:
        content: The event description.
        date: When the event occurred (ISO format or natural language).
        **kwargs: Additional fields.

    Returns:
        The generated event ID. NOTE: an ID is returned even when
        *content* is blank and nothing was actually stored.
    """
    import uuid

    new_id = str(uuid.uuid4())[:8]
    text = content.strip() if content else ""
    if text:
        record = {"id": new_id, "content": text, **kwargs}
        if date:
            record["date"] = date
        self.events.append(record)
    return new_id
def add_relationship(self, related_entity_id: str, relation: str, direction: str = "outgoing", **kwargs) -> str:
"""Add a relationship to another entity.
Args:
related_entity_id: The other entity's ID.
relation: The relationship type ("CEO", "owns", "part_of", etc).
direction: "outgoing" (this → other) or "incoming" (other → this).
**kwargs: Additional fields.
Returns:
The generated relationship ID.
"""
import uuid
rel_id = str(uuid.uuid4())[:8]
self.relationships.append(
{"id": rel_id, "entity_id": related_entity_id, "relation": relation, "direction": direction, **kwargs}
)
return rel_id
def get_fact(self, fact_id: str) -> Optional[Dict[str, Any]]:
"""Get a specific fact by ID."""
for fact in self.facts:
if isinstance(fact, dict) and fact.get("id") == fact_id:
return fact
return None
def update_fact(self, fact_id: str, content: str, **kwargs) -> bool:
"""Update an existing fact.
Returns:
True if fact was found and updated, False otherwise.
"""
for fact in self.facts:
if isinstance(fact, dict) and fact.get("id") == fact_id:
fact["content"] = content.strip()
fact.update(kwargs)
return True
return False
def delete_fact(self, fact_id: str) -> bool:
"""Delete a fact by ID.
Returns:
True if fact was found and deleted, False otherwise.
"""
original_len = len(self.facts)
self.facts = [f for f in self.facts if not (isinstance(f, dict) and f.get("id") == fact_id)]
return len(self.facts) < original_len
def get_context_text(self) -> str:
"""Get entity as formatted string for prompts."""
parts = []
if self.name:
parts.append(f"**{self.name}** ({self.entity_type})")
else:
parts.append(f"**{self.entity_id}** ({self.entity_type})")
if self.description:
parts.append(self.description)
if self.properties:
props = ", ".join(f"{k}: {v}" for k, v in self.properties.items())
parts.append(f"Properties: {props}")
if self.facts:
facts_text = "\n".join(f" - {f.get('content', f)}" for f in self.facts)
parts.append(f"Facts:\n{facts_text}")
if self.events:
events_text = "\n".join(
f" - {e.get('content', e)}" + (f" ({e.get('date')})" if e.get("date") else "") for e in self.events
)
parts.append(f"Events:\n{events_text}")
if self.relationships:
rels_text = "\n".join(f" - {r.get('relation')}: {r.get('entity_id')}" for r in self.relationships)
parts.append(f"Relationships:\n{rels_text}")
return "\n\n".join(parts)
@classmethod
def get_updateable_fields(cls) -> Dict[str, Dict[str, Any]]:
"""Get fields that can be updated via update tools.
Returns:
Dict mapping field name to field info including description.
Excludes internal fields and collections (facts, events, relationships).
"""
skip = {
"entity_id",
"entity_type",
"facts",
"events",
"relationships",
"namespace",
"user_id",
"agent_id",
"team_id",
"created_at",
"updated_at",
}
result = {}
for f in fields(cls):
if f.name in skip:
continue
if f.metadata.get("internal"):
continue
result[f.name] = {
"type": f.type,
"description": f.metadata.get("description", f"Entity's {f.name.replace('_', ' ')}"),
}
return result
def __repr__(self) -> str:
return f"EntityMemory(entity_id={self.entity_id})"
# =============================================================================
# Extraction Response Models (internal use by stores)
# =============================================================================
@dataclass
class UserProfileExtractionResponse:
    """LLM response payload for user-profile extraction.

    Used internally by UserProfileStore during background extraction.
    """

    name: Optional[str] = None
    preferred_name: Optional[str] = None
    new_memories: List[str] = field(default_factory=list)

    @classmethod
    def from_dict(cls, data: Any) -> Optional["UserProfileExtractionResponse"]:
        """Parse from a dict or JSON string; None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            payload = _parse_json(data)
            if not payload:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            memories = _safe_get(payload, "new_memories") or []
            return cls(
                name=_safe_get(payload, "name"),
                preferred_name=_safe_get(payload, "preferred_name"),
                new_memories=memories,
            )
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None
@dataclass
class SessionSummaryExtractionResponse:
    """LLM response payload when only a session summary is extracted."""

    summary: str = ""

    @classmethod
    def from_dict(cls, data: Any) -> Optional["SessionSummaryExtractionResponse"]:
        """Parse from a dict or JSON string; None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            payload = _parse_json(data)
            if not payload:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            return cls(summary=_safe_get(payload, "summary") or "")
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None
@dataclass
class SessionPlanningExtractionResponse:
    """LLM response payload for full planning extraction (summary + plan)."""

    summary: str = ""
    goal: Optional[str] = None
    plan: Optional[List[str]] = None
    progress: Optional[List[str]] = None

    @classmethod
    def from_dict(cls, data: Any) -> Optional["SessionPlanningExtractionResponse"]:
        """Parse from a dict or JSON string; None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            payload = _parse_json(data)
            if not payload:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            summary_text = _safe_get(payload, "summary") or ""
            return cls(
                summary=summary_text,
                goal=_safe_get(payload, "goal"),
                plan=_safe_get(payload, "plan"),
                progress=_safe_get(payload, "progress"),
            )
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None
# =============================================================================
# Phase 2 Schemas (Placeholders)
# =============================================================================
@dataclass
class DecisionLog:
    """Schema for Decision Logs.

    Records decisions made by the agent with reasoning and context.

    Useful for:
    - Auditing agent behavior
    - Learning from past decisions
    - Debugging unexpected outcomes
    - Building feedback loops

    Example:
        DecisionLog(
            id="dec_abc123",
            decision="Used web search instead of knowledge base",
            reasoning="User asked about current events which require fresh data",
            decision_type="tool_selection",
            context="User query: 'What happened in the news today?'",
            alternatives=["search_knowledge_base", "ask_for_clarification"],
            confidence=0.85,
        )

    Attributes:
        id: Unique identifier for this decision.
        decision: What was decided (the choice made).
        reasoning: Why this decision was made.
        decision_type: Category of decision (tool_selection, response_style, etc).
        context: The situation that required a decision.
        alternatives: Other options that were considered.
        confidence: How confident the agent was (0.0 to 1.0).
        outcome: What happened as a result (can be updated later).
        outcome_quality: Was the outcome good/bad/neutral.
        tags: Categories for organization.
        session_id: Which session this decision was made in.
        user_id: Which user this decision was for.
        agent_id: Which agent made this decision.
        team_id: Which team context.
        created_at: When the decision was made.
        updated_at: When the outcome was recorded.
    """

    id: str
    decision: str
    reasoning: Optional[str] = field(default=None, metadata={"description": "Why this decision was made"})
    decision_type: Optional[str] = field(
        default=None,
        metadata={"description": "Category: tool_selection, response_style, clarification, escalation, etc"},
    )
    context: Optional[str] = field(default=None, metadata={"description": "The situation that required a decision"})
    alternatives: Optional[List[str]] = field(
        default=None, metadata={"description": "Other options that were considered"}
    )
    confidence: Optional[float] = field(default=None, metadata={"description": "Confidence level 0.0 to 1.0"})
    outcome: Optional[str] = field(default=None, metadata={"description": "What happened as a result"})
    outcome_quality: Optional[str] = field(default=None, metadata={"description": "Was outcome good/bad/neutral"})
    tags: Optional[List[str]] = field(default=None, metadata={"description": "Categories for organization"})
    # Scope / audit fields (excluded from extraction-facing metadata).
    session_id: Optional[str] = field(default=None, metadata={"internal": True})
    user_id: Optional[str] = field(default=None, metadata={"internal": True})
    agent_id: Optional[str] = field(default=None, metadata={"internal": True})
    team_id: Optional[str] = field(default=None, metadata={"internal": True})
    created_at: Optional[str] = field(default=None, metadata={"internal": True})
    updated_at: Optional[str] = field(default=None, metadata={"internal": True})

    @classmethod
    def from_dict(cls, data: Any) -> Optional["DecisionLog"]:
        """Parse from dict/JSON, returning None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            parsed = _parse_json(data)
            if not parsed:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # id and decision are required
            if not parsed.get("id") or not parsed.get("decision"):
                log_debug(f"{cls.__name__}.from_dict: missing required fields 'id' or 'decision'")
                return None
            # Keep only known keys so unexpected columns never break construction.
            field_names = {f.name for f in fields(cls)}
            kwargs = {k: v for k, v in parsed.items() if k in field_names}
            return cls(**kwargs)
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict (empty dict on failure)."""
        try:
            return asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}

    def to_text(self) -> str:
        """Convert to searchable text format (one labeled line per set field)."""
        parts = [f"Decision: {self.decision}"]
        if self.reasoning:
            parts.append(f"Reasoning: {self.reasoning}")
        if self.context:
            parts.append(f"Context: {self.context}")
        if self.decision_type:
            parts.append(f"Type: {self.decision_type}")
        if self.outcome:
            parts.append(f"Outcome: {self.outcome}")
        return "\n".join(parts)

    def __repr__(self) -> str:
        # Only append an ellipsis when the decision text was actually truncated;
        # the previous version appended "..." unconditionally, which was misleading.
        preview = self.decision if len(self.decision) <= 50 else f"{self.decision[:50]}..."
        return f"DecisionLog(id={self.id}, decision={preview})"
# Backwards compatibility alias: keep `Decision` importable so existing
# call sites that used the old name continue to work.
Decision = DecisionLog
@dataclass
class Feedback:
    """Schema for Behavioral Feedback. (Phase 2)

    Captures signals about what worked and what didn't.
    """

    signal: str  # thumbs_up, thumbs_down, correction, regeneration
    learning: Optional[str] = None
    context: Optional[str] = None
    agent_id: Optional[str] = None
    team_id: Optional[str] = None
    created_at: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Any) -> Optional["Feedback"]:
        """Parse from a dict or JSON string; None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            payload = _parse_json(data)
            if not payload:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # The signal is the only mandatory field.
            if not payload.get("signal"):
                log_debug(f"{cls.__name__}.from_dict: missing required field 'signal'")
                return None
            known = {f.name for f in fields(cls)}
            return cls(**{key: value for key, value in payload.items() if key in known})
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (empty dict on failure)."""
        try:
            result = asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}
        return result
@dataclass
class InstructionUpdate:
    """Schema for Self-Improvement. (Phase 3)

    Proposes updates to agent instructions based on feedback patterns.
    """

    current_instruction: str
    proposed_instruction: str
    reasoning: str
    evidence: Optional[List[str]] = None
    agent_id: Optional[str] = None
    team_id: Optional[str] = None
    created_at: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Any) -> Optional["InstructionUpdate"]:
        """Parse from a dict or JSON string; None on any failure."""
        if data is None:
            return None
        if isinstance(data, cls):
            return data
        try:
            payload = _parse_json(data)
            if not payload:
                log_debug(f"{cls.__name__}.from_dict: _parse_json returned None for data={_truncate_for_log(data)}")
                return None
            # All three core fields must be present and non-empty.
            missing = [k for k in ("current_instruction", "proposed_instruction", "reasoning") if not payload.get(k)]
            if missing:
                log_debug(f"{cls.__name__}.from_dict: missing required fields {missing}")
                return None
            known = {f.name for f in fields(cls)}
            return cls(**{key: value for key, value in payload.items() if key in known})
        except Exception as e:
            log_debug(f"{cls.__name__}.from_dict failed: {e}, data={_truncate_for_log(data)}")
            return None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict (empty dict on failure)."""
        try:
            result = asdict(self)
        except Exception as e:
            log_debug(f"{self.__class__.__name__}.to_dict failed: {e}")
            return {}
        return result
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/schemas.py",
"license": "Apache License 2.0",
"lines": 897,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/stores/entity_memory.py | """
Entity Memory Store
===================
Storage backend for Entity Memory learning type.
Stores knowledge about external entities - people, companies, projects, products,
concepts, systems, and any other things the agent interacts with that aren't the
user themselves.
Think of it as:
- UserProfile = what you know about THE USER
- EntityMemory = what you know about EVERYTHING ELSE
Key Features:
- Entity-scoped storage (entity_id + entity_type)
- Three types of memory per entity:
- Facts (semantic): Timeless truths ("Acme uses PostgreSQL")
- Events (episodic): Time-bound occurrences ("Acme launched v2 on Jan 15")
- Relationships (graph): Connections to other entities ("Bob is CEO of Acme")
- Namespace-based sharing control
- Agent tools for CRUD operations
- Background extraction from conversations
Scoping:
- entity_id: Unique identifier (e.g., "acme_corp", "bob_smith")
- entity_type: Category (e.g., "company", "person", "project", "product")
- namespace: Sharing scope:
- "user": Private to current user
- "global": Shared with everyone (default)
- "<custom>": Custom grouping (e.g., "sales_team")
Supported Modes:
- ALWAYS: Automatic extraction of entity info from conversations
- AGENTIC: Agent calls tools directly to manage entity info
"""
from copy import deepcopy
from dataclasses import dataclass, field
from datetime import datetime, timezone
from os import getenv
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
from agno.learn.config import EntityMemoryConfig, LearningMode
from agno.learn.schemas import EntityMemory
from agno.learn.stores.protocol import LearningStore
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
# RunMetrics is only referenced in annotations, so import it lazily.
if TYPE_CHECKING:
    from agno.metrics import RunMetrics
# Best-effort import: DB and message types are optional extras, so this module
# stays importable when they are unavailable (their absence is tolerated).
try:
    from agno.db.base import AsyncBaseDb, BaseDb
    from agno.models.message import Message
except ImportError:
    pass
@dataclass
class EntityMemoryStore(LearningStore):
    """Storage backend for Entity Memory learning type.

    Stores knowledge about external entities with three types of memory:

    - **Facts**: Semantic memory - timeless truths about the entity
    - **Events**: Episodic memory - time-bound occurrences
    - **Relationships**: Graph edges - connections to other entities

    Each entity is identified by entity_id + entity_type, with namespace for sharing.

    Args:
        config: EntityMemoryConfig with all settings including db and model.
        debug_mode: Enable debug logging.
    """

    # All behavior (db, model, mode, namespace, tool flags) is driven by config.
    config: EntityMemoryConfig = field(default_factory=EntityMemoryConfig)
    debug_mode: bool = False
    # State tracking (internal)
    entity_updated: bool = field(default=False, init=False)  # set True after a successful tool write
    _schema: Any = field(default=None, init=False)  # resolved in __post_init__: config.schema or EntityMemory
def __post_init__(self):
self._schema = self.config.schema or EntityMemory
if self.config.mode == LearningMode.PROPOSE:
log_warning("EntityMemoryStore does not support PROPOSE mode. Falling back to ALWAYS mode.")
elif self.config.mode == LearningMode.HITL:
log_warning("EntityMemoryStore does not support HITL mode. Falling back to ALWAYS mode.")
# =========================================================================
# LearningStore Protocol Implementation
# =========================================================================
    # -- LearningStore protocol ----------------------------------------------
    @property
    def learning_type(self) -> str:
        """Unique identifier for this learning type ("entity_memory")."""
        return "entity_memory"
    @property
    def schema(self) -> Any:
        """Schema class used for entities (config.schema, or EntityMemory by default)."""
        return self._schema
def recall(
self,
entity_id: Optional[str] = None,
entity_type: Optional[str] = None,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> Optional[Any]:
"""Retrieve entity memory from storage.
Args:
entity_id: The entity to retrieve (required with entity_type).
entity_type: The type of entity (required with entity_id).
user_id: User ID for "user" namespace scoping.
namespace: Filter by namespace.
**kwargs: Additional context (ignored).
Returns:
Entity memory, or None if not found.
"""
if not entity_id or not entity_type:
return None
effective_namespace = namespace or self.config.namespace
if effective_namespace == "user" and not user_id:
log_warning("EntityMemoryStore.process: namespace='user' requires user_id")
return None
return self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
async def arecall(
self,
entity_id: Optional[str] = None,
entity_type: Optional[str] = None,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> Optional[Any]:
"""Async version of recall."""
if not entity_id or not entity_type:
return None
effective_namespace = namespace or self.config.namespace
if effective_namespace == "user" and not user_id:
log_warning("EntityMemoryStore.arecall: namespace='user' requires user_id")
return None
return await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
def process(
self,
messages: List[Any],
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> None:
"""Extract entity information from messages.
Args:
messages: Conversation messages to analyze.
user_id: User context (for "user" namespace scoping).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
namespace: Namespace to save entities to.
**kwargs: Additional context (ignored).
"""
if self.config.mode == LearningMode.AGENTIC:
return
if not messages:
return
effective_namespace = namespace or self.config.namespace
self.extract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
run_metrics=kwargs.get("run_metrics"),
)
async def aprocess(
self,
messages: List[Any],
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> None:
"""Async version of process."""
if self.config.mode == LearningMode.AGENTIC:
return
if not messages:
return
effective_namespace = namespace or self.config.namespace
await self.aextract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
run_metrics=kwargs.get("run_metrics"),
)
    def build_context(self, data: Any) -> str:
        """Build context for the agent.

        Formats entity memory for injection into the agent's system prompt.
        Entity memory provides knowledge about external things - people, companies,
        projects, products - distinct from knowledge about the user themselves.

        Args:
            data: Entity memory data from recall() - single entity or list.

        Returns:
            Context string to inject into the agent's system prompt (empty
            string when there is nothing to inject and tools are disabled).
        """
        # No stored entities: either advertise the tools so the agent knows it
        # can create entities, or inject nothing at all.
        if not data:
            if self._should_expose_tools:
                return dedent("""\
                <entity_memory_system>
                You have access to entity memory - a knowledge base about people, companies,
                projects, products, and other external entities relevant to your work.
                **Available Tools:**
                - `search_entities`: Find stored information about entities
                - `create_entity`: Store a new entity with its facts
                - `add_fact`: Add a timeless truth about an entity
                - `add_event`: Record a time-bound occurrence
                - `add_relationship`: Capture connections between entities
                **When to use entity memory:**
                - You learn something substantive about a company, person, or project
                - Information would be useful to recall in future conversations
                - Facts are stable enough to be worth storing
                **Entity memory vs other memory types:**
                - User memory = about THE USER (their preferences, role, context)
                - Entity memory = about EXTERNAL THINGS (companies, people, projects)
                - Learned knowledge = reusable TASK insights (patterns, approaches)
                </entity_memory_system>""")
            return ""
        # Handle single entity or list
        entities = data if isinstance(data, list) else [data]
        if not entities:
            return ""
        # Use schema's get_context_text when available; fall back to the
        # store's basic formatter for objects that don't implement it.
        formatted_parts = []
        for entity in entities:
            if hasattr(entity, "get_context_text"):
                formatted_parts.append(entity.get_context_text())
            else:
                formatted_parts.append(self._format_entity_basic(entity=entity))
        formatted = "\n\n---\n\n".join(formatted_parts)
        context = dedent(f"""\
        <entity_memory>
        **Known information about relevant entities:**
        {formatted}
        <entity_memory_guidelines>
        Use this knowledge naturally in your responses:
        - Reference stored facts without citing "entity memory"
        - Treat this as background knowledge you simply have
        - Current conversation takes precedence if there's conflicting information
        - Update entity memory if you learn something new and substantive
        </entity_memory_guidelines>
        """)
        # Mention the tools only when they are actually exposed to the agent.
        if self._should_expose_tools:
            context += dedent("""
            Entity memory tools are available to search, create, or update entities.
            </entity_memory>""")
        else:
            context += "</entity_memory>"
        return context
def get_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Get tools to expose to agent.
Args:
user_id: User context (for "user" namespace scoping).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
namespace: Default namespace for operations.
**kwargs: Additional context (ignored).
Returns:
List of callable tools (empty if enable_agent_tools=False).
"""
if not self._should_expose_tools:
return []
return self.get_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
)
async def aget_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Async version of get_tools."""
if not self._should_expose_tools:
return []
return await self.aget_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
)
    @property
    def was_updated(self) -> bool:
        """True if an entity was modified during the last operation (tool write)."""
        return self.entity_updated
@property
def _should_expose_tools(self) -> bool:
"""Check if tools should be exposed to the agent.
Returns True if either:
- mode is AGENTIC (tools are the primary way to manage entities), OR
- enable_agent_tools is explicitly True
"""
return self.config.mode == LearningMode.AGENTIC or self.config.enable_agent_tools
# =========================================================================
# Properties
# =========================================================================
    @property
    def db(self) -> Optional[Union["BaseDb", "AsyncBaseDb"]]:
        """Database backend, taken from the config (may be None)."""
        return self.config.db
    @property
    def model(self):
        """Model used for background extraction, taken from the config."""
        return self.config.model
# =========================================================================
# Debug/Logging
# =========================================================================
def set_log_level(self):
"""Set log level based on debug_mode or environment variable."""
if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
self.debug_mode = True
set_log_level_to_debug()
else:
set_log_level_to_info()
# =========================================================================
# Agent Tools
# =========================================================================
def get_agent_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> List[Callable]:
"""Get the tools to expose to the agent.
Tools are included based on config settings:
- search_entities (agent_can_search_entities)
- create_entity (agent_can_create_entity)
- update_entity (agent_can_update_entity)
- add_fact, update_fact, delete_fact
- add_event
- add_relationship
Args:
user_id: User context (for "user" namespace scoping).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
namespace: Default namespace for operations.
Returns:
List of callable tools.
"""
tools = []
effective_namespace = namespace or self.config.namespace
if self.config.agent_can_search_entities:
tools.append(
self._create_search_entities_tool(
user_id=user_id,
namespace=effective_namespace,
)
)
if self.config.agent_can_create_entity:
tools.append(
self._create_create_entity_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.agent_can_update_entity:
tools.append(
self._create_update_entity_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_add_fact:
tools.append(
self._create_add_fact_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_update_fact:
tools.append(
self._create_update_fact_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_delete_fact:
tools.append(
self._create_delete_fact_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_add_event:
tools.append(
self._create_add_event_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_add_relationship:
tools.append(
self._create_add_relationship_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
return tools
async def aget_agent_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> List[Callable]:
"""Async version of get_agent_tools."""
tools = []
effective_namespace = namespace or self.config.namespace
if self.config.agent_can_search_entities:
tools.append(
self._create_async_search_entities_tool(
user_id=user_id,
namespace=effective_namespace,
)
)
if self.config.agent_can_create_entity:
tools.append(
self._create_async_create_entity_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.agent_can_update_entity:
tools.append(
self._create_async_update_entity_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_add_fact:
tools.append(
self._create_async_add_fact_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_update_fact:
tools.append(
self._create_async_update_fact_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_delete_fact:
tools.append(
self._create_async_delete_fact_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_add_event:
tools.append(
self._create_async_add_event_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
if self.config.enable_add_relationship:
tools.append(
self._create_async_add_relationship_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
)
return tools
# =========================================================================
# Tool: search_entities
# =========================================================================
    def _create_search_entities_tool(
        self,
        user_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the search_entities tool.

        user_id and namespace are captured in the closure so the agent-facing
        tool only exposes search parameters.
        """

        # NOTE: the inner docstring is agent-facing — it is surfaced as the
        # tool description — so it is left untouched here.
        def search_entities(
            query: str,
            entity_type: Optional[str] = None,
            limit: int = 5,
        ) -> str:
            """Search for entities in the knowledge base.
            Use this to recall information about people, companies, projects, products,
            or other entities that have been stored. Searches across names, facts,
            events, and relationships.
            **Good times to search:**
            - Before discussing a company/person that might have stored context
            - When the user references an entity by name
            - To recall details about a project or product
            - To find relationships between entities
            **Search tips:**
            - Search by name: "Acme Corp", "Jane Smith"
            - Search by attribute: "PostgreSQL", "San Francisco"
            - Search by relationship: "CEO", "competitor"
            - Combine with entity_type to narrow results
            Args:
                query: What to search for. Can be a name, fact content, relationship,
                    or any text that might appear in entity records.
                    Examples: "Acme", "uses PostgreSQL", "VP Engineering"
                entity_type: Optional filter - "person", "company", "project", "product", etc.
                limit: Maximum results (default: 5)
            Returns:
                Formatted list of matching entities with their facts, events, and relationships.
            """
            # Delegates to the store's search(); closure supplies the scoping.
            results = self.search(
                query=query,
                entity_type=entity_type,
                user_id=user_id,
                namespace=namespace,
                limit=limit,
            )
            if not results:
                return "No matching entities found."
            formatted = self._format_entities_list(entities=results)
            return f"Found {len(results)} entity/entities:\n\n{formatted}"

        return search_entities
    def _create_async_search_entities_tool(
        self,
        user_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async search_entities tool.

        user_id and namespace are captured in the closure so the agent-facing
        tool only exposes search parameters.
        """

        # NOTE: the inner docstring is agent-facing — it is surfaced as the
        # tool description — so it is left untouched here.
        async def search_entities(
            query: str,
            entity_type: Optional[str] = None,
            limit: int = 5,
        ) -> str:
            """Search for entities in the knowledge base.
            Use this to recall information about people, companies, projects, products,
            or other entities that have been stored. Searches across names, facts,
            events, and relationships.
            **Good times to search:**
            - Before discussing a company/person that might have stored context
            - When the user references an entity by name
            - To recall details about a project or product
            - To find relationships between entities
            **Search tips:**
            - Search by name: "Acme Corp", "Jane Smith"
            - Search by attribute: "PostgreSQL", "San Francisco"
            - Search by relationship: "CEO", "competitor"
            - Combine with entity_type to narrow results
            Args:
                query: What to search for. Can be a name, fact content, relationship,
                    or any text that might appear in entity records.
                    Examples: "Acme", "uses PostgreSQL", "VP Engineering"
                entity_type: Optional filter - "person", "company", "project", "product", etc.
                limit: Maximum results (default: 5)
            Returns:
                Formatted list of matching entities with their facts, events, and relationships.
            """
            # Delegates to the store's async asearch(); closure supplies scoping.
            results = await self.asearch(
                query=query,
                entity_type=entity_type,
                user_id=user_id,
                namespace=namespace,
                limit=limit,
            )
            if not results:
                return "No matching entities found."
            formatted = self._format_entities_list(entities=results)
            return f"Found {len(results)} entity/entities:\n\n{formatted}"

        return search_entities
# =========================================================================
# Tool: create_entity
# =========================================================================
    def _create_create_entity_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the create_entity tool.

        Scope arguments are captured in the closure so the agent-facing tool
        only exposes entity parameters.
        """

        # NOTE: the inner docstring is agent-facing (the tool description) and
        # is left untouched. The closure name shadows the store-level
        # create_entity; `self.create_entity` below resolves to the store
        # method (presumably defined elsewhere in this class — not visible
        # here), not to this closure.
        def create_entity(
            entity_id: str,
            entity_type: str,
            name: str,
            description: Optional[str] = None,
            properties: Optional[Dict[str, str]] = None,
        ) -> str:
            """Create a new entity in the knowledge base.
            Use this when you encounter a person, company, project, or other entity
            worth remembering. Create the entity first, then add facts/events/relationships.
            **When to create an entity:**
            - A company, person, or project is discussed with substantive details
            - Information would be useful to recall in future conversations
            - The entity has a specific identity (not just "a company")
            **When NOT to create:**
            - For the user themselves (use user memory)
            - For generic concepts without specific identity
            - For one-off mentions with no useful details
            Args:
                entity_id: Unique identifier using lowercase and underscores.
                    Convention: descriptive name like "acme_corp", "jane_smith", "project_atlas"
                    Bad: "company1", "entity_123", "c"
                entity_type: Category of entity. Common types:
                    - "person": Individual people
                    - "company": Businesses, organizations
                    - "project": Specific initiatives or projects
                    - "product": Software, services, offerings
                    - "system": Technical systems, platforms
                    - "concept": Domain-specific concepts worth tracking
                name: Human-readable display name (e.g., "Acme Corporation", "Jane Smith")
                description: Brief description of what/who this entity is.
                    Good: "Enterprise SaaS startup in the fintech space, potential client"
                    Bad: "A company" (too vague)
                properties: Optional key-value metadata (e.g., {"industry": "fintech", "stage": "Series A"})
            Returns:
                Confirmation message.
            """
            success = self.create_entity(
                entity_id=entity_id,
                entity_type=entity_type,
                name=name,
                description=description,
                properties=properties,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Flag the store so was_updated reports this write.
                self.entity_updated = True
                return f"Entity created: {entity_type}/{entity_id} ({name})"
            return "Failed to create entity (may already exist)"

        return create_entity
    def _create_async_create_entity_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async create_entity tool.

        Async counterpart of `_create_create_entity_tool`: the closure
        captures the caller's scoping context and delegates to the async
        store API. Its docstring is presumably surfaced as the tool
        description, so it is left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def create_entity(
            entity_id: str,
            entity_type: str,
            name: str,
            description: Optional[str] = None,
            properties: Optional[Dict[str, str]] = None,
        ) -> str:
            """Create a new entity in the knowledge base.
            Use this when you encounter a person, company, project, or other entity
            worth remembering. Create the entity first, then add facts/events/relationships.
            **When to create an entity:**
            - A company, person, or project is discussed with substantive details
            - Information would be useful to recall in future conversations
            - The entity has a specific identity (not just "a company")
            **When NOT to create:**
            - For the user themselves (use user memory)
            - For generic concepts without specific identity
            - For one-off mentions with no useful details
            Args:
                entity_id: Unique identifier using lowercase and underscores.
                    Convention: descriptive name like "acme_corp", "jane_smith", "project_atlas"
                    Bad: "company1", "entity_123", "c"
                entity_type: Category of entity. Common types:
                    - "person": Individual people
                    - "company": Businesses, organizations
                    - "project": Specific initiatives or projects
                    - "product": Software, services, offerings
                    - "system": Technical systems, platforms
                    - "concept": Domain-specific concepts worth tracking
                name: Human-readable display name (e.g., "Acme Corporation", "Jane Smith")
                description: Brief description of what/who this entity is.
                    Good: "Enterprise SaaS startup in the fintech space, potential client"
                    Bad: "A company" (too vague)
                properties: Optional key-value metadata (e.g., {"industry": "fintech", "stage": "Series A"})
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level async API with the captured scope.
            success = await self.acreate_entity(
                entity_id=entity_id,
                entity_type=entity_type,
                name=name,
                description=description,
                properties=properties,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Entity created: {entity_type}/{entity_id} ({name})"
            return "Failed to create entity (may already exist)"
        return create_entity
# =========================================================================
# Tool: update_entity
# =========================================================================
    def _create_update_entity_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the update_entity tool.

        The returned closure captures the caller's scoping context and
        delegates to the store-level update API. Its docstring is presumably
        surfaced as the tool description, so it is left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            A callable suitable for registration as an agent tool.
        """
        def update_entity(
            entity_id: str,
            entity_type: str,
            name: Optional[str] = None,
            description: Optional[str] = None,
            properties: Optional[Dict[str, str]] = None,
        ) -> str:
            """Update an existing entity's core properties.
            Use this to modify the entity's identity information. Only provided
            fields will be updated - omitted fields remain unchanged.
            **When to update:**
            - Name change: Company rebranded, person changed name
            - Description evolved: Better understanding of what entity is
            - Properties changed: New metadata to add
            **Note:** To update facts, events, or relationships, use the specific
            tools (update_fact, add_event, add_relationship) instead.
            Args:
                entity_id: The entity's identifier
                entity_type: Type of entity
                name: New display name (only if changed)
                description: New description (only if you have better info)
                properties: Properties to add/update (merged with existing)
                    Existing properties not in this dict are preserved
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level API with the captured scope.
            success = self.update_entity(
                entity_id=entity_id,
                entity_type=entity_type,
                name=name,
                description=description,
                properties=properties,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Entity updated: {entity_type}/{entity_id}"
            return f"Entity not found: {entity_type}/{entity_id}"
        return update_entity
    def _create_async_update_entity_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async update_entity tool.

        Async counterpart of `_create_update_entity_tool`. The closure's
        docstring is presumably surfaced as the tool description, so it is
        left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def update_entity(
            entity_id: str,
            entity_type: str,
            name: Optional[str] = None,
            description: Optional[str] = None,
            properties: Optional[Dict[str, str]] = None,
        ) -> str:
            """Update an existing entity's core properties.
            Use this to modify the entity's identity information. Only provided
            fields will be updated - omitted fields remain unchanged.
            **When to update:**
            - Name change: Company rebranded, person changed name
            - Description evolved: Better understanding of what entity is
            - Properties changed: New metadata to add
            **Note:** To update facts, events, or relationships, use the specific
            tools (update_fact, add_event, add_relationship) instead.
            Args:
                entity_id: The entity's identifier
                entity_type: Type of entity
                name: New display name (only if changed)
                description: New description (only if you have better info)
                properties: Properties to add/update (merged with existing)
                    Existing properties not in this dict are preserved
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level async API with the captured scope.
            success = await self.aupdate_entity(
                entity_id=entity_id,
                entity_type=entity_type,
                name=name,
                description=description,
                properties=properties,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Entity updated: {entity_type}/{entity_id}"
            return f"Entity not found: {entity_type}/{entity_id}"
        return update_entity
# =========================================================================
# Tool: add_fact
# =========================================================================
    def _create_add_fact_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the add_fact tool.

        The returned closure captures the caller's scoping context and
        delegates to the store-level add_fact API. Its docstring is
        presumably surfaced as the tool description, so it is left
        untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            A callable suitable for registration as an agent tool.
        """
        def add_fact(
            entity_id: str,
            entity_type: str,
            fact: str,
        ) -> str:
            """Add a fact to an entity.
            Facts are **timeless truths** about an entity (semantic memory).
            They describe what IS, not what HAPPENED.
            **Good facts (timeless, descriptive):**
            - "Uses PostgreSQL and Redis for their data layer"
            - "Headquarters in San Francisco, engineering team in Austin"
            - "Founded by ex-Google engineers in 2019"
            - "Main product is a B2B analytics platform"
            - "Prefers async communication via Slack"
            **Not facts (use events instead):**
            - "Launched v2.0 last month" → This is an EVENT (time-bound)
            - "Just closed Series B" → This is an EVENT
            - "Had a meeting yesterday" → This is an EVENT
            **Not facts (too vague):**
            - "It's a good company" → Subjective, not useful
            - "They do tech stuff" → Too vague
            Args:
                entity_id: The entity's identifier (e.g., "acme_corp")
                entity_type: Type of entity (e.g., "company")
                fact: The fact to add - should be specific and timeless
            Returns:
                Confirmation message with fact ID.
            """
            # Delegate to the store-level API with the captured scope.
            fact_id = self.add_fact(
                entity_id=entity_id,
                entity_type=entity_type,
                fact=fact,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if fact_id:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Fact added to {entity_type}/{entity_id} (id: {fact_id})"
            return "Failed to add fact (entity may not exist)"
        return add_fact
    def _create_async_add_fact_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async add_fact tool.

        Async counterpart of `_create_add_fact_tool`. The closure's docstring
        is presumably surfaced as the tool description, so it is left
        untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def add_fact(
            entity_id: str,
            entity_type: str,
            fact: str,
        ) -> str:
            """Add a fact to an entity.
            Facts are **timeless truths** about an entity (semantic memory).
            They describe what IS, not what HAPPENED.
            **Good facts (timeless, descriptive):**
            - "Uses PostgreSQL and Redis for their data layer"
            - "Headquarters in San Francisco, engineering team in Austin"
            - "Founded by ex-Google engineers in 2019"
            - "Main product is a B2B analytics platform"
            - "Prefers async communication via Slack"
            **Not facts (use events instead):**
            - "Launched v2.0 last month" → This is an EVENT (time-bound)
            - "Just closed Series B" → This is an EVENT
            - "Had a meeting yesterday" → This is an EVENT
            **Not facts (too vague):**
            - "It's a good company" → Subjective, not useful
            - "They do tech stuff" → Too vague
            Args:
                entity_id: The entity's identifier (e.g., "acme_corp")
                entity_type: Type of entity (e.g., "company")
                fact: The fact to add - should be specific and timeless
            Returns:
                Confirmation message with fact ID.
            """
            # Delegate to the store-level async API with the captured scope.
            fact_id = await self.aadd_fact(
                entity_id=entity_id,
                entity_type=entity_type,
                fact=fact,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if fact_id:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Fact added to {entity_type}/{entity_id} (id: {fact_id})"
            return "Failed to add fact (entity may not exist)"
        return add_fact
# =========================================================================
# Tool: update_fact
# =========================================================================
    def _create_update_fact_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the update_fact tool.

        The returned closure captures the caller's scoping context and
        delegates to the store-level update_fact API. Its docstring is
        presumably surfaced as the tool description, so it is left
        untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            A callable suitable for registration as an agent tool.
        """
        def update_fact(
            entity_id: str,
            entity_type: str,
            fact_id: str,
            fact: str,
        ) -> str:
            """Update an existing fact on an entity.
            Use this when a fact needs correction or has become more specific.
            The new fact completely replaces the old one.
            **When to update:**
            - Correction: Original fact was wrong
            - More detail: "Uses PostgreSQL" → "Uses PostgreSQL 15 with TimescaleDB extension"
            - Changed reality: "50 employees" → "75 employees after recent hiring"
            **When to delete instead:**
            - Fact is no longer true and shouldn't be replaced
            - Fact was a misunderstanding
            Args:
                entity_id: The entity's identifier
                entity_type: Type of entity
                fact_id: ID of the fact to update (from search_entities results)
                fact: New fact content - complete replacement, not a diff
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level API with the captured scope.
            success = self.update_fact(
                entity_id=entity_id,
                entity_type=entity_type,
                fact_id=fact_id,
                fact=fact,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Fact updated on {entity_type}/{entity_id}"
            return f"Fact not found: {fact_id}"
        return update_fact
    def _create_async_update_fact_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async update_fact tool.

        Async counterpart of `_create_update_fact_tool`. The closure's
        docstring is presumably surfaced as the tool description, so it is
        left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def update_fact(
            entity_id: str,
            entity_type: str,
            fact_id: str,
            fact: str,
        ) -> str:
            """Update an existing fact on an entity.
            Use this when a fact needs correction or has become more specific.
            The new fact completely replaces the old one.
            **When to update:**
            - Correction: Original fact was wrong
            - More detail: "Uses PostgreSQL" → "Uses PostgreSQL 15 with TimescaleDB extension"
            - Changed reality: "50 employees" → "75 employees after recent hiring"
            **When to delete instead:**
            - Fact is no longer true and shouldn't be replaced
            - Fact was a misunderstanding
            Args:
                entity_id: The entity's identifier
                entity_type: Type of entity
                fact_id: ID of the fact to update (from search_entities results)
                fact: New fact content - complete replacement, not a diff
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level async API with the captured scope.
            success = await self.aupdate_fact(
                entity_id=entity_id,
                entity_type=entity_type,
                fact_id=fact_id,
                fact=fact,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Fact updated on {entity_type}/{entity_id}"
            return f"Fact not found: {fact_id}"
        return update_fact
# =========================================================================
# Tool: delete_fact
# =========================================================================
    def _create_delete_fact_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the delete_fact tool.

        The returned closure captures the caller's scoping context and
        delegates to the store-level delete_fact API. Its docstring is
        presumably surfaced as the tool description, so it is left
        untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            A callable suitable for registration as an agent tool.
        """
        def delete_fact(
            entity_id: str,
            entity_type: str,
            fact_id: str,
        ) -> str:
            """Delete a fact from an entity.
            Use this when a fact is no longer accurate and shouldn't be replaced
            with updated information.
            **When to delete:**
            - Fact was incorrect/misunderstood
            - Fact is no longer true (and no replacement makes sense)
            - Duplicate of another fact
            - Too vague to be useful
            **When to update instead:**
            - Fact needs correction but the topic is still relevant
            - Fact needs more detail
            Args:
                entity_id: The entity's identifier
                entity_type: Type of entity
                fact_id: ID of the fact to delete (from search_entities results)
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level API with the captured scope.
            success = self.delete_fact(
                entity_id=entity_id,
                entity_type=entity_type,
                fact_id=fact_id,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Fact deleted from {entity_type}/{entity_id}"
            return f"Fact not found: {fact_id}"
        return delete_fact
    def _create_async_delete_fact_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async delete_fact tool.

        Async counterpart of `_create_delete_fact_tool`. The closure's
        docstring is presumably surfaced as the tool description, so it is
        left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def delete_fact(
            entity_id: str,
            entity_type: str,
            fact_id: str,
        ) -> str:
            """Delete a fact from an entity.
            Use this when a fact is no longer accurate and shouldn't be replaced
            with updated information.
            **When to delete:**
            - Fact was incorrect/misunderstood
            - Fact is no longer true (and no replacement makes sense)
            - Duplicate of another fact
            - Too vague to be useful
            **When to update instead:**
            - Fact needs correction but the topic is still relevant
            - Fact needs more detail
            Args:
                entity_id: The entity's identifier
                entity_type: Type of entity
                fact_id: ID of the fact to delete (from search_entities results)
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level async API with the captured scope.
            success = await self.adelete_fact(
                entity_id=entity_id,
                entity_type=entity_type,
                fact_id=fact_id,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Fact deleted from {entity_type}/{entity_id}"
            return f"Fact not found: {fact_id}"
        return delete_fact
# =========================================================================
# Tool: add_event
# =========================================================================
    def _create_add_event_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the add_event tool.

        The returned closure captures the caller's scoping context and
        delegates to the store-level add_event API. Its docstring is
        presumably surfaced as the tool description, so it is left
        untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            A callable suitable for registration as an agent tool.
        """
        def add_event(
            entity_id: str,
            entity_type: str,
            event: str,
            date: Optional[str] = None,
        ) -> str:
            """Add an event to an entity.
            Events are **time-bound occurrences** (episodic memory).
            They describe what HAPPENED, not what IS.
            **Good events (specific, time-bound):**
            - "Launched v2.0 with new ML features" (date: "2025-01-15")
            - "Closed $50M Series B led by Sequoia" (date: "2024-Q3")
            - "Had 4-hour outage affecting payment processing" (date: "2024-12-20")
            - "CEO announced pivot to enterprise market" (date: "2024-11")
            - "Initial discovery call - interested in our analytics product"
            **Not events (use facts instead):**
            - "Uses PostgreSQL" → This is a FACT (timeless truth)
            - "Based in San Francisco" → This is a FACT
            - "Has 50 employees" → This is a FACT
            **Include dates when known** - even approximate dates help:
            - Exact: "2025-01-15"
            - Month: "January 2025" or "2025-01"
            - Quarter: "Q1 2025"
            - Relative: "early 2024", "last week"
            Args:
                entity_id: The entity's identifier (e.g., "acme_corp")
                entity_type: Type of entity (e.g., "company")
                event: Description of what happened - be specific
                date: When it happened (ISO format, natural language, or approximate)
            Returns:
                Confirmation message with event ID.
            """
            # Delegate to the store-level API with the captured scope.
            event_id = self.add_event(
                entity_id=entity_id,
                entity_type=entity_type,
                event=event,
                date=date,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if event_id:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Event added to {entity_type}/{entity_id} (id: {event_id})"
            return "Failed to add event (entity may not exist)"
        return add_event
    def _create_async_add_event_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async add_event tool.

        Async counterpart of `_create_add_event_tool`. The closure's
        docstring is presumably surfaced as the tool description, so it is
        left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def add_event(
            entity_id: str,
            entity_type: str,
            event: str,
            date: Optional[str] = None,
        ) -> str:
            """Add an event to an entity.
            Events are **time-bound occurrences** (episodic memory).
            They describe what HAPPENED, not what IS.
            **Good events (specific, time-bound):**
            - "Launched v2.0 with new ML features" (date: "2025-01-15")
            - "Closed $50M Series B led by Sequoia" (date: "2024-Q3")
            - "Had 4-hour outage affecting payment processing" (date: "2024-12-20")
            - "CEO announced pivot to enterprise market" (date: "2024-11")
            - "Initial discovery call - interested in our analytics product"
            **Not events (use facts instead):**
            - "Uses PostgreSQL" → This is a FACT (timeless truth)
            - "Based in San Francisco" → This is a FACT
            - "Has 50 employees" → This is a FACT
            **Include dates when known** - even approximate dates help:
            - Exact: "2025-01-15"
            - Month: "January 2025" or "2025-01"
            - Quarter: "Q1 2025"
            - Relative: "early 2024", "last week"
            Args:
                entity_id: The entity's identifier (e.g., "acme_corp")
                entity_type: Type of entity (e.g., "company")
                event: Description of what happened - be specific
                date: When it happened (ISO format, natural language, or approximate)
            Returns:
                Confirmation message with event ID.
            """
            # Delegate to the store-level async API with the captured scope.
            event_id = await self.aadd_event(
                entity_id=entity_id,
                entity_type=entity_type,
                event=event,
                date=date,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if event_id:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Event added to {entity_type}/{entity_id} (id: {event_id})"
            return "Failed to add event (entity may not exist)"
        return add_event
# =========================================================================
# Tool: add_relationship
# =========================================================================
    def _create_add_relationship_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the add_relationship tool.

        The returned closure captures the caller's scoping context and
        delegates to the store-level add_relationship API. Its docstring is
        presumably surfaced as the tool description, so it is left
        untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            A callable suitable for registration as an agent tool.
        """
        def add_relationship(
            entity_id: str,
            entity_type: str,
            related_entity_id: str,
            relation: str,
            direction: str = "outgoing",
        ) -> str:
            """Add a relationship between two entities.
            Relationships are **graph edges** connecting entities - they capture
            how entities relate to each other.
            **Common relationship patterns:**
            People → Companies:
            - "jane_smith" --[CEO]--> "acme_corp"
            - "bob_jones" --[engineer_at]--> "acme_corp"
            - "sarah_chen" --[founder]--> "startup_xyz"
            Companies → Companies:
            - "acme_corp" --[competitor_of]--> "beta_inc"
            - "acme_corp" --[acquired]--> "small_startup"
            - "acme_corp" --[partner_of]--> "big_vendor"
            Projects → Other entities:
            - "project_atlas" --[uses]--> "postgresql"
            - "project_atlas" --[owned_by]--> "acme_corp"
            - "project_atlas" --[led_by]--> "jane_smith"
            **Direction matters:**
            - "outgoing": This entity → Related entity (default)
              "jane_smith" --[CEO]--> "acme_corp" means Jane IS CEO OF Acme
            - "incoming": Related entity → This entity
              "acme_corp" with incoming "CEO" from "jane_smith" means Acme HAS CEO Jane
            Args:
                entity_id: The source entity's identifier
                entity_type: Type of source entity
                related_entity_id: The target entity's identifier (must exist or will be created)
                relation: Type of relationship - use clear, consistent labels:
                    For roles: "CEO", "CTO", "engineer_at", "founder"
                    For ownership: "owns", "owned_by", "part_of"
                    For competition: "competitor_of", "partner_of"
                    For technical: "uses", "depends_on", "integrates_with"
                direction: "outgoing" (source → target) or "incoming" (target → source)
            Returns:
                Confirmation message with relationship ID.
            """
            # Delegate to the store-level API with the captured scope.
            rel_id = self.add_relationship(
                entity_id=entity_id,
                entity_type=entity_type,
                related_entity_id=related_entity_id,
                relation=relation,
                direction=direction,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if rel_id:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Relationship added: {entity_id} --[{relation}]--> {related_entity_id} (id: {rel_id})"
            return "Failed to add relationship (entity may not exist)"
        return add_relationship
    def _create_async_add_relationship_tool(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> Callable:
        """Create the async add_relationship tool.

        Async counterpart of `_create_add_relationship_tool`. The closure's
        docstring is presumably surfaced as the tool description, so it is
        left untouched here.

        Args:
            user_id: User scope captured for the tool ("user" namespace).
            agent_id: Agent context captured for audit.
            team_id: Team context captured for audit.
            namespace: Namespace override captured for the tool.

        Returns:
            An async callable suitable for registration as an agent tool.
        """
        async def add_relationship(
            entity_id: str,
            entity_type: str,
            related_entity_id: str,
            relation: str,
            direction: str = "outgoing",
        ) -> str:
            """Add a relationship between two entities.
            Relationships are **graph edges** connecting entities - they capture
            how entities relate to each other.
            **Common relationship patterns:**
            People → Companies:
            - "jane_smith" --[CEO]--> "acme_corp"
            - "bob_jones" --[engineer_at]--> "acme_corp"
            - "sarah_chen" --[founder]--> "startup_xyz"
            Companies → Companies:
            - "acme_corp" --[competitor_of]--> "beta_inc"
            - "acme_corp" --[acquired]--> "small_startup"
            - "acme_corp" --[partner_of]--> "big_vendor"
            Projects → Other entities:
            - "project_atlas" --[uses]--> "postgresql"
            - "project_atlas" --[owned_by]--> "acme_corp"
            - "project_atlas" --[led_by]--> "jane_smith"
            **Direction matters:**
            - "outgoing": This entity → Related entity (default)
              "jane_smith" --[CEO]--> "acme_corp" means Jane IS CEO OF Acme
            - "incoming": Related entity → This entity
              "acme_corp" with incoming "CEO" from "jane_smith" means Acme HAS CEO Jane
            Args:
                entity_id: The source entity's identifier
                entity_type: Type of source entity
                related_entity_id: The target entity's identifier (must exist or will be created)
                relation: Type of relationship - use clear, consistent labels:
                    For roles: "CEO", "CTO", "engineer_at", "founder"
                    For ownership: "owns", "owned_by", "part_of"
                    For competition: "competitor_of", "partner_of"
                    For technical: "uses", "depends_on", "integrates_with"
                direction: "outgoing" (source → target) or "incoming" (target → source)
            Returns:
                Confirmation message with relationship ID.
            """
            # Delegate to the store-level async API with the captured scope.
            rel_id = await self.aadd_relationship(
                entity_id=entity_id,
                entity_type=entity_type,
                related_entity_id=related_entity_id,
                relation=relation,
                direction=direction,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if rel_id:
                # Mark the store dirty so callers know entity memory changed.
                self.entity_updated = True
                return f"Relationship added: {entity_id} --[{relation}]--> {related_entity_id} (id: {rel_id})"
            return "Failed to add relationship (entity may not exist)"
        return add_relationship
# =========================================================================
# Read Operations
# =========================================================================
def get(
self,
entity_id: str,
entity_type: str,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[EntityMemory]:
"""Retrieve entity by entity_id and entity_type.
Args:
entity_id: The unique entity identifier.
entity_type: The type of entity.
user_id: User ID for "user" namespace scoping.
namespace: Namespace to search in.
Returns:
EntityMemory instance, or None if not found.
"""
if not self.db:
return None
effective_namespace = namespace or self.config.namespace
try:
result = self.db.get_learning(
learning_type=self.learning_type,
entity_id=entity_id,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
)
if result and result.get("content"): # type: ignore[union-attr]
return self.schema.from_dict(result["content"]) # type: ignore[index]
return None
except Exception as e:
log_debug(f"EntityMemoryStore.get failed for {entity_type}/{entity_id}: {e}")
return None
async def aget(
self,
entity_id: str,
entity_type: str,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[EntityMemory]:
"""Async version of get."""
if not self.db:
return None
effective_namespace = namespace or self.config.namespace
try:
if isinstance(self.db, AsyncBaseDb):
result = await self.db.get_learning(
learning_type=self.learning_type,
entity_id=entity_id,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
)
else:
result = self.db.get_learning(
learning_type=self.learning_type,
entity_id=entity_id,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
)
if result and result.get("content"):
return self.schema.from_dict(result["content"])
return None
except Exception as e:
log_debug(f"EntityMemoryStore.aget failed for {entity_type}/{entity_id}: {e}")
return None
# =========================================================================
# Search Operations
# =========================================================================
def search(
self,
query: str,
entity_type: Optional[str] = None,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
limit: int = 10,
) -> List[EntityMemory]:
"""Search for entities matching query.
Args:
query: Search query (matched against name, facts, events, etc.).
entity_type: Filter by entity type.
user_id: User ID for "user" namespace scoping.
namespace: Filter by namespace.
limit: Maximum results to return.
Returns:
List of matching EntityMemory objects.
"""
if not self.db:
return []
effective_namespace = namespace or self.config.namespace
try:
results = self.db.get_learnings(
learning_type=self.learning_type,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
limit=limit * 3, # Over-fetch for filtering
)
entities = []
query_lower = query.lower()
for result in results or []: # type: ignore[union-attr]
content = result.get("content", {})
if self._matches_query(content=content, query=query_lower):
entity = self.schema.from_dict(content)
if entity:
entities.append(entity)
if len(entities) >= limit:
break
log_debug(f"EntityMemoryStore.search: found {len(entities)} entities for query: {query[:50]}...")
return entities
except Exception as e:
log_debug(f"EntityMemoryStore.search failed: {e}")
return []
async def asearch(
self,
query: str,
entity_type: Optional[str] = None,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
limit: int = 10,
) -> List[EntityMemory]:
"""Async version of search."""
if not self.db:
return []
effective_namespace = namespace or self.config.namespace
try:
if isinstance(self.db, AsyncBaseDb):
results = await self.db.get_learnings(
learning_type=self.learning_type,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
limit=limit * 3,
)
else:
results = self.db.get_learnings(
learning_type=self.learning_type,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
limit=limit * 3,
)
entities = []
query_lower = query.lower()
for result in results or []:
content = result.get("content", {})
if self._matches_query(content=content, query=query_lower):
entity = self.schema.from_dict(content)
if entity:
entities.append(entity)
if len(entities) >= limit:
break
log_debug(f"EntityMemoryStore.asearch: found {len(entities)} entities for query: {query[:50]}...")
return entities
except Exception as e:
log_debug(f"EntityMemoryStore.asearch failed: {e}")
return []
def _matches_query(self, content: Dict[str, Any], query: str) -> bool:
"""Check if entity content matches search query."""
# Check name
name = content.get("name", "")
if name and query in name.lower():
return True
# Check entity_id
entity_id = content.get("entity_id", "")
if entity_id and query in entity_id.lower():
return True
# Check description
description = content.get("description", "")
if description and query in description.lower():
return True
# Check properties
properties = content.get("properties", {})
for value in properties.values():
if query in str(value).lower():
return True
# Check facts
facts = content.get("facts", [])
for fact in facts:
fact_content = fact.get("content", "") if isinstance(fact, dict) else str(fact)
if query in fact_content.lower():
return True
# Check events
events = content.get("events", [])
for event in events:
event_content = event.get("content", "") if isinstance(event, dict) else str(event)
if query in event_content.lower():
return True
# Check relationships
relationships = content.get("relationships", [])
for rel in relationships:
if isinstance(rel, dict):
if query in rel.get("entity_id", "").lower():
return True
if query in rel.get("relation", "").lower():
return True
return False
# =========================================================================
# Create Operations
# =========================================================================
def create_entity(
self,
entity_id: str,
entity_type: str,
name: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Create a new entity.
Args:
entity_id: Unique identifier for the entity.
entity_type: Type of entity.
name: Display name.
description: Brief description.
properties: Key-value properties.
user_id: User ID (required for "user" namespace).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
namespace: Namespace for scoping.
Returns:
True if created, False if already exists or error.
"""
if not self.db:
return False
effective_namespace = namespace or self.config.namespace
# Validate "user" namespace has user_id
if effective_namespace == "user" and not user_id:
log_warning("EntityMemoryStore.create_entity: 'user' namespace requires user_id")
return False
# Check if already exists
existing = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if existing:
log_debug(f"EntityMemoryStore.create_entity: entity already exists {entity_type}/{entity_id}")
return False
try:
now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
entity = self.schema(
entity_id=entity_id,
entity_type=entity_type,
name=name,
description=description,
properties=properties or {},
facts=[],
events=[],
relationships=[],
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
created_at=now,
updated_at=now,
)
self.db.upsert_learning(
id=self._build_entity_db_id(entity_id, entity_type, effective_namespace),
learning_type=self.learning_type,
entity_id=entity_id,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
content=entity.to_dict(),
)
log_debug(f"EntityMemoryStore.create_entity: created {entity_type}/{entity_id}")
return True
except Exception as e:
log_debug(f"EntityMemoryStore.create_entity failed: {e}")
return False
async def acreate_entity(
self,
entity_id: str,
entity_type: str,
name: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Async version of create_entity."""
if not self.db:
return False
effective_namespace = namespace or self.config.namespace
if effective_namespace == "user" and not user_id:
log_warning("EntityMemoryStore.acreate_entity: 'user' namespace requires user_id")
return False
existing = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if existing:
log_debug(f"EntityMemoryStore.acreate_entity: entity already exists {entity_type}/{entity_id}")
return False
try:
now = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
entity = self.schema(
entity_id=entity_id,
entity_type=entity_type,
name=name,
description=description,
properties=properties or {},
facts=[],
events=[],
relationships=[],
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
created_at=now,
updated_at=now,
)
if isinstance(self.db, AsyncBaseDb):
await self.db.upsert_learning(
id=self._build_entity_db_id(entity_id, entity_type, effective_namespace),
learning_type=self.learning_type,
entity_id=entity_id,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
content=entity.to_dict(),
)
else:
self.db.upsert_learning(
id=self._build_entity_db_id(entity_id, entity_type, effective_namespace),
learning_type=self.learning_type,
entity_id=entity_id,
entity_type=entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
content=entity.to_dict(),
)
log_debug(f"EntityMemoryStore.acreate_entity: created {entity_type}/{entity_id}")
return True
except Exception as e:
log_debug(f"EntityMemoryStore.acreate_entity failed: {e}")
return False
# =========================================================================
# Update Operations
# =========================================================================
def update_entity(
self,
entity_id: str,
entity_type: str,
name: Optional[str] = None,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Update an existing entity's core properties.
Args:
entity_id: The entity's identifier.
entity_type: Type of entity.
name: New display name (optional).
description: New description (optional).
properties: Properties to merge (optional).
user_id: User ID for namespace scoping.
agent_id: Agent context.
team_id: Team context.
namespace: Namespace to search in.
Returns:
True if updated, False if not found.
"""
effective_namespace = namespace or self.config.namespace
entity = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return False
# Update fields
if name is not None:
entity.name = name
if description is not None:
entity.description = description
if properties is not None:
entity.properties = {**(entity.properties or {}), **properties}
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
return self._save_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
async def aupdate_entity(
self,
entity_id: str,
entity_type: str,
name: Optional[str] = None,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Async version of update_entity."""
effective_namespace = namespace or self.config.namespace
entity = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return False
if name is not None:
entity.name = name
if description is not None:
entity.description = description
if properties is not None:
entity.properties = {**(entity.properties or {}), **properties}
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
return await self._asave_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
# =========================================================================
# Fact Operations
# =========================================================================
def add_fact(
self,
entity_id: str,
entity_type: str,
fact: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[str]:
"""Add a fact to an entity.
Returns:
Fact ID if added, None if entity not found.
"""
effective_namespace = namespace or self.config.namespace
entity = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return None
fact_id = entity.add_fact(fact)
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
success = self._save_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return fact_id if success else None
async def aadd_fact(
self,
entity_id: str,
entity_type: str,
fact: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[str]:
"""Async version of add_fact."""
effective_namespace = namespace or self.config.namespace
entity = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return None
fact_id = entity.add_fact(fact)
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
success = await self._asave_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return fact_id if success else None
def update_fact(
self,
entity_id: str,
entity_type: str,
fact_id: str,
fact: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Update an existing fact."""
effective_namespace = namespace or self.config.namespace
entity = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return False
if not entity.update_fact(fact_id, fact):
return False
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
return self._save_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
async def aupdate_fact(
self,
entity_id: str,
entity_type: str,
fact_id: str,
fact: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Async version of update_fact."""
effective_namespace = namespace or self.config.namespace
entity = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return False
if not entity.update_fact(fact_id, fact):
return False
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
return await self._asave_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
def delete_fact(
self,
entity_id: str,
entity_type: str,
fact_id: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Delete a fact from an entity."""
effective_namespace = namespace or self.config.namespace
entity = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return False
if not entity.delete_fact(fact_id):
return False
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
return self._save_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
async def adelete_fact(
self,
entity_id: str,
entity_type: str,
fact_id: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Async version of delete_fact."""
effective_namespace = namespace or self.config.namespace
entity = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return False
if not entity.delete_fact(fact_id):
return False
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
return await self._asave_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
# =========================================================================
# Event Operations
# =========================================================================
def add_event(
self,
entity_id: str,
entity_type: str,
event: str,
date: Optional[str] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[str]:
"""Add an event to an entity.
Returns:
Event ID if added, None if entity not found.
"""
effective_namespace = namespace or self.config.namespace
entity = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return None
event_id = entity.add_event(event, date=date)
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
success = self._save_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return event_id if success else None
async def aadd_event(
self,
entity_id: str,
entity_type: str,
event: str,
date: Optional[str] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[str]:
"""Async version of add_event."""
effective_namespace = namespace or self.config.namespace
entity = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return None
event_id = entity.add_event(event, date=date)
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
success = await self._asave_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return event_id if success else None
# =========================================================================
# Relationship Operations
# =========================================================================
def add_relationship(
self,
entity_id: str,
entity_type: str,
related_entity_id: str,
relation: str,
direction: str = "outgoing",
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[str]:
"""Add a relationship to an entity.
Returns:
Relationship ID if added, None if entity not found.
"""
effective_namespace = namespace or self.config.namespace
entity = self.get(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return None
rel_id = entity.add_relationship(related_entity_id, relation, direction=direction)
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
success = self._save_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return rel_id if success else None
async def aadd_relationship(
self,
entity_id: str,
entity_type: str,
related_entity_id: str,
relation: str,
direction: str = "outgoing",
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[str]:
"""Async version of add_relationship."""
effective_namespace = namespace or self.config.namespace
entity = await self.aget(
entity_id=entity_id,
entity_type=entity_type,
user_id=user_id,
namespace=effective_namespace,
)
if not entity:
return None
rel_id = entity.add_relationship(related_entity_id, relation, direction=direction)
entity.updated_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
success = await self._asave_entity(
entity=entity,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return rel_id if success else None
# =========================================================================
# Internal Save Helpers
# =========================================================================
def _save_entity(
self,
entity: EntityMemory,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Save entity to database."""
if not self.db:
return False
effective_namespace = namespace or self.config.namespace
try:
content = entity.to_dict()
if not content:
return False
self.db.upsert_learning(
id=self._build_entity_db_id(entity.entity_id, entity.entity_type, effective_namespace),
learning_type=self.learning_type,
entity_id=entity.entity_id,
entity_type=entity.entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
content=content,
)
return True
except Exception as e:
log_debug(f"EntityMemoryStore._save_entity failed: {e}")
return False
async def _asave_entity(
self,
entity: EntityMemory,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Async version of _save_entity."""
if not self.db:
return False
effective_namespace = namespace or self.config.namespace
try:
content = entity.to_dict()
if not content:
return False
if isinstance(self.db, AsyncBaseDb):
await self.db.upsert_learning(
id=self._build_entity_db_id(entity.entity_id, entity.entity_type, effective_namespace),
learning_type=self.learning_type,
entity_id=entity.entity_id,
entity_type=entity.entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
content=content,
)
else:
self.db.upsert_learning(
id=self._build_entity_db_id(entity.entity_id, entity.entity_type, effective_namespace),
learning_type=self.learning_type,
entity_id=entity.entity_id,
entity_type=entity.entity_type,
namespace=effective_namespace,
user_id=user_id if effective_namespace == "user" else None,
agent_id=agent_id,
team_id=team_id,
content=content,
)
return True
except Exception as e:
log_debug(f"EntityMemoryStore._asave_entity failed: {e}")
return False
# =========================================================================
# Background Extraction
# =========================================================================
    def extract_and_save(
        self,
        messages: List[Any],
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> None:
        """Extract entities from messages (sync).

        Renders the conversation to text and asks the extraction model to call
        the enabled entity tools (create / add fact / add event / add
        relationship); the tools themselves persist the data. No-op when no
        model or db is configured.

        Args:
            messages: Run messages to mine for entities.
            user_id: User scoping for the "user" namespace.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            namespace: Namespace override; defaults to config.namespace.
            run_metrics: Optional accumulator for model usage metrics.
        """
        if not self.model or not self.db:
            return
        try:
            from agno.models.message import Message

            conversation_text = self._messages_to_text(messages=messages)
            tools = self._get_extraction_tools(
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            functions = self._build_functions_for_model(tools=tools)
            messages_for_model = [
                self._get_extraction_system_message(),
                Message(role="user", content=f"Extract entities from this conversation:\n\n{conversation_text}"),
            ]
            # Deep-copy so per-run state on the shared model instance is not mutated.
            model_copy = deepcopy(self.model)
            response = model_copy.response(
                messages=messages_for_model,
                tools=functions,
            )
            if run_metrics is not None and response.response_usage is not None:
                from agno.metrics import ModelType, accumulate_model_metrics

                accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
            # Any tool execution means the model saved at least one entity change.
            if response.tool_executions:
                self.entity_updated = True
                log_debug("EntityMemoryStore: Extraction saved entities")
        except Exception as e:
            # Best-effort: extraction failures must never break the agent run.
            log_warning(f"EntityMemoryStore.extract_and_save failed: {e}")
async def aextract_and_save(
self,
messages: List[Any],
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
run_metrics: Optional["RunMetrics"] = None,
) -> None:
"""Extract entities from messages (async)."""
if not self.model or not self.db:
return
try:
conversation_text = self._messages_to_text(messages=messages)
tools = self._aget_extraction_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
)
functions = self._build_functions_for_model(tools=tools)
messages_for_model = [
self._get_extraction_system_message(),
Message(role="user", content=f"Extract entities from this conversation:\n\n{conversation_text}"),
]
model_copy = deepcopy(self.model)
response = await model_copy.aresponse(
messages=messages_for_model,
tools=functions,
)
if run_metrics is not None and response.response_usage is not None:
from agno.metrics import ModelType, accumulate_model_metrics
accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
if response.tool_executions:
self.entity_updated = True
log_debug("EntityMemoryStore: Extraction saved entities")
except Exception as e:
log_warning(f"EntityMemoryStore.aextract_and_save failed: {e}")
    def _get_extraction_system_message(self) -> "Message":
        """Get system message for extraction.

        A configured system_message replaces the built-in prompt entirely;
        otherwise config.instructions and config.additional_instructions are
        appended to the default prompt below. The prompt text is runtime
        behavior (it is sent to the model verbatim) and must not be edited
        casually.
        """
        from agno.models.message import Message

        custom_instructions = self.config.instructions or ""
        additional = self.config.additional_instructions or ""
        # Full override: the custom system message short-circuits everything else.
        if self.config.system_message:
            return Message(role="system", content=self.config.system_message)
        content = dedent("""\
            You are an Entity Extractor. Your job is to identify and capture knowledge about
            external entities - people, companies, projects, products, systems, and other things
            mentioned in conversations that are worth remembering.
            ## Philosophy
            Entity memory is your knowledge about the WORLD, distinct from:
            - **User memory**: What you know about the user themselves
            - **Learned knowledge**: Reusable task insights and patterns
            - **Session context**: State of the current conversation
            Think of entity memory like a professional's mental rolodex - the accumulated knowledge
            about clients, companies, technologies, and projects that helps you work effectively.
            ## Entity Structure
            Each entity has:
            **Core identity:**
            - `entity_id`: Lowercase with underscores (e.g., "acme_corp", "jane_smith", "project_atlas")
            - `entity_type`: Category - "person", "company", "project", "product", "system", "concept"
            - `name`: Human-readable display name
            - `description`: Brief description of what this entity is
            **Three types of memory:**
            1. **Facts** (semantic memory) - Timeless truths about the entity
            - "Uses PostgreSQL for their main database"
            - "Headquarters in San Francisco"
            - "Founded in 2019"
            - "Prefers async communication"
            2. **Events** (episodic memory) - Time-bound occurrences
            - "Launched v2.0 on January 15, 2025"
            - "Acquired by BigCorp in Q3 2024"
            - "Had a major outage affecting 10K users"
            - "Completed Series B funding"
            3. **Relationships** (graph edges) - Connections to other entities
            - "Bob Smith" --[CEO]--> "Acme Corp"
            - "Project Atlas" --[uses]--> "PostgreSQL"
            - "Acme Corp" --[competitor_of]--> "Beta Inc"
            - "Jane" --[reports_to]--> "Bob"
            ## What to Extract
            **DO extract entities that are:**
            - Named specifically (not just "a company" but "Acme Corp")
            - Substantively discussed (not just mentioned in passing)
            - Likely to be referenced again in future conversations
            - Important to the user's work or context
            **DO capture:**
            - Companies the user works with or mentions repeatedly
            - People (colleagues, clients, stakeholders) with specific roles
            - Projects with concrete details
            - Products or systems with technical specifics
            - Organizations relevant to the user's domain
            ## What NOT to Extract
            **DO NOT extract:**
            - The user themselves (that belongs in UserProfile)
            - Generic concepts without specific identity ("databases" vs "PostgreSQL")
            - One-off mentions unlikely to recur ("I saw a company on the news")
            - Entities with no substantive information to store
            - Publicly available information that's easily searchable
            **Avoid:**
            - Creating entities just because something was named
            - Storing obvious facts ("Google is a tech company")
            - Duplicating information across multiple entities unnecessarily
            ## Quality Guidelines
            **Good entity example:**
            ```
            entity_id: "northstar_analytics"
            entity_type: "company"
            name: "NorthStar Analytics"
            description: "Data analytics startup, potential client"
            facts:
            - "Series A stage, ~50 employees"
            - "Tech stack: Python, Snowflake, dbt"
            - "Main contact is Sarah Chen, VP Engineering"
            - "Decision timeline is Q1 2025"
            events:
            - "Initial meeting held December 2024"
            - "Requested technical deep-dive on ML capabilities"
            relationships:
            - sarah_chen --[works_at]--> northstar_analytics
            ```
            **Poor entity example:**
            ```
            entity_id: "company1" # Too generic
            name: "Some Company" # Vague
            facts:
            - "It's a company" # Obvious, not useful
            ```
            ## Extraction Guidelines
            1. **Be selective**: Only extract entities with substantive, useful information
            2. **Be specific**: Capture concrete details, not vague generalities
            3. **Be accurate**: Only store information actually stated in the conversation
            4. **Categorize correctly**: Facts vs events vs relationships have different purposes
            5. **Use consistent IDs**: Lowercase, underscores, descriptive (e.g., "acme_corp" not "company_1")
            It's perfectly fine to extract nothing if no notable entities are mentioned.
            Quality over quantity - one well-documented entity beats five sparse ones.
            """)
        # Custom instructions are appended, not substituted, for the default prompt.
        if custom_instructions:
            content += f"\n## Additional Instructions\n\n{custom_instructions}\n"
        if additional:
            content += f"\n{additional}\n"
        return Message(role="system", content=content)
    def _get_extraction_tools(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> List[Callable]:
        """Get sync extraction tools based on config.

        Each tool is a closure over the run context (user/agent/team/namespace)
        so the model-facing signature only exposes entity parameters.

        NOTE: the closure names and docstrings are runtime-meaningful — they
        become the tool name and description sent to the model via
        _build_functions_for_model — so do not rename or reword them casually.

        Args:
            user_id: User scoping for the "user" namespace.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            namespace: Namespace override; defaults to config.namespace.

        Returns:
            List of enabled tool callables; may be empty if all flags are off.
        """
        tools: List[Callable[..., str]] = []
        effective_namespace = namespace or self.config.namespace

        if self.config.enable_create_entity:

            def create_entity(
                entity_id: str,
                entity_type: str,
                name: str,
                description: Optional[str] = None,
            ) -> str:
                """Create a new entity."""
                success = self.create_entity(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    name=name,
                    description=description,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Created: {entity_type}/{entity_id}" if success else "Entity exists"

            tools.append(create_entity)

        if self.config.enable_add_fact:

            def add_fact(entity_id: str, entity_type: str, fact: str) -> str:
                """Add a fact to an entity."""
                fact_id = self.add_fact(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    fact=fact,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Fact added: {fact_id}" if fact_id else "Entity not found"

            tools.append(add_fact)

        if self.config.enable_add_event:

            def add_event(
                entity_id: str,
                entity_type: str,
                event: str,
                date: Optional[str] = None,
            ) -> str:
                """Add an event to an entity."""
                event_id = self.add_event(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    event=event,
                    date=date,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Event added: {event_id}" if event_id else "Entity not found"

            tools.append(add_event)

        if self.config.enable_add_relationship:

            def add_relationship(
                entity_id: str,
                entity_type: str,
                related_entity_id: str,
                relation: str,
            ) -> str:
                """Add a relationship between entities."""
                # direction is not exposed to the model; defaults to "outgoing".
                rel_id = self.add_relationship(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    related_entity_id=related_entity_id,
                    relation=relation,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Relationship added: {rel_id}" if rel_id else "Entity not found"

            tools.append(add_relationship)

        return tools
    def _aget_extraction_tools(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> List[Callable]:
        """Get async extraction tools based on config.

        Same as _get_extraction_tools, but each closure awaits the async store
        method. The builder itself is synchronous — it only constructs
        coroutine functions.

        NOTE: the closure names and docstrings are runtime-meaningful — they
        become the tool name and description sent to the model via
        _build_functions_for_model — so do not rename or reword them casually.

        Args:
            user_id: User scoping for the "user" namespace.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            namespace: Namespace override; defaults to config.namespace.

        Returns:
            List of enabled async tool callables; may be empty.
        """
        tools: List[Callable] = []
        effective_namespace = namespace or self.config.namespace

        if self.config.enable_create_entity:

            async def create_entity(
                entity_id: str,
                entity_type: str,
                name: str,
                description: Optional[str] = None,
            ) -> str:
                """Create a new entity."""
                success = await self.acreate_entity(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    name=name,
                    description=description,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Created: {entity_type}/{entity_id}" if success else "Entity exists"

            tools.append(create_entity)

        if self.config.enable_add_fact:

            async def add_fact(entity_id: str, entity_type: str, fact: str) -> str:
                """Add a fact to an entity."""
                fact_id = await self.aadd_fact(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    fact=fact,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Fact added: {fact_id}" if fact_id else "Entity not found"

            tools.append(add_fact)

        if self.config.enable_add_event:

            async def add_event(
                entity_id: str,
                entity_type: str,
                event: str,
                date: Optional[str] = None,
            ) -> str:
                """Add an event to an entity."""
                event_id = await self.aadd_event(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    event=event,
                    date=date,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Event added: {event_id}" if event_id else "Entity not found"

            tools.append(add_event)

        if self.config.enable_add_relationship:

            async def add_relationship(
                entity_id: str,
                entity_type: str,
                related_entity_id: str,
                relation: str,
            ) -> str:
                """Add a relationship between entities."""
                # direction is not exposed to the model; defaults to "outgoing".
                rel_id = await self.aadd_relationship(
                    entity_id=entity_id,
                    entity_type=entity_type,
                    related_entity_id=related_entity_id,
                    relation=relation,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    namespace=effective_namespace,
                )
                return f"Relationship added: {rel_id}" if rel_id else "Entity not found"

            tools.append(add_relationship)

        return tools
def _build_functions_for_model(self, tools: List[Callable]) -> List[Any]:
"""Convert callables to Functions for model."""
from agno.tools.function import Function
functions = []
seen_names = set()
for tool in tools:
try:
name = tool.__name__
if name in seen_names:
continue
seen_names.add(name)
func = Function.from_callable(tool, strict=True)
func.strict = True
functions.append(func)
except Exception as e:
log_warning(f"Could not add function {tool}: {e}")
return functions
def _messages_to_text(self, messages: List[Any]) -> str:
"""Convert messages to text for extraction."""
parts = []
for msg in messages:
if msg.role == "user":
content = msg.get_content_string() if hasattr(msg, "get_content_string") else str(msg.content)
if content and content.strip():
parts.append(f"User: {content}")
elif msg.role in ["assistant", "model"]:
content = msg.get_content_string() if hasattr(msg, "get_content_string") else str(msg.content)
if content and content.strip():
parts.append(f"Assistant: {content}")
return "\n".join(parts)
# =========================================================================
# Private Helpers
# =========================================================================
def _build_entity_db_id(
self,
entity_id: str,
entity_type: str,
namespace: str,
) -> str:
"""Build unique DB ID for entity."""
return f"entity_{namespace}_{entity_type}_{entity_id}"
def _format_entity_basic(self, entity: Any) -> str:
"""Basic entity formatting fallback."""
parts = []
name = getattr(entity, "name", None)
entity_type = getattr(entity, "entity_type", "unknown")
entity_id = getattr(entity, "entity_id", "unknown")
if name:
parts.append(f"**{name}** ({entity_type})")
else:
parts.append(f"**{entity_id}** ({entity_type})")
description = getattr(entity, "description", None)
if description:
parts.append(description)
facts = getattr(entity, "facts", [])
if facts:
facts_text = "\n".join(f" - {f.get('content', f)}" for f in facts[:5])
parts.append(f"Facts:\n{facts_text}")
return "\n".join(parts)
def _format_entities_list(self, entities: List[EntityMemory]) -> str:
"""Format entities for tool output."""
parts = []
for i, entity in enumerate(entities, 1):
if hasattr(entity, "get_context_text"):
formatted = entity.get_context_text()
else:
formatted = self._format_entity_basic(entity=entity)
parts.append(f"{i}. {formatted}")
return "\n\n".join(parts)
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
"""String representation for debugging."""
has_db = self.db is not None
has_model = self.model is not None
return (
f"EntityMemoryStore("
f"mode={self.config.mode.value}, "
f"namespace={self.config.namespace}, "
f"db={has_db}, "
f"model={has_model}, "
f"enable_agent_tools={self.config.enable_agent_tools})"
)
    def print(
        self,
        entity_id: str,
        entity_type: str,
        *,
        user_id: Optional[str] = None,
        namespace: Optional[str] = None,
        raw: bool = False,
    ) -> None:
        """Print formatted entity memory.

        Args:
            entity_id: The entity to print.
            entity_type: Type of entity.
            user_id: User ID for "user" namespace scoping.
            namespace: Namespace to search in.
            raw: If True, print raw dict using pprint instead of formatted panel.

        Example:
            >>> store.print(entity_id="acme_corp", entity_type="company")
            ╭────────────────── Entity Memory ──────────────────╮
            │ Acme Corporation (company) │
            │ Enterprise software company │
            │ │
            │ Properties: │
            │ industry: fintech │
            │ size: startup │
            │ │
            │ Facts: │
            │ [dim][f1][/dim] Uses PostgreSQL for main DB │
            │ [dim][f2][/dim] API uses OAuth2 authentication │
            │ │
            │ Events: │
            │ [dim][e1][/dim] Launched v2.0 (2024-01-15) │
            │ │
            │ Relationships: │
            │ CEO → bob_smith │
            ╰────────────────── acme_corp ──────────────────────╯
        """
        from agno.learn.utils import print_panel

        effective_namespace = namespace or self.config.namespace
        entity = self.get(
            entity_id=entity_id,
            entity_type=entity_type,
            user_id=user_id,
            namespace=effective_namespace,
        )
        # Build the panel body line by line; print_panel handles the frame and
        # the "No entity found" case when lines stays empty.
        lines = []
        if entity:
            # Header: name and type (falls back to the id when name is unset)
            name = getattr(entity, "name", None)
            etype = getattr(entity, "entity_type", entity_type)
            if name:
                lines.append(f"[bold]{name}[/bold] ({etype})")
            else:
                lines.append(f"[bold]{entity_id}[/bold] ({etype})")
            # Description
            description = getattr(entity, "description", None)
            if description:
                lines.append(description)
            # Properties
            properties = getattr(entity, "properties", {})
            if properties:
                lines.append("")
                lines.append("Properties:")
                for key, value in properties.items():
                    lines.append(f" {key}: {value}")
            # Facts — the literal backslash escapes "[" so rich does not treat
            # the id as markup.
            facts = getattr(entity, "facts", [])
            if facts:
                lines.append("")
                lines.append("Facts:")
                for fact in facts:
                    if isinstance(fact, dict):
                        fact_id = fact.get("id", "?")
                        content = fact.get("content", str(fact))
                    else:
                        fact_id = "?"
                        content = str(fact)
                    lines.append(f" [dim]\\[{fact_id}][/dim] {content}")
            # Events (with optional date suffix)
            events = getattr(entity, "events", [])
            if events:
                lines.append("")
                lines.append("Events:")
                for event in events:
                    if isinstance(event, dict):
                        event_id = event.get("id", "?")
                        content = event.get("content", str(event))
                        date = event.get("date")
                        date_str = f" ({date})" if date else ""
                    else:
                        event_id = "?"
                        content = str(event)
                        date_str = ""
                    lines.append(f" [dim]\\[{event_id}][/dim] {content}{date_str}")
            # Relationships — non-dict entries are silently skipped.
            relationships = getattr(entity, "relationships", [])
            if relationships:
                lines.append("")
                lines.append("Relationships:")
                for rel in relationships:
                    if isinstance(rel, dict):
                        related_id = rel.get("entity_id", "?")
                        relation = rel.get("relation", "related_to")
                        direction = rel.get("direction", "outgoing")
                        if direction == "outgoing":
                            lines.append(f" {relation} → {related_id}")
                        else:
                            lines.append(f" {relation} ← {related_id}")
        print_panel(
            title="Entity Memory",
            subtitle=f"{entity_type}/{entity_id}",
            lines=lines,
            empty_message="No entity found",
            raw_data=entity,
            raw=raw,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/entity_memory.py",
"license": "Apache License 2.0",
"lines": 2774,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/stores/learned_knowledge.py | """
Learned Knowledge Store
=======================
Storage backend for Learned Knowledge learning type.
Stores reusable insights that apply across users and agents.
Think of it as:
- UserProfile = what you know about a person
- SessionContext = what happened in this meeting
- LearnedKnowledge = reusable insights that apply anywhere
Key Features:
- TWO agent tools: save_learning and search_learnings
- Semantic search for relevant learnings
- Shared across all agents using the same knowledge base
- Supports namespace-based scoping for privacy/sharing control:
- namespace="user": Private per user (scoped by user_id)
- namespace="global": Shared with everyone (default)
- namespace="<custom>": Custom grouping (literal string, e.g., "engineering")
Supported Modes:
- AGENTIC: Agent calls save_learning directly when it discovers insights
- PROPOSE: Agent proposes learnings, user approves before saving
- ALWAYS: Automatic extraction with duplicate detection
"""
from copy import deepcopy
from dataclasses import dataclass, field
from datetime import datetime, timezone
from os import getenv
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, List, Optional
from agno.learn.config import LearnedKnowledgeConfig, LearningMode
from agno.learn.schemas import LearnedKnowledge
from agno.learn.stores.protocol import LearningStore
from agno.learn.utils import to_dict_safe
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
if TYPE_CHECKING:
from agno.metrics import RunMetrics
@dataclass
class LearnedKnowledgeStore(LearningStore):
"""Storage backend for Learned Knowledge learning type.
Uses a Knowledge base with vector embeddings for semantic search.
Supports namespace-based scoping for privacy/sharing control.
Namespace Scoping:
- namespace="global": Shared with everyone (default)
- namespace="user": Private per user (requires user_id)
- namespace="<custom>": Custom grouping (e.g., "engineering", "sales")
Provides TWO tools to the agent (when enable_agent_tools=True):
1. search_learnings - Find relevant learnings via semantic search
2. save_learning - Save reusable insights
Args:
config: LearnedKnowledgeConfig with all settings including knowledge base.
debug_mode: Enable debug logging.
"""
    # Configuration holding the knowledge base, extraction model, mode,
    # default namespace, and tool-enable flags for this store.
    config: LearnedKnowledgeConfig = field(default_factory=LearnedKnowledgeConfig)
    # When True, forces debug-level logging (see set_log_level).
    debug_mode: bool = False
    # State tracking (internal)
    # Set to True whenever a learning is saved during the last operation;
    # surfaced to callers through the was_updated property.
    learning_saved: bool = field(default=False, init=False)
    # Resolved schema class for learnings; populated in __post_init__.
    _schema: Any = field(default=None, init=False)

    def __post_init__(self):
        """Resolve the learning schema and warn about unsupported modes."""
        # Fall back to the default LearnedKnowledge schema when none is configured.
        self._schema = self.config.schema or LearnedKnowledge
        # HITL (hard approval) is not supported here; PROPOSE is the soft-approval analog.
        if self.config.mode == LearningMode.HITL:
            log_warning("LearnedKnowledgeStore does not support HITL mode. Use PROPOSE mode for soft approval.")
    # =========================================================================
    # LearningStore Protocol Implementation
    # =========================================================================
    @property
    def learning_type(self) -> str:
        """Unique identifier for this learning type ("learned_knowledge")."""
        return "learned_knowledge"

    @property
    def schema(self) -> Any:
        """Schema class used for learnings (config.schema, or LearnedKnowledge by default)."""
        return self._schema
def recall(
self,
query: Optional[str] = None,
message: Optional[str] = None,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
limit: int = 5,
**kwargs,
) -> Optional[List[Any]]:
"""Retrieve relevant learnings via semantic search.
Args:
query: Search query (searches title, learning, context).
message: Current user message to find relevant learnings for (alternative).
user_id: User ID for "user" namespace scoping.
namespace: Filter by namespace (None = all accessible).
limit: Maximum number of results.
**kwargs: Additional context (ignored).
Returns:
List of relevant learnings, or None if no query.
"""
search_query = query or message
if not search_query:
return None
effective_namespace = namespace or self.config.namespace
if effective_namespace == "user" and not user_id:
log_warning("LearnedKnowledgeStore.recall: namespace='user' requires user_id")
return None
results = self.search(
query=search_query,
user_id=user_id,
namespace=effective_namespace,
limit=limit,
)
return results if results else None
async def arecall(
self,
query: Optional[str] = None,
message: Optional[str] = None,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
limit: int = 5,
**kwargs,
) -> Optional[List[Any]]:
"""Async version of recall."""
search_query = query or message
if not search_query:
return None
effective_namespace = namespace or self.config.namespace
if effective_namespace == "user" and not user_id:
log_warning("LearnedKnowledgeStore.arecall: namespace='user' requires user_id")
return None
results = await self.asearch(
query=search_query,
user_id=user_id,
namespace=effective_namespace,
limit=limit,
)
return results if results else None
def process(
self,
messages: List[Any],
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> None:
"""Extract learned knowledge from messages.
Args:
messages: Conversation messages to analyze.
user_id: User context (for "user" namespace scoping).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
namespace: Namespace to save learnings to (default: "global").
**kwargs: Additional context (ignored).
"""
# Reset state for this operation
self.learning_saved = False
# process only supported in ALWAYS mode
# for programmatic extraction, use extract_and_save directly
if self.config.mode != LearningMode.ALWAYS:
return
if not messages:
return
self.extract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
run_metrics=kwargs.get("run_metrics"),
)
async def aprocess(
self,
messages: List[Any],
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> None:
"""Async version of process."""
# Reset state for this operation
self.learning_saved = False
if self.config.mode != LearningMode.ALWAYS:
return
if not messages:
return
await self.aextract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
run_metrics=kwargs.get("run_metrics"),
)
def build_context(self, data: Any) -> str:
"""Build context for the agent.
Args:
data: List of learning objects from recall() (may be None).
Returns:
Context string to inject into the agent's system prompt.
"""
mode = self.config.mode
if mode == LearningMode.PROPOSE:
return self._build_propose_mode_context(data=data)
elif mode == LearningMode.AGENTIC:
return self._build_agentic_mode_context(data=data)
else:
return self._build_background_mode_context(data=data)
    def _build_agentic_mode_context(self, data: Any) -> str:
        """Build context for AGENTIC mode.

        Emits the search/save rules plus, when `data` is non-empty, a
        <relevant_learnings> section with prior insights.
        """
        # NOTE: this string is injected verbatim into the agent's system prompt.
        instructions = dedent("""\
            <learning_system>
            You have a knowledge base of reusable learnings from past interactions.
            ## CRITICAL RULES - ALWAYS FOLLOW
            **RULE 1: ALWAYS search before answering substantive questions.**
            When the user asks for advice, recommendations, how-to guidance, or best practices:
            → First call `search_learnings` with relevant keywords
            → Then incorporate any relevant findings into your response
            **RULE 2: ALWAYS search before saving.**
            Before saving anything, first call `search_learnings` to check if similar knowledge exists.
            Only save if it's genuinely new (not a duplicate or minor variation).
            **RULE 3: ALWAYS save when explicitly asked.**
            When the user says "remember", "save", "note", "keep in mind", or similar:
            → These are explicit directives - save what they asked (after searching for duplicates)
            **RULE 4: ALWAYS save team/org goals, constraints, and policies.**
            When the user shares organizational context:
            → "We're trying to..." / "Our goal is..." (team goals)
            → "We can't use..." / "We need to avoid..." (constraints)
            → "Our policy is..." / "We always..." (policies)
            → "Our priority is..." / "We prefer..." at org level (priorities)
            These are shared context - save them so other users benefit too.
            ## Tools
            `search_learnings(query)` - Search for relevant prior insights. Use liberally.
            `save_learning(title, learning, context, tags)` - Save new insights or context.
            ## When to Search
            Search when the user:
            - Asks for recommendations or best practices
            - Asks how to approach a problem
            - Asks about trade-offs or considerations
            - Mentions a technology, domain, or problem area
            - Asks you to save something (check for duplicates first)
            ## When to Save (Self-Discovered Insights)
            For insights you discover yourself (not explicit requests or org context), only save if:
            - Non-obvious (required investigation to discover)
            - Reusable (applies to a category of problems)
            - Actionable (specific enough to apply directly)
            Do NOT save:
            - Raw facts or common knowledge
            - Individual user preferences (use user memory instead)
            - Duplicates of existing learnings
            </learning_system>\
        """)
        # Append prior learnings, if any were recalled (data may be a single item).
        if data:
            learnings = data if isinstance(data, list) else [data]
            if learnings:
                formatted = self._format_learnings_for_context(learnings=learnings)
                instructions += f"\n\n<relevant_learnings>\nPrior insights that may help with this task:\n\n{formatted}\n\nApply these naturally if relevant. Current context takes precedence.\n</relevant_learnings>"
        return instructions
    def _build_propose_mode_context(self, data: Any) -> str:
        """Build context for PROPOSE mode.

        Like AGENTIC mode, but instructs the agent to propose learnings and
        only save after explicit user approval.
        """
        # NOTE: this string is injected verbatim into the agent's system prompt.
        instructions = dedent("""\
            <learning_system>
            You have a knowledge base of reusable learnings. In PROPOSE mode, saving requires user approval.
            ## CRITICAL RULES - ALWAYS FOLLOW
            **RULE 1: ALWAYS search before answering substantive questions.**
            When the user asks for advice, recommendations, how-to guidance, or best practices:
            → First call `search_learnings` with relevant keywords
            → Then incorporate any relevant findings into your response
            **RULE 2: Propose learnings, don't save directly.**
            If you discover something worth preserving, propose it at the end of your response:
            ---
            **Proposed Learning**
            **Title:** [Concise title]
            **Context:** [When this applies]
            **Insight:** [The learning - specific and actionable]
            Save this to the knowledge base? (yes/no)
            ---
            **RULE 3: Only save after explicit approval.**
            Call `save_learning` ONLY after the user says "yes" to your proposal.
            Before saving, search first to check for duplicates.
            ## Tools
            `search_learnings(query)` - Search for relevant prior insights. Use liberally.
            `save_learning(title, learning, context, tags)` - Save ONLY after user approval.
            ## What to Propose
            Only propose insights that are:
            - Non-obvious (required investigation to discover)
            - Reusable (applies to a category of problems)
            - Actionable (specific enough to apply directly)
            Do NOT propose:
            - Raw facts or common knowledge
            - User-specific preferences
            - Things the user already knew
            </learning_system>\
        """)
        # Append prior learnings, if any were recalled (data may be a single item).
        if data:
            learnings = data if isinstance(data, list) else [data]
            if learnings:
                formatted = self._format_learnings_for_context(learnings=learnings)
                instructions += f"\n\n<relevant_learnings>\nPrior insights that may help:\n\n{formatted}\n\nApply these naturally if relevant.\n</relevant_learnings>"
        return instructions
    def _build_background_mode_context(self, data: Any) -> str:
        """Build context for ALWAYS mode (just show relevant learnings).

        Returns an empty string when there is nothing to show; no tool
        instructions are emitted because saving happens in the background.
        """
        if not data:
            return ""
        # data may be a single learning or a list of them.
        learnings = data if isinstance(data, list) else [data]
        if not learnings:
            return ""
        formatted = self._format_learnings_for_context(learnings=learnings)
        return dedent(f"""\
            <relevant_learnings>
            Prior insights that may help with this task:
            {formatted}
            Apply these naturally if they're relevant to the current request.
            Your current analysis and the user's specific context take precedence.
            </relevant_learnings>\
        """)
def _format_learnings_for_context(self, learnings: List[Any]) -> str:
"""Format learnings for inclusion in context."""
parts = []
for i, learning in enumerate(learnings, 1):
formatted = self._format_single_learning(learning=learning)
if formatted:
parts.append(f"{i}. {formatted}")
return "\n".join(parts)
def get_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Get tools to expose to agent.
Args:
user_id: User context (for "user" namespace scoping).
agent_id: Agent context (stored for audit on saves).
team_id: Team context (stored for audit on saves).
namespace: Default namespace for saves (default: "global").
**kwargs: Additional context (ignored).
Returns:
List of callable tools (empty if enable_agent_tools=False).
"""
if not self.config.enable_agent_tools:
return []
return self.get_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
)
async def aget_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Async version of get_tools."""
if not self.config.enable_agent_tools:
return []
return await self.aget_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=namespace,
)
    @property
    def was_updated(self) -> bool:
        """Whether a learning was saved during the last operation."""
        return self.learning_saved

    # =========================================================================
    # Properties
    # =========================================================================
    @property
    def knowledge(self):
        """The knowledge base (vector store) from the config, if any."""
        return self.config.knowledge

    @property
    def model(self):
        """Model used for background extraction, if configured."""
        return self.config.model
# =========================================================================
# Debug/Logging
# =========================================================================
def set_log_level(self):
"""Set log level based on debug_mode or environment variable."""
if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
self.debug_mode = True
set_log_level_to_debug()
else:
set_log_level_to_info()
# =========================================================================
# Agent Tools
# =========================================================================
def get_agent_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> List[Callable]:
"""Get the tools to expose to the agent.
Returns TWO tools (based on config settings):
1. search_learnings - Find relevant learnings
2. save_learning - Save reusable insights
Args:
user_id: User context (for "user" namespace scoping).
agent_id: Agent context (stored for audit on saves).
team_id: Team context (stored for audit on saves).
namespace: Default namespace for saves (default: "global").
Returns:
List of callable tools.
"""
tools = []
if self.config.agent_can_search:
tools.append(
self._create_search_learnings_tool(namespace=namespace or self.config.namespace, user_id=user_id)
)
if self.config.agent_can_save:
tools.append(
self._create_save_learning_tool(
namespace=namespace or self.config.namespace,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
)
return tools
async def aget_agent_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> List[Callable]:
"""Async version of get_agent_tools."""
tools = []
if self.config.agent_can_search:
tools.append(
self._create_async_search_learnings_tool(namespace=namespace or self.config.namespace, user_id=user_id)
)
if self.config.agent_can_save:
tools.append(
self._create_async_save_learning_tool(
namespace=namespace or self.config.namespace,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
)
return tools
# =========================================================================
# Tool: save_learning
# =========================================================================
    def _create_save_learning_tool(
        self,
        namespace: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> Callable:
        """Create the save_learning tool for the agent.

        The returned closure captures the caller context (namespace, user,
        agent, team) so the agent only supplies the learning content.
        The inner docstring is the tool description shown to the model.
        """

        def save_learning(
            title: str,
            learning: str,
            context: Optional[str] = None,
            tags: Optional[List[str]] = None,
        ) -> str:
            """Save a reusable insight or organizational context to the knowledge base.
            IMPORTANT: You MUST call search_learnings first to check for duplicates.
            Args:
                title: Concise, searchable title.
                learning: The insight or context - specific and actionable.
                context: When/where this applies.
                tags: Categories for organization.
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level save with the captured context.
            success = self.save(
                title=title,
                learning=learning,
                context=context,
                tags=tags,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Latch the was_updated flag for this operation.
                self.learning_saved = True
                return f"Learning saved: {title} (namespace: {namespace})"
            return "Failed to save learning"

        return save_learning
    def _create_async_save_learning_tool(
        self,
        namespace: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> Callable:
        """Create the async save_learning tool for the agent.

        Mirrors _create_save_learning_tool but delegates to asave.
        The inner docstring is the tool description shown to the model.
        """

        async def save_learning(
            title: str,
            learning: str,
            context: Optional[str] = None,
            tags: Optional[List[str]] = None,
        ) -> str:
            """Save a reusable insight or organizational context to the knowledge base.
            IMPORTANT: You MUST call search_learnings first to check for duplicates.
            Args:
                title: Concise, searchable title.
                learning: The insight or context - specific and actionable.
                context: When/where this applies.
                tags: Categories for organization.
            Returns:
                Confirmation message.
            """
            # Delegate to the store-level async save with the captured context.
            success = await self.asave(
                title=title,
                learning=learning,
                context=context,
                tags=tags,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            if success:
                # Latch the was_updated flag for this operation.
                self.learning_saved = True
                return f"Learning saved: {title} (namespace: {namespace})"
            return "Failed to save learning"

        return save_learning
# =========================================================================
# Tool: search_learnings
# =========================================================================
    def _create_search_learnings_tool(
        self,
        namespace: str,
        user_id: Optional[str] = None,
    ) -> Callable:
        """Create the search_learnings tool for the agent.

        The closure pins the namespace and user scope; the agent only provides
        the query. The inner docstring is the tool description shown to the model.
        """

        def search_learnings(
            query: str,
            limit: int = 5,
        ) -> str:
            """Search for relevant insights in the knowledge base.
            ALWAYS call this:
            1. Before answering questions about best practices, recommendations, or how-to
            2. Before saving a new learning (to check for duplicates)
            Args:
                query: Keywords describing what you're looking for.
                    Examples: "cloud costs", "API rate limiting", "team goals"
                limit: Maximum results (default: 5)
            Returns:
                List of relevant learnings, or message if none found.
            """
            results = self.search(
                query=query,
                user_id=user_id,
                namespace=namespace,
                limit=limit,
            )
            if not results:
                return "No relevant learnings found."
            # _format_learnings_list renders results for the model (defined elsewhere).
            formatted = self._format_learnings_list(learnings=results)
            return f"Found {len(results)} relevant learning(s):\n\n{formatted}"

        return search_learnings
    def _create_async_search_learnings_tool(
        self,
        namespace: str,
        user_id: Optional[str] = None,
    ) -> Callable:
        """Create the async search_learnings tool for the agent.

        Mirrors _create_search_learnings_tool but delegates to asearch.
        The inner docstring is the tool description shown to the model.
        """

        async def search_learnings(
            query: str,
            limit: int = 5,
        ) -> str:
            """Search for relevant insights in the knowledge base.
            ALWAYS call this:
            1. Before answering questions about best practices, recommendations, or how-to
            2. Before saving a new learning (to check for duplicates)
            Args:
                query: Keywords describing what you're looking for.
                    Examples: "cloud costs", "API rate limiting", "team goals"
                limit: Maximum results (default: 5)
            Returns:
                List of relevant learnings, or message if none found.
            """
            results = await self.asearch(
                query=query,
                user_id=user_id,
                namespace=namespace,
                limit=limit,
            )
            if not results:
                return "No relevant learnings found."
            # _format_learnings_list renders results for the model (defined elsewhere).
            formatted = self._format_learnings_list(learnings=results)
            return f"Found {len(results)} relevant learning(s):\n\n{formatted}"

        return search_learnings
# =========================================================================
# Search Operations
# =========================================================================
def search(
self,
query: str,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
limit: int = 5,
) -> List[Any]:
"""Search for relevant learnings based on query.
Uses semantic search to find learnings most relevant to the query.
Args:
query: The search query.
user_id: User ID for "user" namespace access.
namespace: Filter by namespace (None = all accessible).
limit: Maximum number of results to return.
Returns:
List of learning objects matching the query.
"""
if not self.knowledge:
log_warning("LearnedKnowledgeStore.search: no knowledge base configured")
return []
try:
# Build filters based on namespace
filters = self._build_search_filters(user_id=user_id, namespace=namespace)
# Search with filters if supported
if filters:
results = self.knowledge.search(query=query, max_results=limit, filters=filters)
else:
results = self.knowledge.search(query=query, max_results=limit)
learnings = []
for result in results or []:
learning = self._parse_result(result=result)
if learning:
# Post-filter by namespace if KB doesn't support filtering
if self._matches_namespace_filter(learning, user_id=user_id, namespace=namespace):
learnings.append(learning)
log_debug(f"LearnedKnowledgeStore.search: found {len(learnings)} learnings for query: {query[:50]}...")
return learnings[:limit]
except Exception as e:
log_warning(f"LearnedKnowledgeStore.search failed: {e}")
return []
async def asearch(
self,
query: str,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
limit: int = 5,
) -> List[Any]:
"""Async version of search."""
if not self.knowledge:
log_warning("LearnedKnowledgeStore.asearch: no knowledge base configured")
return []
try:
# Build filters based on namespace
filters = self._build_search_filters(user_id=user_id, namespace=namespace)
# Search with filters if supported
if hasattr(self.knowledge, "asearch"):
if filters:
results = await self.knowledge.asearch(query=query, max_results=limit, filters=filters)
else:
results = await self.knowledge.asearch(query=query, max_results=limit)
else:
if filters:
results = self.knowledge.search(query=query, max_results=limit, filters=filters)
else:
results = self.knowledge.search(query=query, max_results=limit)
learnings = []
for result in results or []:
learning = self._parse_result(result=result)
if learning:
# Post-filter by namespace if KB doesn't support filtering
if self._matches_namespace_filter(learning, user_id=user_id, namespace=namespace):
learnings.append(learning)
log_debug(f"LearnedKnowledgeStore.asearch: found {len(learnings)} learnings for query: {query[:50]}...")
return learnings[:limit]
except Exception as e:
log_warning(f"LearnedKnowledgeStore.asearch failed: {e}")
return []
def _build_search_filters(
self,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> Optional[dict]:
"""Build search filters for namespace scoping.
Returns filter dict for knowledge base, or None if no filtering needed.
"""
if not namespace:
return None
if namespace == "user":
if not user_id:
log_warning("LearnedKnowledgeStore: 'user' namespace requires user_id")
return None
return {"namespace": "user", "user_id": user_id}
return {"namespace": namespace}
def _matches_namespace_filter(
self,
learning: Any,
user_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> bool:
"""Check if a learning matches the namespace filter (for post-filtering)."""
if not namespace:
return True
learning_namespace = getattr(learning, "namespace", None) or "global"
learning_user_id = getattr(learning, "user_id", None)
if namespace == "user":
return learning_namespace == "user" and learning_user_id == user_id
return learning_namespace == namespace
# =========================================================================
# Save Operations
# =========================================================================
    def save(
        self,
        title: str,
        learning: str,
        context: Optional[str] = None,
        tags: Optional[List[str]] = None,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> bool:
        """Save a learning to the knowledge base.

        Args:
            title: Short descriptive title (also the KB document name).
            learning: The actual insight.
            context: When/why this applies.
            tags: Tags for categorization.
            user_id: User ID (required for "user" namespace).
            agent_id: Agent that created this (stored as metadata for audit).
            team_id: Team context (stored as metadata for audit).
            namespace: Namespace for scoping (default: store's configured namespace).

        Returns:
            True if saved successfully, False otherwise (failures are logged,
            never raised).
        """
        if not self.knowledge:
            log_warning("LearnedKnowledgeStore.save: no knowledge base configured")
            return False
        effective_namespace = namespace or self.config.namespace
        # Validate "user" namespace has user_id
        if effective_namespace == "user" and not user_id:
            log_warning("LearnedKnowledgeStore.save: 'user' namespace requires user_id")
            return False
        try:
            # Local import keeps the reader dependency off the module import path.
            from agno.knowledge.reader.text_reader import TextReader

            learning_data = {
                "title": title.strip(),
                "learning": learning.strip(),
                "context": context.strip() if context else None,
                "tags": tags or [],
                "namespace": effective_namespace,
                # user_id is only recorded for per-user learnings.
                "user_id": user_id if effective_namespace == "user" else None,
                "agent_id": agent_id,
                "team_id": team_id,
                # UTC timestamp in Zulu notation, e.g. 2024-01-01T00:00:00Z.
                "created_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
            }
            # Instantiate the schema to validate before serializing for storage.
            learning_obj = self.schema(**learning_data)
            text_content = self._to_text_content(learning=learning_obj)
            # Build metadata for filtering
            # Metadata must be passed separately to insert for filters to work
            filter_metadata: dict[str, Any] = {
                "namespace": effective_namespace,
            }
            if effective_namespace == "user" and user_id:
                filter_metadata["user_id"] = user_id
            if agent_id:
                filter_metadata["agent_id"] = agent_id
            if team_id:
                filter_metadata["team_id"] = team_id
            if tags:
                filter_metadata["tags"] = tags
            # skip_if_exists makes re-saving the same title a no-op (dedupe by name).
            self.knowledge.insert(
                name=learning_data["title"],
                text_content=text_content,
                reader=TextReader(),
                skip_if_exists=True,
                metadata=filter_metadata,  # Pass metadata for filtering
            )
            log_debug(f"LearnedKnowledgeStore.save: saved learning '{title}' (namespace: {effective_namespace})")
            return True
        except Exception as e:
            log_warning(f"LearnedKnowledgeStore.save failed: {e}")
            return False
    async def asave(
        self,
        title: str,
        learning: str,
        context: Optional[str] = None,
        tags: Optional[List[str]] = None,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
    ) -> bool:
        """Async version of save.

        Prefers the knowledge base's async insert API and falls back to the
        sync one. Returns True on success, False otherwise (failures are
        logged, never raised).
        """
        if not self.knowledge:
            log_warning("LearnedKnowledgeStore.asave: no knowledge base configured")
            return False
        effective_namespace = namespace or self.config.namespace
        # Validate "user" namespace has user_id
        if effective_namespace == "user" and not user_id:
            log_warning("LearnedKnowledgeStore.asave: 'user' namespace requires user_id")
            return False
        try:
            # Local import keeps the reader dependency off the module import path.
            from agno.knowledge.reader.text_reader import TextReader

            learning_data = {
                "title": title.strip(),
                "learning": learning.strip(),
                "context": context.strip() if context else None,
                "tags": tags or [],
                "namespace": effective_namespace,
                # user_id is only recorded for per-user learnings.
                "user_id": user_id if effective_namespace == "user" else None,
                "agent_id": agent_id,
                "team_id": team_id,
                # UTC timestamp in Zulu notation, e.g. 2024-01-01T00:00:00Z.
                "created_at": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
            }
            # Instantiate the schema to validate before serializing for storage.
            learning_obj = self.schema(**learning_data)
            text_content = self._to_text_content(learning=learning_obj)
            # Build metadata for filtering
            # Metadata must be passed separately to insert for filters to work
            filter_metadata: dict[str, Any] = {
                "namespace": effective_namespace,
            }
            if effective_namespace == "user" and user_id:
                filter_metadata["user_id"] = user_id
            if agent_id:
                filter_metadata["agent_id"] = agent_id
            if team_id:
                filter_metadata["team_id"] = team_id
            if tags:
                filter_metadata["tags"] = tags
            # Prefer the native async insert; not every knowledge base has one.
            if hasattr(self.knowledge, "ainsert"):
                await self.knowledge.ainsert(
                    name=learning_data["title"],
                    text_content=text_content,
                    reader=TextReader(),
                    skip_if_exists=True,
                    metadata=filter_metadata,  # Pass metadata for filtering
                )
            else:
                self.knowledge.insert(
                    name=learning_data["title"],
                    text_content=text_content,
                    reader=TextReader(),
                    skip_if_exists=True,
                    metadata=filter_metadata,  # Pass metadata for filtering
                )
            log_debug(f"LearnedKnowledgeStore.asave: saved learning '{title}' (namespace: {effective_namespace})")
            return True
        except Exception as e:
            log_warning(f"LearnedKnowledgeStore.asave failed: {e}")
            return False
# =========================================================================
# Delete Operations
# =========================================================================
def delete(self, title: str) -> bool:
"""Delete a learning by title.
Args:
title: The title of the learning to delete.
Returns:
True if deleted, False otherwise.
"""
if not self.knowledge:
log_warning("LearnedKnowledgeStore.delete: no knowledge base configured")
return False
try:
if hasattr(self.knowledge, "delete_content"):
self.knowledge.delete_content(name=title)
log_debug(f"LearnedKnowledgeStore.delete: deleted learning '{title}'")
return True
else:
log_warning("LearnedKnowledgeStore.delete: knowledge base does not support deletion")
return False
except Exception as e:
log_warning(f"LearnedKnowledgeStore.delete failed: {e}")
return False
async def adelete(self, title: str) -> bool:
"""Async version of delete."""
if not self.knowledge:
log_warning("LearnedKnowledgeStore.adelete: no knowledge base configured")
return False
try:
if hasattr(self.knowledge, "adelete_content"):
await self.knowledge.adelete_content(name=title)
elif hasattr(self.knowledge, "delete_content"):
self.knowledge.delete_content(name=title)
else:
log_warning("LearnedKnowledgeStore.adelete: knowledge base does not support deletion")
return False
log_debug(f"LearnedKnowledgeStore.adelete: deleted learning '{title}'")
return True
except Exception as e:
log_warning(f"LearnedKnowledgeStore.adelete failed: {e}")
return False
# =========================================================================
# Background Extraction (ALWAYS mode)
# =========================================================================
    def extract_and_save(
        self,
        messages: List[Any],
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> None:
        """Extract learnings from messages (sync).

        Runs an LLM pass over the conversation with the save tools attached;
        the model decides what (if anything) to save. Requires both an
        extraction model and a knowledge base. Failures are logged, not raised.
        """
        if not self.model or not self.knowledge:
            return
        try:
            # Helpers below (_messages_to_text, _summarize_existing,
            # _get_extraction_tools, _build_functions_for_model) are defined
            # elsewhere in this class.
            conversation_text = self._messages_to_text(messages=messages)
            # Search for existing learnings to avoid duplicates
            existing = self.search(query=conversation_text[:500], limit=5)
            existing_summary = self._summarize_existing(learnings=existing)
            extraction_messages = self._build_extraction_messages(
                conversation_text=conversation_text,
                existing_summary=existing_summary,
            )
            tools = self._get_extraction_tools(
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            functions = self._build_functions_for_model(tools=tools)
            # Copy the model so per-call state does not leak into the shared instance.
            model_copy = deepcopy(self.model)
            response = model_copy.response(
                messages=extraction_messages,
                tools=functions,
            )
            # Fold token usage into the caller's run metrics when provided.
            if run_metrics is not None and response.response_usage is not None:
                from agno.metrics import ModelType, accumulate_model_metrics

                accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
            # Any tool execution means the model saved at least one learning.
            if response.tool_executions:
                self.learning_saved = True
                log_debug("LearnedKnowledgeStore: Extraction saved new learning(s)")
        except Exception as e:
            log_warning(f"LearnedKnowledgeStore.extract_and_save failed: {e}")
    async def aextract_and_save(
        self,
        messages: List[Any],
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        namespace: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> None:
        """Extract learnings from messages (async).

        Async counterpart of extract_and_save; failures are logged, not raised.
        """
        if not self.model or not self.knowledge:
            return
        try:
            conversation_text = self._messages_to_text(messages=messages)
            # Search for existing learnings to avoid duplicates
            existing = await self.asearch(query=conversation_text[:500], limit=5)
            existing_summary = self._summarize_existing(learnings=existing)
            extraction_messages = self._build_extraction_messages(
                conversation_text=conversation_text,
                existing_summary=existing_summary,
            )
            # NOTE(review): _aget_extraction_tools is called without await despite
            # the a-prefix — presumably a sync factory returning async tool
            # closures; confirm it is not a coroutine function.
            tools = self._aget_extraction_tools(
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                namespace=namespace,
            )
            functions = self._build_functions_for_model(tools=tools)
            # Copy the model so per-call state does not leak into the shared instance.
            model_copy = deepcopy(self.model)
            response = await model_copy.aresponse(
                messages=extraction_messages,
                tools=functions,
            )
            # Fold token usage into the caller's run metrics when provided.
            if run_metrics is not None and response.response_usage is not None:
                from agno.metrics import ModelType, accumulate_model_metrics

                accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
            # Any tool execution means the model saved at least one learning.
            if response.tool_executions:
                self.learning_saved = True
                log_debug("LearnedKnowledgeStore: Extraction saved new learning(s)")
        except Exception as e:
            log_warning(f"LearnedKnowledgeStore.aextract_and_save failed: {e}")
    def _build_extraction_messages(
        self,
        conversation_text: str,
        existing_summary: str,
    ) -> List[Any]:
        """Build messages for extraction.

        Args:
            conversation_text: Rendered conversation to analyze.
            existing_summary: Bullet list of already-saved learnings; when
                non-empty it is injected into the prompt so the model does not
                save duplicates.

        Returns:
            A [system, user] message pair for the extraction model.
        """
        from agno.models.message import Message

        # Base extractor instructions: what qualifies as a learning and what doesn't.
        system_prompt = dedent("""\
You are a Learning Extractor. Your job is to identify knowledge worth preserving
from conversations for future use.
## What to Save
**1. Discovered Insights** - Knowledge that emerged through the conversation:
- Non-obvious (required reasoning or investigation)
- Reusable (applies to a category of problems)
- Actionable (specific enough to apply directly)
- Durable (won't become outdated quickly)
**2. Organizational Context** - Explicit directives shared by the user:
- Explicit save requests: "remember that...", "note that...", "keep in mind..."
- Team/org goals: "we're trying to...", "our goal is...", "our priority is..."
- Constraints: "we can't use...", "we need to avoid..."
- Policies: "our policy is...", "we always...", "we never..."
## What NOT to Save
- Raw facts (use search for retrieval)
- Individual user preferences (belongs in user memory)
- Common knowledge (everyone knows this)
- One-off answers (not generalizable)
- Summaries without insight
- Uncertain conclusions
## Examples
Good - Discovered insight:
> **Title:** Debugging intermittent PostgreSQL connection timeouts
> **Learning:** Check for connection pool exhaustion before investigating network issues.
> **Context:** Diagnosing database connectivity issues
Good - Organizational context:
> **Title:** Team goal: reduce cloud egress costs
> **Learning:** Factor egress costs into architecture decisions.
> **Context:** Infrastructure and vendor decisions
Bad (don't save):
- "The error was a typo on line 42" (one-off)
- "React is popular" (common knowledge)
- "We discussed options" (summary, no insight)
""")
        # Dedupe hint: only added when there is something already saved.
        if existing_summary:
            system_prompt += f"""## Already Saved (DO NOT DUPLICATE)
These insights are already in the knowledge base. Do not save variations of these:
{existing_summary}
"""
        system_prompt += dedent("""\
## Your Task
Review the conversation below. Save anything that fits the criteria above:
- Discovered insights worth preserving
- Organizational context the user shared (goals, constraints, policies)
**Important:**
- Most conversations will NOT produce a learning. That's expected.
- When in doubt, don't save. Quality over quantity.
- It's fine to do nothing if there's nothing worth saving.\
""")
        return [
            Message(role="system", content=system_prompt),
            Message(role="user", content=f"Review this conversation for reusable insights:\n\n{conversation_text}"),
        ]
def _get_extraction_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> List[Callable]:
"""Get sync extraction tools."""
effective_namespace = namespace or self.config.namespace
def save_learning(
title: str,
learning: str,
context: Optional[str] = None,
tags: Optional[List[str]] = None,
) -> str:
"""Save a reusable insight or organizational context.
Args:
title: Concise, searchable title.
learning: The insight or context - specific and actionable.
context: When/where this applies.
tags: Categories for organization.
Returns:
Confirmation message.
"""
success = self.save(
title=title,
learning=learning,
context=context,
tags=tags,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return f"Saved: {title}" if success else "Failed to save"
return [save_learning]
def _aget_extraction_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
namespace: Optional[str] = None,
) -> List[Callable]:
"""Get async extraction tools."""
effective_namespace = namespace or self.config.namespace
async def save_learning(
title: str,
learning: str,
context: Optional[str] = None,
tags: Optional[List[str]] = None,
) -> str:
"""Save a reusable insight or organizational context.
Args:
title: Concise, searchable title.
learning: The insight or context - specific and actionable.
context: When/where this applies.
tags: Categories for organization.
Returns:
Confirmation message.
"""
success = await self.asave(
title=title,
learning=learning,
context=context,
tags=tags,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
namespace=effective_namespace,
)
return f"Saved: {title}" if success else "Failed to save"
return [save_learning]
def _build_functions_for_model(self, tools: List[Callable]) -> List[Any]:
"""Convert callables to Functions for model."""
from agno.tools.function import Function
functions = []
seen_names = set()
for tool in tools:
try:
name = tool.__name__
if name in seen_names:
continue
seen_names.add(name)
func = Function.from_callable(tool, strict=True)
func.strict = True
functions.append(func)
except Exception as e:
log_warning(f"Could not add function {tool}: {e}")
return functions
def _messages_to_text(self, messages: List[Any]) -> str:
"""Convert messages to text for extraction."""
parts = []
for msg in messages:
if msg.role == "user":
content = msg.get_content_string() if hasattr(msg, "get_content_string") else str(msg.content)
if content and content.strip():
parts.append(f"User: {content}")
elif msg.role in ["assistant", "model"]:
content = msg.get_content_string() if hasattr(msg, "get_content_string") else str(msg.content)
if content and content.strip():
parts.append(f"Assistant: {content}")
return "\n".join(parts)
def _summarize_existing(self, learnings: List[Any]) -> str:
"""Summarize existing learnings to help avoid duplicates."""
if not learnings:
return ""
parts = []
for learning in learnings[:5]:
if hasattr(learning, "title") and hasattr(learning, "learning"):
parts.append(f"- {learning.title}: {learning.learning[:100]}...")
return "\n".join(parts)
# =========================================================================
# Private Helpers
# =========================================================================
    def _parse_result(self, result: Any) -> Optional[Any]:
        """Parse a search result into a learning object.

        Accepts several result shapes (dict, object with ``.content``/``.text``,
        raw string) and tries to build a ``self.schema`` instance from the
        extracted content. The branch order matters: dict is checked first so
        mapping results are not shadowed by attribute fallbacks.

        Args:
            result: A raw search hit from the knowledge backend.

        Returns:
            A schema instance, or None when the result cannot be parsed.
        """
        import json

        try:
            content = None
            if isinstance(result, dict):
                # Prefer explicit payload keys, fall back to the whole mapping.
                content = result.get("content") or result.get("text") or result
            elif hasattr(result, "content"):
                content = result.content
            elif hasattr(result, "text"):
                content = result.text
            elif isinstance(result, str):
                content = result
            if not content:
                return None
            if isinstance(content, str):
                try:
                    content = json.loads(content)
                except json.JSONDecodeError:
                    # Non-JSON text: wrap it as a minimal learning rather than dropping it.
                    return self.schema(title="Learning", learning=content)
            if isinstance(content, dict):
                from dataclasses import fields

                # Only keep keys the schema dataclass actually declares.
                field_names = {f.name for f in fields(self.schema)}
                filtered = {k: v for k, v in content.items() if k in field_names}
                return self.schema(**filtered)
            return None
        except Exception as e:
            log_warning(f"LearnedKnowledgeStore._parse_result failed: {e}")
            return None
def _to_text_content(self, learning: Any) -> str:
"""Convert a learning object to text content for storage."""
import json
learning_dict = to_dict_safe(learning)
return json.dumps(learning_dict, ensure_ascii=False)
def _format_single_learning(self, learning: Any) -> str:
"""Format a single learning for display."""
parts = []
if hasattr(learning, "title") and learning.title:
parts.append(f"**{learning.title}**")
if hasattr(learning, "learning") and learning.learning:
parts.append(learning.learning)
if hasattr(learning, "context") and learning.context:
parts.append(f"_Context: {learning.context}_")
if hasattr(learning, "tags") and learning.tags:
tags_str = ", ".join(learning.tags)
parts.append(f"_Tags: {tags_str}_")
if hasattr(learning, "namespace") and learning.namespace and learning.namespace != "global":
parts.append(f"_Namespace: {learning.namespace}_")
return "\n ".join(parts)
def _format_learnings_list(self, learnings: List[Any]) -> str:
"""Format a list of learnings for tool output."""
parts = []
for i, learning in enumerate(learnings, 1):
formatted = self._format_single_learning(learning=learning)
if formatted:
parts.append(f"{i}. {formatted}")
return "\n".join(parts)
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
"""String representation for debugging."""
has_knowledge = self.knowledge is not None
has_model = self.model is not None
return (
f"LearnedKnowledgeStore("
f"mode={self.config.mode.value}, "
f"knowledge={has_knowledge}, "
f"model={has_model}, "
f"enable_agent_tools={self.config.enable_agent_tools})"
)
    def print(
        self,
        query: str,
        *,
        user_id: Optional[str] = None,
        namespace: Optional[str] = None,
        limit: int = 10,
        raw: bool = False,
    ) -> None:
        """Print formatted learned knowledge search results.
        Args:
            query: Search query to find relevant learnings.
            user_id: User ID for "user" namespace scoping.
            namespace: Namespace to filter by.
            limit: Maximum number of learnings to display.
            raw: If True, print raw list using pprint instead of formatted panel.
        Example:
            >>> store.print(query="API design")
            ╭───────────── Learned Knowledge ──────────────╮
            │ 1. PostgreSQL JSONB indexing │
            │ For frequently queried nested JSONB... │
            │ Context: When query performance degrades │
            │ Tags: postgresql, performance │
            │ │
            │ 2. Handling rate limits in async clients │
            │ Implement exponential backoff with... │
            │ Context: When building API clients │
            │ Tags: api, async, rate-limiting │
            ╰──────────────── query: API ──────────────────╯
        """
        from agno.learn.utils import print_panel

        learnings = self.search(
            query=query,
            user_id=user_id,
            namespace=namespace,
            limit=limit,
        )
        # Build rich-markup lines; print_panel handles the empty case.
        lines = []
        for i, learning in enumerate(learnings, 1):
            if i > 1:
                lines.append("")  # Separator between learnings
            # Title
            title = getattr(learning, "title", None)
            if title:
                lines.append(f"[bold]{i}. {title}[/bold]")
            else:
                lines.append(f"[bold]{i}. (untitled)[/bold]")
            # Learning content
            content = getattr(learning, "learning", None)
            if content:
                # Truncate long content for display
                if len(content) > 200:
                    content = content[:200] + "..."
                lines.append(f" {content}")
            # Context
            context = getattr(learning, "context", None)
            if context:
                lines.append(f" [dim]Context: {context}[/dim]")
            # Tags
            tags = getattr(learning, "tags", None)
            if tags:
                tags_str = ", ".join(tags)
                lines.append(f" [dim]Tags: {tags_str}[/dim]")
            # Namespace (if not global)
            ns = getattr(learning, "namespace", None)
            if ns and ns != "global":
                lines.append(f" [dim]Namespace: {ns}[/dim]")
        print_panel(
            title="Learned Knowledge",
            subtitle=f"query: {query[:30]}{'...' if len(query) > 30 else ''}",
            lines=lines,
            empty_message="No learnings found",
            raw_data=learnings,
            raw=raw,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/learned_knowledge.py",
"license": "Apache License 2.0",
"lines": 1300,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/stores/protocol.py | """
Learning Store Protocol
=======================
Defines the interface that all learning stores must implement.
This protocol enables:
- Consistent API across different learning types
- Easy addition of custom stores
- Type safety with Protocol typing
"""
from typing import Any, Callable, List, Optional, Protocol, runtime_checkable
@runtime_checkable
class LearningStore(Protocol):
    """Protocol that all learning stores must implement.
    A learning store handles one type of learning (user profile, session context,
    learned knowledge, etc.) and provides methods for:
    - recall: Retrieve relevant data for the current context
    - process: Extract and save learnings from conversations
    - build_context: Format data for inclusion in agent prompts
    - get_tools: Provide tools for agent interaction
    The protocol is ``runtime_checkable``: ``isinstance(obj, LearningStore)``
    verifies that the methods exist (signatures are not checked at runtime).
    """

    @property
    def learning_type(self) -> str:
        """Unique identifier for this learning type.
        Used for storage keys and logging.
        Returns:
            String identifier (e.g., "user_profile", "session_context")
        """
        ...

    @property
    def schema(self) -> Any:
        """Schema class used for this learning type.
        Returns:
            The dataclass or schema class for this learning type.
        """
        ...

    def recall(self, **kwargs) -> Optional[Any]:
        """Retrieve relevant data for the current context.
        Args:
            **kwargs: Context including user_id, session_id, message, etc.
        Returns:
            Retrieved data (schema instance, list, or None if not found).
        """
        ...

    async def arecall(self, **kwargs) -> Optional[Any]:
        """Async version of recall."""
        ...

    def process(self, messages: List[Any], **kwargs) -> None:
        """Extract and save learnings from messages.
        Called after a conversation to extract learnings.
        Args:
            messages: Conversation messages to analyze.
            **kwargs: Context including user_id, session_id, etc.
        """
        ...

    async def aprocess(self, messages: List[Any], **kwargs) -> None:
        """Async version of process."""
        ...

    def build_context(self, data: Any) -> str:
        """Build context string for agent prompts.
        Formats the recalled data into a string that can be
        injected into the agent's system prompt.
        Args:
            data: Data returned from recall().
        Returns:
            Formatted context string, or empty string if no data.
        """
        ...

    def get_tools(self, **kwargs) -> List[Callable]:
        """Get tools to expose to the agent.
        Returns callable tools that the agent can use to interact
        with this learning type (e.g., update_user_memory, search_learnings).
        Args:
            **kwargs: Context including user_id, session_id, etc.
        Returns:
            List of callable tools, or empty list if no tools.
        """
        ...

    async def aget_tools(self, **kwargs) -> List[Callable]:
        """Async version of get_tools."""
        ...

    @property
    def was_updated(self) -> bool:
        """Check if the store was updated in the last operation.
        Returns:
            True if data was saved/updated, False otherwise.
        """
        ...
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/protocol.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/learn/stores/session_context.py | """
Session Context Store
=====================
Storage backend for Session Context learning type.
Stores the current state of a session: what's happened, what's the goal, what's the plan.
Key Features:
- Summary extraction from conversations
- Optional planning mode (goal, plan, progress tracking)
- Session-scoped storage (each session_id has one context)
- Builds on previous context (doesn't start from scratch each time)
- No agent tool (system-managed only)
Scope:
- Context is retrieved by session_id only
- agent_id/team_id stored in DB columns for audit trail
Key Behavior:
- Extraction receives the previous context and updates it
- This ensures continuity even when message history is truncated
- Previous context + new messages → Updated context
Supported Modes:
- ALWAYS only. SessionContextStore does not support AGENTIC, PROPOSE, or HITL modes.
"""
from copy import deepcopy
from dataclasses import dataclass, field
from os import getenv
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
from agno.learn.config import LearningMode, SessionContextConfig
from agno.learn.schemas import SessionContext
from agno.learn.stores.protocol import LearningStore
from agno.learn.utils import from_dict_safe, to_dict_safe
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
if TYPE_CHECKING:
from agno.metrics import RunMetrics
try:
from agno.db.base import AsyncBaseDb, BaseDb
from agno.models.message import Message
from agno.tools.function import Function
except ImportError:
pass
@dataclass
class SessionContextStore(LearningStore):
    """Storage backend for Session Context learning type.
    Context is retrieved by session_id only — all agents sharing the same DB
    will see the same context for a given session. agent_id and team_id are
    stored in DB columns for audit purposes.
    Key difference from UserProfileStore:
    - UserProfile: accumulates memories over time
    - SessionContext: snapshot of current session state (updated on each extraction)
    Key behavior:
    - Extraction builds on previous context rather than starting fresh
    - This ensures continuity even when message history is truncated
    - Previous summary, goal, plan, progress are preserved and updated
    Args:
        config: SessionContextConfig with all settings including db and model.
        debug_mode: Enable debug logging.
    """

    # User-facing configuration: db, model, mode, schema, planning flag, etc.
    config: SessionContextConfig = field(default_factory=SessionContextConfig)
    # When True (or AGNO_DEBUG=true), set_log_level() switches to debug logging.
    debug_mode: bool = False
    # State tracking (internal)
    # True after the last extraction persisted an update (exposed via was_updated).
    context_updated: bool = field(default=False, init=False)
    # Resolved schema class; set in __post_init__ from config.schema or SessionContext.
    _schema: Any = field(default=None, init=False)

    def __post_init__(self):
        # Fall back to the built-in SessionContext schema when none is configured.
        self._schema = self.config.schema or SessionContext
        # This store is system-managed: only ALWAYS mode is supported.
        if self.config.mode != LearningMode.ALWAYS:
            log_warning(
                f"SessionContextStore only supports ALWAYS mode, got {self.config.mode}. Ignoring mode setting."
            )
# =========================================================================
# LearningStore Protocol Implementation
# =========================================================================
@property
def learning_type(self) -> str:
"""Unique identifier for this learning type."""
return "session_context"
@property
def schema(self) -> Any:
"""Schema class used for context."""
return self._schema
def recall(self, session_id: str, **kwargs) -> Optional[Any]:
"""Retrieve session context from storage.
Args:
session_id: The session to retrieve context for (required).
**kwargs: Additional context (ignored).
Returns:
Session context, or None if not found.
"""
if not session_id:
return None
return self.get(session_id=session_id)
async def arecall(self, session_id: str, **kwargs) -> Optional[Any]:
"""Async version of recall."""
if not session_id:
return None
return await self.aget(session_id=session_id)
def process(
self,
messages: List[Any],
session_id: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Extract session context from messages.
Args:
messages: Conversation messages to analyze.
session_id: The session to update context for (required).
user_id: User context (stored for audit).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
**kwargs: Additional context (ignored).
"""
# process only supported in ALWAYS mode
# for programmatic extraction, use extract_and_save directly
if self.config.mode != LearningMode.ALWAYS:
return
if not session_id or not messages:
return
self.extract_and_save(
messages=messages,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
run_metrics=kwargs.get("run_metrics"),
)
async def aprocess(
self,
messages: List[Any],
session_id: str,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Async version of process."""
if self.config.mode != LearningMode.ALWAYS:
return
if not session_id or not messages:
return
await self.aextract_and_save(
messages=messages,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
run_metrics=kwargs.get("run_metrics"),
)
    def build_context(self, data: Any) -> str:
        """Build context for the agent.
        Formats session context for injection into the agent's system prompt.
        Session context provides continuity within a single conversation,
        especially useful when message history gets truncated.
        Args:
            data: Session context data from recall().
        Returns:
            Context string to inject into the agent's system prompt.
        """
        if not data:
            return ""
        context_text = None
        # Prefer the schema's own renderer; fall back to field-by-field
        # formatting when the object only exposes a non-empty summary.
        if hasattr(data, "get_context_text"):
            context_text = data.get_context_text()
        elif hasattr(data, "summary") and data.summary:
            context_text = self._format_context(context=data)
        if not context_text:
            return ""
        return dedent(f"""\
<session_context>
This is a continuation of an ongoing session. Here's where things stand:
{context_text}
<session_context_guidelines>
Use this context to maintain continuity:
- Reference earlier decisions and conclusions naturally
- Don't re-ask questions that have already been answered
- Build on established understanding rather than starting fresh
- If the user references something from "earlier," this context has the details
Current messages take precedence if there's any conflict with this summary.
</session_context_guidelines>
</session_context>\
""")
def get_tools(self, **kwargs) -> List[Callable]:
"""Session context has no agent tools (system-managed only)."""
return []
async def aget_tools(self, **kwargs) -> List[Callable]:
"""Async version of get_tools."""
return []
@property
def was_updated(self) -> bool:
"""Check if context was updated in last operation."""
return self.context_updated
# =========================================================================
# Properties
# =========================================================================
@property
def db(self) -> Optional[Union["BaseDb", "AsyncBaseDb"]]:
"""Database backend."""
return self.config.db
@property
def model(self):
"""Model for extraction."""
return self.config.model
# =========================================================================
# Debug/Logging
# =========================================================================
def set_log_level(self):
"""Set log level based on debug_mode or environment variable."""
if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
self.debug_mode = True
set_log_level_to_debug()
else:
set_log_level_to_info()
# =========================================================================
# Read Operations
# =========================================================================
def get(self, session_id: str) -> Optional[Any]:
"""Retrieve session context by session_id.
Args:
session_id: The unique session identifier.
Returns:
Session context as schema instance, or None if not found.
"""
if not self.db:
return None
try:
result = self.db.get_learning(
learning_type=self.learning_type,
session_id=session_id,
)
if result and result.get("content"): # type: ignore[union-attr]
return from_dict_safe(self.schema, result["content"]) # type: ignore[index]
return None
except Exception as e:
log_debug(f"SessionContextStore.get failed for session_id={session_id}: {e}")
return None
async def aget(self, session_id: str) -> Optional[Any]:
"""Async version of get."""
if not self.db:
return None
try:
if isinstance(self.db, AsyncBaseDb):
result = await self.db.get_learning(
learning_type=self.learning_type,
session_id=session_id,
)
else:
result = self.db.get_learning(
learning_type=self.learning_type,
session_id=session_id,
)
if result and result.get("content"):
return from_dict_safe(self.schema, result["content"])
return None
except Exception as e:
log_debug(f"SessionContextStore.aget failed for session_id={session_id}: {e}")
return None
# =========================================================================
# Write Operations
# =========================================================================
def save(
self,
session_id: str,
context: Any,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Save or replace session context.
Args:
session_id: The unique session identifier.
context: The context data to save.
user_id: User context (stored in DB column for audit).
agent_id: Agent context (stored in DB column for audit).
team_id: Team context (stored in DB column for audit).
"""
if not self.db or not context:
return
try:
content = to_dict_safe(context)
if not content:
return
self.db.upsert_learning(
id=self._build_context_id(session_id=session_id),
learning_type=self.learning_type,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
log_debug(f"SessionContextStore.save: saved context for session_id={session_id}")
except Exception as e:
log_debug(f"SessionContextStore.save failed for session_id={session_id}: {e}")
async def asave(
self,
session_id: str,
context: Any,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of save."""
if not self.db or not context:
return
try:
content = to_dict_safe(context)
if not content:
return
if isinstance(self.db, AsyncBaseDb):
await self.db.upsert_learning(
id=self._build_context_id(session_id=session_id),
learning_type=self.learning_type,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
else:
self.db.upsert_learning(
id=self._build_context_id(session_id=session_id),
learning_type=self.learning_type,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
log_debug(f"SessionContextStore.asave: saved context for session_id={session_id}")
except Exception as e:
log_debug(f"SessionContextStore.asave failed for session_id={session_id}: {e}")
# =========================================================================
# Delete Operations
# =========================================================================
def delete(self, session_id: str) -> bool:
"""Delete session context.
Args:
session_id: The unique session identifier.
Returns:
True if deleted, False otherwise.
"""
if not self.db:
return False
try:
context_id = self._build_context_id(session_id=session_id)
return self.db.delete_learning(id=context_id) # type: ignore[return-value]
except Exception as e:
log_debug(f"SessionContextStore.delete failed for session_id={session_id}: {e}")
return False
async def adelete(self, session_id: str) -> bool:
"""Async version of delete."""
if not self.db:
return False
try:
context_id = self._build_context_id(session_id=session_id)
if isinstance(self.db, AsyncBaseDb):
return await self.db.delete_learning(id=context_id)
else:
return self.db.delete_learning(id=context_id)
except Exception as e:
log_debug(f"SessionContextStore.adelete failed for session_id={session_id}: {e}")
return False
def clear(
self,
session_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Clear session context (reset to empty).
Args:
session_id: The unique session identifier.
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
"""
if not self.db:
return
try:
empty_context = self.schema(session_id=session_id)
self.save(session_id=session_id, context=empty_context, agent_id=agent_id, team_id=team_id)
log_debug(f"SessionContextStore.clear: cleared context for session_id={session_id}")
except Exception as e:
log_debug(f"SessionContextStore.clear failed for session_id={session_id}: {e}")
async def aclear(
self,
session_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of clear."""
if not self.db:
return
try:
empty_context = self.schema(session_id=session_id)
await self.asave(session_id=session_id, context=empty_context, agent_id=agent_id, team_id=team_id)
log_debug(f"SessionContextStore.aclear: cleared context for session_id={session_id}")
except Exception as e:
log_debug(f"SessionContextStore.aclear failed for session_id={session_id}: {e}")
# =========================================================================
# Extraction Operations
# =========================================================================
    def extract_and_save(
        self,
        messages: List["Message"],
        session_id: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> str:
        """Extract session context from messages and save.
        Builds on previous context rather than starting from scratch.
        Args:
            messages: Conversation messages to analyze.
            session_id: The unique session identifier.
            user_id: User context (stored for audit).
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            run_metrics: Optional accumulator; the extraction model's token
                usage is added to it when the model reports usage.
        Returns:
            Response from model.
        """
        if self.model is None:
            log_warning("SessionContextStore.extract_and_save: no model provided")
            return "No model provided for session context extraction"
        if not self.db:
            log_warning("SessionContextStore.extract_and_save: no database provided")
            return "No DB provided for session context store"
        log_debug("SessionContextStore: Extracting session context", center=True)
        self.context_updated = False
        # Get existing context to build upon
        existing_context = self.get(session_id=session_id)
        conversation_text = self._messages_to_text(messages=messages)
        # The tools close over the ids and previous context so the model can
        # persist updates directly during the tool-call loop.
        tools = self._get_extraction_tools(
            session_id=session_id,
            user_id=user_id,
            agent_id=agent_id,
            team_id=team_id,
            existing_context=existing_context,
        )
        functions = self._build_functions_for_model(tools=tools)
        system_message = self._get_system_message(
            conversation_text=conversation_text,
            existing_context=existing_context,
        )
        messages_for_model = [
            system_message,
            # For models that require a non-system message
            Message(
                role="user",
                content="Please analyze the conversation and update the session context using the available tools.",
            ),
        ]
        # Deep-copy so per-run state on the shared model instance is not mutated.
        model_copy = deepcopy(self.model)
        response = model_copy.response(
            messages=messages_for_model,
            tools=functions,
        )
        if run_metrics is not None and response.response_usage is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
        # A tool execution means the model invoked an update tool, i.e. the context was saved.
        if response.tool_executions:
            self.context_updated = True
        log_debug("SessionContextStore: Extraction complete", center=True)
        return response.content or ("Context updated" if self.context_updated else "No updates needed")
    async def aextract_and_save(
        self,
        messages: List["Message"],
        session_id: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> str:
        """Async version of extract_and_save."""
        if self.model is None:
            log_warning("SessionContextStore.aextract_and_save: no model provided")
            return "No model provided for session context extraction"
        if not self.db:
            log_warning("SessionContextStore.aextract_and_save: no database provided")
            return "No DB provided for session context store"
        log_debug("SessionContextStore: Extracting session context (async)", center=True)
        self.context_updated = False
        # Get existing context to build upon
        existing_context = await self.aget(session_id=session_id)
        conversation_text = self._messages_to_text(messages=messages)
        # The tools close over the ids and previous context so the model can
        # persist updates directly during the tool-call loop.
        tools = await self._aget_extraction_tools(
            session_id=session_id,
            user_id=user_id,
            agent_id=agent_id,
            team_id=team_id,
            existing_context=existing_context,
        )
        functions = self._build_functions_for_model(tools=tools)
        system_message = self._get_system_message(
            conversation_text=conversation_text,
            existing_context=existing_context,
        )
        messages_for_model = [
            system_message,
            # For models that require a non-system message
            Message(
                role="user",
                content="Please analyze the conversation and update the session context using the available tools.",
            ),
        ]
        # Deep-copy so per-run state on the shared model instance is not mutated.
        model_copy = deepcopy(self.model)
        response = await model_copy.aresponse(
            messages=messages_for_model,
            tools=functions,
        )
        if run_metrics is not None and response.response_usage is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
        # A tool execution means the model invoked an update tool, i.e. the context was saved.
        if response.tool_executions:
            self.context_updated = True
        log_debug("SessionContextStore: Extraction complete", center=True)
        return response.content or ("Context updated" if self.context_updated else "No updates needed")
# =========================================================================
# Private Helpers
# =========================================================================
def _build_context_id(self, session_id: str) -> str:
"""Build a unique context ID."""
return f"session_context_{session_id}"
def _format_context(self, context: Any) -> str:
"""Format context data for display in agent prompt."""
parts = []
if hasattr(context, "summary") and context.summary:
parts.append(f"**Summary:** {context.summary}")
if hasattr(context, "goal") and context.goal:
parts.append(f"**Current Goal:** {context.goal}")
if hasattr(context, "plan") and context.plan:
plan_items = "\n - ".join(context.plan)
parts.append(f"**Plan:**\n - {plan_items}")
if hasattr(context, "progress") and context.progress:
progress_items = "\n - ".join(f"✓ {item}" for item in context.progress)
parts.append(f"**Completed:**\n - {progress_items}")
return "\n\n".join(parts)
def _messages_to_text(self, messages: List["Message"]) -> str:
"""Convert messages to text for extraction."""
parts = []
for msg in messages:
if msg.role == "user":
content = msg.get_content_string() if hasattr(msg, "get_content_string") else str(msg.content)
if content and content.strip():
parts.append(f"User: {content}")
elif msg.role in ["assistant", "model"]:
content = msg.get_content_string() if hasattr(msg, "get_content_string") else str(msg.content)
if content and content.strip():
parts.append(f"Assistant: {content}")
return "\n".join(parts)
    def _get_system_message(
        self,
        conversation_text: str,
        existing_context: Optional[Any] = None,
    ) -> "Message":
        """Build system message for extraction.

        Creates a prompt that guides the model to extract and update session context,
        building on previous context rather than starting fresh each time.

        Args:
            conversation_text: Transcript of the new conversation to integrate.
            existing_context: Previously stored context object, if any. Its
                summary (and goal/plan/progress when planning is enabled) is
                surfaced so the model updates rather than replaces it.

        Returns:
            Message: A system-role message containing the full extraction prompt.
        """
        from agno.models.message import Message

        # A user-configured system message bypasses all prompt construction below.
        if self.config.system_message is not None:
            return Message(role="system", content=self.config.system_message)
        enable_planning = self.config.enable_planning
        custom_instructions = self.config.instructions or ""
        # Build previous context section
        previous_context_section = ""
        if existing_context:
            previous_context_section = dedent("""\
                ## Previous Context
                This session already has context from earlier exchanges. Your job is to UPDATE it,
                not replace it. Integrate new information while preserving what's still relevant.
                """)
            if hasattr(existing_context, "summary") and existing_context.summary:
                previous_context_section += f"**Previous summary:**\n{existing_context.summary}\n\n"
            # Goal/plan/progress are only surfaced when planning mode is enabled.
            if enable_planning:
                if hasattr(existing_context, "goal") and existing_context.goal:
                    previous_context_section += f"**Established goal:** {existing_context.goal}\n"
                if hasattr(existing_context, "plan") and existing_context.plan:
                    previous_context_section += f"**Current plan:** {', '.join(existing_context.plan)}\n"
                if hasattr(existing_context, "progress") and existing_context.progress:
                    previous_context_section += f"**Completed so far:** {', '.join(existing_context.progress)}\n"
            previous_context_section += "\n"
        if enable_planning:
            # Planning prompt: asks the model for summary + goal + plan + progress.
            system_prompt = (
                dedent("""\
                You are a Session Context Manager. Your job is to maintain a living summary of this
                conversation that enables continuity - especially important when message history
                gets truncated.
                ## Philosophy
                Think of session context like notes a colleague would take during a working session:
                - Not a transcript, but the current STATE of the work
                - What's been decided, what's still open
                - Where things stand, not every step of how we got here
                - What someone would need to pick up exactly where we left off
                ## What to Capture
                1. **Summary**: The essential narrative of this session
                - Key topics and how they were resolved
                - Important decisions and their rationale
                - Current state of any work in progress
                - Open questions or unresolved items
                2. **Goal**: What the user is ultimately trying to accomplish
                - May evolve as the conversation progresses
                - Keep updating if the user clarifies or pivots
                3. **Plan**: The approach being taken (if one has emerged)
                - Steps that have been outlined
                - Update if the plan changes
                4. **Progress**: What's been completed
                - Helps track where we are in multi-step work
                - Mark items done as they're completed
                """)
                + previous_context_section
                + dedent("""\
                ## New Conversation to Integrate
                <conversation>
                """)
                + conversation_text
                + dedent("""
                </conversation>
                ## Guidelines
                **Integration, not replacement:**
                - BUILD ON previous context - don't lose earlier information
                - If previous summary mentioned topic X and it's still relevant, keep it
                - If something was resolved or superseded, update accordingly
                **Quality of summary:**
                - Should stand alone - reader should understand the full session
                - Capture conclusions and current state, not conversation flow
                - Be concise but complete - aim for density of useful information
                - Include enough detail that work could continue seamlessly
                **Good summary characteristics:**
                - "User is building a REST API for inventory management. Decided on FastAPI over Flask
                for async support. Schema design complete with Products, Categories, and Suppliers tables.
                Currently implementing the Products endpoint with pagination."
                **Poor summary characteristics:**
                - "User asked about APIs. We discussed some options. Made some decisions."
                (Too vague - doesn't capture what was actually decided)\
                """)
                + custom_instructions
                + dedent("""
                Save your updated context using the save_session_context tool.\
                """)
            )
        else:
            # Summary-only prompt: asks the model for a single rolling summary.
            system_prompt = (
                dedent("""\
                You are a Session Context Manager. Your job is to maintain a living summary of this
                conversation that enables continuity - especially important when message history
                gets truncated.
                ## Philosophy
                Think of session context like meeting notes:
                - Not a transcript, but what matters for continuity
                - What was discussed, decided, and concluded
                - Current state of any ongoing work
                - What someone would need to pick up where we left off
                ## What to Capture
                Create a summary that includes:
                - **Topics covered** and how they were addressed
                - **Decisions made** and key conclusions
                - **Current state** of any work in progress
                - **Open items** - questions pending, next steps discussed
                - **Important details** that would be awkward to re-establish
                """)
                + previous_context_section
                + dedent("""\
                ## New Conversation to Integrate
                <conversation>
                """)
                + conversation_text
                + dedent("""
                </conversation>
                ## Guidelines
                **Integration, not replacement:**
                - BUILD ON previous summary - don't lose earlier context
                - Weave new information into existing narrative
                - If something is superseded, update it; if still relevant, preserve it
                **Quality standards:**
                *Good summary:*
                "Helping user debug a memory leak in their Node.js application. Identified that the
                issue occurs in the WebSocket handler - connections aren't being cleaned up on
                disconnect. Reviewed the connection management code and found missing event listener
                removal. User is implementing the fix with a connection registry pattern. Next step:
                test under load to verify the leak is resolved."
                *Poor summary:*
                "User had a bug. We looked at code. Found some issues. Working on fixing it."
                (Missing: what bug, what code, what issues, what fix)
                **Aim for:**
                - Density of useful information
                - Standalone comprehensibility
                - Enough detail to continue seamlessly
                - Focus on state over story
                """)
                + custom_instructions
                + dedent("""
                Save your updated summary using the save_session_context tool.\
                """)
            )
        # Caller-supplied extras are appended after the built-in prompt.
        if self.config.additional_instructions:
            system_prompt += f"\n\n{self.config.additional_instructions}"
        return Message(role="system", content=system_prompt)
def _build_functions_for_model(self, tools: List[Callable]) -> List["Function"]:
"""Convert callables to Functions for model."""
from agno.tools.function import Function
functions = []
seen_names = set()
for tool in tools:
try:
name = tool.__name__
if name in seen_names:
continue
seen_names.add(name)
func = Function.from_callable(tool, strict=True)
func.strict = True
functions.append(func)
log_debug(f"Added function {func.name}")
except Exception as e:
log_warning(f"Could not add function {tool}: {e}")
return functions
    def _get_extraction_tools(
        self,
        session_id: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        existing_context: Optional[Any] = None,
    ) -> List[Callable]:
        """Get sync extraction tools for the model.

        Args:
            session_id: Session whose context the generated tool will save.
            user_id: Optional user scope forwarded to save().
            agent_id: Optional agent scope forwarded to save().
            team_id: Optional team scope forwarded to save().
            existing_context: Previously stored context; used to carry forward
                goal/plan/progress values the model does not resend.

        Returns:
            Single-element list with the save_session_context closure. Its
            docstring doubles as the tool description shown to the model.
        """
        enable_planning = self.config.enable_planning
        if enable_planning:
            # Full planning mode: include goal, plan, progress parameters
            def save_session_context(
                summary: str,
                goal: Optional[str] = None,
                plan: Optional[List[str]] = None,
                progress: Optional[List[str]] = None,
            ) -> str:
                """Save the updated session context.
                The summary should capture the current state of the conversation in a way that
                enables seamless continuation. Think: "What would someone need to know to pick
                up exactly where we left off?"
                Args:
                    summary: A comprehensive summary that integrates previous context with new
                        developments. Should be standalone - readable without seeing the
                        actual messages. Capture:
                        - What's being worked on and why
                        - Key decisions made and their rationale
                        - Current state of any work in progress
                        - Open questions or pending items
                        Good: "Debugging a React performance issue in the user's dashboard.
                        Identified unnecessary re-renders in the DataTable component caused by
                        inline object creation in props. Implemented useMemo for the column
                        definitions. Testing shows 60% render reduction. Next: profile the
                        filtering logic which may have similar issues."
                        Bad: "Looked at React code. Found some performance issues. Made changes."
                    goal: The user's primary objective for this session (if one is apparent).
                        Update if the goal has evolved or been clarified.
                    plan: Current plan of action as a list of steps (if a structured approach
                        has emerged). Update as the plan evolves.
                    progress: Steps from the plan that have been completed. Add items as work
                        is finished to track advancement through the plan.
                Returns:
                    Confirmation message.
                """
                try:
                    context_data: Dict[str, Any] = {
                        "session_id": session_id,
                        "summary": summary,
                    }
                    # Preserve previous values if not updated
                    if goal is not None:
                        context_data["goal"] = goal
                    elif existing_context and hasattr(existing_context, "goal"):
                        context_data["goal"] = existing_context.goal
                    if plan is not None:
                        context_data["plan"] = plan
                    elif existing_context and hasattr(existing_context, "plan"):
                        context_data["plan"] = existing_context.plan or []
                    if progress is not None:
                        context_data["progress"] = progress
                    elif existing_context and hasattr(existing_context, "progress"):
                        context_data["progress"] = existing_context.progress or []
                    # Convert the dict into the configured schema and persist it.
                    context = from_dict_safe(self.schema, context_data)
                    self.save(
                        session_id=session_id,
                        context=context,
                        user_id=user_id,
                        agent_id=agent_id,
                        team_id=team_id,
                    )
                    log_debug(f"Session context saved: {summary[:50]}...")
                    return "Session context saved"
                except Exception as e:
                    # Return the error to the model instead of raising, so the
                    # extraction run can complete.
                    log_warning(f"Error saving session context: {e}")
                    return f"Error: {e}"
        else:
            # Summary-only mode: only summary parameter
            def save_session_context(summary: str) -> str:  # type: ignore[misc]
                """Save the updated session summary.
                The summary should capture the current state of the conversation in a way that
                enables seamless continuation. Think: "What would someone need to know to pick
                up exactly where we left off?"
                Args:
                    summary: A comprehensive summary that integrates previous context with new
                        developments. Should be standalone - readable without seeing the
                        actual messages. Capture:
                        - What's being worked on and why
                        - Key decisions made and their rationale
                        - Current state of any work in progress
                        - Open questions or pending items
                        Good: "Helping user debug a memory leak in their Node.js application.
                        Identified that the issue occurs in the WebSocket handler - connections
                        aren't being cleaned up on disconnect. Reviewed the connection management
                        code and found missing event listener removal. User is implementing the
                        fix with a connection registry pattern. Next step: test under load."
                        Bad: "User had a bug. We looked at code. Found some issues. Working on fixing it."
                Returns:
                    Confirmation message.
                """
                try:
                    context_data = {
                        "session_id": session_id,
                        "summary": summary,
                    }
                    context = from_dict_safe(self.schema, context_data)
                    self.save(
                        session_id=session_id,
                        context=context,
                        user_id=user_id,
                        agent_id=agent_id,
                        team_id=team_id,
                    )
                    log_debug(f"Session context saved: {summary[:50]}...")
                    return "Session context saved"
                except Exception as e:
                    log_warning(f"Error saving session context: {e}")
                    return f"Error: {e}"
        return [save_session_context]
    async def _aget_extraction_tools(
        self,
        session_id: str,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        existing_context: Optional[Any] = None,
    ) -> List[Callable]:
        """Get async extraction tools for the model.

        Async counterpart of _get_extraction_tools: identical tool shape and
        docstring, but the closure awaits asave() instead of calling save().

        Args:
            session_id: Session whose context the generated tool will save.
            user_id: Optional user scope forwarded to asave().
            agent_id: Optional agent scope forwarded to asave().
            team_id: Optional team scope forwarded to asave().
            existing_context: Previously stored context; used to carry forward
                goal/plan/progress values the model does not resend.

        Returns:
            Single-element list with the async save_session_context closure.
        """
        enable_planning = self.config.enable_planning
        if enable_planning:
            # Full planning mode: include goal, plan, progress parameters
            async def save_session_context(
                summary: str,
                goal: Optional[str] = None,
                plan: Optional[List[str]] = None,
                progress: Optional[List[str]] = None,
            ) -> str:
                """Save the updated session context.
                The summary should capture the current state of the conversation in a way that
                enables seamless continuation. Think: "What would someone need to know to pick
                up exactly where we left off?"
                Args:
                    summary: A comprehensive summary that integrates previous context with new
                        developments. Should be standalone - readable without seeing the
                        actual messages. Capture:
                        - What's being worked on and why
                        - Key decisions made and their rationale
                        - Current state of any work in progress
                        - Open questions or pending items
                        Good: "Debugging a React performance issue in the user's dashboard.
                        Identified unnecessary re-renders in the DataTable component caused by
                        inline object creation in props. Implemented useMemo for the column
                        definitions. Testing shows 60% render reduction. Next: profile the
                        filtering logic which may have similar issues."
                        Bad: "Looked at React code. Found some performance issues. Made changes."
                    goal: The user's primary objective for this session (if one is apparent).
                        Update if the goal has evolved or been clarified.
                    plan: Current plan of action as a list of steps (if a structured approach
                        has emerged). Update as the plan evolves.
                    progress: Steps from the plan that have been completed. Add items as work
                        is finished to track advancement through the plan.
                Returns:
                    Confirmation message.
                """
                try:
                    context_data: Dict[str, Any] = {
                        "session_id": session_id,
                        "summary": summary,
                    }
                    # Preserve previous values if not updated
                    if goal is not None:
                        context_data["goal"] = goal
                    elif existing_context and hasattr(existing_context, "goal"):
                        context_data["goal"] = existing_context.goal
                    if plan is not None:
                        context_data["plan"] = plan
                    elif existing_context and hasattr(existing_context, "plan"):
                        context_data["plan"] = existing_context.plan or []
                    if progress is not None:
                        context_data["progress"] = progress
                    elif existing_context and hasattr(existing_context, "progress"):
                        context_data["progress"] = existing_context.progress or []
                    # Convert the dict into the configured schema and persist it.
                    context = from_dict_safe(self.schema, context_data)
                    await self.asave(
                        session_id=session_id,
                        context=context,
                        user_id=user_id,
                        agent_id=agent_id,
                        team_id=team_id,
                    )
                    log_debug(f"Session context saved: {summary[:50]}...")
                    return "Session context saved"
                except Exception as e:
                    # Return the error to the model instead of raising, so the
                    # extraction run can complete.
                    log_warning(f"Error saving session context: {e}")
                    return f"Error: {e}"
        else:
            # Summary-only mode: only summary parameter
            async def save_session_context(summary: str) -> str:  # type: ignore[misc]
                """Save the updated session summary.
                The summary should capture the current state of the conversation in a way that
                enables seamless continuation. Think: "What would someone need to know to pick
                up exactly where we left off?"
                Args:
                    summary: A comprehensive summary that integrates previous context with new
                        developments. Should be standalone - readable without seeing the
                        actual messages. Capture:
                        - What's being worked on and why
                        - Key decisions made and their rationale
                        - Current state of any work in progress
                        - Open questions or pending items
                        Good: "Helping user debug a memory leak in their Node.js application.
                        Identified that the issue occurs in the WebSocket handler - connections
                        aren't being cleaned up on disconnect. Reviewed the connection management
                        code and found missing event listener removal. User is implementing the
                        fix with a connection registry pattern. Next step: test under load."
                        Bad: "User had a bug. We looked at code. Found some issues. Working on fixing it."
                Returns:
                    Confirmation message.
                """
                try:
                    context_data = {
                        "session_id": session_id,
                        "summary": summary,
                    }
                    context = from_dict_safe(self.schema, context_data)
                    await self.asave(
                        session_id=session_id,
                        context=context,
                        user_id=user_id,
                        agent_id=agent_id,
                        team_id=team_id,
                    )
                    log_debug(f"Session context saved: {summary[:50]}...")
                    return "Session context saved"
                except Exception as e:
                    log_warning(f"Error saving session context: {e}")
                    return f"Error: {e}"
        return [save_session_context]
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
"""String representation for debugging."""
has_db = self.db is not None
has_model = self.model is not None
return (
f"SessionContextStore("
f"mode={self.config.mode.value}, "
f"db={has_db}, "
f"model={has_model}, "
f"enable_planning={self.config.enable_planning})"
)
def print(self, session_id: str, *, raw: bool = False) -> None:
"""Print formatted session context.
Args:
session_id: The session to print context for.
raw: If True, print raw dict using pprint instead of formatted panel.
Example:
>>> store.print(session_id="sess_123")
╭─────────────── Session Context ───────────────╮
│ Summary: Debugging React performance issue... │
│ Goal: Fix DataTable re-renders │
│ Plan: │
│ 1. Profile component renders │
│ 2. Identify unnecessary re-renders │
│ Progress: │
│ ✓ Profile component renders │
╰──────────────── sess_123 ─────────────────────╯
"""
from agno.learn.utils import print_panel
context = self.get(session_id=session_id)
lines = []
if context:
if hasattr(context, "summary") and context.summary:
lines.append(f"Summary: {context.summary}")
if hasattr(context, "goal") and context.goal:
if lines:
lines.append("")
lines.append(f"Goal: {context.goal}")
if hasattr(context, "plan") and context.plan:
if lines:
lines.append("")
lines.append("Plan:")
for i, step in enumerate(context.plan, 1):
lines.append(f" {i}. {step}")
if hasattr(context, "progress") and context.progress:
if lines:
lines.append("")
lines.append("Progress:")
for step in context.progress:
lines.append(f" [green]✓[/green] {step}")
print_panel(
title="Session Context",
subtitle=session_id,
lines=lines,
empty_message="No session context",
raw_data=context,
raw=raw,
)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/session_context.py",
"license": "Apache License 2.0",
"lines": 1019,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/stores/user_memory.py | """
User Memory Store
=================
Storage backend for User Memory learning type.
Stores unstructured observations about users that don't fit into
structured profile fields. These are long-term memories that persist
across sessions.
Key Features:
- Background extraction from conversations
- Agent tools for in-conversation updates
- Multi-user isolation (each user has their own memories)
- Add, update, delete memory operations
Scope:
- Memories are retrieved by user_id only
- agent_id/team_id stored in DB columns for audit trail
- agent_id/team_id stored on individual memories for granular audit
Supported Modes:
- ALWAYS: Automatic extraction after conversations
- AGENTIC: Agent calls update_user_memory tool directly
"""
import uuid
from copy import deepcopy
from dataclasses import dataclass, field
from os import getenv
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union
from agno.learn.config import LearningMode, UserMemoryConfig
from agno.learn.schemas import Memories
from agno.learn.stores.protocol import LearningStore
from agno.learn.utils import from_dict_safe, to_dict_safe
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
if TYPE_CHECKING:
from agno.metrics import RunMetrics
try:
from agno.db.base import AsyncBaseDb, BaseDb
from agno.models.message import Message
from agno.tools.function import Function
except ImportError:
pass
@dataclass
class UserMemoryStore(LearningStore):
"""Storage backend for User Memory learning type.
Memories are retrieved by user_id only - all agents sharing the same DB
will see the same memories for a given user. agent_id and team_id are
stored for audit purposes (both at DB column level and on individual memories).
Args:
config: UserMemoryConfig with all settings including db and model.
debug_mode: Enable debug logging.
"""
config: UserMemoryConfig = field(default_factory=UserMemoryConfig)
debug_mode: bool = False
# State tracking (internal)
memories_updated: bool = field(default=False, init=False)
_schema: Any = field(default=None, init=False)
def __post_init__(self):
self._schema = self.config.schema or Memories
if self.config.mode == LearningMode.PROPOSE:
log_warning("UserMemoryStore does not support PROPOSE mode.")
elif self.config.mode == LearningMode.HITL:
log_warning("UserMemoryStore does not support HITL mode.")
# =========================================================================
# LearningStore Protocol Implementation
# =========================================================================
@property
def learning_type(self) -> str:
"""Unique identifier for this learning type."""
return "user_memory"
@property
def schema(self) -> Any:
"""Schema class used for memories."""
return self._schema
def recall(self, user_id: str, **kwargs) -> Optional[Any]:
"""Retrieve memories from storage.
Args:
user_id: The user to retrieve memories for (required).
**kwargs: Additional context (ignored).
Returns:
Memories, or None if not found.
"""
if not user_id:
return None
return self.get(user_id=user_id)
async def arecall(self, user_id: str, **kwargs) -> Optional[Any]:
"""Async version of recall."""
if not user_id:
return None
return await self.aget(user_id=user_id)
def process(
self,
messages: List[Any],
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Extract memories from messages.
Args:
messages: Conversation messages to analyze.
user_id: The user to update memories for (required).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
**kwargs: Additional context (ignored).
"""
# process only supported in ALWAYS mode
# for programmatic extraction, use extract_and_save directly
if self.config.mode != LearningMode.ALWAYS:
return
if not user_id or not messages:
return
self.extract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
run_metrics=kwargs.get("run_metrics"),
)
async def aprocess(
self,
messages: List[Any],
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Async version of process."""
if self.config.mode != LearningMode.ALWAYS:
return
if not user_id or not messages:
return
await self.aextract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
run_metrics=kwargs.get("run_metrics"),
)
def build_context(self, data: Any) -> str:
"""Build context for the agent.
Formats memories data for injection into the agent's system prompt.
Designed to enable natural, personalized responses without meta-commentary
about memory systems.
Args:
data: Memories data from recall().
Returns:
Context string to inject into the agent's system prompt.
"""
# Build tool documentation based on what's enabled
tool_docs = self._build_tool_documentation()
if not data:
if self._should_expose_tools:
return (
dedent("""\
<user_memory>
No memories saved about this user yet.
""")
+ tool_docs
+ dedent("""
</user_memory>""")
)
return ""
# Build memories section
memories_text = None
if hasattr(data, "get_memories_text"):
memories_text = data.get_memories_text()
elif hasattr(data, "memories") and data.memories:
memories_text = "\n".join(f"- {m.get('content', str(m))}" for m in data.memories)
if not memories_text:
if self._should_expose_tools:
return (
dedent("""\
<user_memory>
No memories saved about this user yet.
""")
+ tool_docs
+ dedent("""
</user_memory>""")
)
return ""
context = "<user_memory>\n"
context += memories_text + "\n"
context += dedent("""
<memory_application_guidelines>
Apply this knowledge naturally - respond as if you inherently know this information,
exactly as a colleague would recall shared history without narrating their thought process.
- Selectively apply memories based on relevance to the current query
- Never say "based on my memory" or "I remember that" - just use the information naturally
- Current conversation always takes precedence over stored memories
- Use memories to calibrate tone, depth, and examples without announcing it
</memory_application_guidelines>""")
if self._should_expose_tools:
context += (
dedent("""
<memory_updates>
""")
+ tool_docs
+ dedent("""
</memory_updates>""")
)
context += "\n</user_memory>"
return context
def _build_tool_documentation(self) -> str:
"""Build documentation for available memory tools.
Returns:
String documenting which tools are available and when to use them.
"""
docs = []
if self.config.agent_can_update_memories:
docs.append(
"Use `update_user_memory` to save observations, preferences, and context about this user "
"that would help personalize future conversations or avoid asking the same questions."
)
return "\n\n".join(docs) if docs else ""
def get_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Get tools to expose to agent.
Args:
user_id: The user context (required for tool to work).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
**kwargs: Additional context (ignored).
Returns:
List containing update_user_memory tool if enabled.
"""
if not user_id or not self._should_expose_tools:
return []
return self.get_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
async def aget_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Async version of get_tools."""
if not user_id or not self._should_expose_tools:
return []
return await self.aget_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
@property
def was_updated(self) -> bool:
"""Check if memories were updated in last operation."""
return self.memories_updated
@property
def _should_expose_tools(self) -> bool:
"""Check if tools should be exposed to the agent.
Returns True if either:
- mode is AGENTIC (tools are the primary way to update memory), OR
- enable_agent_tools is explicitly True
"""
return self.config.mode == LearningMode.AGENTIC or self.config.enable_agent_tools
# =========================================================================
# Properties
# =========================================================================
@property
def db(self) -> Optional[Union["BaseDb", "AsyncBaseDb"]]:
"""Database backend."""
return self.config.db
@property
def model(self):
"""Model for extraction."""
return self.config.model
# =========================================================================
# Debug/Logging
# =========================================================================
def set_log_level(self):
"""Set log level based on debug_mode or environment variable."""
if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
self.debug_mode = True
set_log_level_to_debug()
else:
set_log_level_to_info()
# =========================================================================
# Agent Tools
# =========================================================================
    def get_agent_tools(
        self,
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> List[Callable]:
        """Get the tools to expose to the agent.

        Args:
            user_id: The user to update (required).
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).

        Returns:
            List of callable tools based on config settings. The closure's
            docstring is what the model sees as the tool description.
        """
        tools = []
        # Memory update tool (delegates to extraction)
        if self.config.agent_can_update_memories:

            def update_user_memory(task: str) -> str:
                """Save or update information about this user for future conversations.
                Use this when you learn something worth remembering - information that would
                help personalize future interactions or provide continuity across sessions.
                Args:
                    task: What to save, update, or remove. Be specific and factual.
                        Good examples:
                        - "User is a senior engineer at Stripe working on payments"
                        - "Prefers concise responses without lengthy explanations"
                        - "Update: User moved from NYC to London"
                        - "Remove the memory about their old job at Acme"
                        Bad examples:
                        - "User seems nice" (too vague)
                        - "Had a meeting today" (not durable)
                Returns:
                    Confirmation of what was saved/updated.
                """
                # The closure captures the scoping IDs so the model only has to
                # supply the natural-language task.
                return self.run_memories_update(
                    task=task,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                )

            tools.append(update_user_memory)
        return tools
    async def aget_agent_tools(
        self,
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> List[Callable]:
        """Get the async tools to expose to the agent.

        Async counterpart of get_agent_tools; the tool awaits
        arun_memories_update instead of the sync variant.
        """
        tools = []
        if self.config.agent_can_update_memories:

            async def update_user_memory(task: str) -> str:
                """Save or update information about this user for future conversations.
                Use this when you learn something worth remembering - information that would
                help personalize future interactions or provide continuity across sessions.
                Args:
                    task: What to save, update, or remove. Be specific and factual.
                        Good examples:
                        - "User is a senior engineer at Stripe working on payments"
                        - "Prefers concise responses without lengthy explanations"
                        - "Update: User moved from NYC to London"
                        - "Remove the memory about their old job at Acme"
                        Bad examples:
                        - "User seems nice" (too vague)
                        - "Had a meeting today" (not durable)
                Returns:
                    Confirmation of what was saved/updated.
                """
                # The closure captures the scoping IDs so the model only has to
                # supply the natural-language task.
                return await self.arun_memories_update(
                    task=task,
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                )

            tools.append(update_user_memory)
        return tools
# =========================================================================
# Read Operations
# =========================================================================
def get(self, user_id: str) -> Optional[Any]:
"""Retrieve memories by user_id.
Args:
user_id: The unique user identifier.
Returns:
Memories as schema instance, or None if not found.
"""
if not self.db:
return None
try:
result = self.db.get_learning(
learning_type=self.learning_type,
user_id=user_id,
)
if result and result.get("content"): # type: ignore[union-attr]
return from_dict_safe(self.schema, result["content"]) # type: ignore[index]
return None
except Exception as e:
log_debug(f"UserMemoryStore.get failed for user_id={user_id}: {e}")
return None
async def aget(self, user_id: str) -> Optional[Any]:
"""Async version of get."""
if not self.db:
return None
try:
if isinstance(self.db, AsyncBaseDb):
result = await self.db.get_learning(
learning_type=self.learning_type,
user_id=user_id,
)
else:
result = self.db.get_learning(
learning_type=self.learning_type,
user_id=user_id,
)
if result and result.get("content"):
return from_dict_safe(self.schema, result["content"])
return None
except Exception as e:
log_debug(f"UserMemoryStore.aget failed for user_id={user_id}: {e}")
return None
# =========================================================================
# Write Operations
# =========================================================================
def save(
self,
user_id: str,
memories: Any,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Save or update memories.
Args:
user_id: The unique user identifier.
memories: The memories data to save.
agent_id: Agent context (stored in DB column for audit).
team_id: Team context (stored in DB column for audit).
"""
if not self.db or not memories:
return
try:
content = to_dict_safe(memories)
if not content:
return
self.db.upsert_learning(
id=self._build_memories_id(user_id=user_id),
learning_type=self.learning_type,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
log_debug(f"UserMemoryStore.save: saved memories for user_id={user_id}")
except Exception as e:
log_debug(f"UserMemoryStore.save failed for user_id={user_id}: {e}")
async def asave(
self,
user_id: str,
memories: Any,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of save."""
if not self.db or not memories:
return
try:
content = to_dict_safe(memories)
if not content:
return
if isinstance(self.db, AsyncBaseDb):
await self.db.upsert_learning(
id=self._build_memories_id(user_id=user_id),
learning_type=self.learning_type,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
else:
self.db.upsert_learning(
id=self._build_memories_id(user_id=user_id),
learning_type=self.learning_type,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
log_debug(f"UserMemoryStore.asave: saved memories for user_id={user_id}")
except Exception as e:
log_debug(f"UserMemoryStore.asave failed for user_id={user_id}: {e}")
# =========================================================================
# Delete Operations
# =========================================================================
def delete(self, user_id: str) -> bool:
"""Delete memories for a user.
Args:
user_id: The unique user identifier.
Returns:
True if deleted, False otherwise.
"""
if not self.db:
return False
try:
memories_id = self._build_memories_id(user_id=user_id)
return self.db.delete_learning(id=memories_id) # type: ignore[return-value]
except Exception as e:
log_debug(f"UserMemoryStore.delete failed for user_id={user_id}: {e}")
return False
async def adelete(self, user_id: str) -> bool:
"""Async version of delete."""
if not self.db:
return False
try:
memories_id = self._build_memories_id(user_id=user_id)
if isinstance(self.db, AsyncBaseDb):
return await self.db.delete_learning(id=memories_id)
else:
return self.db.delete_learning(id=memories_id)
except Exception as e:
log_debug(f"UserMemoryStore.adelete failed for user_id={user_id}: {e}")
return False
def clear(
self,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Clear all memories for a user (reset to empty).
Args:
user_id: The unique user identifier.
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
"""
if not self.db:
return
try:
empty_memories = self.schema(user_id=user_id)
self.save(user_id=user_id, memories=empty_memories, agent_id=agent_id, team_id=team_id)
log_debug(f"UserMemoryStore.clear: cleared memories for user_id={user_id}")
except Exception as e:
log_debug(f"UserMemoryStore.clear failed for user_id={user_id}: {e}")
async def aclear(
self,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of clear."""
if not self.db:
return
try:
empty_memories = self.schema(user_id=user_id)
await self.asave(user_id=user_id, memories=empty_memories, agent_id=agent_id, team_id=team_id)
log_debug(f"UserMemoryStore.aclear: cleared memories for user_id={user_id}")
except Exception as e:
log_debug(f"UserMemoryStore.aclear failed for user_id={user_id}: {e}")
# =========================================================================
# Memory Operations
# =========================================================================
def add_memory(
self,
user_id: str,
memory: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> Optional[str]:
"""Add a single memory.
Args:
user_id: The unique user identifier.
memory: The memory text to add.
agent_id: Agent that added this (stored for audit).
team_id: Team context (stored for audit).
**kwargs: Additional fields for the memory.
Returns:
The memory ID if added, None otherwise.
"""
memories_data = self.get(user_id=user_id)
if memories_data is None:
memories_data = self.schema(user_id=user_id)
memory_id = None
if hasattr(memories_data, "add_memory"):
memory_id = memories_data.add_memory(memory, **kwargs)
elif hasattr(memories_data, "memories"):
memory_id = str(uuid.uuid4())[:8]
memory_entry = {"id": memory_id, "content": memory, **kwargs}
if agent_id:
memory_entry["added_by_agent"] = agent_id
if team_id:
memory_entry["added_by_team"] = team_id
memories_data.memories.append(memory_entry)
self.save(user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id)
log_debug(f"UserMemoryStore.add_memory: added memory for user_id={user_id}")
return memory_id
async def aadd_memory(
self,
user_id: str,
memory: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> Optional[str]:
"""Async version of add_memory."""
memories_data = await self.aget(user_id=user_id)
if memories_data is None:
memories_data = self.schema(user_id=user_id)
memory_id = None
if hasattr(memories_data, "add_memory"):
memory_id = memories_data.add_memory(memory, **kwargs)
elif hasattr(memories_data, "memories"):
memory_id = str(uuid.uuid4())[:8]
memory_entry = {"id": memory_id, "content": memory, **kwargs}
if agent_id:
memory_entry["added_by_agent"] = agent_id
if team_id:
memory_entry["added_by_team"] = team_id
memories_data.memories.append(memory_entry)
await self.asave(user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id)
log_debug(f"UserMemoryStore.aadd_memory: added memory for user_id={user_id}")
return memory_id
# =========================================================================
# Extraction Operations
# =========================================================================
    def extract_and_save(
        self,
        messages: List["Message"],
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> str:
        """Extract memories from messages and save.

        Args:
            messages: Conversation messages to analyze.
            user_id: The unique user identifier.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            run_metrics: If provided, the extraction model's token usage is
                accumulated into it.

        Returns:
            Response from model.
        """
        if self.model is None:
            log_warning("UserMemoryStore.extract_and_save: no model provided")
            return "No model provided for memories extraction"
        if not self.db:
            log_warning("UserMemoryStore.extract_and_save: no database provided")
            return "No DB provided for memories store"
        log_debug("UserMemoryStore: Extracting memories", center=True)
        self.memories_updated = False
        # Load existing memories so the model can consolidate rather than duplicate.
        existing_memories = self.get(user_id=user_id)
        existing_data = self._memories_to_list(memories=existing_memories)
        input_string = self._messages_to_input_string(messages=messages)
        # The tools are closures over user/agent/team context; the model
        # mutates stored memories by calling them.
        tools = self._get_extraction_tools(
            user_id=user_id,
            input_string=input_string,
            existing_memories=existing_memories,
            agent_id=agent_id,
            team_id=team_id,
        )
        functions = self._build_functions_for_model(tools=tools)
        messages_for_model = [
            self._get_system_message(existing_data=existing_data),
            *messages,
        ]
        # Deep-copy so per-run state on the shared model instance does not
        # leak across concurrent extractions.
        model_copy = deepcopy(self.model)
        response = model_copy.response(
            messages=messages_for_model,
            tools=functions,
        )
        if run_metrics is not None and response.response_usage is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
        # Any tool execution implies the stored memories were changed.
        if response.tool_executions:
            self.memories_updated = True
        log_debug("UserMemoryStore: Extraction complete", center=True)
        return response.content or ("Memories updated" if self.memories_updated else "No updates needed")
    async def aextract_and_save(
        self,
        messages: List["Message"],
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> str:
        """Async version of extract_and_save.

        Args:
            messages: Conversation messages to analyze.
            user_id: The unique user identifier.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            run_metrics: If provided, the extraction model's token usage is
                accumulated into it.

        Returns:
            Response from model.
        """
        if self.model is None:
            log_warning("UserMemoryStore.aextract_and_save: no model provided")
            return "No model provided for memories extraction"
        if not self.db:
            log_warning("UserMemoryStore.aextract_and_save: no database provided")
            return "No DB provided for memories store"
        log_debug("UserMemoryStore: Extracting memories (async)", center=True)
        self.memories_updated = False
        # Load existing memories so the model can consolidate rather than duplicate.
        existing_memories = await self.aget(user_id=user_id)
        existing_data = self._memories_to_list(memories=existing_memories)
        input_string = self._messages_to_input_string(messages=messages)
        # Async tool closures over user/agent/team context.
        tools = await self._aget_extraction_tools(
            user_id=user_id,
            input_string=input_string,
            existing_memories=existing_memories,
            agent_id=agent_id,
            team_id=team_id,
        )
        functions = self._build_functions_for_model(tools=tools)
        messages_for_model = [
            self._get_system_message(existing_data=existing_data),
            *messages,
        ]
        # Deep-copy so per-run state on the shared model instance does not
        # leak across concurrent extractions.
        model_copy = deepcopy(self.model)
        response = await model_copy.aresponse(
            messages=messages_for_model,
            tools=functions,
        )
        if run_metrics is not None and response.response_usage is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
        # Any tool execution implies the stored memories were changed.
        if response.tool_executions:
            self.memories_updated = True
        log_debug("UserMemoryStore: Extraction complete", center=True)
        return response.content or ("Memories updated" if self.memories_updated else "No updates needed")
# =========================================================================
# Update Operations (called by agent tool)
# =========================================================================
def run_memories_update(
self,
task: str,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> str:
"""Run a memories update task.
Args:
task: The update task description.
user_id: The unique user identifier.
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
Returns:
Response from model.
"""
from agno.models.message import Message
messages = [Message(role="user", content=task)]
return self.extract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
async def arun_memories_update(
self,
task: str,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> str:
"""Async version of run_memories_update."""
from agno.models.message import Message
messages = [Message(role="user", content=task)]
return await self.aextract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
# =========================================================================
# Private Helpers
# =========================================================================
def _build_memories_id(self, user_id: str) -> str:
"""Build a unique memories ID."""
return f"memories_{user_id}"
def _memories_to_list(self, memories: Optional[Any]) -> List[dict]:
"""Convert memories to list of memory dicts for prompt."""
if not memories:
return []
result = []
if hasattr(memories, "memories") and memories.memories:
for mem in memories.memories:
if isinstance(mem, dict):
memory_id = mem.get("id", str(uuid.uuid4())[:8])
content = mem.get("content", str(mem))
else:
memory_id = str(uuid.uuid4())[:8]
content = str(mem)
result.append({"id": memory_id, "content": content})
return result
def _messages_to_input_string(self, messages: List["Message"]) -> str:
"""Convert messages to input string."""
if len(messages) == 1:
return messages[0].get_content_string()
else:
return "\n".join([f"{m.role}: {m.get_content_string()}" for m in messages if m.content])
def _build_functions_for_model(self, tools: List[Callable]) -> List["Function"]:
"""Convert callables to Functions for model."""
from agno.tools.function import Function
functions = []
seen_names = set()
for tool in tools:
try:
name = tool.__name__
if name in seen_names:
continue
seen_names.add(name)
func = Function.from_callable(tool, strict=True)
func.strict = True
functions.append(func)
log_debug(f"Added function {func.name}")
except Exception as e:
log_warning(f"Could not add function {tool}: {e}")
return functions
    def _get_system_message(
        self,
        existing_data: List[dict],
    ) -> "Message":
        """Build system message for memory extraction.

        Assembles the built-in prompt (philosophy, capture rules, writing
        style, current memories, enabled actions, examples). A configured
        ``system_message`` replaces the entire built-in prompt;
        ``instructions`` replaces only the capture section;
        ``additional_instructions`` are appended at the end.

        Args:
            existing_data: Normalized ``{'id', 'content'}`` dicts from
                ``_memories_to_list``, rendered so the model can UPDATE or
                DELETE by id.

        Returns:
            A system-role Message carrying the full extraction prompt.
        """
        from agno.models.message import Message

        # A user-provided system message overrides the built-in prompt entirely.
        if self.config.system_message is not None:
            return Message(role="system", content=self.config.system_message)
        system_prompt = dedent("""\
            You are building a memory of this user to enable personalized, contextual interactions.
            Your goal is NOT to create a database of facts, but to build working knowledge that helps an AI assistant engage naturally with this person - knowing their context, adapting to their preferences, and providing continuity across conversations.
            ## Memory Philosophy
            Think of memories as what a thoughtful colleague would remember after working with someone:
            - Their role and what they're working on
            - How they prefer to communicate
            - What matters to them and what frustrates them
            - Ongoing projects or situations worth tracking
            Memories should make future interactions feel informed and personal, not robotic or surveillance-like.
            ## Memory Categories
            Use memory tools for contextual information organized by relevance:
            **Work/Project Context** - What they're building, their role, current focus
            **Personal Context** - Preferences, communication style, background that shapes interactions
            **Top of Mind** - Active situations, ongoing challenges, time-sensitive context
            **Patterns** - How they work, what they value, recurring themes
            """)
        # Custom instructions or defaults
        capture_instructions = self.config.instructions or dedent("""\
            ## What To Capture
            **DO save:**
            - Role, company, and what they're working on
            - Communication preferences (brevity vs detail, technical depth, tone)
            - Goals, priorities, and current challenges
            - Preferences that affect how to help them (tools, frameworks, approaches)
            - Context that would be awkward to ask about again
            - Patterns in how they think and work
            **DO NOT save:**
            - Sensitive personal information (health conditions, financial details, relationships) unless directly relevant to helping them
            - One-off details unlikely to matter in future conversations
            - Information they'd find creepy to have remembered
            - Inferences or assumptions - only save what they've actually stated
            - Duplicates of existing memories (update instead)
            - Trivial preferences that don't affect interactions\
            """)
        system_prompt += capture_instructions
        system_prompt += dedent("""
            ## Writing Style
            Write memories as concise, factual statements in third person:
            **Good memories:**
            - "Founder and CEO of Acme, a 10-person AI startup"
            - "Prefers direct feedback without excessive caveats"
            - "Currently preparing for Series A fundraise, targeting $50M"
            - "Values simplicity over cleverness in code architecture"
            **Bad memories:**
            - "User mentioned they work at a company" (too vague)
            - "User seems to like technology" (obvious/not useful)
            - "Had a meeting yesterday" (not durable)
            - "User is stressed about fundraising" (inference without direct statement)
            ## Consolidation Over Accumulation
            **Critical:** Prefer updating existing memories over adding new ones.
            - If new information extends an existing memory, UPDATE it
            - If new information contradicts an existing memory, REPLACE it
            - If information is truly new and distinct, then add it
            - Periodically consolidate related memories into cohesive summaries
            - Delete memories that are no longer accurate or relevant
            Think of memory maintenance like note-taking: a few well-organized notes beat many scattered fragments.
            """)
        # Current memories section
        system_prompt += "## Current Memories\n\n"
        if existing_data:
            system_prompt += "Existing memories for this user:\n"
            for entry in existing_data:
                # Ids are rendered in brackets so the model can target them
                # in update_memory / delete_memory calls.
                system_prompt += f"- [{entry['id']}] {entry['content']}\n"
            system_prompt += dedent("""
                Review these before adding new ones:
                - UPDATE if new information extends or modifies an existing memory
                - DELETE if a memory is no longer accurate
                - Only ADD if the information is genuinely new and distinct
                """)
        else:
            system_prompt += "No existing memories. Extract what's worth remembering from this conversation.\n"
        # Available actions
        # Only document the tools that are actually enabled in config, so
        # the prompt matches the functions handed to the model.
        system_prompt += "\n## Available Actions\n\n"
        if self.config.enable_add_memory:
            system_prompt += "- `add_memory`: Add a new memory (only if genuinely new information)\n"
        if self.config.enable_update_memory:
            system_prompt += "- `update_memory`: Update existing memory with new/corrected information\n"
        if self.config.enable_delete_memory:
            system_prompt += "- `delete_memory`: Remove outdated or incorrect memory\n"
        if self.config.enable_clear_memories:
            system_prompt += "- `clear_all_memories`: Reset all memories (use rarely)\n"
        # Examples
        system_prompt += dedent("""
            ## Examples
            **Example 1: New user introduction**
            User: "I'm Sarah, I run engineering at Stripe. We're migrating to Kubernetes."
            → add_memory("Engineering lead at Stripe, currently migrating infrastructure to Kubernetes")
            **Example 2: Updating existing context**
            Existing memory: "Working on Series A fundraise"
            User: "We closed our Series A last week! $12M from Sequoia."
            → update_memory(id, "Closed $12M Series A from Sequoia")
            **Example 3: Learning preferences**
            User: "Can you skip the explanations and just give me the code?"
            → add_memory("Prefers concise responses with code over lengthy explanations")
            **Example 4: Nothing worth saving**
            User: "What's the weather like?"
            → No action needed (trivial, no lasting relevance)
            ## Final Guidance
            - Quality over quantity: 5 great memories beat 20 mediocre ones
            - Durability matters: save information that will still be relevant next month
            - Respect boundaries: when in doubt about whether to save something, don't
            - It's fine to do nothing if the conversation reveals nothing worth remembering\
            """)
        if self.config.additional_instructions:
            system_prompt += f"\n\n{self.config.additional_instructions}"
        return Message(role="system", content=system_prompt)
    def _get_extraction_tools(
        self,
        user_id: str,
        input_string: str,
        existing_memories: Optional[Any] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> List[Callable]:
        """Get sync extraction tools for the model.

        Each tool is a closure capturing user/agent/team context, so the
        model only supplies memory content and ids - never routing
        information. Which tools are returned depends on the ``enable_*``
        config flags. NOTE: the nested functions' docstrings are shipped to
        the model as tool descriptions - do not edit them casually.
        """
        functions: List[Callable] = []
        if self.config.enable_add_memory:

            def add_memory(memory: str) -> str:
                """Save a new memory about this user.
                Only add genuinely new information that will help personalize future interactions.
                Before adding, check if this extends an existing memory (use update_memory instead).
                Args:
                    memory: Concise, factual statement in third person.
                        Good: "Senior engineer at Stripe, working on payment infrastructure"
                        Bad: "User works at a company" (too vague)
                Returns:
                    Confirmation message.
                """
                try:
                    memories_data = self.get(user_id=user_id)
                    if memories_data is None:
                        memories_data = self.schema(user_id=user_id)
                    if hasattr(memories_data, "memories"):
                        # Short random id; "source" keeps a snippet of the originating input.
                        memory_id = str(uuid.uuid4())[:8]
                        memory_entry = {
                            "id": memory_id,
                            "content": memory,
                            "source": input_string[:200] if input_string else None,
                        }
                        if agent_id:
                            memory_entry["added_by_agent"] = agent_id
                        if team_id:
                            memory_entry["added_by_team"] = team_id
                        memories_data.memories.append(memory_entry)
                    self.save(user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id)
                    log_debug(f"Memory added: {memory[:50]}...")
                    return f"Memory saved: {memory}"
                except Exception as e:
                    log_warning(f"Error adding memory: {e}")
                    return f"Error: {e}"

            functions.append(add_memory)
        if self.config.enable_update_memory:

            def update_memory(memory_id: str, memory: str) -> str:
                """Update an existing memory with new or corrected information.
                Prefer updating over adding when new information extends or modifies
                something already stored. This keeps memories consolidated and accurate.
                Args:
                    memory_id: The ID of the memory to update (shown in brackets like [abc123]).
                    memory: The updated memory content. Should be a complete replacement,
                        not a diff or addition.
                Returns:
                    Confirmation message.
                """
                try:
                    memories_data = self.get(user_id=user_id)
                    if memories_data is None:
                        return "No memories found"
                    if hasattr(memories_data, "memories"):
                        # Linear scan for the entry; only persists when a match is found.
                        for mem in memories_data.memories:
                            if isinstance(mem, dict) and mem.get("id") == memory_id:
                                mem["content"] = memory
                                mem["source"] = input_string[:200] if input_string else None
                                if agent_id:
                                    mem["updated_by_agent"] = agent_id
                                if team_id:
                                    mem["updated_by_team"] = team_id
                                self.save(user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id)
                                log_debug(f"Memory updated: {memory_id}")
                                return f"Memory updated: {memory}"
                        return f"Memory {memory_id} not found"
                    return "No memories field"
                except Exception as e:
                    log_warning(f"Error updating memory: {e}")
                    return f"Error: {e}"

            functions.append(update_memory)
        if self.config.enable_delete_memory:

            def delete_memory(memory_id: str) -> str:
                """Remove a memory that is outdated, incorrect, or no longer relevant.
                Delete when:
                - Information is no longer accurate (e.g., they changed jobs)
                - The memory was a misunderstanding
                - It's been superseded by a more complete memory
                Args:
                    memory_id: The ID of the memory to delete (shown in brackets like [abc123]).
                Returns:
                    Confirmation message.
                """
                try:
                    memories_data = self.get(user_id=user_id)
                    if memories_data is None:
                        return "No memories found"
                    if hasattr(memories_data, "memories"):
                        # Rebuild the list without the target entry; a length
                        # change tells us whether anything was removed.
                        original_len = len(memories_data.memories)
                        memories_data.memories = [
                            mem
                            for mem in memories_data.memories
                            if not (isinstance(mem, dict) and mem.get("id") == memory_id)
                        ]
                        if len(memories_data.memories) < original_len:
                            self.save(user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id)
                            log_debug(f"Memory deleted: {memory_id}")
                            return f"Memory {memory_id} deleted"
                        return f"Memory {memory_id} not found"
                    return "No memories field"
                except Exception as e:
                    log_warning(f"Error deleting memory: {e}")
                    return f"Error: {e}"

            functions.append(delete_memory)
        if self.config.enable_clear_memories:

            def clear_all_memories() -> str:
                """Clear all memories for this user. Use sparingly.
                Returns:
                    Confirmation message.
                """
                try:
                    self.clear(user_id=user_id, agent_id=agent_id, team_id=team_id)
                    log_debug("All memories cleared")
                    return "All memories cleared"
                except Exception as e:
                    log_warning(f"Error clearing memories: {e}")
                    return f"Error: {e}"

            functions.append(clear_all_memories)
        return functions
    async def _aget_extraction_tools(
        self,
        user_id: str,
        input_string: str,
        existing_memories: Optional[Any] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> List[Callable]:
        """Get async extraction tools for the model.

        Async mirror of ``_get_extraction_tools``: same closures and same
        tool descriptions, but backed by ``aget``/``asave``/``aclear``.
        NOTE: the nested functions' docstrings are shipped to the model as
        tool descriptions - do not edit them casually.
        """
        functions: List[Callable] = []
        if self.config.enable_add_memory:

            async def add_memory(memory: str) -> str:
                """Save a new memory about this user.
                Only add genuinely new information that will help personalize future interactions.
                Before adding, check if this extends an existing memory (use update_memory instead).
                Args:
                    memory: Concise, factual statement in third person.
                        Good: "Senior engineer at Stripe, working on payment infrastructure"
                        Bad: "User works at a company" (too vague)
                Returns:
                    Confirmation message.
                """
                try:
                    memories_data = await self.aget(user_id=user_id)
                    if memories_data is None:
                        memories_data = self.schema(user_id=user_id)
                    if hasattr(memories_data, "memories"):
                        # Short random id; "source" keeps a snippet of the originating input.
                        memory_id = str(uuid.uuid4())[:8]
                        memory_entry = {
                            "id": memory_id,
                            "content": memory,
                            "source": input_string[:200] if input_string else None,
                        }
                        if agent_id:
                            memory_entry["added_by_agent"] = agent_id
                        if team_id:
                            memory_entry["added_by_team"] = team_id
                        memories_data.memories.append(memory_entry)
                    await self.asave(user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id)
                    log_debug(f"Memory added: {memory[:50]}...")
                    return f"Memory saved: {memory}"
                except Exception as e:
                    log_warning(f"Error adding memory: {e}")
                    return f"Error: {e}"

            functions.append(add_memory)
        if self.config.enable_update_memory:

            async def update_memory(memory_id: str, memory: str) -> str:
                """Update an existing memory with new or corrected information.
                Prefer updating over adding when new information extends or modifies
                something already stored. This keeps memories consolidated and accurate.
                Args:
                    memory_id: The ID of the memory to update (shown in brackets like [abc123]).
                    memory: The updated memory content. Should be a complete replacement,
                        not a diff or addition.
                Returns:
                    Confirmation message.
                """
                try:
                    memories_data = await self.aget(user_id=user_id)
                    if memories_data is None:
                        return "No memories found"
                    if hasattr(memories_data, "memories"):
                        # Linear scan for the entry; only persists when a match is found.
                        for mem in memories_data.memories:
                            if isinstance(mem, dict) and mem.get("id") == memory_id:
                                mem["content"] = memory
                                mem["source"] = input_string[:200] if input_string else None
                                if agent_id:
                                    mem["updated_by_agent"] = agent_id
                                if team_id:
                                    mem["updated_by_team"] = team_id
                                await self.asave(
                                    user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id
                                )
                                log_debug(f"Memory updated: {memory_id}")
                                return f"Memory updated: {memory}"
                        return f"Memory {memory_id} not found"
                    return "No memories field"
                except Exception as e:
                    log_warning(f"Error updating memory: {e}")
                    return f"Error: {e}"

            functions.append(update_memory)
        if self.config.enable_delete_memory:

            async def delete_memory(memory_id: str) -> str:
                """Remove a memory that is outdated, incorrect, or no longer relevant.
                Delete when:
                - Information is no longer accurate (e.g., they changed jobs)
                - The memory was a misunderstanding
                - It's been superseded by a more complete memory
                Args:
                    memory_id: The ID of the memory to delete (shown in brackets like [abc123]).
                Returns:
                    Confirmation message.
                """
                try:
                    memories_data = await self.aget(user_id=user_id)
                    if memories_data is None:
                        return "No memories found"
                    if hasattr(memories_data, "memories"):
                        # Rebuild the list without the target entry; a length
                        # change tells us whether anything was removed.
                        original_len = len(memories_data.memories)
                        memories_data.memories = [
                            mem
                            for mem in memories_data.memories
                            if not (isinstance(mem, dict) and mem.get("id") == memory_id)
                        ]
                        if len(memories_data.memories) < original_len:
                            await self.asave(
                                user_id=user_id, memories=memories_data, agent_id=agent_id, team_id=team_id
                            )
                            log_debug(f"Memory deleted: {memory_id}")
                            return f"Memory {memory_id} deleted"
                        return f"Memory {memory_id} not found"
                    return "No memories field"
                except Exception as e:
                    log_warning(f"Error deleting memory: {e}")
                    return f"Error: {e}"

            functions.append(delete_memory)
        if self.config.enable_clear_memories:

            async def clear_all_memories() -> str:
                """Clear all memories for this user. Use sparingly.
                Returns:
                    Confirmation message.
                """
                try:
                    await self.aclear(user_id=user_id, agent_id=agent_id, team_id=team_id)
                    log_debug("All memories cleared")
                    return "All memories cleared"
                except Exception as e:
                    log_warning(f"Error clearing memories: {e}")
                    return f"Error: {e}"

            functions.append(clear_all_memories)
        return functions
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
"""String representation for debugging."""
has_db = self.db is not None
has_model = self.model is not None
return (
f"UserMemoryStore("
f"mode={self.config.mode.value}, "
f"db={has_db}, "
f"model={has_model}, "
f"enable_agent_tools={self.config.enable_agent_tools})"
)
def print(self, user_id: str, *, raw: bool = False) -> None:
"""Print formatted memories.
Args:
user_id: The user to print memories for.
raw: If True, print raw dict using pprint instead of formatted panel.
Example:
>>> store.print(user_id="alice@example.com")
╭──────────────── Memories ─────────────────╮
│ Memories: │
│ [dim][a1b2c3d4][/dim] Loves Python │
│ [dim][e5f6g7h8][/dim] Works at Anthropic│
╰─────────────── alice@example.com ─────────╯
"""
from agno.learn.utils import print_panel
memories_data = self.get(user_id=user_id)
lines = []
if memories_data:
if hasattr(memories_data, "memories") and memories_data.memories:
lines.append("Memories:")
for mem in memories_data.memories:
if isinstance(mem, dict):
mem_id = mem.get("id", "?")
content = mem.get("content", str(mem))
else:
mem_id = "?"
content = str(mem)
lines.append(f" [dim]\\[{mem_id}][/dim] {content}")
print_panel(
title="Memories",
subtitle=user_id,
lines=lines,
empty_message="No memories",
raw_data=memories_data,
raw=raw,
)
# Backwards compatibility alias: ``MemoriesStore`` is the pre-rename public
# name; keep it importable so existing callers do not break.
MemoriesStore = UserMemoryStore
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/user_memory.py",
"license": "Apache License 2.0",
"lines": 1221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/stores/user_profile.py | """
User Profile Store
==================
Storage backend for User Profile learning type.
Stores long-term structured profile fields about users that persist across sessions.
Key Features:
- Structured profile fields (name, preferred_name, and custom fields)
- Background extraction from conversations
- Agent tools for in-conversation updates
- Multi-user isolation (each user has their own profile)
Profile Fields (structured):
- name, preferred_name, and custom fields from extended schemas
- Updated via `update_profile` tool
- For concrete facts that fit defined schema fields
Note: For unstructured memories, use UserMemoryStore instead.
Scope:
- Profiles are retrieved by user_id only
- agent_id/team_id stored in DB columns for audit trail
Supported Modes:
- ALWAYS: Automatic extraction after conversations
- AGENTIC: Agent calls update_user_profile tool directly
"""
import inspect
from copy import deepcopy
from dataclasses import dataclass, field
from dataclasses import fields as dc_fields
from os import getenv
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast
from agno.learn.config import LearningMode, UserProfileConfig
from agno.learn.schemas import UserProfile
from agno.learn.stores.protocol import LearningStore
from agno.learn.utils import from_dict_safe, to_dict_safe
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
if TYPE_CHECKING:
from agno.metrics import RunMetrics
try:
from agno.db.base import AsyncBaseDb, BaseDb
from agno.models.message import Message
from agno.tools.function import Function
except ImportError:
pass
@dataclass
class UserProfileStore(LearningStore):
"""Storage backend for User Profile learning type.
Profiles are retrieved by user_id only - all agents sharing the same DB
will see the same profile for a given user. agent_id and team_id are
stored for audit purposes in DB columns.
Profile Fields (structured): name, preferred_name, and any custom
fields added when extending the schema. Updated via `update_profile` tool.
Note: For unstructured memories, use UserMemoryStore instead.
Args:
config: UserProfileConfig with all settings including db and model.
debug_mode: Enable debug logging.
"""
config: UserProfileConfig = field(default_factory=UserProfileConfig)
debug_mode: bool = False
# State tracking (internal)
profile_updated: bool = field(default=False, init=False)
_schema: Any = field(default=None, init=False)
def __post_init__(self):
self._schema = self.config.schema or UserProfile
if self.config.mode == LearningMode.PROPOSE:
log_warning("UserProfileStore does not support PROPOSE mode.")
elif self.config.mode == LearningMode.HITL:
log_warning("UserProfileStore does not support HITL mode.")
# =========================================================================
# LearningStore Protocol Implementation
# =========================================================================
@property
def learning_type(self) -> str:
"""Unique identifier for this learning type."""
return "user_profile"
@property
def schema(self) -> Any:
"""Schema class used for profiles."""
return self._schema
def recall(self, user_id: str, **kwargs) -> Optional[Any]:
"""Retrieve user profile from storage.
Args:
user_id: The user to retrieve profile for (required).
**kwargs: Additional context (ignored).
Returns:
User profile, or None if not found.
"""
if not user_id:
return None
return self.get(user_id=user_id)
async def arecall(self, user_id: str, **kwargs) -> Optional[Any]:
"""Async version of recall."""
if not user_id:
return None
return await self.aget(user_id=user_id)
    def process(
        self,
        messages: List[Any],
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Extract user profile from messages.

        Args:
            messages: Conversation messages to analyze.
            user_id: The user to update profile for (required).
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            **kwargs: Additional context; ``run_metrics`` is forwarded to
                extract_and_save, everything else is ignored.
        """
        # process only supported in ALWAYS mode
        # for programmatic extraction, use extract_and_save directly
        if self.config.mode != LearningMode.ALWAYS:
            return
        if not user_id or not messages:
            return
        self.extract_and_save(
            messages=messages,
            user_id=user_id,
            agent_id=agent_id,
            team_id=team_id,
            run_metrics=kwargs.get("run_metrics"),
        )
async def aprocess(
self,
messages: List[Any],
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Async version of process."""
if self.config.mode != LearningMode.ALWAYS:
return
if not user_id or not messages:
return
await self.aextract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
run_metrics=kwargs.get("run_metrics"),
)
def build_context(self, data: Any) -> str:
"""Build context for the agent.
Formats user profile data for injection into the agent's system prompt.
Designed to enable natural, personalized responses without meta-commentary
about memory systems.
Args:
data: User profile data from recall().
Returns:
Context string to inject into the agent's system prompt.
"""
# Build tool documentation based on what's enabled
tool_docs = self._build_tool_documentation()
if not data:
if self._should_expose_tools:
return (
dedent("""\
<user_profile>
No profile information saved about this user yet.
""")
+ tool_docs
+ dedent("""
</user_profile>""")
)
return ""
# Build profile fields section
profile_parts = []
updateable_fields = self._get_updateable_fields()
for field_name in updateable_fields:
value = getattr(data, field_name, None)
if value:
profile_parts.append(f"{field_name.replace('_', ' ').title()}: {value}")
if not profile_parts:
if self._should_expose_tools:
return (
dedent("""\
<user_profile>
No profile information saved about this user yet.
""")
+ tool_docs
+ dedent("""
</user_profile>""")
)
return ""
context = "<user_profile>\n"
context += "\n".join(profile_parts) + "\n"
context += dedent("""
<profile_application_guidelines>
Apply this knowledge naturally - respond as if you inherently know this information,
exactly as a colleague would recall shared history without narrating their thought process.
- Use profile information to personalize responses appropriately
- Never say "based on your profile" or "I see that" - just use the information naturally
- Current conversation always takes precedence over stored profile data
</profile_application_guidelines>""")
if self._should_expose_tools:
context += (
dedent("""
<profile_updates>
""")
+ tool_docs
+ dedent("""
</profile_updates>""")
)
context += "\n</user_profile>"
return context
def _build_tool_documentation(self) -> str:
"""Build documentation for available profile tools.
Returns:
String documenting which tools are available and when to use them.
"""
docs = []
if self.config.agent_can_update_profile:
# Get the actual field names to document
updateable_fields = self._get_updateable_fields()
if updateable_fields:
field_names = ", ".join(updateable_fields.keys())
docs.append(
f"Use `update_profile` to set structured profile fields ({field_names}) "
"when the user explicitly shares this information."
)
return "\n\n".join(docs) if docs else ""
def get_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Get tools to expose to agent.
Args:
user_id: The user context (required for tool to work).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
**kwargs: Additional context (ignored).
Returns:
List containing update_profile tool if enabled.
"""
if not user_id or not self._should_expose_tools:
return []
return self.get_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
async def aget_tools(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Async version of get_tools."""
if not user_id or not self._should_expose_tools:
return []
return await self.aget_agent_tools(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
    @property
    def was_updated(self) -> bool:
        """True if the last extract/process operation changed the stored profile."""
        return self.profile_updated
    @property
    def _should_expose_tools(self) -> bool:
        """Check if tools should be exposed to the agent.

        Returns True if either:
        - mode is AGENTIC (tools are the primary way to update memory), OR
        - enable_agent_tools is explicitly True
        """
        return self.config.mode == LearningMode.AGENTIC or self.config.enable_agent_tools
# =========================================================================
# Properties
# =========================================================================
    @property
    def db(self) -> Optional[Union["BaseDb", "AsyncBaseDb"]]:
        """Database backend used for persistence (taken from config; may be None)."""
        return self.config.db
    @property
    def model(self):
        """Model used for profile extraction (taken from config; may be None)."""
        return self.config.model
# =========================================================================
# Debug/Logging
# =========================================================================
def set_log_level(self):
"""Set log level based on debug_mode or environment variable."""
if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
self.debug_mode = True
set_log_level_to_debug()
else:
set_log_level_to_info()
# =========================================================================
# Schema Field Introspection
# =========================================================================
def _get_updateable_fields(self) -> Dict[str, Dict[str, Any]]:
"""Get schema fields that can be updated via update_profile tool.
Returns:
Dict mapping field name to field info including description.
Excludes internal fields (user_id, memories, timestamps, etc).
"""
# Use schema method if available
if hasattr(self.schema, "get_updateable_fields"):
return self.schema.get_updateable_fields()
# Fallback: introspect dataclass fields
skip = {"user_id", "memories", "created_at", "updated_at", "agent_id", "team_id"}
result = {}
for f in dc_fields(self.schema):
if f.name in skip:
continue
# Skip fields marked as internal
if f.metadata.get("internal"):
continue
result[f.name] = {
"type": f.type,
"description": f.metadata.get("description", f"User's {f.name.replace('_', ' ')}"),
}
return result
    def _build_update_profile_tool(
        self,
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> Optional[Callable]:
        """Build a typed update_profile tool dynamically from schema.

        Creates a function with explicit parameters for each schema field,
        giving the LLM clear typed parameters to work with.

        Args:
            user_id: The user whose profile the tool will update.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).

        Returns:
            The update_profile callable, or None if the schema exposes no
            updateable fields.
        """
        updateable = self._get_updateable_fields()
        if not updateable:
            return None
        # Build parameter list for signature
        params = [
            inspect.Parameter(
                name=field_name,
                kind=inspect.Parameter.KEYWORD_ONLY,
                default=None,
                annotation=Optional[str],  # Simplified to str for LLM compatibility
            )
            for field_name in updateable
        ]
        # Build docstring with field descriptions
        fields_doc = "\n".join(f"    {name}: {info['description']}" for name, info in updateable.items())
        docstring = f"""Update user profile fields.
        Use this to update structured information about the user.
        Only provide fields you want to update.
        Args:
        {fields_doc}
        Returns:
            Confirmation of updated fields.
        Examples:
            update_profile(name="Alice")
            update_profile(name="Bob", preferred_name="Bobby")
        """
        # Capture self and IDs in closure
        store = self

        def update_profile(**kwargs) -> str:
            try:
                profile = store.get(user_id=user_id)
                if profile is None:
                    # No stored profile yet - start from a blank schema instance
                    profile = store.schema(user_id=user_id)
                changed = []
                for field_name, value in kwargs.items():
                    # None means "leave unchanged"; unknown field names are ignored
                    if value is not None and field_name in updateable:
                        setattr(profile, field_name, value)
                        changed.append(f"{field_name}={value}")
                if changed:
                    store.save(
                        user_id=user_id,
                        profile=profile,
                        agent_id=agent_id,
                        team_id=team_id,
                    )
                    log_debug(f"Profile fields updated: {', '.join(changed)}")
                    return f"Profile updated: {', '.join(changed)}"
                return "No fields provided to update"
            except Exception as e:
                log_warning(f"Error updating profile: {e}")
                return f"Error: {e}"

        # Set the signature, docstring, and annotations
        # Use cast to satisfy mypy - all Python functions have these attributes
        func = cast(Any, update_profile)
        func.__signature__ = inspect.Signature(params)
        func.__doc__ = docstring
        func.__name__ = "update_profile"
        func.__annotations__ = {field_name: Optional[str] for field_name in updateable}
        func.__annotations__["return"] = str
        return update_profile
    async def _abuild_update_profile_tool(
        self,
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> Optional[Callable]:
        """Async version of _build_update_profile_tool.

        Returns an async update_profile callable backed by aget/asave, or None
        if the schema exposes no updateable fields.
        """
        updateable = self._get_updateable_fields()
        if not updateable:
            return None
        # One keyword-only string parameter per updateable schema field
        params = [
            inspect.Parameter(
                name=field_name,
                kind=inspect.Parameter.KEYWORD_ONLY,
                default=None,
                annotation=Optional[str],
            )
            for field_name in updateable
        ]
        fields_doc = "\n".join(f"    {name}: {info['description']}" for name, info in updateable.items())
        docstring = f"""Update user profile fields.
        Use this to update structured information about the user.
        Only provide fields you want to update.
        Args:
        {fields_doc}
        Returns:
            Confirmation of updated fields.
        """
        # Capture self and IDs in closure
        store = self

        async def update_profile(**kwargs) -> str:
            try:
                profile = await store.aget(user_id=user_id)
                if profile is None:
                    # No stored profile yet - start from a blank schema instance
                    profile = store.schema(user_id=user_id)
                changed = []
                for field_name, value in kwargs.items():
                    # None means "leave unchanged"; unknown field names are ignored
                    if value is not None and field_name in updateable:
                        setattr(profile, field_name, value)
                        changed.append(f"{field_name}={value}")
                if changed:
                    await store.asave(
                        user_id=user_id,
                        profile=profile,
                        agent_id=agent_id,
                        team_id=team_id,
                    )
                    log_debug(f"Profile fields updated: {', '.join(changed)}")
                    return f"Profile updated: {', '.join(changed)}"
                return "No fields provided to update"
            except Exception as e:
                log_warning(f"Error updating profile: {e}")
                return f"Error: {e}"

        # Set the signature, docstring, and annotations
        # Use cast to satisfy mypy - all Python functions have these attributes
        func = cast(Any, update_profile)
        func.__signature__ = inspect.Signature(params)
        func.__doc__ = docstring
        func.__name__ = "update_profile"
        func.__annotations__ = {field_name: Optional[str] for field_name in updateable}
        func.__annotations__["return"] = str
        return update_profile
# =========================================================================
# Agent Tools
# =========================================================================
def get_agent_tools(
self,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> List[Callable]:
"""Get the tools to expose to the agent.
Args:
user_id: The user to update (required).
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
Returns:
List of callable tools based on config settings.
"""
tools = []
# Profile field update tool
if self.config.agent_can_update_profile:
update_profile = self._build_update_profile_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
if update_profile:
tools.append(update_profile)
return tools
async def aget_agent_tools(
self,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> List[Callable]:
"""Get the async tools to expose to the agent."""
tools = []
if self.config.agent_can_update_profile:
update_profile = await self._abuild_update_profile_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
if update_profile:
tools.append(update_profile)
return tools
# =========================================================================
# Read Operations
# =========================================================================
def get(self, user_id: str) -> Optional[Any]:
"""Retrieve user profile by user_id.
Args:
user_id: The unique user identifier.
Returns:
User profile as schema instance, or None if not found.
"""
if not self.db:
return None
try:
result = self.db.get_learning(
learning_type=self.learning_type,
user_id=user_id,
)
if result and result.get("content"): # type: ignore[union-attr]
return from_dict_safe(self.schema, result["content"]) # type: ignore[index]
return None
except Exception as e:
log_debug(f"UserProfileStore.get failed for user_id={user_id}: {e}")
return None
async def aget(self, user_id: str) -> Optional[Any]:
"""Async version of get."""
if not self.db:
return None
try:
if isinstance(self.db, AsyncBaseDb):
result = await self.db.get_learning(
learning_type=self.learning_type,
user_id=user_id,
)
else:
result = self.db.get_learning(
learning_type=self.learning_type,
user_id=user_id,
)
if result and result.get("content"):
return from_dict_safe(self.schema, result["content"])
return None
except Exception as e:
log_debug(f"UserProfileStore.aget failed for user_id={user_id}: {e}")
return None
# =========================================================================
# Write Operations
# =========================================================================
def save(
self,
user_id: str,
profile: Any,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Save or update user profile.
Args:
user_id: The unique user identifier.
profile: The profile data to save.
agent_id: Agent context (stored in DB column for audit).
team_id: Team context (stored in DB column for audit).
"""
if not self.db or not profile:
return
try:
content = to_dict_safe(profile)
if not content:
return
self.db.upsert_learning(
id=self._build_profile_id(user_id=user_id),
learning_type=self.learning_type,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
log_debug(f"UserProfileStore.save: saved profile for user_id={user_id}")
except Exception as e:
log_debug(f"UserProfileStore.save failed for user_id={user_id}: {e}")
async def asave(
self,
user_id: str,
profile: Any,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of save."""
if not self.db or not profile:
return
try:
content = to_dict_safe(profile)
if not content:
return
if isinstance(self.db, AsyncBaseDb):
await self.db.upsert_learning(
id=self._build_profile_id(user_id=user_id),
learning_type=self.learning_type,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
else:
self.db.upsert_learning(
id=self._build_profile_id(user_id=user_id),
learning_type=self.learning_type,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
content=content,
)
log_debug(f"UserProfileStore.asave: saved profile for user_id={user_id}")
except Exception as e:
log_debug(f"UserProfileStore.asave failed for user_id={user_id}: {e}")
# =========================================================================
# Delete Operations
# =========================================================================
def delete(self, user_id: str) -> bool:
"""Delete a user profile.
Args:
user_id: The unique user identifier.
Returns:
True if deleted, False otherwise.
"""
if not self.db:
return False
try:
profile_id = self._build_profile_id(user_id=user_id)
return self.db.delete_learning(id=profile_id) # type: ignore[return-value]
except Exception as e:
log_debug(f"UserProfileStore.delete failed for user_id={user_id}: {e}")
return False
async def adelete(self, user_id: str) -> bool:
"""Async version of delete."""
if not self.db:
return False
try:
profile_id = self._build_profile_id(user_id=user_id)
if isinstance(self.db, AsyncBaseDb):
return await self.db.delete_learning(id=profile_id)
else:
return self.db.delete_learning(id=profile_id)
except Exception as e:
log_debug(f"UserProfileStore.adelete failed for user_id={user_id}: {e}")
return False
def clear(
self,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Clear user profile (reset to empty).
Args:
user_id: The unique user identifier.
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
"""
if not self.db:
return
try:
empty_profile = self.schema(user_id=user_id)
self.save(user_id=user_id, profile=empty_profile, agent_id=agent_id, team_id=team_id)
log_debug(f"UserProfileStore.clear: cleared profile for user_id={user_id}")
except Exception as e:
log_debug(f"UserProfileStore.clear failed for user_id={user_id}: {e}")
async def aclear(
self,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of clear."""
if not self.db:
return
try:
empty_profile = self.schema(user_id=user_id)
await self.asave(user_id=user_id, profile=empty_profile, agent_id=agent_id, team_id=team_id)
log_debug(f"UserProfileStore.aclear: cleared profile for user_id={user_id}")
except Exception as e:
log_debug(f"UserProfileStore.aclear failed for user_id={user_id}: {e}")
# =========================================================================
# Extraction Operations
# =========================================================================
    def extract_and_save(
        self,
        messages: List["Message"],
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> str:
        """Extract user profile information from messages and save.

        Args:
            messages: Conversation messages to analyze.
            user_id: The unique user identifier.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            run_metrics: Optional metrics accumulator; model usage is added to
                it when both it and the response usage are present.

        Returns:
            Response from model.
        """
        # Bail out early when extraction can't run; both paths return a
        # human-readable explanation rather than raising.
        if self.model is None:
            log_warning("UserProfileStore.extract_and_save: no model provided")
            return "No model provided for user profile extraction"
        if not self.db:
            log_warning("UserProfileStore.extract_and_save: no database provided")
            return "No DB provided for user profile store"
        log_debug("UserProfileStore: Extracting user profile", center=True)
        self.profile_updated = False
        # Existing profile is shown to the model so it can avoid duplicates
        existing_profile = self.get(user_id=user_id)
        tools = self._get_extraction_tools(
            user_id=user_id,
            existing_profile=existing_profile,
            agent_id=agent_id,
            team_id=team_id,
        )
        functions = self._build_functions_for_model(tools=tools)
        messages_for_model = [
            self._get_system_message(existing_profile=existing_profile),
            *messages,
        ]
        # Run against a deepcopy so this call does not mutate the shared model instance
        model_copy = deepcopy(self.model)
        response = model_copy.response(
            messages=messages_for_model,
            tools=functions,
        )
        if run_metrics is not None and response.response_usage is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
        # Any executed tool call means the profile was touched
        if response.tool_executions:
            self.profile_updated = True
        log_debug("UserProfileStore: Extraction complete", center=True)
        return response.content or ("Profile updated" if self.profile_updated else "No updates needed")
    async def aextract_and_save(
        self,
        messages: List["Message"],
        user_id: str,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> str:
        """Async version of extract_and_save.

        Args:
            messages: Conversation messages to analyze.
            user_id: The unique user identifier.
            agent_id: Agent context (stored for audit).
            team_id: Team context (stored for audit).
            run_metrics: Optional metrics accumulator for model usage.

        Returns:
            Response from model.
        """
        # Bail out early when extraction can't run
        if self.model is None:
            log_warning("UserProfileStore.aextract_and_save: no model provided")
            return "No model provided for user profile extraction"
        if not self.db:
            log_warning("UserProfileStore.aextract_and_save: no database provided")
            return "No DB provided for user profile store"
        log_debug("UserProfileStore: Extracting user profile (async)", center=True)
        self.profile_updated = False
        # Existing profile is shown to the model so it can avoid duplicates
        existing_profile = await self.aget(user_id=user_id)
        tools = await self._aget_extraction_tools(
            user_id=user_id,
            existing_profile=existing_profile,
            agent_id=agent_id,
            team_id=team_id,
        )
        functions = self._build_functions_for_model(tools=tools)
        messages_for_model = [
            self._get_system_message(existing_profile=existing_profile),
            *messages,
        ]
        # Run against a deepcopy so this call does not mutate the shared model instance
        model_copy = deepcopy(self.model)
        response = await model_copy.aresponse(
            messages=messages_for_model,
            tools=functions,
        )
        if run_metrics is not None and response.response_usage is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(response, model_copy, ModelType.LEARNING_MODEL, run_metrics)
        # Any executed tool call means the profile was touched
        if response.tool_executions:
            self.profile_updated = True
        log_debug("UserProfileStore: Extraction complete", center=True)
        return response.content or ("Profile updated" if self.profile_updated else "No updates needed")
# =========================================================================
# Update Operations (called by agent tool)
# =========================================================================
def run_user_profile_update(
self,
task: str,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> str:
"""Run a user profile update task.
Args:
task: The update task description.
user_id: The unique user identifier.
agent_id: Agent context (stored for audit).
team_id: Team context (stored for audit).
Returns:
Response from model.
"""
from agno.models.message import Message
messages = [Message(role="user", content=task)]
return self.extract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
async def arun_user_profile_update(
self,
task: str,
user_id: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> str:
"""Async version of run_user_profile_update."""
from agno.models.message import Message
messages = [Message(role="user", content=task)]
return await self.aextract_and_save(
messages=messages,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
# =========================================================================
# Private Helpers
# =========================================================================
def _build_profile_id(self, user_id: str) -> str:
"""Build a unique profile ID."""
return f"user_profile_{user_id}"
def _messages_to_input_string(self, messages: List["Message"]) -> str:
"""Convert messages to input string."""
if len(messages) == 1:
return messages[0].get_content_string()
else:
return "\n".join([f"{m.role}: {m.get_content_string()}" for m in messages if m.content])
    def _build_functions_for_model(self, tools: List[Callable]) -> List["Function"]:
        """Convert callables to Functions for model.

        Deduplicates by function __name__ (first occurrence wins) and skips any
        tool that fails conversion, logging a warning instead of raising.

        Args:
            tools: Callables to wrap.

        Returns:
            List of strict-mode Function wrappers.
        """
        from agno.tools.function import Function

        functions = []
        seen_names = set()
        for tool in tools:
            try:
                name = tool.__name__
                # First tool with a given name wins; later duplicates are dropped
                if name in seen_names:
                    continue
                seen_names.add(name)
                func = Function.from_callable(tool, strict=True)
                func.strict = True
                functions.append(func)
                log_debug(f"Added function {func.name}")
            except Exception as e:
                log_warning(f"Could not add function {tool}: {e}")
        return functions
    def _get_system_message(
        self,
        existing_profile: Optional[Any] = None,
    ) -> "Message":
        """Build system message for profile extraction.

        Guides the model to extract structured profile information from conversations.

        Args:
            existing_profile: Current stored profile; populated values are shown
                to the model so it can avoid duplicate updates.

        Returns:
            System Message for the extraction model.
        """
        from agno.models.message import Message

        # A user-supplied system message replaces all generated content
        if self.config.system_message is not None:
            return Message(role="system", content=self.config.system_message)
        profile_fields = self._get_updateable_fields()
        system_prompt = dedent("""\
            You are extracting structured profile information about the user.
            Your goal is to identify and save key identity information that fits the defined profile fields.
            Only save information the user explicitly states - do not make inferences.
            """)
        # Profile Fields section
        if profile_fields and self.config.enable_update_profile:
            system_prompt += dedent("""\
                ## Profile Fields
                Use `update_profile` to save structured identity information:
                """)
            for field_name, field_info in profile_fields.items():
                description = field_info.get("description", f"User's {field_name.replace('_', ' ')}")
                system_prompt += f"- **{field_name}**: {description}\n"
            # Show currently populated values so the model can skip unchanged fields
            if existing_profile:
                has_values = False
                for field_name in profile_fields:
                    if getattr(existing_profile, field_name, None):
                        has_values = True
                        break
                if has_values:
                    system_prompt += "\nCurrent values:\n"
                    for field_name in profile_fields:
                        value = getattr(existing_profile, field_name, None)
                        if value:
                            system_prompt += f"- {field_name}: {value}\n"
            system_prompt += "\n"
        # Custom instructions or defaults
        profile_capture_instructions = self.config.instructions or dedent("""\
            ## Guidelines
            **DO save:**
            - Name and preferred name when explicitly stated
            - Other profile fields when the user provides the information
            **DO NOT save:**
            - Information that doesn't fit the defined profile fields
            - Inferences or assumptions - only save what's explicitly stated
            - Duplicate information that matches existing values
            """)
        system_prompt += profile_capture_instructions
        # Available actions
        system_prompt += "\n## Available Actions\n\n"
        if self.config.enable_update_profile and profile_fields:
            fields_list = ", ".join(profile_fields.keys())
            system_prompt += f"- `update_profile`: Set profile fields ({fields_list})\n"
        # Examples
        system_prompt += dedent("""
            ## Examples
            **Example 1: User introduces themselves**
            User: "I'm Sarah, but everyone calls me Saz."
            → update_profile(name="Sarah", preferred_name="Saz")
            **Example 2: Nothing to save**
            User: "What's the weather like?"
            → No action needed (no profile information shared)
            ## Final Guidance
            - Only call update_profile when the user explicitly shares profile information
            - It's fine to do nothing if the conversation reveals no profile data\
            """)
        if self.config.additional_instructions:
            system_prompt += f"\n\n{self.config.additional_instructions}"
        return Message(role="system", content=system_prompt)
def _get_extraction_tools(
self,
user_id: str,
existing_profile: Optional[Any] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> List[Callable]:
"""Get sync extraction tools for the model."""
functions: List[Callable] = []
# Profile update tool
if self.config.enable_update_profile:
update_profile = self._build_update_profile_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
if update_profile:
functions.append(update_profile)
return functions
async def _aget_extraction_tools(
self,
user_id: str,
existing_profile: Optional[Any] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> List[Callable]:
"""Get async extraction tools for the model."""
functions: List[Callable] = []
# Profile update tool
if self.config.enable_update_profile:
update_profile = await self._abuild_update_profile_tool(
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
)
if update_profile:
functions.append(update_profile)
return functions
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
"""String representation for debugging."""
has_db = self.db is not None
has_model = self.model is not None
return (
f"UserProfileStore("
f"mode={self.config.mode.value}, "
f"db={has_db}, "
f"model={has_model}, "
f"enable_agent_tools={self.config.enable_agent_tools})"
)
def print(self, user_id: str, *, raw: bool = False) -> None:
"""Print formatted user profile.
Args:
user_id: The user to print profile for.
raw: If True, print raw dict using pprint instead of formatted panel.
Example:
>>> store.print(user_id="alice@example.com")
+---------------- User Profile -----------------+
| Name: Alice |
| Preferred Name: Ali |
+--------------- alice@example.com -------------+
"""
from agno.learn.utils import print_panel
profile = self.get(user_id=user_id)
lines = []
if profile:
# Add profile fields
updateable_fields = self._get_updateable_fields()
for field_name in updateable_fields:
value = getattr(profile, field_name, None)
if value:
display_name = field_name.replace("_", " ").title()
lines.append(f"{display_name}: {value}")
print_panel(
title="User Profile",
subtitle=user_id,
lines=lines,
empty_message="No profile data",
raw_data=profile,
raw=raw,
)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/user_profile.py",
"license": "Apache License 2.0",
"lines": 999,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/learn/utils.py | """
Learning Machine Utilities
==========================
Helper functions for safe data handling.
All functions are designed to never raise exceptions -
they return None on any failure. This prevents learning
extraction errors from crashing the main agent.
"""
from dataclasses import asdict, fields
from typing import Any, Dict, List, Optional, Type, TypeVar
T = TypeVar("T")
def _safe_get(data: Any, key: str, default: Any = None) -> Any:
"""Safely get a key from dict-like data.
Args:
data: Dict or object with attributes.
key: Key or attribute name to get.
default: Value to return if not found.
Returns:
The value, or default if not found.
"""
if isinstance(data, dict):
return data.get(key, default)
return getattr(data, key, default)
def _parse_json(data: Any) -> Optional[Dict]:
"""Parse JSON string to dict, or return dict as-is.
Args:
data: JSON string, dict, or None.
Returns:
Parsed dict, or None if parsing fails.
"""
if data is None:
return None
if isinstance(data, dict):
return data
if isinstance(data, str):
import json
try:
return json.loads(data)
except Exception:
return None
return None
def from_dict_safe(cls: Type[T], data: Any) -> Optional[T]:
    """Safely create a dataclass instance from dict-like data.

    Works with any dataclass - automatically handles subclass fields.
    Unknown keys are dropped. Never raises - returns None on any failure.

    Args:
        cls: The dataclass type to instantiate.
        data: Dict, JSON string, or existing instance.

    Returns:
        Instance of cls, or None if parsing fails.

    Example:
        >>> profile = from_dict_safe(UserProfile, {"user_id": "123"})
        >>> profile.user_id
        '123'
    """
    if data is None:
        return None
    if isinstance(data, cls):
        # Already the right type - pass through unchanged
        return data
    try:
        parsed = _parse_json(data)
        if parsed is None:
            return None
        # Keep only keys that are real fields of this dataclass
        valid_names = {f.name for f in fields(cls)}  # type: ignore
        return cls(**{k: v for k, v in parsed.items() if k in valid_names})
    except Exception:
        return None
def print_panel(
    title: str,
    subtitle: str,
    lines: List[str],
    *,
    empty_message: str = "No data",
    raw_data: Any = None,
    raw: bool = False,
) -> None:
    """Print formatted panel output for learning stores.

    Uses rich library for formatted output with a bordered panel.
    Falls back to plain printing when rich is unavailable, and to pprint
    when raw=True.

    Args:
        title: Panel title (e.g., "User Profile", "Session Context")
        subtitle: Panel subtitle (e.g., user_id, session_id)
        lines: Content lines to display inside the panel
        empty_message: Message shown when lines is empty
        raw_data: Object to pprint when raw=True
        raw: If True, use pprint instead of formatted panel

    Example:
        >>> print_panel(
        ...     title="User Profile",
        ...     subtitle="alice@example.com",
        ...     lines=["Name: Alice", "Memories:", "  [abc123] Loves Python"],
        ...     raw_data=profile,
        ... )
        ╭──────────────── User Profile ─────────────────╮
        │ Name: Alice                                   │
        │ Memories:                                     │
        │   [abc123] Loves Python                       │
        ╰─────────────── alice@example.com ─────────────╯
    """
    # Raw mode: dump the underlying dict (or the object itself) and skip formatting
    if raw and raw_data is not None:
        from pprint import pprint

        pprint(to_dict_safe(raw_data) or raw_data)
        return
    try:
        from rich.console import Console
        from rich.panel import Panel

        console = Console()
        if not lines:
            content = f"[dim]{empty_message}[/dim]"
        else:
            content = "\n".join(lines)
        panel = Panel(
            content,
            title=f"[bold]{title}[/bold]",
            subtitle=f"[dim]{subtitle}[/dim]",
            border_style="blue",
        )
        console.print(panel)
    except ImportError:
        # Fallback if rich not installed: plain text header + indented lines.
        # (Fix: removed an unused `from pprint import pprint` that was never
        # called on this path.)
        print(f"=== {title} ({subtitle}) ===")
        if not lines:
            print(f"  {empty_message}")
        else:
            for line in lines:
                print(f"  {line}")
        print()
def to_dict_safe(obj: Any) -> Optional[Dict[str, Any]]:
    """Safely convert an object to a dict.

    Works with dicts, objects exposing to_dict(), dataclasses, and plain
    objects with __dict__. Never raises - returns None on failure.

    Args:
        obj: Object to convert.

    Returns:
        Dict representation, or None if conversion fails.

    Example:
        >>> profile = UserProfile(user_id="123")
        >>> to_dict_safe(profile)
        {'user_id': '123', 'name': None, ...}
    """
    if obj is None:
        return None
    try:
        # Ordered conversion strategies: first match wins.
        converters = (
            (lambda o: isinstance(o, dict), lambda o: o),
            (lambda o: hasattr(o, "to_dict"), lambda o: o.to_dict()),
            (lambda o: hasattr(o, "__dataclass_fields__"), asdict),
            (lambda o: hasattr(o, "__dict__"), lambda o: dict(o.__dict__)),
        )
        for matches, convert in converters:
            if matches(obj):
                return convert(obj)
        return None
    except Exception:
        return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/utils.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/unit/models/test_retry_error_classification.py | import os
from unittest.mock import MagicMock, patch
import pytest
# Set test API key to avoid env var lookup errors
os.environ.setdefault("OPENAI_API_KEY", "test-key-for-testing")
from agno.exceptions import ModelProviderError
from agno.models.openai.chat import OpenAIChat
@pytest.fixture
def model():
    """Provide a default OpenAIChat instance configured with 3 retries."""
    chat_model = OpenAIChat(id="gpt-4o-mini", retries=3)
    return chat_model
@pytest.fixture
def model_with_retries():
    """Provide an OpenAIChat instance with 3 retries and zero delay between them."""
    chat_model = OpenAIChat(id="gpt-4o-mini", retries=3, delay_between_retries=0)
    return chat_model
@pytest.fixture
def model_with_two_retries():
    """Provide an OpenAIChat instance with 2 retries and zero delay between them."""
    chat_model = OpenAIChat(id="gpt-4o-mini", retries=2, delay_between_retries=0)
    return chat_model
# =============================================================================
# Tests for _is_retryable_error method - Status Codes
# =============================================================================
@pytest.mark.parametrize(
    "status_code",
    [400, 401, 403, 413, 422],
    ids=["bad_request", "unauthorized", "forbidden", "payload_too_large", "unprocessable_entity"],
)
def test_non_retryable_status_codes(model, status_code):
    """Client-side (4xx) error status codes must never be classified retryable."""
    provider_error = ModelProviderError("Test error", status_code=status_code)
    assert model._is_retryable_error(provider_error) is False
@pytest.mark.parametrize(
    "status_code",
    [429, 500, 502, 503, 504],
    ids=["rate_limit", "internal_error", "bad_gateway", "service_unavailable", "gateway_timeout"],
)
def test_retryable_status_codes(model, status_code):
    """Rate-limit (429) and server-side (5xx) status codes are eligible for retry."""
    provider_error = ModelProviderError("Test error", status_code=status_code)
    assert model._is_retryable_error(provider_error) is True
# =============================================================================
# Tests for _is_retryable_error method - Error Message Patterns
# =============================================================================
@pytest.mark.parametrize(
    "error_message",
    [
        "context_length_exceeded",
        "This model's maximum context length is 8192 tokens",
        "Your request exceeded the context window limit",
        "token limit exceeded",
        "max_tokens exceeded",
        "You have too many tokens in your request",
        "payload too large",
        "content_too_large",
        "request too large for model",
        "input too long",
        "Request exceeds the model's context limit",
    ],
    ids=[
        "context_length_exceeded",
        "maximum_context_length",
        "context_window",
        "token_limit",
        "max_tokens",
        "too_many_tokens",
        "payload_too_large",
        "content_too_large",
        "request_too_large",
        "input_too_long",
        "exceeds_the_model",
    ],
)
def test_non_retryable_error_patterns(model, error_message):
    """Context/token-limit messages are never retried, even with a retryable status.

    Status code 500 (normally retryable) is used deliberately: the test proves
    that the message pattern takes precedence over the status code.
    """
    provider_error = ModelProviderError(error_message, status_code=500)
    assert model._is_retryable_error(provider_error) is False
@pytest.mark.parametrize(
    "error_message",
    [
        "Rate limit exceeded, please retry",
        "Server error, please try again",
        "Connection timeout",
        "Internal server error",
        "Service temporarily unavailable",
        "Gateway timeout occurred",
        "Temporary failure in name resolution",
    ],
    ids=[
        "rate_limit_message",
        "server_error",
        "connection_timeout",
        "internal_error",
        "service_unavailable",
        "gateway_timeout",
        "dns_failure",
    ],
)
def test_retryable_error_patterns(model, error_message):
    """Messages describing transient failures remain retryable."""
    provider_error = ModelProviderError(error_message, status_code=500)
    assert model._is_retryable_error(provider_error) is True
def test_case_insensitive_pattern_matching(model):
    """Message pattern matching must ignore letter case."""
    uppercase_variants = [
        "CONTEXT_LENGTH_EXCEEDED",
        "Maximum Context Length Exceeded",
        "TOKEN LIMIT",
        "PAYLOAD TOO LARGE",
    ]
    for pattern in uppercase_variants:
        provider_error = ModelProviderError(pattern, status_code=500)
        assert model._is_retryable_error(provider_error) is False, f"Pattern '{pattern}' should not be retryable"
# =============================================================================
# Tests for Sync Retry Behavior
# =============================================================================
def test_sync_non_retryable_error_not_retried(model_with_retries):
    """A non-retryable error must surface immediately, with no retry attempts."""
    attempts = 0

    def failing_invoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError(
            "This model's maximum context length is 8192 tokens",
            status_code=400,
        )

    with patch.object(model_with_retries, "invoke", side_effect=failing_invoke):
        with pytest.raises(ModelProviderError) as exc_info:
            model_with_retries._invoke_with_retry(messages=[])

    assert attempts == 1, "Non-retryable error should not trigger retries"
    assert "maximum context length" in str(exc_info.value)
def test_sync_retryable_error_is_retried(model_with_retries):
    """A retryable error must be attempted 1 + retries times before failing."""
    attempts = 0

    def failing_invoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Internal server error", status_code=500)

    with patch.object(model_with_retries, "invoke", side_effect=failing_invoke):
        with pytest.raises(ModelProviderError):
            model_with_retries._invoke_with_retry(messages=[])

    # retries=3 -> one initial attempt plus three retries
    assert attempts == 4, f"Expected 4 calls (1 + 3 retries), got {attempts}"
def test_sync_success_after_transient_failure(model_with_retries):
    """Retrying must stop as soon as an attempt succeeds."""
    attempts = 0
    expected_response = MagicMock()

    def flaky_invoke(**kwargs):
        nonlocal attempts
        attempts += 1
        # Fail twice, then succeed
        if attempts < 3:
            raise ModelProviderError("Server error", status_code=500)
        return expected_response

    with patch.object(model_with_retries, "invoke", side_effect=flaky_invoke):
        result = model_with_retries._invoke_with_retry(messages=[])

    assert result == expected_response
    assert attempts == 3, "Should succeed on third attempt"
def test_sync_auth_error_not_retried(model_with_retries):
    """Authentication failures (HTTP 401) must not be retried."""
    attempts = 0

    def failing_invoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Invalid API key", status_code=401)

    with patch.object(model_with_retries, "invoke", side_effect=failing_invoke):
        with pytest.raises(ModelProviderError):
            model_with_retries._invoke_with_retry(messages=[])

    assert attempts == 1, "Auth errors should not be retried"
def test_sync_payload_too_large_not_retried(model_with_retries):
    """Payload-too-large failures (HTTP 413) must not be retried."""
    attempts = 0

    def failing_invoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Request entity too large", status_code=413)

    with patch.object(model_with_retries, "invoke", side_effect=failing_invoke):
        with pytest.raises(ModelProviderError):
            model_with_retries._invoke_with_retry(messages=[])

    assert attempts == 1, "Payload too large errors should not be retried"
# =============================================================================
# Tests for Async Retry Behavior
# =============================================================================
@pytest.mark.asyncio
async def test_async_non_retryable_error_not_retried(model_with_retries):
    """Async: a non-retryable error must surface immediately, with no retries."""
    attempts = 0

    async def failing_ainvoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError(
            "This model's maximum context length is 8192 tokens",
            status_code=400,
        )

    with patch.object(model_with_retries, "ainvoke", side_effect=failing_ainvoke):
        with pytest.raises(ModelProviderError) as exc_info:
            await model_with_retries._ainvoke_with_retry(messages=[])

    assert attempts == 1, "Non-retryable error should not trigger retries"
    assert "maximum context length" in str(exc_info.value)
@pytest.mark.asyncio
async def test_async_retryable_error_is_retried(model_with_retries):
    """Async: a retryable error must be attempted 1 + retries times."""
    attempts = 0

    async def failing_ainvoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Internal server error", status_code=500)

    with patch.object(model_with_retries, "ainvoke", side_effect=failing_ainvoke):
        with pytest.raises(ModelProviderError):
            await model_with_retries._ainvoke_with_retry(messages=[])

    assert attempts == 4, f"Expected 4 calls, got {attempts}"
@pytest.mark.asyncio
async def test_async_success_after_transient_failure(model_with_retries):
    """Async: retrying must stop as soon as an attempt succeeds."""
    attempts = 0
    expected_response = MagicMock()

    async def flaky_ainvoke(**kwargs):
        nonlocal attempts
        attempts += 1
        # Fail once, then succeed
        if attempts < 2:
            raise ModelProviderError("Server error", status_code=503)
        return expected_response

    with patch.object(model_with_retries, "ainvoke", side_effect=flaky_ainvoke):
        result = await model_with_retries._ainvoke_with_retry(messages=[])

    assert result == expected_response
    assert attempts == 2
# =============================================================================
# Tests for Streaming Retry Behavior
# =============================================================================
def test_sync_stream_non_retryable_error_not_retried(model_with_two_retries):
    """Streaming: non-retryable errors must not be retried."""
    attempts = 0

    def failing_stream(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("context_length_exceeded", status_code=400)
        yield  # generator function on purpose: body raises on first next()

    with patch.object(model_with_two_retries, "invoke_stream", side_effect=failing_stream):
        with pytest.raises(ModelProviderError):
            list(model_with_two_retries._invoke_stream_with_retry(messages=[]))

    assert attempts == 1, "Non-retryable stream error should not trigger retries"
def test_sync_stream_retryable_error_is_retried(model_with_two_retries):
    """Streaming: retryable errors must be retried the configured number of times."""
    attempts = 0

    def failing_stream(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Server error", status_code=500)
        yield  # generator function on purpose: body raises on first next()

    with patch.object(model_with_two_retries, "invoke_stream", side_effect=failing_stream):
        with pytest.raises(ModelProviderError):
            list(model_with_two_retries._invoke_stream_with_retry(messages=[]))

    # retries=2 -> one initial attempt plus two retries
    assert attempts == 3, f"Expected 3 calls, got {attempts}"
@pytest.mark.asyncio
async def test_async_stream_non_retryable_error_not_retried(model_with_two_retries):
    """Async streaming: non-retryable errors must not be retried."""
    attempts = 0

    async def failing_astream(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("context_length_exceeded", status_code=400)
        yield  # async generator on purpose: body raises on first iteration

    with patch.object(model_with_two_retries, "ainvoke_stream", side_effect=failing_astream):
        with pytest.raises(ModelProviderError):
            async for _ in model_with_two_retries._ainvoke_stream_with_retry(messages=[]):
                pass

    assert attempts == 1, "Non-retryable async stream error should not trigger retries"
@pytest.mark.asyncio
async def test_async_stream_retryable_error_is_retried(model_with_two_retries):
    """Async streaming: retryable errors must be retried."""
    attempts = 0

    async def failing_astream(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Server error", status_code=500)
        yield  # async generator on purpose: body raises on first iteration

    with patch.object(model_with_two_retries, "ainvoke_stream", side_effect=failing_astream):
        with pytest.raises(ModelProviderError):
            async for _ in model_with_two_retries._ainvoke_stream_with_retry(messages=[]):
                pass

    assert attempts == 3, f"Expected 3 calls, got {attempts}"
# =============================================================================
# Tests for Retry Configuration
# =============================================================================
def test_zero_retries_means_no_retry():
    """With retries=0, exactly one attempt must be made."""
    chat_model = OpenAIChat(id="gpt-4o-mini", retries=0)
    attempts = 0

    def failing_invoke(**kwargs):
        nonlocal attempts
        attempts += 1
        raise ModelProviderError("Server error", status_code=500)

    with patch.object(chat_model, "invoke", side_effect=failing_invoke):
        with pytest.raises(ModelProviderError):
            chat_model._invoke_with_retry(messages=[])

    assert attempts == 1, "With retries=0, only one attempt should be made"
def test_exponential_backoff_delay_calculation():
    """Exponential backoff doubles the base delay on each successive attempt."""
    chat_model = OpenAIChat(id="gpt-4o-mini", retries=3, delay_between_retries=1, exponential_backoff=True)
    # Expected: delay = base * 2 ** attempt_index
    for attempt_index, expected_delay in enumerate([1, 2, 4, 8]):
        assert chat_model._get_retry_delay(attempt_index) == expected_delay
def test_linear_delay_calculation():
    """Without exponential backoff, the delay is constant across attempts."""
    chat_model = OpenAIChat(id="gpt-4o-mini", retries=3, delay_between_retries=2, exponential_backoff=False)
    for attempt_index in range(4):
        assert chat_model._get_retry_delay(attempt_index) == 2
# =============================================================================
# Tests for ModelProviderError Attributes
# =============================================================================
def test_error_with_model_info(model):
    """Classification must work when model name/id metadata is attached to the error."""
    provider_error = ModelProviderError(
        "context_length_exceeded",
        status_code=400,
        model_name="gpt-4o",
        model_id="gpt-4o-2024-05-13",
    )
    assert model._is_retryable_error(provider_error) is False
    # The metadata must round-trip unchanged
    assert provider_error.model_name == "gpt-4o"
    assert provider_error.model_id == "gpt-4o-2024-05-13"
def test_default_status_code(model):
    """The default ModelProviderError status code (502) must be retryable."""
    provider_error = ModelProviderError("Unknown error")  # defaults to status_code=502
    assert model._is_retryable_error(provider_error) is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/test_retry_error_classification.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/chunking/test_markdown_chunking.py | """Tests for MarkdownChunking with split_on_headings parameter."""
import pytest
pytest.importorskip("unstructured")
from agno.knowledge.chunking.markdown import MarkdownChunking
from agno.knowledge.document.base import Document
# Shared markdown fixture covering heading levels H1 through H4.
# Expected heading counts (relied on by the tests below): 1 H1, 3 H2, 3 H3, 1 H4.
MARKDOWN_CONTENT = """# Main Title (H1)
This is content under the main title.
## Section 1 (H2)
Content for section 1.
### Subsection 1.1 (H3)
Content for subsection 1.1.
### Subsection 1.2 (H3)
Content for subsection 1.2.
## Section 2 (H2)
Content for section 2.
### Subsection 2.1 (H3)
Content for subsection 2.1.
#### Details 2.1.1 (H4)
Detailed content.
## Section 3 (H2)
Final section content.
"""
# --- Tests for split_on_headings parameter ---
def test_split_on_headings_false_uses_size_based_chunking():
    """split_on_headings=False falls back to plain size-based chunking."""
    chunker = MarkdownChunking(chunk_size=5000, split_on_headings=False)
    document = Document(name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    # A large chunk_size means the whole fixture fits in very few chunks
    assert len(chunks) >= 1
    assert all(chunk.content for chunk in chunks)
def test_split_on_headings_true_splits_on_all_headings():
    """split_on_headings=True splits at every heading level (H1-H6)."""
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    # Fixture contains 1 H1 + 3 H2 + 3 H3 + 1 H4 = 8 headings in total
    assert len(chunks) == 8
    assert chunks[0].content.startswith("# Main Title (H1)")
    # Every chunk must begin at a heading
    for chunk in chunks:
        assert chunk.content.strip().startswith("#")
def test_split_on_headings_level_2_splits_on_h1_and_h2():
    """split_on_headings=2 splits only at H1 and H2 boundaries."""
    chunker = MarkdownChunking(split_on_headings=2)
    document = Document(name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    # 1 H1 + 3 H2 = 4 chunks; H3 subsections stay inside their parent H2 chunk
    assert len(chunks) == 4
    assert chunks[0].content.startswith("# Main Title (H1)")
    assert chunks[1].content.startswith("## Section 1 (H2)")
    assert "### Subsection 1.1 (H3)" in chunks[1].content
    assert "### Subsection 1.2 (H3)" in chunks[1].content
def test_split_on_headings_level_1_splits_on_h1_only():
    """split_on_headings=1 splits only at H1; lower levels stay nested."""
    chunker = MarkdownChunking(split_on_headings=1)
    document = Document(name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    # Single H1 in the fixture -> one chunk holding the entire document
    assert len(chunks) == 1
    for heading in (
        "# Main Title (H1)",
        "## Section 1 (H2)",
        "### Subsection 1.1 (H3)",
        "#### Details 2.1.1 (H4)",
    ):
        assert heading in chunks[0].content
def test_split_on_headings_level_3_splits_on_h1_h2_h3():
    """split_on_headings=3 splits at H1, H2 and H3; H4 stays with its parent H3."""
    chunker = MarkdownChunking(split_on_headings=3)
    document = Document(name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    # 1 H1 + 3 H2 + 3 H3 = 7 chunks
    assert len(chunks) == 7
    h4_chunks = [chunk for chunk in chunks if "#### Details 2.1.1 (H4)" in chunk.content]
    assert len(h4_chunks) == 1
    # The H4 content lives inside the chunk opened by its parent H3 heading
    assert h4_chunks[0].content.strip().startswith("### Subsection 2.1 (H3)")
def test_chunk_metadata_includes_chunk_number():
    """Each chunk's metadata carries its 1-based number plus the original metadata."""
    chunker = MarkdownChunking(split_on_headings=2)
    document = Document(name="test.md", content=MARKDOWN_CONTENT, meta_data={"source": "test"})
    chunks = chunker.chunk(document)
    for number, chunk in enumerate(chunks, start=1):
        assert chunk.meta_data["chunk"] == number
        assert chunk.meta_data["source"] == "test"  # original metadata preserved
        assert "chunk_size" in chunk.meta_data
def test_chunk_ids_include_chunk_number():
    """Chunk IDs follow '<doc_id>_<n>' when the document has an explicit ID."""
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(id="doc123", name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    for number, chunk in enumerate(chunks, start=1):
        assert chunk.id == f"doc123_{number}"
def test_chunk_ids_use_name_when_no_id():
    """Chunk IDs fall back to '<name>_<n>' when the document has no ID."""
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(name="test.md", content=MARKDOWN_CONTENT)
    chunks = chunker.chunk(document)
    for number, chunk in enumerate(chunks, start=1):
        assert chunk.id == f"test.md_{number}"
# --- Tests for parameter validation ---
def test_invalid_heading_level_above_6_raises_error():
    """Heading levels above 6 are rejected at construction time."""
    with pytest.raises(ValueError, match="split_on_headings must be between 1 and 6"):
        MarkdownChunking(split_on_headings=7)
def test_invalid_heading_level_below_1_raises_error():
    """Heading level 0 is rejected at construction time."""
    with pytest.raises(ValueError, match="split_on_headings must be between 1 and 6"):
        MarkdownChunking(split_on_headings=0)
def test_negative_heading_level_raises_error():
    """Negative heading levels are rejected at construction time."""
    with pytest.raises(ValueError, match="split_on_headings must be between 1 and 6"):
        MarkdownChunking(split_on_headings=-1)
def test_valid_heading_levels_1_to_6_accepted():
    """Every level from 1 through 6 is accepted and stored unchanged."""
    for heading_level in range(1, 7):
        chunker = MarkdownChunking(split_on_headings=heading_level)
        assert chunker.split_on_headings == heading_level
# --- Tests for edge cases ---
def test_empty_content_returns_single_chunk():
    """An empty document yields exactly one (empty) chunk."""
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(name="test.md", content="")
    chunks = chunker.chunk(document)
    assert len(chunks) == 1
    assert chunks[0].content == ""
def test_content_smaller_than_chunk_size_returns_single_chunk():
    """Content below chunk_size is kept as a single, unmodified chunk."""
    small_content = "# Small Heading\n\nSmall content."
    chunker = MarkdownChunking(chunk_size=5000, split_on_headings=False)
    document = Document(name="test.md", content=small_content)
    chunks = chunker.chunk(document)
    assert len(chunks) == 1
    assert chunks[0].content == small_content
def test_no_headings_with_split_on_headings_true():
    """Heading-less content stays a single chunk even when splitting is enabled."""
    no_heading_content = "Just some plain text without any headings.\n\nAnother paragraph."
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(name="test.md", content=no_heading_content)
    chunks = chunker.chunk(document)
    assert len(chunks) == 1
    assert no_heading_content in chunks[0].content
def test_only_lower_level_headings_with_high_split_level():
    """H3/H4-only content is not split when only H1/H2 are split points."""
    low_level_content = """### Heading 3
Content under H3.
#### Heading 4
Content under H4.
"""
    chunker = MarkdownChunking(split_on_headings=2)  # splits on H1 and H2 only
    document = Document(name="test.md", content=low_level_content)
    chunks = chunker.chunk(document)
    # No H1 or H2 present, so there is nothing to split on
    assert len(chunks) == 1
    assert "### Heading 3" in chunks[0].content
    assert "#### Heading 4" in chunks[0].content
def test_mixed_heading_levels_with_level_4_split():
    """With split level 4, H5/H6 content stays inside the final H4 chunk."""
    mixed_content = """# H1
## H2
### H3
#### H4
##### H5
###### H6
"""
    chunker = MarkdownChunking(split_on_headings=4)
    document = Document(name="test.md", content=mixed_content)
    chunks = chunker.chunk(document)
    # Splits on H1-H4 only -> 4 chunks; H5 and H6 belong to the H4 chunk
    assert len(chunks) == 4
    assert "##### H5" in chunks[3].content
    assert "###### H6" in chunks[3].content
# --- Tests for fallback behavior ---
def test_fallback_splits_at_paragraphs():
    """If markdown parsing raises, chunking falls back to paragraph splitting."""
    from unittest.mock import patch

    text = """First paragraph.
Second paragraph.
Third paragraph."""
    document = Document(id="test", name="test", content=text)
    chunker = MarkdownChunking(chunk_size=30, overlap=0)
    # Force the parser to fail so the fallback path is exercised
    with patch("agno.knowledge.chunking.markdown.partition_md", side_effect=Exception("test")):
        chunks = chunker.chunk(document)
    assert len(chunks) > 1
# --- Tests for overlap functionality ---
def test_overlap_prepends_content_from_previous_chunk():
    """With overlap=N, each chunk after the first starts with the previous chunk's last N chars."""
    content = """# Section 1
First section content here.
## Section 2
Second section content here.
## Section 3
Third section content here.
"""
    chunker = MarkdownChunking(split_on_headings=True, overlap=10)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    assert len(chunks) == 3
    for index in range(1, len(chunks)):
        previous_tail = chunks[index - 1].content[-10:]
        assert chunks[index].content.startswith(previous_tail)
def test_overlap_with_size_based_chunking():
    """Overlap is also applied when chunking purely by size."""
    from unittest.mock import patch

    long_content = "Paragraph one. " * 20 + "\n\n" + "Paragraph two. " * 20 + "\n\n" + "Paragraph three. " * 20
    chunker = MarkdownChunking(chunk_size=200, overlap=30, split_on_headings=False)
    document = Document(name="test.md", content=long_content)
    with patch("agno.knowledge.chunking.markdown.partition_md", side_effect=Exception("force fallback")):
        chunks = chunker.chunk(document)
    assert len(chunks) > 1
    # Each chunk after the first carries the previous chunk's tail
    for index in range(1, len(chunks)):
        previous_tail = chunks[index - 1].content[-30:]
        assert previous_tail in chunks[index].content
# --- Tests for unicode and international content ---
def test_unicode_headings_and_content():
    """Headings and body text in several European languages survive chunking."""
    content = """# Documentation en Francais
Contenu avec des accents: cafe, facade, resume.
## Abschnitt auf Deutsch
Umlaute: Muller, Geschaft, Ubung.
## Sekcja po Polsku
Polskie znaki: zolty, zrodlo, swieto.
"""
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    assert len(chunks) == 3
    assert "Francais" in chunks[0].content
    assert "Deutsch" in chunks[1].content
    assert "Polsku" in chunks[2].content
def test_cjk_characters_in_markdown():
    """Sections labelled for CJK languages are chunked into the expected pieces."""
    content = """# Chinese Section
This section has content.
## Japanese Section
More content here.
## Korean Section
Final content.
"""
    chunker = MarkdownChunking(split_on_headings=True)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    assert len(chunks) == 3
    assert "Chinese" in chunks[0].content
    assert "Japanese" in chunks[1].content
    assert "Korean" in chunks[2].content
# --- Tests for size-based chunking creating multiple chunks ---
def test_size_based_chunking_creates_multiple_chunks_when_content_exceeds_limit():
    """Size-based chunking splits oversized content into sequentially numbered chunks."""
    from unittest.mock import patch

    # Content well beyond chunk_size
    long_content = ("This is paragraph one with some content. " * 10 + "\n\n") * 10
    chunker = MarkdownChunking(chunk_size=200, split_on_headings=False)
    document = Document(name="test.md", content=long_content)
    # Force the fallback path so size-based behavior is exercised deterministically
    with patch("agno.knowledge.chunking.markdown.partition_md", side_effect=Exception("force fallback")):
        chunks = chunker.chunk(document)
    assert len(chunks) > 1
    # Chunk numbers must be sequential starting at 1
    for number, chunk in enumerate(chunks, start=1):
        assert chunk.meta_data["chunk"] == number
# --- Tests for split_on_headings respecting chunk_size ---
def test_split_on_headings_respects_chunk_size():
    """Oversized sections are split further so every chunk obeys chunk_size."""
    # One heading with far more content than chunk_size allows
    large_section = "This is a long paragraph with lots of content. " * 50  # ~2400 chars
    content = f"""# Section 1
{large_section}
## Section 2
Short content here.
"""
    chunker = MarkdownChunking(chunk_size=500, split_on_headings=True)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    # More chunks than the two headings alone would produce
    assert len(chunks) > 2
    for chunk in chunks:
        assert len(chunk.content) <= 500, f"Chunk exceeds chunk_size: {len(chunk.content)} > 500"
def test_split_on_headings_preserves_heading_in_sub_chunks():
    """When a big section is split, every sub-chunk repeats the section heading."""
    large_section = "Word " * 200  # ~1000 chars of content
    content = f"""# My Important Heading
{large_section}
"""
    chunker = MarkdownChunking(chunk_size=300, split_on_headings=True)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    assert len(chunks) > 1
    for chunk in chunks:
        assert chunk.content.startswith("# My Important Heading"), (
            f"Chunk should start with heading, got: {chunk.content[:50]}..."
        )
def test_split_on_headings_level_2_respects_chunk_size():
    """Oversized H2 sections are split further when splitting on H1/H2 only."""
    large_content = "Content here. " * 100  # ~1400 chars
    content = f"""# Main Title
Intro text.
## Large Section
{large_content}
## Small Section
Just a little text.
"""
    chunker = MarkdownChunking(chunk_size=400, split_on_headings=2)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    # More chunks than the three heading-delimited sections alone
    assert len(chunks) > 3
    for chunk in chunks:
        assert len(chunk.content) <= 400, f"Chunk exceeds chunk_size: {len(chunk.content)} > 400"
def test_split_on_headings_small_sections_not_affected():
    """Sections already under chunk_size are kept as single chunks."""
    content = """# Section 1
Short content.
## Section 2
Also short.
### Section 3
Brief text.
"""
    chunker = MarkdownChunking(chunk_size=500, split_on_headings=True)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    # One chunk per heading, no size-based subdivision
    assert len(chunks) == 3
    assert "# Section 1" in chunks[0].content
    assert "## Section 2" in chunks[1].content
    assert "### Section 3" in chunks[2].content
def test_split_on_headings_very_long_paragraph_split_by_words():
    """A single paragraph far larger than chunk_size is split at word boundaries."""
    long_paragraph = "word " * 500  # ~2500 chars
    content = f"""# Heading
{long_paragraph}
"""
    chunker = MarkdownChunking(chunk_size=200, split_on_headings=True)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    assert len(chunks) > 5
    for chunk in chunks:
        assert len(chunk.content) <= 200, f"Chunk exceeds chunk_size: {len(chunk.content)} > 200"
def test_split_on_headings_chunk_metadata_correct_with_splitting():
    """Numbering, IDs and metadata stay consistent when large sections are split."""
    large_content = "Some text here. " * 50  # ~800 chars
    content = f"""# Section
{large_content}
"""
    chunker = MarkdownChunking(chunk_size=200, split_on_headings=True)
    document = Document(id="doc1", name="test.md", content=content, meta_data={"source": "test"})
    chunks = chunker.chunk(document)
    for number, chunk in enumerate(chunks, start=1):
        assert chunk.meta_data["chunk"] == number
        assert chunk.meta_data["source"] == "test"
        assert chunk.id == f"doc1_{number}"
        assert chunk.meta_data["chunk_size"] == len(chunk.content)
def test_split_on_headings_overlap_between_sub_chunks():
    """Overlap is applied between sub-chunks produced from one large section."""
    # A single section large enough to be split into several sub-chunks
    large_content = "Word1 Word2 Word3. " * 30  # ~570 chars
    content = f"""# Big Section
{large_content}
"""
    chunker = MarkdownChunking(chunk_size=200, split_on_headings=True, overlap=20)
    document = Document(name="test.md", content=content)
    chunks = chunker.chunk(document)
    assert len(chunks) > 1
    for i in range(1, len(chunks)):
        # End of the previous chunk must reappear at the start of the current one
        previous_tail = chunks[i - 1].content[-20:]
        assert chunks[i].content.startswith(previous_tail), f"Chunk {i + 1} should start with overlap from chunk {i}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/chunking/test_markdown_chunking.py",
"license": "Apache License 2.0",
"lines": 388,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/human_in_the_loop/test_multi_round_user_input.py | """Tests for multi-round Human-in-the-Loop (HITL) user input flows.
These tests verify that active_requirements is correctly populated across
multiple continue_run() calls when new tools are paused in each round.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.decorator import tool
from agno.tools.user_control_flow import UserControlFlowTools
def test_multi_round_user_input_with_decorator(shared_db):
    """Exercise several consecutive user-input rounds using the @tool decorator.

    Each paused round must expose an active requirement for the newly
    paused ask_question call before the run can be continued.
    """
    call_count = 0

    @tool(requires_user_input=True, user_input_fields=["answer"])
    def ask_question(question: str, answer: str = ""):
        nonlocal call_count
        call_count += 1
        return f"Q{call_count}: {question} -> A: {answer}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[ask_question],
        instructions="""You are a survey bot. Ask 3 questions one at a time.
        After each answer, call ask_question again with the next question.
        Questions: 1) What is your name? 2) What is your age? 3) What is your city?
        After all 3 questions, summarize the answers.""",
        db=shared_db,
        telemetry=False,
    )
    session_id = "test_multi_round_decorator"

    # Round 1: the first ask_question call should pause the run
    response = agent.run("Start the survey", session_id=session_id)
    assert response.is_paused, "Run should be paused after first tool call"
    assert len(response.active_requirements) == 1, "Should have 1 active requirement"
    assert response.active_requirements[0].needs_user_input
    # Supply the first answer
    response.active_requirements[0].user_input_schema[0].value = "John"  # type: ignore

    # Round 2: continuing should surface a fresh requirement if paused again
    response = agent.continue_run(
        run_id=response.run_id,
        requirements=response.requirements,
        session_id=session_id,
    )
    if response.is_paused:
        assert len(response.active_requirements) >= 1, "Should have at least 1 active requirement for the new question"
        assert response.active_requirements[0].needs_user_input
        # Supply the second answer
        response.active_requirements[0].user_input_schema[0].value = "25"  # type: ignore

    # Round 3
    response = agent.continue_run(
        run_id=response.run_id,
        requirements=response.requirements,
        session_id=session_id,
    )
    if response.is_paused:
        assert len(response.active_requirements) >= 1
        response.active_requirements[0].user_input_schema[0].value = "NYC"  # type: ignore

    # Final round: all answers supplied, the agent should finish
    response = agent.continue_run(
        run_id=response.run_id,
        requirements=response.requirements,
        session_id=session_id,
    )
    assert not response.is_paused, "Final response should not be paused"
def test_multi_round_user_control_flow_tools(shared_db):
    """Test multiple rounds using UserControlFlowTools (get_user_input).

    This is the exact scenario from the reported bug where active_requirements
    was empty on subsequent continue_run() calls.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[UserControlFlowTools()],
        instructions="""Ask questions in multiple rounds.
        Round 1: Ask for destination using get_user_input
        Round 2: After getting destination, ask for travel dates using get_user_input
        Round 3: After getting dates, ask for budget using get_user_input
        After all 3 rounds, provide a summary.""",
        db=shared_db,
        telemetry=False,
    )
    session_id = "test_multi_round_user_control"
    # Round 1
    response = agent.run("I want to plan a trip", session_id=session_id)
    assert response.is_paused, "Run should be paused for user input"
    assert len(response.requirements) >= 1, "Should have at least 1 requirement"  # type: ignore
    assert len(response.active_requirements) >= 1, "Should have at least 1 active requirement"
    # Track tool_call_ids to verify new requirements are created
    first_tool_id = response.active_requirements[0].tool_execution.tool_call_id  # type: ignore
    # Fill in first round answers
    for field in response.active_requirements[0].user_input_schema:  # type: ignore
        field.value = f"test_value_for_{field.name}"
    # Round 2
    response = agent.continue_run(
        run_id=response.run_id,
        requirements=response.requirements,
        session_id=session_id,
    )
    round_count = 1
    max_rounds = 5  # Safety limit
    while response.is_paused and round_count < max_rounds:
        round_count += 1
        # THE KEY ASSERTION: active_requirements should NOT be empty
        # when the run is paused with new paused tools
        paused_tools = [t for t in response.tools or [] if t.is_paused]
        if paused_tools:
            assert len(response.active_requirements) >= 1, (
                f"Round {round_count}: active_requirements should not be empty "
                f"when there are {len(paused_tools)} paused tools. "
                f"Total requirements: {len(response.requirements) if response.requirements else 0}"
            )
            # Verify the new requirement has a different tool_call_id
            new_tool_id = response.active_requirements[0].tool_execution.tool_call_id
            assert new_tool_id != first_tool_id, "New requirement should have different tool_call_id"
        # Fill in answers for this round
        for req in response.active_requirements:
            if req.needs_user_input and req.user_input_schema:
                for field in req.user_input_schema:
                    field.value = f"round{round_count}_{field.name}"
        response = agent.continue_run(
            run_id=response.run_id,
            requirements=response.requirements,
            session_id=session_id,
        )
    # Should complete within max_rounds
    assert round_count < max_rounds, f"Test didn't complete within {max_rounds} rounds"
def test_requirements_accumulate_across_rounds(shared_db):
    """Test that requirements list grows with each round, maintaining history."""
    @tool(requires_user_input=True, user_input_fields=["value"])
    def collect_value(field_name: str, value: str = ""):
        return f"{field_name}={value}"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[collect_value],
        instructions="""Collect 3 values one at a time:
        1. First call collect_value for 'name'
        2. Then call collect_value for 'age'
        3. Then call collect_value for 'city'
        After collecting all 3, return a summary.""",
        db=shared_db,
        telemetry=False,
    )
    session_id = "test_requirements_accumulate"
    response = agent.run("Collect my info", session_id=session_id)
    # Per-round snapshots of both counters, checked after the loop.
    requirements_count_history = []
    active_count_history = []
    round_num = 0
    max_rounds = 5
    while response.is_paused and round_num < max_rounds:
        round_num += 1
        requirements_count_history.append(len(response.requirements or []))
        active_count_history.append(len(response.active_requirements))
        # Each round should have at least 1 active requirement
        # Note: The model may batch multiple tool calls in a single response
        assert len(response.active_requirements) >= 1, (
            f"Round {round_num}: Should have at least 1 active requirement, got {len(response.active_requirements)}"
        )
        # Fill the values for all active requirements
        for i, req in enumerate(response.active_requirements):
            if req.user_input_schema:
                for field in req.user_input_schema:
                    if field.value is None:
                        field.value = f"value{round_num}_{i}"
        response = agent.continue_run(
            run_id=response.run_id,
            requirements=response.requirements,
            session_id=session_id,
        )
    # Verify requirements accumulated (each round adds 1)
    if len(requirements_count_history) >= 2:
        for i in range(1, len(requirements_count_history)):
            assert requirements_count_history[i] >= requirements_count_history[i - 1], (
                f"Requirements should accumulate: {requirements_count_history}"
            )
    # Active requirements should always be at least 1 (model may batch calls)
    for count in active_count_history:
        assert count >= 1, f"Active requirements per round should be at least 1: {active_count_history}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/human_in_the_loop/test_multi_round_user_input.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_span_schema.py | """Tests for span table schema with dynamic foreign key references."""
from unittest.mock import Mock, patch
import pytest
from sqlalchemy.engine import Engine
from sqlalchemy.orm import Session
from sqlalchemy.schema import Table
from agno.db.postgres.postgres import PostgresDb
from agno.db.postgres.schemas import get_table_schema_definition as get_postgres_schema
from agno.db.sqlite.schemas import get_table_schema_definition as get_sqlite_schema
from agno.db.sqlite.sqlite import SqliteDb
# ==================== SQLite Schema Tests ====================
def test_sqlite_span_schema_default_traces_table():
    """The trace_id FK should target the default traces table."""
    schema = get_sqlite_schema("spans")
    assert "trace_id" in schema
    trace_col = schema["trace_id"]
    assert "foreign_key" in trace_col
    assert trace_col["foreign_key"] == "agno_traces.trace_id"


def test_sqlite_span_schema_custom_traces_table():
    """The trace_id FK should target a caller-supplied traces table."""
    schema = get_sqlite_schema("spans", traces_table_name="custom_traces")
    assert "trace_id" in schema
    trace_col = schema["trace_id"]
    assert "foreign_key" in trace_col
    assert trace_col["foreign_key"] == "custom_traces.trace_id"


def test_sqlite_span_schema_has_required_columns():
    """Every expected span column must be present in the schema."""
    schema = get_sqlite_schema("spans")
    expected_columns = (
        "span_id",
        "trace_id",
        "parent_span_id",
        "name",
        "span_kind",
        "status_code",
        "status_message",
        "start_time",
        "end_time",
        "duration_ms",
        "attributes",
        "created_at",
    )
    for col in expected_columns:
        assert col in schema, f"Missing column: {col}"


def test_sqlite_span_schema_primary_key():
    """span_id must be flagged as the primary key."""
    schema = get_sqlite_schema("spans")
    assert schema["span_id"]["primary_key"] is True


def test_sqlite_span_schema_indexes():
    """Columns used for joins, tree walks and time queries must be indexed."""
    schema = get_sqlite_schema("spans")
    # trace_id: joins; parent_span_id: tree traversal; time columns: range queries.
    for indexed_col in ("trace_id", "parent_span_id", "start_time", "created_at"):
        assert schema[indexed_col]["index"] is True
# ==================== PostgreSQL Schema Tests ====================
def test_postgres_span_schema_default_values():
    """FK should combine the default db schema and default traces table."""
    schema = get_postgres_schema("spans")
    assert "trace_id" in schema
    trace_col = schema["trace_id"]
    assert "foreign_key" in trace_col
    # Unlike SQLite, Postgres FKs carry a schema prefix.
    assert trace_col["foreign_key"] == "agno.agno_traces.trace_id"


def test_postgres_span_schema_custom_traces_table():
    """FK should honor a caller-supplied traces table name."""
    schema = get_postgres_schema("spans", traces_table_name="my_traces")
    assert schema["trace_id"]["foreign_key"] == "agno.my_traces.trace_id"


def test_postgres_span_schema_custom_db_schema():
    """FK should honor a caller-supplied database schema."""
    schema = get_postgres_schema("spans", db_schema="custom_schema")
    assert schema["trace_id"]["foreign_key"] == "custom_schema.agno_traces.trace_id"


def test_postgres_span_schema_custom_both():
    """FK should honor a custom traces table and a custom schema together."""
    schema = get_postgres_schema("spans", traces_table_name="my_traces", db_schema="my_schema")
    assert schema["trace_id"]["foreign_key"] == "my_schema.my_traces.trace_id"


def test_postgres_span_schema_has_required_columns():
    """Every expected span column must be present in the schema."""
    schema = get_postgres_schema("spans")
    expected_columns = (
        "span_id",
        "trace_id",
        "parent_span_id",
        "name",
        "span_kind",
        "status_code",
        "status_message",
        "start_time",
        "end_time",
        "duration_ms",
        "attributes",
        "created_at",
    )
    for col in expected_columns:
        assert col in schema, f"Missing column: {col}"
# ==================== SQLite Database Integration Tests ====================
@pytest.fixture
def sqlite_db_default(tmp_path):
    """Create a SqliteDb instance with default table names."""
    # tmp_path gives each test an isolated on-disk database file.
    db_file = str(tmp_path / "test.db")
    return SqliteDb(
        db_file=db_file,
        traces_table="agno_traces",
        spans_table="agno_spans",
    )


@pytest.fixture
def sqlite_db_custom(tmp_path):
    """Create a SqliteDb instance with custom table names."""
    db_file = str(tmp_path / "test_custom.db")
    return SqliteDb(
        db_file=db_file,
        traces_table="custom_traces",
        spans_table="custom_spans",
    )
def test_sqlite_default_trace_table_name(sqlite_db_default):
    """The default traces table name is used verbatim."""
    assert sqlite_db_default.trace_table_name == "agno_traces"


def test_sqlite_custom_trace_table_name(sqlite_db_custom):
    """A caller-supplied traces table name is used verbatim."""
    assert sqlite_db_custom.trace_table_name == "custom_traces"


def test_sqlite_create_span_table_with_default_fk(sqlite_db_default):
    """Creating the spans table wires its FK to the default traces table."""
    # The traces table must exist first so the FK has a valid target.
    sqlite_db_default._get_table(table_type="traces", create_table_if_not_found=True)
    table = sqlite_db_default._create_table("agno_spans", "spans")
    assert table is not None
    assert table.name == "agno_spans"
    fks = list(table.c.trace_id.foreign_keys)
    assert len(fks) == 1
    assert "agno_traces.trace_id" in str(fks[0].target_fullname)


def test_sqlite_create_span_table_with_custom_fk(sqlite_db_custom):
    """Creating the spans table wires its FK to a custom traces table."""
    # The traces table must exist first so the FK has a valid target.
    sqlite_db_custom._get_table(table_type="traces", create_table_if_not_found=True)
    table = sqlite_db_custom._create_table("custom_spans", "spans")
    assert table is not None
    assert table.name == "custom_spans"
    fks = list(table.c.trace_id.foreign_keys)
    assert len(fks) == 1
    assert "custom_traces.trace_id" in str(fks[0].target_fullname)
# ==================== PostgreSQL Database Integration Tests ====================
@pytest.fixture
def mock_engine():
    """Create a mock SQLAlchemy engine."""
    engine = Mock(spec=Engine)
    # A fake URL string is enough; no connection is ever opened in these tests.
    engine.url = "postgresql://fake:///url"
    return engine


@pytest.fixture
def mock_session():
    """Create a mock session."""
    session = Mock(spec=Session)
    # Support `with session:` usage.
    session.__enter__ = Mock(return_value=session)
    session.__exit__ = Mock(return_value=None)
    session.begin = Mock()
    # Mock() returns the same child object on every call, so configuring the
    # object returned by session.begin() once also covers every later
    # `with session.begin():` block.
    session.begin().__enter__ = Mock(return_value=session)
    session.begin().__exit__ = Mock(return_value=None)
    return session
@pytest.fixture
def postgres_db_default(mock_engine):
    """Create a PostgresDb instance with default table names."""
    return PostgresDb(
        db_engine=mock_engine,
        db_schema="ai",
        traces_table="agno_traces",
        spans_table="agno_spans",
    )


@pytest.fixture
def postgres_db_custom(mock_engine):
    """Create a PostgresDb instance with custom table names."""
    return PostgresDb(
        db_engine=mock_engine,
        db_schema="custom_schema",
        traces_table="custom_traces",
        spans_table="custom_spans",
    )


def test_postgres_default_trace_table_name(postgres_db_default):
    """Test default traces table name is used."""
    assert postgres_db_default.trace_table_name == "agno_traces"


def test_postgres_custom_trace_table_name(postgres_db_custom):
    """Test custom traces table name is used."""
    assert postgres_db_custom.trace_table_name == "custom_traces"


def test_postgres_custom_db_schema(postgres_db_custom):
    """Test custom db schema is used."""
    assert postgres_db_custom.db_schema == "custom_schema"
def test_postgres_create_span_table_with_default_fk(postgres_db_default, mock_session):
    """Test span table creation uses default traces table in FK."""
    # Inject the mock session so no real database connection is needed.
    postgres_db_default.Session = Mock(return_value=mock_session)
    # Stub out DDL execution and availability checks; only metadata is under test.
    with patch.object(Table, "create"):
        with patch("agno.db.postgres.postgres.create_schema"):
            with patch("agno.db.postgres.postgres.is_table_available", return_value=False):
                table = postgres_db_default._create_table("agno_spans", "spans")
    assert table is not None
    assert table.name == "agno_spans"
    # Verify foreign key references default traces table with schema
    trace_id_col = table.c.trace_id
    assert len(trace_id_col.foreign_keys) == 1
    fk = list(trace_id_col.foreign_keys)[0]
    # FK should reference ai.agno_traces.trace_id
    fk_target = str(fk.target_fullname)
    assert "agno_traces" in fk_target
    assert "trace_id" in fk_target


def test_postgres_create_span_table_with_custom_fk(postgres_db_custom, mock_session):
    """Test span table creation uses custom traces table and schema in FK."""
    # Inject the mock session so no real database connection is needed.
    postgres_db_custom.Session = Mock(return_value=mock_session)
    with patch.object(Table, "create"):
        with patch("agno.db.postgres.postgres.create_schema"):
            with patch("agno.db.postgres.postgres.is_table_available", return_value=False):
                table = postgres_db_custom._create_table("custom_spans", "spans")
    assert table is not None
    assert table.name == "custom_spans"
    # Verify foreign key references custom traces table with custom schema
    trace_id_col = table.c.trace_id
    assert len(trace_id_col.foreign_keys) == 1
    fk = list(trace_id_col.foreign_keys)[0]
    # FK should reference custom_schema.custom_traces.trace_id
    fk_target = str(fk.target_fullname)
    assert "custom_traces" in fk_target
    assert "trace_id" in fk_target
# ==================== Regression Tests ====================
def test_sqlite_span_fk_not_hardcoded():
    """SQLite span FK must track the traces table name, not a constant."""
    fk_a = get_sqlite_schema("spans", traces_table_name="table_a")["trace_id"]["foreign_key"]
    fk_b = get_sqlite_schema("spans", traces_table_name="table_b")["trace_id"]["foreign_key"]
    assert fk_a != fk_b
    assert "table_a" in fk_a
    assert "table_b" in fk_b


def test_postgres_span_fk_not_hardcoded():
    """Postgres span FK must track both the traces table and db schema."""
    schema_a = get_postgres_schema("spans", traces_table_name="table_a", db_schema="schema_a")
    schema_b = get_postgres_schema("spans", traces_table_name="table_b", db_schema="schema_b")
    fk_a = schema_a["trace_id"]["foreign_key"]
    fk_b = schema_b["trace_id"]["foreign_key"]
    assert fk_a != fk_b
    assert "schema_a.table_a" in fk_a
    assert "schema_b.table_b" in fk_b


def test_sqlite_span_fk_format():
    """SQLite FK is table.column with no schema prefix."""
    schema = get_sqlite_schema("spans", traces_table_name="my_traces")
    fk = schema["trace_id"]["foreign_key"]
    assert fk == "my_traces.trace_id"
    # Exactly one dot -> table.column, never schema.table.column.
    assert fk.count(".") == 1


def test_postgres_span_fk_format():
    """Postgres FK is schema.table.column."""
    schema = get_postgres_schema("spans", traces_table_name="my_traces", db_schema="my_schema")
    fk = schema["trace_id"]["foreign_key"]
    assert fk == "my_schema.my_traces.trace_id"
    # Exactly two dots -> schema.table.column.
    assert fk.count(".") == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_span_schema.py",
"license": "Apache License 2.0",
"lines": 260,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/skills/agent_skills.py | import json
import subprocess
from pathlib import Path
from typing import Dict, List, Optional
from agno.skills.errors import SkillValidationError
from agno.skills.loaders.base import SkillLoader
from agno.skills.skill import Skill
from agno.skills.utils import is_safe_path, read_file_safe, run_script
from agno.tools.function import Function
from agno.utils.log import log_debug, log_warning
class Skills:
    """Orchestrates skill loading and provides tools for agents to access skills.

    The Skills class is responsible for:
    1. Loading skills from various sources (loaders)
    2. Providing methods to access loaded skills
    3. Generating tools for agents to use skills
    4. Creating system prompt snippets with available skills metadata

    Args:
        loaders: List of SkillLoader instances to load skills from.
    """

    def __init__(self, loaders: List[SkillLoader]):
        self.loaders = loaders
        self._skills: Dict[str, Skill] = {}
        self._load_skills()

    @staticmethod
    def _entry_names(entries) -> List[str]:
        """Normalize script/reference entries to plain names.

        Skill.scripts and Skill.references may contain either plain string
        names or dicts with a "name" key (both shapes are rendered by
        get_system_prompt_snippet). Membership checks must therefore compare
        against the normalized names, not the raw entries.
        """
        return [entry["name"] if isinstance(entry, dict) else entry for entry in entries or []]

    def _skill_not_found(self, skill_name: str) -> str:
        """Build the standard JSON error payload for an unknown skill."""
        return json.dumps(
            {
                "error": f"Skill '{skill_name}' not found",
                "available_skills": ", ".join(self.get_skill_names()),
            }
        )

    def _load_skills(self) -> None:
        """Load skills from all loaders.

        Raises:
            SkillValidationError: If any skill fails validation.
        """
        for loader in self.loaders:
            try:
                skills = loader.load()
                for skill in skills:
                    if skill.name in self._skills:
                        log_warning(f"Duplicate skill name '{skill.name}', overwriting with newer version")
                    self._skills[skill.name] = skill
            except SkillValidationError:
                raise  # Re-raise validation errors as hard failures
            except Exception as e:
                # A broken loader must not prevent other loaders from contributing.
                log_warning(f"Error loading skills from {loader}: {e}")
        log_debug(f"Loaded {len(self._skills)} total skills")

    def reload(self) -> None:
        """Reload skills from all loaders, clearing existing skills.

        Raises:
            SkillValidationError: If any skill fails validation.
        """
        self._skills.clear()
        self._load_skills()

    def get_skill(self, name: str) -> Optional[Skill]:
        """Get a skill by name.

        Args:
            name: The name of the skill to retrieve.

        Returns:
            The Skill object if found, None otherwise.
        """
        return self._skills.get(name)

    def get_all_skills(self) -> List[Skill]:
        """Get all loaded skills.

        Returns:
            A list of all loaded Skill objects.
        """
        return list(self._skills.values())

    def get_skill_names(self) -> List[str]:
        """Get the names of all loaded skills.

        Returns:
            A list of skill names.
        """
        return list(self._skills.keys())

    def get_system_prompt_snippet(self) -> str:
        """Generate a system prompt snippet with available skills metadata.

        This creates an XML-formatted snippet that provides the agent with
        information about available skills without including the full instructions.

        Returns:
            An XML-formatted string with skills metadata.
        """
        if not self._skills:
            return ""
        lines = [
            "<skills_system>",
            "",
            "## What are Skills?",
            "Skills are packages of domain expertise that extend your capabilities. Each skill contains:",
            "- **Instructions**: Detailed guidance on when and how to apply the skill",
            "- **Scripts**: Executable code templates you can use or adapt",
            "- **References**: Supporting documentation (guides, cheatsheets, examples)",
            "",
            "## IMPORTANT: How to Use Skills",
            "**Skill names are NOT callable functions.** You cannot call a skill directly by its name.",
            "Instead, you MUST use the provided skill access tools:",
            "",
            "1. `get_skill_instructions(skill_name)` - Load the full instructions for a skill",
            "2. `get_skill_reference(skill_name, reference_path)` - Access specific documentation",
            "3. `get_skill_script(skill_name, script_path, execute=False)` - Read or run scripts",
            "",
            "## Progressive Discovery Workflow",
            "1. **Browse**: Review the skill summaries below to understand what's available",
            "2. **Load**: When a task matches a skill, call `get_skill_instructions(skill_name)` first",
            "3. **Reference**: Use `get_skill_reference` to access specific documentation as needed",
            "4. **Scripts**: Use `get_skill_script` to read or execute scripts from a skill",
            "",
            "**IMPORTANT**: References are documentation files (NOT executable). Only use `get_skill_script` when `<scripts>` lists actual script files. If `<scripts>none</scripts>`, do NOT call `get_skill_script`.",
            "",
            "This approach ensures you only load detailed instructions when actually needed.",
            "",
            "## Available Skills",
        ]
        for skill in self._skills.values():
            lines.append("<skill>")
            lines.append(f" <name>{skill.name}</name>")
            lines.append(f" <description>{skill.description}</description>")
            if skill.scripts:
                script_names = self._entry_names(skill.scripts)
                lines.append(f" <scripts>{', '.join(script_names)}</scripts>")
            else:
                # Explicitly indicate no scripts to prevent model confusion
                lines.append(" <scripts>none</scripts>")
            if skill.references:
                ref_names = self._entry_names(skill.references)
                lines.append(f" <references>{', '.join(ref_names)}</references>")
            lines.append("</skill>")
            lines.append("")
        lines.append("</skills_system>")
        return "\n".join(lines)

    def get_tools(self) -> List[Function]:
        """Get the tools for accessing skills.

        Returns:
            A list of Function objects that agents can use to access skills.
        """
        tools: List[Function] = []
        # Tool: get_skill_instructions
        tools.append(
            Function(
                name="get_skill_instructions",
                description="Load the full instructions for a skill. Use this when you need to follow a skill's guidance.",
                entrypoint=self._get_skill_instructions,
            )
        )
        # Tool: get_skill_reference
        tools.append(
            Function(
                name="get_skill_reference",
                description="Load a reference document from a skill's references. Use this to access detailed documentation.",
                entrypoint=self._get_skill_reference,
            )
        )
        # Tool: get_skill_script
        tools.append(
            Function(
                name="get_skill_script",
                description="Read or execute a script from a skill. Set execute=True to run the script and get output, or execute=False (default) to read the script content.",
                entrypoint=self._get_skill_script,
            )
        )
        return tools

    def _get_skill_instructions(self, skill_name: str) -> str:
        """Load the full instructions for a skill.

        Args:
            skill_name: The name of the skill to get instructions for.

        Returns:
            A JSON string with the skill's instructions and metadata.
        """
        skill = self.get_skill(skill_name)
        if skill is None:
            return self._skill_not_found(skill_name)
        return json.dumps(
            {
                "skill_name": skill.name,
                "description": skill.description,
                "instructions": skill.instructions,
                "available_scripts": skill.scripts,
                "available_references": skill.references,
            }
        )

    def _get_skill_reference(self, skill_name: str, reference_path: str) -> str:
        """Load a reference document from a skill.

        Args:
            skill_name: The name of the skill.
            reference_path: The filename of the reference document.

        Returns:
            A JSON string with the reference content.
        """
        skill = self.get_skill(skill_name)
        if skill is None:
            return self._skill_not_found(skill_name)
        # Compare against normalized names so dict-shaped reference entries
        # (the shape also supported by get_system_prompt_snippet) are accepted.
        if reference_path not in self._entry_names(skill.references):
            return json.dumps(
                {
                    "error": f"Reference '{reference_path}' not found in skill '{skill_name}'",
                    "available_references": skill.references,
                }
            )
        # Validate path to prevent path traversal attacks
        refs_dir = Path(skill.source_path) / "references"
        if not is_safe_path(refs_dir, reference_path):
            return json.dumps(
                {
                    "error": f"Invalid reference path: '{reference_path}'",
                    "skill_name": skill_name,
                }
            )
        # Load the reference file
        ref_file = refs_dir / reference_path
        try:
            content = read_file_safe(ref_file)
            return json.dumps(
                {
                    "skill_name": skill_name,
                    "reference_path": reference_path,
                    "content": content,
                }
            )
        except Exception as e:
            return json.dumps(
                {
                    "error": f"Error reading reference file: {e}",
                    "skill_name": skill_name,
                    "reference_path": reference_path,
                }
            )

    def _get_skill_script(
        self,
        skill_name: str,
        script_path: str,
        execute: bool = False,
        args: Optional[List[str]] = None,
        timeout: int = 30,
    ) -> str:
        """Read or execute a script from a skill.

        Args:
            skill_name: The name of the skill.
            script_path: The filename of the script.
            execute: If True, execute the script. If False (default), return content.
            args: Optional list of arguments to pass to the script (only used if execute=True).
            timeout: Maximum execution time in seconds (default: 30, only used if execute=True).

        Returns:
            A JSON string with either the script content or execution results.
        """
        skill = self.get_skill(skill_name)
        if skill is None:
            return self._skill_not_found(skill_name)
        # Compare against normalized names so dict-shaped script entries
        # (the shape also supported by get_system_prompt_snippet) are accepted.
        if script_path not in self._entry_names(skill.scripts):
            return json.dumps(
                {
                    "error": f"Script '{script_path}' not found in skill '{skill_name}'",
                    "available_scripts": skill.scripts,
                }
            )
        # Validate path to prevent path traversal attacks
        scripts_dir = Path(skill.source_path) / "scripts"
        if not is_safe_path(scripts_dir, script_path):
            return json.dumps(
                {
                    "error": f"Invalid script path: '{script_path}'",
                    "skill_name": skill_name,
                }
            )
        script_file = scripts_dir / script_path
        if not execute:
            # Read mode: return script content
            try:
                content = read_file_safe(script_file)
                return json.dumps(
                    {
                        "skill_name": skill_name,
                        "script_path": script_path,
                        "content": content,
                    }
                )
            except Exception as e:
                return json.dumps(
                    {
                        "error": f"Error reading script file: {e}",
                        "skill_name": skill_name,
                        "script_path": script_path,
                    }
                )
        # Execute mode: run the script
        try:
            result = run_script(
                script_path=script_file,
                args=args,
                timeout=timeout,
                cwd=Path(skill.source_path),
            )
            return json.dumps(
                {
                    "skill_name": skill_name,
                    "script_path": script_path,
                    "stdout": result.stdout,
                    "stderr": result.stderr,
                    "returncode": result.returncode,
                }
            )
        except subprocess.TimeoutExpired:
            return json.dumps(
                {
                    "error": f"Script execution timed out after {timeout} seconds",
                    "skill_name": skill_name,
                    "script_path": script_path,
                }
            )
        except FileNotFoundError as e:
            return json.dumps(
                {
                    "error": f"Interpreter or script not found: {e}",
                    "skill_name": skill_name,
                    "script_path": script_path,
                }
            )
        except Exception as e:
            return json.dumps(
                {
                    "error": f"Error executing script: {e}",
                    "skill_name": skill_name,
                    "script_path": script_path,
                }
            )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/agent_skills.py",
"license": "Apache License 2.0",
"lines": 335,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/skills/errors.py | """Skill-related exceptions."""
from typing import List, Optional
class SkillError(Exception):
    """Base exception for all skill-related errors."""


class SkillParseError(SkillError):
    """Raised when SKILL.md parsing fails."""


class SkillValidationError(SkillError):
    """Raised when skill validation fails.

    Attributes:
        errors: List of validation error messages.
    """

    def __init__(self, message: str, errors: Optional[List[str]] = None):
        super().__init__(message)
        # When no detailed error list is supplied, the message itself is the
        # single recorded error.
        self.errors = [message] if errors is None else errors

    def __str__(self) -> str:
        if len(self.errors) != 1:
            details = "; ".join(self.errors)
            return f"{len(self.errors)} validation errors: {details}"
        return self.errors[0]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/errors.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/skills/loaders/base.py | from abc import ABC, abstractmethod
from typing import List
from agno.skills.skill import Skill
class SkillLoader(ABC):
    """Abstract base class for skill loaders.

    A skill loader is responsible for fetching skills from a particular kind
    of source (local filesystem, GitHub, URLs, etc.) and materializing them
    as Skill objects. Concrete loaders implement `load()` to define how their
    source is read.
    """

    @abstractmethod
    def load(self) -> List[Skill]:
        """Load skills from the source.

        Returns:
            A list of Skill objects loaded from the source.

        Raises:
            SkillLoadError: If there's an error loading skills from the source.
        """
        ...
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/loaders/base.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/skills/loaders/local.py | import re
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from agno.skills.errors import SkillValidationError
from agno.skills.loaders.base import SkillLoader
from agno.skills.skill import Skill
from agno.skills.validator import validate_skill_directory
from agno.utils.log import log_debug, log_warning
class LocalSkills(SkillLoader):
"""Loads skills from the local filesystem.
This loader can handle both:
1. A single skill folder (contains SKILL.md)
2. A directory containing multiple skill folders
Args:
path: Path to a skill folder or directory containing skill folders.
validate: Whether to validate skills against the Agent Skills spec.
If True (default), invalid skills will raise SkillValidationError.
"""
    def __init__(self, path: str, validate: bool = True):
        # Resolve to an absolute path up front so later existence checks and
        # log messages are unambiguous regardless of the process working dir.
        self.path = Path(path).resolve()
        self.validate = validate
def load(self) -> List[Skill]:
"""Load skills from the local filesystem.
Returns:
A list of Skill objects loaded from the filesystem.
Raises:
FileNotFoundError: If the path doesn't exist.
"""
if not self.path.exists():
raise FileNotFoundError(f"Skills path does not exist: {self.path}")
skills: List[Skill] = []
# Check if this is a single skill folder or a directory of skills
skill_md_path = self.path / "SKILL.md"
if skill_md_path.exists():
# Single skill folder
skill = self._load_skill_from_folder(self.path)
if skill:
skills.append(skill)
else:
# Directory of skill folders
for item in self.path.iterdir():
if item.is_dir() and not item.name.startswith("."):
skill_md = item / "SKILL.md"
if skill_md.exists():
skill = self._load_skill_from_folder(item)
if skill:
skills.append(skill)
else:
log_debug(f"Skipping directory without SKILL.md: {item}")
log_debug(f"Loaded {len(skills)} skills from {self.path}")
return skills
def _load_skill_from_folder(self, folder: Path) -> Optional[Skill]:
"""Load a single skill from a folder.
Args:
folder: Path to the skill folder.
Returns:
A Skill object if successful, None if there's an error.
Raises:
SkillValidationError: If validation is enabled and the skill is invalid.
"""
# Validate skill directory structure and content if validation is enabled
if self.validate:
errors = validate_skill_directory(folder)
if errors:
raise SkillValidationError(
f"Skill validation failed for '{folder.name}'",
errors=errors,
)
skill_md_path = folder / "SKILL.md"
try:
content = skill_md_path.read_text(encoding="utf-8")
frontmatter, instructions = self._parse_skill_md(content)
# Get skill name from the frontmatter or folder name
name = frontmatter.get("name", folder.name)
description = frontmatter.get("description", "")
# Get optional fields
license_info = frontmatter.get("license")
metadata = frontmatter.get("metadata")
compatibility = frontmatter.get("compatibility")
allowed_tools = frontmatter.get("allowed-tools")
# Discover scripts
scripts = self._discover_scripts(folder)
# Discover references
references = self._discover_references(folder)
return Skill(
name=name,
description=description,
instructions=instructions,
source_path=str(folder),
scripts=scripts,
references=references,
metadata=metadata,
license=license_info,
compatibility=compatibility,
allowed_tools=allowed_tools,
)
except SkillValidationError:
raise # Re-raise validation errors
except Exception as e:
log_warning(f"Error loading skill from {folder}: {e}")
return None
def _parse_skill_md(self, content: str) -> Tuple[Dict[str, Any], str]:
"""Parse SKILL.md content into frontmatter and instructions.
Args:
content: The raw SKILL.md content.
Returns:
A tuple of (frontmatter_dict, instructions_body).
"""
frontmatter: Dict[str, Any] = {}
instructions = content
# Check for YAML frontmatter (between --- delimiters)
frontmatter_match = re.match(r"^---\s*\n(.*?)\n---\s*\n?(.*)$", content, re.DOTALL)
if frontmatter_match:
frontmatter_text = frontmatter_match.group(1)
instructions = frontmatter_match.group(2).strip()
# Parse YAML frontmatter
try:
import yaml
frontmatter = yaml.safe_load(frontmatter_text) or {}
except ImportError:
# Fallback: simple key-value parsing if yaml not available
frontmatter = self._parse_simple_frontmatter(frontmatter_text)
except Exception as e:
log_warning(f"Error parsing YAML frontmatter: {e}")
frontmatter = self._parse_simple_frontmatter(frontmatter_text)
return frontmatter, instructions
def _parse_simple_frontmatter(self, text: str) -> Dict[str, Any]:
"""Simple fallback frontmatter parser for basic key: value pairs.
Args:
text: The frontmatter text.
Returns:
A dictionary of parsed key-value pairs.
"""
result: Dict[str, Any] = {}
for line in text.strip().split("\n"):
if ":" in line:
key, value = line.split(":", 1)
key = key.strip()
value = value.strip().strip('"').strip("'")
result[key] = value
return result
def _discover_scripts(self, folder: Path) -> List[str]:
"""Discover script files in the scripts/ subdirectory.
Args:
folder: Path to the skill folder.
Returns:
A list of script filenames.
"""
scripts_dir = folder / "scripts"
if not scripts_dir.exists() or not scripts_dir.is_dir():
return []
scripts: List[str] = []
for item in scripts_dir.iterdir():
if item.is_file() and not item.name.startswith("."):
scripts.append(item.name)
return sorted(scripts)
def _discover_references(self, folder: Path) -> List[str]:
"""Discover reference files in the references/ subdirectory.
Args:
folder: Path to the skill folder.
Returns:
A list of reference filenames.
"""
refs_dir = folder / "references"
if not refs_dir.exists() or not refs_dir.is_dir():
return []
references: List[str] = []
for item in refs_dir.iterdir():
if item.is_file() and not item.name.startswith("."):
references.append(item.name)
return sorted(references)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/loaders/local.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/skills/skill.py | from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
@dataclass
class Skill:
    """A single agent skill: instructions plus optional assets.

    A skill bundles structured guidance for the agent (the SKILL.md body)
    together with optional scripts and reference documents discovered in
    the skill folder.

    Attributes:
        name: Unique skill name (from folder name or SKILL.md frontmatter)
        description: Short description of what the skill does
        instructions: Full SKILL.md body (the instructions/guidance for the agent)
        source_path: Filesystem path to the skill folder
        scripts: List of script filenames in scripts/ subdirectory
        references: List of reference filenames in references/ subdirectory
        metadata: Optional metadata from frontmatter (version, author, tags, etc.)
        license: Optional license information
        compatibility: Optional compatibility requirements
        allowed_tools: Optional list of tools this skill is allowed to use
    """

    name: str
    description: str
    instructions: str
    source_path: str
    scripts: List[str] = field(default_factory=list)
    references: List[str] = field(default_factory=list)
    metadata: Optional[Dict[str, Any]] = None
    license: Optional[str] = None
    compatibility: Optional[str] = None
    allowed_tools: Optional[List[str]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this skill into a plain dictionary."""
        return dict(
            name=self.name,
            description=self.description,
            instructions=self.instructions,
            source_path=self.source_path,
            scripts=self.scripts,
            references=self.references,
            metadata=self.metadata,
            license=self.license,
            compatibility=self.compatibility,
            allowed_tools=self.allowed_tools,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Skill":
        """Build a Skill from a dictionary (inverse of ``to_dict``).

        Missing optional keys fall back to the dataclass defaults; missing
        required keys raise KeyError.
        """
        required = {key: data[key] for key in ("name", "description", "instructions", "source_path")}
        return cls(
            scripts=data.get("scripts", []),
            references=data.get("references", []),
            metadata=data.get("metadata"),
            license=data.get("license"),
            compatibility=data.get("compatibility"),
            allowed_tools=data.get("allowed_tools"),
            **required,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/skill.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/skills/utils.py | """Utility functions for the skills module."""
import os
import platform
import stat
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
def is_safe_path(base_dir: Path, requested_path: str) -> bool:
    """Return True when *requested_path* resolves inside *base_dir*.

    Guards against path-traversal input such as '../../../etc/passwd'
    escaping the directory we intend to serve files from.

    Args:
        base_dir: The base directory that the path must stay within.
        requested_path: The user-provided path to validate.

    Returns:
        True if the path is safe (stays within base_dir), False otherwise.
    """
    try:
        resolved_base = base_dir.resolve()
        candidate = (base_dir / requested_path).resolve()
        # relative_to raises ValueError when candidate escapes the base.
        candidate.relative_to(resolved_base)
    except (ValueError, OSError):
        return False
    return True
def ensure_executable(file_path: Path) -> None:
    """Set the owner-execute bit on *file_path* if it is not already set.

    Args:
        file_path: Path to the file to make executable.
    """
    mode = file_path.stat().st_mode
    if mode & stat.S_IXUSR:
        return  # already executable by owner; nothing to do
    file_path.chmod(mode | stat.S_IXUSR)
def parse_shebang(script_path: Path) -> Optional[str]:
    """Extract the interpreter name from a script's shebang line.

    Handles the common forms:
    - #!/usr/bin/env python3 -> "python3"
    - #!/usr/bin/python3 -> "python3"
    - #!/bin/bash -> "bash"
    - #!/usr/bin/env -S node -> "node"

    Args:
        script_path: Path to the script file.

    Returns:
        The interpreter name (e.g., "python3", "bash") or None if the file
        cannot be read or has no usable shebang.
    """
    try:
        with script_path.open("r", encoding="utf-8") as fh:
            header = fh.readline().strip()
    except (OSError, UnicodeDecodeError):
        return None
    if not header.startswith("#!"):
        return None
    tokens = header[2:].split()
    if not tokens:
        return None
    command = Path(tokens[0]).name
    if command != "env":
        # Direct interpreter path such as '#!/usr/bin/python3'.
        return command
    # '#!/usr/bin/env [-flags] interpreter' form: skip env's own flags and
    # return the first real interpreter token (None when only flags remain).
    return next((tok for tok in tokens[1:] if not tok.startswith("-")), None)
def get_interpreter_command(interpreter: str) -> List[str]:
    """Translate a shebang interpreter name into an invocable command.

    Python variants map to the currently running interpreter so scripts
    work on hosts (notably Windows) where 'python3' may not be on PATH;
    every other interpreter (bash, sh, node, ruby, perl, ...) is passed
    through unchanged and must be resolvable on PATH.

    Args:
        interpreter: The interpreter name from shebang (e.g., "python3", "bash").

    Returns:
        A list representing the command to invoke the interpreter.
    """
    python_aliases = {"python", "python2", "python3"}
    if interpreter.lower() in python_aliases:
        return [sys.executable]
    return [interpreter]
def _build_windows_command(script_path: Path, args: List[str]) -> List[str]:
    """Compose the argv list for running *script_path* on Windows.

    Windows does not process shebang lines, so the interpreter is resolved
    from the shebang and invoked explicitly. When no shebang is present we
    fall back to direct execution, which may fail but yields a clear error.

    Args:
        script_path: Path to the script file.
        args: Arguments to pass to the script.

    Returns:
        A list representing the full command to execute.
    """
    interpreter = parse_shebang(script_path)
    if interpreter is None:
        return [str(script_path), *args]
    return [*get_interpreter_command(interpreter), str(script_path), *args]
@dataclass
class ScriptResult:
    """Result of a script execution."""

    # Captured standard output of the script (decoded text).
    stdout: str
    # Captured standard error of the script (decoded text).
    stderr: str
    # Process exit status; 0 conventionally indicates success.
    returncode: int
def run_script(
    script_path: Path,
    args: Optional[List[str]] = None,
    timeout: int = 30,
    cwd: Optional[Path] = None,
) -> ScriptResult:
    """Execute a script and capture its output.

    On Unix-like systems the script runs directly via its shebang (after
    the owner-execute bit is ensured). On Windows, which does not honor
    shebang lines, the interpreter is resolved from the shebang and
    invoked explicitly.

    Args:
        script_path: Path to the script to execute.
        args: Optional list of arguments to pass to the script.
        timeout: Maximum execution time in seconds.
        cwd: Working directory for the script.

    Returns:
        ScriptResult with stdout, stderr, and returncode.

    Raises:
        subprocess.TimeoutExpired: If script exceeds timeout.
        FileNotFoundError: If script or interpreter not found.
    """
    argv = list(args) if args else []
    if platform.system() == "Windows":
        cmd = _build_windows_command(script_path, argv)
    else:
        ensure_executable(script_path)
        cmd = [str(script_path), *argv]
    completed = subprocess.run(
        cmd,
        capture_output=True,
        text=True,
        timeout=timeout,
        cwd=cwd,
    )
    return ScriptResult(
        stdout=completed.stdout,
        stderr=completed.stderr,
        returncode=completed.returncode,
    )
def read_file_safe(file_path: Path, encoding: str = "utf-8") -> str:
    """Return the full text content of *file_path*.

    Args:
        file_path: Path to the file to read.
        encoding: File encoding (default: utf-8).

    Returns:
        The file contents as a string.

    Raises:
        FileNotFoundError: If file doesn't exist.
        PermissionError: If file can't be read.
        UnicodeDecodeError: If file can't be decoded.
    """
    with file_path.open("r", encoding=encoding) as fh:
        return fh.read()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/utils.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/skills/validator.py | """Skill validation logic following the Agent Skills spec."""
import unicodedata
from pathlib import Path
from typing import Dict, List, Optional
# Constants per Agent Skills Spec
# Hard limits (in characters) enforced on SKILL.md frontmatter fields.
MAX_SKILL_NAME_LENGTH = 64
MAX_DESCRIPTION_LENGTH = 1024
MAX_COMPATIBILITY_LENGTH = 500
# Allowed frontmatter fields per Agent Skills Spec
# Any other key found in SKILL.md frontmatter is rejected by validation.
ALLOWED_FIELDS = {
    "name",
    "description",
    "license",
    "allowed-tools",
    "metadata",
    "compatibility",
}
def _validate_name(name: str, skill_dir: Optional[Path] = None) -> List[str]:
    """Validate skill name format and directory match.

    Skill names support alphanumeric characters plus hyphens; they must be
    lowercase and cannot start/end with (or double up) hyphens.

    Args:
        name: The skill name to validate.
        skill_dir: Optional path to skill directory (for name-directory match check).

    Returns:
        List of validation error messages. Empty list means valid.
    """
    if not name or not isinstance(name, str) or not name.strip():
        return ["Field 'name' must be a non-empty string"]
    # Normalize so visually-identical Unicode forms compare equal.
    normalized = unicodedata.normalize("NFKC", name.strip())
    problems: List[str] = []
    if len(normalized) > MAX_SKILL_NAME_LENGTH:
        problems.append(
            f"Skill name '{normalized}' exceeds {MAX_SKILL_NAME_LENGTH} character limit ({len(normalized)} chars)"
        )
    if normalized != normalized.lower():
        problems.append(f"Skill name '{normalized}' must be lowercase")
    if normalized.startswith("-") or normalized.endswith("-"):
        problems.append("Skill name cannot start or end with a hyphen")
    if "--" in normalized:
        problems.append("Skill name cannot contain consecutive hyphens")
    if any(not (ch.isalnum() or ch == "-") for ch in normalized):
        problems.append(
            f"Skill name '{normalized}' contains invalid characters. Only letters, digits, and hyphens are allowed."
        )
    if skill_dir:
        # The folder on disk must match the declared name (same normalization).
        dir_name = unicodedata.normalize("NFKC", skill_dir.name)
        if dir_name != normalized:
            problems.append(f"Directory name '{dir_name}' must match skill name '{normalized}'")
    return problems
def _validate_description(description: str) -> List[str]:
    """Check that the description is a non-empty string within the length cap.

    Args:
        description: The skill description to validate.

    Returns:
        List of validation error messages. Empty list means valid.
    """
    if not description or not isinstance(description, str) or not description.strip():
        return ["Field 'description' must be a non-empty string"]
    if len(description) > MAX_DESCRIPTION_LENGTH:
        return [f"Description exceeds {MAX_DESCRIPTION_LENGTH} character limit ({len(description)} chars)"]
    return []
def _validate_compatibility(compatibility: str) -> List[str]:
    """Check that compatibility is a string within the length cap.

    Args:
        compatibility: The compatibility string to validate.

    Returns:
        List of validation error messages. Empty list means valid.
    """
    if not isinstance(compatibility, str):
        return ["Field 'compatibility' must be a string"]
    if len(compatibility) > MAX_COMPATIBILITY_LENGTH:
        return [f"Compatibility exceeds {MAX_COMPATIBILITY_LENGTH} character limit ({len(compatibility)} chars)"]
    return []
def _validate_license(license_val: str) -> List[str]:
"""Validate license field.
Args:
license_val: The license string to validate.
Returns:
List of validation error messages. Empty list means valid.
"""
errors: List[str] = []
if not isinstance(license_val, str):
errors.append("Field 'license' must be a string")
return errors
def _validate_allowed_tools(allowed_tools) -> List[str]:
"""Validate allowed-tools field.
Args:
allowed_tools: The allowed-tools value to validate.
Returns:
List of validation error messages. Empty list means valid.
"""
errors = []
if not isinstance(allowed_tools, list):
errors.append("Field 'allowed-tools' must be a list")
return errors
if not all(isinstance(tool, str) for tool in allowed_tools):
errors.append("Field 'allowed-tools' must be a list of strings")
return errors
def _validate_metadata_value(metadata_val) -> List[str]:
"""Validate metadata field value.
Args:
metadata_val: The metadata value to validate.
Returns:
List of validation error messages. Empty list means valid.
"""
errors = []
if not isinstance(metadata_val, dict):
errors.append("Field 'metadata' must be a dictionary")
return errors
def _validate_metadata_fields(metadata: Dict) -> List[str]:
    """Reject any frontmatter keys outside the spec's allowed set.

    Args:
        metadata: Parsed frontmatter dictionary.

    Returns:
        List of validation error messages. Empty list means valid.
    """
    unknown = set(metadata) - ALLOWED_FIELDS
    if not unknown:
        return []
    return [
        f"Unexpected fields in frontmatter: {', '.join(sorted(unknown))}. "
        f"Only {sorted(ALLOWED_FIELDS)} are allowed."
    ]
def validate_metadata(metadata: Dict, skill_dir: Optional[Path] = None) -> List[str]:
    """Validate parsed skill metadata.

    Core validation working on already-parsed frontmatter: unknown fields,
    the two required fields (name, description) and each optional field's
    format. Error order matches the spec-field order above.

    Args:
        metadata: Parsed YAML frontmatter dictionary.
        skill_dir: Optional path to skill directory (for name-directory match check).

    Returns:
        List of validation error messages. Empty list means valid.
    """
    errors: List[str] = list(_validate_metadata_fields(metadata))
    # Required fields: report absence, otherwise validate the value.
    if "name" in metadata:
        errors += _validate_name(metadata["name"], skill_dir)
    else:
        errors.append("Missing required field in frontmatter: name")
    if "description" in metadata:
        errors += _validate_description(metadata["description"])
    else:
        errors.append("Missing required field in frontmatter: description")
    # Optional fields are validated only when present.
    optional_checks = (
        ("compatibility", _validate_compatibility),
        ("license", _validate_license),
        ("allowed-tools", _validate_allowed_tools),
        ("metadata", _validate_metadata_value),
    )
    for key, check in optional_checks:
        if key in metadata:
            errors += check(metadata[key])
    return errors
def validate_skill_directory(skill_dir: Path) -> List[str]:
    """Validate a skill directory structure and contents.

    Checks existence, the presence of SKILL.md, well-formed YAML
    frontmatter, and then delegates field validation to validate_metadata.

    Args:
        skill_dir: Path to the skill directory.

    Returns:
        List of validation error messages. Empty list means valid.
    """
    # NOTE(review): imported lazily rather than at module top level —
    # presumably to keep yaml/errors out of the import-time dependencies;
    # confirm before moving these to the top of the file.
    import yaml
    from agno.skills.errors import SkillParseError
    # Accept str as well as Path callers.
    skill_dir = Path(skill_dir)
    if not skill_dir.exists():
        return [f"Path does not exist: {skill_dir}"]
    if not skill_dir.is_dir():
        return [f"Not a directory: {skill_dir}"]
    # Find SKILL.md file (only uppercase supported)
    skill_md = skill_dir / "SKILL.md"
    if not skill_md.exists():
        return ["Missing required file: SKILL.md"]
    # Parse frontmatter
    try:
        content = skill_md.read_text(encoding="utf-8")
        if not content.startswith("---"):
            raise SkillParseError("SKILL.md must start with YAML frontmatter (---)")
        # split("---", 2) yields ["", frontmatter, body] for a well-formed file.
        parts = content.split("---", 2)
        if len(parts) < 3:
            raise SkillParseError("SKILL.md frontmatter not properly closed with ---")
        frontmatter_str = parts[1]
        metadata = yaml.safe_load(frontmatter_str)
        # safe_load can return scalars/lists/None for degenerate input.
        if not isinstance(metadata, dict):
            raise SkillParseError("SKILL.md frontmatter must be a YAML mapping")
    except SkillParseError as e:
        # Our own structural errors become user-facing messages verbatim.
        return [str(e)]
    except yaml.YAMLError as e:
        return [f"Invalid YAML in frontmatter: {e}"]
    except Exception as e:
        return [f"Error reading SKILL.md: {e}"]
    return validate_metadata(metadata, skill_dir)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/skills/validator.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/skills/loaders/test_base.py | """Unit tests for SkillLoader abstract base class."""
from abc import ABC
from typing import List
import pytest
from agno.skills.loaders.base import SkillLoader
from agno.skills.skill import Skill
def test_skill_loader_is_abstract() -> None:
    """Test that SkillLoader is an abstract base class."""
    assert issubclass(SkillLoader, ABC)


def test_skill_loader_cannot_be_instantiated() -> None:
    """Test that SkillLoader cannot be instantiated directly."""
    with pytest.raises(TypeError) as exc_info:
        SkillLoader()  # type: ignore
    # CPython's abstract-instantiation error message mentions "abstract".
    assert "abstract" in str(exc_info.value).lower()


def test_skill_loader_requires_load_method() -> None:
    """Test that subclasses must implement load method."""

    class IncompleteLoader(SkillLoader):
        pass

    with pytest.raises(TypeError) as exc_info:
        IncompleteLoader()  # type: ignore
    assert "abstract" in str(exc_info.value).lower()


def test_skill_loader_concrete_implementation() -> None:
    """Test that a complete implementation can be instantiated."""

    class ConcreteLoader(SkillLoader):
        def load(self) -> List[Skill]:
            return []

    loader = ConcreteLoader()
    assert isinstance(loader, SkillLoader)
    assert loader.load() == []


def test_skill_loader_load_returns_list() -> None:
    """Test that load method returns a list of Skills."""

    class TestLoader(SkillLoader):
        def load(self) -> List[Skill]:
            return [
                Skill(
                    name="test-skill",
                    description="Test",
                    instructions="Instructions",
                    source_path="/path",
                )
            ]

    loader = TestLoader()
    result = loader.load()
    assert isinstance(result, list)
    assert len(result) == 1
    assert isinstance(result[0], Skill)
    assert result[0].name == "test-skill"


def test_skill_loader_load_can_return_empty_list() -> None:
    """Test that load method can return an empty list."""

    class EmptyLoader(SkillLoader):
        def load(self) -> List[Skill]:
            return []

    loader = EmptyLoader()
    result = loader.load()
    assert result == []
    assert isinstance(result, list)


def test_skill_loader_load_can_return_multiple_skills() -> None:
    """Test that load method can return multiple skills."""

    class MultiSkillLoader(SkillLoader):
        def load(self) -> List[Skill]:
            return [
                Skill(
                    name="skill-1",
                    description="First skill",
                    instructions="Instructions 1",
                    source_path="/path/1",
                ),
                Skill(
                    name="skill-2",
                    description="Second skill",
                    instructions="Instructions 2",
                    source_path="/path/2",
                ),
            ]

    loader = MultiSkillLoader()
    result = loader.load()
    assert len(result) == 2
    assert result[0].name == "skill-1"
    assert result[1].name == "skill-2"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/skills/loaders/test_base.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/skills/loaders/test_local.py | """Unit tests for LocalSkills loader."""
from pathlib import Path
import pytest
from agno.skills.errors import SkillValidationError
from agno.skills.loaders.base import SkillLoader
from agno.skills.loaders.local import LocalSkills
# --- Initialization Tests ---
# NOTE: the temp_skill_dir fixture used below is defined in this suite's
# conftest (not visible here) — presumably a valid single-skill folder.
def test_local_skills_is_skill_loader() -> None:
    """Test that LocalSkills is a SkillLoader."""
    assert issubclass(LocalSkills, SkillLoader)


def test_local_skills_init_with_string_path(temp_skill_dir: Path) -> None:
    """Test initialization with string path."""
    loader = LocalSkills(str(temp_skill_dir))
    assert loader.path == temp_skill_dir
    assert loader.validate is True


def test_local_skills_init_with_validate_false(temp_skill_dir: Path) -> None:
    """Test initialization with validation disabled."""
    loader = LocalSkills(str(temp_skill_dir), validate=False)
    assert loader.validate is False


def test_local_skills_path_is_resolved(temp_skill_dir: Path) -> None:
    """Test that path is resolved to absolute path."""
    loader = LocalSkills(str(temp_skill_dir))
    assert loader.path.is_absolute()
# --- Single Skill Loading Tests ---
# These tests exercise the single-skill path: temp_skill_dir points directly
# at a folder containing SKILL.md (fixture defined in conftest, not shown).
def test_load_single_skill_directory(temp_skill_dir: Path) -> None:
    """Test loading a single skill from its directory."""
    loader = LocalSkills(str(temp_skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    skill = skills[0]
    assert skill.name == "test-skill"
    assert skill.description == "A test skill for unit testing"
    assert "Test Skill Instructions" in skill.instructions
    assert skill.source_path == str(temp_skill_dir)


def test_load_skill_with_scripts(temp_skill_dir: Path) -> None:
    """Test that scripts are discovered correctly."""
    loader = LocalSkills(str(temp_skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    skill = skills[0]
    assert "helper.py" in skill.scripts
    assert "runner.sh" in skill.scripts


def test_load_skill_with_references(temp_skill_dir: Path) -> None:
    """Test that references are discovered correctly."""
    loader = LocalSkills(str(temp_skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    skill = skills[0]
    assert "guide.md" in skill.references
    assert "api-docs.md" in skill.references


def test_load_skill_parses_metadata(temp_skill_dir: Path) -> None:
    """Test that metadata from frontmatter is parsed."""
    loader = LocalSkills(str(temp_skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    skill = skills[0]
    assert skill.metadata is not None
    assert skill.metadata["version"] == "1.0.0"
    assert skill.metadata["author"] == "test-author"


def test_load_skill_parses_license(temp_skill_dir: Path) -> None:
    """Test that license from frontmatter is parsed."""
    loader = LocalSkills(str(temp_skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    skill = skills[0]
    assert skill.license == "MIT"
# --- Multiple Skills Loading Tests ---
# temp_skills_dir (conftest fixture, not shown) is a parent directory with
# two valid skill folders plus hidden/non-skill directories to be skipped.
def test_load_from_parent_directory(temp_skills_dir: Path) -> None:
    """Test loading multiple skills from a parent directory."""
    loader = LocalSkills(str(temp_skills_dir))
    skills = loader.load()
    # Should load 2 skills (code-review and git-workflow)
    # Hidden and non-skill directories should be skipped
    assert len(skills) == 2
    skill_names = {s.name for s in skills}
    assert "code-review" in skill_names
    assert "git-workflow" in skill_names


def test_load_skips_hidden_directories(temp_skills_dir: Path) -> None:
    """Test that hidden directories are skipped."""
    loader = LocalSkills(str(temp_skills_dir))
    skills = loader.load()
    skill_names = {s.name for s in skills}
    assert "hidden-skill" not in skill_names


def test_load_skips_directories_without_skill_md(temp_skills_dir: Path) -> None:
    """Test that directories without SKILL.md are skipped."""
    loader = LocalSkills(str(temp_skills_dir))
    skills = loader.load()
    # "not-a-skill" directory should be skipped
    assert len(skills) == 2
# --- Validation Tests ---
# invalid_skill_dir (conftest fixture, not shown) presumably contains a
# SKILL.md violating the Agent Skills spec (e.g. an invalid name).
def test_validation_enabled_by_default(temp_skill_dir: Path) -> None:
    """Test that validation is enabled by default."""
    loader = LocalSkills(str(temp_skill_dir))
    assert loader.validate is True


def test_valid_skill_passes_validation(temp_skill_dir: Path) -> None:
    """Test that a valid skill passes validation."""
    loader = LocalSkills(str(temp_skill_dir), validate=True)
    skills = loader.load()
    assert len(skills) == 1


def test_invalid_skill_raises_error_when_validation_enabled(invalid_skill_dir: Path) -> None:
    """Test that invalid skill raises SkillValidationError."""
    loader = LocalSkills(str(invalid_skill_dir), validate=True)
    with pytest.raises(SkillValidationError) as exc_info:
        loader.load()
    # Should contain validation errors
    assert len(exc_info.value.errors) > 0


def test_invalid_skill_with_validation_disabled(invalid_skill_dir: Path) -> None:
    """Test that invalid skill loads when validation is disabled."""
    loader = LocalSkills(str(invalid_skill_dir), validate=False)
    # Should not raise, but the skill may have issues
    skills = loader.load()
    # The skill should still be loaded (with invalid name)
    assert len(skills) == 1
# --- Error Handling Tests ---
def test_path_not_exists_raises_error(tmp_path: Path) -> None:
    """Test that non-existent path raises FileNotFoundError."""
    nonexistent = tmp_path / "does-not-exist"
    loader = LocalSkills(str(nonexistent))
    with pytest.raises(FileNotFoundError) as exc_info:
        loader.load()
    assert "does not exist" in str(exc_info.value).lower()


def test_missing_skill_md_with_validation(tmp_path: Path) -> None:
    """Test that missing SKILL.md with validation raises error."""
    skill_dir = tmp_path / "no-skill-md"
    skill_dir.mkdir()
    loader = LocalSkills(str(skill_dir), validate=True)
    # When pointing directly at a directory that should be a skill,
    # it won't find SKILL.md and should return empty
    skills = loader.load()
    assert len(skills) == 0
# --- Frontmatter Parsing Tests ---
def test_parses_yaml_frontmatter(tmp_path: Path) -> None:
    """Test parsing of YAML frontmatter."""
    skill_dir = tmp_path / "yaml-skill"
    skill_dir.mkdir()
    # Nested mapping/list entries must stay indented under their parents
    # for the YAML to parse as asserted below.
    (skill_dir / "SKILL.md").write_text(
        """---
name: yaml-skill
description: Skill with YAML frontmatter
license: Apache-2.0
metadata:
  version: "2.0.0"
  tags:
    - test
    - yaml
---
# YAML Skill Instructions
Instructions here.
"""
    )
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    skill = skills[0]
    assert skill.name == "yaml-skill"
    assert skill.license == "Apache-2.0"
    assert skill.metadata["version"] == "2.0.0"
    assert skill.metadata["tags"] == ["test", "yaml"]


def test_instructions_exclude_frontmatter(temp_skill_dir: Path) -> None:
    """Test that instructions don't include frontmatter."""
    loader = LocalSkills(str(temp_skill_dir))
    skills = loader.load()
    skill = skills[0]
    assert "---" not in skill.instructions
    assert "name:" not in skill.instructions


def test_skill_name_from_frontmatter(tmp_path: Path) -> None:
    """Test that skill name comes from frontmatter, not directory name."""
    skill_dir = tmp_path / "directory-name"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: directory-name
description: Test skill
---
# Instructions
"""
    )
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert len(skills) == 1
    assert skills[0].name == "directory-name"
# --- Discovery Tests ---
def test_scripts_sorted_alphabetically(tmp_path: Path) -> None:
    """Test that discovered scripts are sorted alphabetically."""
    skill_dir = tmp_path / "sorted-skill"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: sorted-skill
description: Test sorting
---
# Instructions
"""
    )
    scripts_dir = skill_dir / "scripts"
    scripts_dir.mkdir()
    # Created deliberately out of order to prove the loader sorts.
    (scripts_dir / "z_script.py").write_text("# Z")
    (scripts_dir / "a_script.py").write_text("# A")
    (scripts_dir / "m_script.py").write_text("# M")
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert skills[0].scripts == ["a_script.py", "m_script.py", "z_script.py"]


def test_references_sorted_alphabetically(tmp_path: Path) -> None:
    """Test that discovered references are sorted alphabetically."""
    skill_dir = tmp_path / "sorted-refs"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: sorted-refs
description: Test reference sorting
---
# Instructions
"""
    )
    refs_dir = skill_dir / "references"
    refs_dir.mkdir()
    (refs_dir / "z_doc.md").write_text("# Z")
    (refs_dir / "a_doc.md").write_text("# A")
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert skills[0].references == ["a_doc.md", "z_doc.md"]


def test_hidden_files_skipped_in_scripts(tmp_path: Path) -> None:
    """Test that hidden files in scripts/ are skipped."""
    skill_dir = tmp_path / "hidden-scripts"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: hidden-scripts
description: Test hidden file skipping
---
# Instructions
"""
    )
    scripts_dir = skill_dir / "scripts"
    scripts_dir.mkdir()
    (scripts_dir / "visible.py").write_text("# Visible")
    (scripts_dir / ".hidden.py").write_text("# Hidden")
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert "visible.py" in skills[0].scripts
    assert ".hidden.py" not in skills[0].scripts


def test_empty_scripts_directory(tmp_path: Path) -> None:
    """Test skill with empty scripts directory."""
    skill_dir = tmp_path / "empty-scripts"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: empty-scripts
description: Test empty scripts
---
# Instructions
"""
    )
    (skill_dir / "scripts").mkdir()
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert skills[0].scripts == []


def test_no_scripts_directory(tmp_path: Path) -> None:
    """Test skill without scripts directory."""
    skill_dir = tmp_path / "no-scripts"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: no-scripts
description: No scripts directory
---
# Instructions
"""
    )
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert skills[0].scripts == []


def test_no_references_directory(tmp_path: Path) -> None:
    """Test skill without references directory."""
    skill_dir = tmp_path / "no-refs"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: no-refs
description: No references directory
---
# Instructions
"""
    )
    loader = LocalSkills(str(skill_dir))
    skills = loader.load()
    assert skills[0].references == []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/skills/loaders/test_local.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/skills/test_agent_skills.py | """Unit tests for Skills orchestrator class."""
import json
from pathlib import Path
from typing import List
import pytest
from agno.skills.agent_skills import Skills
from agno.skills.errors import SkillValidationError
from agno.skills.loaders.base import SkillLoader
from agno.skills.loaders.local import LocalSkills
from agno.skills.skill import Skill
from agno.tools.function import Function
from .conftest import MockSkillLoader
# --- Initialization Tests ---
def test_skills_with_single_loader(mock_loader: MockSkillLoader) -> None:
"""Test Skills initialization with a single loader."""
skills = Skills(loaders=[mock_loader])
assert len(skills.loaders) == 1
def test_skills_with_multiple_loaders(mock_loader: MockSkillLoader, mock_loader_empty: MockSkillLoader) -> None:
"""Test Skills initialization with multiple loaders."""
skills = Skills(loaders=[mock_loader, mock_loader_empty])
assert len(skills.loaders) == 2
def test_skills_empty_loaders() -> None:
"""Test Skills initialization with no loaders."""
skills = Skills(loaders=[])
assert len(skills.loaders) == 0
# --- Eager Loading Tests ---
def test_skills_loaded_on_init(mock_loader: MockSkillLoader) -> None:
"""Test that skills are loaded immediately on initialization."""
skills = Skills(loaders=[mock_loader])
# Skills should be loaded immediately
assert len(skills._skills) > 0
assert "test-skill" in skills._skills
def test_reload_clears_and_reloads(sample_skill: Skill) -> None:
"""Test that reload() clears existing skills and reloads."""
from .conftest import MockSkillLoader
loader = MockSkillLoader([sample_skill])
skills = Skills(loaders=[loader])
# Initial load happens in __init__
assert len(skills._skills) == 1
assert "test-skill" in skills._skills
# Update the loader with a different skill
new_skill = Skill(
name="new-skill",
description="A new skill",
instructions="New instructions",
source_path="/new/path",
)
loader._skills = [new_skill]
# Reload should clear and reload
skills.reload()
assert "new-skill" in skills._skills
assert "test-skill" not in skills._skills
# --- Retrieval Tests ---
def test_get_skill_by_name(mock_loader: MockSkillLoader) -> None:
"""Test getting a skill by name."""
skills = Skills(loaders=[mock_loader])
skill = skills.get_skill("test-skill")
assert skill is not None
assert skill.name == "test-skill"
def test_get_skill_not_found(mock_loader: MockSkillLoader) -> None:
"""Test getting a non-existent skill returns None."""
skills = Skills(loaders=[mock_loader])
skill = skills.get_skill("nonexistent-skill")
assert skill is None
def test_get_all_skills(mock_loader_multiple: MockSkillLoader) -> None:
"""Test getting all skills."""
skills = Skills(loaders=[mock_loader_multiple])
all_skills = skills.get_all_skills()
assert len(all_skills) == 2
assert all(isinstance(s, Skill) for s in all_skills)
def test_get_all_skills_empty(mock_loader_empty: MockSkillLoader) -> None:
"""Test getting all skills when none loaded."""
skills = Skills(loaders=[mock_loader_empty])
all_skills = skills.get_all_skills()
assert all_skills == []
def test_get_skill_names(mock_loader_multiple: MockSkillLoader) -> None:
"""Test getting skill names."""
skills = Skills(loaders=[mock_loader_multiple])
names = skills.get_skill_names()
assert len(names) == 2
assert "test-skill" in names
assert "minimal-skill" in names
def test_get_skill_names_empty(mock_loader_empty: MockSkillLoader) -> None:
"""Test getting skill names when none loaded."""
skills = Skills(loaders=[mock_loader_empty])
names = skills.get_skill_names()
assert names == []
# --- Multiple Loaders Tests ---
def test_skills_from_multiple_loaders(sample_skill: Skill, minimal_skill: Skill) -> None:
"""Test loading skills from multiple loaders."""
loader1 = MockSkillLoader([sample_skill])
loader2 = MockSkillLoader([minimal_skill])
skills = Skills(loaders=[loader1, loader2])
all_skills = skills.get_all_skills()
assert len(all_skills) == 2
names = {s.name for s in all_skills}
assert "test-skill" in names
assert "minimal-skill" in names
def test_duplicate_skill_name_overwrites(sample_skill: Skill) -> None:
"""Test that duplicate skill names cause overwriting."""
skill1 = Skill(
name="duplicate",
description="First version",
instructions="First",
source_path="/path1",
)
skill2 = Skill(
name="duplicate",
description="Second version",
instructions="Second",
source_path="/path2",
)
loader1 = MockSkillLoader([skill1])
loader2 = MockSkillLoader([skill2])
skills = Skills(loaders=[loader1, loader2])
all_skills = skills.get_all_skills()
assert len(all_skills) == 1
assert all_skills[0].description == "Second version"
# --- System Prompt Tests ---
def test_get_system_prompt_snippet_format(mock_loader: MockSkillLoader) -> None:
"""Test system prompt snippet format."""
skills = Skills(loaders=[mock_loader])
snippet = skills.get_system_prompt_snippet()
assert "<skills_system>" in snippet
assert "</skills_system>" in snippet
assert "<skill>" in snippet
assert "</skill>" in snippet
def test_get_system_prompt_snippet_empty(mock_loader_empty: MockSkillLoader) -> None:
"""Test system prompt snippet when no skills loaded."""
skills = Skills(loaders=[mock_loader_empty])
snippet = skills.get_system_prompt_snippet()
assert snippet == ""
def test_get_system_prompt_includes_skill_name(mock_loader: MockSkillLoader) -> None:
"""Test that system prompt includes skill name."""
skills = Skills(loaders=[mock_loader])
snippet = skills.get_system_prompt_snippet()
assert "<name>test-skill</name>" in snippet
def test_get_system_prompt_includes_description(mock_loader: MockSkillLoader) -> None:
"""Test that system prompt includes skill description."""
skills = Skills(loaders=[mock_loader])
snippet = skills.get_system_prompt_snippet()
assert "A test skill for unit testing" in snippet
def test_get_system_prompt_includes_scripts(mock_loader: MockSkillLoader) -> None:
"""Test that system prompt includes scripts list."""
skills = Skills(loaders=[mock_loader])
snippet = skills.get_system_prompt_snippet()
assert "<scripts>" in snippet
assert "helper.py" in snippet
def test_get_system_prompt_shows_none_when_no_scripts(minimal_skill: Skill) -> None:
"""Test that system prompt shows <scripts>none</scripts> when skill has no scripts."""
loader = MockSkillLoader([minimal_skill])
skills = Skills(loaders=[loader])
snippet = skills.get_system_prompt_snippet()
# Should explicitly show "none" instead of omitting the tag
assert "<scripts>none</scripts>" in snippet
def test_get_system_prompt_includes_references(mock_loader: MockSkillLoader) -> None:
"""Test that system prompt includes references list."""
skills = Skills(loaders=[mock_loader])
snippet = skills.get_system_prompt_snippet()
assert "<references>" in snippet
assert "guide.md" in snippet
def test_get_system_prompt_includes_progressive_discovery(mock_loader: MockSkillLoader) -> None:
"""Test that system prompt includes progressive discovery section."""
skills = Skills(loaders=[mock_loader])
snippet = skills.get_system_prompt_snippet()
assert "Progressive Discovery" in snippet
assert "get_skill_instructions" in snippet
assert "get_skill_reference" in snippet
assert "get_skill_script" in snippet
# --- Get Tools Tests ---
def test_get_tools_returns_functions(mock_loader: MockSkillLoader) -> None:
"""Test that get_tools returns a list of Function objects."""
skills = Skills(loaders=[mock_loader])
tools = skills.get_tools()
assert isinstance(tools, list)
assert all(isinstance(t, Function) for t in tools)
def test_get_tools_returns_three_functions(mock_loader: MockSkillLoader) -> None:
"""Test that get_tools returns exactly three functions."""
skills = Skills(loaders=[mock_loader])
tools = skills.get_tools()
assert len(tools) == 3
tool_names = {t.name for t in tools}
assert "get_skill_instructions" in tool_names
assert "get_skill_reference" in tool_names
assert "get_skill_script" in tool_names
# --- Skill Instructions Tool Tests ---
def test_get_skill_instructions_success(mock_loader: MockSkillLoader) -> None:
"""Test successful retrieval of skill instructions."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_instructions("test-skill")
result = json.loads(result_json)
assert result["skill_name"] == "test-skill"
assert "instructions" in result
assert "Follow these instructions" in result["instructions"]
assert "available_scripts" in result
assert "available_references" in result
def test_get_skill_instructions_not_found(mock_loader: MockSkillLoader) -> None:
"""Test retrieval of non-existent skill instructions."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_instructions("nonexistent")
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
assert "available_skills" in result
# --- Skill Reference Tool Tests ---
def test_get_skill_reference_skill_not_found(mock_loader: MockSkillLoader) -> None:
"""Test reference retrieval for non-existent skill."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_reference("nonexistent", "ref.md")
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
def test_get_skill_reference_ref_not_found(mock_loader: MockSkillLoader) -> None:
"""Test reference retrieval for non-existent reference."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_reference("test-skill", "nonexistent.md")
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
assert "available_references" in result
def test_get_skill_reference_with_real_file(temp_skill_dir: Path) -> None:
"""Test reference retrieval with actual file."""
loader = LocalSkills(str(temp_skill_dir))
skills = Skills(loaders=[loader])
result_json = skills._get_skill_reference("test-skill", "guide.md")
result = json.loads(result_json)
assert result["skill_name"] == "test-skill"
assert result["reference_path"] == "guide.md"
assert "content" in result
assert "reference guide" in result["content"].lower()
# --- Skill Script Tool Tests ---
def test_skill_script_read_skill_not_found(mock_loader: MockSkillLoader) -> None:
"""Test script read for non-existent skill."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_script("nonexistent", "script.py")
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
def test_skill_script_read_script_not_found(mock_loader: MockSkillLoader) -> None:
"""Test script read for non-existent script."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_script("test-skill", "nonexistent.py")
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
assert "available_scripts" in result
def test_skill_script_read_with_real_file(temp_skill_dir: Path) -> None:
"""Test script read with actual file."""
loader = LocalSkills(str(temp_skill_dir))
skills = Skills(loaders=[loader])
result_json = skills._get_skill_script("test-skill", "helper.py")
result = json.loads(result_json)
assert result["skill_name"] == "test-skill"
assert result["script_path"] == "helper.py"
assert "content" in result
assert "Helper script" in result["content"]
# --- Error Handling Tests ---
def test_validation_error_propagates(invalid_skill_dir: Path) -> None:
"""Test that validation errors propagate from loaders during initialization."""
loader = LocalSkills(str(invalid_skill_dir), validate=True)
# With eager loading, validation error happens in __init__
with pytest.raises(SkillValidationError):
Skills(loaders=[loader])
def test_loader_error_logged_but_continues() -> None:
"""Test that loader errors are logged but don't stop loading."""
class FailingLoader(SkillLoader):
def load(self) -> List[Skill]:
raise RuntimeError("Loader failed")
working_skill = Skill(
name="working",
description="Works",
instructions="Instructions",
source_path="/path",
)
working_loader = MockSkillLoader([working_skill])
failing_loader = FailingLoader()
skills = Skills(loaders=[failing_loader, working_loader])
# Should not raise, and should load skills from working loader
all_skills = skills.get_all_skills()
assert len(all_skills) == 1
assert all_skills[0].name == "working"
# --- Path Traversal Prevention Tests ---
def test_get_skill_reference_path_traversal_blocked(mock_loader: MockSkillLoader) -> None:
"""Test that path traversal attempts are blocked for references."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_reference("test-skill", "../../../etc/passwd")
result = json.loads(result_json)
assert "error" in result
# Should be caught by the "not in skill.references" check first
assert "not found" in result["error"].lower()
def test_skill_script_path_traversal_blocked(mock_loader: MockSkillLoader) -> None:
"""Test that path traversal attempts are blocked for scripts."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_script("test-skill", "../../../etc/passwd")
result = json.loads(result_json)
assert "error" in result
# Should be caught by the "not in skill.scripts" check first
assert "not found" in result["error"].lower()
def test_is_safe_path_allows_valid_paths(tmp_path: Path) -> None:
"""Test that is_safe_path allows valid paths."""
from agno.skills.utils import is_safe_path
# Create real directories for testing
base_dir = tmp_path / "base"
base_dir.mkdir()
subdir = base_dir / "subdir"
subdir.mkdir()
assert is_safe_path(base_dir, "file.txt") is True
assert is_safe_path(base_dir, "subdir/file.txt") is True
def test_is_safe_path_blocks_traversal(tmp_path: Path) -> None:
"""Test that is_safe_path blocks path traversal attempts."""
from agno.skills.utils import is_safe_path
base_dir = tmp_path / "base"
base_dir.mkdir()
assert is_safe_path(base_dir, "../file.txt") is False
assert is_safe_path(base_dir, "../../file.txt") is False
assert is_safe_path(base_dir, "../../../etc/passwd") is False
assert is_safe_path(base_dir, "subdir/../../file.txt") is False
def test_skill_script_execute_skill_not_found(mock_loader: MockSkillLoader) -> None:
"""Test script execution for non-existent skill."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_script("nonexistent", "script.py", execute=True)
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
assert "available_skills" in result
def test_skill_script_execute_script_not_found(mock_loader: MockSkillLoader) -> None:
"""Test script execution for non-existent script."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_script("test-skill", "nonexistent.py", execute=True)
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
assert "available_scripts" in result
def test_skill_script_execute_success(temp_skill_dir: Path) -> None:
"""Test successful script execution."""
# Create a simple test script with shebang (chmod handled automatically)
scripts_dir = temp_skill_dir / "scripts"
test_script = scripts_dir / "test_runner.py"
test_script.write_text('#!/usr/bin/env python3\nprint("Hello from script")')
loader = LocalSkills(str(temp_skill_dir))
skills = Skills(loaders=[loader])
result_json = skills._get_skill_script("test-skill", "test_runner.py", execute=True)
result = json.loads(result_json)
assert "error" not in result
assert result["skill_name"] == "test-skill"
assert result["script_path"] == "test_runner.py"
assert "Hello from script" in result["stdout"]
assert result["returncode"] == 0
def test_skill_script_execute_with_args(temp_skill_dir: Path) -> None:
"""Test script execution with arguments."""
scripts_dir = temp_skill_dir / "scripts"
test_script = scripts_dir / "echo_args.py"
test_script.write_text('#!/usr/bin/env python3\nimport sys; print(" ".join(sys.argv[1:]))')
loader = LocalSkills(str(temp_skill_dir))
skills = Skills(loaders=[loader])
result_json = skills._get_skill_script("test-skill", "echo_args.py", execute=True, args=["arg1", "arg2"])
result = json.loads(result_json)
assert "error" not in result
assert "arg1 arg2" in result["stdout"]
def test_skill_script_execute_captures_stderr(temp_skill_dir: Path) -> None:
"""Test that stderr is captured."""
scripts_dir = temp_skill_dir / "scripts"
test_script = scripts_dir / "stderr_test.py"
test_script.write_text('#!/usr/bin/env python3\nimport sys; print("error message", file=sys.stderr)')
loader = LocalSkills(str(temp_skill_dir))
skills = Skills(loaders=[loader])
result_json = skills._get_skill_script("test-skill", "stderr_test.py", execute=True)
result = json.loads(result_json)
assert "error" not in result
assert "error message" in result["stderr"]
def test_skill_script_execute_nonzero_exit(temp_skill_dir: Path) -> None:
"""Test script with non-zero exit code."""
scripts_dir = temp_skill_dir / "scripts"
test_script = scripts_dir / "exit_code.py"
test_script.write_text("#!/usr/bin/env python3\nimport sys; sys.exit(42)")
loader = LocalSkills(str(temp_skill_dir))
skills = Skills(loaders=[loader])
result_json = skills._get_skill_script("test-skill", "exit_code.py", execute=True)
result = json.loads(result_json)
assert "error" not in result
assert result["returncode"] == 42
def test_skill_script_execute_path_traversal_blocked(mock_loader: MockSkillLoader) -> None:
"""Test that path traversal attempts are blocked for script execution."""
skills = Skills(loaders=[mock_loader])
result_json = skills._get_skill_script("test-skill", "../../../etc/passwd", execute=True)
result = json.loads(result_json)
assert "error" in result
assert "not found" in result["error"].lower()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/skills/test_agent_skills.py",
"license": "Apache License 2.0",
"lines": 388,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/skills/test_errors.py | """Unit tests for skill exceptions."""
import pytest
from agno.skills.errors import SkillError, SkillParseError, SkillValidationError
# --- SkillError Tests ---
def test_skill_error_is_exception() -> None:
"""Test that SkillError is a subclass of Exception."""
assert issubclass(SkillError, Exception)
def test_skill_error_can_be_raised() -> None:
"""Test that SkillError can be raised and caught."""
with pytest.raises(SkillError) as exc_info:
raise SkillError("Test error")
assert str(exc_info.value) == "Test error"
def test_skill_error_can_be_caught_as_exception() -> None:
"""Test that SkillError can be caught as generic Exception."""
with pytest.raises(Exception):
raise SkillError("Test error")
# --- SkillParseError Tests ---
def test_skill_parse_error_is_skill_error() -> None:
"""Test that SkillParseError is a subclass of SkillError."""
assert issubclass(SkillParseError, SkillError)
def test_skill_parse_error_can_be_raised() -> None:
"""Test that SkillParseError can be raised and caught."""
with pytest.raises(SkillParseError) as exc_info:
raise SkillParseError("Failed to parse SKILL.md")
assert str(exc_info.value) == "Failed to parse SKILL.md"
def test_skill_parse_error_caught_as_skill_error() -> None:
"""Test that SkillParseError can be caught as SkillError."""
with pytest.raises(SkillError):
raise SkillParseError("Parse failed")
# --- SkillValidationError Tests ---
def test_skill_validation_error_is_skill_error() -> None:
"""Test that SkillValidationError is a subclass of SkillError."""
assert issubclass(SkillValidationError, SkillError)
def test_validation_error_with_single_error() -> None:
"""Test SkillValidationError with a single error message."""
error = SkillValidationError("Single error")
assert error.errors == ["Single error"]
assert str(error) == "Single error"
def test_validation_error_with_errors_list() -> None:
"""Test SkillValidationError with explicit errors list."""
errors = ["Error 1", "Error 2", "Error 3"]
error = SkillValidationError("Validation failed", errors=errors)
assert error.errors == errors
assert "3 validation errors" in str(error)
assert "Error 1" in str(error)
assert "Error 2" in str(error)
assert "Error 3" in str(error)
def test_validation_error_with_empty_errors_list() -> None:
"""Test SkillValidationError with empty errors list."""
error = SkillValidationError("Validation failed", errors=[])
assert error.errors == []
# With empty errors list, __str__ returns "0 validation errors: "
assert "0 validation errors" in str(error)
def test_validation_error_str_single_item_list() -> None:
"""Test __str__ with single item in errors list."""
error = SkillValidationError("Wrapper message", errors=["Only one error"])
assert str(error) == "Only one error"
def test_validation_error_str_multiple_items() -> None:
"""Test __str__ with multiple items in errors list."""
error = SkillValidationError("Wrapper", errors=["Error A", "Error B"])
result = str(error)
assert "2 validation errors" in result
assert "Error A" in result
assert "Error B" in result
def test_validation_error_can_be_raised_and_caught() -> None:
"""Test that SkillValidationError can be raised and caught."""
with pytest.raises(SkillValidationError) as exc_info:
raise SkillValidationError("Validation failed", errors=["Name invalid", "Description missing"])
assert len(exc_info.value.errors) == 2
assert "Name invalid" in exc_info.value.errors
def test_validation_error_caught_as_skill_error() -> None:
"""Test that SkillValidationError can be caught as SkillError."""
with pytest.raises(SkillError):
raise SkillValidationError("Validation failed")
def test_validation_error_errors_attribute_accessible() -> None:
"""Test that errors attribute is accessible on caught exception."""
try:
raise SkillValidationError("Failed", errors=["Error 1", "Error 2"])
except SkillValidationError as e:
assert hasattr(e, "errors")
assert isinstance(e.errors, list)
assert len(e.errors) == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/skills/test_errors.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/skills/test_skill.py | """Unit tests for Skill dataclass."""
import pytest
from agno.skills.skill import Skill
# --- Skill Creation Tests ---
def test_skill_creation_with_all_fields(sample_skill: Skill) -> None:
"""Test creating a Skill with all fields populated."""
assert sample_skill.name == "test-skill"
assert sample_skill.description == "A test skill for unit testing"
assert "Follow these instructions" in sample_skill.instructions
assert sample_skill.source_path == "/path/to/test-skill"
assert sample_skill.scripts == ["helper.py", "runner.sh"]
assert sample_skill.references == ["guide.md", "api-docs.md"]
assert sample_skill.metadata == {"version": "1.0.0", "author": "test-author", "tags": ["test", "example"]}
assert sample_skill.license == "MIT"
def test_skill_creation_minimal(minimal_skill: Skill) -> None:
"""Test creating a Skill with only required fields."""
assert minimal_skill.name == "minimal-skill"
assert minimal_skill.description == "A minimal skill"
assert minimal_skill.instructions == "Minimal instructions"
assert minimal_skill.source_path == "/path/to/minimal-skill"
# Optional fields should have default values
assert minimal_skill.scripts == []
assert minimal_skill.references == []
assert minimal_skill.metadata is None
assert minimal_skill.license is None
def test_skill_creation_with_empty_lists() -> None:
"""Test creating a Skill with explicitly empty lists."""
skill = Skill(
name="empty-lists",
description="Skill with empty lists",
instructions="Instructions",
source_path="/path",
scripts=[],
references=[],
)
assert skill.scripts == []
assert skill.references == []
# --- Skill Serialization Tests ---
def test_skill_to_dict(sample_skill: Skill) -> None:
"""Test converting Skill to dictionary."""
skill_dict = sample_skill.to_dict()
assert skill_dict["name"] == "test-skill"
assert skill_dict["description"] == "A test skill for unit testing"
assert "Follow these instructions" in skill_dict["instructions"]
assert skill_dict["source_path"] == "/path/to/test-skill"
assert skill_dict["scripts"] == ["helper.py", "runner.sh"]
assert skill_dict["references"] == ["guide.md", "api-docs.md"]
assert skill_dict["metadata"] == {"version": "1.0.0", "author": "test-author", "tags": ["test", "example"]}
assert skill_dict["license"] == "MIT"
def test_skill_to_dict_minimal(minimal_skill: Skill) -> None:
"""Test converting minimal Skill to dictionary."""
skill_dict = minimal_skill.to_dict()
assert skill_dict["name"] == "minimal-skill"
assert skill_dict["scripts"] == []
assert skill_dict["references"] == []
assert skill_dict["metadata"] is None
assert skill_dict["license"] is None
def test_skill_from_dict(sample_skill_dict: dict) -> None:
"""Test creating Skill from dictionary."""
skill = Skill.from_dict(sample_skill_dict)
assert skill.name == "dict-skill"
assert skill.description == "A skill from dictionary"
assert skill.instructions == "Instructions from dict"
assert skill.source_path == "/path/to/dict-skill"
assert skill.scripts == ["script.py"]
assert skill.references == ["ref.md"]
assert skill.metadata == {"version": "2.0.0"}
assert skill.license == "Apache-2.0"
def test_skill_from_dict_minimal() -> None:
"""Test creating Skill from minimal dictionary."""
data = {
"name": "minimal",
"description": "Minimal description",
"instructions": "Minimal instructions",
"source_path": "/path",
}
skill = Skill.from_dict(data)
assert skill.name == "minimal"
assert skill.scripts == []
assert skill.references == []
assert skill.metadata is None
assert skill.license is None
def test_skill_roundtrip(sample_skill: Skill) -> None:
"""Test that to_dict followed by from_dict preserves all data."""
skill_dict = sample_skill.to_dict()
recreated_skill = Skill.from_dict(skill_dict)
assert recreated_skill.name == sample_skill.name
assert recreated_skill.description == sample_skill.description
assert recreated_skill.instructions == sample_skill.instructions
assert recreated_skill.source_path == sample_skill.source_path
assert recreated_skill.scripts == sample_skill.scripts
assert recreated_skill.references == sample_skill.references
assert recreated_skill.metadata == sample_skill.metadata
assert recreated_skill.license == sample_skill.license
# --- Skill Equality Tests ---
def test_skills_with_same_data_are_equal() -> None:
"""Test that two Skills with identical data are equal."""
skill1 = Skill(
name="equal-skill",
description="Test description",
instructions="Test instructions",
source_path="/path",
)
skill2 = Skill(
name="equal-skill",
description="Test description",
instructions="Test instructions",
source_path="/path",
)
assert skill1 == skill2
def test_skills_with_different_names_are_not_equal() -> None:
"""Test that Skills with different names are not equal."""
skill1 = Skill(
name="skill-one",
description="Same description",
instructions="Same instructions",
source_path="/path",
)
skill2 = Skill(
name="skill-two",
description="Same description",
instructions="Same instructions",
source_path="/path",
)
assert skill1 != skill2
def test_skills_with_different_optional_fields() -> None:
"""Test equality with different optional fields."""
skill1 = Skill(
name="same-name",
description="Same",
instructions="Same",
source_path="/path",
scripts=["script.py"],
)
skill2 = Skill(
name="same-name",
description="Same",
instructions="Same",
source_path="/path",
scripts=["different.py"],
)
assert skill1 != skill2
# --- Error Handling Tests ---
def test_from_dict_missing_required_field() -> None:
"""Test that from_dict raises KeyError for missing required fields."""
incomplete_data = {
"name": "incomplete",
"description": "Missing fields",
# Missing: instructions, source_path
}
with pytest.raises(KeyError):
Skill.from_dict(incomplete_data)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/skills/test_skill.py",
"license": "Apache License 2.0",
"lines": 152,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/skills/test_validator.py | """Unit tests for skill validation logic."""
from pathlib import Path
import pytest
from agno.skills.validator import (
ALLOWED_FIELDS,
MAX_COMPATIBILITY_LENGTH,
MAX_DESCRIPTION_LENGTH,
MAX_SKILL_NAME_LENGTH,
_validate_allowed_tools,
_validate_compatibility,
_validate_description,
_validate_license,
_validate_metadata_fields,
_validate_metadata_value,
_validate_name,
validate_metadata,
validate_skill_directory,
)
# --- Name Validation Tests ---
def test_validate_name_valid() -> None:
    """A well-formed hyphenated name produces no errors."""
    assert not _validate_name("valid-skill-name")


def test_validate_name_with_numbers() -> None:
    """Digits are allowed inside skill names."""
    assert not _validate_name("skill-v2")


def test_validate_name_single_word() -> None:
    """A single lowercase word is a valid name."""
    assert not _validate_name("skill")


def test_validate_name_too_long() -> None:
    """Names longer than the limit are flagged."""
    problems = _validate_name("a" * (MAX_SKILL_NAME_LENGTH + 1))
    assert problems
    assert any("exceeds" in p.lower() or "character limit" in p.lower() for p in problems)


def test_validate_name_at_max_length() -> None:
    """A name of exactly the maximum length raises no length error."""
    problems = _validate_name("a" * MAX_SKILL_NAME_LENGTH)
    assert all("exceeds" not in p.lower() for p in problems)


def test_validate_name_uppercase_rejected() -> None:
    """Capital letters make a name invalid."""
    problems = _validate_name("Invalid-Name")
    assert problems
    assert any("lowercase" in p.lower() for p in problems)


def test_validate_name_with_underscore_rejected() -> None:
    """Underscores are not permitted in names."""
    problems = _validate_name("invalid_name")
    assert problems
    assert any("invalid characters" in p.lower() for p in problems)


def test_validate_name_with_space_rejected() -> None:
    """Spaces are not permitted in names."""
    problems = _validate_name("invalid name")
    assert problems
    assert any("invalid characters" in p.lower() for p in problems)


def test_validate_name_starting_with_hyphen_rejected() -> None:
    """A leading hyphen is rejected."""
    problems = _validate_name("-invalid")
    assert problems
    assert any("cannot start" in p.lower() or "start or end" in p.lower() for p in problems)


def test_validate_name_ending_with_hyphen_rejected() -> None:
    """A trailing hyphen is rejected."""
    problems = _validate_name("invalid-")
    assert problems
    assert any("start or end" in p.lower() for p in problems)


def test_validate_name_consecutive_hyphens_rejected() -> None:
    """Runs of consecutive hyphens are rejected."""
    problems = _validate_name("invalid--name")
    assert problems
    assert any("consecutive" in p.lower() for p in problems)


def test_validate_name_empty_rejected() -> None:
    """The empty string is not a valid name."""
    problems = _validate_name("")
    assert problems
    assert any("non-empty" in p.lower() for p in problems)


def test_validate_name_whitespace_only_rejected() -> None:
    """A name made only of whitespace is rejected."""
    assert _validate_name("   ")


def test_validate_name_directory_mismatch(tmp_path: Path) -> None:
    """The declared name must equal the containing directory's name."""
    skill_dir = tmp_path / "actual-dir-name"
    skill_dir.mkdir()
    problems = _validate_name("different-name", skill_dir)
    assert problems
    assert any("must match" in p.lower() for p in problems)


def test_validate_name_directory_match(tmp_path: Path) -> None:
    """No mismatch error when name and directory agree."""
    skill_dir = tmp_path / "matching-name"
    skill_dir.mkdir()
    problems = _validate_name("matching-name", skill_dir)
    assert all("must match" not in p.lower() for p in problems)


@pytest.mark.parametrize(
    "invalid_name,expected_error",
    [
        ("UPPERCASE", "lowercase"),
        ("-leading-hyphen", "start or end"),
        ("trailing-hyphen-", "start or end"),
        ("double--hyphen", "consecutive"),
        ("has_underscore", "invalid characters"),
        ("has space", "invalid characters"),
        ("has.dot", "invalid characters"),
    ],
)
def test_validate_name_invalid_parametrized(invalid_name: str, expected_error: str) -> None:
    """Each malformed name yields an error mentioning the expected reason."""
    problems = _validate_name(invalid_name)
    assert problems
    assert any(expected_error.lower() in p.lower() for p in problems)
# --- Description Validation Tests ---
def test_validate_description_valid() -> None:
    """A normal description passes validation."""
    assert not _validate_description("A valid skill description")


def test_validate_description_empty_rejected() -> None:
    """An empty description is flagged."""
    problems = _validate_description("")
    assert problems
    assert any("non-empty" in p.lower() for p in problems)


def test_validate_description_whitespace_only_rejected() -> None:
    """A blank (whitespace-only) description is flagged."""
    assert _validate_description("   ")


def test_validate_description_too_long() -> None:
    """Descriptions over the character limit are flagged."""
    problems = _validate_description("a" * (MAX_DESCRIPTION_LENGTH + 1))
    assert problems
    assert any("exceeds" in p.lower() or "character limit" in p.lower() for p in problems)


def test_validate_description_at_max_length() -> None:
    """A description of exactly the maximum length passes."""
    assert not _validate_description("a" * MAX_DESCRIPTION_LENGTH)
# --- Compatibility Validation Tests ---
def test_validate_compatibility_valid() -> None:
    """A short compatibility note passes validation."""
    assert not _validate_compatibility("Requires Python 3.8+")


def test_validate_compatibility_too_long() -> None:
    """Notes over the character limit are flagged."""
    problems = _validate_compatibility("a" * (MAX_COMPATIBILITY_LENGTH + 1))
    assert problems
    assert any("exceeds" in p.lower() for p in problems)


def test_validate_compatibility_at_max_length() -> None:
    """A note of exactly the maximum length passes."""
    assert not _validate_compatibility("a" * MAX_COMPATIBILITY_LENGTH)
# --- Metadata Fields Validation Tests ---
def test_validate_metadata_fields_all_allowed() -> None:
    """Test that all allowed fields pass validation."""
    metadata = {field: "value" for field in ALLOWED_FIELDS}
    errors = _validate_metadata_fields(metadata)
    assert len(errors) == 0


def test_validate_metadata_fields_subset() -> None:
    """Test that a subset of allowed fields passes."""
    metadata = {"name": "test", "description": "Test description"}
    errors = _validate_metadata_fields(metadata)
    assert len(errors) == 0


def test_validate_metadata_fields_unknown_rejected() -> None:
    """Test that an unknown field is reported by name."""
    metadata = {"name": "test", "unknown_field": "value"}
    errors = _validate_metadata_fields(metadata)
    assert len(errors) > 0
    assert any("unexpected" in e.lower() and "unknown_field" in e.lower() for e in errors)


def test_validate_metadata_fields_multiple_unknown() -> None:
    """Test that multiple unknown fields are all reported."""
    metadata = {"name": "test", "bad1": "value", "bad2": "value"}
    errors = _validate_metadata_fields(metadata)
    assert len(errors) > 0
    # Both unknown fields must appear in the error output. The previous
    # `any(...) or any(...)` form passed even when only one of them was
    # reported, contradicting this test's stated purpose.
    assert any("bad1" in e for e in errors)
    assert any("bad2" in e for e in errors)


def test_validate_metadata_fields_empty() -> None:
    """Test that an empty mapping has no unexpected fields."""
    errors = _validate_metadata_fields({})
    assert len(errors) == 0
# --- Complete Metadata Validation Tests ---
def test_validate_metadata_valid(tmp_path: Path) -> None:
    """Complete, well-formed metadata produces no errors."""
    skill_dir = tmp_path / "test-skill"
    skill_dir.mkdir()
    frontmatter = {
        "name": "test-skill",
        "description": "A valid test skill",
        "license": "MIT",
    }
    assert not validate_metadata(frontmatter, skill_dir)


def test_validate_metadata_missing_name() -> None:
    """Omitting the name field is reported."""
    problems = validate_metadata({"description": "Missing name field"})
    assert problems
    assert any("name" in p.lower() and "missing" in p.lower() for p in problems)


def test_validate_metadata_missing_description() -> None:
    """Omitting the description field is reported."""
    problems = validate_metadata({"name": "test-skill"})
    assert problems
    assert any("description" in p.lower() and "missing" in p.lower() for p in problems)


def test_validate_metadata_missing_both_required() -> None:
    """Omitting both required fields yields at least two errors."""
    problems = validate_metadata({"license": "MIT"})
    assert len(problems) >= 2


def test_validate_metadata_invalid_name_format() -> None:
    """A badly formatted name is reported even with a valid description."""
    assert validate_metadata({"name": "Invalid_Name", "description": "Valid description"})


def test_validate_metadata_with_optional_compatibility() -> None:
    """A short optional compatibility note is accepted."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "compatibility": "Requires Python 3.8+",
    }
    assert not validate_metadata(frontmatter)


def test_validate_metadata_with_invalid_compatibility() -> None:
    """An over-long compatibility note is reported."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "compatibility": "x" * (MAX_COMPATIBILITY_LENGTH + 1),
    }
    assert validate_metadata(frontmatter)
# --- Skill Directory Validation Tests ---
def test_validate_skill_directory_valid(temp_skill_dir: Path) -> None:
    """A complete skill directory validates cleanly."""
    assert not validate_skill_directory(temp_skill_dir)


def test_validate_skill_directory_not_exists(tmp_path: Path) -> None:
    """A missing path is reported."""
    problems = validate_skill_directory(tmp_path / "does-not-exist")
    assert problems
    assert any("does not exist" in p.lower() for p in problems)


def test_validate_skill_directory_is_file(tmp_path: Path) -> None:
    """A regular file is rejected where a directory is required."""
    file_path = tmp_path / "not-a-dir.txt"
    file_path.write_text("content")
    problems = validate_skill_directory(file_path)
    assert problems
    assert any("not a directory" in p.lower() for p in problems)


def test_validate_skill_directory_missing_skill_md(tmp_path: Path) -> None:
    """A directory with no SKILL.md is rejected."""
    skill_dir = tmp_path / "no-skill-md"
    skill_dir.mkdir()
    problems = validate_skill_directory(skill_dir)
    assert problems
    assert any("skill.md" in p.lower() for p in problems)


def test_validate_skill_directory_missing_frontmatter_start(skill_dir_missing_frontmatter: Path) -> None:
    """SKILL.md without an opening frontmatter delimiter is rejected."""
    problems = validate_skill_directory(skill_dir_missing_frontmatter)
    assert problems
    assert any("frontmatter" in p.lower() and "---" in p for p in problems)


def test_validate_skill_directory_unclosed_frontmatter(tmp_path: Path) -> None:
    """SKILL.md whose frontmatter never closes is rejected."""
    skill_dir = tmp_path / "unclosed"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: test
description: Test
# Missing closing ---
"""
    )
    problems = validate_skill_directory(skill_dir)
    assert problems
    assert any("closed" in p.lower() or "---" in p for p in problems)


def test_validate_skill_directory_invalid_yaml(tmp_path: Path) -> None:
    """Malformed YAML inside the frontmatter is rejected."""
    skill_dir = tmp_path / "bad-yaml"
    skill_dir.mkdir()
    (skill_dir / "SKILL.md").write_text(
        """---
name: test
description: [unclosed bracket
---
# Instructions
"""
    )
    problems = validate_skill_directory(skill_dir)
    assert problems
    assert any("yaml" in p.lower() for p in problems)


def test_validate_skill_directory_invalid(invalid_skill_dir: Path) -> None:
    """A known-bad fixture directory fails validation."""
    assert validate_skill_directory(invalid_skill_dir)
# --- Validator Constants Tests ---
def test_max_skill_name_length() -> None:
    """Test MAX_SKILL_NAME_LENGTH is set correctly."""
    # Pins the published limit; bump deliberately if the skill spec changes.
    assert MAX_SKILL_NAME_LENGTH == 64


def test_max_description_length() -> None:
    """Test MAX_DESCRIPTION_LENGTH is set correctly."""
    assert MAX_DESCRIPTION_LENGTH == 1024


def test_max_compatibility_length() -> None:
    """Test MAX_COMPATIBILITY_LENGTH is set correctly."""
    assert MAX_COMPATIBILITY_LENGTH == 500


def test_allowed_fields() -> None:
    """Test ALLOWED_FIELDS contains expected fields."""
    # Exact-set equality so that both missing and extra fields fail the test.
    expected = {"name", "description", "license", "allowed-tools", "metadata", "compatibility"}
    assert ALLOWED_FIELDS == expected
# --- License Validation Tests ---
def test_validate_license_valid() -> None:
    """A common SPDX identifier passes."""
    assert not _validate_license("MIT")


def test_validate_license_apache() -> None:
    """The Apache-2.0 identifier passes."""
    assert not _validate_license("Apache-2.0")


def test_validate_license_any_string() -> None:
    """Arbitrary license strings are accepted."""
    assert not _validate_license("CustomLicense")


def test_validate_license_wrong_type() -> None:
    """A non-string license value is reported."""
    problems = _validate_license(123)  # type: ignore
    assert problems
    assert "must be a string" in problems[0].lower()
# --- Allowed-Tools Validation Tests ---
def test_validate_allowed_tools_valid() -> None:
    """A list of strings passes."""
    assert not _validate_allowed_tools(["tool1", "tool2"])


def test_validate_allowed_tools_empty_list() -> None:
    """An empty list is acceptable."""
    assert not _validate_allowed_tools([])


def test_validate_allowed_tools_not_list() -> None:
    """A bare string is reported as the wrong type."""
    problems = _validate_allowed_tools("tool1")
    assert problems
    assert "must be a list" in problems[0].lower()


def test_validate_allowed_tools_not_strings() -> None:
    """Non-string members are reported."""
    problems = _validate_allowed_tools(["tool1", 123])
    assert problems
    assert "list of strings" in problems[0].lower()
# --- Metadata Value Validation Tests ---
def test_validate_metadata_value_valid() -> None:
    """A plain dict passes."""
    assert not _validate_metadata_value({"key": "value"})


def test_validate_metadata_value_empty_dict() -> None:
    """An empty dict passes."""
    assert not _validate_metadata_value({})


def test_validate_metadata_value_not_dict() -> None:
    """A string value is reported as the wrong type."""
    problems = _validate_metadata_value("not a dict")
    assert problems
    assert "must be a dictionary" in problems[0].lower()


def test_validate_metadata_value_list() -> None:
    """A list value is reported as the wrong type."""
    problems = _validate_metadata_value(["item1", "item2"])
    assert problems
    assert "must be a dictionary" in problems[0].lower()
# --- Integration Tests for New Validators ---
def test_validate_metadata_with_valid_license() -> None:
    """A standard license identifier is accepted end-to-end."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "license": "MIT",
    }
    assert not validate_metadata(frontmatter)


def test_validate_metadata_with_custom_license() -> None:
    """Any custom license string is accepted end-to-end."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "license": "CustomLicense",
    }
    assert not validate_metadata(frontmatter)


def test_validate_metadata_with_valid_allowed_tools() -> None:
    """A list of tool names is accepted for allowed-tools."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "allowed-tools": ["bash", "python"],
    }
    assert not validate_metadata(frontmatter)


def test_validate_metadata_with_invalid_allowed_tools() -> None:
    """A non-list allowed-tools value is reported."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "allowed-tools": "not-a-list",
    }
    problems = validate_metadata(frontmatter)
    assert problems
    assert any("must be a list" in p.lower() for p in problems)


def test_validate_metadata_with_valid_metadata_field() -> None:
    """A dict-valued metadata field is accepted."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "metadata": {"version": "1.0.0", "author": "test"},
    }
    assert not validate_metadata(frontmatter)


def test_validate_metadata_with_invalid_metadata_field() -> None:
    """A non-dict metadata field is reported."""
    frontmatter = {
        "name": "test-skill",
        "description": "Valid description",
        "metadata": "not-a-dict",
    }
    problems = validate_metadata(frontmatter)
    assert problems
    assert any("must be a dictionary" in p.lower() for p in problems)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/skills/test_validator.py",
"license": "Apache License 2.0",
"lines": 421,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/client/a2a/client.py | """A2A (Agent-to-Agent) protocol client for Agno.
This module provides a Pythonic client for communicating with any A2A-compatible
agent server, enabling cross-framework agent communication.
"""
import json
from typing import Any, AsyncIterator, Dict, List, Literal, Optional
from uuid import uuid4
from agno.client.a2a.schemas import AgentCard, Artifact, StreamEvent, TaskResult
from agno.exceptions import RemoteServerUnavailableError
from agno.media import Audio, File, Image, Video
from agno.utils.http import get_default_async_client, get_default_sync_client
from agno.utils.log import log_warning
try:
from httpx import ConnectError, ConnectTimeout, TimeoutException
except ImportError:
raise ImportError("`httpx` not installed. Please install using `pip install httpx`")
__all__ = ["A2AClient"]
class A2AClient:
    """Async client for A2A (Agent-to-Agent) protocol communication.

    Provides a Pythonic interface for communicating with any A2A-compatible
    agent server, including Agno AgentOS with a2a_interface=True.

    The A2A protocol is a standard for agent-to-agent communication that enables
    interoperability between different AI agent frameworks.

    Attributes:
        base_url: Base URL of the A2A server
        timeout: Request timeout in seconds
        protocol: Wire protocol used for requests ("rest" or "json-rpc")
    """

    def __init__(
        self,
        base_url: str,
        timeout: int = 30,
        protocol: Literal["rest", "json-rpc"] = "rest",
    ):
        """Initialize A2AClient.

        Args:
            base_url: Base URL of the A2A server (e.g., "http://localhost:7777")
            timeout: Request timeout in seconds (default: 30)
            protocol: Protocol to use for A2A communication (default: "rest")
        """
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout
        self.protocol = protocol

    def _get_endpoint(self, path: str) -> str:
        """Build full endpoint URL.

        If protocol is "json-rpc", always use the base URL (JSON-RPC servers
        dispatch on the request body, not the path). Otherwise, use the
        traditional REST-style endpoints.
        """
        if self.protocol == "json-rpc":
            return self.base_url if self.base_url.endswith("/") else f"{self.base_url}/"
        # Manually construct URL to ensure proper path joining
        base = self.base_url.rstrip("/")
        path_clean = path.lstrip("/")
        return f"{base}/{path_clean}" if path_clean else base

    def _build_message_request(
        self,
        message: str,
        context_id: Optional[str] = None,
        user_id: Optional[str] = None,
        images: Optional[List[Image]] = None,
        audio: Optional[List[Audio]] = None,
        videos: Optional[List[Video]] = None,
        files: Optional[List[File]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        stream: bool = False,
    ) -> Dict[str, Any]:
        """Build A2A JSON-RPC request payload.

        Args:
            message: Text message to send
            context_id: Session/context ID for multi-turn conversations
            user_id: User identifier
            images: List of images to include
            audio: List of audio files to include
            videos: List of videos to include
            files: List of files to include
            metadata: Additional metadata
            stream: Whether this is a streaming request

        Returns:
            Dict containing the JSON-RPC request payload
        """
        message_id = str(uuid4())

        # Build message parts; the text part always comes first.
        parts: List[Dict[str, Any]] = [{"kind": "text", "text": message}]

        # Media is only forwarded when it carries a URL; inline/bytes content
        # is silently skipped here — TODO confirm that is intended upstream.
        if images:
            for img in images:
                if hasattr(img, "url") and img.url:
                    parts.append(
                        {
                            "kind": "file",
                            "file": {"uri": img.url, "mimeType": "image/*"},
                        }
                    )
        if audio:
            for aud in audio:
                if hasattr(aud, "url") and aud.url:
                    parts.append(
                        {
                            "kind": "file",
                            "file": {"uri": aud.url, "mimeType": "audio/*"},
                        }
                    )
        if videos:
            for vid in videos:
                if hasattr(vid, "url") and vid.url:
                    parts.append(
                        {
                            "kind": "file",
                            "file": {"uri": vid.url, "mimeType": "video/*"},
                        }
                    )
        if files:
            for f in files:
                if hasattr(f, "url") and f.url:
                    mime_type = getattr(f, "mime_type", "application/octet-stream")
                    parts.append(
                        {
                            "kind": "file",
                            "file": {"uri": f.url, "mimeType": mime_type},
                        }
                    )

        # Merge user identity into caller-supplied metadata.
        msg_metadata: Dict[str, Any] = {}
        if user_id:
            msg_metadata["userId"] = user_id
        if metadata:
            msg_metadata.update(metadata)

        # Build the message object, excluding null values
        message_obj: Dict[str, Any] = {
            "messageId": message_id,
            "role": "user",
            "parts": parts,
        }
        if context_id:
            message_obj["contextId"] = context_id
        if msg_metadata:
            message_obj["metadata"] = msg_metadata

        # Build the request
        return {
            "jsonrpc": "2.0",
            "method": "message/stream" if stream else "message/send",
            "id": message_id,
            "params": {"message": message_obj},
        }

    def _parse_task_result(self, response_data: Dict[str, Any]) -> TaskResult:
        """Parse A2A response into TaskResult.

        Args:
            response_data: Raw JSON-RPC response

        Returns:
            TaskResult with parsed content
        """
        result = response_data.get("result", {})
        # Handle both direct task and nested task formats
        task = result if "id" in result else result.get("task", result)

        # Extract task metadata; tolerate both snake_case and camelCase keys.
        task_id = task.get("id", "")
        context_id = task.get("context_id", task.get("contextId", ""))
        status_obj = task.get("status", {})
        status = status_obj.get("state", "unknown") if isinstance(status_obj, dict) else str(status_obj)

        # Concatenate text parts from all agent messages in the history.
        content_parts: List[str] = []
        for msg in task.get("history", []):
            if msg.get("role") == "agent":
                for part in msg.get("parts", []):
                    part_data = part.get("root", part)  # Handle wrapped parts
                    if part_data.get("kind") == "text" or "text" in part_data:
                        text = part_data.get("text", "")
                        if text:
                            content_parts.append(text)

        # Extract artifacts
        artifacts: List[Artifact] = []
        for artifact_data in task.get("artifacts", []):
            artifacts.append(
                Artifact(
                    artifact_id=artifact_data.get("artifact_id", artifact_data.get("artifactId", "")),
                    name=artifact_data.get("name"),
                    description=artifact_data.get("description"),
                    mime_type=artifact_data.get("mime_type", artifact_data.get("mimeType")),
                    uri=artifact_data.get("uri"),
                )
            )

        return TaskResult(
            task_id=task_id,
            context_id=context_id,
            status=status,
            content="".join(content_parts),
            artifacts=artifacts,
            metadata=task.get("metadata"),
        )

    def _parse_stream_event(self, data: Dict[str, Any]) -> StreamEvent:
        """Parse streaming response line into StreamEvent.

        Args:
            data: Parsed JSON from stream line

        Returns:
            StreamEvent with parsed data
        """
        result = data.get("result", {})

        # Determine event type from various indicators
        event_type = "unknown"
        content = None
        is_final = False
        task_id = result.get("taskId", result.get("task_id"))
        context_id = result.get("contextId", result.get("context_id"))
        metadata = result.get("metadata")

        # Use the 'kind' field to determine event type (A2A protocol standard)
        kind = result.get("kind", "")

        if kind == "task":
            # Final task result
            event_type = "task"
            is_final = True
            task_id = result.get("id", task_id)
            # Extract content from history (first text part of an agent message)
            for msg in result.get("history", []):
                if msg.get("role") == "agent":
                    for part in msg.get("parts", []):
                        if part.get("kind") == "text" or "text" in part:
                            content = part.get("text", "")
                            break

        elif kind == "status-update":
            # Status update event
            is_final = result.get("final", False)
            status = result.get("status", {})
            state = status.get("state", "") if isinstance(status, dict) else ""
            event_type = state if state in {"working", "completed", "failed", "canceled"} else "status"

        elif kind == "message":
            # Content message event
            event_type = "content"
            if metadata and metadata.get("agno_content_category") == "reasoning":
                event_type = "reasoning"
            # Extract text content from parts
            for part in result.get("parts", []):
                if part.get("kind") == "text" or "text" in part:
                    content = part.get("text", "")
                    break

        elif kind == "artifact-update":
            event_type = "content"
            artifact = result.get("artifact", {})
            for part in artifact.get("parts", []):
                if part.get("kind") == "text" or "text" in part:
                    content = part.get("text", "")
                    break

        # Fallback parsing for non-standard formats that omit 'kind'
        elif "history" in result:
            event_type = "task"
            is_final = True
            task_id = result.get("id", task_id)
            for msg in result.get("history", []):
                if msg.get("role") == "agent":
                    for part in msg.get("parts", []):
                        part_data = part.get("root", part)
                        if "text" in part_data:
                            content = part_data.get("text", "")
                            break
        elif "messageId" in result or "message_id" in result or "parts" in result:
            event_type = "content"
            for part in result.get("parts", []):
                part_data = part.get("root", part)
                if "text" in part_data:
                    content = part_data.get("text", "")
                    break

        return StreamEvent(
            event_type=event_type,
            content=content,
            task_id=task_id,
            context_id=context_id,
            metadata=metadata,
            is_final=is_final,
        )

    async def send_message(
        self,
        message: str,
        *,
        context_id: Optional[str] = None,
        user_id: Optional[str] = None,
        images: Optional[List[Image]] = None,
        audio: Optional[List[Audio]] = None,
        videos: Optional[List[Video]] = None,
        files: Optional[List[File]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> TaskResult:
        """Send a message to an A2A agent and wait for the response.

        Args:
            message: Text message to send
            context_id: Session/context ID for multi-turn conversations
            user_id: User identifier (optional)
            images: List of Image objects to include (optional)
            audio: List of Audio objects to include (optional)
            videos: List of Video objects to include (optional)
            files: List of File objects to include (optional)
            metadata: Additional metadata (optional)
            headers: HTTP headers to include in the request (optional)

        Returns:
            TaskResult containing the agent's response

        Raises:
            HTTPStatusError: If the server returns an HTTP error (4xx, 5xx)
            RemoteServerUnavailableError: If connection fails or times out
        """
        client = get_default_async_client()
        request_body = self._build_message_request(
            message=message,
            context_id=context_id,
            user_id=user_id,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            metadata=metadata,
            stream=False,
        )

        try:
            response = await client.post(
                self._get_endpoint(path="/v1/message:send"),
                json=request_body,
                timeout=self.timeout,
                headers=headers,
            )
            response.raise_for_status()
            response_data = response.json()
            return self._parse_task_result(response_data)
        except (ConnectError, ConnectTimeout) as e:
            raise RemoteServerUnavailableError(
                message=f"Failed to connect to A2A server at {self.base_url}",
                base_url=self.base_url,
                original_error=e,
            ) from e
        except TimeoutException as e:
            raise RemoteServerUnavailableError(
                message=f"Request to A2A server at {self.base_url} timed out",
                base_url=self.base_url,
                original_error=e,
            ) from e

    async def stream_message(
        self,
        message: str,
        *,
        context_id: Optional[str] = None,
        user_id: Optional[str] = None,
        images: Optional[List[Image]] = None,
        audio: Optional[List[Audio]] = None,
        videos: Optional[List[Video]] = None,
        files: Optional[List[File]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> AsyncIterator[StreamEvent]:
        """Stream a message to an A2A agent with real-time events.

        Args:
            message: Text message to send
            context_id: Session/context ID for multi-turn conversations
            user_id: User identifier (optional)
            images: List of Image objects to include (optional)
            audio: List of Audio objects to include (optional)
            videos: List of Video objects to include (optional)
            files: List of File objects to include (optional)
            metadata: Additional metadata (optional)
            headers: HTTP headers to include in the request (optional)

        Yields:
            StreamEvent objects for each event in the stream

        Raises:
            HTTPStatusError: If the server returns an HTTP error (4xx, 5xx)
            RemoteServerUnavailableError: If connection fails or times out

        Example:
            ```python
            async for event in client.stream_message("Hello"):
                if event.is_content and event.content:
                    print(event.content, end="", flush=True)
                elif event.is_final:
                    print()  # Newline at end
            ```
        """
        http_client = get_default_async_client()
        request_body = self._build_message_request(
            message=message,
            context_id=context_id,
            user_id=user_id,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            metadata=metadata,
            stream=True,
        )

        try:
            # Copy the caller's headers before injecting SSE defaults so we
            # never mutate a dict the caller may reuse for other requests.
            request_headers = dict(headers) if headers else {}
            request_headers.setdefault("Accept", "text/event-stream")
            request_headers.setdefault("Cache-Control", "no-store")

            async with http_client.stream(
                "POST",
                self._get_endpoint("/v1/message:stream"),
                json=request_body,
                timeout=self.timeout,
                headers=request_headers,
            ) as response:
                response.raise_for_status()

                async for line in response.aiter_lines():
                    line = line.strip()
                    if not line:
                        continue

                    # Handle SSE format: skip "event:" lines, parse "data:" lines
                    if line.startswith("event:"):
                        continue
                    if line.startswith("data:"):
                        line = line[5:].strip()  # Remove "data:" prefix

                    try:
                        data = json.loads(line)
                        yield self._parse_stream_event(data)
                    except json.JSONDecodeError as e:
                        # Malformed lines are logged and skipped rather than
                        # aborting the whole stream.
                        log_warning(f"Failed to decode JSON from stream line: {line[:100]}. Error: {e}")
                        continue

        except (ConnectError, ConnectTimeout) as e:
            raise RemoteServerUnavailableError(
                message=f"Failed to connect to A2A server at {self.base_url}",
                base_url=self.base_url,
                original_error=e,
            ) from e
        except TimeoutException as e:
            raise RemoteServerUnavailableError(
                message=f"Request to A2A server at {self.base_url} timed out",
                base_url=self.base_url,
                original_error=e,
            ) from e

    def _agent_card_from_payload(self, data: Dict[str, Any]) -> AgentCard:
        """Build an AgentCard from a raw agent-card JSON payload.

        Shared by the sync and async getters so both parse identically.
        """
        return AgentCard(
            name=data.get("name", "Unknown"),
            url=data.get("url", self.base_url),
            description=data.get("description"),
            version=data.get("version"),
            capabilities=data.get("capabilities", []),
            metadata=data.get("metadata"),
        )

    def get_agent_card(self, headers: Optional[Dict[str, str]] = None) -> Optional[AgentCard]:
        """Get agent card for capability discovery.

        Note: Not all A2A servers support agent cards. This method returns
        None if the server doesn't provide an agent card.

        Args:
            headers: HTTP headers to include in the request (optional)

        Returns:
            AgentCard if available, None otherwise
        """
        client = get_default_sync_client()
        url = self._get_endpoint(path="/.well-known/agent-card.json")
        response = client.get(url, timeout=self.timeout, headers=headers)
        if response.status_code != 200:
            return None
        return self._agent_card_from_payload(response.json())

    async def aget_agent_card(self, headers: Optional[Dict[str, str]] = None) -> Optional[AgentCard]:
        """Get agent card for capability discovery (async variant).

        Note: Not all A2A servers support agent cards. This method returns
        None if the server doesn't provide an agent card.

        Args:
            headers: HTTP headers to include in the request (optional)

        Returns:
            AgentCard if available, None otherwise
        """
        client = get_default_async_client()
        url = self._get_endpoint(path="/.well-known/agent-card.json")
        response = await client.get(url, timeout=self.timeout, headers=headers)
        if response.status_code != 200:
            return None
        return self._agent_card_from_payload(response.json())
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/client/a2a/client.py",
"license": "Apache License 2.0",
"lines": 475,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/client/a2a/schemas.py | """Agno-friendly schemas for A2A protocol responses.
These schemas provide a simplified, Pythonic interface for working with
A2A protocol responses, abstracting away the JSON-RPC complexity.
"""
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional
@dataclass
class Artifact:
    """A file-like output (image, document, etc.) attached to an A2A task."""

    artifact_id: str
    name: Optional[str] = None
    description: Optional[str] = None
    mime_type: Optional[str] = None
    uri: Optional[str] = None
    content: Optional[bytes] = None


@dataclass
class TaskResult:
    """Outcome of a non-streaming A2A message exchange.

    Attributes:
        task_id: Unique identifier for the task
        context_id: Session/context identifier for multi-turn conversations
        status: Task status ("completed", "failed", "canceled")
        content: Text content from the agent's response
        artifacts: List of artifacts (files, images, etc.)
        metadata: Additional metadata from the response
    """

    task_id: str
    context_id: str
    status: str
    content: str
    artifacts: List[Artifact] = field(default_factory=list)
    metadata: Optional[Dict[str, Any]] = None

    def _status_is(self, expected: str) -> bool:
        """Compare the task status against an expected state string."""
        return self.status == expected

    @property
    def is_completed(self) -> bool:
        """True when the task finished successfully."""
        return self._status_is("completed")

    @property
    def is_failed(self) -> bool:
        """True when the task ended in failure."""
        return self._status_is("failed")

    @property
    def is_canceled(self) -> bool:
        """True when the task was canceled before completing."""
        return self._status_is("canceled")
@dataclass
class StreamEvent:
    """One event emitted while streaming an A2A message.

    Attributes:
        event_type: Type of event (e.g., "started", "content", "tool_call", "completed")
        content: Text content (for content events)
        task_id: Task identifier
        context_id: Session/context identifier
        metadata: Additional event metadata
        is_final: Whether this is the final event in the stream
    """

    event_type: str
    content: Optional[str] = None
    task_id: Optional[str] = None
    context_id: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    is_final: bool = False

    @property
    def is_content(self) -> bool:
        """True for a content event that actually carries text."""
        return self.content is not None and self.event_type == "content"

    @property
    def is_started(self) -> bool:
        """True for the task-started event."""
        return self.event_type == "started"

    @property
    def is_completed(self) -> bool:
        """True for the task-completed event."""
        return self.event_type == "completed"

    @property
    def is_tool_call(self) -> bool:
        """True for either phase of a tool-call event."""
        return self.event_type in {"tool_call_started", "tool_call_completed"}
@dataclass
class AgentCard:
    """Agent capability discovery card.

    Describes the capabilities and metadata of an A2A-compatible agent.
    """
    # Display name of the remote agent (required)
    name: str
    # Base URL where the agent can be reached (required)
    url: str
    description: Optional[str] = None
    version: Optional[str] = None
    # Capability labels advertised by the agent; empty when unknown
    capabilities: List[str] = field(default_factory=list)
    # Any extra card fields not modeled explicitly
    metadata: Optional[Dict[str, Any]] = None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/client/a2a/schemas.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/client/a2a/utils.py | """Utility functions for mapping between A2A and Agno data structures.
This module provides bidirectional mapping between:
- A2A TaskResult ↔ Agno RunOutput / TeamRunOutput / WorkflowRunOutput
- A2A StreamEvent ↔ Agno RunOutputEvent / TeamRunOutputEvent / WorkflowRunOutputEvent
"""
from typing import AsyncIterator, List, Optional, Union
from agno.client.a2a.schemas import Artifact, StreamEvent, TaskResult
from agno.media import Audio, File, Image, Video
from agno.run.agent import (
RunCompletedEvent,
RunContentEvent,
RunOutput,
RunOutputEvent,
RunStartedEvent,
)
from agno.run.team import (
RunCompletedEvent as TeamRunCompletedEvent,
)
from agno.run.team import (
RunContentEvent as TeamRunContentEvent,
)
from agno.run.team import (
RunStartedEvent as TeamRunStartedEvent,
)
from agno.run.team import (
TeamRunOutput,
TeamRunOutputEvent,
)
from agno.run.workflow import (
WorkflowCompletedEvent,
WorkflowRunOutput,
WorkflowRunOutputEvent,
WorkflowStartedEvent,
)
def map_task_result_to_run_output(
    task_result: TaskResult,
    agent_id: str,
    user_id: Optional[str] = None,
) -> RunOutput:
    """Convert A2A TaskResult to Agno RunOutput.

    Maps the A2A protocol response onto Agno's internal run format so the
    result flows through Agno's agent infrastructure unchanged.

    Args:
        task_result: A2A TaskResult from send_message()
        agent_id: Agent identifier to include in output
        user_id: Optional user identifier to include in output

    Returns:
        RunOutput: Agno-compatible run output
    """
    # Sort artifacts into per-media buckets
    images: List[Image] = []
    videos: List[Video] = []
    audio: List[Audio] = []
    files: List[File] = []
    for item in task_result.artifacts:
        _classify_artifact(item, images, videos, audio, files)
    return RunOutput(
        content=task_result.content,
        run_id=task_result.task_id,
        session_id=task_result.context_id,
        agent_id=agent_id,
        user_id=user_id,
        images=images or None,
        videos=videos or None,
        audio=audio or None,
        files=files or None,
        metadata=task_result.metadata,
    )
def _classify_artifact(
    artifact: Artifact,
    images: List[Image],
    videos: List[Video],
    audio: List[Audio],
    files: List[File],
) -> None:
    """Append *artifact* to the matching media bucket based on its MIME type.

    Artifacts without a URI are skipped. Anything that is not an image,
    video, or audio artifact lands in the generic files bucket.

    Args:
        artifact: A2A artifact to classify
        images: Bucket for image artifacts
        videos: Bucket for video artifacts
        audio: Bucket for audio artifacts
        files: Bucket for everything else
    """
    url = artifact.uri
    if not url:
        # Nothing addressable to attach
        return
    mime = artifact.mime_type or ""
    if mime.startswith("image/"):
        images.append(Image(url=url, name=artifact.name))
    elif mime.startswith("video/"):
        videos.append(Video(url=url, name=artifact.name))
    elif mime.startswith("audio/"):
        audio.append(Audio(url=url, name=artifact.name))
    else:
        # An empty mime string is normalized back to None on the File
        files.append(File(url=url, name=artifact.name, mime_type=mime or None))
async def map_stream_events_to_run_events(
    stream: AsyncIterator[StreamEvent],
    agent_id: str,
) -> AsyncIterator[RunOutputEvent]:
    """Convert A2A stream events to Agno run events.

    Transforms the A2A streaming protocol events into Agno's event system,
    enabling real-time streaming from A2A servers to work with Agno consumers.

    Args:
        stream: AsyncIterator of A2A StreamEvents
        agent_id: Agent identifier to include in events

    Yields:
        RunOutputEvent: Agno-compatible run output events
    """
    current_run_id: Optional[str] = None
    current_session_id: Optional[str] = None
    collected_text = ""
    async for event in stream:
        # Latch task/context identifiers as soon as the server provides them
        if event.task_id:
            current_run_id = event.task_id
        if event.context_id:
            current_session_id = event.context_id
        if event.event_type == "working":
            # A "working" status update marks the start of the run
            yield RunStartedEvent(
                run_id=current_run_id,
                session_id=current_session_id,
                agent_id=agent_id,
            )
        elif event.is_content and event.content:
            collected_text += event.content
            yield RunContentEvent(
                content=event.content,
                run_id=current_run_id,
                session_id=current_session_id,
                agent_id=agent_id,
            )
        elif event.is_final:
            # Prefer the final event's content, else fall back to the accumulation
            yield RunCompletedEvent(
                content=event.content if event.content else collected_text,
                run_id=current_run_id,
                session_id=current_session_id,
                agent_id=agent_id,
            )
            break  # Stream complete
# =============================================================================
# Team Run Output Mapping Functions
# =============================================================================
def map_task_result_to_team_run_output(
    task_result: TaskResult,
    team_id: str,
    user_id: Optional[str] = None,
) -> TeamRunOutput:
    """Convert A2A TaskResult to Agno TeamRunOutput.

    Maps the A2A protocol response onto Agno's team run format so the
    result flows through Agno's team infrastructure unchanged.

    Args:
        task_result: A2A TaskResult from send_message()
        team_id: Team identifier to include in output
        user_id: Optional user identifier to include in output

    Returns:
        TeamRunOutput: Agno-compatible team run output
    """
    # Sort artifacts into per-media buckets
    images: List[Image] = []
    videos: List[Video] = []
    audio: List[Audio] = []
    files: List[File] = []
    for item in task_result.artifacts:
        _classify_artifact(item, images, videos, audio, files)
    return TeamRunOutput(
        content=task_result.content,
        run_id=task_result.task_id,
        session_id=task_result.context_id,
        team_id=team_id,
        user_id=user_id,
        images=images or None,
        videos=videos or None,
        audio=audio or None,
        files=files or None,
        metadata=task_result.metadata,
    )
async def map_stream_events_to_team_run_events(
    stream: AsyncIterator[StreamEvent],
    team_id: str,
) -> AsyncIterator[TeamRunOutputEvent]:
    """Convert A2A stream events to Agno team run events.

    Transforms the A2A streaming protocol events into Agno's team event system,
    enabling real-time streaming from A2A servers to work with Agno team consumers.

    Args:
        stream: AsyncIterator of A2A StreamEvents
        team_id: Team identifier to include in events

    Yields:
        TeamRunOutputEvent: Agno-compatible team run output events
    """
    current_run_id: Optional[str] = None
    current_session_id: Optional[str] = None
    collected_text = ""
    async for event in stream:
        # Latch task/context identifiers as soon as the server provides them
        if event.task_id:
            current_run_id = event.task_id
        if event.context_id:
            current_session_id = event.context_id
        if event.event_type == "working":
            # A "working" status update marks the start of the run
            yield TeamRunStartedEvent(
                run_id=current_run_id,
                session_id=current_session_id,
                team_id=team_id,
            )
        elif event.is_content and event.content:
            collected_text += event.content
            yield TeamRunContentEvent(
                content=event.content,
                run_id=current_run_id,
                session_id=current_session_id,
                team_id=team_id,
            )
        elif event.is_final:
            # Prefer the final event's content, else fall back to the accumulation
            yield TeamRunCompletedEvent(
                content=event.content if event.content else collected_text,
                run_id=current_run_id,
                session_id=current_session_id,
                team_id=team_id,
            )
            break  # Stream complete
# =============================================================================
# Workflow Run Output Mapping Functions
# =============================================================================
def map_task_result_to_workflow_run_output(
    task_result: TaskResult,
    workflow_id: str,
    user_id: Optional[str] = None,
) -> WorkflowRunOutput:
    """Convert A2A TaskResult to Agno WorkflowRunOutput.

    Maps the A2A protocol response onto Agno's workflow run format so the
    result flows through Agno's workflow infrastructure unchanged.

    Args:
        task_result: A2A TaskResult from send_message()
        workflow_id: Workflow identifier to include in output
        user_id: Optional user identifier to include in output

    Returns:
        WorkflowRunOutput: Agno-compatible workflow run output
    """
    # Sort artifacts into per-media buckets
    images: List[Image] = []
    videos: List[Video] = []
    audio: List[Audio] = []
    files: List[File] = []
    for item in task_result.artifacts:
        _classify_artifact(item, images, videos, audio, files)
    # NOTE(review): unlike the agent/team mappers, `files` is collected above but
    # not forwarded here — confirm whether WorkflowRunOutput supports a files field.
    return WorkflowRunOutput(
        content=task_result.content,
        run_id=task_result.task_id,
        session_id=task_result.context_id,
        workflow_id=workflow_id,
        user_id=user_id,
        images=images or None,
        videos=videos or None,
        audio=audio or None,
        metadata=task_result.metadata,
    )
async def map_stream_events_to_workflow_run_events(
    stream: AsyncIterator[StreamEvent],
    workflow_id: str,
) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent]]:
    """Convert A2A stream events to Agno workflow run events.

    Transforms the A2A streaming protocol events into Agno's workflow event
    system, enabling real-time streaming from A2A servers to work with Agno
    workflow consumers.

    Args:
        stream: AsyncIterator of A2A StreamEvents
        workflow_id: Workflow identifier to include in events

    Yields:
        Workflow start/completion events, plus RunContentEvent for content
        chunks (see the inline TODO for why the agent event type is used).
    """
    current_run_id: Optional[str] = None
    current_session_id: Optional[str] = None
    collected_text = ""
    async for event in stream:
        # Latch task/context identifiers as soon as the server provides them
        if event.task_id:
            current_run_id = event.task_id
        if event.context_id:
            current_session_id = event.context_id
        if event.event_type == "working":
            # A "working" status update marks the start of the run
            yield WorkflowStartedEvent(
                run_id=current_run_id,
                session_id=current_session_id,
                workflow_id=workflow_id,
            )
        elif event.is_content and event.content:
            collected_text += event.content
            # TODO: We don't have workflow content events and we don't know which agent or team created the content, so we're using the workflow_id as the agent_id.
            yield RunContentEvent(
                content=event.content,
                run_id=current_run_id,
                session_id=current_session_id,
                agent_id=workflow_id,
            )
        elif event.is_final:
            # Prefer the final event's content, else fall back to the accumulation
            yield WorkflowCompletedEvent(
                content=event.content if event.content else collected_text,
                run_id=current_run_id,
                session_id=current_session_id,
                workflow_id=workflow_id,
            )
            break  # Stream complete
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/client/a2a/utils.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/system/adk_server.py | """Google ADK A2A Server for system tests.
Uses Google's ADK to create an A2A-compatible agent.
Requires GOOGLE_API_KEY environment variable.
This server exposes a facts-agent that provides interesting facts,
using pure JSON-RPC at root "/" endpoint (Google ADK style).
"""
import os
from a2a.types import AgentCapabilities, AgentCard
from google.adk import Agent
from google.adk.a2a.utils.agent_to_a2a import to_a2a
from google.adk.tools import google_search
agent = Agent(
name="facts_agent",
model="gemini-2.5-flash-lite",
description="Agent that provides interesting facts.",
instruction="You are a helpful agent who provides interesting facts.",
tools=[google_search],
)
# Define A2A agent card
agent_card = AgentCard(
name="facts_agent",
description="Agent that provides interesting facts.",
url="http://localhost:7003",
version="1.0.0",
capabilities=AgentCapabilities(streaming=True, push_notifications=False, state_transition_history=False),
skills=[],
default_input_modes=["text/plain"],
default_output_modes=["text/plain"],
)
app = to_a2a(agent, port=int(os.getenv("PORT", "7003")), agent_card=agent_card)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=7003)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/system/adk_server.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/system/agno_a2a_server.py | """
Remote AgentOS Server for System Tests.
This server hosts the actual agents, teams, and workflows that the gateway
consumes via RemoteAgent, RemoteTeam, and RemoteWorkflow.
"""
import os
from agno.agent import Agent
from agno.db.postgres import AsyncPostgresDb
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.team.team import Team
from agno.tools.calculator import CalculatorTools
from agno.tools.websearch import WebSearchTools
from agno.vectordb.pgvector import PgVector
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# =============================================================================
# Database Configuration
# =============================================================================
# Shared async Postgres handle; DATABASE_URL falls back to the local/compose DSN
db = AsyncPostgresDb(
    id="remote-db",
    db_url=os.getenv("DATABASE_URL", "postgresql+psycopg://ai:ai@postgres:5432/ai"),
)
# =============================================================================
# Knowledge Base Configuration
# =============================================================================
# Vector-backed knowledge base: embeddings live in pgvector, raw contents in `db`
knowledge = Knowledge(
    name="Remote Knowledge",
    description="A knowledge base for the remote server",
    vector_db=PgVector(
        db_url=os.getenv("DATABASE_URL", "postgresql+psycopg://ai:ai@postgres:5432/ai"),
        table_name="a2a_test_knowledge",
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,
)
# =============================================================================
# Agent Configuration
# =============================================================================
# Agent 1: Assistant with calculator tools and memory
assistant = Agent(
    name="Assistant",
    id="assistant-agent-2",
    description="A helpful AI assistant with calculator capabilities.",
    model=OpenAIChat(id="gpt-5-mini"),
    db=db,
    instructions=[
        "You are a helpful AI assistant.",
        "Use the calculator tool for any math operations.",
        "You have access to a knowledge base - search it when asked about documents.",
    ],
    markdown=True,
    update_memory_on_run=True,
    tools=[CalculatorTools()],
    knowledge=knowledge,
    search_knowledge=True,
)
# Agent 2: Researcher with web search capabilities
researcher = Agent(
    name="Researcher",
    id="researcher-agent-2",
    description="A research assistant with web search capabilities.",
    model=OpenAIChat(id="gpt-5-mini"),
    update_memory_on_run=True,
    db=db,
    instructions=[
        "You are a research assistant.",
        "Search the web for information when needed.",
        "Provide well-researched, accurate responses.",
    ],
    markdown=True,
    tools=[WebSearchTools()],
)
# =============================================================================
# Team Configuration
# =============================================================================
# Coordinating team composed of both agents; delegation rules live in instructions
research_team = Team(
    name="Research Team",
    id="research-team-2",
    description="A team that coordinates research and analysis tasks.",
    model=OpenAIChat(id="gpt-5-mini"),
    members=[assistant, researcher],
    instructions=[
        "You are a research team that coordinates multiple specialists.",
        "Delegate math questions to the Assistant.",
        "Delegate research questions to the Researcher.",
        "Combine insights from team members for comprehensive answers.",
    ],
    markdown=True,
    update_memory_on_run=True,
    db=db,
)
# =============================================================================
# Workflow Configuration
# =============================================================================
# Single-step workflow that just routes the input to the assistant agent
qa_workflow = Workflow(
    name="QA Workflow",
    description="A simple Q&A workflow that uses the assistant agent",
    id="qa-workflow-2",
    db=db,
    steps=[
        Step(
            name="Answer Question",
            agent=assistant,
        ),
    ],
)
# =============================================================================
# AgentOS Configuration
# =============================================================================
# a2a_interface=True exposes the registered components over the A2A protocol
agent_os = AgentOS(
    id="remote-os",
    description="Remote AgentOS server hosting agents, teams, and workflows for system testing",
    agents=[assistant, researcher],
    teams=[research_team],
    workflows=[qa_workflow],
    knowledge=[knowledge],
    a2a_interface=True,
)
# FastAPI app instance (for uvicorn)
app = agent_os.get_app()
# =============================================================================
# Main Entry Point
# =============================================================================
if __name__ == "__main__":
    # RELOAD env var toggles auto-reload; anything other than "true" disables it
    reload = os.getenv("RELOAD", "true").lower() == "true"
    agent_os.serve(app="agno_a2a_server:app", reload=reload, host="0.0.0.0", port=7004, access_log=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/system/agno_a2a_server.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/system/tests/test_a2a_remote_agent.py | import uuid
import pytest
from agno.agent import RemoteAgent
from agno.team import RemoteTeam
from agno.utils.http import aclose_default_clients
from agno.workflow import RemoteWorkflow
from .test_utils import generate_jwt_token
# Per-request timeout (seconds) for all remote A2A calls in this module
REQUEST_TIMEOUT = 60.0
# A2A team and workflow IDs (exposed by gateway via A2A interface)
A2A_AGENT_ID = "assistant-agent-2"
A2A_TEAM_ID = "research-team-2"
A2A_WORKFLOW_ID = "qa-workflow-2"
@pytest.fixture(scope="module")
def a2a_base_url(agno_a2a_url: str) -> str:
"""Get the A2A endpoint URL."""
return f"{agno_a2a_url}/a2a"
@pytest.fixture(scope="module")
def test_user_id() -> str:
"""Generate a unique user ID for testing."""
return f"test-user-{uuid.uuid4().hex[:8]}"
@pytest.fixture(scope="module")
def token(test_user_id: str) -> str:
return generate_jwt_token(user_id=test_user_id)
@pytest.fixture(autouse=True)
async def cleanup_http_clients():
    """Cleanup HTTP clients after each test to prevent event loop closure issues."""
    yield
    # Teardown: close the shared async HTTP clients opened during the test
    try:
        await aclose_default_clients()
    except RuntimeError:
        # Event loop may already be closed, ignore
        pass
@pytest.mark.asyncio
async def test_a2a_remote_agent_basic_messaging(a2a_base_url: str, token: str):
    """Test basic non-streaming message via A2A protocol to RemoteAgent."""
    agent = RemoteAgent(
        base_url=f"{a2a_base_url}/agents/{A2A_AGENT_ID}",
        agent_id=A2A_AGENT_ID,
        protocol="a2a",
        timeout=REQUEST_TIMEOUT,
    )
    response = await agent.arun(
        input="What is 2 + 2?",
        stream=False,
        auth_token=token,
    )
    # The remote agent must produce a non-empty, identifiable run
    assert response is not None
    assert response.content is not None
    assert response.run_id is not None
@pytest.mark.asyncio
async def test_a2a_remote_team_basic_messaging(a2a_base_url: str, token: str):
    """Test basic non-streaming message via A2A protocol to RemoteTeam."""
    team = RemoteTeam(
        base_url=f"{a2a_base_url}/teams/{A2A_TEAM_ID}",
        team_id=A2A_TEAM_ID,
        protocol="a2a",
        timeout=REQUEST_TIMEOUT,
    )
    response = await team.arun(
        input="What is 2 + 2?",
        stream=False,
        auth_token=token,
    )
    # The remote team must produce a non-empty, identifiable run
    assert response is not None
    assert response.content is not None
    assert response.run_id is not None
@pytest.mark.asyncio
async def test_a2a_remote_workflow_basic_messaging(a2a_base_url: str, token: str):
    """Test basic non-streaming message via A2A protocol to RemoteWorkflow."""
    workflow = RemoteWorkflow(
        base_url=f"{a2a_base_url}/workflows/{A2A_WORKFLOW_ID}",
        workflow_id=A2A_WORKFLOW_ID,
        protocol="a2a",
        timeout=REQUEST_TIMEOUT,
    )
    response = await workflow.arun(
        input="What is the capital of France?",
        stream=False,
        auth_token=token,
    )
    # The remote workflow must produce a non-empty, identifiable run
    assert response is not None
    assert response.content is not None
    assert response.run_id is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/system/tests/test_a2a_remote_agent.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/a2a/test_client.py | """Unit tests for A2AClient."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from httpx import HTTPStatusError, Request, Response
from agno.client.a2a import (
A2AClient,
StreamEvent,
TaskResult,
)
from agno.exceptions import RemoteServerUnavailableError
class TestA2AClientInit:
    """Tests covering A2AClient construction and endpoint resolution."""

    def test_init_default_values(self):
        """Defaults: 30s timeout and REST protocol."""
        c = A2AClient("http://localhost:7777")
        assert c.base_url == "http://localhost:7777"
        assert c.timeout == 30
        assert c.protocol == "rest"

    def test_init_custom_values(self):
        """Custom timeout/protocol are honored; trailing slash is stripped."""
        c = A2AClient(
            "http://localhost:8080/",
            timeout=60,
            protocol="json-rpc",
        )
        assert c.base_url == "http://localhost:8080"  # Trailing slash stripped
        assert c.timeout == 60
        assert c.protocol == "json-rpc"

    def test_get_endpoint_rest_mode(self):
        """REST mode appends the resource path to the base URL."""
        c = A2AClient("http://localhost:7777", protocol="rest")
        assert c._get_endpoint("/v1/message:send") == "http://localhost:7777/v1/message:send"

    def test_get_endpoint_json_rpc_mode(self):
        """JSON-RPC mode always posts to the server root."""
        c = A2AClient("http://localhost:7777", protocol="json-rpc")
        assert c._get_endpoint("/v1/message:send") == "http://localhost:7777/"
class TestBuildMessageRequest:
    """Tests for JSON-RPC message request construction."""

    def test_basic_request(self):
        """A plain request uses message/send and wraps the text in a part."""
        client = A2AClient("http://localhost:7777")
        req = client._build_message_request(
            message="Hello",
            stream=False,
        )
        assert req["jsonrpc"] == "2.0"
        assert req["method"] == "message/send"
        assert "id" in req
        msg = req["params"]["message"]
        assert msg["role"] == "user"
        assert msg["parts"][0]["kind"] == "text"
        assert msg["parts"][0]["text"] == "Hello"

    def test_streaming_request(self):
        """Streaming switches the JSON-RPC method to message/stream."""
        client = A2AClient("http://localhost:7777")
        req = client._build_message_request(
            message="Hello",
            stream=True,
        )
        assert req["method"] == "message/stream"

    def test_request_with_context(self):
        """Context and user identifiers are threaded into the payload."""
        client = A2AClient("http://localhost:7777")
        req = client._build_message_request(
            message="Hello",
            context_id="session-123",
            user_id="user-456",
            stream=False,
        )
        msg = req["params"]["message"]
        assert msg["contextId"] == "session-123"
        assert msg["metadata"]["userId"] == "user-456"
class TestParseTaskResult:
    """Test task result parsing."""
    def test_parse_basic_response(self):
        """Test parsing basic A2A response."""
        client = A2AClient("http://localhost:7777")
        # Minimal JSON-RPC envelope: completed task with one agent message
        response_data = {
            "jsonrpc": "2.0",
            "id": "req-1",
            "result": {
                "id": "task-123",
                "context_id": "ctx-456",
                "status": {"state": "completed"},
                "history": [
                    {
                        "role": "agent",
                        "parts": [{"kind": "text", "text": "Hello, world!"}],
                    }
                ],
            },
        }
        result = client._parse_task_result(response_data)
        # IDs, state, and text must be lifted out of the envelope verbatim
        assert isinstance(result, TaskResult)
        assert result.task_id == "task-123"
        assert result.context_id == "ctx-456"
        assert result.status == "completed"
        assert result.content == "Hello, world!"
        assert result.is_completed
        assert not result.is_failed
    def test_parse_failed_response(self):
        """Test parsing failed task response."""
        client = A2AClient("http://localhost:7777")
        # Same envelope shape but with a failed state (no jsonrpc/id wrapper)
        response_data = {
            "result": {
                "id": "task-123",
                "context_id": "ctx-456",
                "status": {"state": "failed"},
                "history": [
                    {
                        "role": "agent",
                        "parts": [{"kind": "text", "text": "Error occurred"}],
                    }
                ],
            },
        }
        result = client._parse_task_result(response_data)
        # Failure is surfaced via status, not via an exception
        assert result.status == "failed"
        assert result.is_failed
        assert result.content == "Error occurred"
    def test_parse_with_artifacts(self):
        """Test parsing response with artifacts."""
        client = A2AClient("http://localhost:7777")
        # Artifact payload uses camelCase "mimeType", snake_case "artifact_id"
        response_data = {
            "result": {
                "id": "task-123",
                "context_id": "ctx-456",
                "status": {"state": "completed"},
                "history": [],
                "artifacts": [
                    {
                        "artifact_id": "art-1",
                        "name": "image.png",
                        "mimeType": "image/png",
                        "uri": "http://example.com/image.png",
                    }
                ],
            },
        }
        result = client._parse_task_result(response_data)
        assert len(result.artifacts) == 1
        assert result.artifacts[0].artifact_id == "art-1"
        assert result.artifacts[0].name == "image.png"
class TestParseStreamEvent:
    """Test stream event parsing."""
    def test_parse_content_event(self):
        """Test parsing content event."""
        client = A2AClient("http://localhost:7777")
        # Content events have kind="message" and parts with text
        data = {
            "result": {
                "kind": "message",
                "messageId": "msg-1",
                "role": "agent",
                "parts": [{"kind": "text", "text": "Hello"}],
                "contextId": "ctx-456",
                "taskId": "task-123",
            },
        }
        event = client._parse_stream_event(data)
        # A message chunk must surface as a non-final "content" event
        assert isinstance(event, StreamEvent)
        assert event.event_type == "content"
        assert event.content == "Hello"
        assert event.is_content
        assert not event.is_final
    def test_parse_status_event(self):
        """Test parsing status event."""
        client = A2AClient("http://localhost:7777")
        # Status updates carry kind="status-update" plus the task state
        data = {
            "result": {
                "kind": "status-update",
                "taskId": "task-123",
                "contextId": "ctx-456",
                "status": {"state": "working"},
                "final": False,
            },
        }
        event = client._parse_stream_event(data)
        # The reported state becomes the event type
        assert event.event_type == "working"
        assert not event.is_final
    def test_parse_completed_event(self):
        """Test parsing completed event."""
        client = A2AClient("http://localhost:7777")
        data = {
            "result": {
                "kind": "status-update",
                "taskId": "task-123",
                "contextId": "ctx-456",
                "status": {"state": "completed"},
                "final": True,
            },
        }
        event = client._parse_stream_event(data)
        # final=True in the payload must mark the event as terminal
        assert event.event_type == "completed"
        assert event.is_final
class TestSendMessage:
    """Test send_message method."""
    @pytest.mark.asyncio
    async def test_send_message_success(self):
        """Test successful message send."""
        # Patch the shared HTTP client factory so no network I/O happens
        with patch("agno.client.a2a.client.get_default_async_client") as mock_get_client:
            mock_response = MagicMock()
            mock_response.status_code = 200
            mock_response.json.return_value = {
                "jsonrpc": "2.0",
                "id": "req-1",
                "result": {
                    "id": "task-123",
                    "context_id": "ctx-456",
                    "status": {"state": "completed"},
                    "history": [
                        {
                            "role": "agent",
                            "parts": [{"kind": "text", "text": "The answer is 4"}],
                        }
                    ],
                },
            }
            mock_response.raise_for_status = MagicMock()
            mock_http_client = AsyncMock()
            mock_http_client.post.return_value = mock_response
            mock_get_client.return_value = mock_http_client
            client = A2AClient("http://localhost:7777")
            result = await client.send_message(
                message="What is 2 + 2?",
            )
            # The mocked payload must round-trip into the TaskResult
            assert result.content == "The answer is 4"
            assert result.is_completed
            mock_http_client.post.assert_called_once()
    @pytest.mark.asyncio
    async def test_send_message_http_error(self):
        """Test send_message with HTTP error."""
        with patch("agno.client.a2a.client.get_default_async_client") as mock_get_client:
            # Simulate the transport raising on a 404 response
            mock_response = Response(404, request=Request("POST", "http://test"))
            mock_http_client = AsyncMock()
            mock_http_client.post.side_effect = HTTPStatusError(
                "Not Found", request=mock_response.request, response=mock_response
            )
            mock_get_client.return_value = mock_http_client
            client = A2AClient("http://localhost:7777")
            # HTTP status errors propagate unchanged to the caller
            with pytest.raises(HTTPStatusError) as exc_info:
                await client.send_message(
                    message="Hello",
                )
            assert exc_info.value.response.status_code == 404
    @pytest.mark.asyncio
    async def test_send_message_connection_error(self):
        """Test send_message with connection error."""
        with patch("agno.client.a2a.client.get_default_async_client") as mock_get_client:
            from httpx import ConnectError
            mock_http_client = AsyncMock()
            mock_http_client.post.side_effect = ConnectError("Connection refused")
            mock_get_client.return_value = mock_http_client
            client = A2AClient("http://localhost:7777")
            # Connection failures are wrapped in RemoteServerUnavailableError
            with pytest.raises(RemoteServerUnavailableError) as exc_info:
                await client.send_message(
                    message="Hello",
                )
            assert "Failed to connect" in str(exc_info.value)
            assert exc_info.value.base_url == "http://localhost:7777"
    @pytest.mark.asyncio
    async def test_send_message_timeout(self):
        """Test send_message with timeout."""
        with patch("agno.client.a2a.client.get_default_async_client") as mock_get_client:
            from httpx import TimeoutException
            mock_http_client = AsyncMock()
            mock_http_client.post.side_effect = TimeoutException("Request timed out")
            mock_get_client.return_value = mock_http_client
            client = A2AClient("http://localhost:7777")
            # Timeouts are also wrapped in RemoteServerUnavailableError
            with pytest.raises(RemoteServerUnavailableError) as exc_info:
                await client.send_message(
                    message="Hello",
                )
            assert "timed out" in str(exc_info.value)
    @pytest.mark.asyncio
    async def test_send_message_failed_task_returns_result(self):
        """Test send_message returns TaskResult even when task reports failed status."""
        with patch("agno.client.a2a.client.get_default_async_client") as mock_get_client:
            mock_response = MagicMock()
            mock_response.status_code = 200
            # HTTP layer succeeds; failure is expressed in the task status only
            mock_response.json.return_value = {
                "result": {
                    "id": "task-123",
                    "context_id": "ctx-456",
                    "status": {"state": "failed"},
                    "history": [
                        {
                            "role": "agent",
                            "parts": [{"kind": "text", "text": "Error: Something went wrong"}],
                        }
                    ],
                },
            }
            mock_response.raise_for_status = MagicMock()
            mock_http_client = AsyncMock()
            mock_http_client.post.return_value = mock_response
            mock_get_client.return_value = mock_http_client
            client = A2AClient("http://localhost:7777")
            result = await client.send_message(
                message="Do something",
            )
            # Task-level failure must NOT raise — the caller inspects is_failed
            assert result.is_failed
            assert result.task_id == "task-123"
class TestStreamMessage:
    """Test stream_message method."""
    @pytest.mark.asyncio
    async def test_stream_message_success(self):
        """Test successful message streaming."""
        with patch("agno.client.a2a.client.get_default_async_client") as mock_get_client:
            # Create mock streaming response
            async def mock_aiter_lines():
                # Simulated server lines: working status, two content chunks, final status
                lines = [
                    '{"result": {"kind": "status-update", "taskId": "task-123", "status": {"state": "working"}}}',
                    '{"result": {"kind": "message", "messageId": "m1", "parts": [{"kind": "text", "text": "Hello"}]}}',
                    '{"result": {"kind": "message", "messageId": "m2", "parts": [{"kind": "text", "text": " World"}]}}',
                    '{"result": {"kind": "status-update", "taskId": "task-123", "status": {"state": "completed"}, "final": true}}',
                ]
                for line in lines:
                    yield line
            mock_stream_response = MagicMock()
            mock_stream_response.status_code = 200
            mock_stream_response.raise_for_status = MagicMock()
            mock_stream_response.aiter_lines = mock_aiter_lines
            # Create async context manager mock
            mock_stream_cm = AsyncMock()
            mock_stream_cm.__aenter__.return_value = mock_stream_response
            mock_stream_cm.__aexit__.return_value = None
            # .stream(...) itself is a sync call returning the async CM, hence MagicMock
            mock_http_client = MagicMock()
            mock_http_client.stream.return_value = mock_stream_cm
            mock_get_client.return_value = mock_http_client
            events = []
            client = A2AClient("http://localhost:7777")
            async for event in client.stream_message(
                message="Hello",
            ):
                events.append(event)
            # One parsed event per streamed line
            assert len(events) == 4
            # Check content events
            content_events = [e for e in events if e.is_content]
            assert len(content_events) == 2
            assert content_events[0].content == "Hello"
            assert content_events[1].content == " World"
class TestSchemas:
    """Tests for the schema dataclasses' helper properties."""

    def test_task_result_properties(self):
        """TaskResult status helpers reflect the status string."""
        done = TaskResult(
            task_id="t1",
            context_id="c1",
            status="completed",
            content="Done",
        )
        assert done.is_completed
        assert not done.is_failed
        assert not done.is_canceled
        broken = TaskResult(
            task_id="t2",
            context_id="c2",
            status="failed",
            content="Error",
        )
        assert not broken.is_completed
        assert broken.is_failed

    def test_stream_event_properties(self):
        """StreamEvent helpers classify events by type and finality."""
        text_event = StreamEvent(
            event_type="content",
            content="Hello",
        )
        assert text_event.is_content
        assert not text_event.is_final
        final_event = StreamEvent(
            event_type="completed",
            is_final=True,
        )
        assert final_event.is_completed
        assert final_event.is_final
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/a2a/test_client.py",
"license": "Apache License 2.0",
"lines": 375,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_postgres/test_sanitization.py | """Integration tests for string sanitization in AsyncPostgresDb operations.
These tests verify that null bytes (\x00) are properly removed from strings
before storing them in PostgreSQL to prevent CharacterNotInRepertoireError.
"""
import time
import pytest
import pytest_asyncio
from agno.db.postgres import AsyncPostgresDb
from agno.db.schemas.knowledge import KnowledgeRow
from agno.db.schemas.memory import UserMemory
from agno.run.agent import RunOutput
from agno.run.base import RunStatus
from agno.session.agent import AgentSession
@pytest_asyncio.fixture(autouse=True)
async def cleanup_all_tables(async_postgres_db_real: AsyncPostgresDb):
    """Best-effort wipe of every test table once the test has finished."""
    yield
    table_types = ("memories", "sessions", "knowledge", "evals", "traces", "spans", "culture")
    try:
        for kind in table_types:
            try:
                tbl = await async_postgres_db_real._get_table(kind)
                async with async_postgres_db_real.async_session_factory() as db_session:
                    await db_session.execute(tbl.delete())
                    await db_session.commit()
            except Exception:
                pass  # the table may not exist yet; ignore
    except Exception:
        pass  # teardown must never fail a test
# =============================================================================
# Memory Sanitization Tests
# =============================================================================
@pytest.mark.asyncio
async def test_memory_upsert_sanitizes_input_field(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes in memory.input must be stripped before hitting Postgres."""
    dirty = UserMemory(
        memory_id="test_memory_null_input",
        memory={"content": "Test memory"},
        input="Test input with\x00null byte",
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    # A CharacterNotInRepertoireError here would mean sanitization failed.
    saved = await async_postgres_db_real.upsert_user_memory(dirty)
    assert saved is not None
    assert "\x00" not in saved.input
    assert saved.input == "Test input withnull byte"
    # Round-trip: the persisted copy must be clean as well.
    stored = await async_postgres_db_real.get_user_memory("test_memory_null_input", "test_user_1")
    assert stored is not None
    assert "\x00" not in stored.input
    assert stored.input == "Test input withnull byte"
@pytest.mark.asyncio
async def test_memory_upsert_sanitizes_feedback_field(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes in memory.feedback must be stripped before persisting."""
    dirty = UserMemory(
        memory_id="test_memory_null_feedback",
        memory={"content": "Test memory"},
        input="Test input",
        feedback="Feedback with\x00null\x00bytes",
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    saved = await async_postgres_db_real.upsert_user_memory(dirty)
    assert saved is not None
    assert "\x00" not in saved.feedback
    assert saved.feedback == "Feedback withnullbytes"
    # The stored row must be clean too.
    stored = await async_postgres_db_real.get_user_memory("test_memory_null_feedback", "test_user_1")
    assert stored is not None
    assert "\x00" not in stored.feedback
@pytest.mark.asyncio
async def test_memory_upsert_sanitizes_nested_jsonb_strings(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes inside JSONB payloads (memory dict, topics list) are stripped."""
    dirty = UserMemory(
        memory_id="test_memory_null_jsonb",
        memory={"content": "Test\x00memory", "description": "Desc\x00with\x00nulls"},
        input="Test input",
        topics=["topic1\x00", "topic2", "topic\x003"],
        user_id="test_user_1",
        agent_id="test_agent_1",
        created_at=int(time.time()),
    )
    saved = await async_postgres_db_real.upsert_user_memory(dirty)
    assert saved is not None
    for value in (saved.memory["content"], saved.memory["description"], *saved.topics):
        assert "\x00" not in value
    # Round-trip: stored copy must also be clean.
    stored = await async_postgres_db_real.get_user_memory("test_memory_null_jsonb", "test_user_1")
    assert stored is not None
    assert "\x00" not in stored.memory["content"]
    assert all("\x00" not in topic for topic in stored.topics)
@pytest.mark.asyncio
async def test_memory_upsert_handles_multiple_null_bytes(async_postgres_db_real: AsyncPostgresDb):
    """Every null byte is removed, regardless of count or position."""
    dirty = UserMemory(
        memory_id="test_memory_multiple_nulls",
        memory={"content": "Test"},
        input="\x00\x00\x00Multiple\x00nulls\x00\x00",
        feedback="\x00\x00\x00",
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    saved = await async_postgres_db_real.upsert_user_memory(dirty)
    assert saved is not None
    assert "\x00" not in saved.input
    assert "\x00" not in saved.feedback
    assert saved.input == "Multiplenulls"
    # A value made up purely of null bytes collapses to the empty string.
    assert saved.feedback == ""
# =============================================================================
# Knowledge Sanitization Tests
# =============================================================================
@pytest.mark.asyncio
async def test_knowledge_upsert_sanitizes_string_fields(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes are stripped from every plain string column of a knowledge row."""
    row = KnowledgeRow(
        id="test_knowledge_null_strings",
        name="Knowledge\x00Name",
        description="Description\x00with\x00nulls",
        type="document\x00",
        status="active\x00",
        status_message="Message\x00here",
        linked_to="link\x00to",
        external_id="ext\x00id",
        metadata={"key": "value"},
        created_at=int(time.time()),
    )
    assert await async_postgres_db_real.upsert_knowledge_content(row) is not None
    # Sanitization happens on write, so inspect the round-tripped row.
    stored = await async_postgres_db_real.get_knowledge_content("test_knowledge_null_strings")
    assert stored is not None
    for field_value in (
        stored.name,
        stored.description,
        stored.type,
        stored.status,
        stored.status_message,
        stored.linked_to,
        stored.external_id,
    ):
        assert "\x00" not in field_value
@pytest.mark.asyncio
async def test_knowledge_upsert_sanitizes_metadata_jsonb(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes nested anywhere inside the metadata JSONB are stripped."""
    row = KnowledgeRow(
        id="test_knowledge_null_metadata",
        name="Test Knowledge",
        description="Test description",
        metadata={
            "key1": "value\x00with\x00nulls",
            "key2": ["list\x00item1", "list\x00item2"],
            "key3": {"nested": "nested\x00value", "nested_list": ["a\x00", "b\x00"]},
        },
        created_at=int(time.time()),
    )
    assert await async_postgres_db_real.upsert_knowledge_content(row) is not None
    # upsert_knowledge_content echoes the input object, so check the stored row.
    stored = await async_postgres_db_real.get_knowledge_content("test_knowledge_null_metadata")
    assert stored is not None
    meta = stored.metadata
    assert "\x00" not in meta["key1"]
    assert not any("\x00" in item for item in meta["key2"])
    assert "\x00" not in meta["key3"]["nested"]
    assert not any("\x00" in item for item in meta["key3"]["nested_list"])
# =============================================================================
# Session Sanitization Tests
# =============================================================================
@pytest.mark.asyncio
async def test_session_upsert_sanitizes_nested_jsonb_fields(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes inside the session's JSONB columns are stripped."""
    seed_run = RunOutput(
        run_id="test_run_1",
        agent_id="test_agent_1",
        user_id="test_user_1",
        status=RunStatus.completed,
        messages=[],
    )
    dirty_session = AgentSession(
        session_id="test_session_null_jsonb",
        agent_id="test_agent_1",
        user_id="test_user_1",
        session_data={"name": "Session\x00Name", "data": "Value\x00here"},
        agent_data={"name": "Agent\x00Name", "model": "gpt-4"},
        metadata={"key": "value\x00with\x00nulls"},
        runs=[seed_run],
        created_at=int(time.time()),
        updated_at=int(time.time()),
    )
    saved = await async_postgres_db_real.upsert_session(dirty_session)
    assert saved is not None
    for value in (
        saved.session_data["name"],
        saved.session_data["data"],
        saved.agent_data["name"],
        saved.metadata["key"],
    ):
        assert "\x00" not in value
    # Round-trip: the stored row must be clean too.
    from agno.db.base import SessionType

    stored = await async_postgres_db_real.get_session("test_session_null_jsonb", SessionType.AGENT)
    assert stored is not None
    assert "\x00" not in str(stored.session_data)
    assert "\x00" not in str(stored.agent_data)
    assert "\x00" not in str(stored.metadata)
@pytest.mark.asyncio
async def test_session_upsert_sanitizes_summary_field(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes inside the session summary are stripped."""
    from agno.session.summary import SessionSummary

    seed_run = RunOutput(
        run_id="test_run_2",
        agent_id="test_agent_1",
        user_id="test_user_1",
        status=RunStatus.completed,
        messages=[],
    )
    dirty_session = AgentSession(
        session_id="test_session_null_summary",
        agent_id="test_agent_1",
        user_id="test_user_1",
        session_data={},
        summary=SessionSummary(summary="Summary\x00with\x00null\x00bytes"),
        runs=[seed_run],
        created_at=int(time.time()),
        updated_at=int(time.time()),
    )
    saved = await async_postgres_db_real.upsert_session(dirty_session)
    assert saved is not None
    assert saved.summary is not None
    assert "\x00" not in saved.summary.summary
    # Round-trip: the persisted summary must also be clean.
    from agno.db.base import SessionType

    stored = await async_postgres_db_real.get_session("test_session_null_summary", SessionType.AGENT)
    assert stored is not None
    assert stored.summary is not None
    assert "\x00" not in stored.summary.summary
# =============================================================================
# Trace and Span Sanitization Tests
# =============================================================================
@pytest.mark.asyncio
async def test_trace_upsert_sanitizes_fields(async_postgres_db_real: AsyncPostgresDb):
    """Test that null bytes in trace fields are sanitized.

    upsert_trace returns None, so the stored row is queried directly.
    """
    from datetime import datetime, timezone

    from agno.tracing.schemas import Trace

    trace = Trace(
        trace_id="test_trace_null",
        name="Trace\x00Name",
        status="OK",
        start_time=datetime.now(timezone.utc),
        end_time=datetime.now(timezone.utc),
        duration_ms=100,
        total_spans=1,
        error_count=0,
        run_id=None,
        session_id=None,
        user_id=None,
        agent_id=None,
        team_id=None,
        workflow_id=None,
        created_at=datetime.now(timezone.utc),
    )
    # upsert_trace returns None, so we verify by querying
    await async_postgres_db_real.upsert_trace(trace)
    # Verify stored values by querying directly
    async with async_postgres_db_real.async_session_factory() as session:
        traces_table = await async_postgres_db_real._get_table("traces")
        result_query = await session.execute(traces_table.select().where(traces_table.c.trace_id == "test_trace_null"))
        row = result_query.fetchone()
        assert row is not None
        # Fix: the previous `assert X if cond else True` conditional-expression
        # asserts were misleading (they read as if the assert could be skipped
        # entirely); explicit guards express the same check clearly.
        if row.name:
            assert "\x00" not in str(row.name)
        if row.status:
            assert "\x00" not in str(row.status)
@pytest.mark.asyncio
async def test_span_upsert_sanitizes_fields(async_postgres_db_real: AsyncPostgresDb):
    """Test that null bytes in span fields are sanitized.

    create_span returns None, so the stored row is queried directly.
    """
    from datetime import datetime, timezone

    from agno.tracing.schemas import Span

    span = Span(
        span_id="test_span_null",
        trace_id="test_trace_null",
        parent_span_id=None,
        name="Span\x00Name",
        span_kind="INTERNAL",
        status_code="OK",
        status_message=None,
        start_time=datetime.now(timezone.utc),
        end_time=datetime.now(timezone.utc),
        duration_ms=50,
        attributes={"key": "value\x00with\x00nulls", "list": ["item\x00"]},
        created_at=datetime.now(timezone.utc),
    )
    # create_span returns None, so we verify by querying
    await async_postgres_db_real.create_span(span)
    # Verify stored values
    async with async_postgres_db_real.async_session_factory() as session:
        spans_table = await async_postgres_db_real._get_table("spans")
        result_query = await session.execute(spans_table.select().where(spans_table.c.span_id == "test_span_null"))
        row = result_query.fetchone()
        assert row is not None
        # Fix: replace the confusing `assert X if cond else True` conditional
        # expressions with explicit guards that perform the same check.
        if row.name:
            assert "\x00" not in str(row.name)
        if row.attributes:
            assert "\x00" not in str(row.attributes)
# =============================================================================
# Eval Sanitization Tests
# =============================================================================
@pytest.mark.asyncio
async def test_eval_upsert_sanitizes_fields(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes in eval-run fields (plain strings and JSONB) are stripped."""
    from agno.db.schemas.evals import EvalRunRecord, EvalType

    dirty = EvalRunRecord(
        run_id="test_eval_null",
        eval_type=EvalType.ACCURACY,
        name="Eval\x00Name",
        evaluated_component_name="Component\x00Name",
        eval_data={"key": "value\x00with\x00nulls", "list": ["item\x00"]},
        eval_input={"input": "input\x00value"},
        agent_id="test_agent_1",
        created_at=int(time.time()),
    )
    assert await async_postgres_db_real.create_eval_run(dirty) is not None
    # create_eval_run echoes the input object, so check the stored row.
    stored = await async_postgres_db_real.get_eval_run("test_eval_null")
    assert stored is not None
    assert "\x00" not in stored.name
    assert "\x00" not in stored.evaluated_component_name
    assert "\x00" not in str(stored.eval_data)
    assert "\x00" not in str(stored.eval_input)
# =============================================================================
# Cultural Knowledge Sanitization Tests
# =============================================================================
@pytest.mark.asyncio
async def test_cultural_knowledge_upsert_sanitizes_fields(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes in cultural-knowledge fields are stripped on upsert."""
    from agno.db.schemas.culture import CulturalKnowledge

    dirty = CulturalKnowledge(
        id="test_culture_null",
        name="Culture\x00Name",
        summary="Summary\x00with\x00nulls",
        input="Input\x00value",
        content="Content\x00with\x00nulls",
        metadata={"meta": "meta\x00value", "nested": {"inner": "inner\x00value"}},
    )
    saved = await async_postgres_db_real.upsert_cultural_knowledge(dirty)
    assert saved is not None
    for value in (saved.name, saved.summary, saved.input, saved.content, str(saved.metadata)):
        assert "\x00" not in value
    # Round-trip: the persisted copy must be clean as well.
    stored = await async_postgres_db_real.get_cultural_knowledge("test_culture_null")
    assert stored is not None
    assert "\x00" not in stored.name
    assert "\x00" not in stored.content
    assert "\x00" not in str(stored.metadata)
# =============================================================================
# Edge Cases
# =============================================================================
@pytest.mark.asyncio
async def test_sanitization_preserves_normal_strings(async_postgres_db_real: AsyncPostgresDb):
    """Strings without null bytes must come back completely unchanged."""
    clean = UserMemory(
        memory_id="test_memory_normal",
        memory={"content": "Normal content"},
        input="Normal input string",
        feedback="Normal feedback",
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    saved = await async_postgres_db_real.upsert_user_memory(clean)
    assert saved is not None
    assert saved.memory["content"] == "Normal content"
    assert saved.input == "Normal input string"
    assert saved.feedback == "Normal feedback"
@pytest.mark.asyncio
async def test_sanitization_handles_none_values(async_postgres_db_real: AsyncPostgresDb):
    """None values pass through sanitization untouched."""
    record = UserMemory(
        memory_id="test_memory_none",
        memory={"content": "Test"},
        input=None,
        feedback=None,
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    saved = await async_postgres_db_real.upsert_user_memory(record)
    assert saved is not None
    assert saved.input is None
    assert saved.feedback is None
@pytest.mark.asyncio
async def test_sanitization_handles_empty_strings(async_postgres_db_real: AsyncPostgresDb):
    """Empty strings survive sanitization as empty strings."""
    record = UserMemory(
        memory_id="test_memory_empty",
        memory={"content": "Test"},
        input="",
        feedback="",
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    saved = await async_postgres_db_real.upsert_user_memory(record)
    assert saved is not None
    assert saved.input == ""
    assert saved.feedback == ""
@pytest.mark.asyncio
async def test_sanitization_handles_only_null_bytes(async_postgres_db_real: AsyncPostgresDb):
    """A string made up entirely of null bytes collapses to the empty string."""
    record = UserMemory(
        memory_id="test_memory_only_nulls",
        memory={"content": "Test"},
        input="\x00\x00\x00",
        user_id="test_user_1",
        agent_id="test_agent_1",
    )
    saved = await async_postgres_db_real.upsert_user_memory(record)
    assert saved is not None
    assert saved.input == ""
@pytest.mark.asyncio
async def test_sanitization_handles_deeply_nested_structures(async_postgres_db_real: AsyncPostgresDb):
    """Null bytes buried several JSONB levels deep are stripped."""
    record = UserMemory(
        memory_id="test_memory_deep_nested",
        memory={
            "level1": {
                "level2": {
                    "level3": {
                        "level4": "deep\x00value",
                        "list": [{"item": "item\x00value"}, {"item2": "item2\x00"}],
                    }
                }
            },
            "simple": "simple\x00value",
        },
        input="Test input",
        user_id="test_user_1",
        agent_id="test_agent_1",
        created_at=int(time.time()),
    )
    saved = await async_postgres_db_real.upsert_user_memory(record)
    assert saved is not None
    deepest = saved.memory["level1"]["level2"]["level3"]
    assert "\x00" not in deepest["level4"]
    assert "\x00" not in deepest["list"][0]["item"]
    assert "\x00" not in deepest["list"][1]["item2"]
    assert "\x00" not in saved.memory["simple"]
    # Round-trip: stored copy must be clean throughout.
    stored = await async_postgres_db_real.get_user_memory("test_memory_deep_nested", "test_user_1")
    assert stored is not None
    assert "\x00" not in str(stored.memory)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_postgres/test_sanitization.py",
"license": "Apache License 2.0",
"lines": 443,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_migrations_utils.py | from agno.db.migrations.utils import quote_db_identifier
def test_quote_db_identifier_postgres():
    """PostgreSQL identifiers are wrapped in double quotes."""
    cases = [
        ("PostgresDb", "my_table", '"my_table"'),
        ("AsyncPostgresDb", "my_schema", '"my_schema"'),
    ]
    for db_type, identifier, expected in cases:
        assert quote_db_identifier(db_type, identifier) == expected
def test_quote_db_identifier_mysql():
    """MySQL-family identifiers are wrapped in backticks."""
    cases = [
        ("MySQLDb", "my_table", "`my_table`"),
        ("AsyncMySQLDb", "my_schema", "`my_schema`"),
        ("SingleStoreDb", "my_table", "`my_table`"),
    ]
    for db_type, identifier, expected in cases:
        assert quote_db_identifier(db_type, identifier) == expected
def test_quote_db_identifier_sqlite():
    """SQLite identifiers are wrapped in double quotes."""
    cases = [
        ("SqliteDb", "my_table", '"my_table"'),
        ("AsyncSqliteDb", "my_schema", '"my_schema"'),
    ]
    for db_type, identifier, expected in cases:
        assert quote_db_identifier(db_type, identifier) == expected
def test_quote_db_identifier_unknown_defaults_to_double_quotes():
    """Unrecognized db types fall back to double-quoted identifiers."""
    quoted = quote_db_identifier("UnknownDb", "tbl")
    assert quoted == '"tbl"'
def test_quote_db_identifier_escapes_double_quote_postgres():
    """Embedded double-quotes must be doubled for Postgres identifiers."""
    cases = [
        ("PostgresDb", 'my"table', '"my""table"'),
        ("AsyncPostgresDb", 'a"b"c', '"a""b""c"'),
    ]
    for db_type, identifier, expected in cases:
        assert quote_db_identifier(db_type, identifier) == expected
def test_quote_db_identifier_escapes_double_quote_sqlite():
    """Embedded double-quotes must be doubled for SQLite identifiers."""
    cases = [
        ("SqliteDb", 'my"table', '"my""table"'),
        ("AsyncSqliteDb", 'x"y', '"x""y"'),
    ]
    for db_type, identifier, expected in cases:
        assert quote_db_identifier(db_type, identifier) == expected
def test_quote_db_identifier_escapes_backtick_mysql():
    """Embedded backticks must be doubled for MySQL/SingleStore identifiers."""
    cases = [
        ("MySQLDb", "my`table", "`my``table`"),
        ("AsyncMySQLDb", "a`b`c", "`a``b``c`"),
        ("SingleStoreDb", "t`bl", "`t``bl`"),
    ]
    for db_type, identifier, expected in cases:
        assert quote_db_identifier(db_type, identifier) == expected
def test_quote_db_identifier_no_unnecessary_escaping():
    """A double-quote in MySQL and a backtick in Postgres need no escaping."""
    mysql_quoted = quote_db_identifier("MySQLDb", 'has"quote')
    pg_quoted = quote_db_identifier("PostgresDb", "has`tick")
    assert mysql_quoted == '`has"quote`'
    assert pg_quoted == '"has`tick"'
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_migrations_utils.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/chunking/test_chunking_split.py | """
Tests for text splitting behavior in chunking strategies.
"""
from unittest.mock import patch
import pytest
from agno.knowledge.chunking.document import DocumentChunking
from agno.knowledge.chunking.recursive import RecursiveChunking
from agno.knowledge.document.base import Document
pytest.importorskip("unstructured")
from agno.knowledge.chunking.markdown import MarkdownChunking
class TestDocumentChunkingParagraphSplit:
    """DocumentChunking should split text at paragraph boundaries"""

    def test_splits_at_paragraph_boundaries(self):
        """Text with paragraph breaks should produce multiple chunks."""
        # NOTE(review): the repository copy of this literal presumably has
        # blank lines between paragraphs (the test name implies paragraph
        # breaks); confirm against the original file, as whitespace may have
        # been lost in transit.
        text = """First paragraph here.
Second paragraph here.
Third paragraph here."""
        doc = Document(id="test", name="test", content=text)
        # chunk_size well below the total length forces at least one split.
        chunker = DocumentChunking(chunk_size=30, overlap=0)
        chunks = chunker.chunk(doc)
        assert len(chunks) > 1
class TestRecursiveChunkingNewlineSplit:
    """RecursiveChunking should split text at newline boundaries."""

    def test_splits_at_newline_boundary(self):
        """A chunk_size just above one line's length forces a split at the newline."""
        content = "AAAAAAAAAA\nBBBBBBBBBB"
        document = Document(id="test", name="test", content=content)
        pieces = RecursiveChunking(chunk_size=15, overlap=0).chunk(document)
        assert len(pieces) == 2
        first, second = pieces
        assert first.content.strip() == "AAAAAAAAAA"
        assert second.content.strip() == "BBBBBBBBBB"
class TestMarkdownChunkingFallbackSplit:
    """MarkdownChunking fallback should split at paragraph boundaries."""

    def test_fallback_splits_at_paragraphs(self):
        """When markdown parsing fails, should fall back to paragraph splitting."""
        # NOTE(review): the repository copy of this literal presumably has
        # blank lines between paragraphs; confirm against the original file.
        text = """First paragraph.
Second paragraph.
Third paragraph."""
        doc = Document(id="test", name="test", content=text)
        chunker = MarkdownChunking(chunk_size=30, overlap=0)
        # Force the markdown parser to fail so the fallback path is exercised.
        with patch("agno.knowledge.chunking.markdown.partition_md", side_effect=Exception("test")):
            chunks = chunker.chunk(doc)
        assert len(chunks) > 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/chunking/test_chunking_split.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_user_memory.py | from datetime import datetime, timezone
from agno.db.schemas import UserMemory
def test_user_memory_from_dict_handles_epoch_ints():
    """Epoch-second ints for created_at/updated_at survive from_dict/to_dict."""
    payload = {
        "memory_id": "m1",
        "memory": "hello",
        "created_at": 1_700_000_000,
        "updated_at": 1_700_000_123,
    }
    um = UserMemory.from_dict(payload)
    assert isinstance(um.created_at, int)
    assert isinstance(um.updated_at, int)
    dumped = um.to_dict()
    assert dumped["memory_id"] == "m1"
    # to_dict serializes timestamps as strings.
    assert isinstance(dumped["created_at"], str)
    assert isinstance(dumped["updated_at"], str)
def test_user_memory_from_dict_handles_iso_strings():
    """ISO-8601 timestamp strings are normalized to epoch ints."""
    payload = {
        "memory_id": "m1",
        "memory": "hello",
        "created_at": "2025-01-02T03:04:05+00:00",
        "updated_at": "2025-01-02T03:04:06Z",
    }
    um = UserMemory.from_dict(payload)
    assert isinstance(um.created_at, int)
    assert isinstance(um.updated_at, int)
    assert um.to_dict()["memory_id"] == "m1"
def test_user_memory_init_normalizes_datetime_objects():
    """Timezone-aware datetime objects are normalized to epoch ints."""
    moment = datetime(2025, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
    payload = {
        "memory_id": "m1",
        "memory": "hello",
        "created_at": moment,
        "updated_at": moment,
    }
    um = UserMemory.from_dict(payload)
    assert isinstance(um.created_at, int)
    assert isinstance(um.updated_at, int)
    assert um.to_dict()["memory_id"] == "m1"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_user_memory.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/run/cancellation_management/base.py | from abc import ABC, abstractmethod
from typing import Dict
class BaseRunCancellationManager(ABC):
    """Manages cancellation state for agent runs.

    This class can be extended to implement custom cancellation logic.
    Use set_cancellation_manager() to replace the global instance with your own.

    Every sync method has an `a`-prefixed async counterpart with identical
    semantics; implementations must keep the two views of the state consistent.
    """

    @abstractmethod
    def register_run(self, run_id: str) -> None:
        """Register a new run as not cancelled."""
        pass

    @abstractmethod
    async def aregister_run(self, run_id: str) -> None:
        """Register a new run as not cancelled (async version)."""
        pass

    @abstractmethod
    def cancel_run(self, run_id: str) -> bool:
        """Cancel a run by marking it as cancelled.

        Always stores cancellation intent, even for runs not yet registered
        (cancel-before-start support for background runs).

        Returns:
            bool: True if run was previously registered, False if storing
                cancellation intent for an unregistered run.
        """
        pass

    @abstractmethod
    async def acancel_run(self, run_id: str) -> bool:
        """Cancel a run by marking it as cancelled (async version).

        Always stores cancellation intent, even for runs not yet registered
        (cancel-before-start support for background runs).

        Returns:
            bool: True if run was previously registered, False if storing
                cancellation intent for an unregistered run.
        """
        pass

    @abstractmethod
    def is_cancelled(self, run_id: str) -> bool:
        """Check if a run is cancelled."""
        pass

    @abstractmethod
    async def ais_cancelled(self, run_id: str) -> bool:
        """Check if a run is cancelled (async version)."""
        pass

    @abstractmethod
    def cleanup_run(self, run_id: str) -> None:
        """Remove a run from tracking (called when run completes)."""
        pass

    @abstractmethod
    async def acleanup_run(self, run_id: str) -> None:
        """Remove a run from tracking (called when run completes) (async version)."""
        pass

    @abstractmethod
    def raise_if_cancelled(self, run_id: str) -> None:
        """Check if a run should be cancelled and raise exception if so."""
        pass

    @abstractmethod
    async def araise_if_cancelled(self, run_id: str) -> None:
        """Check if a run should be cancelled and raise exception if so (async version)."""
        pass

    @abstractmethod
    def get_active_runs(self) -> Dict[str, bool]:
        """Get all currently tracked runs and their cancellation status."""
        pass

    @abstractmethod
    async def aget_active_runs(self) -> Dict[str, bool]:
        """Get all currently tracked runs and their cancellation status (async version)."""
        pass
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/cancellation_management/base.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/run/cancellation_management/in_memory_cancellation_manager.py | """Run cancellation management."""
import asyncio
import threading
from typing import Dict
from agno.exceptions import RunCancelledException
from agno.run.cancellation_management.base import BaseRunCancellationManager
from agno.utils.log import logger
class InMemoryRunCancellationManager(BaseRunCancellationManager):
def __init__(self):
self._cancelled_runs: Dict[str, bool] = {}
self._lock = threading.Lock()
self._async_lock = asyncio.Lock()
def register_run(self, run_id: str) -> None:
"""Register a new run as not cancelled.
Uses setdefault to preserve any existing cancellation intent
(cancel-before-start support for background runs).
"""
with self._lock:
self._cancelled_runs.setdefault(run_id, False)
async def aregister_run(self, run_id: str) -> None:
"""Register a new run as not cancelled (async version).
Uses setdefault to preserve any existing cancellation intent
(cancel-before-start support for background runs).
"""
async with self._async_lock:
self._cancelled_runs.setdefault(run_id, False)
def cancel_run(self, run_id: str) -> bool:
"""Cancel a run by marking it as cancelled.
Always stores cancellation intent, even for runs not yet registered
(cancel-before-start support for background runs).
Returns:
bool: True if run was previously registered, False if storing
cancellation intent for an unregistered run.
"""
with self._lock:
was_registered = run_id in self._cancelled_runs
self._cancelled_runs[run_id] = True
if was_registered:
logger.info(f"Run {run_id} marked for cancellation")
else:
logger.info(f"Run {run_id} not yet registered, storing cancellation intent")
return was_registered
async def acancel_run(self, run_id: str) -> bool:
"""Cancel a run by marking it as cancelled (async version).
Always stores cancellation intent, even for runs not yet registered
(cancel-before-start support for background runs).
Returns:
bool: True if run was previously registered, False if storing
cancellation intent for an unregistered run.
"""
async with self._async_lock:
was_registered = run_id in self._cancelled_runs
self._cancelled_runs[run_id] = True
if was_registered:
logger.info(f"Run {run_id} marked for cancellation")
else:
logger.info(f"Run {run_id} not yet registered, storing cancellation intent")
return was_registered
def is_cancelled(self, run_id: str) -> bool:
"""Check if a run is cancelled."""
with self._lock:
return self._cancelled_runs.get(run_id, False)
async def ais_cancelled(self, run_id: str) -> bool:
"""Check if a run is cancelled (async version)."""
async with self._async_lock:
return self._cancelled_runs.get(run_id, False)
def cleanup_run(self, run_id: str) -> None:
"""Remove a run from tracking (called when run completes)."""
with self._lock:
if run_id in self._cancelled_runs:
del self._cancelled_runs[run_id]
async def acleanup_run(self, run_id: str) -> None:
"""Remove a run from tracking (called when run completes) (async version)."""
async with self._async_lock:
if run_id in self._cancelled_runs:
del self._cancelled_runs[run_id]
def raise_if_cancelled(self, run_id: str) -> None:
"""Check if a run should be cancelled and raise exception if so."""
if self.is_cancelled(run_id):
logger.info(f"Cancelling run {run_id}")
raise RunCancelledException(f"Run {run_id} was cancelled")
async def araise_if_cancelled(self, run_id: str) -> None:
"""Check if a run should be cancelled and raise exception if so (async version)."""
if await self.ais_cancelled(run_id):
logger.info(f"Cancelling run {run_id}")
raise RunCancelledException(f"Run {run_id} was cancelled")
def get_active_runs(self) -> Dict[str, bool]:
"""Get all currently tracked runs and their cancellation status."""
with self._lock:
return self._cancelled_runs.copy()
async def aget_active_runs(self) -> Dict[str, bool]:
    """Async variant of ``get_active_runs``: snapshot of tracked runs."""
    async with self._async_lock:
        # Hand back a shallow copy so callers cannot mutate internal state.
        return dict(self._cancelled_runs)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/cancellation_management/in_memory_cancellation_manager.py",
"license": "Apache License 2.0",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/run/cancellation_management/redis_cancellation_manager.py | """Redis-based run cancellation management."""
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from agno.exceptions import RunCancelledException
from agno.run.cancellation_management.base import BaseRunCancellationManager
from agno.utils.log import logger
# Defer import error until class instantiation: importing this module must not
# fail just because `redis` is absent; RedisRunCancellationManager.__init__
# raises ImportError (with the message below) instead.
_redis_available = True
_redis_import_error: Optional[str] = None
try:
    from redis import Redis, RedisCluster
    from redis.asyncio import Redis as AsyncRedis
    from redis.asyncio import RedisCluster as AsyncRedisCluster
except ImportError:
    _redis_available = False
    _redis_import_error = "`redis` not installed. Please install it using `pip install redis`"

# Type hints for when redis is not installed
if TYPE_CHECKING:
    # Real client types for static analysis only; this branch never runs at runtime.
    from redis import Redis, RedisCluster
    from redis.asyncio import Redis as AsyncRedis
    from redis.asyncio import RedisCluster as AsyncRedisCluster
else:
    # At runtime these names are rebound to Any so the annotations in this
    # module always evaluate, with or without redis installed.
    Redis = Any
    RedisCluster = Any
    AsyncRedis = Any
    AsyncRedisCluster = Any
class RedisRunCancellationManager(BaseRunCancellationManager):
    """Redis-based cancellation manager for distributed run cancellation.

    This manager stores run cancellation state in Redis, enabling cancellation
    across multiple processes or services. Each tracked run maps to a single
    key holding "0" (registered, not cancelled) or "1" (cancelled).

    To use: call the set_cancellation_manager function to set the cancellation manager.

    Args:
        redis_client: Sync Redis client for sync methods. Can be Redis or RedisCluster.
        async_redis_client: Async Redis client for async methods. Can be AsyncRedis or AsyncRedisCluster.
        key_prefix: Prefix for Redis keys. Defaults to "agno:run:cancellation:".
        ttl_seconds: TTL for keys in seconds. Defaults to 86400 (1 day).
            Keys auto-expire to prevent orphaned keys if runs aren't cleaned up.
            Set to None (or any non-positive value) to disable expiration.
    """

    DEFAULT_TTL_SECONDS = 60 * 60 * 24  # 1 day

    def __init__(
        self,
        redis_client: Optional[Union[Redis, RedisCluster]] = None,
        async_redis_client: Optional[Union[AsyncRedis, AsyncRedisCluster]] = None,
        key_prefix: str = "agno:run:cancellation:",
        ttl_seconds: Optional[int] = DEFAULT_TTL_SECONDS,
    ):
        if not _redis_available:
            raise ImportError(_redis_import_error)
        super().__init__()
        self.redis_client = redis_client
        self.async_redis_client = async_redis_client
        self.key_prefix = key_prefix
        self.ttl_seconds = ttl_seconds
        if redis_client is None and async_redis_client is None:
            raise ValueError("At least one of redis_client or async_redis_client must be provided")

    def _get_key(self, run_id: str) -> str:
        """Get the Redis key for a run ID."""
        return f"{self.key_prefix}{run_id}"

    def _expiry(self) -> Optional[int]:
        """Return the TTL to pass as SET's `ex` argument, or None for no expiration.

        Redis rejects `SET ... EX 0` (and negative TTLs) with an error. Previously
        register_run passed ttl_seconds through unguarded while the cancel path
        guarded it; centralizing the check here keeps both paths consistent and
        makes any non-positive TTL mean "no expiration", matching the docstring.
        """
        if self.ttl_seconds and self.ttl_seconds > 0:
            return self.ttl_seconds
        return None

    @staticmethod
    def _means_cancelled(value: Any) -> bool:
        """Interpret a raw Redis value: "1" marks a cancelled run."""
        # Redis returns bytes by default; str when decode_responses=True
        if isinstance(value, bytes):
            return value == b"1"
        return value == "1"

    def _ensure_sync_client(self) -> Union[Redis, RedisCluster]:
        """Ensure sync client is available."""
        if self.redis_client is None:
            raise RuntimeError("Sync Redis client not provided. Use async methods or provide a sync client.")
        return self.redis_client

    def _ensure_async_client(self) -> Union[AsyncRedis, AsyncRedisCluster]:
        """Ensure async client is available."""
        if self.async_redis_client is None:
            raise RuntimeError("Async Redis client not provided. Use sync methods or provide an async client.")
        return self.async_redis_client

    def register_run(self, run_id: str) -> None:
        """Register a new run as not cancelled.

        Uses NX flag to preserve any existing cancellation intent
        (cancel-before-start support for background runs).
        """
        client = self._ensure_sync_client()
        key = self._get_key(run_id)
        # NX: only set if key does not exist, preserving cancel-before-start intent
        client.set(key, "0", ex=self._expiry(), nx=True)

    async def aregister_run(self, run_id: str) -> None:
        """Register a new run as not cancelled (async version).

        Uses NX flag to preserve any existing cancellation intent
        (cancel-before-start support for background runs).
        """
        client = self._ensure_async_client()
        key = self._get_key(run_id)
        # NX: only set if key does not exist, preserving cancel-before-start intent
        await client.set(key, "0", ex=self._expiry(), nx=True)

    def _cancel_via_pipeline(self, client: Union[Redis, RedisCluster], key: str) -> bool:
        """Cancel a run atomically using a pipeline: EXISTS + SET.

        Returns True if the key already existed (run was registered).
        """
        pipe = client.pipeline()
        pipe.exists(key)
        # ex=None means "no expiration" in redis-py, so no branching is needed
        pipe.set(key, "1", ex=self._expiry())
        results = pipe.execute()
        return bool(results[0])

    async def _acancel_via_pipeline(self, client: Union[AsyncRedis, AsyncRedisCluster], key: str) -> bool:
        """Cancel a run atomically using an async pipeline: EXISTS + SET.

        Returns True if the key already existed (run was registered).
        """
        pipe = client.pipeline()
        pipe.exists(key)
        # ex=None means "no expiration" in redis-py, so no branching is needed
        pipe.set(key, "1", ex=self._expiry())
        results = await pipe.execute()
        return bool(results[0])

    def cancel_run(self, run_id: str) -> bool:
        """Cancel a run by marking it as cancelled.

        Always stores cancellation intent, even for runs not yet registered
        (cancel-before-start support for background runs).

        Returns:
            bool: True if run was previously registered, False if storing
                cancellation intent for an unregistered run.
        """
        client = self._ensure_sync_client()
        key = self._get_key(run_id)
        was_registered = self._cancel_via_pipeline(client, key)
        if was_registered:
            logger.info(f"Run {run_id} marked for cancellation")
        else:
            logger.info(f"Run {run_id} not yet registered, storing cancellation intent")
        return was_registered

    async def acancel_run(self, run_id: str) -> bool:
        """Cancel a run by marking it as cancelled (async version).

        Always stores cancellation intent, even for runs not yet registered
        (cancel-before-start support for background runs).

        Returns:
            bool: True if run was previously registered, False if storing
                cancellation intent for an unregistered run.
        """
        client = self._ensure_async_client()
        key = self._get_key(run_id)
        was_registered = await self._acancel_via_pipeline(client, key)
        if was_registered:
            logger.info(f"Run {run_id} marked for cancellation")
        else:
            logger.info(f"Run {run_id} not yet registered, storing cancellation intent")
        return was_registered

    def is_cancelled(self, run_id: str) -> bool:
        """Check if a run is cancelled."""
        client = self._ensure_sync_client()
        value = client.get(self._get_key(run_id))
        return value is not None and self._means_cancelled(value)

    async def ais_cancelled(self, run_id: str) -> bool:
        """Check if a run is cancelled (async version)."""
        client = self._ensure_async_client()
        value = await client.get(self._get_key(run_id))
        return value is not None and self._means_cancelled(value)

    def cleanup_run(self, run_id: str) -> None:
        """Remove a run from tracking (called when run completes)."""
        client = self._ensure_sync_client()
        client.delete(self._get_key(run_id))

    async def acleanup_run(self, run_id: str) -> None:
        """Remove a run from tracking (called when run completes) (async version)."""
        client = self._ensure_async_client()
        await client.delete(self._get_key(run_id))

    def raise_if_cancelled(self, run_id: str) -> None:
        """Check if a run should be cancelled and raise exception if so."""
        if self.is_cancelled(run_id):
            logger.info(f"Cancelling run {run_id}")
            raise RunCancelledException(f"Run {run_id} was cancelled")

    async def araise_if_cancelled(self, run_id: str) -> None:
        """Check if a run should be cancelled and raise exception if so (async version)."""
        if await self.ais_cancelled(run_id):
            logger.info(f"Cancelling run {run_id}")
            raise RunCancelledException(f"Run {run_id} was cancelled")

    def get_active_runs(self) -> Dict[str, bool]:
        """Get all currently tracked runs and their cancellation status.

        Note: Uses scan_iter which works correctly with both standalone Redis
        and Redis Cluster (scans all nodes in cluster mode).
        """
        client = self._ensure_sync_client()
        result: Dict[str, bool] = {}
        prefix_len = len(self.key_prefix)
        # scan_iter handles cluster mode correctly (scans all nodes)
        for key in client.scan_iter(match=f"{self.key_prefix}*", count=100):
            if isinstance(key, bytes):
                key = key.decode("utf-8")
            # A key may expire between SCAN and GET; skip vanished entries
            value = client.get(key)
            if value is not None:
                result[key[prefix_len:]] = self._means_cancelled(value)
        return result

    async def aget_active_runs(self) -> Dict[str, bool]:
        """Get all currently tracked runs and their cancellation status (async version).

        Note: Uses scan_iter which works correctly with both standalone Redis
        and Redis Cluster (scans all nodes in cluster mode).
        """
        client = self._ensure_async_client()
        result: Dict[str, bool] = {}
        prefix_len = len(self.key_prefix)
        # scan_iter handles cluster mode correctly (scans all nodes)
        async for key in client.scan_iter(match=f"{self.key_prefix}*", count=100):
            if isinstance(key, bytes):
                key = key.decode("utf-8")
            # A key may expire between SCAN and GET; skip vanished entries
            value = await client.get(key)
            if value is not None:
                result[key[prefix_len:]] = self._means_cancelled(value)
        return result
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/cancellation_management/redis_cancellation_manager.py",
"license": "Apache License 2.0",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/os/test_schemas.py | """Tests our API schemas handle all expected parsing."""
import json
from agno.os.routers.memory.schemas import UserMemorySchema
def test_user_memory_schema():
    """Test that our UserMemorySchema handle common memory objects."""
    raw = {
        "memory_id": "123",
        "memory": "This is a test memory",
        "topics": ["test", "memory"],
        "user_id": "456",
        "agent_id": "789",
        "team_id": "101",
        "updated_at": 1719859200,
        "created_at": 1719859200,
    }
    schema = UserMemorySchema.from_dict(raw)
    assert schema is not None
    # Every relevant field should round-trip from the input dict
    expected = {
        "memory": "This is a test memory",
        "topics": ["test", "memory"],
        "user_id": "456",
        "agent_id": "789",
        "team_id": "101",
    }
    for field, value in expected.items():
        assert getattr(schema, field) == value
def test_v1_migrated_user_memories():
    """Test that our UserMemorySchema handles v1 migrated memories."""
    schema = UserMemorySchema.from_dict(
        {
            "memory_id": "123",
            "user_id": "456",
            "memory": {"memory": "This is a test memory", "other": "other"},
            "input": "This is a test input",
            "updated_at": 1719859200,
        }
    )
    assert schema is not None
    # The nested v1 "memory" key should be unwrapped to a plain string
    assert schema.memory == "This is a test memory"
def test_user_memory_schema_complex_memory_content():
    """Test that our UserMemorySchema handles custom, complex memory content."""
    payload = {"user_mem": "This is a test memory", "score": "10", "other_fields": "other_fields"}
    schema = UserMemorySchema.from_dict(
        {
            "memory_id": "123",
            "user_id": "456",
            "memory": payload,
            "input": "This is a test input",
            "updated_at": 1719859200,
        }
    )
    assert schema is not None
    # Complex content is serialized to JSON and must round-trip intact
    assert json.loads(schema.memory) == payload
    assert schema.user_id == "456"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_schemas.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/google/test_gemini.py | import tempfile
from pathlib import Path
from unittest.mock import MagicMock, patch
from agno.media import File
from agno.models.google.gemini import Gemini
from agno.models.message import Message
def test_gemini_get_client_with_credentials_vertexai():
    """Test that credentials are correctly passed to the client when vertexai is True."""
    fake_credentials = MagicMock()
    model = Gemini(vertexai=True, project_id="test-project", location="test-location", credentials=fake_credentials)
    with patch("agno.models.google.gemini.genai.Client") as client_cls:
        model.get_client()
    # The client constructor must receive the supplied credentials and project settings
    kwargs = client_cls.call_args.kwargs
    assert kwargs["credentials"] == fake_credentials
    assert kwargs["vertexai"] is True
    assert kwargs["project"] == "test-project"
    assert kwargs["location"] == "test-location"
def test_gemini_get_client_without_credentials_vertexai():
    """Test that client is initialized without credentials when not provided in vertexai mode."""
    model = Gemini(vertexai=True, project_id="test-project", location="test-location")
    with patch("agno.models.google.gemini.genai.Client") as client_cls:
        model.get_client()
    # No credentials were supplied, so none should reach the client
    kwargs = client_cls.call_args.kwargs
    assert "credentials" not in kwargs
    assert kwargs["vertexai"] is True
def test_gemini_get_client_ai_studio_mode():
    """Test that credentials are NOT passed in Google AI Studio mode (non-vertexai)."""
    unused_credentials = MagicMock()
    # Credentials must be ignored whenever vertexai=False
    model = Gemini(vertexai=False, api_key="test-api-key", credentials=unused_credentials)
    with patch("agno.models.google.gemini.genai.Client") as client_cls:
        model.get_client()
    kwargs = client_cls.call_args.kwargs
    assert "credentials" not in kwargs
    assert "api_key" in kwargs
    assert kwargs.get("vertexai") is not True
class TestFormatFileForMessage:
    """Unit tests for Gemini._format_file_for_message MIME-type resolution.

    `Part` is patched throughout, so each test asserts which constructor
    (from_bytes / from_uri) is invoked and with what MIME type and payload.
    """

    def _make_model(self):
        """Build a minimal Gemini model; the API key is never exercised."""
        model = Gemini(api_key="test-key")
        return model

    @patch("agno.models.google.gemini.Part")
    def test_bytes_with_mime_type(self, mock_part):
        """An explicit mime_type on raw bytes is passed through unchanged."""
        model = self._make_model()
        f = File(content=b"hello", mime_type="text/plain")
        model._format_file_for_message(f)
        mock_part.from_bytes.assert_called_once_with(mime_type="text/plain", data=b"hello")

    @patch("agno.models.google.gemini.Part")
    def test_bytes_without_mime_type_guesses_from_filename(self, mock_part):
        """With no mime_type, the type is guessed from the filename extension."""
        model = self._make_model()
        f = File(content=b"data", filename="report.pdf")
        f.mime_type = None
        model._format_file_for_message(f)
        mock_part.from_bytes.assert_called_once_with(mime_type="application/pdf", data=b"data")

    @patch("agno.models.google.gemini.Part")
    def test_bytes_without_mime_type_or_filename_falls_back(self, mock_part):
        """With nothing to guess from, the formatter falls back to application/pdf."""
        model = self._make_model()
        f = File(content=b"data")
        f.mime_type = None
        model._format_file_for_message(f)
        mock_part.from_bytes.assert_called_once_with(mime_type="application/pdf", data=b"data")

    @patch("agno.models.google.gemini.Part")
    def test_gcs_uri_without_mime_type(self, mock_part):
        """gs:// URIs are sent by reference, with MIME guessed from the extension."""
        model = self._make_model()
        f = File(url="gs://bucket/file.csv")
        f.mime_type = None
        model._format_file_for_message(f)
        mock_part.from_uri.assert_called_once_with(file_uri="gs://bucket/file.csv", mime_type="text/csv")

    @patch("agno.models.google.gemini.Part")
    def test_https_url_with_mime_type_sends_as_uri(self, mock_part):
        """https URLs with a known MIME type are sent by reference (no download)."""
        model = self._make_model()
        f = File(url="https://example.com/report.pdf", mime_type="application/pdf")
        model._format_file_for_message(f)
        mock_part.from_uri.assert_called_once_with(
            file_uri="https://example.com/report.pdf", mime_type="application/pdf"
        )

    @patch("agno.models.google.gemini.Part")
    def test_https_url_without_mime_type_falls_through_to_download(self, mock_part):
        """https URLs lacking a MIME type are downloaded and sent as raw bytes."""
        model = self._make_model()
        f = File(url="https://example.com/report.pdf")
        f.mime_type = None
        # Mock the download property to return content with detected MIME
        with patch.object(
            type(f), "file_url_content", new_callable=lambda: property(lambda self: (b"pdf-data", "application/pdf"))
        ):
            model._format_file_for_message(f)
        mock_part.from_uri.assert_not_called()
        mock_part.from_bytes.assert_called_once_with(mime_type="application/pdf", data=b"pdf-data")

    @patch("agno.models.google.gemini.Part")
    def test_local_file_without_mime_type(self, mock_part):
        """Local files get their MIME type guessed from the path suffix."""
        model = self._make_model()
        with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as tmp:
            tmp.write(b"hello world")
            tmp.flush()
            f = File(filepath=tmp.name)
            f.mime_type = None
            model._format_file_for_message(f)
            mock_part.from_bytes.assert_called_once_with(mime_type="text/plain", data=b"hello world")
        Path(tmp.name).unlink()

    @patch("agno.models.google.gemini.Part")
    def test_local_file_without_mime_type_or_extension_falls_back(self, mock_part):
        """Extension-less local files fall back to the default application/pdf."""
        model = self._make_model()
        with tempfile.NamedTemporaryFile(suffix="", delete=False) as tmp:
            tmp.write(b"binary data")
            tmp.flush()
            f = File(filepath=tmp.name)
            f.mime_type = None
            model._format_file_for_message(f)
            mock_part.from_bytes.assert_called_once_with(mime_type="application/pdf", data=b"binary data")
        Path(tmp.name).unlink()
class TestFormatMessagesEmptyParts:
    """Test that messages with empty parts are filtered out before sending to Gemini API."""

    def _make_model(self):
        """Build a minimal Gemini model; the API key is never exercised."""
        model = Gemini(api_key="test-key")
        return model

    def test_filters_message_with_none_content(self):
        """Messages with None content and no tool calls should be filtered out."""
        model = self._make_model()
        messages = [
            Message(role="system", content="You are helpful"),
            Message(role="user", content="Hello"),
            Message(role="assistant", content=None),  # empty parts
            Message(role="user", content="How are you?"),
        ]
        formatted, system_msg = model._format_messages(messages)
        # System message is extracted separately, not in formatted list.
        # The assistant message with None content should be filtered out.
        assert len(formatted) == 2
        assert all(msg.parts for msg in formatted)

    def test_filters_message_with_empty_list_content(self):
        """Messages with list content but no tool calls produce empty parts and should be filtered."""
        model = self._make_model()
        messages = [
            Message(role="user", content="Hello"),
            Message(role="assistant", content=["some list"]),  # list content, no tool calls -> empty parts
            Message(role="user", content="Next question"),
        ]
        formatted, system_msg = model._format_messages(messages)
        # The assistant message with list content (no tool_calls) falls through
        # the text content branch without adding parts, so it should be filtered.
        assert len(formatted) == 2
        for msg in formatted:
            assert msg.parts is not None
            assert len(msg.parts) > 0

    def test_keeps_message_with_valid_content(self):
        """Messages with valid string content should be kept."""
        model = self._make_model()
        messages = [
            Message(role="user", content="Hello"),
            Message(role="assistant", content="Hi there!"),
            Message(role="user", content="Thanks"),
        ]
        formatted, system_msg = model._format_messages(messages)
        assert len(formatted) == 3
        for msg in formatted:
            assert msg.parts is not None
            assert len(msg.parts) > 0

    def test_keeps_system_message_separate(self):
        """System messages should be extracted as system_message, not in formatted list."""
        model = self._make_model()
        messages = [
            Message(role="system", content="Be helpful"),
            Message(role="user", content="Hello"),
        ]
        formatted, system_msg = model._format_messages(messages)
        assert system_msg == "Be helpful"
        assert len(formatted) == 1

    def test_all_empty_parts_returns_empty_list(self):
        """If all non-system messages have empty parts, return empty formatted list."""
        model = self._make_model()
        messages = [
            Message(role="system", content="Be helpful"),
            Message(role="assistant", content=None),
        ]
        formatted, system_msg = model._format_messages(messages)
        assert system_msg == "Be helpful"
        assert len(formatted) == 0

    def test_mixed_valid_and_empty_messages(self):
        """Only empty-parts messages are filtered; valid ones are kept."""
        model = self._make_model()
        messages = [
            Message(role="user", content="First"),
            Message(role="assistant", content=None),  # will be filtered
            Message(role="user", content="Second"),
            Message(role="assistant", content="Response"),
            Message(role="assistant", content=None),  # will be filtered
            Message(role="user", content="Third"),
        ]
        formatted, system_msg = model._format_messages(messages)
        assert len(formatted) == 4
        roles = [msg.role for msg in formatted]
        # Assistant turns surface with Gemini's "model" role after formatting
        assert roles == ["user", "user", "model", "user"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/google/test_gemini.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/os/test_resync.py | """Integration tests for AgentOS resync functionality."""
from contextlib import asynccontextmanager
import pytest
from fastapi.testclient import TestClient
from agno.agent.agent import Agent
from agno.db.in_memory import InMemoryDb
from agno.os import AgentOS
from agno.team.team import Team
from agno.workflow.workflow import Workflow
@pytest.fixture
def test_agent():
    """Provide the base agent, backed by an in-memory database."""
    return Agent(
        name="test-agent",
        id="test-agent-id",
        db=InMemoryDb(),
    )
@pytest.fixture
def second_agent():
    """Provide an extra agent to be appended during the app lifespan."""
    return Agent(
        name="second-agent",
        id="second-agent-id",
        db=InMemoryDb(),
    )
@pytest.fixture
def test_team(test_agent: Agent):
    """Provide the base team whose only member is the base agent."""
    return Team(
        name="test-team",
        id="test-team-id",
        members=[test_agent],
    )
@pytest.fixture
def second_team():
    """Provide an extra team (with its own member) for lifespan additions."""
    return Team(
        name="second-team",
        id="second-team-id",
        members=[Agent(name="second-team-member", id="second-team-member-id")],
    )
@pytest.fixture
def test_workflow():
    """Provide the base workflow."""
    return Workflow(
        name="test-workflow",
        id="test-workflow-id",
    )
@pytest.fixture
def second_workflow():
    """Provide an extra workflow to be appended during the app lifespan."""
    return Workflow(
        name="second-workflow",
        id="second-workflow-id",
    )
class TestResyncPreservesEndpoints:
    """Tests to verify that resync preserves and restores all endpoints.

    Each test hits an endpoint, calls AgentOS.resync on the live app, and
    verifies the same endpoint still responds afterwards.
    """

    def test_resync_preserves_health_endpoint(self, test_agent: Agent):
        """Test that resync preserves the health endpoint."""
        agent_os = AgentOS(agents=[test_agent])
        app = agent_os.get_app()
        # Verify health endpoint works before resync
        # (entering the TestClient context runs the app lifespan)
        with TestClient(app) as client:
            response = client.get("/health")
            assert response.status_code == 200
            assert response.json()["status"] == "ok"
            # Perform resync
            agent_os.resync(app=app)
            # Verify health endpoint still works after resync
            response = client.get("/health")
            assert response.status_code == 200
            assert response.json()["status"] == "ok"

    def test_resync_preserves_home_endpoint(self, test_agent: Agent):
        """Test that resync preserves the home endpoint."""
        agent_os = AgentOS(agents=[test_agent])
        app = agent_os.get_app()
        with TestClient(app) as client:
            # Verify home endpoint works before resync
            response = client.get("/")
            assert response.status_code == 200
            data = response.json()
            assert "name" in data
            assert "AgentOS API" in data["name"]
            # Perform resync
            agent_os.resync(app=app)
            # Verify home endpoint still works after resync
            response = client.get("/")
            assert response.status_code == 200
            data = response.json()
            assert "name" in data
            assert "AgentOS API" in data["name"]

    def test_resync_preserves_config_endpoint(self, test_agent: Agent):
        """Test that resync preserves the config endpoint."""
        agent_os = AgentOS(agents=[test_agent])
        app = agent_os.get_app()
        with TestClient(app) as client:
            # Verify config endpoint works before resync
            response = client.get("/config")
            assert response.status_code == 200
            # Perform resync
            agent_os.resync(app=app)
            # Verify config endpoint still works after resync
            response = client.get("/config")
            assert response.status_code == 200

    def test_resync_preserves_sessions_endpoint(self, test_agent: Agent):
        """Test that resync preserves the sessions endpoint."""
        agent_os = AgentOS(agents=[test_agent])
        app = agent_os.get_app()
        with TestClient(app) as client:
            # Verify sessions endpoint exists before resync
            response = client.get("/sessions")
            assert response.status_code == 200
            # Perform resync
            agent_os.resync(app=app)
            # Verify sessions endpoint still exists after resync
            response = client.get("/sessions")
            assert response.status_code == 200

    def test_resync_preserves_agents_endpoint(self, test_agent: Agent):
        """Test that resync preserves the agents endpoint."""
        agent_os = AgentOS(agents=[test_agent])
        app = agent_os.get_app()
        with TestClient(app) as client:
            # Verify agents endpoint works before resync
            response = client.get("/agents")
            assert response.status_code == 200
            agents_before = response.json()
            assert len(agents_before) == 1
            # Perform resync
            agent_os.resync(app=app)
            # Verify agents endpoint still works after resync
            response = client.get("/agents")
            assert response.status_code == 200
            agents_after = response.json()
            assert len(agents_after) == 1

    def test_resync_preserves_teams_endpoint(self, test_agent: Agent, test_team: Team):
        """Test that resync preserves the teams endpoint."""
        agent_os = AgentOS(agents=[test_agent], teams=[test_team])
        app = agent_os.get_app()
        with TestClient(app) as client:
            # Verify teams endpoint works before resync
            response = client.get("/teams")
            assert response.status_code == 200
            teams_before = response.json()
            assert len(teams_before) == 1
            # Perform resync
            agent_os.resync(app=app)
            # Verify teams endpoint still works after resync
            response = client.get("/teams")
            assert response.status_code == 200
            teams_after = response.json()
            assert len(teams_after) == 1

    def test_resync_preserves_workflows_endpoint(self, test_agent: Agent, test_workflow: Workflow):
        """Test that resync preserves the workflows endpoint."""
        agent_os = AgentOS(agents=[test_agent], workflows=[test_workflow])
        app = agent_os.get_app()
        with TestClient(app) as client:
            # Verify workflows endpoint works before resync
            response = client.get("/workflows")
            assert response.status_code == 200
            workflows_before = response.json()
            assert len(workflows_before) == 1
            # Perform resync
            agent_os.resync(app=app)
            # Verify workflows endpoint still works after resync
            response = client.get("/workflows")
            assert response.status_code == 200
            workflows_after = response.json()
            assert len(workflows_after) == 1

    def test_resync_preserves_all_core_endpoints(self, test_agent: Agent, test_team: Team, test_workflow: Workflow):
        """Test that resync preserves all core endpoints."""
        agent_os = AgentOS(agents=[test_agent], teams=[test_team], workflows=[test_workflow])
        app = agent_os.get_app()
        core_endpoints = [
            "/",
            "/health",
            "/config",
            "/agents",
            "/teams",
            "/workflows",
            "/sessions",
        ]
        with TestClient(app) as client:
            # Verify all core endpoints work before resync
            for endpoint in core_endpoints:
                response = client.get(endpoint)
                assert response.status_code == 200, f"Endpoint {endpoint} failed before resync"
            # Perform resync
            agent_os.resync(app=app)
            # Verify all core endpoints still work after resync
            for endpoint in core_endpoints:
                response = client.get(endpoint)
                assert response.status_code == 200, f"Endpoint {endpoint} failed after resync"
class TestResyncWithLifespanAdditions:
"""Tests to verify that resync picks up agents/teams/workflows added during lifespan."""
def test_resync_picks_up_agent_added_in_lifespan(self, test_agent: Agent, second_agent: Agent):
    """Test that resync picks up an agent added during lifespan."""
    lifespan_ran = False

    @asynccontextmanager
    async def lifespan(app, agent_os):
        nonlocal lifespan_ran
        lifespan_ran = True
        # Register the extra agent, then resync so routes pick it up
        agent_os.agents.append(second_agent)
        agent_os.resync(app=app)
        yield

    agent_os = AgentOS(agents=[test_agent], lifespan=lifespan)
    app = agent_os.get_app()

    with TestClient(app) as client:
        assert lifespan_ran is True
        response = client.get("/agents")
        assert response.status_code == 200
        payload = response.json()
        assert len(payload) == 2
        assert {"test-agent-id", "second-agent-id"} <= {agent["id"] for agent in payload}
def test_resync_picks_up_team_added_in_lifespan(self, test_agent: Agent, test_team: Team, second_team: Team):
    """Test that resync picks up a team added during lifespan."""
    lifespan_ran = False

    @asynccontextmanager
    async def lifespan(app, agent_os):
        nonlocal lifespan_ran
        lifespan_ran = True
        # Register the extra team, then resync so routes pick it up
        agent_os.teams.append(second_team)
        agent_os.resync(app=app)
        yield

    agent_os = AgentOS(agents=[test_agent], teams=[test_team], lifespan=lifespan)
    app = agent_os.get_app()

    with TestClient(app) as client:
        assert lifespan_ran is True
        response = client.get("/teams")
        assert response.status_code == 200
        payload = response.json()
        assert len(payload) == 2
        assert {"test-team-id", "second-team-id"} <= {team["id"] for team in payload}
def test_resync_picks_up_workflow_added_in_lifespan(
    self, test_agent: Agent, test_workflow: Workflow, second_workflow: Workflow
):
    """Test that resync picks up a workflow added during lifespan."""
    lifespan_ran = False

    @asynccontextmanager
    async def lifespan(app, agent_os):
        nonlocal lifespan_ran
        lifespan_ran = True
        # Register the extra workflow, then resync so routes pick it up
        agent_os.workflows.append(second_workflow)
        agent_os.resync(app=app)
        yield

    agent_os = AgentOS(agents=[test_agent], workflows=[test_workflow], lifespan=lifespan)
    app = agent_os.get_app()

    with TestClient(app) as client:
        assert lifespan_ran is True
        response = client.get("/workflows")
        assert response.status_code == 200
        payload = response.json()
        assert len(payload) == 2
        assert {"test-workflow-id", "second-workflow-id"} <= {wf["id"] for wf in payload}
def test_resync_picks_up_multiple_entities_added_in_lifespan(
    self,
    test_agent: Agent,
    second_agent: Agent,
    test_team: Team,
    second_team: Team,
    test_workflow: Workflow,
    second_workflow: Workflow,
):
    """Test that resync picks up multiple entities added during lifespan."""
    lifespan_ran = False

    @asynccontextmanager
    async def lifespan(app, agent_os):
        nonlocal lifespan_ran
        lifespan_ran = True
        # Register one extra entity of each kind, then resync once
        agent_os.agents.append(second_agent)
        agent_os.teams.append(second_team)
        agent_os.workflows.append(second_workflow)
        agent_os.resync(app=app)
        yield

    agent_os = AgentOS(
        agents=[test_agent],
        teams=[test_team],
        workflows=[test_workflow],
        lifespan=lifespan,
    )
    app = agent_os.get_app()

    with TestClient(app) as client:
        assert lifespan_ran is True
        # Each collection should now expose both its original and added entity
        for endpoint in ("/agents", "/teams", "/workflows"):
            response = client.get(endpoint)
            assert response.status_code == 200
            assert len(response.json()) == 2
def test_home_endpoint_works_after_lifespan_resync(self, test_agent: Agent, second_agent: Agent):
    """Test that home (/) endpoint works after resync during lifespan."""
    lifespan_ran = False

    @asynccontextmanager
    async def lifespan(app, agent_os):
        nonlocal lifespan_ran
        lifespan_ran = True
        # Register the extra agent, then resync so routes pick it up
        agent_os.agents.append(second_agent)
        agent_os.resync(app=app)
        yield

    agent_os = AgentOS(agents=[test_agent], lifespan=lifespan)
    app = agent_os.get_app()

    with TestClient(app) as client:
        assert lifespan_ran is True
        response = client.get("/")
        assert response.status_code == 200
        body = response.json()
        assert "name" in body
        assert "AgentOS API" in body["name"]
def test_new_agent_endpoint_available_after_resync(self, test_agent: Agent, second_agent: Agent):
"""Test that individual agent endpoint is available for agents added during lifespan."""
lifespan_executed = False
@asynccontextmanager
async def lifespan(app, agent_os):
nonlocal lifespan_executed
lifespan_executed = True
# Add the new agent
agent_os.agents.append(second_agent)
# Resync the AgentOS to pick up the new agent
agent_os.resync(app=app)
yield
agent_os = AgentOS(agents=[test_agent], lifespan=lifespan)
app = agent_os.get_app()
with TestClient(app) as client:
# Verify lifespan was executed
assert lifespan_executed is True
# Verify the new agent's individual endpoint is accessible
response = client.get(f"/agents/{second_agent.id}")
assert response.status_code == 200
agent_data = response.json()
assert agent_data["id"] == "second-agent-id"
assert agent_data["name"] == "second-agent"
def test_new_team_endpoint_available_after_resync(self, test_agent: Agent, test_team: Team, second_team: Team):
"""Test that individual team endpoint is available for teams added during lifespan."""
lifespan_executed = False
@asynccontextmanager
async def lifespan(app, agent_os):
nonlocal lifespan_executed
lifespan_executed = True
# Add the new team
agent_os.teams.append(second_team)
# Resync the AgentOS to pick up the new team
agent_os.resync(app=app)
yield
agent_os = AgentOS(agents=[test_agent], teams=[test_team], lifespan=lifespan)
app = agent_os.get_app()
with TestClient(app) as client:
# Verify lifespan was executed
assert lifespan_executed is True
# Verify the new team's individual endpoint is accessible
response = client.get(f"/teams/{second_team.id}")
assert response.status_code == 200
team_data = response.json()
assert team_data["id"] == "second-team-id"
assert team_data["name"] == "second-team"
def test_new_workflow_endpoint_available_after_resync(
self, test_agent: Agent, test_workflow: Workflow, second_workflow: Workflow
):
"""Test that individual workflow endpoint is available for workflows added during lifespan."""
lifespan_executed = False
@asynccontextmanager
async def lifespan(app, agent_os):
nonlocal lifespan_executed
lifespan_executed = True
# Add the new workflow
agent_os.workflows.append(second_workflow)
# Resync the AgentOS to pick up the new workflow
agent_os.resync(app=app)
yield
agent_os = AgentOS(agents=[test_agent], workflows=[test_workflow], lifespan=lifespan)
app = agent_os.get_app()
with TestClient(app) as client:
# Verify lifespan was executed
assert lifespan_executed is True
# Verify the new workflow's individual endpoint is accessible
response = client.get(f"/workflows/{second_workflow.id}")
assert response.status_code == 200
workflow_data = response.json()
assert workflow_data["id"] == "second-workflow-id"
assert workflow_data["name"] == "second-workflow"
class TestResyncConfig:
"""Tests to verify that resync updates the config endpoint correctly."""
def test_config_reflects_agents_added_in_lifespan(self, test_agent: Agent, second_agent: Agent):
"""Test that the config endpoint reflects agents added during lifespan."""
@asynccontextmanager
async def lifespan(app, agent_os):
# Add the new agent
agent_os.agents.append(second_agent)
# Resync the AgentOS to pick up the new agent
agent_os.resync(app=app)
yield
agent_os = AgentOS(agents=[test_agent], lifespan=lifespan)
app = agent_os.get_app()
with TestClient(app) as client:
# Verify config reflects both agents
response = client.get("/config")
assert response.status_code == 200
config = response.json()
assert len(config["agents"]) == 2
agent_ids = [agent["id"] for agent in config["agents"]]
assert "test-agent-id" in agent_ids
assert "second-agent-id" in agent_ids
def test_config_reflects_teams_added_in_lifespan(self, test_agent: Agent, test_team: Team, second_team: Team):
"""Test that the config endpoint reflects teams added during lifespan."""
@asynccontextmanager
async def lifespan(app, agent_os):
# Add the new team
agent_os.teams.append(second_team)
# Resync the AgentOS to pick up the new team
agent_os.resync(app=app)
yield
agent_os = AgentOS(agents=[test_agent], teams=[test_team], lifespan=lifespan)
app = agent_os.get_app()
with TestClient(app) as client:
# Verify config reflects both teams
response = client.get("/config")
assert response.status_code == 200
config = response.json()
assert len(config["teams"]) == 2
team_ids = [team["id"] for team in config["teams"]]
assert "test-team-id" in team_ids
assert "second-team-id" in team_ids
def test_config_reflects_workflows_added_in_lifespan(
self, test_agent: Agent, test_workflow: Workflow, second_workflow: Workflow
):
"""Test that the config endpoint reflects workflows added during lifespan."""
@asynccontextmanager
async def lifespan(app, agent_os):
# Add the new workflow
agent_os.workflows.append(second_workflow)
# Resync the AgentOS to pick up the new workflow
agent_os.resync(app=app)
yield
agent_os = AgentOS(agents=[test_agent], workflows=[test_workflow], lifespan=lifespan)
app = agent_os.get_app()
with TestClient(app) as client:
# Verify config reflects both workflows
response = client.get("/config")
assert response.status_code == 200
config = response.json()
assert len(config["workflows"]) == 2
workflow_ids = [workflow["id"] for workflow in config["workflows"]]
assert "test-workflow-id" in workflow_ids
assert "second-workflow-id" in workflow_ids
class TestResyncMultipleTimes:
"""Tests to verify that resync can be called multiple times."""
def test_multiple_resync_calls_preserve_endpoints(self, test_agent: Agent):
"""Test that multiple resync calls preserve all endpoints."""
agent_os = AgentOS(agents=[test_agent])
app = agent_os.get_app()
with TestClient(app) as client:
# Perform multiple resyncs
for i in range(3):
agent_os.resync(app=app)
# Verify home endpoint still works after each resync
response = client.get("/")
assert response.status_code == 200, f"Home (/) failed after resync {i + 1}"
data = response.json()
assert "name" in data, f"Home (/) missing 'name' after resync {i + 1}"
assert "AgentOS API" in data["name"], f"Home (/) missing 'AgentOS API' after resync {i + 1}"
# Verify health endpoint still works after each resync
response = client.get("/health")
assert response.status_code == 200, f"Health failed after resync {i + 1}"
# Verify agents endpoint still works after each resync
response = client.get("/agents")
assert response.status_code == 200, f"Agents failed after resync {i + 1}"
def test_resync_does_not_duplicate_routes(self, test_agent: Agent):
"""Test that resync does not create duplicate routes."""
agent_os = AgentOS(agents=[test_agent])
app = agent_os.get_app()
with TestClient(app):
# Get route count before multiple resyncs
routes_before = [route for route in app.routes if hasattr(route, "path")]
unique_paths_before = set((route.path, tuple(getattr(route, "methods", []))) for route in routes_before)
# Perform multiple resyncs
for _ in range(3):
agent_os.resync(app=app)
# Get route count after resyncs
routes_after = [route for route in app.routes if hasattr(route, "path")]
unique_paths_after = set((route.path, tuple(getattr(route, "methods", []))) for route in routes_after)
# Verify no duplicate routes were created
assert len(unique_paths_after) == len(unique_paths_before), "Resync created duplicate routes"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_resync.py",
"license": "Apache License 2.0",
"lines": 493,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/os/managers.py | """
Managers for AgentOS.
This module provides various manager classes for AgentOS:
- WebSocketManager: WebSocket connection management for real-time streaming
- EventsBuffer: Event buffering for agent/team/workflow reconnection support
- WebSocketHandler: Handler for sending events over WebSocket connections
These managers are used by agents, teams, and workflows for background WebSocket execution.
"""
import json
from dataclasses import dataclass
from time import time
from typing import Any, Dict, List, Optional, Union
from starlette.websockets import WebSocket
from agno.run.agent import RunOutputEvent
from agno.run.base import RunStatus
from agno.run.team import TeamRunOutputEvent
from agno.run.workflow import WorkflowRunOutputEvent
from agno.utils.log import log_debug, log_warning, logger
from agno.utils.serialize import json_serializer
@dataclass
class WebSocketHandler:
"""Generic WebSocket handler for real-time agent/team/workflow events"""
websocket: Optional[WebSocket] = None
def format_sse_event(self, json_data: str) -> str:
"""Parse JSON data into SSE-compliant format.
Args:
json_data: JSON string containing the event data
Returns:
SSE-formatted response with event type and data
"""
try:
# Parse the JSON to extract the event type
data = json.loads(json_data)
event_type = data.get("event", "message")
# Format as SSE: event: <event_type>\ndata: <json_data>\n\n
return f"event: {event_type}\ndata: {json_data}\n\n"
except (json.JSONDecodeError, KeyError):
# Fallback to generic message event if parsing fails
return f"event: message\ndata: {json_data}\n\n"
async def handle_event(
self,
event: Union[RunOutputEvent, TeamRunOutputEvent, WorkflowRunOutputEvent],
event_index: Optional[int] = None,
run_id: Optional[str] = None,
) -> None:
"""Handle an event object - serializes and sends via WebSocket with event_index for reconnection support"""
if not self.websocket:
return
try:
if hasattr(event, "to_dict"):
data = event.to_dict()
elif hasattr(event, "__dict__"):
data = event.__dict__
elif isinstance(event, dict):
data = event
else:
data = {"type": "message", "content": str(event)}
# Add event_index for reconnection support (if provided)
if event_index is not None:
data["event_index"] = event_index
# Only set run_id if not already present in the event data
# This preserves the agent's own run_id for agent events
if run_id and "run_id" not in data:
data["run_id"] = run_id
await self.websocket.send_text(self.format_sse_event(json.dumps(data, default=json_serializer)))
except RuntimeError as e:
if "websocket.close" in str(e).lower() or "already completed" in str(e).lower():
log_debug("WebSocket closed, event not sent (expected during disconnection)")
else:
log_warning(f"Failed to handle WebSocket event: {e}")
except Exception as e:
log_warning(f"Failed to handle WebSocket event: {e}")
class WebSocketManager:
"""
Manages WebSocket connections for agent, team, and workflow runs.
Handles connection lifecycle, authentication, and message broadcasting
for real-time event streaming across all execution types.
"""
active_connections: Dict[str, WebSocket] # {run_id: websocket}
authenticated_connections: Dict[WebSocket, bool] # {websocket: is_authenticated}
def __init__(
self,
active_connections: Optional[Dict[str, WebSocket]] = None,
):
# Store active connections: {run_id: websocket}
self.active_connections = active_connections or {}
# Track authentication state for each websocket
self.authenticated_connections = {}
async def connect(self, websocket: WebSocket, requires_auth: bool = True):
"""Accept WebSocket connection"""
await websocket.accept()
logger.debug("WebSocket connected")
# Send connection confirmation with auth requirement info
await websocket.send_text(
json.dumps(
{
"event": "connected",
"message": (
"Connected to AgentOS. Please authenticate to continue."
if requires_auth
else "Connected to AgentOS."
),
"requires_auth": requires_auth,
}
)
)
async def authenticate_websocket(self, websocket: WebSocket):
"""Mark a WebSocket connection as authenticated"""
self.authenticated_connections[websocket] = True
logger.debug("WebSocket authenticated")
# Send authentication confirmation
await websocket.send_text(
json.dumps(
{
"event": "authenticated",
"message": "Authentication successful. You can now send commands.",
}
)
)
def is_authenticated(self, websocket: WebSocket) -> bool:
"""Check if a WebSocket connection is authenticated"""
return self.authenticated_connections.get(websocket, False)
async def register_websocket(self, run_id: str, websocket: WebSocket):
"""Register a run (agent/team/workflow) with its WebSocket connection"""
self.active_connections[run_id] = websocket
logger.debug(f"Registered WebSocket for run_id: {run_id}")
async def broadcast_to_run(self, run_id: str, message: str):
"""Broadcast a message to the websocket registered for this run (agent/team/workflow)"""
if run_id in self.active_connections:
websocket = self.active_connections[run_id]
try:
await websocket.send_text(message)
except Exception as e:
log_warning(f"Failed to broadcast to run {run_id}: {e}")
# Remove dead connection
await self.disconnect_by_run_id(run_id)
async def disconnect_by_run_id(self, run_id: str):
"""Remove WebSocket connection by run_id"""
if run_id in self.active_connections:
websocket = self.active_connections[run_id]
del self.active_connections[run_id]
# Clean up authentication state
if websocket in self.authenticated_connections:
del self.authenticated_connections[websocket]
logger.debug(f"WebSocket disconnected for run_id: {run_id}")
async def disconnect_websocket(self, websocket: WebSocket):
"""Remove WebSocket connection and clean up all associated state"""
# Remove from authenticated connections
if websocket in self.authenticated_connections:
del self.authenticated_connections[websocket]
# Remove from active connections
runs_to_remove = [run_id for run_id, ws in self.active_connections.items() if ws == websocket]
for run_id in runs_to_remove:
del self.active_connections[run_id]
logger.debug("WebSocket disconnected and cleaned up")
async def get_websocket_for_run(self, run_id: str) -> Optional[WebSocket]:
"""Get WebSocket connection for a run (agent/team/workflow)"""
return self.active_connections.get(run_id)
class EventsBuffer:
"""
In-memory buffer for events to support WebSocket reconnection.
Stores recent events for active runs (agents, teams, workflows), allowing clients
to catch up on missed events when reconnecting after disconnection or page refresh.
Buffers all event types: RunOutputEvent (agents), TeamRunOutputEvent (teams),
and WorkflowRunOutputEvent (workflows).
"""
def __init__(self, max_events_per_run: int = 1000, cleanup_interval: int = 3600):
"""
Initialize the event buffer.
Args:
max_events_per_run: Maximum number of events to store per run (prevents memory bloat)
cleanup_interval: How long (in seconds) to keep completed runs in buffer
"""
# Store all event types (WorkflowRunOutputEvent, RunOutputEvent, TeamRunOutputEvent)
self.events: Dict[str, List[Union[WorkflowRunOutputEvent, RunOutputEvent, TeamRunOutputEvent]]] = {}
self.run_metadata: Dict[str, Dict[str, Any]] = {} # {run_id: {status, last_updated, etc}}
self.max_events_per_run = max_events_per_run
self.cleanup_interval = cleanup_interval
def add_event(self, run_id: str, event: Union[WorkflowRunOutputEvent, RunOutputEvent, TeamRunOutputEvent]) -> int:
"""Add event to buffer for a specific run and return the event index (handles workflow, agent, and team events)"""
current_time = time()
if run_id not in self.events:
self.events[run_id] = []
self.run_metadata[run_id] = {
"status": RunStatus.running,
"created_at": current_time,
"last_updated": current_time,
}
self.events[run_id].append(event)
self.run_metadata[run_id]["last_updated"] = current_time
# Get the index of the event we just added (before potential trimming)
event_index = len(self.events[run_id]) - 1
# Keep buffer size under control - trim oldest events if exceeded
if len(self.events[run_id]) > self.max_events_per_run:
self.events[run_id] = self.events[run_id][-self.max_events_per_run :]
log_debug(f"Trimmed event buffer for run {run_id} to {self.max_events_per_run} events")
return event_index
def get_events(
self, run_id: str, last_event_index: Optional[int] = None
) -> List[Union[WorkflowRunOutputEvent, RunOutputEvent, TeamRunOutputEvent]]:
"""
Get events since the last received event index.
Args:
run_id: The run ID (agent/team/workflow)
last_event_index: Index of last event received by client (0-based)
Returns:
List of events since last_event_index, or all events if None
"""
events = self.events.get(run_id, [])
if last_event_index is None:
# Client has no events, send all
return events
# Client has events up to last_event_index, send new ones
# last_event_index is 0-based, so we want events starting from index + 1
if last_event_index >= len(events) - 1:
# Client is caught up
return []
return events[last_event_index + 1 :]
def get_event_count(self, run_id: str) -> int:
"""Get the current number of events for a run"""
return len(self.events.get(run_id, []))
def set_run_completed(self, run_id: str, status: RunStatus) -> None:
"""Mark a run as completed/cancelled/error for future cleanup"""
if run_id in self.run_metadata:
self.run_metadata[run_id]["status"] = status
self.run_metadata[run_id]["completed_at"] = time()
log_debug(f"Marked run {run_id} as {status}")
# Trigger cleanup of old completed runs
self.cleanup_runs()
def cleanup_run(self, run_id: str) -> None:
"""Remove buffer for a completed run (called after retention period)"""
if run_id in self.events:
del self.events[run_id]
if run_id in self.run_metadata:
del self.run_metadata[run_id]
log_debug(f"Cleaned up event buffer for run {run_id}")
def cleanup_runs(self) -> None:
"""Clean up runs that have been completed for longer than cleanup_interval"""
current_time = time()
runs_to_cleanup = []
for run_id, metadata in self.run_metadata.items():
# Only cleanup completed runs
if metadata["status"] in [RunStatus.completed, RunStatus.error, RunStatus.cancelled]:
completed_at = metadata.get("completed_at", metadata["last_updated"])
if current_time - completed_at > self.cleanup_interval:
runs_to_cleanup.append(run_id)
for run_id in runs_to_cleanup:
self.cleanup_run(run_id)
if runs_to_cleanup:
log_debug(f"Cleaned up {len(runs_to_cleanup)} old run buffers")
def get_run_status(self, run_id: str) -> Optional[RunStatus]:
"""Get the status of a run from metadata"""
metadata = self.run_metadata.get(run_id)
return metadata["status"] if metadata else None
# Global manager instances
websocket_manager = WebSocketManager(
active_connections={},
)
event_buffer = EventsBuffer(
max_events_per_run=10000, # Keep last 10000 events per run
cleanup_interval=1800, # Clean up completed runs after 30 minutes
)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/managers.py",
"license": "Apache License 2.0",
"lines": 262,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/agent/remote.py | import json
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Literal, Optional, Sequence, Tuple, Union, overload
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.base import Model
from agno.models.message import Message
from agno.models.response import ToolExecution
from agno.remote.base import BaseRemote, RemoteDb, RemoteKnowledge
from agno.run.agent import RunOutput, RunOutputEvent
from agno.utils.agent import validate_input
from agno.utils.log import log_warning
from agno.utils.remote import serialize_input
if TYPE_CHECKING:
from agno.os.routers.agents.schema import AgentResponse
@dataclass
class RemoteAgent(BaseRemote):
    """Client-side proxy for an agent hosted on a remote AgentOS or A2A server.

    NOTE(review): the class defines its own ``__init__`` below, so ``@dataclass``
    does not generate one; the annotated fields here act as class-level defaults.
    """

    # Private cache for agent config with TTL: (config, timestamp)
    _cached_agent_config: Optional[Tuple["AgentResponse", float]] = field(default=None, init=False, repr=False)

    # Run-configuration defaults — these appear to mirror the corresponding options
    # on the local Agent class; TODO confirm which of them are actually forwarded
    # to the remote server (none are referenced in the methods visible here).
    knowledge_filters: Optional[Dict[str, Any]] = None
    enable_agentic_knowledge_filters: Optional[bool] = False
    output_schema: Optional[Any] = None
    store_media: bool = True
    store_tool_messages: bool = True
    store_history_messages: bool = False
    send_media_to_model: bool = True
    add_history_to_context: bool = False
    num_history_runs: Optional[int] = None
    num_history_messages: Optional[int] = None
    # Local debug-logging controls.
    debug_mode: bool = False
    debug_level: Literal[1, 2] = 1
def __init__(
self,
base_url: str,
agent_id: str,
timeout: float = 60.0,
protocol: Literal["agentos", "a2a"] = "agentos",
a2a_protocol: Literal["json-rpc", "rest"] = "rest",
config_ttl: float = 300.0,
):
"""Initialize RemoteAgent for remote execution.
Supports two protocols:
- "agentos": Agno's proprietary AgentOS REST API (default)
- "a2a": A2A (Agent-to-Agent) protocol for cross-framework communication
Args:
base_url: Base URL for remote instance (e.g., "http://localhost:7777")
agent_id: ID of remote agent on the remote server
timeout: Request timeout in seconds (default: 60)
protocol: Communication protocol - "agentos" (default) or "a2a"
a2a_protocol: For A2A protocol only - Whether to use JSON-RPC or REST protocol.
config_ttl: Time-to-live for cached config in seconds (default: 300)
"""
super().__init__(base_url, timeout, protocol, a2a_protocol, config_ttl)
self.agent_id = agent_id
self._cached_agent_config = None
@property
def id(self) -> str:
return self.agent_id
async def get_agent_config(self) -> "AgentResponse":
"""
Get the agent config from remote.
For A2A protocol, returns a minimal AgentResponse since A2A servers
don't expose the same config endpoints as AgentOS. For AgentOS, always fetches fresh config.
"""
from agno.os.routers.agents.schema import AgentResponse
if self.a2a_client:
from agno.client.a2a.schemas import AgentCard
agent_card: Optional[AgentCard] = await self.a2a_client.aget_agent_card()
return AgentResponse(
id=self.agent_id,
name=agent_card.name if agent_card else self.agent_id,
description=agent_card.description if agent_card else f"A2A agent: {self.agent_id}",
)
return await self.agentos_client.aget_agent(self.agent_id) # type: ignore
@property
def _agent_config(self) -> Optional["AgentResponse"]:
"""
Get the agent config from remote, cached with TTL.
Returns None for A2A protocol since A2A servers don't expose agent config endpoints.
"""
import time
from agno.os.routers.agents.schema import AgentResponse
if self.a2a_client:
from agno.client.a2a.schemas import AgentCard
agent_card: Optional[AgentCard] = self.a2a_client.get_agent_card()
return AgentResponse(
id=self.agent_id,
name=agent_card.name if agent_card else self.agent_id,
description=agent_card.description if agent_card else f"A2A agent: {self.agent_id}",
)
current_time = time.time()
# Check if cache is valid
if self._cached_agent_config is not None:
config, cached_at = self._cached_agent_config
if current_time - cached_at < self.config_ttl:
return config
# Fetch fresh config
config: AgentResponse = self.agentos_client.get_agent(self.agent_id) # type: ignore
self._cached_agent_config = (config, current_time)
return config
async def refresh_config(self) -> Optional["AgentResponse"]:
"""
Force refresh the cached agent config.
Returns None for A2A protocol.
"""
import time
from agno.os.routers.agents.schema import AgentResponse
if self.a2a_client:
self._cached_agent_config = None
return None
config: AgentResponse = await self.agentos_client.aget_agent(self.agent_id) # type: ignore
self._cached_agent_config = (config, time.time())
return config
@property
def name(self) -> Optional[str]:
if self._agent_config is not None:
return self._agent_config.name
return self.agent_id
@property
def description(self) -> Optional[str]:
if self._agent_config is not None:
return self._agent_config.description
return ""
    # NOTE(review): unlike `name` and `description` above, `role` is a plain method,
    # so callers must write `agent.role()` rather than `agent.role`. This looks like
    # a missing `@property` decorator, but adding it now would break any caller that
    # invokes it as a method — confirm intent before changing.
    def role(self) -> Optional[str]:
        """Return the remote agent's role from its config, or None when unavailable."""
        if self._agent_config is not None:
            return self._agent_config.role
        return None
@property
def tools(self) -> Optional[List[Dict[str, Any]]]:
if self._agent_config is not None:
try:
return json.loads(self._agent_config.tools["tools"]) if self._agent_config.tools else None
except Exception as e:
log_warning(f"Failed to load tools for agent {self.agent_id}: {e}")
return None
return None
@property
def db(self) -> Optional[RemoteDb]:
if (
self.agentos_client
and self._config
and self._agent_config is not None
and self._agent_config.db_id is not None
):
return RemoteDb.from_config(
db_id=self._agent_config.db_id,
client=self.agentos_client,
config=self._config,
)
return None
@property
def knowledge(self) -> Optional[RemoteKnowledge]:
if self.agentos_client and self._agent_config is not None and self._agent_config.knowledge is not None:
return RemoteKnowledge(
client=self.agentos_client,
contents_db=RemoteDb(
id=self._agent_config.knowledge.get("db_id"), # type: ignore
client=self.agentos_client,
knowledge_table_name=self._agent_config.knowledge.get("knowledge_table"),
)
if self._agent_config.knowledge.get("db_id") is not None
else None,
)
return None
@property
def model(self) -> Optional[Model]:
# We don't expose the remote agent's models, since they can't be used by other services in AgentOS.
return None
async def aget_tools(self, **kwargs: Any) -> List[Dict]:
if self._agent_config is not None and self._agent_config.tools is not None:
return json.loads(self._agent_config.tools["tools"])
return []
@overload
async def arun(
self,
input: Union[str, List, Dict, Message, BaseModel, List[Message]],
*,
stream: Literal[False] = False,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
audio: Optional[Sequence[Audio]] = None,
images: Optional[Sequence[Image]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[File]] = None,
stream_events: Optional[bool] = None,
retries: Optional[int] = None,
knowledge_filters: Optional[Dict[str, Any]] = None,
add_history_to_context: Optional[bool] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
dependencies: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> RunOutput: ...
@overload
def arun(
self,
input: Union[str, List, Dict, Message, BaseModel, List[Message]],
*,
stream: Literal[True] = True,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
audio: Optional[Sequence[Audio]] = None,
images: Optional[Sequence[Image]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[File]] = None,
stream_events: Optional[bool] = None,
retries: Optional[int] = None,
knowledge_filters: Optional[Dict[str, Any]] = None,
add_history_to_context: Optional[bool] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
dependencies: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> AsyncIterator[RunOutputEvent]: ...
def arun( # type: ignore
self,
input: Union[str, List, Dict, Message, BaseModel, List[Message]],
*,
stream: Optional[bool] = None,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
audio: Optional[Sequence[Audio]] = None,
images: Optional[Sequence[Image]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[File]] = None,
stream_events: Optional[bool] = None,
retries: Optional[int] = None,
knowledge_filters: Optional[Dict[str, Any]] = None,
add_history_to_context: Optional[bool] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
dependencies: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> Union[
RunOutput,
AsyncIterator[RunOutputEvent],
]:
validated_input = validate_input(input)
serialized_input = serialize_input(validated_input)
headers = self._get_auth_headers(auth_token)
# A2A protocol path
if self.a2a_client:
return self._arun_a2a( # type: ignore[return-value]
message=serialized_input,
stream=stream or False,
user_id=user_id,
context_id=session_id, # Map session_id → context_id for A2A
audio=audio,
images=images,
videos=videos,
files=files,
headers=headers,
)
# AgentOS protocol path (default)
if self.agentos_client:
if stream:
# Handle streaming response
return self.agentos_client.run_agent_stream(
agent_id=self.agent_id,
message=serialized_input,
session_id=session_id,
user_id=user_id,
audio=audio,
images=images,
videos=videos,
files=files,
session_state=session_state,
stream_events=stream_events,
retries=retries,
knowledge_filters=knowledge_filters,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
dependencies=dependencies,
metadata=metadata,
headers=headers,
**kwargs,
)
else:
return self.agentos_client.run_agent( # type: ignore
agent_id=self.agent_id,
message=serialized_input,
session_id=session_id,
user_id=user_id,
audio=audio,
images=images,
videos=videos,
files=files,
session_state=session_state,
stream_events=stream_events,
retries=retries,
knowledge_filters=knowledge_filters,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
dependencies=dependencies,
metadata=metadata,
headers=headers,
**kwargs,
)
else:
raise ValueError("No client available")
def _arun_a2a(
self,
message: str,
stream: bool,
user_id: Optional[str],
context_id: Optional[str],
audio: Optional[Sequence[Audio]],
images: Optional[Sequence[Image]],
videos: Optional[Sequence[Video]],
files: Optional[Sequence[File]],
headers: Optional[Dict[str, str]],
) -> Union[RunOutput, AsyncIterator[RunOutputEvent]]:
"""Execute via A2A protocol.
Args:
message: Serialized message string
stream: Whether to stream the response
user_id: User identifier
context_id: Session/context ID (maps to session_id)
audio: Audio files to include
images: Images to include
videos: Videos to include
files: Files to include
headers: HTTP headers to include in the request (optional)
Returns:
RunOutput for non-streaming, AsyncIterator[RunOutputEvent] for streaming
"""
if not self.a2a_client:
raise ValueError("A2A client not available")
from agno.client.a2a.utils import map_stream_events_to_run_events
if stream:
# Return async generator for streaming
event_stream = self.a2a_client.stream_message(
message=message,
context_id=context_id,
user_id=user_id,
images=list(images) if images else None,
audio=list(audio) if audio else None,
videos=list(videos) if videos else None,
files=list(files) if files else None,
headers=headers,
)
return map_stream_events_to_run_events(event_stream, agent_id=self.agent_id)
else:
# Return coroutine for non-streaming
return self._arun_a2a_send( # type: ignore[return-value]
message=message,
user_id=user_id,
context_id=context_id,
audio=audio,
images=images,
videos=videos,
files=files,
headers=headers,
)
async def _arun_a2a_send(
self,
message: str,
user_id: Optional[str],
context_id: Optional[str],
audio: Optional[Sequence[Audio]],
images: Optional[Sequence[Image]],
videos: Optional[Sequence[Video]],
files: Optional[Sequence[File]],
headers: Optional[Dict[str, str]],
) -> RunOutput:
"""Send a non-streaming A2A message and convert response to RunOutput."""
if not self.a2a_client:
raise ValueError("A2A client not available")
from agno.client.a2a.utils import map_task_result_to_run_output
task_result = await self.a2a_client.send_message(
message=message,
context_id=context_id,
user_id=user_id,
images=list(images) if images else None,
audio=list(audio) if audio else None,
videos=list(videos) if videos else None,
files=list(files) if files else None,
headers=headers,
)
return map_task_result_to_run_output(task_result, agent_id=self.agent_id, user_id=user_id)
@overload
async def acontinue_run(
self,
run_id: str,
updated_tools: Optional[List[ToolExecution]] = None,
stream: Literal[False] = False,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> RunOutput: ...
@overload
def acontinue_run(
self,
run_id: str,
updated_tools: Optional[List[ToolExecution]] = None,
stream: Literal[True] = True,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> AsyncIterator[RunOutputEvent]: ...
def acontinue_run( # type: ignore
self,
run_id: str, # type: ignore
updated_tools: Optional[List[ToolExecution]] = None,
stream: Optional[bool] = None,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> Union[
RunOutput,
AsyncIterator[RunOutputEvent],
]:
headers = self._get_auth_headers(auth_token)
if self.agentos_client:
tools_list = updated_tools or []
if stream:
# Handle streaming response
return self.agentos_client.continue_agent_run_stream( # type: ignore
agent_id=self.agent_id,
run_id=run_id,
user_id=user_id,
session_id=session_id,
tools=tools_list,
headers=headers,
**kwargs,
)
else:
return self.agentos_client.continue_agent_run( # type: ignore
agent_id=self.agent_id,
run_id=run_id,
tools=tools_list,
user_id=user_id,
session_id=session_id,
headers=headers,
**kwargs,
)
else:
raise ValueError("No client available")
async def acancel_run(self, run_id: str, auth_token: Optional[str] = None) -> bool:
"""Cancel a running agent execution.
Args:
run_id (str): The run_id to cancel.
auth_token: Optional JWT token for authentication.
Returns:
bool: True if the run was successfully cancelled, False otherwise.
"""
headers = self._get_auth_headers(auth_token)
if not self.agentos_client:
raise ValueError("AgentOS client not available")
try:
await self.agentos_client.cancel_agent_run(
agent_id=self.agent_id,
run_id=run_id,
headers=headers,
)
return True
except Exception:
return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/agent/remote.py",
"license": "Apache License 2.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/client/os.py | import json
from datetime import date
from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Sequence, Union
from fastapi import UploadFile
from httpx import ConnectError, ConnectTimeout, TimeoutException
from agno.db.base import SessionType
from agno.db.schemas.evals import EvalFilterType, EvalType
from agno.exceptions import RemoteServerUnavailableError
from agno.media import Audio, File, Image, Video
from agno.media import File as MediaFile
from agno.models.response import ToolExecution
from agno.os.routers.agents.schema import AgentResponse
from agno.os.routers.evals.schemas import (
DeleteEvalRunsRequest,
EvalRunInput,
EvalSchema,
UpdateEvalRunRequest,
)
from agno.os.routers.knowledge.schemas import (
ConfigResponseSchema as KnowledgeConfigResponse,
)
from agno.os.routers.knowledge.schemas import (
ContentResponseSchema,
ContentStatusResponse,
VectorSearchResult,
)
from agno.os.routers.memory.schemas import (
DeleteMemoriesRequest,
OptimizeMemoriesRequest,
OptimizeMemoriesResponse,
UserMemoryCreateSchema,
UserMemorySchema,
UserStatsSchema,
)
from agno.os.routers.metrics.schemas import DayAggregatedMetrics, MetricsResponse
from agno.os.routers.teams.schema import TeamResponse
from agno.os.routers.traces.schemas import (
TraceDetail,
TraceNode,
TraceSearchGroupBy,
TraceSessionStats,
TraceSummary,
)
from agno.os.routers.workflows.schema import WorkflowResponse
from agno.os.schema import (
AgentSessionDetailSchema,
AgentSummaryResponse,
ConfigResponse,
CreateSessionRequest,
DeleteSessionRequest,
Model,
PaginatedResponse,
PaginationInfo,
RunSchema,
SessionSchema,
TeamRunSchema,
TeamSessionDetailSchema,
TeamSummaryResponse,
UpdateSessionRequest,
WorkflowRunSchema,
WorkflowSessionDetailSchema,
WorkflowSummaryResponse,
)
from agno.run.agent import RunOutput, RunOutputEvent, run_output_event_from_dict
from agno.run.team import TeamRunOutput, TeamRunOutputEvent, team_run_output_event_from_dict
from agno.run.workflow import WorkflowRunOutput, WorkflowRunOutputEvent, workflow_run_output_event_from_dict
from agno.utils.http import get_default_async_client, get_default_sync_client
class AgentOSClient:
"""Client for interacting with AgentOS API endpoints.
Attributes:
base_url: Base URL of the AgentOS instance
timeout: Request timeout in seconds
"""
def __init__(
self,
base_url: str,
timeout: float = 60.0,
):
"""Initialize AgentOSClient.
Args:
base_url: Base URL of the AgentOS instance (e.g., "http://localhost:7777")
timeout: Request timeout in seconds (default: 60.0)
"""
self.base_url = base_url.rstrip("/")
self.timeout = timeout
def _request(
self,
method: str,
endpoint: str,
data: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
as_form: bool = False,
) -> Any:
"""Execute synchronous HTTP request.
Args:
method: HTTP method (GET, POST, PATCH, DELETE)
endpoint: API endpoint path (without base URL)
data: Request body data (optional)
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
as_form: If True, send data as form data instead of JSON
Returns:
Parsed JSON response, or None for empty responses
Raises:
RemoteServerUnavailableError: When the remote server is unavailable
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
url = f"{self.base_url}{endpoint}"
kwargs: Dict[str, Any] = {"headers": headers or {}}
if data is not None:
if as_form:
kwargs["data"] = data
else:
kwargs["json"] = data
if params is not None:
kwargs["params"] = params
sync_client = get_default_sync_client()
try:
response = sync_client.request(method, url, timeout=self.timeout, **kwargs)
response.raise_for_status()
# Return None for empty responses (204 No Content, etc.)
if not response.content:
return None
return response.json()
except (ConnectError, ConnectTimeout) as e:
raise RemoteServerUnavailableError(
message=f"Failed to connect to remote server at {self.base_url}",
base_url=self.base_url,
original_error=e,
) from e
except TimeoutException as e:
raise RemoteServerUnavailableError(
message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds.",
base_url=self.base_url,
original_error=e,
) from e
async def _arequest(
self,
method: str,
endpoint: str,
data: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
as_form: bool = False,
) -> Any:
"""Execute asynchronous HTTP request.
Args:
method: HTTP method (GET, POST, PATCH, DELETE)
endpoint: API endpoint path (without base URL)
data: Request body data (optional)
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
as_form: If True, send data as form data instead of JSON
Returns:
Parsed JSON response, or None for empty responses
Raises:
RemoteServerUnavailableError: When the remote server is unavailable
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
url = f"{self.base_url}{endpoint}"
kwargs: Dict[str, Any] = {"headers": headers or {}}
if data is not None:
if as_form:
kwargs["data"] = data
else:
kwargs["json"] = data
if params is not None:
kwargs["params"] = params
async_client = get_default_async_client()
try:
response = await async_client.request(method, url, timeout=self.timeout, **kwargs)
response.raise_for_status()
# Return None for empty responses (204 No Content, etc.)
if not response.content:
return None
return response.json()
except (ConnectError, ConnectTimeout) as e:
raise RemoteServerUnavailableError(
message=f"Failed to connect to remote server at {self.base_url}",
base_url=self.base_url,
original_error=e,
) from e
except TimeoutException as e:
raise RemoteServerUnavailableError(
message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds",
base_url=self.base_url,
original_error=e,
) from e
def _get(
self,
endpoint: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> Any:
"""Execute synchronous GET request.
Args:
endpoint: API endpoint path (without base URL)
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
Returns:
Parsed JSON response
Raises:
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
return self._request("GET", endpoint, params=params, headers=headers)
async def _aget(
self,
endpoint: str,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> Any:
"""Execute asynchronous GET request.
Args:
endpoint: API endpoint path (without base URL)
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
Returns:
Parsed JSON response
Raises:
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
return await self._arequest("GET", endpoint, params=params, headers=headers)
async def _apost(
self,
endpoint: str,
data: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
as_form: bool = False,
) -> Any:
"""Execute asynchronous POST request.
Args:
endpoint: API endpoint path (without base URL)
data: Request body data (optional)
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
as_form: If True, send data as form data instead of JSON
Returns:
Parsed JSON response
Raises:
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
return await self._arequest("POST", endpoint, data=data, params=params, headers=headers, as_form=as_form)
async def _apatch(
self,
endpoint: str,
data: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> Any:
"""Execute asynchronous PATCH request.
Args:
endpoint: API endpoint path (without base URL)
data: Request body data
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
Returns:
Parsed JSON response
Raises:
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
return await self._arequest("PATCH", endpoint, data=data, params=params, headers=headers)
async def _adelete(
self,
endpoint: str,
data: Optional[Dict[str, Any]] = None,
params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
"""Execute asynchronous DELETE request.
Args:
endpoint: API endpoint path (without base URL)
data: Optional request body data
params: Query parameters (optional)
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors (4xx, 5xx)
"""
await self._arequest("DELETE", endpoint, data=data, params=params, headers=headers)
    async def _astream_post_form_data(
        self,
        endpoint: str,
        data: Dict[str, Any],
        headers: Optional[Dict[str, str]] = None,
    ) -> AsyncIterator[str]:
        """Stream POST request with form data.

        The HTTP connection is held open for the lifetime of this async generator;
        closing the generator (or exhausting it) releases the connection.

        Args:
            endpoint: API endpoint path (without base URL)
            data: Form data dictionary
            headers: HTTP headers to include in the request (optional)

        Yields:
            str: Lines from the streaming response

        Raises:
            RemoteServerUnavailableError: When the remote server is unavailable
        """
        url = f"{self.base_url}{endpoint}"
        async_client = get_default_async_client()
        try:
            async with async_client.stream(
                "POST", url, data=data, headers=headers or {}, timeout=self.timeout
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    yield line
        # ConnectTimeout is caught here before the broader TimeoutException below.
        except (ConnectError, ConnectTimeout) as e:
            raise RemoteServerUnavailableError(
                message=f"Failed to connect to remote server at {self.base_url}",
                base_url=self.base_url,
                original_error=e,
            ) from e
        except TimeoutException as e:
            raise RemoteServerUnavailableError(
                message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds",
                base_url=self.base_url,
                original_error=e,
            ) from e
async def _parse_sse_events(
self,
raw_stream: AsyncIterator[str],
event_parser: Callable[[dict], Any],
) -> AsyncIterator[Any]:
"""Parse SSE stream into typed event objects.
Args:
raw_stream: Raw SSE lines from streaming response
event_parser: Function to parse event dict into typed object
Yields:
Parsed event objects
"""
from agno.utils.log import logger
async for line in raw_stream:
# Skip empty lines and comments (SSE protocol)
if not line or line.startswith(":"):
continue
# Parse SSE data lines
if line.startswith("data: "):
try:
# Extract and parse JSON payload
json_str = line[6:] # Remove "data: " prefix
event_dict = json.loads(json_str)
# Parse into typed event using provided factory
event = event_parser(event_dict)
yield event
except json.JSONDecodeError as e:
logger.error(f"Failed to parse SSE JSON: {line[:100]}... | Error: {e}")
continue # Skip bad events, continue stream
except ValueError as e:
logger.error(f"Unknown event type: {line[:100]}... | Error: {e}")
continue # Skip unknown events, continue stream
# Discovery & Configuration Operations
def get_config(self, headers: Optional[Dict[str, str]] = None) -> ConfigResponse:
"""Get AgentOS configuration and metadata.
Returns comprehensive OS configuration including:
- OS metadata (id, description, version)
- List of available agents
- List of available teams
- List of available workflows
- Interface configurations
- Knowledge, evals, and metrics settings
Args:
headers: HTTP headers to include in the request (optional)
Returns:
ConfigResponse: Complete OS configuration
We need this sync version so it can be used for other sync use-cases upstream
"""
data = self._get("/config", headers=headers)
return ConfigResponse.model_validate(data)
async def aget_config(self, headers: Optional[Dict[str, str]] = None) -> ConfigResponse:
"""Get AgentOS configuration and metadata.
Returns comprehensive OS configuration including:
- OS metadata (id, description, version)
- List of available agents
- List of available teams
- List of available workflows
- Interface configurations
- Knowledge, evals, and metrics settings
Args:
headers: HTTP headers to include in the request (optional)
Returns:
ConfigResponse: Complete OS configuration
Raises:
HTTPStatusError: On HTTP errors
"""
data = await self._aget("/config", headers=headers)
return ConfigResponse.model_validate(data)
async def get_models(self, headers: Optional[Dict[str, str]] = None) -> List[Model]:
"""Get list of all models used by agents and teams.
Args:
headers: HTTP headers to include in the request (optional)
Returns:
List[Model]: List of model configurations
Raises:
HTTPStatusError: On HTTP errors
"""
data = await self._aget("/models", headers=headers)
return [Model.model_validate(item) for item in data]
async def migrate_database(
self, db_id: str, target_version: Optional[str] = None, headers: Optional[Dict[str, str]] = None
) -> None:
"""Migrate a database to a target version.
Args:
db_id: ID of the database to migrate
target_version: Target version to migrate to
headers: HTTP headers to include in the request (optional)
"""
return await self._apost(
f"/databases/{db_id}/migrate", data={"target_version": target_version}, headers=headers
)
async def list_agents(self, headers: Optional[Dict[str, str]] = None) -> List[AgentSummaryResponse]:
"""List all agents configured in the AgentOS instance.
Returns summary information for each agent including:
- Agent ID, name, description
- Model configuration
- Basic settings
Args:
headers: HTTP headers to include in the request (optional)
Returns:
List[AgentSummaryResponse]: List of agent summaries
Raises:
HTTPStatusError: On HTTP errors
"""
data = await self._aget("/agents", headers=headers)
return [AgentSummaryResponse.model_validate(item) for item in data]
def get_agent(self, agent_id: str, headers: Optional[Dict[str, str]] = None) -> AgentResponse:
"""Get detailed configuration for a specific agent.
Args:
agent_id: ID of the agent to retrieve
headers: HTTP headers to include in the request (optional)
Returns:
AgentResponse: Detailed agent configuration
Raises:
HTTPStatusError: On HTTP errors (404 if agent not found)
"""
data = self._get(f"/agents/{agent_id}", headers=headers)
return AgentResponse.model_validate(data)
async def aget_agent(self, agent_id: str, headers: Optional[Dict[str, str]] = None) -> AgentResponse:
"""Get detailed configuration for a specific agent.
Args:
agent_id: ID of the agent to retrieve
headers: HTTP headers to include in the request (optional)
Returns:
AgentResponse: Detailed agent configuration
Raises:
HTTPStatusError: On HTTP errors (404 if agent not found)
"""
data = await self._aget(f"/agents/{agent_id}", headers=headers)
return AgentResponse.model_validate(data)
async def run_agent(
self,
agent_id: str,
message: str,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
images: Optional[Sequence[Image]] = None,
audio: Optional[Sequence[Audio]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[MediaFile]] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> RunOutput:
"""Execute an agent run.
Args:
agent_id: ID of the agent to run
message: The message/prompt for the agent
session_id: Optional session ID for context
user_id: Optional user ID
images: Optional list of Image objects
audio: Optional list of Audio objects
videos: Optional list of Video objects
files: Optional list of MediaFile objects
headers: HTTP headers to include in the request (optional)
**kwargs: Additional parameters passed to the agent run, such as:
- session_state: Dict for session state
- dependencies: Dict for dependencies
- metadata: Dict for metadata
- knowledge_filters: Filters for knowledge search
- output_schema: JSON schema for structured output
Returns:
RunOutput: The run response
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/agents/{agent_id}/runs"
data: Dict[str, Any] = {"message": message, "stream": "false"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
if images:
data["images"] = json.dumps([img.model_dump() for img in images])
if audio:
data["audio"] = json.dumps([a.model_dump() for a in audio])
if videos:
data["videos"] = json.dumps([v.model_dump() for v in videos])
if files:
data["files"] = json.dumps([f.model_dump() for f in files])
# Add kwargs to data, serializing dicts as JSON
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
data = {k: v for k, v in data.items() if v is not None}
response_data = await self._apost(endpoint, data, headers=headers, as_form=True)
return RunOutput.from_dict(response_data)
async def run_agent_stream(
self,
agent_id: str,
message: str,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
images: Optional[Sequence[Image]] = None,
audio: Optional[Sequence[Audio]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[MediaFile]] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> AsyncIterator[RunOutputEvent]:
"""Stream an agent run response.
Args:
agent_id: ID of the agent to run
message: The message/prompt for the agent
session_id: Optional session ID for context
user_id: Optional user ID
images: Optional list of Image objects
audio: Optional list of Audio objects
videos: Optional list of Video objects
files: Optional list of MediaFile objects
headers: HTTP headers to include in the request (optional)
**kwargs: Additional parameters (session_state, dependencies, metadata, etc.)
Yields:
RunOutputEvent: Typed event objects (RunStartedEvent, RunContentEvent, etc.)
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/agents/{agent_id}/runs"
data: Dict[str, Any] = {"message": message, "stream": "true"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
if images:
data["images"] = json.dumps([img.model_dump() for img in images])
if audio:
data["audio"] = json.dumps([a.model_dump() for a in audio])
if videos:
data["videos"] = json.dumps([v.model_dump() for v in videos])
if files:
data["files"] = json.dumps([f.model_dump() for f in files])
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
data = {k: v for k, v in data.items() if v is not None}
# Get raw SSE stream and parse into typed events
raw_stream = self._astream_post_form_data(endpoint, data, headers=headers)
async for event in self._parse_sse_events(raw_stream, run_output_event_from_dict):
yield event
async def continue_agent_run(
self,
agent_id: str,
run_id: str,
tools: List[ToolExecution],
session_id: Optional[str] = None,
user_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> RunOutput:
"""Continue a paused agent run with tool results.
Args:
agent_id: ID of the agent
run_id: ID of the run to continue
tools: List of ToolExecution objects with tool results
stream: Whether to stream the response
session_id: Optional session ID
user_id: Optional user ID
headers: HTTP headers to include in the request (optional)
Returns:
RunOutput: The continued run response
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/agents/{agent_id}/runs/{run_id}/continue"
data: Dict[str, Any] = {"tools": json.dumps([tool.to_dict() for tool in tools]), "stream": "false"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
response_data = await self._apost(endpoint, data, headers=headers, as_form=True)
return RunOutput.from_dict(response_data)
async def continue_agent_run_stream(
self,
agent_id: str,
run_id: str,
tools: List[ToolExecution],
session_id: Optional[str] = None,
user_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> AsyncIterator[RunOutputEvent]:
"""Stream a continued agent run response.
Args:
agent_id: ID of the agent
run_id: ID of the run to continue
tools: List of ToolExecution objects with tool results
session_id: Optional session ID
user_id: Optional user ID
headers: HTTP headers to include in the request (optional)
Yields:
RunOutputEvent: Typed event objects (RunStartedEvent, RunContentEvent, etc.)
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/agents/{agent_id}/runs/{run_id}/continue"
data: Dict[str, Any] = {"tools": json.dumps([tool.to_dict() for tool in tools]), "stream": "true"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
raw_stream = self._astream_post_form_data(endpoint, data, headers=headers)
async for event in self._parse_sse_events(raw_stream, run_output_event_from_dict):
yield event
async def cancel_agent_run(self, agent_id: str, run_id: str, headers: Optional[Dict[str, str]] = None) -> None:
"""Cancel an agent run.
Args:
agent_id: ID of the agent
run_id: ID of the run to cancel
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
await self._apost(f"/agents/{agent_id}/runs/{run_id}/cancel", headers=headers)
async def list_teams(self, headers: Optional[Dict[str, str]] = None) -> List[TeamSummaryResponse]:
"""List all teams configured in the AgentOS instance.
Returns summary information for each team including:
- Team ID, name, description
- Model configuration
- Member information
Args:
headers: HTTP headers to include in the request (optional)
Returns:
List[TeamSummaryResponse]: List of team summaries
Raises:
HTTPStatusError: On HTTP errors
"""
data = await self._aget("/teams", headers=headers)
return [TeamSummaryResponse.model_validate(item) for item in data]
def get_team(self, team_id: str, headers: Optional[Dict[str, str]] = None) -> TeamResponse:
"""Get detailed configuration for a specific team.
Args:
team_id: ID of the team to retrieve
headers: HTTP headers to include in the request (optional)
Returns:
TeamResponse: Detailed team configuration
Raises:
HTTPStatusError: On HTTP errors (404 if team not found)
"""
data = self._get(f"/teams/{team_id}", headers=headers)
return TeamResponse.model_validate(data)
async def aget_team(self, team_id: str, headers: Optional[Dict[str, str]] = None) -> TeamResponse:
"""Get detailed configuration for a specific team.
Args:
team_id: ID of the team to retrieve
headers: HTTP headers to include in the request (optional)
Returns:
TeamResponse: Detailed team configuration
Raises:
HTTPStatusError: On HTTP errors (404 if team not found)
"""
data = await self._aget(f"/teams/{team_id}", headers=headers)
return TeamResponse.model_validate(data)
async def run_team(
self,
team_id: str,
message: str,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
images: Optional[Sequence[Image]] = None,
audio: Optional[Sequence[Audio]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[MediaFile]] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> TeamRunOutput:
"""Execute a team run.
Args:
team_id: ID of the team to run
message: The message/prompt for the team
session_id: Optional session ID for context
user_id: Optional user ID
images: Optional list of images
audio: Optional audio data
videos: Optional list of videos
files: Optional list of files
headers: HTTP headers to include in the request (optional)
**kwargs: Additional parameters passed to the team run
Returns:
TeamRunOutput: The team run response
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/teams/{team_id}/runs"
data: Dict[str, Any] = {"message": message, "stream": "false"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
if images:
data["images"] = json.dumps(images)
if audio:
data["audio"] = json.dumps(audio)
if videos:
data["videos"] = json.dumps(videos)
if files:
data["files"] = json.dumps(files)
# Add kwargs to data, serializing dicts as JSON
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
data = {k: v for k, v in data.items() if v is not None}
response_data = await self._apost(endpoint, data, headers=headers, as_form=True)
return TeamRunOutput.from_dict(response_data)
async def run_team_stream(
self,
team_id: str,
message: str,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
images: Optional[Sequence[Image]] = None,
audio: Optional[Sequence[Audio]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[MediaFile]] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> AsyncIterator[TeamRunOutputEvent]:
"""Stream a team run response.
Args:
team_id: ID of the team to run
message: The message/prompt for the team
session_id: Optional session ID for context
user_id: Optional user ID
images: Optional list of images
audio: Optional audio data
videos: Optional list of videos
files: Optional list of files
headers: HTTP headers to include in the request (optional)
**kwargs: Additional parameters passed to the team run
Yields:
TeamRunOutputEvent: Typed event objects (team and agent events)
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/teams/{team_id}/runs"
data: Dict[str, Any] = {"message": message, "stream": "true"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
if images:
data["images"] = json.dumps(images)
if audio:
data["audio"] = json.dumps(audio)
if videos:
data["videos"] = json.dumps(videos)
if files:
data["files"] = json.dumps(files)
# Add kwargs to data, serializing dicts as JSON
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
data = {k: v for k, v in data.items() if v is not None}
# Get raw SSE stream and parse into typed events
raw_stream = self._astream_post_form_data(endpoint, data, headers=headers)
async for event in self._parse_sse_events(raw_stream, team_run_output_event_from_dict):
yield event
async def cancel_team_run(self, team_id: str, run_id: str, headers: Optional[Dict[str, str]] = None) -> None:
"""Cancel a team run.
Args:
team_id: ID of the team
run_id: ID of the run to cancel
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
await self._apost(f"/teams/{team_id}/runs/{run_id}/cancel", headers=headers)
async def list_workflows(self, headers: Optional[Dict[str, str]] = None) -> List[WorkflowSummaryResponse]:
"""List all workflows configured in the AgentOS instance.
Returns summary information for each workflow including:
- Workflow ID, name, description
- Step information
Args:
headers: HTTP headers to include in the request (optional)
Returns:
List[WorkflowSummaryResponse]: List of workflow summaries
Raises:
HTTPStatusError: On HTTP errors
"""
data = await self._aget("/workflows", headers=headers)
return [WorkflowSummaryResponse.model_validate(item) for item in data]
def get_workflow(self, workflow_id: str, headers: Optional[Dict[str, str]] = None) -> WorkflowResponse:
"""Get detailed configuration for a specific workflow.
Args:
workflow_id: ID of the workflow to retrieve
headers: HTTP headers to include in the request (optional)
Returns:
WorkflowResponse: Detailed workflow configuration
Raises:
HTTPStatusError: On HTTP errors (404 if workflow not found)
"""
data = self._get(f"/workflows/{workflow_id}", headers=headers)
return WorkflowResponse.model_validate(data)
async def aget_workflow(self, workflow_id: str, headers: Optional[Dict[str, str]] = None) -> WorkflowResponse:
"""Get detailed configuration for a specific workflow.
Args:
workflow_id: ID of the workflow to retrieve
headers: HTTP headers to include in the request (optional)
Returns:
WorkflowResponse: Detailed workflow configuration
Raises:
HTTPStatusError: On HTTP errors (404 if workflow not found)
"""
data = await self._aget(f"/workflows/{workflow_id}", headers=headers)
return WorkflowResponse.model_validate(data)
async def run_workflow(
self,
workflow_id: str,
message: str,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
images: Optional[Sequence[Image]] = None,
audio: Optional[Sequence[Audio]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[MediaFile]] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> WorkflowRunOutput:
"""Execute a workflow run.
Args:
workflow_id: ID of the workflow to run
message: The message/prompt for the workflow
session_id: Optional session ID for context
user_id: Optional user ID
images: Optional list of images
audio: Optional audio data
videos: Optional list of videos
files: Optional list of files
headers: HTTP headers to include in the request (optional)
**kwargs: Additional parameters passed to the workflow run
Returns:
WorkflowRunOutput: The workflow run response
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/workflows/{workflow_id}/runs"
data: Dict[str, Any] = {"message": message, "stream": "false"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
if images:
data["images"] = json.dumps(images)
if audio:
data["audio"] = json.dumps(audio)
if videos:
data["videos"] = json.dumps(videos)
if files:
data["files"] = json.dumps(files)
# Add kwargs to data, serializing dicts as JSON
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
data = {k: v for k, v in data.items() if v is not None}
response_data = await self._apost(endpoint, data, headers=headers, as_form=True)
return WorkflowRunOutput.from_dict(response_data)
async def run_workflow_stream(
self,
workflow_id: str,
message: str,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
images: Optional[Sequence[Image]] = None,
audio: Optional[Sequence[Audio]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[MediaFile]] = None,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> AsyncIterator[WorkflowRunOutputEvent]:
"""Stream a workflow run response.
Args:
workflow_id: ID of the workflow to run
message: The message/prompt for the workflow
session_id: Optional session ID for context
user_id: Optional user ID
images: Optional list of images
audio: Optional audio data
videos: Optional list of videos
files: Optional list of files
headers: HTTP headers to include in the request (optional)
**kwargs: Additional parameters passed to the workflow run.
Yields:
WorkflowRunOutputEvent: Typed event objects (workflow, team, and agent events)
Raises:
HTTPStatusError: On HTTP errors
"""
endpoint = f"/workflows/{workflow_id}/runs"
data: Dict[str, Any] = {"message": message, "stream": "true"}
if session_id is not None:
data["session_id"] = session_id
if user_id is not None:
data["user_id"] = user_id
if images:
data["images"] = json.dumps(images)
if audio:
data["audio"] = json.dumps(audio)
if videos:
data["videos"] = json.dumps(videos)
if files:
data["files"] = json.dumps(files)
# Add kwargs to data, serializing dicts as JSON
for key, value in kwargs.items():
if isinstance(value, dict):
data[key] = json.dumps(value)
else:
data[key] = value
data = {k: v for k, v in data.items() if v is not None}
# Get raw SSE stream and parse into typed events
raw_stream = self._astream_post_form_data(endpoint, data, headers=headers)
async for event in self._parse_sse_events(raw_stream, workflow_run_output_event_from_dict):
yield event
async def cancel_workflow_run(
self, workflow_id: str, run_id: str, headers: Optional[Dict[str, str]] = None
) -> None:
"""Cancel a workflow run.
Args:
workflow_id: ID of the workflow
run_id: ID of the run to cancel
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
await self._apost(f"/workflows/{workflow_id}/runs/{run_id}/cancel", headers=headers)
async def create_memory(
self,
memory: str,
user_id: str,
topics: Optional[List[str]] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> UserMemorySchema:
"""Create a new user memory.
Args:
memory: The memory content to store
user_id: User ID to associate with the memory
topics: Optional list of topics to categorize the memory
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
UserMemorySchema: The created memory
Raises:
HTTPStatusError: On HTTP errors
"""
params = {"db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = UserMemoryCreateSchema(memory=memory, user_id=user_id, topics=topics)
data = await self._apost("/memories", payload.model_dump(exclude_none=True), params=params, headers=headers)
return UserMemorySchema.model_validate(data)
async def get_memory(
self,
memory_id: str,
user_id: Optional[str] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> UserMemorySchema:
"""Get a specific memory by ID.
Args:
memory_id: ID of the memory to retrieve
user_id: Optional user ID filter
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
UserMemorySchema: The requested memory
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params = {"db_id": db_id, "table": table, "user_id": user_id}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/memories/{memory_id}", params=params, headers=headers)
return UserMemorySchema.model_validate(data)
async def list_memories(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
topics: Optional[List[str]] = None,
search_content: Optional[str] = None,
limit: int = 20,
page: int = 1,
sort_by: str = "updated_at",
sort_order: str = "desc",
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[UserMemorySchema]:
"""List user memories with filtering and pagination.
Args:
user_id: Filter by user ID
agent_id: Filter by agent ID
team_id: Filter by team ID
topics: Filter by topics
search_content: Search within memory content
limit: Number of memories per page
page: Page number
sort_by: Field to sort by
sort_order: Sort order (asc or desc)
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[UserMemorySchema]: Paginated list of memories
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"limit": limit,
"page": page,
"sort_by": sort_by,
"sort_order": sort_order,
"db_id": db_id,
"table": table,
"user_id": user_id,
"agent_id": agent_id,
"team_id": team_id,
"topics": topics,
"search_content": search_content,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/memories", params=params, headers=headers)
return PaginatedResponse[UserMemorySchema].model_validate(data)
async def update_memory(
self,
memory_id: str,
memory: str,
user_id: str,
topics: Optional[List[str]] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> UserMemorySchema:
"""Update an existing memory.
Args:
memory_id: ID of the memory to update
memory: New memory content
user_id: User ID associated with the memory
topics: Optional new list of topics
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
UserMemorySchema: The updated memory
Raises:
HTTPStatusError: On HTTP errors
"""
params = {"db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = UserMemoryCreateSchema(memory=memory, user_id=user_id, topics=topics)
data = await self._apatch(
f"/memories/{memory_id}", payload.model_dump(exclude_none=True), params=params, headers=headers
)
return UserMemorySchema.model_validate(data)
async def delete_memory(
self,
memory_id: str,
user_id: Optional[str] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
"""Delete a specific memory.
Args:
memory_id: ID of the memory to delete
user_id: Optional user ID filter
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
params = {"db_id": db_id, "table": table, "user_id": user_id}
params = {k: v for k, v in params.items() if v is not None}
await self._adelete(f"/memories/{memory_id}", params=params, headers=headers)
async def delete_memories(
self,
memory_ids: List[str],
user_id: Optional[str] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
"""Delete multiple memories.
Args:
memory_ids: List of memory IDs to delete
user_id: Optional user ID filter
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
params = {"db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = DeleteMemoriesRequest(memory_ids=memory_ids, user_id=user_id)
await self._adelete("/memories", payload.model_dump(exclude_none=True), params=params, headers=headers)
async def get_memory_topics(
self,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> List[str]:
"""Get all unique memory topics.
Args:
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
List[str]: List of unique topic names
Raises:
HTTPStatusError: On HTTP errors
"""
params = {"db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
return await self._aget("/memory_topics", params=params, headers=headers)
async def get_user_memory_stats(
self,
limit: int = 20,
page: int = 1,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[UserStatsSchema]:
"""Get user memory statistics.
Args:
limit: Number of stats per page
page: Page number
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[UserStatsSchema]: Paginated user statistics
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"limit": limit, "page": page, "db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/user_memory_stats", params=params, headers=headers)
return PaginatedResponse[UserStatsSchema].model_validate(data)
async def optimize_memories(
self,
user_id: str,
model: Optional[str] = None,
apply: bool = True,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> OptimizeMemoriesResponse:
"""Optimize user memories.
Args:
user_id: User ID to optimize memories for
model: Optional model to use for optimization
apply: If True, automatically replace memories in database
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
OptimizeMemoriesResponse
"""
params: Dict[str, Any] = {"db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = OptimizeMemoriesRequest(user_id=user_id, model=model, apply=apply)
data = await self._apost(
"/optimize-memories", payload.model_dump(exclude_none=True), params=params, headers=headers
)
return OptimizeMemoriesResponse.model_validate(data)
# Session Operations
async def create_session(
self,
session_type: SessionType = SessionType.AGENT,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
session_name: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
workflow_id: Optional[str] = None,
db_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
"""Create a new session.
Args:
session_type: Type of session to create (agent, team, or workflow)
session_id: Optional session ID (auto-generated if not provided)
user_id: User ID to associate with the session
session_name: Optional session name
session_state: Optional initial session state
metadata: Optional session metadata
agent_id: Agent ID (for agent sessions)
team_id: Team ID (for team sessions)
workflow_id: Workflow ID (for workflow sessions)
db_id: Optional database ID to use
headers: HTTP headers to include in the request (optional)
Returns:
AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"type": session_type.value, "db_id": db_id}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = CreateSessionRequest(
session_id=session_id,
user_id=user_id,
session_name=session_name,
session_state=session_state,
metadata=metadata,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
)
data = await self._apost("/sessions", payload.model_dump(), params=params, headers=headers)
if session_type == SessionType.AGENT:
return AgentSessionDetailSchema.model_validate(data)
elif session_type == SessionType.TEAM:
return TeamSessionDetailSchema.model_validate(data)
else:
return WorkflowSessionDetailSchema.model_validate(data)
async def get_sessions(
self,
session_type: Optional[SessionType] = None,
component_id: Optional[str] = None,
user_id: Optional[str] = None,
session_name: Optional[str] = None,
limit: int = 20,
page: int = 1,
sort_by: str = "created_at",
sort_order: str = "desc",
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[SessionSchema]:
"""Get a specific session by ID.
Args:
session_type: Type of session (agent, team, or workflow)
component_id: Optional component ID filter
user_id: Optional user ID filter
session_name: Optional session name filter
limit: Number of sessions per page
page: Page number
sort_by: Field to sort by
sort_order: Sort order (asc or desc)
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[SessionSchema]
"""
params: Dict[str, Any] = {
"type": session_type.value if session_type else None,
"limit": str(limit),
"page": str(page),
"sort_by": sort_by,
"sort_order": sort_order,
"db_id": db_id,
"table": table,
"user_id": user_id,
"session_name": session_name,
"component_id": component_id,
}
params = {k: v for k, v in params.items() if v is not None}
response = await self._aget("/sessions", params=params, headers=headers)
data = response.get("data", [])
pagination_info = PaginationInfo.model_validate(response.get("meta", {}))
return PaginatedResponse[SessionSchema](
data=[SessionSchema.from_dict(session) for session in data],
meta=pagination_info,
)
async def get_session(
self,
session_id: str,
session_type: SessionType = SessionType.AGENT,
user_id: Optional[str] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
"""Get a specific session by ID.
Args:
session_id: ID of the session to retrieve
session_type: Type of session (agent, team, or workflow)
user_id: Optional user ID filter
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"type": session_type.value,
"user_id": user_id,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/sessions/{session_id}", params=params, headers=headers)
if session_type == SessionType.AGENT:
return AgentSessionDetailSchema.model_validate(data)
elif session_type == SessionType.TEAM:
return TeamSessionDetailSchema.model_validate(data)
else:
return WorkflowSessionDetailSchema.model_validate(data)
async def get_session_runs(
self,
session_id: str,
session_type: SessionType = SessionType.AGENT,
user_id: Optional[str] = None,
created_after: Optional[int] = None,
created_before: Optional[int] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]]:
"""Get all runs for a specific session.
Args:
session_id: ID of the session
session_type: Type of session (agent, team, or workflow)
user_id: Optional user ID filter
created_after: Filter runs created after this Unix timestamp
created_before: Filter runs created before this Unix timestamp
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
List of runs (RunSchema, TeamRunSchema, or WorkflowRunSchema)
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"type": session_type.value,
"user_id": user_id,
"created_after": created_after,
"created_before": created_before,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/sessions/{session_id}/runs", params=params, headers=headers)
# Parse runs based on session type and run content
runs: List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]] = []
for run in data:
if run.get("workflow_id") is not None:
runs.append(WorkflowRunSchema.model_validate(run))
elif run.get("team_id") is not None:
runs.append(TeamRunSchema.model_validate(run))
else:
runs.append(RunSchema.model_validate(run))
return runs
async def get_session_run(
self,
session_id: str,
run_id: str,
session_type: SessionType = SessionType.AGENT,
user_id: Optional[str] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Union[RunSchema, TeamRunSchema, WorkflowRunSchema]:
"""Get a specific run from a session.
Args:
session_id: ID of the session
run_id: ID of the run to retrieve
session_type: Type of session (agent, team, or workflow)
user_id: Optional user ID filter
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
RunSchema, TeamRunSchema, or WorkflowRunSchema
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"type": session_type.value,
"user_id": user_id,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/sessions/{session_id}/runs/{run_id}", params=params, headers=headers)
# Return appropriate schema based on run type
if data.get("workflow_id") is not None:
return WorkflowRunSchema.model_validate(data)
elif data.get("team_id") is not None:
return TeamRunSchema.model_validate(data)
else:
return RunSchema.model_validate(data)
async def delete_session(
self,
session_id: str,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
) -> None:
"""Delete a specific session.
Args:
session_id: ID of the session to delete
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"user_id": user_id,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
await self._adelete(f"/sessions/{session_id}", params=params, headers=headers)
async def delete_sessions(
self,
session_ids: List[str],
session_types: List[SessionType],
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
) -> None:
"""Delete multiple sessions.
Args:
session_ids: List of session IDs to delete
session_types: List of session types corresponding to each session ID
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"user_id": user_id,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = DeleteSessionRequest(session_ids=session_ids, session_types=session_types)
await self._adelete("/sessions", payload.model_dump(mode="json"), params=params, headers=headers)
async def rename_session(
self,
session_id: str,
session_name: str,
session_type: SessionType = SessionType.AGENT,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
user_id: Optional[str] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
"""Rename a session.
Args:
session_id: ID of the session to rename
session_name: New name for the session
session_type: Type of session (agent, team, or workflow)
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"type": session_type.value,
"user_id": user_id,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
payload = {"session_name": session_name}
data = await self._apost(f"/sessions/{session_id}/rename", payload, params=params, headers=headers)
if session_type == SessionType.AGENT:
return AgentSessionDetailSchema.model_validate(data)
elif session_type == SessionType.TEAM:
return TeamSessionDetailSchema.model_validate(data)
else:
return WorkflowSessionDetailSchema.model_validate(data)
async def update_session(
self,
session_id: str,
session_type: SessionType = SessionType.AGENT,
session_name: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
summary: Optional[Dict[str, Any]] = None,
user_id: Optional[str] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
"""Update session properties.
Args:
session_id: ID of the session to update
session_type: Type of session (agent, team, or workflow)
session_name: Optional new session name
session_state: Optional new session state
metadata: Optional new metadata
summary: Optional new summary
user_id: Optional user ID
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"type": session_type.value,
"user_id": user_id,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = UpdateSessionRequest(
session_name=session_name,
session_state=session_state,
metadata=metadata,
summary=summary,
)
data = await self._apatch(
f"/sessions/{session_id}", payload.model_dump(exclude_none=True), params=params, headers=headers
)
if session_type == SessionType.AGENT:
return AgentSessionDetailSchema.model_validate(data)
elif session_type == SessionType.TEAM:
return TeamSessionDetailSchema.model_validate(data)
else:
return WorkflowSessionDetailSchema.model_validate(data)
# Eval Operations
async def list_eval_runs(
self,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
workflow_id: Optional[str] = None,
model_id: Optional[str] = None,
filter_type: Optional[EvalFilterType] = None,
eval_types: Optional[List[EvalType]] = None,
limit: int = 20,
page: int = 1,
sort_by: str = "created_at",
sort_order: str = "desc",
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[EvalSchema]:
"""List evaluation runs with filtering and pagination.
Args:
agent_id: Filter by agent ID
team_id: Filter by team ID
workflow_id: Filter by workflow ID
model_id: Filter by model ID
filter_type: Filter type (agent, team, workflow)
eval_types: List of eval types to filter by (accuracy, performance, reliability)
limit: Number of eval runs per page
page: Page number
sort_by: Field to sort by
sort_order: Sort order (asc or desc)
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[EvalSchema]: Paginated list of evaluation runs
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"limit": limit,
"page": page,
"sort_by": sort_by,
"sort_order": sort_order,
"agent_id": agent_id,
"team_id": team_id,
"workflow_id": workflow_id,
"model_id": model_id,
"type": filter_type.value if filter_type else None,
"eval_types": ",".join(et.value for et in eval_types) if eval_types else None,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/eval-runs", params=params, headers=headers)
return PaginatedResponse[EvalSchema].model_validate(data)
async def get_eval_run(
self,
eval_run_id: str,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> EvalSchema:
"""Get a specific evaluation run by ID.
Args:
eval_run_id: ID of the evaluation run to retrieve
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
EvalSchema: The evaluation run details
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/eval-runs/{eval_run_id}", params=params, headers=headers)
return EvalSchema.model_validate(data)
async def delete_eval_runs(
self,
eval_run_ids: List[str],
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> None:
"""Delete multiple evaluation runs.
Args:
eval_run_ids: List of evaluation run IDs to delete
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = DeleteEvalRunsRequest(eval_run_ids=eval_run_ids)
await self._adelete("/eval-runs", payload.model_dump(), params=params, headers=headers)
async def update_eval_run(
self,
eval_run_id: str,
name: str,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> EvalSchema:
"""Update an evaluation run (rename).
Args:
eval_run_id: ID of the evaluation run to update
name: New name for the evaluation run
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
EvalSchema: The updated evaluation run
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = UpdateEvalRunRequest(name=name)
data = await self._apatch(f"/eval-runs/{eval_run_id}", payload.model_dump(), params=params, headers=headers)
return EvalSchema.model_validate(data)
async def run_eval(
self,
eval_type: EvalType,
input_text: str,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
model_id: Optional[str] = None,
model_provider: Optional[str] = None,
expected_output: Optional[str] = None,
expected_tool_calls: Optional[List[str]] = None,
num_iterations: int = 1,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Optional[EvalSchema]:
"""Execute an evaluation on an agent or team.
Args:
eval_type: Type of evaluation (accuracy, performance, reliability)
input_text: Input text for the evaluation
agent_id: Agent ID to evaluate (mutually exclusive with team_id)
team_id: Team ID to evaluate (mutually exclusive with agent_id)
model_id: Optional model ID to use (overrides agent/team default)
model_provider: Optional model provider to use
expected_output: Expected output for accuracy evaluations
expected_tool_calls: Expected tool calls for reliability evaluations
num_iterations: Number of iterations for performance evaluations
db_id: Optional database ID to use
table: Optional table name to use
headers: HTTP headers to include in the request (optional)
Returns:
EvalSchema: The evaluation result, or None if evaluation against remote agents
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
# Use schema for type-safe payload construction
payload = EvalRunInput(
eval_type=eval_type,
input=input_text,
agent_id=agent_id,
team_id=team_id,
model_id=model_id,
model_provider=model_provider,
expected_output=expected_output,
expected_tool_calls=expected_tool_calls,
num_iterations=num_iterations,
)
endpoint = "/eval-runs"
data = await self._apost(
endpoint, payload.model_dump(exclude_none=True, mode="json"), params=params, headers=headers
)
if data is None:
return None
return EvalSchema.model_validate(data)
# Knowledge Operations
    async def _apost_multipart(
        self,
        endpoint: str,
        form_data: Dict[str, Any],
        files: Optional[Dict[str, Any]] = None,
        params: Optional[Dict[str, Any]] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> Any:
        """Execute asynchronous POST request with multipart form data and optional files.

        Args:
            endpoint: API endpoint path (without base URL)
            form_data: Form data dictionary
            files: Optional files dictionary for multipart upload
            params: Query parameters (optional)
            headers: HTTP headers to include in the request (optional)

        Returns:
            Parsed JSON response

        Raises:
            RemoteServerUnavailableError: When the remote server is unavailable
            HTTPStatusError: On HTTP errors (4xx, 5xx)
        """
        url = f"{self.base_url}{endpoint}"
        async_client = get_default_async_client()
        try:
            # Only pass `files` when present: with it the client emits
            # multipart/form-data, without it a plain form-encoded body.
            if files:
                response = await async_client.post(
                    url, data=form_data, files=files, params=params, headers=headers or {}, timeout=self.timeout
                )
            else:
                response = await async_client.post(
                    url, data=form_data, params=params, headers=headers or {}, timeout=self.timeout
                )
            response.raise_for_status()
            return response.json()
        # Connection-level failures are translated into the client's own
        # RemoteServerUnavailableError; HTTPStatusError propagates untouched.
        except (ConnectError, ConnectTimeout) as e:
            raise RemoteServerUnavailableError(
                message=f"Failed to connect to remote server at {self.base_url}",
                base_url=self.base_url,
                original_error=e,
            ) from e
        except TimeoutException as e:
            raise RemoteServerUnavailableError(
                message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds.",
                base_url=self.base_url,
                original_error=e,
            ) from e
async def upload_knowledge_content(
self,
name: Optional[str] = None,
description: Optional[str] = None,
url: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
file: Optional[Union[File, "UploadFile"]] = None,
text_content: Optional[str] = None,
reader_id: Optional[str] = None,
chunker: Optional[str] = None,
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
"""Upload content to the knowledge base.
Args:
name: Content name (auto-generated from file/URL if not provided)
description: Content description
url: URL to fetch content from (can be a single URL string or a JSON-encoded array of URLs)
metadata: Metadata dictionary for the content
file: File object containing content (bytes or file-like object), filename, and mime_type. Can also be a FastAPI UploadFile.
text_content: Raw text content to process
reader_id: ID of the reader to use for processing
chunker: Chunking strategy to apply
chunk_size: Chunk size for processing
chunk_overlap: Chunk overlap for processing
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
ContentResponseSchema: The uploaded content info
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"db_id": db_id, "knowledge_id": knowledge_id}
params = {k: v for k, v in params.items() if v is not None}
# Build multipart form data
form_data: Dict[str, Any] = {}
files: Optional[Dict[str, Any]] = None
if name:
form_data["name"] = name
if description:
form_data["description"] = description
if url:
form_data["url"] = url
if metadata:
form_data["metadata"] = json.dumps(metadata)
if text_content:
form_data["text_content"] = text_content
if reader_id:
form_data["reader_id"] = reader_id
if chunker:
form_data["chunker"] = chunker
if chunk_size:
form_data["chunk_size"] = str(chunk_size)
if chunk_overlap:
form_data["chunk_overlap"] = str(chunk_overlap)
if file:
# Handle both agno.media.File and FastAPI UploadFile
if isinstance(file, UploadFile):
files = {
"file": (file.filename or "upload", file.file, file.content_type or "application/octet-stream")
}
elif file.content:
files = {
"file": (file.filename or "upload", file.content, file.mime_type or "application/octet-stream")
}
data = await self._apost_multipart("/knowledge/content", form_data, files=files, params=params, headers=headers)
return ContentResponseSchema.model_validate(data)
async def update_knowledge_content(
self,
content_id: str,
name: Optional[str] = None,
description: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
reader_id: Optional[str] = None,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
"""Update content properties.
Args:
content_id: ID of the content to update
name: New content name
description: New content description
metadata: New metadata dictionary
reader_id: ID of the reader to use
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
ContentResponseSchema: The updated content
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {"db_id": db_id, "knowledge_id": knowledge_id}
params = {k: v for k, v in params.items() if v is not None}
form_data: Dict[str, Any] = {}
if name:
form_data["name"] = name
if description:
form_data["description"] = description
if metadata:
form_data["metadata"] = json.dumps(metadata)
if reader_id:
form_data["reader_id"] = reader_id
data = await self._arequest(
"PATCH", f"/knowledge/content/{content_id}", data=form_data, params=params, headers=headers, as_form=True
)
return ContentResponseSchema.model_validate(data)
async def list_knowledge_content(
self,
limit: Optional[int] = 20,
page: Optional[int] = 1,
sort_by: Optional[str] = "created_at",
sort_order: Optional[str] = "desc",
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[ContentResponseSchema]:
"""List all content in the knowledge base.
Args:
limit: Number of content entries per page
page: Page number
sort_by: Field to sort by
sort_order: Sort order (asc or desc)
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[ContentResponseSchema]: Paginated list of content
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"limit": limit,
"page": page,
"sort_by": sort_by,
"sort_order": sort_order,
"db_id": db_id,
"knowledge_id": knowledge_id,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/knowledge/content", params=params, headers=headers)
return PaginatedResponse[ContentResponseSchema].model_validate(data)
async def get_knowledge_content(
self,
content_id: str,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
"""Get a specific content by ID.
Args:
content_id: ID of the content to retrieve
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
ContentResponseSchema: The content details
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {"db_id": db_id, "knowledge_id": knowledge_id}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/knowledge/content/{content_id}", params=params, headers=headers)
return ContentResponseSchema.model_validate(data)
async def delete_knowledge_content(
self,
content_id: str,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
"""Delete a specific content.
Args:
content_id: ID of the content to delete
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
ContentResponseSchema: The deleted content info
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {"db_id": db_id, "knowledge_id": knowledge_id}
params = {k: v for k, v in params.items() if v is not None}
endpoint = f"/knowledge/content/{content_id}"
data = await self._arequest("DELETE", endpoint, params=params, headers=headers)
return ContentResponseSchema.model_validate(data)
async def delete_all_knowledge_content(
self,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> str:
"""Delete all content from the knowledge base.
WARNING: This is a destructive operation that cannot be undone.
Args:
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
str: "success" if successful
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"db_id": db_id, "knowledge_id": knowledge_id}
params = {k: v for k, v in params.items() if v is not None}
endpoint = "/knowledge/content"
return await self._arequest("DELETE", endpoint, params=params, headers=headers)
async def get_knowledge_content_status(
self,
content_id: str,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> ContentStatusResponse:
"""Get the processing status of a content item.
Args:
content_id: ID of the content
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
ContentStatusResponse: The content processing status
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"db_id": db_id, "knowledge_id": knowledge_id}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/knowledge/content/{content_id}/status", params=params, headers=headers)
return ContentStatusResponse.model_validate(data)
async def search_knowledge(
self,
query: str,
max_results: Optional[int] = None,
filters: Optional[Dict[str, Any]] = None,
search_type: Optional[str] = None,
vector_db_ids: Optional[List[str]] = None,
limit: int = 20,
page: int = 1,
db_id: Optional[str] = None,
knowledge_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[VectorSearchResult]:
"""Search the knowledge base.
Args:
query: Search query string
max_results: Maximum number of results to return from search
filters: Optional filters to apply
search_type: Type of search (vector, keyword, hybrid)
vector_db_ids: Optional list of vector DB IDs to search
limit: Number of results per page
page: Page number
db_id: Optional database ID to use
knowledge_id: Optional knowledge instance ID for content isolation
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[VectorSearchResult]: Paginated search results
Raises:
HTTPStatusError: On HTTP errors
"""
payload: Dict[str, Any] = {"query": query}
if max_results:
payload["max_results"] = max_results
if filters:
payload["filters"] = filters
if search_type:
payload["search_type"] = search_type
if vector_db_ids:
payload["vector_db_ids"] = vector_db_ids
payload["meta"] = {"limit": limit, "page": page}
if db_id:
payload["db_id"] = db_id
if knowledge_id:
payload["knowledge_id"] = knowledge_id
data = await self._apost("/knowledge/search", payload, headers=headers)
return PaginatedResponse[VectorSearchResult].model_validate(data)
async def get_knowledge_config(
self,
db_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> KnowledgeConfigResponse:
"""Get knowledge base configuration.
Returns available readers, chunkers, vector DBs, and filters.
Args:
db_id: Optional database ID to filter by
headers: HTTP headers to include in the request (optional)
Returns:
KnowledgeConfigResponse: Knowledge configuration
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"db_id": db_id}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/knowledge/config", params=params, headers=headers)
return KnowledgeConfigResponse.model_validate(data)
# Trace Operations
async def get_traces(
self,
run_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
workflow_id: Optional[str] = None,
status: Optional[str] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
page: int = 1,
limit: int = 20,
db_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[TraceSummary]:
"""List execution traces with filtering and pagination.
Traces provide observability into agent execution flows, model invocations,
tool calls, errors, and performance bottlenecks.
Args:
run_id: Filter by run ID
session_id: Filter by session ID
user_id: Filter by user ID
agent_id: Filter by agent ID
team_id: Filter by team ID
workflow_id: Filter by workflow ID
status: Filter by status (OK, ERROR)
start_time: Filter traces starting after this time (ISO 8601 format)
end_time: Filter traces ending before this time (ISO 8601 format)
page: Page number (1-indexed)
limit: Number of traces per page
db_id: Optional database ID to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[TraceSummary]: Paginated list of trace summaries
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"page": page,
"limit": limit,
"run_id": run_id,
"session_id": session_id,
"user_id": user_id,
"agent_id": agent_id,
"team_id": team_id,
"workflow_id": workflow_id,
"status": status,
"start_time": start_time,
"end_time": end_time,
"db_id": db_id,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/traces", params=params, headers=headers)
return PaginatedResponse[TraceSummary].model_validate(data)
async def get_trace(
self,
trace_id: str,
span_id: Optional[str] = None,
run_id: Optional[str] = None,
db_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Union[TraceDetail, TraceNode]:
"""Get detailed trace information with hierarchical span tree, or a specific span.
Without span_id: Returns the full trace with hierarchical span tree including
trace metadata, timing, status, and all spans organized hierarchically.
With span_id: Returns details for a specific span within the trace including
span metadata, timing, status, and type-specific attributes.
Args:
trace_id: ID of the trace to retrieve
span_id: Optional span ID to retrieve a specific span
run_id: Optional run ID to retrieve trace for
db_id: Optional database ID to use
headers: HTTP headers to include in the request (optional)
Returns:
TraceDetail if no span_id provided, TraceNode if span_id provided
Raises:
HTTPStatusError: On HTTP errors (404 if not found)
"""
params: Dict[str, Any] = {
"span_id": span_id,
"run_id": run_id,
"db_id": db_id,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget(f"/traces/{trace_id}", params=params, headers=headers)
# If span_id was provided, return TraceNode, otherwise TraceDetail
if span_id:
return TraceNode.model_validate(data)
return TraceDetail.model_validate(data)
async def get_trace_session_stats(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
workflow_id: Optional[str] = None,
start_time: Optional[str] = None,
end_time: Optional[str] = None,
page: int = 1,
limit: int = 20,
db_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[TraceSessionStats]:
"""Get aggregated trace statistics grouped by session ID.
Provides insights into total traces per session, first and last trace
timestamps, and associated user and agent information.
Args:
user_id: Filter by user ID
agent_id: Filter by agent ID
team_id: Filter by team ID
workflow_id: Filter by workflow ID
start_time: Filter sessions with traces created after this time (ISO 8601 format)
end_time: Filter sessions with traces created before this time (ISO 8601 format)
page: Page number (1-indexed)
limit: Number of sessions per page
db_id: Optional database ID to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[TraceSessionStats]: Paginated list of session statistics
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"page": page,
"limit": limit,
"user_id": user_id,
"agent_id": agent_id,
"team_id": team_id,
"workflow_id": workflow_id,
"start_time": start_time,
"end_time": end_time,
"db_id": db_id,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/trace_session_stats", params=params, headers=headers)
return PaginatedResponse[TraceSessionStats].model_validate(data)
async def search_traces(
self,
filter_expr: Optional[Dict[str, Any]] = None,
group_by: TraceSearchGroupBy = TraceSearchGroupBy.RUN,
page: int = 1,
limit: int = 20,
db_id: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> Union[PaginatedResponse[TraceDetail], PaginatedResponse[TraceSessionStats]]:
"""Search traces using the FilterExpr DSL for composable queries.
Supports operators: EQ, NEQ, GT, GTE, LT, LTE, IN, CONTAINS, STARTSWITH,
AND, OR, NOT.
Args:
filter_expr: FilterExpr DSL as a dict (e.g., {"op": "EQ", "key": "status", "value": "OK"})
group_by: Grouping mode - TraceSearchGroupBy.RUN (default) returns TraceDetail,
TraceSearchGroupBy.SESSION returns TraceSessionStats
page: Page number (1-indexed)
limit: Number of traces per page (max 100)
db_id: Optional database ID to use
headers: HTTP headers to include in the request (optional)
Returns:
PaginatedResponse[TraceDetail] if group_by=RUN, PaginatedResponse[TraceSessionStats] if group_by=SESSION
Raises:
HTTPStatusError: On HTTP errors (400 for invalid filter)
"""
body: Dict[str, Any] = {
"filter": filter_expr,
"group_by": group_by.value,
"page": page,
"limit": limit,
}
params: Dict[str, Any] = {}
if db_id is not None:
params["db_id"] = db_id
data = await self._apost("/traces/search", data=body, params=params if params else None, headers=headers)
if group_by == TraceSearchGroupBy.SESSION:
return PaginatedResponse[TraceSessionStats].model_validate(data)
return PaginatedResponse[TraceDetail].model_validate(data)
# Metrics Operations
async def get_metrics(
self,
starting_date: Optional[date] = None,
ending_date: Optional[date] = None,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> MetricsResponse:
"""Retrieve AgentOS metrics and analytics data for a specified date range.
If no date range is specified, returns all available metrics.
Args:
starting_date: Starting date for metrics range (YYYY-MM-DD format)
ending_date: Ending date for metrics range (YYYY-MM-DD format)
db_id: Optional database ID to use
table: Optional database table to use
headers: HTTP headers to include in the request (optional)
Returns:
MetricsResponse: Metrics data including daily aggregated metrics
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {
"starting_date": starting_date.strftime("%Y-%m-%d") if starting_date else None,
"ending_date": ending_date.strftime("%Y-%m-%d") if ending_date else None,
"db_id": db_id,
"table": table,
}
params = {k: v for k, v in params.items() if v is not None}
data = await self._aget("/metrics", params=params, headers=headers)
return MetricsResponse.model_validate(data)
async def refresh_metrics(
self,
db_id: Optional[str] = None,
table: Optional[str] = None,
headers: Optional[Dict[str, str]] = None,
) -> List[DayAggregatedMetrics]:
"""Manually trigger recalculation of system metrics from raw data.
This operation analyzes system activity logs and regenerates aggregated metrics.
Useful for ensuring metrics are up-to-date or after system maintenance.
Args:
db_id: Optional database ID to use
table: Optional database table to use
headers: HTTP headers to include in the request (optional)
Returns:
List[DayAggregatedMetrics]: List of refreshed daily aggregated metrics
Raises:
HTTPStatusError: On HTTP errors
"""
params: Dict[str, Any] = {"db_id": db_id, "table": table}
params = {k: v for k, v in params.items() if v is not None}
data = await self._apost("/metrics/refresh", params=params, headers=headers)
return [DayAggregatedMetrics.model_validate(m) for m in data]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/client/os.py",
"license": "Apache License 2.0",
"lines": 2339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/remote/base.py | import time
from abc import abstractmethod
from dataclasses import dataclass, field
from datetime import date
from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Literal, Optional, Sequence, Tuple, Union
from pydantic import BaseModel
from agno.db.base import SessionType
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.models.response import ToolExecution
from agno.run.agent import RunOutput, RunOutputEvent
from agno.run.team import TeamRunOutput, TeamRunOutputEvent
from agno.run.workflow import WorkflowRunOutput, WorkflowRunOutputEvent
if TYPE_CHECKING:
from fastapi import UploadFile
from agno.client import AgentOSClient
from agno.client.a2a import A2AClient
from agno.client.a2a.schemas import AgentCard
from agno.os.routers.evals.schemas import EvalSchema
from agno.os.routers.knowledge.schemas import (
ConfigResponseSchema,
ContentResponseSchema,
ContentStatusResponse,
VectorSearchResult,
)
from agno.os.routers.memory.schemas import OptimizeMemoriesResponse, UserMemorySchema, UserStatsSchema
from agno.os.routers.metrics.schemas import DayAggregatedMetrics, MetricsResponse
from agno.os.routers.traces.schemas import TraceDetail, TraceNode, TraceSessionStats, TraceSummary
from agno.os.schema import (
AgentSessionDetailSchema,
ConfigResponse,
PaginatedResponse,
RunSchema,
SessionSchema,
TeamRunSchema,
TeamSessionDetailSchema,
WorkflowRunSchema,
WorkflowSessionDetailSchema,
)
@dataclass
class RemoteDb:
    """Proxy for a database living behind a remote AgentOS instance.

    Every operation is delegated to the wrapped AgentOSClient; this object only
    carries the remote database id plus the per-domain table names discovered
    from the remote OS configuration.
    """

    id: str
    client: "AgentOSClient"
    session_table_name: Optional[str] = None
    knowledge_table_name: Optional[str] = None
    memory_table_name: Optional[str] = None
    metrics_table_name: Optional[str] = None
    eval_table_name: Optional[str] = None
    traces_table_name: Optional[str] = None
    spans_table_name: Optional[str] = None
    culture_table_name: Optional[str] = None

    @classmethod
    def from_config(
        cls,
        db_id: str,
        client: "AgentOSClient",
        config: "ConfigResponse",
    ) -> Optional["RemoteDb"]:
        """Build a RemoteDb from a remote OS ConfigResponse.

        For each domain (sessions, knowledge, memory, metrics, evals, traces)
        the table name is taken from the first table of the first db entry in
        that domain whose db_id matches.

        Args:
            db_id (str): The id of the remote database
            client: The AgentOSClient for remote operations.
            config: The ConfigResponse containing database table information.

        Returns:
            A RemoteDb instance. (The Optional return type is kept for
            interface stability; the current implementation always returns
            an instance.)
        """

        def first_table(domain_config: Any) -> Optional[str]:
            # Resolve the first table of the first db entry matching db_id, if any.
            if not config or not domain_config or domain_config.dbs is None:
                return None
            matching = [db for db in domain_config.dbs if db.db_id == db_id]
            return matching[0].tables[0] if matching and matching[0].tables else None

        # NOTE(review): spans_table_name and culture_table_name are not derivable
        # from ConfigResponse here and keep their None defaults.
        return cls(
            id=db_id,
            client=client,
            session_table_name=first_table(config.session if config else None),
            knowledge_table_name=first_table(config.knowledge if config else None),
            memory_table_name=first_table(config.memory if config else None),
            metrics_table_name=first_table(config.metrics if config else None),
            eval_table_name=first_table(config.evals if config else None),
            traces_table_name=first_table(config.traces if config else None),
        )

    # SESSIONS
    async def get_sessions(self, **kwargs: Any) -> "PaginatedResponse[SessionSchema]":
        return await self.client.get_sessions(**kwargs)

    async def get_session(
        self, session_id: str, **kwargs: Any
    ) -> Union["AgentSessionDetailSchema", "TeamSessionDetailSchema", "WorkflowSessionDetailSchema"]:
        return await self.client.get_session(session_id, **kwargs)

    async def get_session_runs(
        self, session_id: str, **kwargs: Any
    ) -> List[Union["RunSchema", "TeamRunSchema", "WorkflowRunSchema"]]:
        return await self.client.get_session_runs(session_id, **kwargs)

    async def create_session(
        self,
        session_id: Optional[str] = None,
        session_name: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        **kwargs: Any,
    ) -> Union["AgentSessionDetailSchema", "TeamSessionDetailSchema", "WorkflowSessionDetailSchema"]:
        return await self.client.create_session(
            session_id=session_id,
            session_name=session_name,
            session_state=session_state,
            metadata=metadata,
            user_id=user_id,
            agent_id=agent_id,
            team_id=team_id,
            workflow_id=workflow_id,
            **kwargs,
        )

    async def get_session_run(
        self, session_id: str, run_id: str, **kwargs: Any
    ) -> Union["RunSchema", "TeamRunSchema", "WorkflowRunSchema"]:
        return await self.client.get_session_run(session_id, run_id, **kwargs)

    async def rename_session(
        self, session_id: str, session_name: str, **kwargs: Any
    ) -> Union["AgentSessionDetailSchema", "TeamSessionDetailSchema", "WorkflowSessionDetailSchema"]:
        return await self.client.rename_session(session_id, session_name, **kwargs)

    async def update_session(
        self, session_id: str, session_type: SessionType, **kwargs: Any
    ) -> Union["AgentSessionDetailSchema", "TeamSessionDetailSchema", "WorkflowSessionDetailSchema"]:
        return await self.client.update_session(session_id=session_id, session_type=session_type, **kwargs)

    async def delete_session(self, session_id: str, **kwargs: Any) -> None:
        return await self.client.delete_session(session_id, **kwargs)

    async def delete_sessions(self, session_ids: List[str], session_types: List[SessionType], **kwargs: Any) -> None:
        return await self.client.delete_sessions(session_ids, session_types, **kwargs)

    # MEMORIES
    async def create_memory(self, memory: str, topics: List[str], user_id: str, **kwargs: Any) -> "UserMemorySchema":
        return await self.client.create_memory(memory=memory, topics=topics, user_id=user_id, **kwargs)

    async def delete_memory(self, memory_id: str, **kwargs: Any) -> None:
        return await self.client.delete_memory(memory_id, **kwargs)

    async def delete_memories(self, memory_ids: List[str], **kwargs: Any) -> None:
        return await self.client.delete_memories(memory_ids, **kwargs)

    async def get_memory(self, memory_id: str, **kwargs: Any) -> "UserMemorySchema":
        return await self.client.get_memory(memory_id, **kwargs)

    async def get_memories(self, user_id: Optional[str] = None, **kwargs: Any) -> "PaginatedResponse[UserMemorySchema]":
        return await self.client.list_memories(user_id, **kwargs)

    async def update_memory(self, memory_id: str, **kwargs: Any) -> "UserMemorySchema":
        return await self.client.update_memory(memory_id, **kwargs)

    async def get_user_memory_stats(self, **kwargs: Any) -> "PaginatedResponse[UserStatsSchema]":
        return await self.client.get_user_memory_stats(**kwargs)

    async def optimize_memories(self, **kwargs: Any) -> "OptimizeMemoriesResponse":
        return await self.client.optimize_memories(**kwargs)

    async def get_memory_topics(self, **kwargs: Any) -> List[str]:
        return await self.client.get_memory_topics(**kwargs)

    # TRACES
    async def get_traces(self, **kwargs: Any) -> "PaginatedResponse[TraceSummary]":
        return await self.client.get_traces(**kwargs)

    async def get_trace(self, trace_id: str, **kwargs: Any) -> Union["TraceDetail", "TraceNode"]:
        return await self.client.get_trace(trace_id, **kwargs)

    async def get_trace_session_stats(self, **kwargs: Any) -> "PaginatedResponse[TraceSessionStats]":
        return await self.client.get_trace_session_stats(**kwargs)

    async def search_traces(
        self, **kwargs: Any
    ) -> Union["PaginatedResponse[TraceDetail]", "PaginatedResponse[TraceSessionStats]"]:
        return await self.client.search_traces(**kwargs)

    # EVALS
    async def get_eval_runs(self, **kwargs: Any) -> "PaginatedResponse[EvalSchema]":
        return await self.client.list_eval_runs(**kwargs)

    async def get_eval_run(self, eval_run_id: str, **kwargs: Any) -> "EvalSchema":
        return await self.client.get_eval_run(eval_run_id, **kwargs)

    async def delete_eval_runs(self, eval_run_ids: List[str], **kwargs: Any) -> None:
        return await self.client.delete_eval_runs(eval_run_ids, **kwargs)

    async def update_eval_run(self, eval_run_id: str, **kwargs: Any) -> "EvalSchema":
        return await self.client.update_eval_run(eval_run_id, **kwargs)

    async def create_eval_run(self, **kwargs: Any) -> Optional["EvalSchema"]:
        return await self.client.run_eval(**kwargs)

    # METRICS
    async def get_metrics(
        self, starting_date: Optional[date] = None, ending_date: Optional[date] = None, **kwargs: Any
    ) -> "MetricsResponse":
        return await self.client.get_metrics(starting_date=starting_date, ending_date=ending_date, **kwargs)

    async def refresh_metrics(self, **kwargs: Any) -> List["DayAggregatedMetrics"]:
        return await self.client.refresh_metrics(**kwargs)

    # OTHER
    async def migrate_database(self, target_version: Optional[str] = None) -> None:
        """Migrate the remote database to a target version.

        Args:
            target_version: Target version to migrate to
        """
        return await self.client.migrate_database(self.id, target_version)
@dataclass
class RemoteKnowledge:
    """Proxy for knowledge operations on a remote AgentOS instance.

    Every call is forwarded to the underlying AgentOSClient, scoped to the
    configured contents database and knowledge instance id.
    """

    client: "AgentOSClient"
    contents_db: Optional[RemoteDb] = None
    knowledge_id: Optional[str] = None

    def _get_db_id(self) -> Optional[str]:
        # The db id is only available when a contents database is configured.
        if self.contents_db is None:
            return None
        return self.contents_db.id

    def _scope(self) -> Dict[str, Any]:
        # Keyword arguments shared by every scoped content operation.
        return {"db_id": self._get_db_id(), "knowledge_id": self.knowledge_id}

    async def get_config(self, headers: Optional[Dict[str, str]] = None) -> "ConfigResponseSchema":
        return await self.client.get_knowledge_config(db_id=self._get_db_id(), headers=headers)

    async def search_knowledge(self, query: str, **kwargs: Any) -> "PaginatedResponse[VectorSearchResult]":
        return await self.client.search_knowledge(query, **self._scope(), **kwargs)

    async def upload_content(
        self,
        name: Optional[str] = None,
        description: Optional[str] = None,
        url: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        file: Optional[Union[File, "UploadFile"]] = None,
        text_content: Optional[str] = None,
        reader_id: Optional[str] = None,
        chunker: Optional[str] = None,
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None,
        **kwargs: Any,
    ) -> "ContentResponseSchema":
        return await self.client.upload_knowledge_content(
            name=name,
            description=description,
            url=url,
            metadata=metadata,
            file=file,
            text_content=text_content,
            reader_id=reader_id,
            chunker=chunker,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            **self._scope(),
            **kwargs,
        )

    async def update_content(
        self,
        content_id: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
        reader_id: Optional[str] = None,
        **kwargs: Any,
    ) -> "ContentResponseSchema":
        return await self.client.update_knowledge_content(
            content_id=content_id,
            name=name,
            description=description,
            metadata=metadata,
            reader_id=reader_id,
            **self._scope(),
            **kwargs,
        )

    async def get_content(
        self,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        **kwargs: Any,
    ) -> "PaginatedResponse[ContentResponseSchema]":
        return await self.client.list_knowledge_content(
            limit=limit,
            page=page,
            sort_by=sort_by,
            sort_order=sort_order,
            **self._scope(),
            **kwargs,
        )

    async def get_content_by_id(self, content_id: str, **kwargs: Any) -> "ContentResponseSchema":
        return await self.client.get_knowledge_content(content_id=content_id, **self._scope(), **kwargs)

    async def delete_content_by_id(self, content_id: str, **kwargs: Any) -> None:
        await self.client.delete_knowledge_content(content_id=content_id, **self._scope(), **kwargs)

    async def delete_all_content(self, **kwargs: Any) -> None:
        await self.client.delete_all_knowledge_content(**self._scope(), **kwargs)

    async def get_content_status(self, content_id: str, **kwargs: Any) -> "ContentStatusResponse":
        return await self.client.get_knowledge_content_status(content_id=content_id, **self._scope(), **kwargs)
@dataclass
class BaseRemote:
# Private cache for OS config with TTL: (config, timestamp)
_cached_config: Optional[Tuple["ConfigResponse", float]] = field(default=None, init=False, repr=False)
# Private cache for agent card with TTL: (agent_card, timestamp)
_cached_agent_card: Optional[Tuple[Optional["AgentCard"], float]] = field(default=None, init=False, repr=False)
def __init__(
self,
base_url: str,
timeout: float = 60.0,
protocol: Literal["agentos", "a2a"] = "agentos",
a2a_protocol: Literal["json-rpc", "rest"] = "rest",
config_ttl: float = 300.0,
):
"""Initialize BaseRemote for remote execution.
Supports two protocols:
- "agentos": Agno's proprietary AgentOS REST API (default)
- "a2a": A2A (Agent-to-Agent) protocol for cross-framework communication
For local execution, provide agent/team/workflow instances.
For remote execution, provide base_url.
Args:
base_url: Base URL for remote instance (e.g., "http://localhost:7777")
timeout: Request timeout in seconds (default: 60)
protocol: Communication protocol - "agentos" (default) or "a2a"
a2a_protocol: For A2A protocol only - Whether to use JSON-RPC or REST protocol.
config_ttl: Time-to-live for cached config in seconds (default: 300)
"""
self.base_url = base_url.rstrip("/")
self.timeout: float = timeout
self.protocol = protocol
self.a2a_protocol = a2a_protocol
self.config_ttl: float = config_ttl
self._cached_config = None
self._cached_agent_card = None
self.agentos_client = None
self.a2a_client = None
if protocol == "agentos":
self.agentos_client = self.get_os_client()
elif protocol == "a2a":
self.a2a_client = self.get_a2a_client()
else:
raise ValueError(f"Invalid protocol: {protocol}")
def get_os_client(self) -> "AgentOSClient":
"""Get an AgentOSClient for fetching remote configuration.
This is used internally by AgentOS to fetch configuration from remote
AgentOS instances when this runner represents a remote resource.
Returns:
AgentOSClient: Client configured for this remote resource's base URL
"""
from agno.client import AgentOSClient
return AgentOSClient(
base_url=self.base_url,
timeout=self.timeout,
)
def get_a2a_client(self) -> "A2AClient":
"""Get an A2AClient for A2A protocol communication.
Returns cached client if available, otherwise creates a new one.
This method provides lazy initialization of the A2A client.
Returns:
A2AClient: Client configured for A2A protocol communication
"""
from agno.client.a2a import A2AClient
return A2AClient(
base_url=self.base_url,
timeout=int(self.timeout),
protocol=self.a2a_protocol,
)
@property
def _config(self) -> Optional["ConfigResponse"]:
"""Get the OS config from remote, cached with TTL."""
from agno.os.schema import ConfigResponse
if self.protocol == "a2a":
return None
current_time = time.time()
# Check if cache is valid
if self._cached_config is not None:
config, cached_at = self._cached_config
if current_time - cached_at < self.config_ttl:
return config
# Fetch fresh config
config: ConfigResponse = self.agentos_client.get_config() # type: ignore
self._cached_config = (config, current_time)
return config
async def refresh_os_config(self) -> "ConfigResponse":
"""Force refresh the cached OS config."""
from agno.os.schema import ConfigResponse
config: ConfigResponse = await self.agentos_client.aget_config() # type: ignore
self._cached_config = (config, time.time())
return config
def _get_headers(self, auth_token: Optional[str] = None) -> Dict[str, str]:
"""Get headers for HTTP requests.
Args:
auth_token: Optional JWT token for authentication
Returns:
Dict[str, str]: Headers including Content-Type and optional Authorization
"""
headers = {"Content-Type": "application/x-www-form-urlencoded"}
if auth_token:
headers["Authorization"] = f"Bearer {auth_token}"
return headers
def _get_auth_headers(self, auth_token: Optional[str] = None) -> Optional[Dict[str, str]]:
"""Get Authorization headers for HTTP requests.
Args:
auth_token: Optional JWT token for authentication
Returns:
Dict[str, str] with Authorization header if auth_token is provided, None otherwise
"""
if auth_token:
return {"Authorization": f"Bearer {auth_token}"}
return None
def get_agent_card(self) -> Optional["AgentCard"]:
"""Get agent card for A2A protocol agents, cached with TTL.
Fetches the agent card from the standard /.well-known/agent.json endpoint
to populate agent metadata (name, description, etc.) for A2A agents.
Returns None for non-A2A protocols or if the server doesn't support agent cards.
"""
if self.protocol != "a2a":
return None
current_time = time.time()
# Check if cache is valid
if self._cached_agent_card is not None:
agent_card, cached_at = self._cached_agent_card
if current_time - cached_at < self.config_ttl:
return agent_card
try:
agent_card = self.a2a_client.get_agent_card() # type: ignore
self._cached_agent_card = (agent_card, current_time)
return agent_card
except Exception:
self._cached_agent_card = (None, current_time)
return None
async def aget_agent_card(self) -> Optional["AgentCard"]:
"""Get agent card for A2A protocol agents, cached with TTL.
Fetches the agent card from the standard /.well-known/agent.json endpoint
to populate agent metadata (name, description, etc.) for A2A agents.
Returns None for non-A2A protocols or if the server doesn't support agent cards.
"""
if self.protocol != "a2a":
return None
current_time = time.time()
# Check if cache is valid
if self._cached_agent_card is not None:
agent_card, cached_at = self._cached_agent_card
if current_time - cached_at < self.config_ttl:
return agent_card
try:
agent_card = await self.a2a_client.aget_agent_card() # type: ignore
self._cached_agent_card = (agent_card, current_time)
return agent_card
except Exception:
self._cached_agent_card = (None, current_time)
return None
    @abstractmethod
    def arun(  # type: ignore
        self,
        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
        *,
        stream: Optional[bool] = None,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        audio: Optional[Sequence[Audio]] = None,
        images: Optional[Sequence[Image]] = None,
        videos: Optional[Sequence[Video]] = None,
        files: Optional[Sequence[File]] = None,
        stream_events: Optional[bool] = None,
        retries: Optional[int] = None,
        knowledge_filters: Optional[Dict[str, Any]] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        dependencies: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> Union[
        RunOutput,
        TeamRunOutput,
        WorkflowRunOutput,
        AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent, WorkflowRunOutputEvent]],
    ]:
        """Run the remote resource with the given input.

        Declared as a plain ``def`` (not ``async def``): per the return type,
        implementations yield either a run output or an async iterator of run
        events when streaming.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("arun method must be implemented by the subclass")
    @abstractmethod
    async def acontinue_run(  # type: ignore
        self,
        run_id: str,
        stream: Optional[bool] = None,
        updated_tools: Optional[List[ToolExecution]] = None,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
    ) -> Union[RunOutput, TeamRunOutput, WorkflowRunOutput]:
        """Continue a previously paused run, optionally with updated tool executions.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("acontinue_run method must be implemented by the subclass")
    @abstractmethod
    async def acancel_run(self, run_id: str) -> bool:
        """Cancel a running execution; True if the run was marked for cancellation.

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError("cancel_run method must be implemented by the subclass")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/remote/base.py",
"license": "Apache License 2.0",
"lines": 494,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/team/remote.py | import json
from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Literal, Optional, Sequence, Tuple, Union, overload
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.base import Model
from agno.models.message import Message
from agno.remote.base import BaseRemote, RemoteDb, RemoteKnowledge
from agno.run.agent import RunOutputEvent
from agno.run.team import TeamRunOutput, TeamRunOutputEvent
from agno.utils.agent import validate_input
from agno.utils.log import log_warning
from agno.utils.remote import serialize_input
if TYPE_CHECKING:
from agno.os.routers.teams.schema import TeamResponse
class RemoteTeam(BaseRemote):
    """Client-side proxy for a team hosted on a remote AgentOS (or A2A) server.

    Execution and config lookups are forwarded over HTTP:
    - "agentos" protocol: full AgentOS REST API; team config is cached with a TTL.
    - "a2a" protocol: cross-framework A2A protocol; only minimal metadata from
      the agent card is available (A2A exposes no detailed config endpoints).
    """

    # Private cache for team config with TTL: (config, timestamp)
    _cached_team_config: Optional[Tuple["TeamResponse", float]] = None
    def __init__(
        self,
        base_url: str,
        team_id: str,
        timeout: float = 300.0,
        protocol: Literal["agentos", "a2a"] = "agentos",
        a2a_protocol: Literal["json-rpc", "rest"] = "rest",
        config_ttl: float = 300.0,
    ):
        """Initialize RemoteTeam for remote execution.
        Supports two protocols:
        - "agentos": Agno's proprietary AgentOS REST API (default)
        - "a2a": A2A (Agent-to-Agent) protocol for cross-framework communication
        Args:
            base_url: Base URL for remote instance (e.g., "http://localhost:7777")
            team_id: ID of remote team on the remote server
            timeout: Request timeout in seconds (default: 300)
            protocol: Communication protocol - "agentos" (default) or "a2a"
            a2a_protocol: For A2A protocol only - Whether to use JSON-RPC or REST protocol.
            config_ttl: Time-to-live for cached config in seconds (default: 300)
        """
        super().__init__(base_url, timeout, protocol, a2a_protocol, config_ttl)
        self.team_id = team_id
        # Shadow the class-level cache with an instance attribute so caches
        # are never shared between RemoteTeam instances.
        self._cached_team_config = None
    @property
    def id(self) -> str:
        # The team's id on the remote server doubles as this proxy's id.
        return self.team_id
    async def get_team_config(self) -> "TeamResponse":
        """
        Get the team config from remote.
        - For AgentOS protocol, always fetches fresh config from the remote.
        - For A2A protocol, returns a minimal TeamResponse because A2A servers
        do not expose detailed config endpoints.
        Returns:
            TeamResponse: The remote team configuration.
        """
        from agno.os.routers.teams.schema import TeamResponse
        if self.a2a_client:
            from agno.client.a2a.schemas import AgentCard
            agent_card: Optional[AgentCard] = await self.a2a_client.aget_agent_card()
            return TeamResponse(
                id=self.team_id,
                name=agent_card.name if agent_card else self.team_id,
                description=agent_card.description if agent_card else f"A2A team: {self.team_id}",
            )
        # Fetch fresh config from remote for AgentOS
        return await self.agentos_client.aget_team(self.team_id)  # type: ignore
    @property
    def _team_config(self) -> Optional["TeamResponse"]:
        """
        Get the team config from remote, cached with TTL.
        - For A2A protocol, returns a minimal TeamResponse built from the agent card.
        - For AgentOS protocol, uses TTL caching for efficiency.
        """
        import time
        from agno.os.routers.teams.schema import TeamResponse
        if self.a2a_client:
            from agno.client.a2a.schemas import AgentCard
            agent_card: Optional[AgentCard] = self.a2a_client.get_agent_card()
            return TeamResponse(
                id=self.team_id,
                name=agent_card.name if agent_card else self.team_id,
                description=agent_card.description if agent_card else f"A2A team: {self.team_id}",
            )
        current_time = time.time()
        # Serve from cache while the entry is younger than config_ttl.
        if self._cached_team_config is not None:
            config, cached_at = self._cached_team_config
            if current_time - cached_at < self.config_ttl:
                return config
        # Fetch fresh config and update cache
        config: TeamResponse = self.agentos_client.get_team(self.team_id)  # type: ignore
        self._cached_team_config = (config, current_time)
        return config
    async def refresh_config(self) -> Optional["TeamResponse"]:
        """
        Force refresh the cached team config from remote.

        Returns None under the A2A protocol (nothing to refresh).
        """
        import time
        from agno.os.routers.teams.schema import TeamResponse
        if self.a2a_client:
            return None
        config: TeamResponse = await self.agentos_client.aget_team(self.team_id)  # type: ignore
        self._cached_team_config = (config, time.time())
        return config
    @property
    def name(self) -> Optional[str]:
        # Fall back to the team id when no config is available.
        config = self._team_config
        if config is not None:
            return config.name
        return self.team_id
    @property
    def description(self) -> Optional[str]:
        config = self._team_config
        if config is not None:
            return config.description
        return ""
    def role(self) -> Optional[str]:
        # NOTE(review): unlike name/description this is a method, not a
        # @property — callers must invoke team.role(). Confirm intentional.
        if self._team_config is not None:
            return self._team_config.role
        return None
    @property
    def tools(self) -> Optional[List[Dict[str, Any]]]:
        # The remote serializes tools as a JSON string under the "tools" key.
        if self._team_config is not None:
            try:
                return json.loads(self._team_config.tools["tools"]) if self._team_config.tools else None
            except Exception as e:
                log_warning(f"Failed to load tools for team {self.team_id}: {e}")
                return None
        return None
    @property
    def db(self) -> Optional[RemoteDb]:
        # Only resolvable over AgentOS, and only when the remote team declares a db.
        if (
            self.agentos_client
            and self._config
            and self._team_config is not None
            and self._team_config.db_id is not None
        ):
            return RemoteDb.from_config(
                db_id=self._team_config.db_id,
                client=self.agentos_client,
                config=self._config,
            )
        return None
    @property
    def knowledge(self) -> Optional[RemoteKnowledge]:
        """Whether the team has knowledge enabled."""
        if self.agentos_client and self._team_config is not None and self._team_config.knowledge is not None:
            return RemoteKnowledge(
                client=self.agentos_client,
                # Attach a contents db only when the knowledge config names one.
                contents_db=RemoteDb(
                    id=self._team_config.knowledge.get("db_id"),  # type: ignore
                    client=self.agentos_client,
                    knowledge_table_name=self._team_config.knowledge.get("knowledge_table"),
                )
                if self._team_config.knowledge.get("db_id") is not None
                else None,
            )
        return None
    @property
    def model(self) -> Optional[Model]:
        # We don't expose the remote team's models, since they can't be used by other services in AgentOS.
        return None
    @property
    def user_id(self) -> Optional[str]:
        # Remote teams carry no local user binding.
        return None
    @overload
    async def arun(
        self,
        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
        *,
        stream: Literal[False] = False,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        audio: Optional[Sequence[Audio]] = None,
        images: Optional[Sequence[Image]] = None,
        videos: Optional[Sequence[Video]] = None,
        files: Optional[Sequence[File]] = None,
        stream_events: Optional[bool] = None,
        retries: Optional[int] = None,
        knowledge_filters: Optional[Dict[str, Any]] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        dependencies: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> TeamRunOutput: ...
    # NOTE(review): this streaming overload omits session_state, which the
    # implementation accepts — confirm whether it should be listed here too.
    @overload
    def arun(
        self,
        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
        *,
        stream: Literal[True] = True,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
        audio: Optional[Sequence[Audio]] = None,
        images: Optional[Sequence[Image]] = None,
        videos: Optional[Sequence[Video]] = None,
        files: Optional[Sequence[File]] = None,
        stream_events: Optional[bool] = None,
        retries: Optional[int] = None,
        knowledge_filters: Optional[Dict[str, Any]] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        dependencies: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> AsyncIterator[TeamRunOutputEvent]: ...
    def arun(  # type: ignore
        self,
        input: Union[str, List, Dict, Message, BaseModel, List[Message]],
        *,
        stream: Optional[bool] = None,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        audio: Optional[Sequence[Audio]] = None,
        images: Optional[Sequence[Image]] = None,
        videos: Optional[Sequence[Video]] = None,
        files: Optional[Sequence[File]] = None,
        stream_events: Optional[bool] = None,
        retries: Optional[int] = None,
        knowledge_filters: Optional[Dict[str, Any]] = None,
        add_history_to_context: Optional[bool] = None,
        add_dependencies_to_context: Optional[bool] = None,
        add_session_state_to_context: Optional[bool] = None,
        dependencies: Optional[Dict[str, Any]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> Union[
        TeamRunOutput,
        AsyncIterator[RunOutputEvent],
    ]:
        """Run the remote team, dispatching to the A2A or AgentOS client.

        Returns an awaitable TeamRunOutput when stream is falsy, or an async
        iterator of run events when stream is True.
        """
        validated_input = validate_input(input)
        serialized_input = serialize_input(validated_input)
        headers = self._get_auth_headers(auth_token)
        # A2A protocol path
        if self.a2a_client:
            return self._arun_a2a(  # type: ignore[return-value]
                message=serialized_input,
                stream=stream or False,
                user_id=user_id,
                context_id=session_id,  # Map session_id → context_id for A2A
                audio=audio,
                images=images,
                videos=videos,
                files=files,
                headers=headers,
            )
        # AgentOS protocol path (default)
        if self.agentos_client:
            if stream:
                # Handle streaming response
                return self.agentos_client.run_team_stream(  # type: ignore
                    team_id=self.team_id,
                    message=serialized_input,
                    session_id=session_id,
                    user_id=user_id,
                    audio=audio,
                    images=images,
                    videos=videos,
                    files=files,
                    session_state=session_state,
                    stream_events=stream_events,
                    retries=retries,
                    knowledge_filters=knowledge_filters,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    dependencies=dependencies,
                    metadata=metadata,
                    headers=headers,
                    **kwargs,
                )
            else:
                return self.agentos_client.run_team(  # type: ignore
                    team_id=self.team_id,
                    message=serialized_input,
                    session_id=session_id,
                    user_id=user_id,
                    audio=audio,
                    images=images,
                    videos=videos,
                    files=files,
                    session_state=session_state,
                    stream_events=stream_events,
                    retries=retries,
                    knowledge_filters=knowledge_filters,
                    add_history_to_context=add_history_to_context,
                    add_dependencies_to_context=add_dependencies_to_context,
                    add_session_state_to_context=add_session_state_to_context,
                    dependencies=dependencies,
                    metadata=metadata,
                    headers=headers,
                    **kwargs,
                )
        else:
            raise ValueError("No client available")
    def _arun_a2a(
        self,
        message: str,
        stream: bool,
        user_id: Optional[str],
        context_id: Optional[str],
        audio: Optional[Sequence[Audio]],
        images: Optional[Sequence[Image]],
        videos: Optional[Sequence[Video]],
        files: Optional[Sequence[File]],
        headers: Optional[Dict[str, str]],
    ) -> Union[TeamRunOutput, AsyncIterator[TeamRunOutputEvent]]:
        """Execute via A2A protocol.
        Args:
            message: Serialized message string
            stream: Whether to stream the response
            user_id: User identifier
            context_id: Session/context ID (maps to session_id)
            audio: Audio files to include
            images: Images to include
            videos: Videos to include
            files: Files to include
            headers: HTTP headers to include in the request (optional)
        Returns:
            TeamRunOutput for non-streaming, AsyncIterator[TeamRunOutputEvent] for streaming
        """
        from agno.client.a2a.utils import map_stream_events_to_team_run_events
        if not self.a2a_client:
            raise ValueError("A2A client not available")
        if stream:
            # Return async generator for streaming
            event_stream = self.a2a_client.stream_message(
                message=message,
                context_id=context_id,
                user_id=user_id,
                audio=list(audio) if audio else None,
                images=list(images) if images else None,
                videos=list(videos) if videos else None,
                files=list(files) if files else None,
                headers=headers,
            )
            return map_stream_events_to_team_run_events(event_stream, team_id=self.team_id)
        else:
            # Return coroutine for non-streaming
            return self._arun_a2a_send(  # type: ignore[return-value]
                message=message,
                user_id=user_id,
                context_id=context_id,
                audio=audio,
                images=images,
                videos=videos,
                files=files,
                headers=headers,
            )
    async def _arun_a2a_send(
        self,
        message: str,
        user_id: Optional[str],
        context_id: Optional[str],
        audio: Optional[Sequence[Audio]],
        images: Optional[Sequence[Image]],
        videos: Optional[Sequence[Video]],
        files: Optional[Sequence[File]],
        headers: Optional[Dict[str, str]],
    ) -> TeamRunOutput:
        """Send a non-streaming A2A message and convert response to TeamRunOutput."""
        if not self.a2a_client:
            raise ValueError("A2A client not available")
        from agno.client.a2a.utils import map_task_result_to_team_run_output
        task_result = await self.a2a_client.send_message(
            message=message,
            context_id=context_id,
            user_id=user_id,
            images=list(images) if images else None,
            audio=list(audio) if audio else None,
            videos=list(videos) if videos else None,
            files=list(files) if files else None,
            headers=headers,
        )
        return map_task_result_to_team_run_output(task_result, team_id=self.team_id, user_id=user_id)
    async def acancel_run(self, run_id: str, auth_token: Optional[str] = None) -> bool:
        """Cancel a running team execution.
        Args:
            run_id (str): The run_id to cancel.
            auth_token: Optional JWT token for authentication.
        Returns:
            bool: True if the run was found and marked for cancellation, False otherwise.
        """
        headers = self._get_auth_headers(auth_token)
        try:
            await self.agentos_client.cancel_team_run(  # type: ignore
                team_id=self.team_id,
                run_id=run_id,
                headers=headers,
            )
            return True
        except Exception:
            # Best-effort: any failure (network, unknown run) reports False.
            return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/team/remote.py",
"license": "Apache License 2.0",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/remote.py | import json
from typing import Any, Dict, List, Union
from pydantic import BaseModel
from agno.models.message import Message
def serialize_input(
input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
) -> str:
"""Serialize the input to a string."""
if isinstance(input, str):
return input
elif isinstance(input, dict):
return json.dumps(input)
elif isinstance(input, list):
if any(isinstance(item, Message) for item in input):
return json.dumps([item.to_dict() for item in input])
else:
return json.dumps(input)
elif isinstance(input, BaseModel):
return input.model_dump_json()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/remote.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/workflow/remote.py | import time
from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Literal, Optional, Tuple, Union, overload
from fastapi import WebSocket
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.remote.base import BaseRemote, RemoteDb
from agno.run.workflow import WorkflowRunOutput, WorkflowRunOutputEvent
from agno.utils.agent import validate_input
from agno.utils.remote import serialize_input
if TYPE_CHECKING:
from agno.os.routers.workflows.schema import WorkflowResponse
class RemoteWorkflow(BaseRemote):
    """Client-side proxy for a workflow hosted on a remote AgentOS (or A2A) server.

    Execution and config lookups are forwarded over HTTP:
    - "agentos" protocol: full AgentOS REST API; workflow config is cached with a TTL.
    - "a2a" protocol: cross-framework A2A protocol; only minimal metadata from
      the agent card is available (A2A exposes no detailed config endpoints).
    """

    # Private cache for workflow config with TTL: (config, timestamp)
    _cached_workflow_config: Optional[Tuple["WorkflowResponse", float]] = None
    def __init__(
        self,
        base_url: str,
        workflow_id: str,
        timeout: float = 300.0,
        protocol: Literal["agentos", "a2a"] = "agentos",
        a2a_protocol: Literal["json-rpc", "rest"] = "rest",
        config_ttl: float = 300.0,
    ):
        """Initialize RemoteWorkflow for remote execution.
        Supports two protocols:
        - "agentos": Agno's proprietary AgentOS REST API (default)
        - "a2a": A2A (Agent-to-Agent) protocol for cross-framework communication
        Args:
            base_url: Base URL for remote instance (e.g., "http://localhost:7777")
            workflow_id: ID of remote workflow on the remote server
            timeout: Request timeout in seconds (default: 300)
            protocol: Communication protocol - "agentos" (default) or "a2a"
            a2a_protocol: For A2A protocol only - Whether to use JSON-RPC or REST protocol.
            config_ttl: Time-to-live for cached config in seconds (default: 300)
        """
        super().__init__(base_url, timeout, protocol, a2a_protocol, config_ttl)
        self.workflow_id = workflow_id
        # Shadow the class-level cache with an instance attribute so caches
        # are never shared between RemoteWorkflow instances.
        self._cached_workflow_config = None
        # NOTE(review): base __init__ already stores config_ttl as
        # self.config_ttl; _config_ttl looks redundant here — confirm.
        self._config_ttl = config_ttl
    @property
    def id(self) -> str:
        # The workflow's id on the remote server doubles as this proxy's id.
        return self.workflow_id
    async def get_workflow_config(self) -> "WorkflowResponse":
        """Get the workflow config from remote (always fetches fresh).

        Under the A2A protocol a minimal WorkflowResponse is built from the
        agent card, since A2A exposes no detailed config endpoints.
        """
        from agno.os.routers.workflows.schema import WorkflowResponse
        if self.protocol == "a2a":
            from agno.client.a2a.schemas import AgentCard
            agent_card: Optional[AgentCard] = await self.a2a_client.aget_agent_card()  # type: ignore
            return WorkflowResponse(
                id=self.workflow_id,
                name=agent_card.name if agent_card else self.workflow_id,
                description=agent_card.description if agent_card else f"A2A workflow: {self.workflow_id}",
            )
        # AgentOS protocol: fetch fresh config from remote
        return await self.agentos_client.aget_workflow(self.workflow_id)  # type: ignore
    @property
    def _workflow_config(self) -> "WorkflowResponse":
        """Get the workflow config from remote, cached with TTL.

        Under the A2A protocol a minimal WorkflowResponse is rebuilt from the
        agent card on every access (no TTL caching on this path).
        """
        from agno.os.routers.workflows.schema import WorkflowResponse
        if self.protocol == "a2a":
            from agno.client.a2a.schemas import AgentCard
            agent_card: Optional[AgentCard] = self.a2a_client.get_agent_card()  # type: ignore
            return WorkflowResponse(
                id=self.workflow_id,
                name=agent_card.name if agent_card else self.workflow_id,
                description=agent_card.description if agent_card else f"A2A workflow: {self.workflow_id}",
            )
        current_time = time.time()
        # Check if cache is valid
        if self._cached_workflow_config is not None:
            config, cached_at = self._cached_workflow_config
            if current_time - cached_at < self.config_ttl:
                return config
        # Fetch fresh config
        config: WorkflowResponse = self.agentos_client.get_workflow(self.workflow_id)  # type: ignore
        self._cached_workflow_config = (config, current_time)
        return config
    async def refresh_config(self) -> "WorkflowResponse":
        """Force refresh the cached workflow config."""
        from agno.os.routers.workflows.schema import WorkflowResponse
        # NOTE(review): no A2A guard here — under the "a2a" protocol
        # agentos_client is None and this raises AttributeError, whereas
        # RemoteTeam.refresh_config returns None. Confirm intended.
        config: WorkflowResponse = await self.agentos_client.aget_workflow(self.workflow_id)  # type: ignore
        self._cached_workflow_config = (config, time.time())
        return config
    @property
    def name(self) -> Optional[str]:
        if self._workflow_config is not None:
            return self._workflow_config.name
        return None
    @property
    def description(self) -> Optional[str]:
        if self._workflow_config is not None:
            return self._workflow_config.description
        return None
    @property
    def db(self) -> Optional[RemoteDb]:
        # Only resolvable over AgentOS, and only when the remote workflow declares a db.
        if (
            self.agentos_client
            and self._config
            and self._workflow_config is not None
            and self._workflow_config.db_id is not None
        ):
            return RemoteDb.from_config(
                db_id=self._workflow_config.db_id,
                client=self.agentos_client,
                config=self._config,
            )
        return None
    @overload
    async def arun(
        self,
        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
        additional_data: Optional[Dict[str, Any]] = None,
        user_id: Optional[str] = None,
        run_id: Optional[str] = None,
        session_id: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        audio: Optional[List[Audio]] = None,
        images: Optional[List[Image]] = None,
        videos: Optional[List[Video]] = None,
        files: Optional[List[File]] = None,
        stream: Literal[False] = False,
        stream_events: Optional[bool] = None,
        stream_intermediate_steps: Optional[bool] = None,
        background: Optional[bool] = False,
        websocket: Optional[WebSocket] = None,
        background_tasks: Optional[Any] = None,
        auth_token: Optional[str] = None,
    ) -> WorkflowRunOutput: ...
    @overload
    def arun(
        self,
        input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]]] = None,
        additional_data: Optional[Dict[str, Any]] = None,
        user_id: Optional[str] = None,
        run_id: Optional[str] = None,
        session_id: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        audio: Optional[List[Audio]] = None,
        images: Optional[List[Image]] = None,
        videos: Optional[List[Video]] = None,
        files: Optional[List[File]] = None,
        stream: Literal[True] = True,
        stream_events: Optional[bool] = None,
        stream_intermediate_steps: Optional[bool] = None,
        background: Optional[bool] = False,
        websocket: Optional[WebSocket] = None,
        background_tasks: Optional[Any] = None,
        auth_token: Optional[str] = None,
    ) -> AsyncIterator[WorkflowRunOutputEvent]: ...
    # NOTE(review): unlike the overloads, the implementation takes a required
    # input and no stream_intermediate_steps parameter (it is swallowed by
    # **kwargs) — confirm the signatures should stay divergent.
    def arun(  # type: ignore
        self,
        input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
        additional_data: Optional[Dict[str, Any]] = None,
        user_id: Optional[str] = None,
        run_id: Optional[str] = None,
        session_id: Optional[str] = None,
        session_state: Optional[Dict[str, Any]] = None,
        audio: Optional[List[Audio]] = None,
        images: Optional[List[Image]] = None,
        videos: Optional[List[Video]] = None,
        files: Optional[List[File]] = None,
        stream: bool = False,
        stream_events: Optional[bool] = None,
        background: Optional[bool] = False,
        websocket: Optional[WebSocket] = None,
        background_tasks: Optional[Any] = None,
        auth_token: Optional[str] = None,
        **kwargs: Any,
    ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
        """Run the remote workflow, dispatching to the A2A or AgentOS client."""
        # TODO: Deal with background
        validated_input = validate_input(input)
        serialized_input = serialize_input(validated_input)
        headers = self._get_auth_headers(auth_token)
        # A2A protocol path
        if self.a2a_client:
            return self._arun_a2a(  # type: ignore[return-value]
                message=serialized_input,
                stream=stream or False,
                user_id=user_id,
                context_id=session_id,  # Map session_id → context_id for A2A
                images=images,
                videos=videos,
                audio=audio,
                files=files,
                headers=headers,
            )
        # AgentOS protocol path (default)
        if self.agentos_client:
            if stream:
                # Handle streaming response
                return self.agentos_client.run_workflow_stream(
                    workflow_id=self.workflow_id,
                    message=serialized_input,
                    additional_data=additional_data,
                    run_id=run_id,
                    session_id=session_id,
                    user_id=user_id,
                    audio=audio,
                    images=images,
                    videos=videos,
                    files=files,
                    session_state=session_state,
                    stream_events=stream_events,
                    headers=headers,
                    **kwargs,
                )
            else:
                return self.agentos_client.run_workflow(  # type: ignore
                    workflow_id=self.workflow_id,
                    message=serialized_input,
                    additional_data=additional_data,
                    run_id=run_id,
                    session_id=session_id,
                    user_id=user_id,
                    audio=audio,
                    images=images,
                    videos=videos,
                    files=files,
                    session_state=session_state,
                    headers=headers,
                    **kwargs,
                )
        else:
            raise ValueError("No client available")
    def _arun_a2a(
        self,
        message: str,
        stream: bool,
        user_id: Optional[str],
        context_id: Optional[str],
        images: Optional[List[Image]],
        videos: Optional[List[Video]],
        audio: Optional[List[Audio]],
        files: Optional[List[File]],
        headers: Optional[Dict[str, str]],
    ) -> Union[WorkflowRunOutput, AsyncIterator[WorkflowRunOutputEvent]]:
        """Execute via A2A protocol.
        Args:
            message: Serialized message string
            stream: Whether to stream the response
            user_id: User identifier
            context_id: Session/context ID (maps to session_id)
            images: Images to include
            videos: Videos to include
            audio: Audio files to include
            files: Files to include
            headers: HTTP headers to include in the request (optional)
        Returns:
            WorkflowRunOutput for non-streaming, AsyncIterator[WorkflowRunOutputEvent] for streaming
        """
        if not self.a2a_client:
            raise ValueError("A2A client not available")
        from agno.client.a2a.utils import map_stream_events_to_workflow_run_events
        if stream:
            # Return async generator for streaming
            event_stream = self.a2a_client.stream_message(
                message=message,
                context_id=context_id,
                user_id=user_id,
                images=list(images) if images else None,
                audio=list(audio) if audio else None,
                videos=list(videos) if videos else None,
                files=list(files) if files else None,
                headers=headers,
            )
            return map_stream_events_to_workflow_run_events(event_stream, workflow_id=self.workflow_id)  # type: ignore
        else:
            # Return coroutine for non-streaming
            return self._arun_a2a_send(  # type: ignore[return-value]
                message=message,
                user_id=user_id,
                context_id=context_id,
                images=images,
                audio=audio,
                videos=videos,
                files=files,
                headers=headers,
            )
    async def _arun_a2a_send(
        self,
        message: str,
        user_id: Optional[str],
        context_id: Optional[str],
        images: Optional[List[Image]],
        videos: Optional[List[Video]],
        audio: Optional[List[Audio]],
        files: Optional[List[File]],
        headers: Optional[Dict[str, str]],
    ) -> WorkflowRunOutput:
        """Send a non-streaming A2A message and convert response to WorkflowRunOutput."""
        if not self.a2a_client:
            raise ValueError("A2A client not available")
        from agno.client.a2a.utils import map_task_result_to_workflow_run_output
        task_result = await self.a2a_client.send_message(
            message=message,
            context_id=context_id,
            user_id=user_id,
            images=list(images) if images else None,
            audio=list(audio) if audio else None,
            videos=list(videos) if videos else None,
            files=list(files) if files else None,
            headers=headers,
        )
        return map_task_result_to_workflow_run_output(task_result, workflow_id=self.workflow_id, user_id=user_id)
    async def acancel_run(self, run_id: str, auth_token: Optional[str] = None) -> bool:
        """Cancel a running workflow execution.
        Args:
            run_id (str): The run_id to cancel.
            auth_token: Optional JWT token for authentication.
        Returns:
            bool: True if the run was found and marked for cancellation, False otherwise.
        """
        headers = self._get_auth_headers(auth_token)
        try:
            # NOTE(review): builds a fresh client via get_os_client() instead
            # of reusing self.agentos_client (as RemoteTeam does) — works, but
            # also constructs an OS client even under the A2A protocol.
            await self.get_os_client().cancel_workflow_run(
                workflow_id=self.workflow_id,
                run_id=run_id,
                headers=headers,
            )
            return True
        except Exception:
            # Best-effort: any failure (network, unknown run) reports False.
            return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/remote.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/system/gateway_server.py | """
Gateway AgentOS Server for System Tests.
This server acts as a gateway that consumes remote agents, teams, and workflows
defined in a separate remote server container.
"""
import os
from agno.agent import Agent, RemoteAgent
from agno.db.postgres import AsyncPostgresDb
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.os.config import AuthorizationConfig
from agno.os.interfaces.a2a import A2A
from agno.os.interfaces.agui import AGUI
from agno.os.interfaces.slack import Slack
from agno.team import RemoteTeam, Team
from agno.vectordb.pgvector.pgvector import PgVector
from agno.workflow import RemoteWorkflow, Workflow
from agno.workflow.step import Step
# =============================================================================
# JWT Authorization Configuration
# =============================================================================
# Shared secret key for JWT verification (in production, use proper key management)
JWT_SECRET_KEY = os.getenv("JWT_SECRET_KEY", "test-secret-key-for-system-tests-do-not-use-in-production")
ENABLE_AUTHORIZATION = os.getenv("ENABLE_AUTHORIZATION", "true").lower() == "true"
# =============================================================================
# Database Configuration
# =============================================================================
db = AsyncPostgresDb(
id="gateway-db",
db_url=os.getenv("DATABASE_URL", "postgresql+psycopg://ai:ai@postgres:5432/ai"),
)
knowledge = Knowledge(
name="Gateway Knowledge",
description="A knowledge base for the gateway server",
vector_db=PgVector(
db_url=os.getenv("DATABASE_URL", "postgresql+psycopg://ai:ai@postgres:5432/ai"),
table_name="gateway_knowledge",
embedder=OpenAIEmbedder(id="text-embedding-3-small"),
),
contents_db=db,
)
# =============================================================================
# Local Agent for Gateway
# =============================================================================
local_agent = Agent(
name="Gateway Agent",
id="gateway-agent",
description="A local agent on the gateway for testing",
model=OpenAIChat(id="gpt-4o-mini"),
db=db,
knowledge=knowledge,
instructions=["You are a helpful assistant on the gateway server."],
update_memory_on_run=True,
markdown=True,
)
# =============================================================================
# Local Team for Gateway
# =============================================================================
local_team = Team(
name="Gateway Team",
id="gateway-team",
description="A local team on the gateway for testing",
model=OpenAIChat(id="gpt-4o-mini"),
members=[local_agent],
db=db,
)
# =============================================================================
# Local Workflow for Gateway
# =============================================================================
local_workflow = Workflow(
name="Gateway Workflow",
description="A local workflow on the gateway for testing",
id="gateway-workflow",
db=db,
steps=[
Step(
name="Gateway Step",
agent=local_agent,
),
],
)
# =============================================================================
# Remote Configuration
# =============================================================================
REMOTE_SERVER_URL = os.getenv("REMOTE_SERVER_URL", "http://remote-server:7002")
ADK_SERVER_URL = os.getenv("ADK_SERVER_URL", "http://adk-server:7003")
REMOTE_A2A_SERVER_URL = os.getenv("REMOTE_A2A_SERVER_URL", "http://agno-a2a-server:7004")
# Remote agent for interface testing
remote_assistant = RemoteAgent(base_url=REMOTE_SERVER_URL, agent_id="assistant-agent")
remote_researcher = RemoteAgent(base_url=REMOTE_SERVER_URL, agent_id="researcher-agent")
# Remote team for interface testing
remote_team = RemoteTeam(base_url=REMOTE_SERVER_URL, team_id="research-team")
# Remote workflow for interface testing
remote_workflow = RemoteWorkflow(base_url=REMOTE_SERVER_URL, workflow_id="qa-workflow")
# ADK Remote agent (A2A protocol)
adk_facts_agent = RemoteAgent(
base_url=ADK_SERVER_URL,
agent_id="facts_agent",
protocol="a2a",
a2a_protocol="json-rpc", # Needed for Google ADK servers
)
remote_a2a_assistant = RemoteAgent(
base_url=REMOTE_A2A_SERVER_URL + "/a2a/agents/assistant-agent-2", # Agno's format for a2a endpoints
agent_id="assistant-agent-2",
protocol="a2a",
)
remote_a2a_researcher = RemoteAgent(
base_url=REMOTE_A2A_SERVER_URL + "/a2a/agents/researcher-agent-2", # Agno's format for a2a endpoints
agent_id="researcher-agent-2",
protocol="a2a",
)
# A2A Remote team and workflow
remote_a2a_team = RemoteTeam(
base_url=REMOTE_A2A_SERVER_URL + "/a2a/teams/research-team-2",
team_id="research-team-2",
protocol="a2a",
)
remote_a2a_workflow = RemoteWorkflow(
base_url=REMOTE_A2A_SERVER_URL + "/a2a/workflows/qa-workflow-2",
workflow_id="qa-workflow-2",
protocol="a2a",
)
# =============================================================================
# Interface Configuration
# =============================================================================
# AG-UI Interfaces (for local agent, remote agent, and team)
agui_local = AGUI(agent=local_agent, prefix="/agui/local", tags=["AGUI-Local"])
agui_remote = AGUI(agent=remote_assistant, prefix="/agui/remote", tags=["AGUI-Remote"])
agui_team = AGUI(team=remote_team, prefix="/agui/team", tags=["AGUI-Team"])
# Slack Interfaces (for local agent, remote agent, team, and workflow)
slack_local = Slack(agent=local_agent, prefix="/slack/local", tags=["Slack-Local"])
slack_remote = Slack(agent=remote_assistant, prefix="/slack/remote", tags=["Slack-Remote"])
slack_team = Slack(team=remote_team, prefix="/slack/team", tags=["Slack-Team"])
slack_workflow = Slack(workflow=local_workflow, prefix="/slack/workflow", tags=["Slack-Workflow"])
# A2A Interface (exposes all agents, teams, and workflows)
a2a_interface = A2A(
agents=[local_agent, remote_assistant, remote_researcher],
teams=[remote_team],
workflows=[local_workflow, remote_workflow],
prefix="/a2a",
tags=["A2A"],
)
# =============================================================================
# AgentOS Configuration
# =============================================================================
agent_os = AgentOS(
id="gateway-os",
description="Gateway AgentOS for system testing - consumes remote agents, teams, and workflows",
agents=[
local_agent,
remote_assistant,
remote_researcher,
adk_facts_agent,
remote_a2a_assistant,
remote_a2a_researcher,
],
teams=[
local_team,
remote_team,
remote_a2a_team,
],
workflows=[
local_workflow,
remote_workflow,
remote_a2a_workflow,
],
interfaces=[
agui_local,
agui_remote,
agui_team,
slack_local,
slack_remote,
slack_team,
slack_workflow,
a2a_interface,
],
tracing=True,
db=db,
enable_mcp_server=True,
authorization=ENABLE_AUTHORIZATION,
authorization_config=AuthorizationConfig(
verification_keys=[JWT_SECRET_KEY],
algorithm="HS256",
)
if ENABLE_AUTHORIZATION
else None,
)
# FastAPI app instance (for uvicorn)
app = agent_os.get_app()
# =============================================================================
# Main Entry Point
# =============================================================================
if __name__ == "__main__":
reload = os.getenv("RELOAD", "true").lower() == "true"
agent_os.serve(app="gateway_server:app", reload=reload, host="0.0.0.0", port=7001, access_log=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/system/gateway_server.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/system/remote_server.py | """
Remote AgentOS Server for System Tests.
This server hosts the actual agents, teams, and workflows that the gateway
consumes via RemoteAgent, RemoteTeam, and RemoteWorkflow.
"""
import os
from agno.agent import Agent
from agno.db.postgres import AsyncPostgresDb
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.os.middleware.jwt import JWTMiddleware
from agno.team.team import Team
from agno.tools.calculator import CalculatorTools
from agno.tools.websearch import WebSearchTools
from agno.vectordb.pgvector import PgVector
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# =============================================================================
# JWT Authorization Configuration
# =============================================================================
# Shared secret key for JWT verification (in production, use proper key management)
JWT_SECRET_KEY = os.getenv("JWT_SECRET_KEY", "test-secret-key-for-system-tests-do-not-use-in-production")
ENABLE_AUTHORIZATION = os.getenv("ENABLE_AUTHORIZATION", "true").lower() == "true"
# =============================================================================
# Database Configuration
# =============================================================================
db = AsyncPostgresDb(
id="remote-db",
db_url=os.getenv("DATABASE_URL", "postgresql+psycopg://ai:ai@postgres:5432/ai"),
)
# =============================================================================
# Knowledge Base Configuration
# =============================================================================
knowledge = Knowledge(
name="Remote Knowledge",
description="A knowledge base for the remote server",
vector_db=PgVector(
db_url=os.getenv("DATABASE_URL", "postgresql+psycopg://ai:ai@postgres:5432/ai"),
table_name="remote_test_knowledge",
embedder=OpenAIEmbedder(id="text-embedding-3-small"),
),
contents_db=db,
)
# =============================================================================
# Agent Configuration
# =============================================================================
# Agent 1: Assistant with calculator tools and memory
assistant = Agent(
name="Assistant",
id="assistant-agent",
description="A helpful AI assistant with calculator capabilities.",
model=OpenAIChat(id="gpt-5-mini"),
db=db,
instructions=[
"You are a helpful AI assistant.",
"Use the calculator tool for any math operations.",
"You have access to a knowledge base - search it when asked about documents.",
],
markdown=True,
update_memory_on_run=True,
tools=[CalculatorTools()],
knowledge=knowledge,
search_knowledge=True,
)
# Agent 2: Researcher with web search capabilities
researcher = Agent(
name="Researcher",
id="researcher-agent",
description="A research assistant with web search capabilities.",
model=OpenAIChat(id="gpt-5-mini"),
update_memory_on_run=True,
db=db,
instructions=[
"You are a research assistant.",
"Search the web for information when needed.",
"Provide well-researched, accurate responses.",
],
markdown=True,
tools=[WebSearchTools()],
)
# =============================================================================
# Team Configuration
# =============================================================================
research_team = Team(
name="Research Team",
id="research-team",
description="A team that coordinates research and analysis tasks.",
model=OpenAIChat(id="gpt-5-mini"),
members=[assistant, researcher],
instructions=[
"You are a research team that coordinates multiple specialists.",
"Delegate math questions to the Assistant.",
"Delegate research questions to the Researcher.",
"Combine insights from team members for comprehensive answers.",
],
markdown=True,
update_memory_on_run=True,
db=db,
)
# =============================================================================
# Workflow Configuration
# =============================================================================
qa_workflow = Workflow(
name="QA Workflow",
description="A simple Q&A workflow that uses the assistant agent",
id="qa-workflow",
db=db,
steps=[
Step(
name="Answer Question",
agent=assistant,
),
],
)
# =============================================================================
# AgentOS Configuration
# =============================================================================
agent_os = AgentOS(
id="remote-os",
description="Remote AgentOS server hosting agents, teams, and workflows for system testing",
agents=[assistant, researcher],
teams=[research_team],
workflows=[qa_workflow],
knowledge=[knowledge],
tracing=True,
db=db,
)
# FastAPI app instance (for uvicorn)
app = agent_os.get_app()
app.add_middleware(
JWTMiddleware,
verification_keys=[JWT_SECRET_KEY],
algorithm="HS256",
authorization=ENABLE_AUTHORIZATION,
verify_audience=False,
# We have to exclude for the config endpoint to work correctly.
excluded_route_paths=[
"/health",
"/config",
"/agents",
"/agents/*",
"/teams",
"/teams/*",
"/workflows",
"/workflows/*",
],
)
# =============================================================================
# Main Entry Point
# =============================================================================
if __name__ == "__main__":
reload = os.getenv("RELOAD", "true").lower() == "true"
agent_os.serve(app="remote_server:app", reload=reload, host="0.0.0.0", port=7002, access_log=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/system/remote_server.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_client.py | """
Unit tests for AgentOSClient.
Tests cover:
1. Client initialization and configuration
2. HTTP method helpers
3. Discovery operations
4. Memory operations
5. Session operations
6. Eval operations
7. Knowledge operations
8. Run operations
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.client import AgentOSClient
def test_init_with_base_url():
"""Verify basic initialization with base URL."""
client = AgentOSClient(base_url="http://localhost:7777")
assert client.base_url == "http://localhost:7777"
assert client.timeout == 60
def test_init_strips_trailing_slash():
"""Verify trailing slash is removed from base URL."""
client = AgentOSClient(base_url="http://localhost:7777/")
assert client.base_url == "http://localhost:7777"
def test_init_with_custom_timeout():
"""Verify custom timeout is respected."""
client = AgentOSClient(base_url="http://localhost:7777", timeout=120)
assert client.timeout == 120
@pytest.mark.asyncio
async def test_get_method():
"""Verify _aget method makes correct HTTP request."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_response = MagicMock()
mock_response.json.return_value = {"data": "test"}
mock_response.raise_for_status = MagicMock()
mock_response.content = b'{"data": "test"}'
mock_http_client = MagicMock()
mock_http_client.request = AsyncMock(return_value=mock_response)
with patch("agno.client.os.get_default_async_client", return_value=mock_http_client):
result = await client._aget("/test-endpoint")
mock_http_client.request.assert_called_once()
call_args = mock_http_client.request.call_args
assert call_args[0][0] == "GET"
assert "http://localhost:7777/test-endpoint" in str(call_args)
assert result == {"data": "test"}
@pytest.mark.asyncio
async def test_post_method():
"""Verify _apost method makes correct HTTP request."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_response = MagicMock()
mock_response.json.return_value = {"created": True}
mock_response.raise_for_status = MagicMock()
mock_response.content = b'{"created": true}'
mock_http_client = MagicMock()
mock_http_client.request = AsyncMock(return_value=mock_response)
with patch("agno.client.os.get_default_async_client", return_value=mock_http_client):
result = await client._apost("/test-endpoint", {"key": "value"})
mock_http_client.request.assert_called_once()
call_args = mock_http_client.request.call_args
assert call_args[0][0] == "POST"
assert result == {"created": True}
@pytest.mark.asyncio
async def test_patch_method():
"""Verify _apatch method makes correct HTTP request."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_response = MagicMock()
mock_response.json.return_value = {"updated": True}
mock_response.raise_for_status = MagicMock()
mock_response.content = b'{"updated": true}'
mock_http_client = MagicMock()
mock_http_client.request = AsyncMock(return_value=mock_response)
with patch("agno.client.os.get_default_async_client", return_value=mock_http_client):
result = await client._apatch("/test-endpoint", {"key": "value"})
mock_http_client.request.assert_called_once()
call_args = mock_http_client.request.call_args
assert call_args[0][0] == "PATCH"
assert result == {"updated": True}
@pytest.mark.asyncio
async def test_delete_method():
"""Verify _adelete method makes correct HTTP request."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_response = MagicMock()
mock_response.raise_for_status = MagicMock()
mock_response.content = b""
mock_http_client = MagicMock()
mock_http_client.request = AsyncMock(return_value=mock_response)
with patch("agno.client.os.get_default_async_client", return_value=mock_http_client):
await client._adelete("/test-endpoint")
mock_http_client.request.assert_called_once()
call_args = mock_http_client.request.call_args
assert call_args[0][0] == "DELETE"
@pytest.mark.asyncio
async def test_get_config():
"""Verify get_config returns ConfigResponse."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"os_id": "test-os",
"name": "Test OS",
"description": "Test description",
"databases": ["db-1"],
"agents": [],
"teams": [],
"workflows": [],
"interfaces": [],
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
config = await client.aget_config()
mock_get.assert_called_once_with("/config", headers=None)
assert config.os_id == "test-os"
assert config.name == "Test OS"
@pytest.mark.asyncio
async def test_get_agent():
"""Verify get_agent returns AgentResponse."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"id": "agent-1",
"name": "Test Agent",
"model": {"name": "GPT-4o", "model": "gpt-4o", "provider": "openai"},
"tools": {"calculator": {"name": "calculator"}},
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
agent = await client.aget_agent("agent-1")
mock_get.assert_called_once_with("/agents/agent-1", headers=None)
assert agent.id == "agent-1"
assert agent.name == "Test Agent"
@pytest.mark.asyncio
async def test_get_team():
"""Verify get_team returns TeamResponse."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"id": "team-1",
"name": "Test Team",
"model": {"name": "GPT-4o", "model": "gpt-4o", "provider": "openai"},
"members": [],
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
team = await client.aget_team("team-1")
mock_get.assert_called_once_with("/teams/team-1", headers=None)
assert team.id == "team-1"
assert team.name == "Test Team"
@pytest.mark.asyncio
async def test_get_workflow():
"""Verify get_workflow returns WorkflowResponse."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"id": "workflow-1",
"name": "Test Workflow",
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
workflow = await client.aget_workflow("workflow-1")
mock_get.assert_called_once_with("/workflows/workflow-1", headers=None)
assert workflow.id == "workflow-1"
assert workflow.name == "Test Workflow"
@pytest.mark.asyncio
async def test_create_memory():
"""Verify create_memory creates a new memory."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"memory_id": "mem-123",
"memory": "User likes blue",
"user_id": "user-1",
"topics": ["preferences"],
}
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = mock_data
memory = await client.create_memory(
memory="User likes blue",
user_id="user-1",
topics=["preferences"],
)
mock_post.assert_called_once()
assert memory.memory_id == "mem-123"
assert memory.memory == "User likes blue"
@pytest.mark.asyncio
async def test_get_memory():
"""Verify get_memory retrieves a memory."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"memory_id": "mem-123",
"memory": "User likes blue",
"user_id": "user-1",
"topics": ["preferences"],
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
memory = await client.get_memory("mem-123")
assert "mem-123" in str(mock_get.call_args)
assert memory.memory_id == "mem-123"
@pytest.mark.asyncio
async def test_list_memories():
"""Verify list_memories returns paginated memories."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"data": [
{
"memory_id": "mem-1",
"memory": "Memory 1",
"user_id": "user-1",
"topics": [],
}
],
"meta": {"page": 1, "limit": 20, "total": 1, "total_pages": 1},
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
result = await client.list_memories(user_id="user-1")
assert len(result.data) == 1
assert result.data[0].memory_id == "mem-1"
@pytest.mark.asyncio
async def test_update_memory():
"""Verify update_memory updates a memory."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"memory_id": "mem-123",
"memory": "Updated memory",
"user_id": "user-1",
"topics": ["updated"],
}
with patch.object(client, "_apatch", new_callable=AsyncMock) as mock_patch:
mock_patch.return_value = mock_data
memory = await client.update_memory(
memory_id="mem-123",
memory="Updated memory",
user_id="user-1",
)
mock_patch.assert_called_once()
assert memory.memory == "Updated memory"
@pytest.mark.asyncio
async def test_delete_memory():
"""Verify delete_memory deletes a memory."""
client = AgentOSClient(base_url="http://localhost:7777")
with patch.object(client, "_adelete", new_callable=AsyncMock) as mock_delete:
await client.delete_memory("mem-123", user_id="user-1")
mock_delete.assert_called_once()
@pytest.mark.asyncio
async def test_list_sessions():
"""Verify list_sessions returns paginated sessions."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"data": [
{
"session_id": "sess-1",
"session_name": "Test Session",
}
],
"meta": {"page": 1, "limit": 20, "total": 1, "total_pages": 1},
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
result = await client.get_sessions()
assert len(result.data) == 1
assert result.data[0].session_id == "sess-1"
@pytest.mark.asyncio
async def test_create_session():
"""Verify create_session creates a new session."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"agent_session_id": "agent-sess-123",
"session_id": "sess-123",
"session_name": "New Session",
"agent_id": "agent-1",
"user_id": "user-1",
}
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = mock_data
session = await client.create_session(agent_id="agent-1", user_id="user-1")
mock_post.assert_called_once()
assert session.session_id == "sess-123"
@pytest.mark.asyncio
async def test_get_session():
"""Verify get_session retrieves a session."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"agent_session_id": "agent-sess-123",
"session_id": "sess-123",
"session_name": "Test Session",
"agent_id": "agent-1",
"user_id": "user-1",
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
session = await client.get_session("sess-123")
assert "sess-123" in str(mock_get.call_args)
assert session.session_id == "sess-123"
@pytest.mark.asyncio
async def test_delete_session():
"""Verify delete_session deletes a session."""
client = AgentOSClient(base_url="http://localhost:7777")
with patch.object(client, "_adelete", new_callable=AsyncMock) as mock_delete:
await client.delete_session("sess-123")
mock_delete.assert_called_once()
@pytest.mark.asyncio
async def test_list_eval_runs():
"""Verify list_eval_runs returns paginated evals."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"data": [
{
"id": "eval-1",
"name": "Test Eval",
"eval_type": "accuracy",
"eval_data": {"score": 0.95},
}
],
"meta": {"page": 1, "limit": 20, "total": 1, "total_pages": 1},
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
result = await client.list_eval_runs()
assert len(result.data) == 1
assert result.data[0].id == "eval-1"
@pytest.mark.asyncio
async def test_get_eval_run():
"""Verify get_eval_run retrieves an eval."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"id": "eval-123",
"name": "Test Eval",
"eval_type": "accuracy",
"eval_data": {"score": 0.95},
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
eval_run = await client.get_eval_run("eval-123")
assert "eval-123" in str(mock_get.call_args)
assert eval_run.id == "eval-123"
@pytest.mark.asyncio
async def test_list_content():
"""Verify list_content returns paginated content."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"data": [
{
"id": "content-1",
"name": "Test Document",
}
],
"meta": {"page": 1, "limit": 20, "total": 1, "total_pages": 1},
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
result = await client.list_knowledge_content()
assert len(result.data) == 1
assert result.data[0].id == "content-1"
@pytest.mark.asyncio
async def test_get_content():
"""Verify get_content retrieves content."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"id": "content-123",
"name": "Test Document",
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
content = await client.get_knowledge_content("content-123")
assert "content-123" in str(mock_get.call_args)
assert content.id == "content-123"
@pytest.mark.asyncio
async def test_search_knowledge():
"""Verify search_knowledge searches the knowledge base."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"data": [
{
"id": "result-1",
"content": "Matching content",
}
],
"meta": {"page": 1, "limit": 20, "total": 1, "total_pages": 1},
}
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = mock_data
result = await client.search_knowledge(query="test query")
mock_post.assert_called_once()
assert len(result.data) == 1
@pytest.mark.asyncio
async def test_get_knowledge_config():
"""Verify get_knowledge_config returns config."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
await client.get_knowledge_config()
assert "/knowledge/config" in str(mock_get.call_args)
@pytest.mark.asyncio
async def test_run_agent():
"""Verify run_agent executes an agent run."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"run_id": "run-123",
"agent_id": "agent-1",
"content": "Hello! How can I help?",
"created_at": 1234567890,
}
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = mock_data
result = await client.run_agent(
agent_id="agent-1",
message="Hello",
)
mock_post.assert_called_once()
assert result.run_id == "run-123"
assert result.content == "Hello! How can I help?"
@pytest.mark.asyncio
async def test_run_team():
"""Verify run_team executes a team run."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"run_id": "run-123",
"team_id": "team-1",
"content": "Team response",
"created_at": 1234567890,
}
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = mock_data
result = await client.run_team(
team_id="team-1",
message="Hello team",
)
mock_post.assert_called_once()
assert result.run_id == "run-123"
@pytest.mark.asyncio
async def test_run_workflow():
"""Verify run_workflow executes a workflow run."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"run_id": "run-123",
"workflow_id": "workflow-1",
"content": "Workflow output",
"created_at": 1234567890,
}
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = mock_data
result = await client.run_workflow(
workflow_id="workflow-1",
message="Start workflow",
)
mock_post.assert_called_once()
assert result.run_id == "run-123"
@pytest.mark.asyncio
async def test_cancel_agent_run():
"""Verify cancel_agent_run cancels a run."""
client = AgentOSClient(base_url="http://localhost:7777")
with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
mock_post.return_value = None
await client.cancel_agent_run("agent-1", "run-123")
mock_post.assert_called_once()
assert "/agents/agent-1/runs/run-123/cancel" in str(mock_post.call_args)
@pytest.mark.asyncio
async def test_headers_passed_through():
"""Verify headers are passed through to requests."""
client = AgentOSClient(base_url="http://localhost:7777")
mock_data = {
"os_id": "test-os",
"name": "Test OS",
"databases": ["db-1"],
"agents": [],
"teams": [],
"workflows": [],
"interfaces": [],
}
with patch.object(client, "_aget", new_callable=AsyncMock) as mock_get:
mock_get.return_value = mock_data
headers = {"Authorization": "Bearer test-token", "X-Custom": "value"}
await client.aget_config(headers=headers)
mock_get.assert_called_once_with("/config", headers=headers)
# Streaming Methods Tests
@pytest.mark.asyncio
async def test_run_agent_stream_returns_typed_events():
"""Verify run_agent_stream yields typed RunOutputEvent objects."""
from agno.run.agent import RunCompletedEvent, RunContentEvent, RunStartedEvent
client = AgentOSClient(base_url="http://localhost:7777")
# Mock SSE lines
mock_lines = [
'data: {"event": "RunStarted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
'data: {"event": "RunContent", "content": "Hello", "content_type": "str", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
'data: {"event": "RunCompleted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
]
async def async_generator():
for line in mock_lines:
yield line
with patch.object(client, "_astream_post_form_data") as mock_stream:
mock_stream.return_value = async_generator()
events = []
async for event in client.run_agent_stream("agent-123", "test message"):
events.append(event)
assert len(events) == 3
assert isinstance(events[0], RunStartedEvent)
assert isinstance(events[1], RunContentEvent)
assert events[1].content == "Hello"
assert isinstance(events[2], RunCompletedEvent)
@pytest.mark.asyncio
async def test_stream_handles_invalid_json():
"""Verify invalid JSON is logged and skipped."""
from agno.run.agent import RunCompletedEvent, RunStartedEvent
client = AgentOSClient(base_url="http://localhost:7777")
mock_lines = [
'data: {"event": "RunStarted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
"data: {invalid json}", # Bad JSON
'data: {"event": "RunCompleted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
]
async def async_generator():
for line in mock_lines:
yield line
with patch.object(client, "_astream_post_form_data") as mock_stream:
mock_stream.return_value = async_generator()
events = []
with patch("agno.utils.log.logger") as mock_logger:
async for event in client.run_agent_stream("agent-123", "test"):
events.append(event)
# Should skip invalid event and continue
assert len(events) == 2
assert isinstance(events[0], RunStartedEvent)
assert isinstance(events[1], RunCompletedEvent)
assert mock_logger.error.called
@pytest.mark.asyncio
async def test_stream_handles_unknown_event_type():
    """An unrecognized event type is logged and dropped; known events still flow."""
    from agno.run.agent import RunCompletedEvent, RunStartedEvent

    os_client = AgentOSClient(base_url="http://localhost:7777")
    sse_lines = [
        'data: {"event": "RunStarted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
        'data: {"event": "FutureEventType", "data": "something"}',  # Unknown type
        'data: {"event": "RunCompleted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
    ]

    async def fake_sse_feed():
        for raw_line in sse_lines:
            yield raw_line

    with patch.object(os_client, "_astream_post_form_data") as stream_mock:
        stream_mock.return_value = fake_sse_feed()
        with patch("agno.utils.log.logger") as logger_mock:
            collected = [evt async for evt in os_client.run_agent_stream("agent-123", "test")]

        # Only the two known events survive, in order.
        assert len(collected) == 2
        assert isinstance(collected[0], RunStartedEvent)
        assert isinstance(collected[1], RunCompletedEvent)
        # The unknown event type must be reported via the logger.
        assert logger_mock.error.called
@pytest.mark.asyncio
async def test_stream_handles_empty_lines():
    """Blank lines and SSE comment lines must not produce events."""
    from agno.run.agent import RunCompletedEvent, RunStartedEvent

    os_client = AgentOSClient(base_url="http://localhost:7777")
    sse_lines = [
        "",  # Empty line
        ": comment",  # SSE comment
        'data: {"event": "RunStarted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
        "",  # Another empty line
        'data: {"event": "RunCompleted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
    ]

    async def fake_sse_feed():
        for raw_line in sse_lines:
            yield raw_line

    with patch.object(os_client, "_astream_post_form_data") as stream_mock:
        stream_mock.return_value = fake_sse_feed()
        collected = [evt async for evt in os_client.run_agent_stream("agent-123", "test")]

    # Only the two real data events are yielded.
    assert len(collected) == 2
    assert isinstance(collected[0], RunStartedEvent)
    assert isinstance(collected[1], RunCompletedEvent)
@pytest.mark.asyncio
async def test_run_team_stream_returns_typed_events():
    """run_team_stream yields typed run events parsed from the SSE stream."""
    from agno.run.agent import RunCompletedEvent, RunStartedEvent

    os_client = AgentOSClient(base_url="http://localhost:7777")
    # Team runs can emit agent events
    sse_lines = [
        'data: {"event": "RunStarted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
        'data: {"event": "RunCompleted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
    ]

    async def fake_sse_feed():
        for raw_line in sse_lines:
            yield raw_line

    with patch.object(os_client, "_astream_post_form_data") as stream_mock:
        stream_mock.return_value = fake_sse_feed()
        collected = [evt async for evt in os_client.run_team_stream("team-123", "test message")]

    assert len(collected) == 2
    assert isinstance(collected[0], RunStartedEvent)
    assert isinstance(collected[1], RunCompletedEvent)
@pytest.mark.asyncio
async def test_run_workflow_stream_returns_typed_events():
    """run_workflow_stream yields typed run events parsed from the SSE stream."""
    from agno.run.agent import RunCompletedEvent, RunStartedEvent

    os_client = AgentOSClient(base_url="http://localhost:7777")
    # Workflow runs can emit agent events
    sse_lines = [
        'data: {"event": "RunStarted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
        'data: {"event": "RunCompleted", "run_id": "run-123", "agent_id": "agent-1", "created_at": 1234567890}',
    ]

    async def fake_sse_feed():
        for raw_line in sse_lines:
            yield raw_line

    with patch.object(os_client, "_astream_post_form_data") as stream_mock:
        stream_mock.return_value = fake_sse_feed()
        collected = [evt async for evt in os_client.run_workflow_stream("workflow-123", "test message")]

    assert len(collected) == 2
    assert isinstance(collected[0], RunStartedEvent)
    assert isinstance(collected[1], RunCompletedEvent)
# Allow running this test module directly (outside the pytest CLI).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_client.py",
"license": "Apache License 2.0",
"lines": 611,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/os/routers/database.py | from typing import TYPE_CHECKING, Optional
from fastapi import (
APIRouter,
Depends,
HTTPException,
)
from fastapi.responses import JSONResponse
from packaging import version
from agno.db.base import AsyncBaseDb
from agno.db.migrations.manager import MigrationManager
from agno.os.auth import get_authentication_dependency
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import (
get_db,
)
from agno.remote.base import RemoteDb
from agno.utils.log import log_info
if TYPE_CHECKING:
from agno.os.app import AgentOS
def get_database_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
) -> APIRouter:
    """Create the database router with comprehensive OpenAPI documentation.

    Exposes schema-migration endpoints for the databases registered on the
    given AgentOS instance:
      - POST /databases/all/migrate
      - POST /databases/{db_id}/migrate

    Args:
        os: The AgentOS whose databases are exposed.
        settings: API settings used to build the authentication dependency.

    Returns:
        The configured APIRouter.
    """
    router = APIRouter(
        # Every route in this router requires authentication.
        dependencies=[Depends(get_authentication_dependency(settings))],
        responses={
            400: {"description": "Bad Request", "model": BadRequestResponse},
            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
            404: {"description": "Not Found", "model": NotFoundResponse},
            422: {"description": "Validation Error", "model": ValidationErrorResponse},
            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
        },
    )

    async def _migrate_single_db(db, target_version: Optional[str] = None) -> None:
        """Migrate a single database.

        Args:
            db: The database to migrate (sync or async implementation).
            target_version: Version to migrate to. When omitted, the database
                is migrated up to the latest available version.
        """
        if isinstance(db, RemoteDb):
            # NOTE(review): this branch only logs and then falls through to the
            # migration logic below — confirm whether an early `return` was
            # intended here to actually skip remote databases.
            log_info("Skipping logs for remote DB")
        if target_version:
            # Use the session table as proxy for the database schema version
            if isinstance(db, AsyncBaseDb):
                current_version = await db.get_latest_schema_version(db.session_table_name)
            else:
                current_version = db.get_latest_schema_version(db.session_table_name)
            # Upgrade when the target is newer than the current schema version,
            # downgrade otherwise.
            if version.parse(target_version) > version.parse(current_version):  # type: ignore
                await MigrationManager(db).up(target_version)  # type: ignore
            else:
                await MigrationManager(db).down(target_version)  # type: ignore
        else:
            # If the target version is not provided, migrate to the latest version
            await MigrationManager(db).up()  # type: ignore

    @router.post(
        "/databases/all/migrate",
        tags=["Database"],
        operation_id="migrate_all_databases",
        summary="Migrate All Databases",
        description=(
            "Migrate all database schemas to the given target version. "
            "If a target version is not provided, all databases will be migrated to the latest version."
        ),
        responses={
            200: {
                "description": "All databases migrated successfully",
                "content": {
                    "application/json": {
                        "example": {"message": "All databases migrated successfully to version 3.0.0"},
                    }
                },
            },
            500: {"description": "Failed to migrate databases", "model": InternalServerErrorResponse},
        },
    )
    async def migrate_all_databases(target_version: Optional[str] = None):
        """Migrate all databases."""
        # De-duplicate by database id: the same database may back several components.
        all_dbs = {db.id: db for db_id, dbs in os.dbs.items() for db in dbs}
        failed_dbs: dict[str, str] = {}
        for db_id, db in all_dbs.items():
            try:
                await _migrate_single_db(db, target_version)
            except Exception as e:
                # Keep going so one broken database doesn't block the rest;
                # failures are collected and reported per database id.
                failed_dbs[db_id] = str(e)

        version_msg = f"version {target_version}" if target_version else "latest version"
        migrated_count = len(all_dbs) - len(failed_dbs)

        if failed_dbs:
            # Partial success: report per-db failures with a Multi-Status code.
            return JSONResponse(
                content={
                    "message": f"Migrated {migrated_count}/{len(all_dbs)} databases to {version_msg}",
                    "failed": failed_dbs,
                },
                status_code=207,  # Multi-Status
            )

        return JSONResponse(
            content={"message": f"All databases migrated successfully to {version_msg}"}, status_code=200
        )

    @router.post(
        "/databases/{db_id}/migrate",
        tags=["Database"],
        operation_id="migrate_database",
        summary="Migrate Database",
        description=(
            "Migrate the given database schema to the given target version. "
            "If a target version is not provided, the database will be migrated to the latest version."
        ),
        responses={
            200: {
                "description": "Database migrated successfully",
                "content": {
                    "application/json": {
                        "example": {"message": "Database migrated successfully to version 3.0.0"},
                    }
                },
            },
            404: {"description": "Database not found", "model": NotFoundResponse},
            500: {"description": "Failed to migrate database", "model": InternalServerErrorResponse},
        },
    )
    async def migrate_database(db_id: str, target_version: Optional[str] = None):
        # Resolve the database by id; 404 when unknown.
        db = await get_db(os.dbs, db_id)
        if not db:
            raise HTTPException(status_code=404, detail="Database not found")
        try:
            await _migrate_single_db(db, target_version)
            version_msg = f"version {target_version}" if target_version else "latest version"
            return JSONResponse(
                content={"message": f"Database migrated successfully to {version_msg}"}, status_code=200
            )
        except HTTPException:
            # Re-raise HTTP errors untouched so FastAPI returns them as-is.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Failed to migrate database: {str(e)}")

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/database.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/os/test_db_migrations.py | """Tests for database migration endpoints in AgentOS."""
import os
import tempfile
import time
import pytest
from fastapi.testclient import TestClient
from sqlalchemy import BigInteger, Column, MetaData, String, Table, text
from sqlalchemy.engine import create_engine
from sqlalchemy.types import JSON
from agno.agent.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.os import AgentOS
def create_old_schema_memory_table(db_file: str, table_name: str) -> None:
    """Create a memory table with the old schema (v2.0.0) - without created_at and feedback columns."""
    engine = create_engine(f"sqlite:///{db_file}")
    md = MetaData()

    # Legacy column set: deliberately omits created_at and feedback.
    legacy_columns = (
        Column("memory_id", String, primary_key=True, nullable=False),
        Column("memory", JSON, nullable=False),
        Column("input", String, nullable=True),
        Column("agent_id", String, nullable=True),
        Column("team_id", String, nullable=True),
        Column("user_id", String, nullable=True),
        Column("topics", JSON, nullable=True),
        Column("updated_at", BigInteger, nullable=True),
    )
    Table(table_name, md, *legacy_columns)

    md.create_all(engine)
    engine.dispose()
def create_versions_table_with_old_version(db_file: str, table_name: str, memory_table_name: str) -> None:
    """Create a versions table and set the memory table version to 2.0.0."""
    engine = create_engine(f"sqlite:///{db_file}")
    md = MetaData()

    versions = Table(
        table_name,
        md,
        Column("table_name", String, primary_key=True, nullable=False),
        Column("version", String, nullable=False),
        Column("created_at", String, nullable=False),
        Column("updated_at", String, nullable=True),
    )
    md.create_all(engine)

    # Record the memory table as being at the legacy schema version.
    legacy_row = dict(
        table_name=memory_table_name,
        version="2.0.0",
        created_at=str(int(time.time())),
        updated_at=None,
    )
    with engine.connect() as conn:
        conn.execute(versions.insert().values(**legacy_row))
        conn.commit()

    engine.dispose()
def get_memory_table_columns(db_file: str, table_name: str) -> set:
    """Return the set of column names currently present in *table_name*."""
    engine = create_engine(f"sqlite:///{db_file}")
    try:
        with engine.connect() as conn:
            rows = conn.execute(text(f"PRAGMA table_info({table_name})")).fetchall()
        # Column name is the second field of each PRAGMA table_info row.
        return {row[1] for row in rows}
    finally:
        engine.dispose()
def get_schema_version(db: SqliteDb, table_name: str) -> str:
    """Get the schema version for a table.

    Thin helper so tests can assert on the version string directly.
    """
    return db.get_latest_schema_version(table_name)
@pytest.fixture
def temp_db_file():
    """Create a temporary SQLite database file.

    Yields the file path; the file is removed on teardown. delete=False keeps
    the file on disk after the handle is closed so SQLite can reopen it.
    """
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as temp_file:
        db_path = temp_file.name

    yield db_path

    # Clean up
    if os.path.exists(db_path):
        os.unlink(db_path)
@pytest.fixture
def old_schema_dbs(temp_db_file):
    """Create multiple SqliteDb instances with old schema pointing to the same file.

    Builds three legacy (v2.0.0) memory tables plus a shared versions table in
    one SQLite file, then returns the file path, the SqliteDb handles, and the
    memory table names.
    """
    # Create old schema tables for each agent
    memory_tables = ["agent1_memories", "agent2_memories", "agent3_memories"]
    versions_table = "agno_versions"

    for memory_table in memory_tables:
        create_old_schema_memory_table(temp_db_file, memory_table)

    # Create versions table with old versions for each memory table
    engine = create_engine(f"sqlite:///{temp_db_file}")
    metadata = MetaData()
    versions_table_obj = Table(
        versions_table,
        metadata,
        Column("table_name", String, primary_key=True, nullable=False),
        Column("version", String, nullable=False),
        Column("created_at", String, nullable=False),
        Column("updated_at", String, nullable=True),
    )
    metadata.create_all(engine)

    # Mark every memory table as being at the legacy schema version.
    with engine.connect() as conn:
        for memory_table in memory_tables:
            conn.execute(
                versions_table_obj.insert().values(
                    table_name=memory_table,
                    version="2.0.0",
                    created_at=str(int(time.time())),
                    updated_at=None,
                )
            )
        conn.commit()
    engine.dispose()

    # Create SqliteDb instances — all share the file but use distinct tables.
    db1 = SqliteDb(
        db_file=temp_db_file,
        id="db-1",
        memory_table="agent1_memories",
        session_table="agent1_sessions",
    )
    db2 = SqliteDb(
        db_file=temp_db_file,
        id="db-2",
        memory_table="agent2_memories",
        session_table="agent2_sessions",
    )
    db3 = SqliteDb(
        db_file=temp_db_file,
        id="db-3",
        memory_table="agent3_memories",
        session_table="agent3_sessions",
    )

    return {
        "db_file": temp_db_file,
        "dbs": [db1, db2, db3],
        "memory_tables": memory_tables,
    }
@pytest.fixture
def os_client_with_old_schema_dbs(old_schema_dbs):
    """Create an AgentOS with multiple agents having different DBs with old schema.

    Returns a dict bundling the TestClient, the shared db file path, the
    SqliteDb instances, the memory table names, and the AgentOS itself.
    """
    dbs = old_schema_dbs["dbs"]

    # One agent per database so each database is registered with the OS.
    agent1 = Agent(name="agent-1", id="agent-1-id", db=dbs[0])
    agent2 = Agent(name="agent-2", id="agent-2-id", db=dbs[1])
    agent3 = Agent(name="agent-3", id="agent-3-id", db=dbs[2])

    agent_os = AgentOS(
        id="test-os",
        agents=[agent1, agent2, agent3],
    )
    app = agent_os.get_app()
    client = TestClient(app)

    return {
        "client": client,
        "db_file": old_schema_dbs["db_file"],
        "dbs": dbs,
        "memory_tables": old_schema_dbs["memory_tables"],
        "agent_os": agent_os,
    }
def test_migrate_single_db_success(os_client_with_old_schema_dbs):
    """Migrating one database upgrades that database's schema and no other."""
    ctx = os_client_with_old_schema_dbs
    client, dbs = ctx["client"], ctx["dbs"]
    db_file, memory_tables = ctx["db_file"], ctx["memory_tables"]

    # Legacy state: new columns absent, version pinned at 2.0.0.
    columns_before = get_memory_table_columns(db_file, memory_tables[0])
    assert "created_at" not in columns_before
    assert "feedback" not in columns_before
    assert get_schema_version(dbs[0], memory_tables[0]) == "2.0.0"

    # Migrate only the first database.
    response = client.post(f"/databases/{dbs[0].id}/migrate")
    assert response.status_code == 200
    assert "migrated successfully" in response.json()["message"]

    # Upgraded state: new columns present, version bumped.
    columns_after = get_memory_table_columns(db_file, memory_tables[0])
    assert "created_at" in columns_after
    assert "feedback" in columns_after
    assert get_schema_version(dbs[0], memory_tables[0]) == "2.5.0"

    # The second database must remain at the legacy version.
    assert get_schema_version(dbs[1], memory_tables[1]) == "2.0.0"
def test_migrate_single_db_not_found(os_client_with_old_schema_dbs):
    """Requesting a migration for an unknown database id yields a 404."""
    client = os_client_with_old_schema_dbs["client"]

    response = client.post("/databases/non-existent-db/migrate")

    assert response.status_code == 404
    assert response.json()["detail"] == "No database found with id 'non-existent-db'"
def test_migrate_single_db_to_specific_version(os_client_with_old_schema_dbs):
    """An explicit target_version query parameter pins the resulting schema version."""
    ctx = os_client_with_old_schema_dbs
    target_db = ctx["dbs"][0]
    table = ctx["memory_tables"][0]

    # Migrate to version 2.3.0 explicitly instead of the latest.
    response = ctx["client"].post(f"/databases/{target_db.id}/migrate?target_version=2.3.0")
    assert response.status_code == 200

    assert get_schema_version(target_db, table) == "2.3.0"
def test_migrate_all_dbs_success(os_client_with_old_schema_dbs):
    """The bulk endpoint migrates every registered database to the latest schema."""
    ctx = os_client_with_old_schema_dbs
    client, dbs = ctx["client"], ctx["dbs"]
    db_file, memory_tables = ctx["db_file"], ctx["memory_tables"]

    # Every database starts at the legacy schema.
    for idx, db in enumerate(dbs):
        assert get_schema_version(db, memory_tables[idx]) == "2.0.0", f"DB {idx} should be at version 2.0.0"
        cols = get_memory_table_columns(db_file, memory_tables[idx])
        assert "created_at" not in cols
        assert "feedback" not in cols

    # Migrate all databases in one call.
    response = client.post("/databases/all/migrate")
    assert response.status_code == 200
    assert "All databases migrated successfully" in response.json()["message"]

    # Every database is now at the new schema version.
    for idx, db in enumerate(dbs):
        assert get_schema_version(db, memory_tables[idx]) == "2.5.0", f"DB {idx} should be at version 2.5.0"
        cols = get_memory_table_columns(db_file, memory_tables[idx])
        assert "created_at" in cols
        assert "feedback" in cols
def test_migrate_all_dbs_to_specific_version(os_client_with_old_schema_dbs):
    """The bulk endpoint honors an explicit target_version for every database."""
    ctx = os_client_with_old_schema_dbs

    # Migrate everything to 2.3.0 rather than the latest.
    response = ctx["client"].post("/databases/all/migrate?target_version=2.3.0")
    assert response.status_code == 200

    for idx, db in enumerate(ctx["dbs"]):
        assert get_schema_version(db, ctx["memory_tables"][idx]) == "2.3.0"
def test_migrate_all_dbs_partial_failure(temp_db_file):
    """Test that migration continues even if one database fails.

    One database points at a real legacy table; the other points at a table
    that is registered in the versions table but never created, so its
    migration may fail. The bulk endpoint must still migrate the good one.
    """
    # Create two DBs - one with old schema, one that will fail
    memory_table_good = "good_memories"
    memory_table_bad = "bad_memories"
    versions_table = "agno_versions"

    # Create good old schema table
    create_old_schema_memory_table(temp_db_file, memory_table_good)

    # Create versions table
    engine = create_engine(f"sqlite:///{temp_db_file}")
    metadata = MetaData()
    versions_table_obj = Table(
        versions_table,
        metadata,
        Column("table_name", String, primary_key=True, nullable=False),
        Column("version", String, nullable=False),
        Column("created_at", String, nullable=False),
        Column("updated_at", String, nullable=True),
    )
    metadata.create_all(engine)

    with engine.connect() as conn:
        conn.execute(
            versions_table_obj.insert().values(
                table_name=memory_table_good,
                version="2.0.0",
                created_at=str(int(time.time())),
                updated_at=None,
            )
        )
        # Set bad table to old version but don't create the table
        conn.execute(
            versions_table_obj.insert().values(
                table_name=memory_table_bad,
                version="2.0.0",
                created_at=str(int(time.time())),
                updated_at=None,
            )
        )
        conn.commit()
    engine.dispose()

    db_good = SqliteDb(
        db_file=temp_db_file,
        id="db-good",
        memory_table=memory_table_good,
        session_table="good_sessions",
    )
    db_bad = SqliteDb(
        db_file=temp_db_file,
        id="db-bad",
        memory_table=memory_table_bad,  # Table doesn't exist
        session_table="bad_sessions",
    )

    agent_good = Agent(name="agent-good", id="agent-good-id", db=db_good)
    agent_bad = Agent(name="agent-bad", id="agent-bad-id", db=db_bad)

    agent_os = AgentOS(id="test-os", agents=[agent_good, agent_bad])
    app = agent_os.get_app()
    client = TestClient(app)

    # Migrate all - should continue even if one fails
    response = client.post("/databases/all/migrate")

    # Should return 207 Multi-Status when some fail
    # Or 200 if the bad table is just skipped without error
    assert response.status_code in [200, 207]

    # The good database should still be migrated
    version_good = get_schema_version(db_good, memory_table_good)
    assert version_good == "2.5.0"
    columns = get_memory_table_columns(temp_db_file, memory_table_good)
    assert "created_at" in columns
    assert "feedback" in columns
def test_migrate_already_migrated_db(os_client_with_old_schema_dbs):
    """Running the same migration twice is idempotent."""
    ctx = os_client_with_old_schema_dbs
    db = ctx["dbs"][0]
    table = ctx["memory_tables"][0]

    # First run performs the upgrade.
    first = ctx["client"].post(f"/databases/{db.id}/migrate")
    assert first.status_code == 200
    assert get_schema_version(db, table) == "2.5.0"

    # Second run must succeed and leave the version unchanged.
    second = ctx["client"].post(f"/databases/{db.id}/migrate")
    assert second.status_code == 200
    assert get_schema_version(db, table) == "2.5.0"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_db_migrations.py",
"license": "Apache License 2.0",
"lines": 315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_tool_error_events.py | from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.models.response import ModelResponse, ModelResponseEvent, ToolExecution
from agno.run.agent import RunEvent, RunOutput, ToolCallErrorEvent
from agno.run.messages import RunMessages
from agno.run.team import TeamRunEvent, TeamRunOutput
from agno.run.team import ToolCallErrorEvent as TeamToolCallErrorEvent
from agno.session import TeamSession
from agno.team import Team
def test_agent_yields_tool_call_error_event(mocker):
    """An agent run emits a ToolCallErrorEvent when a tool execution fails."""
    # Mock the model to return a tool call and then a tool result with error
    mock_model = mocker.Mock(spec=OpenAIChat)

    # Create an agent with the mock model
    agent = Agent(model=mock_model)

    # Mock model.get_function_call_to_run_from_tool_execution
    mock_function_call = mocker.Mock()
    mock_function_call.get_call_str.return_value = "test_tool()"
    mock_function_call.call_id = "call_1"
    mock_function_call.function.name = "test_tool"
    mock_function_call.arguments = {}
    mock_model.get_function_call_to_run_from_tool_execution.return_value = mock_function_call

    # Mock model.run_function_call to yield ToolCallStarted and then ToolCallCompleted with error
    # tool_call_error=True is what should trigger the error event downstream.
    tool_execution = ToolExecution(
        tool_call_id="call_1", tool_name="test_tool", tool_args={}, tool_call_error=True, result="Tool failed"
    )
    mock_model.run_function_call.return_value = [
        ModelResponse(event=ModelResponseEvent.tool_call_started.value),
        ModelResponse(event=ModelResponseEvent.tool_call_completed.value, tool_executions=[tool_execution]),
    ]

    # Run _run_tool and collect events
    run_response = RunOutput(run_id="run_1", agent_id="agent_1", agent_name="Agent")
    run_messages = RunMessages()

    from agno.agent._tools import run_tool

    events = list(
        run_tool(agent, run_response=run_response, run_messages=run_messages, tool=tool_execution, stream_events=True)
    )

    # Verify events: started, completed AND the error event are all emitted.
    event_types = [e.event for e in events]
    assert RunEvent.tool_call_started.value in event_types
    assert RunEvent.tool_call_completed.value in event_types
    assert RunEvent.tool_call_error.value in event_types

    # Verify the ToolCallErrorEvent details
    error_event = next(e for e in events if e.event == RunEvent.tool_call_error.value)
    assert isinstance(error_event, ToolCallErrorEvent)
    assert error_event.tool.tool_call_id == "call_1"  # type: ignore
    assert error_event.error == "Tool failed"
def test_team_yields_tool_call_error_event(mocker):
    """A team run emits a team-scoped ToolCallErrorEvent when a tool execution fails."""
    # Mock model
    mock_model = mocker.Mock(spec=OpenAIChat)

    # Create a team
    agent1 = Agent(name="Agent1", model=mock_model)
    team = Team(members=[agent1], model=mock_model)

    # Setup session and run_response
    session = TeamSession(session_id="session_1")
    run_response = TeamRunOutput(run_id="run_1", team_id="team_1", team_name="Team")

    # Tool execution with error — tool_call_error=True should trigger the error event.
    tool_execution = ToolExecution(
        tool_call_id="call_1", tool_name="test_tool", tool_args={}, tool_call_error=True, result="Tool failed"
    )

    # ModelResponse event for tool completion
    model_response_event = ModelResponse(
        event=ModelResponseEvent.tool_call_completed.value, tool_executions=[tool_execution], content="Tool result"
    )

    full_model_response = ModelResponse()

    # Run _handle_model_response_chunk and collect the streamed events.
    events = list(
        team._handle_model_response_chunk(
            session=session,
            run_response=run_response,
            full_model_response=full_model_response,
            model_response_event=model_response_event,
            stream_events=True,
        )
    )

    # Verify events: both the completion and the error event are emitted.
    event_types = [e.event for e in events]
    assert TeamRunEvent.tool_call_completed.value in event_types
    assert TeamRunEvent.tool_call_error.value in event_types

    # Verify the ToolCallErrorEvent details
    error_event = next(e for e in events if e.event == TeamRunEvent.tool_call_error.value)
    assert isinstance(error_event, TeamToolCallErrorEvent)
    assert error_event.tool.tool_call_id == "call_1"  # type: ignore
    assert error_event.error == "Tool failed"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_tool_error_events.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_duplicate_ids.py | """Tests for duplicate ID validation in AgentOS."""
import pytest
from agno.agent.agent import Agent
from agno.os import AgentOS
from agno.team.team import Team
from agno.workflow.workflow import Workflow
def test_duplicate_agent_ids_with_explicit_ids_raises_error():
    """Two agents sharing one explicit ID must be rejected at construction."""
    first = Agent(name="Agent 1", id="same-id", telemetry=False)
    second = Agent(name="Agent 2", id="same-id", telemetry=False)

    with pytest.raises(ValueError) as excinfo:
        AgentOS(agents=[first, second], telemetry=False)

    message = str(excinfo.value)
    assert "Duplicate IDs found in AgentOS" in message
    assert "same-id" in message
def test_duplicate_agent_ids_from_same_name_raises_error():
    """Identical names generate identical IDs, which must be rejected."""
    first = Agent(name="My Agent", telemetry=False)
    second = Agent(name="My Agent", telemetry=False)

    with pytest.raises(ValueError) as excinfo:
        AgentOS(agents=[first, second], telemetry=False)

    assert "Duplicate IDs found in AgentOS" in str(excinfo.value)
def test_unique_agent_ids_work_correctly():
    """Agents with distinct IDs are accepted without error."""
    agents = [
        Agent(name="Agent 1", id="agent-1", telemetry=False),
        Agent(name="Agent 2", id="agent-2", telemetry=False),
    ]

    # Should not raise
    os = AgentOS(agents=agents, telemetry=False)
    assert len(os.agents) == 2
def test_single_agent_works():
    """A lone agent can never collide with itself and must be accepted."""
    solo = Agent(name="Single Agent", id="single-id", telemetry=False)

    os = AgentOS(agents=[solo], telemetry=False)
    assert len(os.agents) == 1
def test_duplicate_team_ids_with_explicit_ids_raises_error():
    """Two teams sharing one explicit ID must be rejected at construction."""
    member_a = Agent(name="Agent 1", telemetry=False)
    member_b = Agent(name="Agent 2", telemetry=False)
    first = Team(name="Team 1", id="same-team-id", members=[member_a])
    second = Team(name="Team 2", id="same-team-id", members=[member_b])

    with pytest.raises(ValueError) as excinfo:
        AgentOS(teams=[first, second], telemetry=False)

    message = str(excinfo.value)
    assert "Duplicate IDs found in AgentOS" in message
    assert "same-team-id" in message
def test_duplicate_team_ids_from_same_name_raises_error():
    """Teams with identical names generate identical IDs and must be rejected."""
    member_a = Agent(name="Agent 1", telemetry=False)
    member_b = Agent(name="Agent 2", telemetry=False)
    first = Team(name="My Team", members=[member_a])
    second = Team(name="My Team", members=[member_b])

    with pytest.raises(ValueError) as excinfo:
        AgentOS(teams=[first, second], telemetry=False)

    assert "Duplicate IDs found in AgentOS" in str(excinfo.value)
def test_unique_team_ids_work_correctly():
    """Teams with distinct IDs are accepted without error."""
    member_a = Agent(name="Agent 1", telemetry=False)
    member_b = Agent(name="Agent 2", telemetry=False)
    teams = [
        Team(name="Team 1", id="team-1", members=[member_a]),
        Team(name="Team 2", id="team-2", members=[member_b]),
    ]

    os = AgentOS(teams=teams, telemetry=False)
    assert len(os.teams) == 2
def test_duplicate_workflow_ids_with_explicit_ids_raises_error():
    """Two workflows sharing one explicit ID must be rejected at construction."""
    first = Workflow(name="Workflow 1", id="same-workflow-id")
    second = Workflow(name="Workflow 2", id="same-workflow-id")

    with pytest.raises(ValueError) as excinfo:
        AgentOS(workflows=[first, second], telemetry=False)

    message = str(excinfo.value)
    assert "Duplicate IDs found in AgentOS" in message
    assert "same-workflow-id" in message
def test_duplicate_workflow_ids_from_same_name_raises_error():
    """Workflows with identical names generate identical IDs and must be rejected."""
    first = Workflow(name="My Workflow")
    second = Workflow(name="My Workflow")

    with pytest.raises(ValueError) as excinfo:
        AgentOS(workflows=[first, second], telemetry=False)

    assert "Duplicate IDs found in AgentOS" in str(excinfo.value)
def test_unique_workflow_ids_work_correctly():
    """Workflows with distinct IDs are accepted without error."""
    workflows = [
        Workflow(name="Workflow 1", id="workflow-1"),
        Workflow(name="Workflow 2", id="workflow-2"),
    ]

    os = AgentOS(workflows=workflows, telemetry=False)
    assert len(os.workflows) == 2
# Mixed component tests


def test_mixed_components_with_unique_ids():
    """An OS holding an agent, a team, and a workflow with distinct IDs is valid."""
    agent = Agent(name="Agent", id="agent-id", telemetry=False)
    team = Team(name="Team", id="team-id", members=[Agent(name="Team Agent", telemetry=False)])
    workflow = Workflow(name="Workflow", id="workflow-id")

    os = AgentOS(agents=[agent], teams=[team], workflows=[workflow], telemetry=False)

    assert (len(os.agents), len(os.teams), len(os.workflows)) == (1, 1, 1)
def test_error_message_contains_duplicate_id():
    """The ValueError text must mention the offending ID."""
    first = Agent(name="First Agent", id="duplicate-id", telemetry=False)
    second = Agent(name="Second Agent", id="duplicate-id", telemetry=False)

    with pytest.raises(ValueError) as excinfo:
        AgentOS(agents=[first, second], telemetry=False)

    assert "duplicate-id" in str(excinfo.value)
def test_multiple_duplicate_ids_all_reported():
    """Every colliding ID must appear in a single combined error."""
    colliding_agents = [
        Agent(name="Agent 1", id="dup-1", telemetry=False),
        Agent(name="Agent 2", id="dup-1", telemetry=False),
        Agent(name="Agent 3", id="dup-2", telemetry=False),
        Agent(name="Agent 4", id="dup-2", telemetry=False),
    ]

    with pytest.raises(ValueError) as excinfo:
        AgentOS(agents=colliding_agents, telemetry=False)

    message = str(excinfo.value)
    assert "Duplicate IDs found in AgentOS" in message
    assert "dup-1" in message
    assert "dup-2" in message
def test_same_id_across_different_entity_types_allowed():
    """Uniqueness is enforced per entity type, not globally across types."""
    shared_id = "shared-entity-id"
    agent = Agent(name="Test Agent", id=shared_id, telemetry=False)
    team = Team(name="Test Team", id=shared_id, members=[Agent(name="Team Member", telemetry=False)])
    workflow = Workflow(name="Test Workflow", id=shared_id)

    # Should NOT raise - same ID across different types is OK
    app = AgentOS(agents=[agent], teams=[team], workflows=[workflow], telemetry=False)

    assert app is not None
    assert len(app.agents) == 1
    assert len(app.teams) == 1
    assert len(app.workflows) == 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_duplicate_ids.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/reasoning/manager.py | """
ReasoningManager - Centralized manager for all reasoning operations.
This module consolidates reasoning logic from the Agent class into a single,
maintainable manager that handles:
- Native reasoning models (DeepSeek, Anthropic, OpenAI, Gemini, etc.)
- Default Chain-of-Thought reasoning
- Both streaming and non-streaming modes
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import (
TYPE_CHECKING,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Literal,
Optional,
Tuple,
Union,
)
from agno.models.base import Model
from agno.models.message import Message
from agno.reasoning.step import NextAction, ReasoningStep, ReasoningSteps
from agno.run.base import RunContext
from agno.run.messages import RunMessages
from agno.tools import Toolkit
from agno.tools.function import Function
from agno.utils.log import log_debug, log_error, log_info, log_warning
if TYPE_CHECKING:
from agno.agent import Agent
from agno.metrics import RunMetrics
from agno.run.agent import RunOutput
class ReasoningEventType(str, Enum):
    """Types of reasoning events that can be emitted by ReasoningManager."""

    # Reasoning has begun for the current run.
    started = "reasoning_started"
    # A chunk of streamed reasoning text is available (streaming mode only).
    content_delta = "reasoning_content_delta"
    # A structured reasoning step was produced (default CoT reasoning).
    step = "reasoning_step"
    # Reasoning finished; the event carries the accumulated steps/messages.
    completed = "reasoning_completed"
    # Reasoning failed; the event carries an error description.
    error = "reasoning_error"
@dataclass
class ReasoningEvent:
    """
    A unified reasoning event that can be converted to Agent or Team specific events.

    This allows the ReasoningManager to emit events without knowing about the
    specific event types used by Agent or Team. Which optional fields are
    populated depends on `event_type` (see the per-field comments below).
    """

    event_type: ReasoningEventType
    # For content_delta events: the streamed reasoning text chunk.
    reasoning_content: Optional[str] = None
    # For step events: the single structured step just produced.
    reasoning_step: Optional[ReasoningStep] = None
    # For completed events: all steps accumulated during the run.
    reasoning_steps: List[ReasoningStep] = field(default_factory=list)
    # For error events: a human-readable failure description.
    error: Optional[str] = None
    # The message to append to run_messages (for native reasoning).
    message: Optional[Message] = None
    # All reasoning messages (for updating run_output).
    reasoning_messages: List[Message] = field(default_factory=list)
@dataclass
class ReasoningConfig:
    """Configuration for reasoning operations."""

    # Model used for native reasoning or to drive the default CoT agent.
    reasoning_model: Optional[Model] = None
    # Pre-built reasoning agent; when set it takes precedence over building one.
    reasoning_agent: Optional["Agent"] = None
    # Bounds on the number of Chain-of-Thought reasoning steps.
    min_steps: int = 1
    max_steps: int = 10
    # Tools made available to the default reasoning agent.
    tools: Optional[List[Union[Toolkit, Callable, Function, Dict]]] = None
    tool_call_limit: Optional[int] = None
    # Presumably forces JSON-mode output for the reasoning agent — TODO confirm.
    use_json_mode: bool = False
    telemetry: bool = True
    debug_mode: bool = False
    debug_level: Literal[1, 2] = 1
    run_context: Optional[RunContext] = None
    # Destination for accumulated reasoning-model metrics (see accumulate_eval_metrics).
    run_metrics: Optional["RunMetrics"] = None
@dataclass
class ReasoningResult:
    """Result from a reasoning operation."""

    # The reasoning message to append to run_messages (native reasoning only).
    message: Optional[Message] = None
    # Structured reasoning steps extracted from the model output.
    steps: List[ReasoningStep] = field(default_factory=list)
    # All messages produced while reasoning (used to update the run output).
    reasoning_messages: List[Message] = field(default_factory=list)
    # False when reasoning failed; `error` then describes the failure.
    success: bool = True
    error: Optional[str] = None
class ReasoningManager:
"""
Centralized manager for all reasoning operations.
Handles both native reasoning models (DeepSeek, Anthropic, OpenAI, etc.)
and default Chain-of-Thought reasoning with a clean, unified interface.
"""
    def __init__(self, config: ReasoningConfig):
        """Create a manager bound to a single reasoning configuration."""
        self.config = config
        # Cached reasoning agent — appears unused in this module; TODO confirm
        # whether external callers rely on it.
        self._reasoning_agent: Optional["Agent"] = None
        # Cached detected model-type string — likewise appears unused here.
        self._model_type: Optional[str] = None
    @property
    def reasoning_model(self) -> Optional[Model]:
        """The configured reasoning model, if any."""
        return self.config.reasoning_model
def _detect_model_type(self, model: Model) -> Optional[str]:
"""Detect the type of reasoning model."""
from agno.reasoning.anthropic import is_anthropic_reasoning_model
from agno.reasoning.azure_ai_foundry import is_ai_foundry_reasoning_model
from agno.reasoning.deepseek import is_deepseek_reasoning_model
from agno.reasoning.gemini import is_gemini_reasoning_model
from agno.reasoning.groq import is_groq_reasoning_model
from agno.reasoning.ollama import is_ollama_reasoning_model
from agno.reasoning.openai import is_openai_reasoning_model
from agno.reasoning.vertexai import is_vertexai_reasoning_model
if is_deepseek_reasoning_model(model):
return "deepseek"
if is_anthropic_reasoning_model(model):
return "anthropic"
if is_openai_reasoning_model(model):
return "openai"
if is_groq_reasoning_model(model):
return "groq"
if is_ollama_reasoning_model(model):
return "ollama"
if is_ai_foundry_reasoning_model(model):
return "ai_foundry"
if is_gemini_reasoning_model(model):
return "gemini"
if is_vertexai_reasoning_model(model):
return "vertexai"
return None
    def _get_reasoning_agent(self, model: Model) -> "Agent":
        """Get or create a reasoning agent for the given model.

        A user-supplied `reasoning_agent` in the config always takes
        precedence; otherwise a fresh agent is built around `model`.
        """
        if self.config.reasoning_agent is not None:
            return self.config.reasoning_agent
        from agno.reasoning.helpers import get_reasoning_agent
        # Note: a new agent is constructed on every call — no caching here.
        return get_reasoning_agent(
            reasoning_model=model,
            telemetry=self.config.telemetry,
            debug_mode=self.config.debug_mode,
            debug_level=self.config.debug_level,
            run_context=self.config.run_context,
        )
    def _get_default_reasoning_agent(self, model: Model) -> Optional["Agent"]:
        """Get or create a default Chain-of-Thought reasoning agent.

        A user-supplied `reasoning_agent` in the config always takes
        precedence; otherwise a default CoT agent is built around `model`,
        carrying the configured tools and step bounds.
        """
        if self.config.reasoning_agent is not None:
            return self.config.reasoning_agent
        from agno.reasoning.default import get_default_reasoning_agent
        # Note: a new agent is constructed on every call — no caching here.
        return get_default_reasoning_agent(
            reasoning_model=model,
            min_steps=self.config.min_steps,
            max_steps=self.config.max_steps,
            tools=self.config.tools,
            tool_call_limit=self.config.tool_call_limit,
            use_json_mode=self.config.use_json_mode,
            telemetry=self.config.telemetry,
            debug_mode=self.config.debug_mode,
            debug_level=self.config.debug_level,
            run_context=self.config.run_context,
        )
def is_native_reasoning_model(self, model: Optional[Model] = None) -> bool:
"""Check if the model is a native reasoning model."""
model = model or self.config.reasoning_model
if model is None:
return False
return self._detect_model_type(model) is not None
# =========================================================================
# Native Model Reasoning (Non-Streaming)
# =========================================================================
def get_native_reasoning(self, model: Model, messages: List[Message]) -> ReasoningResult:
"""Get reasoning from a native reasoning model (non-streaming)."""
model_type = self._detect_model_type(model)
if model_type is None:
return ReasoningResult(success=False, error="Not a native reasoning model")
reasoning_agent = self._get_reasoning_agent(model)
reasoning_message: Optional[Message] = None
run_metrics = self.config.run_metrics
try:
if model_type == "deepseek":
from agno.reasoning.deepseek import get_deepseek_reasoning
log_debug("Starting DeepSeek Reasoning", center=True, symbol="=")
reasoning_message = get_deepseek_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "anthropic":
from agno.reasoning.anthropic import get_anthropic_reasoning
log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
reasoning_message = get_anthropic_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "openai":
from agno.reasoning.openai import get_openai_reasoning
log_debug("Starting OpenAI Reasoning", center=True, symbol="=")
reasoning_message = get_openai_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "groq":
from agno.reasoning.groq import get_groq_reasoning
log_debug("Starting Groq Reasoning", center=True, symbol="=")
reasoning_message = get_groq_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "ollama":
from agno.reasoning.ollama import get_ollama_reasoning
log_debug("Starting Ollama Reasoning", center=True, symbol="=")
reasoning_message = get_ollama_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "ai_foundry":
from agno.reasoning.azure_ai_foundry import get_ai_foundry_reasoning
log_debug("Starting Azure AI Foundry Reasoning", center=True, symbol="=")
reasoning_message = get_ai_foundry_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "gemini":
from agno.reasoning.gemini import get_gemini_reasoning
log_debug("Starting Gemini Reasoning", center=True, symbol="=")
reasoning_message = get_gemini_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "vertexai":
from agno.reasoning.vertexai import get_vertexai_reasoning
log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
reasoning_message = get_vertexai_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
except Exception as e:
log_error(f"Reasoning error: {e}")
return ReasoningResult(success=False, error=str(e))
if reasoning_message is None:
return ReasoningResult(
success=False,
error="Reasoning response is None",
)
return ReasoningResult(
message=reasoning_message,
steps=[ReasoningStep(result=reasoning_message.content)],
reasoning_messages=[reasoning_message],
success=True,
)
async def aget_native_reasoning(self, model: Model, messages: List[Message]) -> ReasoningResult:
"""Get reasoning from a native reasoning model asynchronously (non-streaming)."""
model_type = self._detect_model_type(model)
if model_type is None:
return ReasoningResult(success=False, error="Not a native reasoning model")
reasoning_agent = self._get_reasoning_agent(model)
reasoning_message: Optional[Message] = None
run_metrics = self.config.run_metrics
try:
if model_type == "deepseek":
from agno.reasoning.deepseek import aget_deepseek_reasoning
log_debug("Starting DeepSeek Reasoning", center=True, symbol="=")
reasoning_message = await aget_deepseek_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "anthropic":
from agno.reasoning.anthropic import aget_anthropic_reasoning
log_debug("Starting Anthropic Claude Reasoning", center=True, symbol="=")
reasoning_message = await aget_anthropic_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "openai":
from agno.reasoning.openai import aget_openai_reasoning
log_debug("Starting OpenAI Reasoning", center=True, symbol="=")
reasoning_message = await aget_openai_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "groq":
from agno.reasoning.groq import aget_groq_reasoning
log_debug("Starting Groq Reasoning", center=True, symbol="=")
reasoning_message = await aget_groq_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "ollama":
from agno.reasoning.ollama import aget_ollama_reasoning
log_debug("Starting Ollama Reasoning", center=True, symbol="=")
reasoning_message = await aget_ollama_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "ai_foundry":
from agno.reasoning.azure_ai_foundry import aget_ai_foundry_reasoning
log_debug("Starting Azure AI Foundry Reasoning", center=True, symbol="=")
reasoning_message = await aget_ai_foundry_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "gemini":
from agno.reasoning.gemini import aget_gemini_reasoning
log_debug("Starting Gemini Reasoning", center=True, symbol="=")
reasoning_message = await aget_gemini_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
elif model_type == "vertexai":
from agno.reasoning.vertexai import aget_vertexai_reasoning
log_debug("Starting VertexAI Reasoning", center=True, symbol="=")
reasoning_message = await aget_vertexai_reasoning(reasoning_agent, messages, run_metrics=run_metrics)
except Exception as e:
log_error(f"Reasoning error: {e}")
return ReasoningResult(success=False, error=str(e))
if reasoning_message is None:
return ReasoningResult(
success=False,
error="Reasoning response is None",
)
return ReasoningResult(
message=reasoning_message,
steps=[ReasoningStep(result=reasoning_message.content)],
reasoning_messages=[reasoning_message],
success=True,
)
# =========================================================================
# Native Model Reasoning (Streaming)
# =========================================================================
def stream_native_reasoning(
self, model: Model, messages: List[Message]
) -> Iterator[Tuple[Optional[str], Optional[ReasoningResult]]]:
"""
Stream reasoning from a native reasoning model.
Yields:
Tuple of (reasoning_content_delta, final_result)
- During streaming: (reasoning_content_delta, None)
- At the end: (None, ReasoningResult)
"""
model_type = self._detect_model_type(model)
if model_type is None:
yield (None, ReasoningResult(success=False, error="Not a native reasoning model"))
return
reasoning_agent = self._get_reasoning_agent(model)
# Currently only DeepSeek and Anthropic support streaming
if model_type == "deepseek":
from agno.reasoning.deepseek import get_deepseek_reasoning_stream
log_debug("Starting DeepSeek Reasoning (streaming)", center=True, symbol="=")
final_message: Optional[Message] = None
for reasoning_delta, message in get_deepseek_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "anthropic":
from agno.reasoning.anthropic import get_anthropic_reasoning_stream
log_debug("Starting Anthropic Claude Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_anthropic_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "gemini":
from agno.reasoning.gemini import get_gemini_reasoning_stream
log_debug("Starting Gemini Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_gemini_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "openai":
from agno.reasoning.openai import get_openai_reasoning_stream
log_debug("Starting OpenAI Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_openai_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "vertexai":
from agno.reasoning.vertexai import get_vertexai_reasoning_stream
log_debug("Starting VertexAI Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_vertexai_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "ai_foundry":
from agno.reasoning.azure_ai_foundry import get_ai_foundry_reasoning_stream
log_debug("Starting Azure AI Foundry Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_ai_foundry_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "groq":
from agno.reasoning.groq import get_groq_reasoning_stream
log_debug("Starting Groq Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_groq_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "ollama":
from agno.reasoning.ollama import get_ollama_reasoning_stream
log_debug("Starting Ollama Reasoning (streaming)", center=True, symbol="=")
final_message = None
for reasoning_delta, message in get_ollama_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
else:
# Fall back to non-streaming for other models
result = self.get_native_reasoning(model, messages)
yield (None, result)
async def astream_native_reasoning(
self, model: Model, messages: List[Message]
) -> AsyncIterator[Tuple[Optional[str], Optional[ReasoningResult]]]:
"""
Stream reasoning from a native reasoning model asynchronously.
Yields:
Tuple of (reasoning_content_delta, final_result)
- During streaming: (reasoning_content_delta, None)
- At the end: (None, ReasoningResult)
"""
model_type = self._detect_model_type(model)
if model_type is None:
yield (None, ReasoningResult(success=False, error="Not a native reasoning model"))
return
reasoning_agent = self._get_reasoning_agent(model)
# Currently only DeepSeek and Anthropic support streaming
if model_type == "deepseek":
from agno.reasoning.deepseek import aget_deepseek_reasoning_stream
log_debug("Starting DeepSeek Reasoning (streaming)", center=True, symbol="=")
final_message: Optional[Message] = None
async for reasoning_delta, message in aget_deepseek_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "anthropic":
from agno.reasoning.anthropic import aget_anthropic_reasoning_stream
log_debug("Starting Anthropic Claude Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_anthropic_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "gemini":
from agno.reasoning.gemini import aget_gemini_reasoning_stream
log_debug("Starting Gemini Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_gemini_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "openai":
from agno.reasoning.openai import aget_openai_reasoning_stream
log_debug("Starting OpenAI Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_openai_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "vertexai":
from agno.reasoning.vertexai import aget_vertexai_reasoning_stream
log_debug("Starting VertexAI Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_vertexai_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "ai_foundry":
from agno.reasoning.azure_ai_foundry import aget_ai_foundry_reasoning_stream
log_debug("Starting Azure AI Foundry Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_ai_foundry_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "groq":
from agno.reasoning.groq import aget_groq_reasoning_stream
log_debug("Starting Groq Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_groq_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
elif model_type == "ollama":
from agno.reasoning.ollama import aget_ollama_reasoning_stream
log_debug("Starting Ollama Reasoning (streaming)", center=True, symbol="=")
final_message = None
async for reasoning_delta, message in aget_ollama_reasoning_stream(reasoning_agent, messages):
if reasoning_delta is not None:
yield (reasoning_delta, None)
if message is not None:
final_message = message
if final_message:
yield (
None,
ReasoningResult(
message=final_message,
steps=[ReasoningStep(result=final_message.content)],
reasoning_messages=[final_message],
success=True,
),
)
else:
yield (None, ReasoningResult(success=False, error="No reasoning content"))
else:
# Fall back to non-streaming for other models
result = await self.aget_native_reasoning(model, messages)
yield (None, result)
# =========================================================================
# Default Chain-of-Thought Reasoning
# =========================================================================
    def run_default_reasoning(
        self, model: Model, run_messages: RunMessages
    ) -> Iterator[Tuple[Optional[ReasoningStep], Optional[ReasoningResult]]]:
        """
        Run default Chain-of-Thought reasoning.

        Drives the default reasoning agent in a loop, yielding each structured
        step as it is produced, until the agent signals a final answer or
        `config.max_steps` is reached. Errors inside the loop are logged and
        end the loop; the final result is still yielded with success=True.

        Yields:
            Tuple of (reasoning_step, final_result)
            - During reasoning: (ReasoningStep, None)
            - At the end: (None, ReasoningResult)
        """
        from agno.reasoning.helpers import get_next_action, update_messages_with_reasoning
        reasoning_agent = self._get_default_reasoning_agent(model)
        if reasoning_agent is None:
            yield (None, ReasoningResult(success=False, error="Reasoning agent is None"))
            return
        # Validate reasoning agent output schema
        if (
            reasoning_agent.output_schema is not None
            and isinstance(reasoning_agent.output_schema, type)
            and not issubclass(reasoning_agent.output_schema, ReasoningSteps)
        ):
            yield (
                None,
                ReasoningResult(
                    success=False,
                    error="Reasoning agent response model should be ReasoningSteps",
                ),
            )
            return
        step_count = 1
        next_action = NextAction.CONTINUE
        reasoning_messages: List[Message] = []
        all_reasoning_steps: List[ReasoningStep] = []
        log_debug("Starting Reasoning", center=True, symbol="=")
        # NOTE(review): only max_steps bounds this loop; config.min_steps is not
        # consulted here — confirm that is intentional.
        while next_action == NextAction.CONTINUE and step_count < self.config.max_steps:
            log_debug(f"Step {step_count}", center=True, symbol="=")
            try:
                reasoning_agent_response: RunOutput = reasoning_agent.run(input=run_messages.get_input_messages())
                # Accumulate reasoning model metrics
                if self.config.run_metrics is not None:
                    from agno.metrics import accumulate_eval_metrics
                    accumulate_eval_metrics(
                        reasoning_agent_response.metrics, self.config.run_metrics, prefix="reasoning"
                    )
                # Any malformed response ends the loop; partial results so far are kept.
                if reasoning_agent_response.content is None or reasoning_agent_response.messages is None:
                    log_warning("Reasoning error. Reasoning response is empty")
                    break
                if isinstance(reasoning_agent_response.content, str):
                    log_warning("Reasoning error. Content is a string, not structured output")
                    break
                if (
                    reasoning_agent_response.content.reasoning_steps is None
                    or len(reasoning_agent_response.content.reasoning_steps) == 0
                ):
                    log_warning("Reasoning error. Reasoning steps are empty")
                    break
                reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
                all_reasoning_steps.extend(reasoning_steps)
                # Yield each reasoning step
                for step in reasoning_steps:
                    yield (step, None)
                # Extract reasoning messages: everything from the first assistant
                # message onward (skips system/user scaffolding).
                first_assistant_index = next(
                    (i for i, m in enumerate(reasoning_agent_response.messages) if m.role == "assistant"),
                    len(reasoning_agent_response.messages),
                )
                reasoning_messages = reasoning_agent_response.messages[first_assistant_index:]
                # Get the next action from the last step of this iteration.
                next_action = get_next_action(reasoning_steps[-1])
                if next_action == NextAction.FINAL_ANSWER:
                    break
            except Exception as e:
                log_error(f"Reasoning error: {e}")
                break
            step_count += 1
        log_debug(f"Total Reasoning steps: {len(all_reasoning_steps)}")
        log_debug("Reasoning finished", center=True, symbol="=")
        # Update messages with reasoning
        update_messages_with_reasoning(
            run_messages=run_messages,
            reasoning_messages=reasoning_messages,
        )
        # Yield final result
        yield (
            None,
            ReasoningResult(
                steps=all_reasoning_steps,
                reasoning_messages=reasoning_messages,
                success=True,
            ),
        )
async def arun_default_reasoning(
self, model: Model, run_messages: RunMessages
) -> AsyncIterator[Tuple[Optional[ReasoningStep], Optional[ReasoningResult]]]:
"""
Run default Chain-of-Thought reasoning asynchronously.
Yields:
Tuple of (reasoning_step, final_result)
- During reasoning: (ReasoningStep, None)
- At the end: (None, ReasoningResult)
"""
from agno.reasoning.helpers import get_next_action, update_messages_with_reasoning
reasoning_agent = self._get_default_reasoning_agent(model)
if reasoning_agent is None:
yield (None, ReasoningResult(success=False, error="Reasoning agent is None"))
return
# Validate reasoning agent output schema
if (
reasoning_agent.output_schema is not None
and isinstance(reasoning_agent.output_schema, type)
and not issubclass(reasoning_agent.output_schema, ReasoningSteps)
):
yield (
None,
ReasoningResult(
success=False,
error="Reasoning agent response model should be ReasoningSteps",
),
)
return
step_count = 1
next_action = NextAction.CONTINUE
reasoning_messages: List[Message] = []
all_reasoning_steps: List[ReasoningStep] = []
log_debug("Starting Reasoning", center=True, symbol="=")
while next_action == NextAction.CONTINUE and step_count < self.config.max_steps:
log_debug(f"Step {step_count}", center=True, symbol="=")
step_count += 1
try:
reasoning_agent_response: RunOutput = await reasoning_agent.arun( # type: ignore[misc]
input=run_messages.get_input_messages()
)
# Accumulate reasoning model metrics
if self.config.run_metrics is not None:
from agno.metrics import accumulate_eval_metrics
accumulate_eval_metrics(
reasoning_agent_response.metrics, self.config.run_metrics, prefix="reasoning"
)
if reasoning_agent_response.content is None or reasoning_agent_response.messages is None:
log_warning("Reasoning error. Reasoning response is empty")
break
if isinstance(reasoning_agent_response.content, str):
log_warning("Reasoning error. Content is a string, not structured output")
break
if reasoning_agent_response.content.reasoning_steps is None:
log_warning("Reasoning error. Reasoning steps are empty")
break
reasoning_steps: List[ReasoningStep] = reasoning_agent_response.content.reasoning_steps
all_reasoning_steps.extend(reasoning_steps)
# Yield each reasoning step
for step in reasoning_steps:
yield (step, None)
# Extract reasoning messages
first_assistant_index = next(
(i for i, m in enumerate(reasoning_agent_response.messages) if m.role == "assistant"),
len(reasoning_agent_response.messages),
)
reasoning_messages = reasoning_agent_response.messages[first_assistant_index:]
# Get the next action
next_action = get_next_action(reasoning_steps[-1])
if next_action == NextAction.FINAL_ANSWER:
break
except Exception as e:
log_error(f"Reasoning error: {e}")
break
log_debug(f"Total Reasoning steps: {len(all_reasoning_steps)}")
log_debug("Reasoning finished", center=True, symbol="=")
# Update messages with reasoning
update_messages_with_reasoning(
run_messages=run_messages,
reasoning_messages=reasoning_messages,
)
# Yield final result
yield (
None,
ReasoningResult(
steps=all_reasoning_steps,
reasoning_messages=reasoning_messages,
success=True,
),
)
def reason(
self,
run_messages: RunMessages,
stream: bool = False,
) -> Iterator[ReasoningEvent]:
"""
Run reasoning and yield ReasoningEvent objects.
Args:
run_messages: The messages to reason about
stream: Whether to stream reasoning content
Yields:
ReasoningEvent objects for each stage of reasoning
"""
# Get the reasoning model
reasoning_model: Optional[Model] = self.config.reasoning_model
reasoning_model_provided = reasoning_model is not None
if reasoning_model is None:
yield ReasoningEvent(
event_type=ReasoningEventType.error,
error="Reasoning model is None",
)
return
# Yield started event
yield ReasoningEvent(event_type=ReasoningEventType.started)
# Check if this is a native reasoning model
if reasoning_model_provided and self.is_native_reasoning_model(reasoning_model):
# Use streaming for native models when stream is enabled
if stream:
yield from self._stream_native_reasoning_events(reasoning_model, run_messages)
else:
yield from self._get_native_reasoning_events(reasoning_model, run_messages)
else:
# Use default Chain-of-Thought reasoning
if reasoning_model_provided:
log_info(
f"Reasoning model: {reasoning_model.__class__.__name__} is not a native reasoning model, "
"defaulting to manual Chain-of-Thought reasoning"
)
yield from self._run_default_reasoning_events(reasoning_model, run_messages)
async def areason(
    self,
    run_messages: RunMessages,
    stream: bool = False,
) -> AsyncIterator[ReasoningEvent]:
    """
    Unified async reasoning interface that yields ReasoningEvent objects.

    This method handles all reasoning logic and yields events that can be
    converted to Agent or Team specific events by the caller.

    Args:
        run_messages: The messages to reason about
        stream: Whether to stream reasoning content deltas

    Yields:
        ReasoningEvent objects for each stage of reasoning
    """
    # Get the reasoning model; without one, reasoning cannot run at all.
    reasoning_model: Optional[Model] = self.config.reasoning_model
    if reasoning_model is None:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error="Reasoning model is None",
        )
        return
    # Yield started event
    yield ReasoningEvent(event_type=ReasoningEventType.started)
    # A reasoning model is guaranteed past the early return above, so the former
    # `reasoning_model_provided` flag was always True here and has been removed.
    if self.is_native_reasoning_model(reasoning_model):
        # Native reasoning models: stream deltas when requested, otherwise one-shot.
        if stream:
            async for event in self._astream_native_reasoning_events(reasoning_model, run_messages):
                yield event
        else:
            async for event in self._aget_native_reasoning_events(reasoning_model, run_messages):
                yield event
    else:
        # Fall back to manual Chain-of-Thought reasoning for non-native models.
        log_info(
            f"Reasoning model: {reasoning_model.__class__.__name__} is not a native reasoning model, "
            "defaulting to manual Chain-of-Thought reasoning"
        )
        async for event in self._arun_default_reasoning_events(reasoning_model, run_messages):
            yield event
def _stream_native_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
    """Wrap the native streaming reasoning flow as a sequence of ReasoningEvent objects."""
    input_messages = run_messages.get_input_messages()
    for delta, outcome in self.stream_native_reasoning(model, input_messages):
        if delta is not None:
            yield ReasoningEvent(
                event_type=ReasoningEventType.content_delta,
                reasoning_content=delta,
            )
        if outcome is None:
            continue
        if not outcome.success:
            # Surface the failure and stop iterating.
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=outcome.error,
            )
            return
        if outcome.message:
            # Persist the reasoning message on the run so later steps can see it.
            run_messages.messages.append(outcome.message)
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=outcome.steps,
            message=outcome.message,
            reasoning_messages=outcome.reasoning_messages,
        )
def _get_native_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
    """Run native reasoning once (no streaming) and emit the resulting ReasoningEvent objects."""
    outcome = self.get_native_reasoning(model, run_messages.get_input_messages())
    if not outcome.success:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error=outcome.error,
        )
        return
    if outcome.message:
        # Persist the reasoning message on the run so later steps can see it.
        run_messages.messages.append(outcome.message)
    yield ReasoningEvent(
        event_type=ReasoningEventType.completed,
        reasoning_steps=outcome.steps,
        message=outcome.message,
        reasoning_messages=outcome.reasoning_messages,
    )
def _run_default_reasoning_events(self, model: Model, run_messages: RunMessages) -> Iterator[ReasoningEvent]:
    """Drive manual Chain-of-Thought reasoning and emit one ReasoningEvent per step."""
    collected_steps: List[ReasoningStep] = []
    for step, outcome in self.run_default_reasoning(model, run_messages):
        if step is not None:
            collected_steps.append(step)
            yield ReasoningEvent(
                event_type=ReasoningEventType.step,
                reasoning_step=step,
            )
        if outcome is not None and not outcome.success:
            # Surface the failure and stop iterating.
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=outcome.error,
            )
            return
    # Emit a final completed event carrying every step that was produced.
    if collected_steps:
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=collected_steps,
        )
async def _astream_native_reasoning_events(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[ReasoningEvent]:
    """Async counterpart of the native streaming wrapper: yield ReasoningEvent objects."""
    input_messages = run_messages.get_input_messages()
    async for delta, outcome in self.astream_native_reasoning(model, input_messages):
        if delta is not None:
            yield ReasoningEvent(
                event_type=ReasoningEventType.content_delta,
                reasoning_content=delta,
            )
        if outcome is None:
            continue
        if not outcome.success:
            # Surface the failure and stop iterating.
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=outcome.error,
            )
            return
        if outcome.message:
            # Persist the reasoning message on the run so later steps can see it.
            run_messages.messages.append(outcome.message)
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=outcome.steps,
            message=outcome.message,
            reasoning_messages=outcome.reasoning_messages,
        )
async def _aget_native_reasoning_events(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[ReasoningEvent]:
    """Run native reasoning once asynchronously (no streaming) and emit ReasoningEvent objects."""
    outcome = await self.aget_native_reasoning(model, run_messages.get_input_messages())
    if not outcome.success:
        yield ReasoningEvent(
            event_type=ReasoningEventType.error,
            error=outcome.error,
        )
        return
    if outcome.message:
        # Persist the reasoning message on the run so later steps can see it.
        run_messages.messages.append(outcome.message)
    yield ReasoningEvent(
        event_type=ReasoningEventType.completed,
        reasoning_steps=outcome.steps,
        message=outcome.message,
        reasoning_messages=outcome.reasoning_messages,
    )
async def _arun_default_reasoning_events(
    self, model: Model, run_messages: RunMessages
) -> AsyncIterator[ReasoningEvent]:
    """Drive manual Chain-of-Thought reasoning asynchronously, one ReasoningEvent per step."""
    collected_steps: List[ReasoningStep] = []
    async for step, outcome in self.arun_default_reasoning(model, run_messages):
        if step is not None:
            collected_steps.append(step)
            yield ReasoningEvent(
                event_type=ReasoningEventType.step,
                reasoning_step=step,
            )
        if outcome is not None and not outcome.success:
            # Surface the failure and stop iterating.
            yield ReasoningEvent(
                event_type=ReasoningEventType.error,
                error=outcome.error,
            )
            return
    # Emit a final completed event carrying every step that was produced.
    if collected_steps:
        yield ReasoningEvent(
            event_type=ReasoningEventType.completed,
            reasoning_steps=collected_steps,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/reasoning/manager.py",
"license": "Apache License 2.0",
"lines": 1055,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/azure/ai_foundry/test_reasoning_streaming.py | """Integration tests for Azure AI Foundry reasoning streaming functionality.
This test verifies that reasoning content streams correctly (not all at once)
for Azure AI Foundry models (like DeepSeek-R1) when used as a reasoning_model.
These tests verify the streaming reasoning feature where reasoning content
is delivered incrementally via RunEvent.reasoning_content_delta events.
"""
import pytest
from agno.agent import Agent
from agno.models.azure import AzureAIFoundry
from agno.run.agent import RunEvent
@pytest.fixture
async def azure_ai_foundry_model():
    """Fixture that provides an Azure AI Foundry model and cleans it up after the test."""
    model = AzureAIFoundry(id="Phi-4")
    yield model
    # Teardown: release the sync client, then the async one.
    # NOTE(review): both close() and aclose() are called — presumably the model holds
    # separate sync and async clients; confirm against AzureAIFoundry's implementation.
    model.close()
    await model.aclose()
@pytest.fixture
async def azure_ai_foundry_reasoning_model():
    """Fixture that provides a DeepSeek-R1 reasoning model."""
    model = AzureAIFoundry(id="DeepSeek-R1")
    yield model
    # Teardown: release the sync client, then the async one.
    # NOTE(review): both close() and aclose() are called — presumably the model holds
    # separate sync and async clients; confirm against AzureAIFoundry's implementation.
    model.close()
    await model.aclose()
def _get_reasoning_streaming_agent(main_model, reasoning_model, **kwargs):
    """Build an Agent wired with an Azure AI Foundry reasoning_model for streaming reasoning tests."""
    config = dict(
        model=main_model,
        reasoning_model=reasoning_model,
        instructions="You are an expert problem-solving assistant. Think step by step.",
        markdown=True,
        telemetry=False,
    )
    # Caller-supplied overrides win over the defaults above.
    config.update(kwargs)
    return Agent(**config)
def test_reasoning_model_streams_content_deltas(azure_ai_foundry_model, azure_ai_foundry_reasoning_model):
    """Verify DeepSeek-R1 on Azure AI Foundry emits incremental reasoning_content_delta events."""
    agent = _get_reasoning_streaming_agent(azure_ai_foundry_model, azure_ai_foundry_reasoning_model)
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    for ev in agent.run(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
@pytest.mark.asyncio
async def test_reasoning_model_streams_content_deltas_async(azure_ai_foundry_model, azure_ai_foundry_reasoning_model):
    """Async variant: verify DeepSeek-R1 on Azure AI Foundry streams reasoning deltas incrementally."""
    agent = _get_reasoning_streaming_agent(azure_ai_foundry_model, azure_ai_foundry_reasoning_model)
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    async for ev in agent.arun(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/azure/ai_foundry/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/azure/openai/test_reasoning_streaming.py | """Integration tests for Azure OpenAI reasoning streaming functionality.
This test verifies that reasoning content streams correctly (not all at once)
for Azure OpenAI o-series models when used as a reasoning_model.
Note: Azure OpenAI o-series models (o1, o3, o4) perform internal reasoning but do not
expose the reasoning content via the API. The reasoning happens internally.
The content output is treated as reasoning for these models.
These tests verify the streaming reasoning feature where reasoning content
is delivered incrementally via RunEvent.reasoning_content_delta events.
"""
import pytest
from agno.agent import Agent
from agno.models.azure import AzureOpenAI
from agno.run.agent import RunEvent
def _get_reasoning_streaming_agent(**kwargs):
    """Build an Agent using an Azure OpenAI o-series reasoning_model for streaming reasoning tests."""
    config = dict(
        model=AzureOpenAI(id="gpt-4o-mini"),
        # o3-mini reasons internally; its content output is treated as reasoning.
        reasoning_model=AzureOpenAI(id="o3-mini"),
        instructions="You are an expert problem-solving assistant. Think step by step.",
        markdown=True,
        telemetry=False,
    )
    # Caller-supplied overrides win over the defaults above.
    config.update(kwargs)
    return Agent(**config)
def test_reasoning_model_streams_content_deltas():
    """Verify the Azure OpenAI o-series reasoning_model delivers reasoning via delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    for ev in agent.run(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # o-series models treat content output as reasoning; delivery granularity can vary,
    # so only require at least one delta event.
    assert len(deltas) >= 1, (
        f"Should have received at least one reasoning_content_delta event, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
@pytest.mark.asyncio
async def test_reasoning_model_streams_content_deltas_async():
    """Async variant: verify the Azure OpenAI o-series reasoning_model delivers delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    async for ev in agent.arun(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    assert len(deltas) >= 1, (
        f"Should have received at least one reasoning_content_delta event, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
def test_reasoning_non_streaming_has_reasoning_content():
    """Non-streaming runs should still surface reasoning content on the response."""
    agent = _get_reasoning_streaming_agent()
    response = agent.run("What is 12 * 8?", stream=False)
    reasoning = response.reasoning_content or ""
    assert len(reasoning) > 0, "Non-streaming should have reasoning"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/azure/openai/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/deepseek/test_reasoning_streaming.py | """Integration tests for DeepSeek reasoning streaming functionality.
This test verifies that reasoning content streams correctly (not all at once)
for DeepSeek reasoning models when used as a reasoning_model.
These tests verify the streaming reasoning feature where reasoning content
is delivered incrementally via RunEvent.reasoning_content_delta events.
"""
import pytest
from agno.agent import Agent
from agno.models.deepseek import DeepSeek
from agno.run.agent import RunEvent
def _get_reasoning_streaming_agent(**kwargs):
    """Build an Agent using deepseek-reasoner as the reasoning_model for streaming reasoning tests."""
    config = dict(
        model=DeepSeek(id="deepseek-chat"),
        reasoning_model=DeepSeek(id="deepseek-reasoner"),
        instructions="You are an expert problem-solving assistant. Think step by step.",
        markdown=True,
        telemetry=False,
    )
    # Caller-supplied overrides win over the defaults above.
    config.update(kwargs)
    return Agent(**config)
def test_reasoning_model_streams_content_deltas():
    """Verify the DeepSeek reasoning_model emits incremental reasoning_content_delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    for ev in agent.run(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
@pytest.mark.asyncio
async def test_reasoning_model_streams_content_deltas_async():
    """Async variant: verify the DeepSeek reasoning_model streams reasoning deltas incrementally."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    async for ev in agent.arun(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
def test_reasoning_streaming_delivers_more_events_than_non_streaming():
    """Streaming should split reasoning across many delta events rather than one batch."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 12 * 8?"
    # Non-streaming run: reasoning arrives whole on the response object.
    batch_reasoning = agent.run(prompt, stream=False).reasoning_content or ""
    # Streaming run: collect every non-empty delta event's content.
    deltas = [
        ev.reasoning_content
        for ev in agent.run(prompt, stream=True, stream_events=True)
        if ev.event == RunEvent.reasoning_content_delta and ev.reasoning_content
    ]
    streamed_reasoning = "".join(deltas)
    assert len(batch_reasoning) > 0, "Non-streaming should have reasoning"
    assert len(streamed_reasoning) > 0, "Streaming should have reasoning"
    # Multiple deltas are the key feature under test.
    assert len(deltas) > 1, "Streaming should deliver multiple delta events, not just one batch"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/deepseek/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/google/test_reasoning_streaming.py | """Integration tests for Gemini reasoning streaming functionality.
This test verifies that reasoning content streams correctly (not all at once)
for Gemini models with thinking_budget enabled when used as a reasoning_model.
These tests verify the streaming reasoning feature where reasoning content
is delivered incrementally via RunEvent.reasoning_content_delta events.
"""
import pytest
from agno.agent import Agent
from agno.models.google import Gemini
from agno.run.agent import RunEvent
def _get_reasoning_streaming_agent(**kwargs):
    """Build an Agent using Gemini with a thinking budget as the reasoning_model for streaming tests."""
    config = dict(
        model=Gemini(id="gemini-2.0-flash"),
        reasoning_model=Gemini(
            id="gemini-2.5-flash",
            thinking_budget=1024,
            include_thoughts=True,
        ),
        instructions="You are an expert problem-solving assistant. Think step by step.",
        markdown=True,
        telemetry=False,
        # Retry settings guard against transient Gemini rate limits in CI.
        exponential_backoff=True,
        delay_between_retries=5,
    )
    # Caller-supplied overrides win over the defaults above.
    config.update(kwargs)
    return Agent(**config)
def test_reasoning_model_streams_content_deltas():
    """Verify the Gemini reasoning_model emits incremental reasoning_content_delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    for ev in agent.run(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
@pytest.mark.asyncio
async def test_reasoning_model_streams_content_deltas_async():
    """Async variant: verify the Gemini reasoning_model streams reasoning deltas incrementally."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    async for ev in agent.arun(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
def test_reasoning_streaming_delivers_more_events_than_non_streaming():
    """Both run modes should produce reasoning; streaming delivers it via delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 12 * 8?"
    # Non-streaming run: reasoning arrives whole on the response object.
    batch_reasoning = agent.run(prompt, stream=False).reasoning_content or ""
    # Streaming run: collect every non-empty delta event's content.
    deltas = [
        ev.reasoning_content
        for ev in agent.run(prompt, stream=True, stream_events=True)
        if ev.event == RunEvent.reasoning_content_delta and ev.reasoning_content
    ]
    streamed_reasoning = "".join(deltas)
    assert len(batch_reasoning) > 0, "Non-streaming should have reasoning"
    assert len(streamed_reasoning) > 0, "Streaming should have reasoning"
    assert len(deltas) >= 1, "Streaming should deliver at least one delta event"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/google/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/groq/test_reasoning_streaming.py | """Integration tests for Groq reasoning streaming functionality.
This test verifies that reasoning content streams correctly (not all at once)
for Groq models (like DeepSeek-R1 distill) when used as a reasoning_model.
These tests verify the streaming reasoning feature where reasoning content
is delivered incrementally via RunEvent.reasoning_content_delta events.
"""
import pytest
from agno.agent import Agent
from agno.models.groq import Groq
from agno.run.agent import RunEvent
@pytest.fixture(scope="module")
def groq_model():
    """Fixture that provides a Groq model and reuses it across all tests in the module."""
    # llama-3.3-70b-versatile is the main (non-reasoning) model under test.
    return Groq(id="llama-3.3-70b-versatile")
@pytest.fixture(scope="module")
def groq_reasoning_model():
    """Fixture that provides a Groq-hosted Qwen3-32B reasoning model with include_reasoning enabled."""
    # NOTE(review): the previous docstring said "DeepSeek reasoning model", but the id is
    # qwen/qwen3-32b — the docstring above reflects the actual model.
    return Groq(id="qwen/qwen3-32b", request_params={"include_reasoning": True})
def _get_reasoning_streaming_agent(main_model, reasoning_model, **kwargs):
    """Build an Agent wired with a Groq reasoning_model for streaming reasoning tests."""
    config = dict(
        model=main_model,
        reasoning_model=reasoning_model,
        instructions="You are an expert problem-solving assistant. Think step by step.",
        markdown=True,
        telemetry=False,
    )
    # Caller-supplied overrides win over the defaults above.
    config.update(kwargs)
    return Agent(**config)
def test_reasoning_model_streams_content_deltas(groq_model, groq_reasoning_model):
    """Verify the Groq-hosted reasoning_model emits incremental reasoning_content_delta events."""
    agent = _get_reasoning_streaming_agent(groq_model, groq_reasoning_model)
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    for ev in agent.run(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
@pytest.mark.asyncio
async def test_reasoning_model_streams_content_deltas_async(groq_model, groq_reasoning_model):
    """Async variant: verify the Groq-hosted reasoning_model streams reasoning deltas incrementally."""
    agent = _get_reasoning_streaming_agent(groq_model, groq_reasoning_model)
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    async for ev in agent.arun(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # More than one delta proves the content was streamed, not delivered in one batch.
    assert len(deltas) > 1, (
        f"Should have received multiple reasoning_content_delta events for streaming, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
def test_reasoning_streaming_delivers_more_events_than_non_streaming(groq_model, groq_reasoning_model):
    """Streaming should split reasoning across many delta events rather than one batch."""
    agent = _get_reasoning_streaming_agent(groq_model, groq_reasoning_model)
    prompt = "What is 12 * 8?"
    # Non-streaming run: reasoning arrives whole on the response object.
    batch_reasoning = agent.run(prompt, stream=False).reasoning_content or ""
    # Streaming run: collect every non-empty delta event's content.
    deltas = [
        ev.reasoning_content
        for ev in agent.run(prompt, stream=True, stream_events=True)
        if ev.event == RunEvent.reasoning_content_delta and ev.reasoning_content
    ]
    streamed_reasoning = "".join(deltas)
    assert len(batch_reasoning) > 0, "Non-streaming should have reasoning"
    assert len(streamed_reasoning) > 0, "Streaming should have reasoning"
    # Multiple deltas are the key feature under test.
    assert len(deltas) > 1, "Streaming should deliver multiple delta events, not just one batch"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/groq/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/openai/chat/test_reasoning_streaming.py | """Integration tests for OpenAI reasoning streaming functionality.
This test verifies that reasoning content streams correctly (not all at once)
for OpenAI o-series models when used as a reasoning_model.
Note: OpenAI o-series models (o1, o3, o4) perform internal reasoning but do not
expose the reasoning content via the API. The reasoning happens internally.
For OpenAI Responses API with reasoning_summary, the summary is provided
as a complete block, not streamed incrementally.
These tests verify the streaming reasoning feature where reasoning content
is delivered incrementally via RunEvent.reasoning_content_delta events.
"""
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.agent import RunEvent
def _get_reasoning_streaming_agent(**kwargs):
    """Build an Agent using an OpenAI o-series reasoning_model for streaming reasoning tests."""
    config = dict(
        model=OpenAIChat(id="gpt-4o-mini"),
        # o3-mini reasons internally; its content output is treated as reasoning.
        reasoning_model=OpenAIChat(id="o3-mini"),
        instructions="You are an expert problem-solving assistant. Think step by step.",
        markdown=True,
        telemetry=False,
    )
    # Caller-supplied overrides win over the defaults above.
    config.update(kwargs)
    return Agent(**config)
def test_reasoning_model_streams_content_deltas():
    """Verify the OpenAI o-series reasoning_model delivers reasoning via delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    for ev in agent.run(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    # o-series models treat content output as reasoning; delivery granularity can vary,
    # so only require at least one delta event.
    assert len(deltas) >= 1, (
        f"Should have received at least one reasoning_content_delta event, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
@pytest.mark.asyncio
async def test_reasoning_model_streams_content_deltas_async():
    """Async variant: verify the OpenAI o-series reasoning_model delivers delta events."""
    agent = _get_reasoning_streaming_agent()
    prompt = "What is 25 * 37? Show your reasoning step by step."
    deltas = []
    saw_started = False
    saw_completed = False
    async for ev in agent.arun(prompt, stream=True, stream_events=True):
        kind = ev.event
        if kind == RunEvent.reasoning_started:
            saw_started = True
        elif kind == RunEvent.reasoning_content_delta and ev.reasoning_content:
            deltas.append(ev.reasoning_content)
        elif kind == RunEvent.reasoning_completed:
            saw_completed = True
    assert saw_started, "Should have received reasoning_started event"
    assert saw_completed, "Should have received reasoning_completed event"
    assert len(deltas) >= 1, (
        f"Should have received at least one reasoning_content_delta event, but got {len(deltas)}"
    )
    # Concatenated deltas must form non-empty reasoning text.
    assert "".join(deltas), "Combined reasoning content should not be empty"
def test_reasoning_non_streaming_has_reasoning_content():
    """Test that non-streaming mode also produces reasoning content."""
    agent = _get_reasoning_streaming_agent()

    response = agent.run("What is 12 * 8?", stream=False)
    reasoning = response.reasoning_content or ""

    assert len(reasoning) > 0, "Non-streaming should have reasoning"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/openai/chat/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/reasoning/test_reasoning_streaming.py | """Unit tests for reasoning streaming functionality.
These tests verify that reasoning_content_delta events are properly emitted
during streaming reasoning, without requiring actual API calls.
"""
from unittest.mock import patch
import pytest
from agno.models.message import Message
from agno.reasoning.step import ReasoningStep, ReasoningSteps
from agno.run.agent import RunEvent
# ============================================================================
# Test RunEvent enum has required events
# ============================================================================
def test_run_event_has_reasoning_content_delta():
    """RunEvent must expose the reasoning_content_delta member with its wire value."""
    delta_event = getattr(RunEvent, "reasoning_content_delta", None)
    assert delta_event is not None
    assert delta_event.value == "ReasoningContentDelta"
def test_run_event_has_all_reasoning_events():
    """RunEvent must expose every reasoning-related member with its wire value."""
    expected = {
        "reasoning_started": "ReasoningStarted",
        "reasoning_step": "ReasoningStep",
        "reasoning_content_delta": "ReasoningContentDelta",
        "reasoning_completed": "ReasoningCompleted",
    }
    for attr_name, wire_value in expected.items():
        assert hasattr(RunEvent, attr_name)
        assert getattr(RunEvent, attr_name).value == wire_value
# ============================================================================
# Test ReasoningContentDeltaEvent creation
# ============================================================================
def test_create_reasoning_content_delta_event():
    """create_reasoning_content_delta_event builds a delta event from a run response."""
    from agno.run.agent import RunOutput
    from agno.utils.events import create_reasoning_content_delta_event

    source_response = RunOutput(
        run_id="test-run-id",
        session_id="test-session-id",
        content="",
    )

    delta_event = create_reasoning_content_delta_event(
        from_run_response=source_response,
        reasoning_content="Test reasoning chunk",
    )

    assert delta_event is not None
    assert delta_event.event == RunEvent.reasoning_content_delta
    assert delta_event.reasoning_content == "Test reasoning chunk"
def test_reasoning_content_delta_event_class_exists():
    """ReasoningContentDeltaEvent is importable and carries the expected fields."""
    from agno.run.agent import ReasoningContentDeltaEvent

    assert ReasoningContentDeltaEvent is not None

    sample = ReasoningContentDeltaEvent(
        event=RunEvent.reasoning_content_delta,
        run_id="test-run-id",
        session_id="test-session-id",
        reasoning_content="Test content",
    )
    assert sample.event == RunEvent.reasoning_content_delta
    assert sample.reasoning_content == "Test content"
# ============================================================================
# Test Anthropic streaming functions exist
# ============================================================================
def test_anthropic_streaming_functions_exist():
    """Both sync and async Anthropic reasoning stream helpers are importable callables."""
    from agno.reasoning.anthropic import (
        aget_anthropic_reasoning_stream,
        get_anthropic_reasoning_stream,
    )

    for helper in (get_anthropic_reasoning_stream, aget_anthropic_reasoning_stream):
        assert callable(helper)
def test_deepseek_streaming_functions_exist():
    """Both sync and async DeepSeek reasoning stream helpers are importable callables."""
    from agno.reasoning.deepseek import (
        aget_deepseek_reasoning_stream,
        get_deepseek_reasoning_stream,
    )

    for helper in (get_deepseek_reasoning_stream, aget_deepseek_reasoning_stream):
        assert callable(helper)
# ============================================================================
# Test streaming function signatures
# ============================================================================
def test_anthropic_stream_yields_tuples():
    """Anthropic streaming helper exposes reasoning_agent and messages parameters."""
    import inspect

    from agno.reasoning.anthropic import get_anthropic_reasoning_stream

    param_names = set(inspect.signature(get_anthropic_reasoning_stream).parameters)
    assert "reasoning_agent" in param_names
    assert "messages" in param_names
def test_deepseek_stream_yields_tuples():
    """DeepSeek streaming helper exposes reasoning_agent and messages parameters."""
    import inspect

    from agno.reasoning.deepseek import get_deepseek_reasoning_stream

    param_names = set(inspect.signature(get_deepseek_reasoning_stream).parameters)
    assert "reasoning_agent" in param_names
    assert "messages" in param_names
# ============================================================================
# Mock-based streaming tests
# ============================================================================
@patch("agno.reasoning.anthropic.get_anthropic_reasoning_stream")
def test_anthropic_stream_function_called_with_stream_events(mock_stream):
    """Test that streaming version is called when stream_events=True for Anthropic."""
    # The mocked stream yields (delta, None) chunks and finishes with (None, message)
    final_message = Message(
        role="assistant",
        content="<thinking>\nTest thinking\n</thinking>",
        reasoning_content="Test thinking",
    )
    mock_stream.return_value = iter(
        [
            ("chunk1", None),
            ("chunk2", None),
            (None, final_message),
        ]
    )

    collected = list(mock_stream.return_value)

    assert len(collected) == 3
    assert collected[0] == ("chunk1", None)
    assert collected[1] == ("chunk2", None)
    last_delta, last_message = collected[2]
    assert last_delta is None
    assert last_message == final_message
@pytest.mark.asyncio
@patch("agno.reasoning.anthropic.aget_anthropic_reasoning_stream")
async def test_anthropic_async_stream_function(mock_stream):
    """Test async streaming version for Anthropic."""
    final_message = Message(
        role="assistant",
        content="<thinking>\nAsync thinking\n</thinking>",
        reasoning_content="Async thinking",
    )

    # Async generator standing in for the real streaming helper
    async def fake_stream():
        yield ("async_chunk1", None)
        yield ("async_chunk2", None)
        yield (None, final_message)

    mock_stream.return_value = fake_stream()

    collected = [item async for item in mock_stream.return_value]

    assert len(collected) == 3
    assert collected[0] == ("async_chunk1", None)
    assert collected[1] == ("async_chunk2", None)
# ============================================================================
# Test event emission logic
# ============================================================================
def test_reasoning_events_can_be_compared():
    """Reasoning events compare equal to themselves and unequal to each other."""
    started = RunEvent.reasoning_started
    delta = RunEvent.reasoning_content_delta
    completed = RunEvent.reasoning_completed

    assert started == RunEvent.reasoning_started
    assert delta == RunEvent.reasoning_content_delta
    assert started != delta
    assert delta != completed
def test_reasoning_event_string_values():
    """Reasoning events serialize to their expected string values."""
    assert str(RunEvent.reasoning_started) == "RunEvent.reasoning_started"

    expected_values = {
        RunEvent.reasoning_started: "ReasoningStarted",
        RunEvent.reasoning_content_delta: "ReasoningContentDelta",
        RunEvent.reasoning_completed: "ReasoningCompleted",
    }
    for event, wire_value in expected_values.items():
        assert event.value == wire_value
# ============================================================================
# Test ReasoningStep and ReasoningSteps
# ============================================================================
def test_reasoning_step_creation():
    """A ReasoningStep stores the result it was constructed with."""
    assert ReasoningStep(result="Test reasoning result").result == "Test reasoning result"
def test_reasoning_steps_creation():
    """ReasoningSteps holds an ordered collection of steps."""
    container = ReasoningSteps(
        reasoning_steps=[
            ReasoningStep(result="Step 1"),
            ReasoningStep(result="Step 2"),
        ]
    )
    assert [step.result for step in container.reasoning_steps] == ["Step 1", "Step 2"]
# ============================================================================
# Test event registry includes reasoning_content_delta
# ============================================================================
def test_reasoning_content_delta_in_event_registry():
    """The event registry maps reasoning_content_delta to its event class."""
    from agno.run.agent import RUN_EVENT_TYPE_REGISTRY, ReasoningContentDeltaEvent

    registry_entry = RUN_EVENT_TYPE_REGISTRY.get(RunEvent.reasoning_content_delta)
    assert registry_entry is not None
    assert registry_entry == ReasoningContentDeltaEvent
def test_all_reasoning_events_in_registry():
    """Every reasoning event has an entry in the event registry."""
    from agno.run.agent import RUN_EVENT_TYPE_REGISTRY

    for event in (
        RunEvent.reasoning_started,
        RunEvent.reasoning_step,
        RunEvent.reasoning_content_delta,
        RunEvent.reasoning_completed,
    ):
        assert event in RUN_EVENT_TYPE_REGISTRY, f"{event} not in registry"
# ============================================================================
# Test Message with reasoning_content
# ============================================================================
def test_message_supports_reasoning_content():
    """Message stores reasoning_content independently of content."""
    message = Message(
        role="assistant",
        content="Response content",
        reasoning_content="Thinking content",
    )
    assert message.content == "Response content"
    assert message.reasoning_content == "Thinking content"
def test_message_reasoning_content_optional():
    """Message can be built without supplying reasoning_content."""
    message = Message(role="assistant", content="Just content")
    assert message.content == "Just content"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/reasoning/test_reasoning_streaming.py",
"license": "Apache License 2.0",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/os/scopes.py | """AgentOS RBAC Scopes
This module defines all available permission scopes for AgentOS RBAC (Role-Based Access Control).
Scope Format:
- Global resource scopes: `resource:action`
- Per-resource scopes: `resource:<resource-id>:action`
- Wildcards: `resource:*:action` for any resource
The AgentOS ID is verified via the JWT `aud` (audience) claim.
Examples:
- `system:read` - Read system config
- `agents:read` - List all agents
- `agents:web-agent:read` - Read specific agent
- `agents:web-agent:run` - Run specific agent
- `agents:*:run` - Run any agent (wildcard)
- `agent_os:admin` - Full access to everything
"""
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Set
class AgentOSScope(str, Enum):
    """
    Permission scopes recognized by AgentOS RBAC.

    Scope grammar:
        - Global resource scopes:  ``resource:action`` (e.g. ``agents:read``)
        - Per-resource scopes:     ``resource:<resource-id>:action``
          (e.g. ``agents:web-agent:run``)
        - Wildcards:               ``resource:*:action`` grants the action on
          every resource of that type

    Global resource scopes in use:
        system:read · agents:read · teams:read · workflows:read ·
        sessions:read/write/delete · memories:read/write/delete ·
        knowledge:read/write/delete · metrics:read/write ·
        evals:read/write/delete · traces:read

    Per-resource scopes in use:
        agents:<agent-id>:read/run · teams:<team-id>:read/run ·
        workflows:<workflow-id>:read/run

    Special scopes:
        - ADMIN (``agent_os:admin``): full access to every endpoint
    """

    # Grants unrestricted access to all endpoints.
    ADMIN = "agent_os:admin"
@dataclass
class ParsedScope:
    """Structured form of a raw scope string."""

    raw: str
    # One of: "admin", "global", "per_resource", "unknown"
    scope_type: str
    resource: Optional[str] = None
    resource_id: Optional[str] = None
    action: Optional[str] = None
    is_wildcard_resource: bool = False

    @property
    def is_global_resource_scope(self) -> bool:
        """True when the scope applies to all resources of a type (no resource_id)."""
        return self.scope_type == "global"

    @property
    def is_per_resource_scope(self) -> bool:
        """True when the scope names a specific resource (has a resource_id)."""
        return self.scope_type == "per_resource"
def parse_scope(scope: str, admin_scope: Optional[str] = None) -> ParsedScope:
    """
    Split a scope string into a structured ParsedScope.

    Args:
        scope: Raw scope string, e.g. "agents:web-agent:run"
        admin_scope: Scope string granting admin access (default: "agent_os:admin")

    Returns:
        ParsedScope describing the scope type and its components. Strings that
        are neither 2 nor 3 colon-separated parts parse as "unknown".

    Examples:
        >>> parse_scope("agent_os:admin")
        ParsedScope(raw="agent_os:admin", scope_type="admin")
        >>> parse_scope("system:read")
        ParsedScope(raw="system:read", scope_type="global", resource="system", action="read")
        >>> parse_scope("agents:web-agent:read")
        ParsedScope(raw="...", scope_type="per_resource", resource="agents", resource_id="web-agent", action="read")
        >>> parse_scope("agents:*:run")
        ParsedScope(raw="...", scope_type="per_resource", resource="agents", resource_id="*", action="run", is_wildcard_resource=True)
    """
    if scope == (admin_scope or AgentOSScope.ADMIN.value):
        return ParsedScope(raw=scope, scope_type="admin")

    segments = scope.split(":")

    # resource:action — a global resource scope
    if len(segments) == 2:
        resource, action = segments
        return ParsedScope(raw=scope, scope_type="global", resource=resource, action=action)

    # resource:<resource-id>:action — a per-resource scope ("*" id means wildcard)
    if len(segments) == 3:
        resource, rid, action = segments
        return ParsedScope(
            raw=scope,
            scope_type="per_resource",
            resource=resource,
            resource_id=rid,
            action=action,
            is_wildcard_resource=(rid == "*"),
        )

    # Anything else is malformed
    return ParsedScope(raw=scope, scope_type="unknown")
def matches_scope(
    user_scope: ParsedScope,
    required_scope: ParsedScope,
    resource_id: Optional[str] = None,
) -> bool:
    """
    Decide whether a user's scope satisfies a required scope.

    Args:
        user_scope: The user's parsed scope
        required_scope: The required parsed scope
        resource_id: The specific resource ID being accessed

    Returns:
        True if the user's scope satisfies the required scope

    Examples:
        >>> matches_scope(parse_scope("system:read"), parse_scope("system:read"))
        True
        >>> matches_scope(parse_scope("agents:web-agent:run"), parse_scope("agents:<id>:run"), resource_id="web-agent")
        True
        >>> matches_scope(parse_scope("agents:*:run"), parse_scope("agents:<id>:run"), resource_id="web-agent")
        True
    """
    # Admin satisfies everything
    if user_scope.scope_type == "admin":
        return True

    # Malformed scopes on either side never match
    if "unknown" in (user_scope.scope_type, required_scope.scope_type):
        return False

    # Resource type and action must both line up
    if user_scope.resource != required_scope.resource:
        return False
    if user_scope.action != required_scope.action:
        return False

    if required_scope.resource_id:
        # Wildcard or global (no id) user scopes cover any specific resource
        if user_scope.is_wildcard_resource or not user_scope.resource_id:
            return True
        # Otherwise the user's specific id must be the one being accessed
        return user_scope.resource_id == resource_id

    # Required scope is global: satisfied by a global or wildcard user scope
    return not user_scope.resource_id or user_scope.is_wildcard_resource
def has_required_scopes(
    user_scopes: List[str],
    required_scopes: List[str],
    resource_type: Optional[str] = None,
    resource_id: Optional[str] = None,
    admin_scope: Optional[str] = None,
) -> bool:
    """
    Check whether the user satisfies every required scope.

    Args:
        user_scopes: Scope strings the user holds
        required_scopes: Scope strings the endpoint requires
        resource_type: Type of resource being accessed ("agents", "teams", "workflows")
        resource_id: Specific resource ID being accessed
        admin_scope: Scope string granting admin access (default: "agent_os:admin")

    Returns:
        True if the user has all required scopes

    Examples:
        >>> has_required_scopes(["agents:read"], ["agents:read"])
        True
        >>> has_required_scopes(
        ...     ["agents:web-agent:run"], ["agents:run"],
        ...     resource_type="agents", resource_id="web-agent"
        ... )
        True
        >>> has_required_scopes(
        ...     ["agents:*:run"], ["agents:run"],
        ...     resource_type="agents", resource_id="any-agent"
        ... )
        True
    """
    if not required_scopes:
        return True

    parsed_user = [parse_scope(raw, admin_scope=admin_scope) for raw in user_scopes]

    # Admin short-circuits every check
    if any(parsed.scope_type == "admin" for parsed in parsed_user):
        return True

    for required_raw in required_scopes:
        segments = required_raw.split(":")
        if len(segments) == 2 and resource_id and resource_type:
            # Upgrade a "resource:action" requirement to a per-resource check
            # against the concrete resource being accessed.
            action = segments[1]
            required = parse_scope(f"{resource_type}:<resource-id>:{action}", admin_scope=admin_scope)
        else:
            required = parse_scope(required_raw, admin_scope=admin_scope)

        if not any(matches_scope(user, required, resource_id=resource_id) for user in parsed_user):
            return False

    return True
def get_accessible_resource_ids(
    user_scopes: List[str],
    resource_type: str,
    admin_scope: Optional[str] = None,
) -> Set[str]:
    """
    Get the set of resource IDs the user has access to.

    Args:
        user_scopes: Scope strings the user holds
        resource_type: Type of resource ("agents", "teams", "workflows")
        admin_scope: Scope string granting admin access (default: "agent_os:admin")

    Returns:
        Set of accessible resource IDs; {"*"} means unrestricted access.

    Examples:
        >>> get_accessible_resource_ids(["agents:agent-1:read", "agents:agent-2:read"], "agents")
        {'agent-1', 'agent-2'}
        >>> get_accessible_resource_ids(["agents:*:read"], "agents")
        {'*'}
        >>> get_accessible_resource_ids(["agents:read"], "agents")
        {'*'}
    """
    parsed = [parse_scope(raw, admin_scope=admin_scope) for raw in user_scopes]

    # Admin, global ("agents:read"), or wildcard ("agents:*:read") scopes
    # each grant access to every resource of the type.
    for scope in parsed:
        if scope.scope_type == "admin":
            return {"*"}
        if scope.resource == resource_type and scope.action in ["read", "run"]:
            if not scope.resource_id or scope.is_wildcard_resource:
                return {"*"}

    # Otherwise collect the specific ids named by per-resource scopes
    return {
        scope.resource_id
        for scope in parsed
        if scope.resource == resource_type
        and scope.resource_id
        and not scope.is_wildcard_resource
        and scope.action in ["read", "run"]
    }
def get_default_scope_mappings() -> Dict[str, List[str]]:
    """
    Get default scope mappings for AgentOS endpoints.

    Returns a dictionary mapping route patterns (with HTTP methods) to required scope templates.

    Format: "METHOD /path/pattern": ["resource:action"]

    NOTE(review): "*" here presumably matches a single path segment when routes
    are matched by the authorization layer — confirm against the middleware.
    """
    return {
        # System endpoints
        "GET /config": ["system:read"],
        "GET /models": ["system:read"],
        # Agent endpoints
        "GET /agents": ["agents:read"],
        "GET /agents/*": ["agents:read"],
        "POST /agents": ["agents:write"],
        "PATCH /agents/*": ["agents:write"],
        "DELETE /agents/*": ["agents:delete"],
        "POST /agents/*/runs": ["agents:run"],
        "POST /agents/*/runs/*/continue": ["agents:run"],
        "POST /agents/*/runs/*/cancel": ["agents:run"],
        # Team endpoints
        "GET /teams": ["teams:read"],
        "GET /teams/*": ["teams:read"],
        "POST /teams": ["teams:write"],
        "PATCH /teams/*": ["teams:write"],
        "DELETE /teams/*": ["teams:delete"],
        "POST /teams/*/runs": ["teams:run"],
        "POST /teams/*/runs/*/continue": ["teams:run"],
        "POST /teams/*/runs/*/cancel": ["teams:run"],
        # Workflow endpoints
        "GET /workflows": ["workflows:read"],
        "GET /workflows/*": ["workflows:read"],
        "POST /workflows": ["workflows:write"],
        "PATCH /workflows/*": ["workflows:write"],
        "DELETE /workflows/*": ["workflows:delete"],
        "POST /workflows/*/runs": ["workflows:run"],
        "POST /workflows/*/runs/*/continue": ["workflows:run"],
        "POST /workflows/*/runs/*/cancel": ["workflows:run"],
        # Session endpoints
        "GET /sessions": ["sessions:read"],
        "GET /sessions/*": ["sessions:read"],
        "POST /sessions": ["sessions:write"],
        "POST /sessions/*/rename": ["sessions:write"],
        "PATCH /sessions/*": ["sessions:write"],
        "DELETE /sessions": ["sessions:delete"],
        "DELETE /sessions/*": ["sessions:delete"],
        # Memory endpoints
        "GET /memories": ["memories:read"],
        "GET /memories/*": ["memories:read"],
        "GET /memory_topics": ["memories:read"],
        "GET /user_memory_stats": ["memories:read"],
        "POST /memories": ["memories:write"],
        "PATCH /memories/*": ["memories:write"],
        "DELETE /memories": ["memories:delete"],
        "DELETE /memories/*": ["memories:delete"],
        "POST /optimize-memories": ["memories:write"],
        # Knowledge endpoints
        "GET /knowledge/content": ["knowledge:read"],
        "GET /knowledge/content/*": ["knowledge:read"],
        "GET /knowledge/config": ["knowledge:read"],
        "POST /knowledge/content": ["knowledge:write"],
        "PATCH /knowledge/content/*": ["knowledge:write"],
        # Search reads knowledge, hence knowledge:read despite being a POST
        "POST /knowledge/search": ["knowledge:read"],
        "DELETE /knowledge/content": ["knowledge:delete"],
        "DELETE /knowledge/content/*": ["knowledge:delete"],
        # Metrics endpoints
        "GET /metrics": ["metrics:read"],
        "POST /metrics/refresh": ["metrics:write"],
        # Evaluation endpoints
        "GET /eval-runs": ["evals:read"],
        "GET /eval-runs/*": ["evals:read"],
        "POST /eval-runs": ["evals:write"],
        "PATCH /eval-runs/*": ["evals:write"],
        "DELETE /eval-runs": ["evals:delete"],
        # Trace endpoints
        "GET /traces": ["traces:read"],
        "GET /traces/*": ["traces:read"],
        "GET /trace_session_stats": ["traces:read"],
        # Schedule endpoints
        "GET /schedules": ["schedules:read"],
        "GET /schedules/*": ["schedules:read"],
        "POST /schedules": ["schedules:write"],
        "PATCH /schedules/*": ["schedules:write"],
        "DELETE /schedules/*": ["schedules:delete"],
        "POST /schedules/*/enable": ["schedules:write"],
        "POST /schedules/*/disable": ["schedules:write"],
        "POST /schedules/*/trigger": ["schedules:write"],
        "GET /schedules/*/runs": ["schedules:read"],
        "GET /schedules/*/runs/*": ["schedules:read"],
        # Approval endpoints
        "GET /approvals": ["approvals:read"],
        "GET /approvals/count": ["approvals:read"],
        "GET /approvals/*": ["approvals:read"],
        "GET /approvals/*/status": ["approvals:read"],
        "POST /approvals/*/resolve": ["approvals:write"],
        "DELETE /approvals/*": ["approvals:delete"],
    }
def get_scope_value(scope: AgentOSScope) -> str:
    """
    Get the string value of a scope.

    Args:
        scope: The AgentOSScope enum value

    Returns:
        The string value of the scope

    Example:
        >>> get_scope_value(AgentOSScope.ADMIN)
        'agent_os:admin'
    """
    return scope.value
def get_all_scopes() -> list[str]:
    """
    Get a list of all available scope strings.

    Returns:
        List of all scope string values

    Example:
        >>> scopes = get_all_scopes()
        >>> 'agent_os:admin' in scopes
        True
    """
    return [scope.value for scope in AgentOSScope]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/scopes.py",
"license": "Apache License 2.0",
"lines": 406,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/utils/cryptography.py | from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
def generate_rsa_keys():
    """Create a 2048-bit RSA key pair for RS256 JWT signing/verification.

    Returns:
        Tuple of (private_pem, public_pem) UTF-8 strings: the private key
        (PKCS8, unencrypted) signs tokens on the auth server, and the public
        key (SubjectPublicKeyInfo) verifies them in AgentOS.
    """
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

    private_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode("utf-8")

    public_pem = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    ).decode("utf-8")

    return private_pem, public_pem
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/cryptography.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/integration/os/test_authorization.py | """Integration tests for JWT middleware with RBAC (scope-based authorization).
This test suite validates the AgentOS RBAC system using simplified scopes:
- Global resource: resource:action
- Per-resource: resource:<resource-id>:action
- Wildcards: resource:*:action
- Admin: agent_os:admin - Full access to everything
The AgentOS ID is verified via the JWT `aud` (audience) claim.
"""
from datetime import UTC, datetime, timedelta
import jwt
import pytest
from fastapi.testclient import TestClient
from agno.agent.agent import Agent
from agno.os import AgentOS
from agno.os.config import AuthorizationConfig
from agno.os.middleware import JWTMiddleware, TokenSource
from agno.team.team import Team
from agno.workflow.workflow import Workflow
# Test JWT secret
JWT_SECRET = "test-secret-key-for-rbac-tests"  # HS256 signing key shared by every token in this suite
TEST_OS_ID = "test-os"  # AgentOS id; also used as the JWT `aud` (audience) claim
@pytest.fixture
def test_agent(shared_db):
    """Primary agent fixture shared by the authorization tests."""
    return Agent(
        id="test-agent",
        name="test-agent",
        instructions="You are a test agent.",
        db=shared_db,
    )
@pytest.fixture
def second_agent(shared_db):
    """Additional agent fixture for multi-agent scenarios."""
    return Agent(
        id="second-agent",
        name="second-agent",
        instructions="You are another test agent.",
        db=shared_db,
    )
@pytest.fixture
def third_agent(shared_db):
    """Third agent fixture used by filtering scenarios."""
    return Agent(
        id="third-agent",
        name="third-agent",
        instructions="You are a third test agent.",
        db=shared_db,
    )
@pytest.fixture
def test_team(test_agent, second_agent, shared_db):
    """Team fixture composed of the first two agents."""
    return Team(
        id="test-team",
        name="test-team",
        members=[test_agent, second_agent],
        db=shared_db,
    )
@pytest.fixture
def second_team(test_agent, shared_db):
    """Single-member team fixture."""
    return Team(
        id="second-team",
        name="second-team",
        members=[test_agent],
        db=shared_db,
    )
@pytest.fixture
def test_workflow(shared_db):
    """Workflow fixture backed by a trivial async step."""

    async def simple_workflow(session_state):
        return "workflow result"

    return Workflow(
        id="test-workflow",
        name="test-workflow",
        steps=simple_workflow,
        db=shared_db,
    )
@pytest.fixture
def second_workflow(shared_db):
    """Second workflow fixture with its own trivial async step."""

    async def another_workflow(session_state):
        return "another result"

    return Workflow(
        id="second-workflow",
        name="second-workflow",
        steps=another_workflow,
        db=shared_db,
    )
def create_jwt_token(
    scopes: list[str],
    user_id: str = "test_user",
    session_id: str | None = None,
    extra_claims: dict | None = None,
    audience: str = TEST_OS_ID,
) -> str:
    """Build an HS256-signed JWT carrying the given scopes, claims, and audience."""
    claims = {
        "sub": user_id,
        "session_id": session_id if session_id else f"session_{user_id}",
        # Audience claim carries the AgentOS id for OS verification
        "aud": audience,
        "scopes": scopes,
        "exp": datetime.now(UTC) + timedelta(hours=1),
        "iat": datetime.now(UTC),
    }
    claims.update(extra_claims or {})
    return jwt.encode(claims, JWT_SECRET, algorithm="HS256")
def test_valid_scope_grants_access(test_agent):
    """A token carrying the matching scope is authorized."""
    agent_os = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    client = TestClient(agent_os.get_app())

    # Token holds the exact scope the route requires, with the right audience
    token = create_jwt_token(scopes=["agents:read"])
    response = client.get("/agents", headers={"Authorization": f"Bearer {token}"})

    assert response.status_code == 200, response.text
def test_missing_scope_denies_access(test_agent):
    """A token lacking the required scope is rejected with 403."""
    agent_os = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    client = TestClient(agent_os.get_app())

    # Token carries a sessions scope only — not the agents scope this route needs
    token = create_jwt_token(scopes=["sessions:read"])
    response = client.get("/agents", headers={"Authorization": f"Bearer {token}"})

    assert response.status_code == 403
    body = response.json()
    assert "detail" in body
    assert "permissions" in body["detail"].lower()
def test_admin_scope_grants_full_access(test_agent):
    """The agent_os:admin scope bypasses all per-endpoint scope checks."""
    agent_os = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    client = TestClient(agent_os.get_app())

    token = create_jwt_token(scopes=["agent_os:admin"])
    headers = {"Authorization": f"Bearer {token}"}

    # Both read and run endpoints must accept the admin-only token
    list_response = client.get("/agents", headers=headers)
    assert list_response.status_code == 200, list_response.text

    run_response = client.post("/agents/test-agent/runs", headers=headers, data={"message": "test"})
    assert run_response.status_code in [200, 201], run_response.text
def test_wildcard_resource_grants_all_agents(test_agent):
    """A wildcard resource scope covers every agent for that action."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard read + run scopes for the agents resource
    token = create_jwt_token(
        scopes=[
            "agents:*:read",
            "agents:*:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Both read and run are granted for any agent
    resp = client.get("/agents", headers=auth)
    assert resp.status_code == 200

    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
def test_audience_verification(test_agent):
    """With verify_audience enabled, a matching aud claim grants access."""
    os_instance = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    )
    app = os_instance.get_app()

    # Attach the middleware by hand so verify_audience can be switched on
    app.add_middleware(
        JWTMiddleware,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        verify_audience=True,
    )
    client = TestClient(app)

    # Token whose audience matches the OS id
    token = create_jwt_token(
        scopes=["agents:read", "agents:*:run"],
        audience=TEST_OS_ID,
    )
    auth = {"Authorization": "Bearer " + token}

    resp = client.get("/agents", headers=auth)
    assert resp.status_code == 200

    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
def test_per_resource_scope(test_agent, second_agent):
    """Scopes naming a specific agent only apply to that agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Scopes cover test-agent only, not second-agent
    token = create_jwt_token(
        scopes=[
            "agents:test-agent:read",
            "agents:test-agent:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Running test-agent is allowed
    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201], resp.text

    # Running second-agent is forbidden
    resp = client.post("/agents/second-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code == 403
def test_get_agent_by_id_with_specific_scope(test_agent, second_agent):
    """GET /agents/{id} honors a resource-level read scope."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Read scope for test-agent only
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:test-agent:read"])}

    # Fetching test-agent succeeds
    resp = client.get("/agents/test-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-agent"

    # Fetching second-agent is forbidden
    resp = client.get("/agents/second-agent", headers=auth)
    assert resp.status_code == 403
    assert "permissions" in resp.json()["detail"].lower()
def test_get_agent_by_id_with_global_scope(test_agent, second_agent):
    """GET /agents/{id} accepts the global agents:read scope for any agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global agents read scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:read"])}

    # Every agent is retrievable
    resp = client.get("/agents/test-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-agent"

    resp = client.get("/agents/second-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-agent"
def test_get_agent_by_id_with_wildcard_scope(test_agent, second_agent):
    """GET /agents/{id} accepts the wildcard agents:*:read scope."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard read scope over all agents
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:*:read"])}

    # Every agent is retrievable
    resp = client.get("/agents/test-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-agent"

    resp = client.get("/agents/second-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-agent"
def test_get_agent_by_id_with_admin_scope(test_agent, second_agent):
    """GET /agents/{id} accepts the admin scope for any agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Admin scope bypasses per-agent checks
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agent_os:admin"])}

    resp = client.get("/agents/test-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-agent"

    resp = client.get("/agents/second-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-agent"
def test_get_agent_by_id_without_scope(test_agent):
    """GET /agents/{id} is rejected when no agents scope is present."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Token with an unrelated scope only
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["sessions:read"])}

    resp = client.get("/agents/test-agent", headers=auth)
    assert resp.status_code == 403
    assert "permissions" in resp.json()["detail"].lower()
def test_get_agent_by_id_with_wrong_specific_scope(test_agent, second_agent):
    """GET /agents/{id} is rejected when the scope names a different agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Read scope for second-agent only
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:second-agent:read"])}

    # test-agent is off limits
    resp = client.get("/agents/test-agent", headers=auth)
    assert resp.status_code == 403
    assert "permissions" in resp.json()["detail"].lower()

    # second-agent remains accessible
    resp = client.get("/agents/second-agent", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-agent"
def test_global_resource_scope(test_agent, second_agent):
    """A global resource scope (no resource id) covers every resource of that type."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global agents scopes, no resource id segment
    token = create_jwt_token(
        scopes=[
            "agents:read",
            "agents:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Listing works
    resp = client.get("/agents", headers=auth)
    assert resp.status_code == 200

    # Running either agent works
    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    resp = client.post("/agents/second-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
def test_excluded_routes_skip_jwt(test_agent):
    """Default-excluded routes such as /health bypass JWT enforcement."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Health endpoint needs no token (excluded by default)
    assert client.get("/health").status_code == 200

    # Protected endpoints still demand a token
    assert client.get("/agents").status_code == 401  # Missing token
def test_expired_token_rejected(test_agent):
    """A token past its exp claim is rejected with 401."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    ).get_app()
    app.add_middleware(
        JWTMiddleware,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
    )
    client = TestClient(app)

    # Build a token that expired one hour ago
    claims = {
        "sub": "test_user",
        "session_id": "test_session",
        "aud": TEST_OS_ID,
        "scopes": ["agents:read"],
        "exp": datetime.now(UTC) - timedelta(hours=1),  # already expired
        "iat": datetime.now(UTC) - timedelta(hours=2),
    }
    stale_token = jwt.encode(claims, JWT_SECRET, algorithm="HS256")

    resp = client.get("/agents", headers={"Authorization": f"Bearer {stale_token}"})
    assert resp.status_code == 401
    assert "expired" in resp.json()["detail"].lower()
def test_missing_token_returns_401(test_agent):
    """With authorization on, requests without a JWT get 401."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # No Authorization header at all
    resp = client.get("/agents")
    assert resp.status_code == 401
    assert "detail" in resp.json()
def test_invalid_token_format(test_agent):
    """A malformed bearer token is rejected with 401."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Token is not a valid JWT at all
    resp = client.get("/agents", headers={"Authorization": "Bearer invalid-token-format"})
    assert resp.status_code == 401
    assert "detail" in resp.json()
def test_token_from_cookie(test_agent):
    """The middleware can read the JWT from a cookie instead of the header."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    ).get_app()
    app.add_middleware(
        JWTMiddleware,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        token_source=TokenSource.COOKIE,
        cookie_name="access_token",
    )
    client = TestClient(app)

    # Deliver a valid token via the configured cookie
    client.cookies.set("access_token", create_jwt_token(scopes=["agents:read"]))

    assert client.get("/agents").status_code == 200
def test_dependencies_claims_extraction(test_agent):
    """Requests with configured dependencies claims in the JWT pass through cleanly."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    ).get_app()
    app.add_middleware(
        JWTMiddleware,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=False,  # Just test claim extraction
        dependencies_claims=["org_id", "tenant_id"],
    )
    client = TestClient(app)

    # Token with the dependency claims populated
    token = create_jwt_token(
        scopes=[],
        extra_claims={
            "org_id": "org-123",
            "tenant_id": "tenant-456",
        },
    )

    # request.state is not directly observable from here; we only verify
    # that the request succeeds with the claims present
    resp = client.get("/health", headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
def test_session_state_claims_extraction(test_agent):
    """Requests with configured session-state claims in the JWT pass through cleanly."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    ).get_app()
    app.add_middleware(
        JWTMiddleware,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=False,
        session_state_claims=["theme", "language"],
    )
    client = TestClient(app)

    # Token carrying the session-state claims
    token = create_jwt_token(
        scopes=[],
        extra_claims={
            "theme": "dark",
            "language": "en",
        },
    )

    resp = client.get("/health", headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
def test_system_scope(test_agent):
    """The system:read scope grants access to the configuration endpoint."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # System-level read scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["system:read"])}

    resp = client.get("/config", headers=auth)
    assert resp.status_code == 200
def test_different_audience_blocks_access(test_agent):
    """With verify_audience enabled, a token minted for another OS id is rejected."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    ).get_app()

    # Attach the middleware by hand so verify_audience can be switched on
    app.add_middleware(
        JWTMiddleware,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        verify_audience=True,
    )
    client = TestClient(app)

    # Token whose audience does not match this OS
    token = create_jwt_token(scopes=["agents:read"], audience="different-os")

    resp = client.get("/agents", headers={"Authorization": f"Bearer {token}"})
    # Audience mismatch -> unauthorized
    assert resp.status_code == 401
def test_agent_filtering_with_global_scope(test_agent, second_agent, third_agent):
    """Listing with the global agents:read scope returns every agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent, third_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global agents scope, no resource id
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:read"])}

    resp = client.get("/agents", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 3
    assert {a["id"] for a in listed} == {"test-agent", "second-agent", "third-agent"}
def test_agent_filtering_with_wildcard_scope(test_agent, second_agent, third_agent):
    """Listing with the wildcard agents:*:read scope returns every agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent, third_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard resource scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:*:read"])}

    resp = client.get("/agents", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 3
    assert {a["id"] for a in listed} == {"test-agent", "second-agent", "third-agent"}
def test_agent_filtering_with_specific_scope(test_agent, second_agent, third_agent):
    """Listing with a single-agent scope returns only that agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent, third_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Scope limited to test-agent
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:test-agent:read"])}

    resp = client.get("/agents", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 1
    assert listed[0]["id"] == "test-agent"
def test_agent_filtering_with_multiple_specific_scopes(test_agent, second_agent, third_agent):
    """Listing with several per-agent scopes returns exactly those agents."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent, third_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Scopes for test-agent and second-agent; third-agent left out
    token = create_jwt_token(
        scopes=[
            "agents:test-agent:read",
            "agents:second-agent:read",
        ]
    )

    resp = client.get("/agents", headers={"Authorization": f"Bearer {token}"})
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 2
    assert {a["id"] for a in listed} == {"test-agent", "second-agent"}
def test_agent_run_blocked_without_specific_scope(test_agent, second_agent):
    """Running an agent requires a run scope for that specific agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Run scope for test-agent only; second-agent is read-only
    token = create_jwt_token(
        scopes=[
            "agents:test-agent:read",
            "agents:test-agent:run",
            "agents:second-agent:read",
            # Note: No run scope for second-agent
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Running test-agent succeeds
    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    # Running second-agent is forbidden
    resp = client.post("/agents/second-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code == 403
def test_agent_run_with_wildcard_scope(test_agent, second_agent):
    """A wildcard run scope permits running any agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard read + run scopes
    token = create_jwt_token(
        scopes=[
            "agents:*:read",
            "agents:*:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Both agents are runnable
    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    resp = client.post("/agents/second-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
def test_agent_run_with_global_scope(test_agent, second_agent):
    """A global run scope permits running any agent."""
    app = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global read + run scopes, no resource id
    token = create_jwt_token(
        scopes=[
            "agents:read",
            "agents:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Both agents are runnable
    resp = client.post("/agents/test-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    resp = client.post("/agents/second-agent/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
# ============================================================================
# Resource Filtering Tests - Teams
# ============================================================================
def test_team_filtering_with_global_scope(test_team, second_team):
    """Listing with the global teams:read scope returns every team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global teams scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:read"])}

    resp = client.get("/teams", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 2
    assert {t["id"] for t in listed} == {"test-team", "second-team"}
def test_team_filtering_with_wildcard_scope(test_team, second_team):
    """Listing with the wildcard teams:*:read scope returns every team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard resource scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:*:read"])}

    resp = client.get("/teams", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 2
    assert {t["id"] for t in listed} == {"test-team", "second-team"}
def test_team_filtering_with_specific_scope(test_team, second_team):
    """Listing with a single-team scope returns only that team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Scope limited to test-team
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:test-team:read"])}

    resp = client.get("/teams", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 1
    assert listed[0]["id"] == "test-team"
def test_get_team_by_id_with_specific_scope(test_team, second_team):
    """GET /teams/{id} honors a resource-level read scope."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Read scope for test-team only
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:test-team:read"])}

    # Fetching test-team succeeds
    resp = client.get("/teams/test-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-team"

    # Fetching second-team is forbidden
    resp = client.get("/teams/second-team", headers=auth)
    assert resp.status_code == 403
    assert "permissions" in resp.json()["detail"].lower()
def test_get_team_by_id_with_global_scope(test_team, second_team):
    """GET /teams/{id} accepts the global teams:read scope for any team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global teams read scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:read"])}

    # Every team is retrievable
    resp = client.get("/teams/test-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-team"

    resp = client.get("/teams/second-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-team"
def test_get_team_by_id_with_wildcard_scope(test_team, second_team):
    """GET /teams/{id} accepts the wildcard teams:*:read scope."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard read scope over all teams
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:*:read"])}

    # Every team is retrievable
    resp = client.get("/teams/test-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-team"

    resp = client.get("/teams/second-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-team"
def test_get_team_by_id_with_admin_scope(test_team, second_team):
    """GET /teams/{id} accepts the admin scope for any team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Admin scope bypasses per-team checks
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agent_os:admin"])}

    resp = client.get("/teams/test-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "test-team"

    resp = client.get("/teams/second-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-team"
def test_get_team_by_id_without_scope(test_team):
    """GET /teams/{id} is rejected when no teams scope is present."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Token with an unrelated scope only
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["agents:read"])}

    resp = client.get("/teams/test-team", headers=auth)
    assert resp.status_code == 403
    assert "permissions" in resp.json()["detail"].lower()
def test_get_team_by_id_with_wrong_specific_scope(test_team, second_team):
    """GET /teams/{id} is rejected when the scope names a different team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Read scope for second-team only
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["teams:second-team:read"])}

    # test-team is off limits
    resp = client.get("/teams/test-team", headers=auth)
    assert resp.status_code == 403
    assert "permissions" in resp.json()["detail"].lower()

    # second-team remains accessible
    resp = client.get("/teams/second-team", headers=auth)
    assert resp.status_code == 200
    assert resp.json()["id"] == "second-team"
def test_team_run_blocked_without_specific_scope(test_team, second_team):
    """Running a team requires a run scope for that specific team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Run scope for test-team only; second-team is read-only
    token = create_jwt_token(
        scopes=[
            "teams:test-team:read",
            "teams:test-team:run",
            "teams:second-team:read",
            # Note: No run scope for second-team
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Running test-team succeeds
    resp = client.post("/teams/test-team/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    # Running second-team is forbidden
    resp = client.post("/teams/second-team/runs", headers=auth, data={"message": "test"})
    assert resp.status_code == 403
def test_team_run_with_wildcard_scope(test_team, second_team):
    """A wildcard run scope permits running any team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard read + run scopes
    token = create_jwt_token(
        scopes=[
            "teams:*:read",
            "teams:*:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Both teams are runnable
    resp = client.post("/teams/test-team/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    resp = client.post("/teams/second-team/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
def test_team_run_with_global_scope(test_team, second_team):
    """A global run scope permits running any team."""
    app = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global read + run scopes, no resource id
    token = create_jwt_token(
        scopes=[
            "teams:read",
            "teams:run",
        ]
    )
    auth = {"Authorization": "Bearer " + token}

    # Both teams are runnable
    resp = client.post("/teams/test-team/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]

    resp = client.post("/teams/second-team/runs", headers=auth, data={"message": "test"})
    assert resp.status_code in [200, 201]
# ============================================================================
# Resource Filtering Tests - Workflows
# ============================================================================
def test_workflow_filtering_with_global_scope(test_workflow, second_workflow):
    """Listing with the global workflows:read scope returns every workflow."""
    app = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Global workflows scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["workflows:read"])}

    resp = client.get("/workflows", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 2
    assert {w["id"] for w in listed} == {"test-workflow", "second-workflow"}
def test_workflow_filtering_with_wildcard_scope(test_workflow, second_workflow):
    """Listing with the wildcard workflows:*:read scope returns every workflow."""
    app = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Wildcard resource scope
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["workflows:*:read"])}

    resp = client.get("/workflows", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 2
    assert {w["id"] for w in listed} == {"test-workflow", "second-workflow"}
def test_workflow_filtering_with_specific_scope(test_workflow, second_workflow):
    """Listing with a single-workflow scope returns only that workflow."""
    app = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    ).get_app()
    client = TestClient(app)

    # Scope limited to test-workflow
    auth = {"Authorization": "Bearer " + create_jwt_token(scopes=["workflows:test-workflow:read"])}

    resp = client.get("/workflows", headers=auth)
    assert resp.status_code == 200
    listed = resp.json()
    assert len(listed) == 1
    assert listed[0]["id"] == "test-workflow"
def test_get_workflow_by_id_with_specific_scope(test_workflow, second_workflow):
    """GET /workflows/{id} honors a resource-level scope: the named workflow is readable, others are not."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Read access is granted for test-workflow only.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:test-workflow:read'])}"}
    ok = http.get("/workflows/test-workflow", headers=auth)
    assert ok.status_code == 200
    assert ok.json()["id"] == "test-workflow"
    # second-workflow is outside the granted scope and must be rejected.
    denied = http.get("/workflows/second-workflow", headers=auth)
    assert denied.status_code == 403
    assert "permissions" in denied.json()["detail"].lower()
def test_get_workflow_by_id_with_global_scope(test_workflow, second_workflow):
    """A global workflows:read scope should allow fetching any workflow by id."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:read'])}"}
    # Both workflows must be retrievable under the global scope.
    for workflow_id in ("test-workflow", "second-workflow"):
        res = http.get(f"/workflows/{workflow_id}", headers=auth)
        assert res.status_code == 200
        assert res.json()["id"] == workflow_id
def test_get_workflow_by_id_with_wildcard_scope(test_workflow, second_workflow):
    """A workflows:*:read wildcard scope should allow fetching any workflow by id."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:*:read'])}"}
    # The wildcard covers every workflow id.
    for workflow_id in ("test-workflow", "second-workflow"):
        res = http.get(f"/workflows/{workflow_id}", headers=auth)
        assert res.status_code == 200
        assert res.json()["id"] == workflow_id
def test_get_workflow_by_id_with_admin_scope(test_workflow, second_workflow):
    """The agent_os:admin scope should allow fetching any workflow by id."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agent_os:admin'])}"}
    # Admin implies full read access to every workflow.
    for workflow_id in ("test-workflow", "second-workflow"):
        res = http.get(f"/workflows/{workflow_id}", headers=auth)
        assert res.status_code == 200
        assert res.json()["id"] == workflow_id
def test_get_workflow_by_id_without_scope(test_workflow):
    """GET /workflows/{id} must be rejected when the token carries no workflows scope at all."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Only an agents scope — nothing granting workflow access.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:read'])}"}
    denied = http.get("/workflows/test-workflow", headers=auth)
    assert denied.status_code == 403
    assert "permissions" in denied.json()["detail"].lower()
def test_get_workflow_by_id_with_wrong_specific_scope(test_workflow, second_workflow):
    """A scope for one workflow must not grant access to a different workflow's detail endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Read access is scoped to second-workflow exclusively.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:second-workflow:read'])}"}
    denied = http.get("/workflows/test-workflow", headers=auth)
    assert denied.status_code == 403
    assert "permissions" in denied.json()["detail"].lower()
    granted = http.get("/workflows/second-workflow", headers=auth)
    assert granted.status_code == 200
    assert granted.json()["id"] == "second-workflow"
def test_workflow_run_blocked_without_specific_scope(test_workflow, second_workflow):
    """Running a workflow requires a run scope for that specific workflow; read alone is not enough."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # second-workflow is readable but deliberately has no run grant.
    granted_scopes = [
        "workflows:test-workflow:read",
        "workflows:test-workflow:run",
        "workflows:second-workflow:read",
    ]
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=granted_scopes)}"}
    allowed = http.post("/workflows/test-workflow/runs", headers=auth, data={"message": "test"})
    assert allowed.status_code in [200, 201]
    blocked = http.post("/workflows/second-workflow/runs", headers=auth, data={"message": "test"})
    assert blocked.status_code == 403
def test_workflow_run_with_wildcard_scope(test_workflow, second_workflow):
    """A workflows:*:run wildcard scope should permit running every workflow."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:*:read', 'workflows:*:run'])}"}
    # Both workflow run endpoints must accept the wildcard grant.
    for workflow_id in ("test-workflow", "second-workflow"):
        res = http.post(f"/workflows/{workflow_id}/runs", headers=auth, data={"message": "test"})
        assert res.status_code in [200, 201]
def test_workflow_run_with_global_scope(test_workflow, second_workflow):
    """A global workflows:run scope should permit running every workflow."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:read', 'workflows:run'])}"}
    # Both workflow run endpoints must accept the global grant.
    for workflow_id in ("test-workflow", "second-workflow"):
        res = http.post(f"/workflows/{workflow_id}/runs", headers=auth, data={"message": "test"})
        assert res.status_code in [200, 201]
# ============================================================================
# Mixed Resource Type Tests
# ============================================================================
def test_mixed_resource_filtering(test_agent, second_agent, test_team, second_team, test_workflow, second_workflow):
    """Granular scopes for different resource types filter each listing independently."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        teams=[test_team, second_team],
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # One grant of each flavor: specific (agents), global (teams), wildcard (workflows).
    granted_scopes = [
        "agents:test-agent:read",
        "teams:read",
        "workflows:*:read",
    ]
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=granted_scopes)}"}
    # Specific scope: only test-agent should appear.
    res = http.get("/agents", headers=auth)
    assert res.status_code == 200
    listed_agents = res.json()
    assert len(listed_agents) == 1
    assert listed_agents[0]["id"] == "test-agent"
    # Global scope: every team should appear.
    res = http.get("/teams", headers=auth)
    assert res.status_code == 200
    listed_teams = res.json()
    assert len(listed_teams) == 2
    assert {item["id"] for item in listed_teams} == {"test-team", "second-team"}
    # Wildcard scope: every workflow should appear.
    res = http.get("/workflows", headers=auth)
    assert res.status_code == 200
    listed_workflows = res.json()
    assert len(listed_workflows) == 2
    assert {item["id"] for item in listed_workflows} == {"test-workflow", "second-workflow"}
def test_no_access_to_resource_type(test_agent, test_team, test_workflow):
    """Resource types with no matching scope at all respond with 403 Insufficient permissions."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        teams=[test_team],
        workflows=[test_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Only agents are granted; teams and workflows get nothing.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:read'])}"}
    res = http.get("/agents", headers=auth)
    assert res.status_code == 200
    assert len(res.json()) == 1
    # Unscoped resource types are denied outright, not filtered to empty lists.
    assert http.get("/teams", headers=auth).status_code == 403
    assert http.get("/workflows", headers=auth).status_code == 403
def test_admin_sees_all_resources(test_agent, second_agent, test_team, test_workflow):
    """The admin scope grants read and run access across every resource type."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        teams=[test_team],
        workflows=[test_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agent_os:admin'])}"}
    # Listings: the full inventory of every resource type is visible.
    for path, expected_count in (("/agents", 2), ("/teams", 1), ("/workflows", 1)):
        res = http.get(path, headers=auth)
        assert res.status_code == 200
        assert len(res.json()) == expected_count
    # Runs: admin can execute any resource of any type.
    for run_path in (
        "/agents/test-agent/runs",
        "/teams/test-team/runs",
        "/workflows/test-workflow/runs",
    ):
        res = http.post(run_path, headers=auth, data={"message": "test"})
        assert res.status_code in [200, 201]
# ============================================================================
# Trace Endpoint Authorization Tests
# ============================================================================
def test_traces_access_with_valid_scope(test_agent):
    """A traces:read scope should grant access to the traces listing endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['traces:read'])}"}
    res = http.get("/traces", headers=auth)
    assert res.status_code == 200
def test_traces_access_denied_without_scope(test_agent):
    """The traces listing must reject tokens lacking a traces:read scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # An agents scope alone does not cover traces.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:read'])}"}
    denied = http.get("/traces", headers=auth)
    assert denied.status_code == 403
    assert "permissions" in denied.json()["detail"].lower()
def test_traces_admin_access(test_agent):
    """The admin scope should grant access to the traces listing endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agent_os:admin'])}"}
    res = http.get("/traces", headers=auth)
    assert res.status_code == 200
def test_trace_detail_access_with_valid_scope(test_agent):
    """traces:read should authorize the trace detail endpoint (404 for a missing trace means auth passed)."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['traces:read'])}"}
    res = http.get("/traces/nonexistent-trace-id", headers=auth)
    # 404 = authorized but trace not found; a 403 here would indicate an auth failure.
    assert res.status_code == 404
def test_trace_detail_access_denied_without_scope(test_agent):
    """The trace detail endpoint must reject tokens lacking a traces:read scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # An agents scope alone does not cover traces.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:read'])}"}
    assert http.get("/traces/some-trace-id", headers=auth).status_code == 403
def test_trace_session_stats_access_with_valid_scope(test_agent):
    """A traces:read scope should grant access to the trace session stats endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['traces:read'])}"}
    res = http.get("/trace_session_stats", headers=auth)
    assert res.status_code == 200
def test_trace_session_stats_access_denied_without_scope(test_agent):
    """The trace session stats endpoint must reject tokens lacking a traces:read scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # An agents scope alone does not cover traces.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:read'])}"}
    assert http.get("/trace_session_stats", headers=auth).status_code == 403
def test_traces_access_with_multiple_scopes(test_agent):
    """A token carrying both agents and traces scopes should unlock both endpoints."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:read', 'traces:read'])}"}
    # Each granted resource type must be accessible.
    assert http.get("/agents", headers=auth).status_code == 200
    assert http.get("/traces", headers=auth).status_code == 200
# ============================================================================
# Cancel Endpoint Authorization Tests
# ============================================================================
def test_agent_cancel_with_run_scope(test_agent):
    """A resource-level agents run scope should authorize the cancel endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:test-agent:run'])}"}
    res = http.post("/agents/test-agent/runs/nonexistent-run-id/cancel", headers=auth)
    # Cancel records the intent even for unknown run ids, so an authorized call returns 200.
    assert res.status_code == 200
def test_agent_cancel_blocked_without_run_scope(test_agent, second_agent):
    """Cancel must be rejected for an agent outside the token's run scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Run access is scoped to test-agent; second-agent has no grant.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:test-agent:run'])}"}
    res = http.post("/agents/second-agent/runs/some-run-id/cancel", headers=auth)
    assert res.status_code == 403
def test_agent_cancel_with_global_scope(test_agent):
    """A global agents:run scope should authorize cancelling any agent's run."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:run'])}"}
    res = http.post("/agents/test-agent/runs/nonexistent-run-id/cancel", headers=auth)
    # 200 = authorized; the cancel intent is stored even for an unknown run id.
    assert res.status_code == 200
def test_agent_continue_with_run_scope(test_agent):
    """An agents run scope should authorize the continue endpoint (non-403 means auth passed)."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:test-agent:run'])}"}
    res = http.post(
        "/agents/test-agent/runs/nonexistent-run-id/continue",
        headers=auth,
        data={"tools": "[]"},
    )
    # The run does not exist, so the call may fail downstream — but never with 403.
    assert res.status_code != 403
def test_agent_continue_blocked_without_run_scope(test_agent, second_agent):
    """Continue must be rejected for an agent outside the token's run scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Run access is scoped to test-agent; second-agent has no grant.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:test-agent:run'])}"}
    res = http.post(
        "/agents/second-agent/runs/some-run-id/continue",
        headers=auth,
        data={"tools": "[]"},
    )
    assert res.status_code == 403
def test_team_cancel_with_run_scope(test_team):
    """A resource-level teams run scope should authorize the cancel endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['teams:test-team:run'])}"}
    res = http.post("/teams/test-team/runs/nonexistent-run-id/cancel", headers=auth)
    # Cancel records the intent even for unknown run ids, so an authorized call returns 200.
    assert res.status_code == 200
def test_team_cancel_blocked_without_run_scope(test_team, second_team):
    """Cancel must be rejected for a team outside the token's run scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team, second_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Run access is scoped to test-team; second-team has no grant.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['teams:test-team:run'])}"}
    res = http.post("/teams/second-team/runs/some-run-id/cancel", headers=auth)
    assert res.status_code == 403
def test_team_cancel_with_global_scope(test_team):
    """A global teams:run scope should authorize cancelling any team's run."""
    server = AgentOS(
        id=TEST_OS_ID,
        teams=[test_team],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['teams:run'])}"}
    res = http.post("/teams/test-team/runs/nonexistent-run-id/cancel", headers=auth)
    # 200 = authorized; the cancel intent is stored even for an unknown run id.
    assert res.status_code == 200
def test_workflow_cancel_with_run_scope(test_workflow):
    """A resource-level workflows run scope should authorize the cancel endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:test-workflow:run'])}"}
    res = http.post("/workflows/test-workflow/runs/nonexistent-run-id/cancel", headers=auth)
    # Cancel records the intent even for unknown run ids, so an authorized call returns 200.
    assert res.status_code == 200
def test_workflow_cancel_blocked_without_run_scope(test_workflow, second_workflow):
    """Cancel must be rejected for a workflow outside the token's run scope."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow, second_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    # Run access is scoped to test-workflow; second-workflow has no grant.
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:test-workflow:run'])}"}
    res = http.post("/workflows/second-workflow/runs/some-run-id/cancel", headers=auth)
    assert res.status_code == 403
def test_workflow_cancel_with_global_scope(test_workflow):
    """A global workflows:run scope should authorize cancelling any workflow's run."""
    server = AgentOS(
        id=TEST_OS_ID,
        workflows=[test_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['workflows:run'])}"}
    res = http.post("/workflows/test-workflow/runs/nonexistent-run-id/cancel", headers=auth)
    # 200 = authorized; the cancel intent is stored even for an unknown run id.
    assert res.status_code == 200
def test_cancel_with_wildcard_scope(test_agent, second_agent):
    """An agents:*:run wildcard scope should authorize cancelling runs on every agent."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent, second_agent],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agents:*:run'])}"}
    # The wildcard must cover both agent ids.
    for agent_id in ("test-agent", "second-agent"):
        res = http.post(f"/agents/{agent_id}/runs/nonexistent-run-id/cancel", headers=auth)
        assert res.status_code == 200  # Auth passed
def test_cancel_with_admin_scope(test_agent, test_team, test_workflow):
    """The admin scope should authorize cancel across agents, teams, and workflows alike."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
        teams=[test_team],
        workflows=[test_workflow],
        authorization=True,
        authorization_config=AuthorizationConfig(verification_keys=[JWT_SECRET], algorithm="HS256"),
    )
    http = TestClient(server.get_app())
    auth = {"Authorization": f"Bearer {create_jwt_token(scopes=['agent_os:admin'])}"}
    # Every resource type's cancel endpoint must accept the admin grant.
    for cancel_path in (
        "/agents/test-agent/runs/nonexistent-run-id/cancel",
        "/teams/test-team/runs/nonexistent-run-id/cancel",
        "/workflows/test-workflow/runs/nonexistent-run-id/cancel",
    ):
        res = http.post(cancel_path, headers=auth)
        assert res.status_code == 200  # Auth passed
# ============================================================================
# JWKS File Tests
# ============================================================================
@pytest.fixture
def rsa_key_pair():
    """Provide a fresh 2048-bit RSA key pair, in both object and PEM form, for JWKS tests."""
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    priv = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    pub = priv.public_key()
    # PEM encodings so the keys can be handed straight to PyJWT for signing/verification.
    priv_pem = priv.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode("utf-8")
    pub_pem = pub.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    ).decode("utf-8")
    return {
        "private_key": priv,
        "public_key": pub,
        "private_pem": priv_pem,
        "public_pem": pub_pem,
    }
@pytest.fixture
def jwks_file(rsa_key_pair, tmp_path):
    """Write a JWKS document containing the RSA public key (kid "test-key-1") and return its path."""
    import base64
    import json

    numbers = rsa_key_pair["public_key"].public_numbers()

    def b64url_uint(value: int, length: int) -> str:
        # JWK integer fields are big-endian bytes, base64url-encoded with padding stripped.
        raw = value.to_bytes(length, byteorder="big")
        return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

    document = {
        "keys": [
            {
                "kty": "RSA",
                "kid": "test-key-1",
                "use": "sig",
                "alg": "RS256",
                "n": b64url_uint(numbers.n, 256),  # modulus: 256 bytes for a 2048-bit key
                "e": b64url_uint(numbers.e, 3),  # exponent: 3 bytes for 65537
            }
        ]
    }
    target = tmp_path / "jwks.json"
    target.write_text(json.dumps(document))
    return str(target)
@pytest.fixture
def jwks_file_no_kid(rsa_key_pair, tmp_path):
    """Write a JWKS document whose key omits "kid", exercising the single-key fallback path."""
    import base64
    import json

    numbers = rsa_key_pair["public_key"].public_numbers()

    def b64url_uint(value: int, length: int) -> str:
        # JWK integer fields are big-endian bytes, base64url-encoded with padding stripped.
        raw = value.to_bytes(length, byteorder="big")
        return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

    document = {
        "keys": [
            {
                "kty": "RSA",
                "use": "sig",
                "alg": "RS256",
                "n": b64url_uint(numbers.n, 256),  # modulus: 256 bytes for a 2048-bit key
                "e": b64url_uint(numbers.e, 3),  # exponent: 3 bytes for 65537
            }
        ]
    }
    target = tmp_path / "jwks_no_kid.json"
    target.write_text(json.dumps(document))
    return str(target)
def create_rs256_token(
    private_key,
    scopes: list[str],
    user_id: str = "test_user",
    kid: str | None = "test-key-1",
    audience: str = TEST_OS_ID,
) -> str:
    """Sign a one-hour RS256 JWT with *private_key*, optionally tagging the header with *kid*.

    The claims mirror what the middleware expects: sub, session_id, aud, scopes, exp, iat.
    """
    now = datetime.now(UTC)
    claims = {
        "sub": user_id,
        "session_id": f"session_{user_id}",
        "aud": audience,
        "scopes": scopes,
        "exp": now + timedelta(hours=1),
        "iat": now,
    }
    # Omit the header entirely when no kid was requested.
    extra_headers = {"kid": kid} if kid else None
    return jwt.encode(claims, private_key, algorithm="RS256", headers=extra_headers)
def test_jwks_file_authentication(test_agent, rsa_key_pair, jwks_file):
    """A token signed with the JWKS private key and a matching kid should authenticate."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    )
    app = server.get_app()
    # Verification material comes from the JWKS file instead of a shared secret.
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file,
        algorithm="RS256",
        authorization=True,
    )
    http = TestClient(app)
    token = create_rs256_token(
        rsa_key_pair["private_pem"],
        scopes=["agents:read"],
        kid="test-key-1",
    )
    res = http.get("/agents", headers={"Authorization": f"Bearer {token}"})
    assert res.status_code == 200
def test_jwks_file_with_run_scope(test_agent, rsa_key_pair, jwks_file):
    """Run scopes carried by a JWKS-verified token should authorize the run endpoint."""
    server = AgentOS(
        id=TEST_OS_ID,
        agents=[test_agent],
    )
    app = server.get_app()
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file,
        algorithm="RS256",
        authorization=True,
    )
    http = TestClient(app)
    token = create_rs256_token(
        rsa_key_pair["private_pem"],
        scopes=["agents:read", "agents:run"],
        kid="test-key-1",
    )
    res = http.post(
        "/agents/test-agent/runs",
        headers={"Authorization": f"Bearer {token}"},
        data={"message": "test"},
    )
    assert res.status_code in [200, 201]
def test_jwks_file_without_kid(test_agent, rsa_key_pair, jwks_file_no_kid):
    """A token with no kid header verifies against the JWKS _default key."""
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file_no_kid,
        algorithm="RS256",
        authorization=True,
    )
    # kid=None omits the header entirely, forcing the _default-key lookup.
    bearer = create_rs256_token(
        rsa_key_pair["private_pem"],
        scopes=["agents:read"],
        kid=None,
    )
    resp = TestClient(app).get("/agents", headers={"Authorization": f"Bearer {bearer}"})
    assert resp.status_code == 200
def test_jwks_file_wrong_kid_denied(test_agent, rsa_key_pair, jwks_file):
    """A kid that matches no JWKS entry leads to a 401."""
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file,
        algorithm="RS256",
        authorization=True,
    )
    # Correctly signed, but advertises an unknown key id.
    bearer = create_rs256_token(
        rsa_key_pair["private_pem"],
        scopes=["agents:read"],
        kid="wrong-key-id",
    )
    resp = TestClient(app).get("/agents", headers={"Authorization": f"Bearer {bearer}"})
    # No JWKS entry for this kid, so verification cannot even start.
    assert resp.status_code == 401
def test_jwks_file_invalid_signature_denied(test_agent, rsa_key_pair, jwks_file):
    """Tokens signed by a key that is not in the JWKS are rejected."""
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa

    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file,
        algorithm="RS256",
        authorization=True,
    )
    # Fresh key pair whose public half is NOT published in the JWKS.
    rogue_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    rogue_pem = rogue_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode("utf-8")
    # The kid matches a JWKS entry, but the signature will not verify.
    bearer = create_rs256_token(rogue_pem, scopes=["agents:read"], kid="test-key-1")
    resp = TestClient(app).get("/agents", headers={"Authorization": f"Bearer {bearer}"})
    assert resp.status_code == 401
def test_jwks_file_not_found_raises_error(test_agent):
    """Constructing the middleware with a missing JWKS file raises ValueError."""
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    # Instantiate directly: add_middleware defers construction until the
    # first request, which would hide the constructor-time error.
    with pytest.raises(ValueError, match="JWKS file not found"):
        JWTMiddleware(
            app=app,
            jwks_file="/non/existent/jwks.json",
            algorithm="RS256",
            authorization=True,
        )
def test_jwks_file_invalid_json_raises_error(test_agent, tmp_path):
    """A JWKS file containing malformed JSON raises ValueError at construction."""
    broken_jwks = tmp_path / "invalid.json"
    broken_jwks.write_text("not valid json {{{")
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    # Instantiate directly: add_middleware defers construction until the
    # first request, which would hide the constructor-time error.
    with pytest.raises(ValueError, match="Invalid JSON in JWKS file"):
        JWTMiddleware(
            app=app,
            jwks_file=str(broken_jwks),
            algorithm="RS256",
            authorization=True,
        )
def test_jwks_with_fallback_to_verification_keys(test_agent, rsa_key_pair, jwks_file):
    """Static verification_keys still verify tokens the JWKS cannot."""
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    # Configure both a JWKS file and a static HS256 secret, with the
    # middleware set to a different (symmetric) algorithm.
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",  # Note: different algorithm
        authorization=True,
    )
    # An HS256 token can only be validated by the fallback verification_keys.
    bearer = create_jwt_token(scopes=["agents:read"])
    resp = TestClient(app).get("/agents", headers={"Authorization": f"Bearer {bearer}"})
    assert resp.status_code == 200
def test_jwks_env_variable(test_agent, rsa_key_pair, jwks_file, monkeypatch):
    """The middleware picks up the JWKS path from JWT_JWKS_FILE when not passed."""
    monkeypatch.setenv("JWT_JWKS_FILE", jwks_file)
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    # No jwks_file argument: the environment variable must supply it.
    app.add_middleware(
        JWTMiddleware,
        algorithm="RS256",
        authorization=True,
    )
    bearer = create_rs256_token(
        rsa_key_pair["private_pem"],
        scopes=["agents:read"],
        kid="test-key-1",
    )
    resp = TestClient(app).get("/agents", headers={"Authorization": f"Bearer {bearer}"})
    assert resp.status_code == 200
def test_jwks_parameter_takes_precedence_over_env(test_agent, rsa_key_pair, jwks_file, tmp_path, monkeypatch):
    """An explicit jwks_file argument wins over the JWT_JWKS_FILE env var."""
    import json

    # Point the env var at a keyless JWKS: if the middleware consulted it,
    # signature verification below would necessarily fail.
    env_jwks = tmp_path / "env_jwks.json"
    env_jwks.write_text(json.dumps({"keys": []}))
    monkeypatch.setenv("JWT_JWKS_FILE", str(env_jwks))
    app = AgentOS(id=TEST_OS_ID, agents=[test_agent]).get_app()
    app.add_middleware(
        JWTMiddleware,
        jwks_file=jwks_file,  # holds the real signing key
        algorithm="RS256",
        authorization=True,
    )
    bearer = create_rs256_token(
        rsa_key_pair["private_pem"],
        scopes=["agents:read"],
        kid="test-key-1",
    )
    resp = TestClient(app).get("/agents", headers={"Authorization": f"Bearer {bearer}"})
    # Success proves the explicit parameter (with the correct key) was used.
    assert resp.status_code == 200
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_authorization.py",
"license": "Apache License 2.0",
"lines": 2354,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/test_jwt_middleware_helpers.py | """Unit tests for JWT middleware helper functions."""
import pytest
from agno.os.middleware import JWTMiddleware, TokenSource
# Test JWT secret for middleware initialization
JWT_SECRET = "test-secret-key-for-helper-tests"
@pytest.fixture
def middleware():
    """Middleware wired with the shared HS256 test secret."""
    return JWTMiddleware(app=None, verification_keys=[JWT_SECRET], algorithm="HS256")


@pytest.fixture
def middleware_with_auth():
    """Same as ``middleware`` but with scope authorization switched on."""
    return JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
    )
def test_returns_expected_default_routes():
    """Default excluded routes contain the standard infrastructure paths."""
    mw = JWTMiddleware(app=None, verification_keys=[JWT_SECRET], algorithm="HS256")
    excluded = mw._get_default_excluded_routes()
    for path in ("/", "/health", "/docs", "/redoc", "/openapi.json", "/docs/oauth2-redirect"):
        assert path in excluded


def test_returns_list():
    """_get_default_excluded_routes returns a plain list."""
    mw = JWTMiddleware(app=None, verification_keys=[JWT_SECRET], algorithm="HS256")
    assert isinstance(mw._get_default_excluded_routes(), list)
def test_extracts_agent_id(middleware):
    """The agent ID segment is recovered from a runs path."""
    assert middleware._extract_resource_id_from_path("/agents/my-agent/runs", "agents") == "my-agent"


def test_extracts_team_id(middleware):
    """The team ID segment is recovered from a runs path."""
    assert middleware._extract_resource_id_from_path("/teams/my-team/runs", "teams") == "my-team"


def test_extracts_workflow_id(middleware):
    """The workflow ID segment is recovered from a runs path."""
    assert middleware._extract_resource_id_from_path("/workflows/my-workflow/runs", "workflows") == "my-workflow"


def test_extracts_id_with_dashes(middleware):
    """Dashes inside the ID are preserved."""
    assert middleware._extract_resource_id_from_path("/agents/my-complex-agent-id/runs", "agents") == "my-complex-agent-id"


def test_extracts_id_with_underscores(middleware):
    """Underscores inside the ID are preserved."""
    assert middleware._extract_resource_id_from_path("/agents/my_agent_id/runs", "agents") == "my_agent_id"


def test_extracts_id_with_numbers(middleware):
    """Digits inside the ID are preserved."""
    assert middleware._extract_resource_id_from_path("/agents/agent123/runs", "agents") == "agent123"


def test_returns_none_for_list_endpoint(middleware):
    """A bare collection path carries no resource ID."""
    assert middleware._extract_resource_id_from_path("/agents", "agents") is None


def test_returns_none_for_wrong_resource_type(middleware):
    """Asking for a different resource type yields None."""
    assert middleware._extract_resource_id_from_path("/agents/my-agent/runs", "teams") is None


def test_extracts_id_from_simple_path(middleware):
    """A two-segment resource path still yields the ID."""
    assert middleware._extract_resource_id_from_path("/agents/test-agent", "agents") == "test-agent"


def test_handles_uuid_style_id(middleware):
    """UUID-shaped IDs are extracted intact."""
    uuid_path = "/agents/550e8400-e29b-41d4-a716-446655440000/runs"
    assert middleware._extract_resource_id_from_path(uuid_path, "agents") == "550e8400-e29b-41d4-a716-446655440000"
def test_excludes_default_routes():
    """Built-in infrastructure routes are excluded from auth."""
    mw = JWTMiddleware(app=None, verification_keys=[JWT_SECRET], algorithm="HS256")
    for path in ("/health", "/docs", "/redoc", "/openapi.json", "/"):
        assert mw._is_route_excluded(path) is True


def test_does_not_exclude_protected_routes():
    """API resource routes remain protected by default."""
    mw = JWTMiddleware(app=None, verification_keys=[JWT_SECRET], algorithm="HS256")
    for path in ("/agents", "/teams", "/workflows", "/sessions"):
        assert mw._is_route_excluded(path) is False


def test_custom_excluded_routes():
    """Caller-supplied exclusions are honoured; other routes stay protected."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        excluded_route_paths=["/custom", "/public/*"],
    )
    assert mw._is_route_excluded("/custom") is True
    assert mw._is_route_excluded("/public/data") is True
    assert mw._is_route_excluded("/protected") is False


def test_wildcard_pattern_matching():
    """A trailing * excludes every path under the prefix."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        excluded_route_paths=["/api/public/*"],
    )
    assert mw._is_route_excluded("/api/public/status") is True
    assert mw._is_route_excluded("/api/public/health") is True
    assert mw._is_route_excluded("/api/private/data") is False


def test_handles_trailing_slash():
    """Exclusion matching ignores a trailing slash on the request path."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        excluded_route_paths=["/health"],
    )
    assert mw._is_route_excluded("/health/") is True
    assert mw._is_route_excluded("/health") is True


def test_empty_excluded_routes():
    """An explicitly empty exclusion list protects everything, even /health."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        excluded_route_paths=[],
    )
    assert mw._is_route_excluded("/health") is False
    assert mw._is_route_excluded("/anything") is False
def test_returns_scopes_for_agents_list(middleware_with_auth):
    """GET /agents requires agents:read."""
    assert "agents:read" in middleware_with_auth._get_required_scopes("GET", "/agents")


def test_returns_scopes_for_agent_run(middleware_with_auth):
    """POST to an agent runs endpoint requires agents:run."""
    assert "agents:run" in middleware_with_auth._get_required_scopes("POST", "/agents/my-agent/runs")


def test_returns_scopes_for_teams_list(middleware_with_auth):
    """GET /teams requires teams:read."""
    assert "teams:read" in middleware_with_auth._get_required_scopes("GET", "/teams")


def test_returns_scopes_for_team_run(middleware_with_auth):
    """POST to a team runs endpoint requires teams:run."""
    assert "teams:run" in middleware_with_auth._get_required_scopes("POST", "/teams/my-team/runs")


def test_returns_scopes_for_workflows_list(middleware_with_auth):
    """GET /workflows requires workflows:read."""
    assert "workflows:read" in middleware_with_auth._get_required_scopes("GET", "/workflows")


def test_returns_scopes_for_workflow_run(middleware_with_auth):
    """POST to a workflow runs endpoint requires workflows:run."""
    assert "workflows:run" in middleware_with_auth._get_required_scopes("POST", "/workflows/my-workflow/runs")


def test_returns_scopes_for_sessions(middleware_with_auth):
    """GET /sessions requires sessions:read."""
    assert "sessions:read" in middleware_with_auth._get_required_scopes("GET", "/sessions")


def test_returns_scopes_for_config(middleware_with_auth):
    """GET /config requires system:read."""
    assert "system:read" in middleware_with_auth._get_required_scopes("GET", "/config")


def test_returns_scopes_for_traces(middleware_with_auth):
    """GET /traces requires traces:read."""
    assert "traces:read" in middleware_with_auth._get_required_scopes("GET", "/traces")


def test_returns_scopes_for_trace_detail(middleware_with_auth):
    """A single-trace path also requires traces:read."""
    assert "traces:read" in middleware_with_auth._get_required_scopes("GET", "/traces/trace-123")


def test_returns_scopes_for_trace_session_stats(middleware_with_auth):
    """GET /trace_session_stats requires traces:read."""
    assert "traces:read" in middleware_with_auth._get_required_scopes("GET", "/trace_session_stats")


def test_returns_empty_for_unknown_route(middleware_with_auth):
    """Routes without a mapping demand no scopes."""
    assert middleware_with_auth._get_required_scopes("GET", "/unknown/route") == []
def test_custom_scope_mappings():
    """Caller-supplied scope mappings are consulted for matching routes."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        scope_mappings={
            "GET /custom/endpoint": ["custom:read"],
            "POST /custom/action": ["custom:write"],
        },
    )
    assert "custom:read" in mw._get_required_scopes("GET", "/custom/endpoint")
    assert "custom:write" in mw._get_required_scopes("POST", "/custom/action")


def test_empty_scopes_for_explicitly_allowed_route():
    """An empty scope list in the mapping means the route needs no scopes."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        scope_mappings={"GET /public/data": []},  # explicitly allowed without scopes
    )
    assert mw._get_required_scopes("GET", "/public/data") == []
def test_header_source_message():
    """Header-only token source reports the missing Authorization header."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        token_source=TokenSource.HEADER,
    )
    assert "Authorization header missing" in mw._get_missing_token_error_message()


def test_cookie_source_message():
    """Cookie-only token source names the configured cookie."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        token_source=TokenSource.COOKIE,
        cookie_name="my_token",
    )
    message = mw._get_missing_token_error_message()
    assert "my_token" in message
    assert "cookie" in message.lower()


def test_both_source_message():
    """BOTH token source mentions header and cookie alternatives."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        token_source=TokenSource.BOTH,
        cookie_name="jwt_cookie",
    )
    message = mw._get_missing_token_error_message()
    assert "jwt_cookie" in message
    lowered = message.lower()
    assert "header" in lowered
    assert "cookie" in lowered


def test_custom_cookie_name_in_message():
    """A custom cookie name shows up verbatim in the error message."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        token_source=TokenSource.COOKIE,
        cookie_name="custom_auth_token",
    )
    assert "custom_auth_token" in mw._get_missing_token_error_message()
def test_allows_all_when_no_origins_configured(middleware):
    """With cors_allowed_origins=None every origin passes."""
    for origin in ("http://localhost:3000", "https://example.com"):
        assert middleware._is_origin_allowed(origin, None) is True


def test_allows_all_when_empty_origins_list(middleware):
    """An empty allow-list is treated as allow-all."""
    for origin in ("http://localhost:3000", "https://example.com"):
        assert middleware._is_origin_allowed(origin, []) is True


def test_allows_configured_origin(middleware):
    """Origins present in the allow-list are accepted."""
    allowed = ["http://localhost:3000", "https://example.com"]
    assert middleware._is_origin_allowed("http://localhost:3000", allowed) is True
    assert middleware._is_origin_allowed("https://example.com", allowed) is True


def test_denies_unconfigured_origin(middleware):
    """Origins absent from the allow-list are rejected."""
    allowed = ["http://localhost:3000"]
    assert middleware._is_origin_allowed("https://example.com", allowed) is False
    assert middleware._is_origin_allowed("http://malicious.com", allowed) is False


def test_case_sensitive_origin_matching(middleware):
    """Origin comparison is exact, including character case."""
    allowed = ["http://localhost:3000"]
    assert middleware._is_origin_allowed("http://localhost:3000", allowed) is True
    assert middleware._is_origin_allowed("HTTP://LOCALHOST:3000", allowed) is False
def test_raises_error_without_verification_key():
    """Construction fails fast when neither keys nor a JWKS file are given."""
    with pytest.raises(ValueError) as exc_info:
        JWTMiddleware(app=None, verification_keys=None, algorithm="HS256")
    assert "at least one jwt verification key or jwks file is required" in str(exc_info.value).lower()


def test_authorization_enabled_implicitly_with_scope_mappings():
    """Passing scope_mappings switches authorization on automatically."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        scope_mappings={"GET /test": ["test:read"]},
    )
    assert mw.authorization is True


def test_authorization_stays_false_when_explicit():
    """authorization=False wins even when scope_mappings are supplied."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=False,
        scope_mappings={"GET /test": ["test:read"]},
    )
    assert mw.authorization is False


def test_default_scope_mappings_merged_with_custom():
    """Custom mappings are layered on top of the built-in defaults."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        scope_mappings={"GET /custom": ["custom:read"]},
    )
    # Defaults survive, custom entries are added alongside them.
    assert "GET /agents" in mw.scope_mappings
    assert "GET /custom" in mw.scope_mappings
    assert mw.scope_mappings["GET /custom"] == ["custom:read"]


def test_custom_scope_mappings_override_defaults():
    """A custom mapping for a default route replaces the default scopes."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        authorization=True,
        scope_mappings={"GET /agents": ["custom:agents:read"]},
    )
    assert mw.scope_mappings["GET /agents"] == ["custom:agents:read"]
def test_custom_admin_scope():
    """A caller-provided admin scope replaces the default."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        admin_scope="custom:admin",
    )
    assert mw.admin_scope == "custom:admin"


def test_default_admin_scope():
    """The admin scope defaults to agent_os:admin."""
    mw = JWTMiddleware(app=None, verification_keys=[JWT_SECRET], algorithm="HS256")
    assert mw.admin_scope == "agent_os:admin"


def test_custom_claims_configuration():
    """Each claim name can be remapped via constructor arguments."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        user_id_claim="custom_user",
        session_id_claim="custom_session",
        scopes_claim="custom_scopes",
        audience_claim="custom_aud",
    )
    assert mw.user_id_claim == "custom_user"
    assert mw.session_id_claim == "custom_session"
    assert mw.scopes_claim == "custom_scopes"
    assert mw.audience_claim == "custom_aud"


def test_dependencies_and_session_state_claims():
    """Dependency and session-state claim lists are stored as given."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        dependencies_claims=["org_id", "tenant_id"],
        session_state_claims=["theme", "language"],
    )
    assert mw.dependencies_claims == ["org_id", "tenant_id"]
    assert mw.session_state_claims == ["theme", "language"]


def test_token_source_configuration():
    """Token source and cookie name are stored as configured."""
    mw = JWTMiddleware(
        app=None,
        verification_keys=[JWT_SECRET],
        algorithm="HS256",
        token_source=TokenSource.COOKIE,
        cookie_name="my_jwt",
    )
    assert mw.token_source == TokenSource.COOKIE
    assert mw.cookie_name == "my_jwt"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_jwt_middleware_helpers.py",
"license": "Apache License 2.0",
"lines": 411,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/compression/test_compression_manager.py | import pytest
from agno.models.message import Message
@pytest.mark.asyncio
async def test_ashould_compress_below_token_limit():
    """ashould_compress mirrors should_compress and is False under the limit."""
    from agno.compression.manager import CompressionManager
    from agno.models.openai import OpenAIChat

    manager = CompressionManager(compress_tool_results=True, compress_token_limit=1000)
    history = [Message(role="user", content="Hello")]
    model = OpenAIChat(id="gpt-4o")
    sync_result = manager.should_compress(history, model=model)
    async_result = await manager.ashould_compress(history, model=model)
    # The async path must agree with the sync path exactly.
    assert async_result == sync_result
    assert sync_result is False


@pytest.mark.asyncio
async def test_ashould_compress_above_token_limit():
    """ashould_compress mirrors should_compress and is True over the limit."""
    from agno.compression.manager import CompressionManager
    from agno.models.openai import OpenAIChat

    manager = CompressionManager(compress_tool_results=True, compress_token_limit=10)
    history = [Message(role="user", content="Hello " * 100)]  # far beyond 10 tokens
    model = OpenAIChat(id="gpt-4o")
    sync_result = manager.should_compress(history, model=model)
    async_result = await manager.ashould_compress(history, model=model)
    assert async_result == sync_result
    assert sync_result is True


@pytest.mark.asyncio
async def test_ashould_compress_disabled():
    """Both sync and async report False when compression is switched off."""
    from agno.compression.manager import CompressionManager
    from agno.models.openai import OpenAIChat

    manager = CompressionManager(compress_tool_results=False)
    history = [Message(role="user", content="Hello")]
    model = OpenAIChat(id="gpt-4o")
    sync_result = manager.should_compress(history, model=model)
    async_result = await manager.ashould_compress(history, model=model)
    assert async_result == sync_result
    assert sync_result is False
def test_should_compress_below_token_limit():
    """should_compress is False while the history is under the token limit."""
    from agno.compression.manager import CompressionManager
    from agno.models.openai import OpenAIChat

    manager = CompressionManager(compress_tool_results=True, compress_token_limit=1000)
    history = [Message(role="user", content="Hello")]
    assert manager.should_compress(history, model=OpenAIChat(id="gpt-4o")) is False


def test_should_compress_above_token_limit():
    """should_compress flips to True once the history exceeds the token limit."""
    from agno.compression.manager import CompressionManager
    from agno.models.openai import OpenAIChat

    manager = CompressionManager(compress_tool_results=True, compress_token_limit=10)
    history = [Message(role="user", content="Hello " * 100)]
    assert manager.should_compress(history, model=OpenAIChat(id="gpt-4o")) is True


def test_should_compress_disabled():
    """should_compress is False when tool-result compression is disabled."""
    from agno.compression.manager import CompressionManager

    manager = CompressionManager(compress_tool_results=False)
    assert manager.should_compress([Message(role="user", content="Hello")]) is False


def test_should_compress_default_count_limit():
    """compress_tool_results_limit defaults to 3 only when nothing else is set."""
    from agno.compression.manager import CompressionManager

    assert CompressionManager().compress_tool_results_limit == 3
    # Configuring a token limit disables the default count limit.
    assert CompressionManager(compress_token_limit=1000).compress_tool_results_limit is None
    # An explicit count limit is kept verbatim.
    assert CompressionManager(compress_tool_results_limit=5).compress_tool_results_limit == 5
def test_should_compress_count_based_below_limit():
    """Fewer tool results than the count limit: no compression."""
    from agno.compression.manager import CompressionManager

    history = [
        Message(role="user", content="Hello"),
        Message(role="tool", content="Result 1", tool_name="test"),
    ]
    manager = CompressionManager(compress_tool_results=True, compress_tool_results_limit=5)
    assert manager.should_compress(history) is False


def test_should_compress_count_based_above_limit():
    """More tool results than the count limit triggers compression."""
    from agno.compression.manager import CompressionManager

    history = [Message(role="user", content="Hello")] + [
        Message(role="tool", content=f"Result {i}", tool_name=f"test{i}") for i in (1, 2, 3)
    ]
    manager = CompressionManager(compress_tool_results=True, compress_tool_results_limit=2)
    assert manager.should_compress(history) is True


def test_should_compress_excludes_already_compressed():
    """Tool results that already carry compressed_content are not counted."""
    from agno.compression.manager import CompressionManager

    history = [
        Message(role="user", content="Hello"),
        Message(role="tool", content="Result 1", tool_name="test1", compressed_content="compressed"),
        Message(role="tool", content="Result 2", tool_name="test2", compressed_content="compressed"),
        Message(role="tool", content="Result 3", tool_name="test3"),
    ]
    manager = CompressionManager(compress_tool_results=True, compress_tool_results_limit=2)
    # Only one uncompressed tool result remains, which is under the limit of 2.
    assert manager.should_compress(history) is False
@pytest.mark.asyncio
async def test_ashould_compress_count_based_below_limit():
    """Async count-based check mirrors the sync result below the limit."""
    from agno.compression.manager import CompressionManager

    history = [
        Message(role="user", content="Hello"),
        Message(role="tool", content="Result 1", tool_name="test"),
    ]
    manager = CompressionManager(compress_tool_results=True, compress_tool_results_limit=5)
    sync_result = manager.should_compress(history)
    assert await manager.ashould_compress(history) == sync_result
    assert sync_result is False


@pytest.mark.asyncio
async def test_ashould_compress_count_based_above_limit():
    """Async count-based check mirrors the sync result above the limit."""
    from agno.compression.manager import CompressionManager

    history = [Message(role="user", content="Hello")] + [
        Message(role="tool", content=f"Result {i}", tool_name=f"test{i}") for i in (1, 2, 3)
    ]
    manager = CompressionManager(compress_tool_results=True, compress_tool_results_limit=2)
    sync_result = manager.should_compress(history)
    assert await manager.ashould_compress(history) == sync_result
    assert sync_result is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/compression/test_compression_manager.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_content_hash.py | """Tests for Knowledge._build_content_hash() method, verifying hash includes name and description."""
from agno.knowledge.content import Content, FileData
from agno.knowledge.document.base import Document
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """Minimal VectorDb stub for testing.

    Every method is an inert stub returning a fixed value: the tests in this
    module only exercise Knowledge._build_content_hash(), which never touches
    the vector store, so only the interface matters.
    """

    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    def name_exists(self, name: str) -> bool:
        return False

    # NOTE(review): changed from a plain `def` to `async def` so this stub is
    # consistent with the other async_* methods in this class — presumably the
    # VectorDb base declares it as a coroutine; confirm against the base class.
    async def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return False

    def insert(self, content_hash: str, documents, filters=None) -> None:
        pass

    async def async_insert(self, content_hash: str, documents, filters=None) -> None:
        pass

    def upsert(self, content_hash: str, documents, filters=None) -> None:
        pass

    async def async_upsert(self, content_hash: str, documents, filters=None) -> None:
        pass

    def search(self, query: str, limit: int = 5, filters=None):
        return []

    async def async_search(self, query: str, limit: int = 5, filters=None):
        return []

    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self):
        return ["vector"]
def test_url_hash_without_name_or_description():
    """Hashing the same bare URL twice is deterministic and yields SHA256 hex."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    first = knowledge._build_content_hash(Content(url="https://example.com/doc.pdf"))
    second = knowledge._build_content_hash(Content(url="https://example.com/doc.pdf"))
    assert first == second
    assert isinstance(first, str)
    assert len(first) == 64  # SHA256 hex digest length


def test_url_hash_with_different_names():
    """The content name participates in the hash for URL-based content."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    url = "https://example.com/doc.pdf"
    hashes = [
        knowledge._build_content_hash(Content(url=url, name="Document 1")),
        knowledge._build_content_hash(Content(url=url, name="Document 2")),
        knowledge._build_content_hash(Content(url=url)),  # no name at all
    ]
    # Every name variant must hash differently.
    assert len(set(hashes)) == 3
def test_url_hash_with_different_descriptions():
"""Test that same URL with different descriptions produces different hashes."""
knowledge = Knowledge(vector_db=MockVectorDb())
content1 = Content(url="https://example.com/doc.pdf", description="First description")
content2 = Content(url="https://example.com/doc.pdf", description="Second description")
content3 = Content(url="https://example.com/doc.pdf") # No description
hash1 = knowledge._build_content_hash(content1)
hash2 = knowledge._build_content_hash(content2)
hash3 = knowledge._build_content_hash(content3)
# All hashes should be different
assert hash1 != hash2
assert hash1 != hash3
assert hash2 != hash3
def test_url_hash_with_name_and_description():
    """Both name and description feed into the URL hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    url = "https://example.com/doc.pdf"
    base = kb._build_content_hash(Content(url=url, name="Document 1", description="Description 1"))
    other_desc = kb._build_content_hash(Content(url=url, name="Document 1", description="Description 2"))
    other_name = kb._build_content_hash(Content(url=url, name="Document 2", description="Description 1"))
    duplicate = kb._build_content_hash(Content(url=url, name="Document 1", description="Description 1"))
    # Identical name + description -> identical hash.
    assert base == duplicate
    # Changing either field changes the hash.
    assert base != other_desc
    assert base != other_name


def test_path_hash_with_name_and_description():
    """Both name and description feed into the path hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    path = "/path/to/file.pdf"
    hashes = [
        kb._build_content_hash(Content(path=path, name="File 1", description="Desc 1")),
        kb._build_content_hash(Content(path=path, name="File 1", description="Desc 2")),
        kb._build_content_hash(Content(path=path, name="File 2", description="Desc 1")),
        kb._build_content_hash(Content(path=path)),  # no name or description
    ]
    # Every combination is unique.
    assert len(set(hashes)) == 4


def test_path_hash_backward_compatibility():
    """Path-only content hashes are stable across calls (backward compatible)."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = kb._build_content_hash(Content(path="/path/to/file.pdf"))
    second = kb._build_content_hash(Content(path="/path/to/file.pdf"))
    assert first == second
def test_same_url_name_description_produces_same_hash():
    """Identical URL, name, and description always hash to the same value."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = kb._build_content_hash(Content(url="https://example.com/doc.pdf", name="Document", description="Description"))
    second = kb._build_content_hash(Content(url="https://example.com/doc.pdf", name="Document", description="Description"))
    assert first == second


def test_hash_order_matters():
    """Hashing the same Content object repeatedly is deterministic."""
    kb = Knowledge(vector_db=MockVectorDb())
    content = Content(url="https://example.com/doc.pdf", name="Document", description="Description")
    # Three runs over the identical object must collapse to a single value.
    assert len({kb._build_content_hash(content) for _ in range(3)}) == 1


def test_hash_with_only_name():
    """URL plus name (no description) still differentiates hashes."""
    kb = Knowledge(vector_db=MockVectorDb())
    hashes = [
        kb._build_content_hash(Content(url="https://example.com/doc.pdf", name="Document 1")),
        kb._build_content_hash(Content(url="https://example.com/doc.pdf", name="Document 2")),
        kb._build_content_hash(Content(url="https://example.com/doc.pdf")),  # no name
    ]
    assert len(set(hashes)) == 3


def test_hash_with_only_description():
    """URL plus description (no name) still differentiates hashes."""
    kb = Knowledge(vector_db=MockVectorDb())
    hashes = [
        kb._build_content_hash(Content(url="https://example.com/doc.pdf", description="Description 1")),
        kb._build_content_hash(Content(url="https://example.com/doc.pdf", description="Description 2")),
        kb._build_content_hash(Content(url="https://example.com/doc.pdf")),  # no description
    ]
    assert len(set(hashes)) == 3
def test_file_data_hash_with_filename():
    """The filename drives the file_data hash when it is present."""
    kb = Knowledge(vector_db=MockVectorDb())
    h_one = kb._build_content_hash(Content(file_data=FileData(content="test content", filename="file1.pdf")))
    h_two = kb._build_content_hash(Content(file_data=FileData(content="test content", filename="file2.pdf")))
    h_same_name = kb._build_content_hash(Content(file_data=FileData(content="different content", filename="file1.pdf")))
    # Different filenames -> different hashes.
    assert h_one != h_two
    # Same filename -> same hash, even when the payload differs.
    assert h_one == h_same_name


def test_file_data_hash_with_type():
    """The MIME type drives the hash when no filename is present."""
    kb = Knowledge(vector_db=MockVectorDb())
    h_pdf = kb._build_content_hash(Content(file_data=FileData(content="test content", type="application/pdf")))
    h_txt = kb._build_content_hash(Content(file_data=FileData(content="test content", type="text/plain")))
    h_same_type = kb._build_content_hash(Content(file_data=FileData(content="different content", type="application/pdf")))
    # Different types -> different hashes.
    assert h_pdf != h_txt
    # Same type -> same hash, even when the payload differs.
    assert h_pdf == h_same_type


def test_file_data_hash_with_size():
    """The size drives the hash when neither filename nor type is present."""
    kb = Knowledge(vector_db=MockVectorDb())
    h_small = kb._build_content_hash(Content(file_data=FileData(content="test content", size=1024)))
    h_large = kb._build_content_hash(Content(file_data=FileData(content="test content", size=2048)))
    h_same_size = kb._build_content_hash(Content(file_data=FileData(content="different content", size=1024)))
    # Different sizes -> different hashes.
    assert h_small != h_large
    # Same size -> same hash, even when the payload differs.
    assert h_small == h_same_size


def test_file_data_hash_with_content_fallback():
    """Without filename/type/size/name/description the raw content is hashed."""
    kb = Knowledge(vector_db=MockVectorDb())
    h_a = kb._build_content_hash(Content(file_data=FileData(content="test content 1")))
    h_b = kb._build_content_hash(Content(file_data=FileData(content="test content 2")))
    h_a_again = kb._build_content_hash(Content(file_data=FileData(content="test content 1")))
    assert h_a != h_b
    assert h_a == h_a_again
def test_file_data_hash_with_name_and_description():
    """Name and description combine with file_data fields in the hash."""
    kb = Knowledge(vector_db=MockVectorDb())

    def build(name, filename, payload):
        # Local helper: only the interesting fields vary per case.
        return kb._build_content_hash(
            Content(
                name=name,
                description="Description 1",
                file_data=FileData(content=payload, filename=filename, type="application/pdf", size=1024),
            )
        )

    base = build("Document 1", "file1.pdf", "test content")
    other_payload = build("Document 1", "file1.pdf", "different content")
    other_file = build("Document 1", "file2.pdf", "test content")
    other_name = build("Document 2", "file1.pdf", "test content")
    # Same name/description/filename -> same hash (payload ignored when filename present).
    assert base == other_payload
    # Different filename -> different hash.
    assert base != other_file
    # Different name -> different hash.
    assert base != other_name


def test_file_data_hash_priority_filename_over_type():
    """A filename makes the type irrelevant to the hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    as_pdf = kb._build_content_hash(Content(file_data=FileData(content="test", filename="file.pdf", type="application/pdf")))
    as_txt = kb._build_content_hash(Content(file_data=FileData(content="test", filename="file.pdf", type="text/plain")))
    assert as_pdf == as_txt


def test_file_data_hash_priority_type_over_size():
    """A type makes the size irrelevant to the hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    small = kb._build_content_hash(Content(file_data=FileData(content="test", type="application/pdf", size=1024)))
    large = kb._build_content_hash(Content(file_data=FileData(content="test", type="application/pdf", size=2048)))
    assert small == large
def test_file_data_hash_with_name_only():
    """A content name combines with the filename in the file_data hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    named_one = kb._build_content_hash(Content(name="Document 1", file_data=FileData(content="test content", filename="file1.pdf")))
    named_two = kb._build_content_hash(Content(name="Document 2", file_data=FileData(content="test content", filename="file1.pdf")))
    unnamed = kb._build_content_hash(Content(file_data=FileData(content="test content", filename="file1.pdf")))
    # Different names -> different hashes.
    assert named_one != named_two
    # Name + filename differs from filename alone.
    assert named_one != unnamed


def test_file_data_hash_with_description_only():
    """A content description combines with the filename in the file_data hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    described_one = kb._build_content_hash(Content(description="Description 1", file_data=FileData(content="test content", filename="file1.pdf")))
    described_two = kb._build_content_hash(Content(description="Description 2", file_data=FileData(content="test content", filename="file1.pdf")))
    plain = kb._build_content_hash(Content(file_data=FileData(content="test content", filename="file1.pdf")))
    # Different descriptions -> different hashes.
    assert described_one != described_two
    # Description + filename differs from filename alone.
    assert described_one != plain


def test_file_data_hash_bytes_content():
    """Bytes payloads hash by value."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = kb._build_content_hash(Content(file_data=FileData(content=b"test content bytes")))
    second = kb._build_content_hash(Content(file_data=FileData(content=b"test content bytes")))
    other = kb._build_content_hash(Content(file_data=FileData(content=b"different bytes")))
    assert first == second
    assert first != other


def test_file_data_hash_string_vs_bytes_same_content():
    """str and bytes payloads carrying the same text hash differently."""
    kb = Knowledge(vector_db=MockVectorDb())
    as_text = kb._build_content_hash(Content(file_data=FileData(content="test content")))
    as_bytes = kb._build_content_hash(Content(file_data=FileData(content=b"test content")))
    # Different payload types -> different hashes.
    assert as_text != as_bytes
def test_file_data_hash_all_fields_present():
    """With every field set, the filename still dominates the file_data hash."""
    kb = Knowledge(vector_db=MockVectorDb())

    def build(payload, filename):
        # Local helper: name/description/type/size held constant.
        return kb._build_content_hash(
            Content(
                name="Doc 1",
                description="Desc 1",
                file_data=FileData(content=payload, filename=filename, type="application/pdf", size=1024),
            )
        )

    base = build("content", "file.pdf")
    other_payload = build("different", "file.pdf")
    other_file = build("content", "other.pdf")
    # Payload/type/size differences are ignored when the filename is present.
    assert base == other_payload
    # A different filename changes the hash.
    assert base != other_file


def test_file_data_hash_empty_hash_parts_fallback():
    """Without any identifying fields, the raw content itself is hashed."""
    kb = Knowledge(vector_db=MockVectorDb())
    # FileData with content but no filename, type, size, name, or description.
    h_a = kb._build_content_hash(Content(file_data=FileData(content="content1")))
    h_b = kb._build_content_hash(Content(file_data=FileData(content="content2")))
    h_a_again = kb._build_content_hash(Content(file_data=FileData(content="content1")))
    assert h_a != h_b
    assert h_a == h_a_again
    # The result is a valid SHA256 hex digest.
    assert isinstance(h_a, str)
    assert len(h_a) == 64
def test_document_content_hash_uses_document_url():
    """Documents from different URLs get unique content hashes."""
    kb = Knowledge(vector_db=MockVectorDb())
    parent = Content(url="https://example.com")
    hashes = [
        kb._build_document_content_hash(
            Document(content=f"Page {i} content", meta_data={"url": f"https://example.com/page{i}"}),
            parent,
        )
        for i in (1, 2, 3)
    ]
    # Pairwise distinct, and each a valid SHA256 hex digest.
    assert len(set(hashes)) == 3
    assert all(len(h) == 64 for h in hashes)


def test_document_content_hash_is_deterministic():
    """The same document URL hashes identically regardless of body text."""
    kb = Knowledge(vector_db=MockVectorDb())
    parent = Content(url="https://example.com")
    first = kb._build_document_content_hash(
        Document(content="Page 1 content", meta_data={"url": "https://example.com/page1"}), parent
    )
    second = kb._build_document_content_hash(
        Document(content="Different content", meta_data={"url": "https://example.com/page1"}), parent
    )
    assert first == second


def test_document_content_hash_includes_content_name():
    """The parent content name participates in the document hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    doc = Document(content="Page content", meta_data={"url": "https://example.com/page"})
    site_a = kb._build_document_content_hash(doc, Content(url="https://example.com", name="Site A"))
    site_b = kb._build_document_content_hash(doc, Content(url="https://example.com", name="Site B"))
    assert site_a != site_b


def test_document_content_hash_includes_content_description():
    """The parent content description participates in the document hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    doc = Document(content="Page content", meta_data={"url": "https://example.com/page"})
    desc_a = kb._build_document_content_hash(doc, Content(url="https://example.com", description="Description A"))
    desc_b = kb._build_document_content_hash(doc, Content(url="https://example.com", description="Description B"))
    assert desc_a != desc_b


def test_document_content_hash_fallback_to_content_url():
    """A document without its own URL falls back to the parent content URL."""
    kb = Knowledge(vector_db=MockVectorDb())
    digest = kb._build_document_content_hash(
        Document(content="Page content", meta_data={}),
        Content(url="https://example.com/fallback"),
    )
    assert len(digest) == 64


def test_document_content_hash_fallback_to_content_hash():
    """With no URL anywhere, the document body itself determines the hash."""
    kb = Knowledge(vector_db=MockVectorDb())
    parent = Content()  # no URL or path
    h_a = kb._build_document_content_hash(Document(content="Page 1 content", meta_data={}), parent)
    h_b = kb._build_document_content_hash(Document(content="Page 2 content", meta_data={}), parent)
    h_a_again = kb._build_document_content_hash(Document(content="Page 1 content", meta_data={}), parent)
    assert h_a != h_b
    assert h_a == h_a_again
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_content_hash.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_trace_upsert.py | """
Test script to reproduce the UniqueViolation race condition in upsert_trace.
This script demonstrates the race condition that occurs when multiple concurrent
calls to upsert_trace() attempt to insert the same trace_id.
The race condition window:
1. Task A: SELECT - finds no existing trace
2. Task B: SELECT - finds no existing trace (before A's INSERT commits)
3. Task A: INSERT - succeeds
4. Task B: INSERT - FAILS with IntegrityError (Duplicate entry)
"""
import asyncio
import uuid
from datetime import datetime, timezone
from agno.db.mysql import AsyncMySQLDb
from agno.tracing.schemas import Trace
def create_test_trace(trace_id: str, name: str, task_id: int) -> Trace:
    """Build a minimal OK-status Trace whose agent_id encodes the task number."""
    timestamp = datetime.now(timezone.utc)
    fields = dict(
        trace_id=trace_id,
        name=name,
        status="OK",
        start_time=timestamp,
        end_time=timestamp,
        duration_ms=100,
        total_spans=1,
        error_count=0,
        run_id=None,
        session_id=None,
        user_id=None,
        agent_id=f"agent-{task_id}",
        team_id=None,
        workflow_id=None,
        created_at=timestamp,
    )
    return Trace(**fields)
async def concurrent_create_trace(
    db: AsyncMySQLDb,
    trace: Trace,
    task_id: int,
    barrier: asyncio.Barrier,
) -> dict:
    """Attempt one upsert_trace() call after synchronizing on *barrier*.

    Returns a result dict recording whether the call succeeded and, if not,
    the stringified error that was raised.
    """
    outcome = {"task_id": task_id, "success": False, "error": None}
    try:
        # Hold until every task has reached this point.
        print(f" Task {task_id:2d}: Waiting at barrier...")
        await barrier.wait()
        # Every task resumes here at once - this is the race condition window.
        print(f" Task {task_id:2d}: Calling db.upsert_trace()...")
        await db.upsert_trace(trace)
    except Exception as exc:
        message = str(exc)
        outcome["error"] = message
        # MySQL reports the duplicate-key race as an IntegrityError.
        if any(marker in message for marker in ("Duplicate entry", "IntegrityError")):
            print(f" Task {task_id:2d}: FAILED - IntegrityError (Duplicate entry)!")
            banner = "!" * 60
            print(f"\n{banner}")
            print("FULL ERROR:")
            print(f"{banner}")
            print(f"ERROR Error creating trace: {exc}")
            print(f"{banner}\n")
        else:
            print(f" Task {task_id:2d}: FAILED - {type(exc).__name__}: {message[:100]}")
    else:
        outcome["success"] = True
        print(f" Task {task_id:2d}: SUCCESS")
    return outcome
async def cleanup_trace(db: AsyncMySQLDb, trace_id: str):
    """Best-effort removal of a single trace row; failures are only reported."""
    try:
        table = await db._get_table(table_type="traces", create_table_if_not_found=True)
        if table is None:
            return
        from sqlalchemy import delete

        async with db.async_session_factory() as sess, sess.begin():
            await sess.execute(delete(table).where(table.c.trace_id == trace_id))
    except Exception as e:
        print(f"Cleanup error (can be ignored): {e}")
async def run_race_test(db: AsyncMySQLDb, num_tasks: int = 10):
    """Fire *num_tasks* simultaneous upsert_trace() calls for one trace_id."""
    # A fresh trace_id per run keeps attempts independent.
    trace_id = f"race-test-{uuid.uuid4().hex[:8]}"
    bar = "=" * 60
    print(f"\n{bar}")
    print("RACE CONDITION TEST (ASYNC MYSQL)")
    print(f"{bar}")
    print(f"Trace ID: {trace_id}")
    print(f"Concurrent tasks: {num_tasks}")
    print(f"{bar}\n")

    # The barrier releases every task at the same instant.
    barrier = asyncio.Barrier(num_tasks)
    # All tasks write the SAME trace_id to provoke the duplicate-key race.
    traces = [create_test_trace(trace_id, f"Agent.run-task-{i}", i) for i in range(num_tasks)]
    tasks = [asyncio.create_task(concurrent_create_trace(db, traces[i], i, barrier)) for i in range(num_tasks)]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    print(f"\n{bar}")
    print("RESULTS")
    print(f"{bar}")
    ok = [r for r in results if not isinstance(r, Exception) and r["success"]]
    successes = len(ok)
    failures = len(results) - successes
    print(f"\nSuccesses: {successes}")
    print(f"Failures: {failures}")
    # Cleanup - commented out to see entries in database
    # await cleanup_trace(db, trace_id)
async def main():
    """Run several race-condition attempts against a dedicated MySQL test table."""
    # Database configuration - MySQL connection
    db_url = "mysql+asyncmy://ai:ai@localhost:3306/ai"
    # Fix: mask the credential portion of the DSN when printing, matching the
    # sibling Postgres scripts - the previous version logged "ai:ai" in clear text.
    print(f"Database URL: {db_url.split('@')[1] if '@' in db_url else db_url}")
    # Create AsyncMySQLDb instance
    db = AsyncMySQLDb(
        db_url=db_url,
        traces_table="agno_traces_race_test",
    )
    try:
        # Pre-create/cache the table to avoid table creation race conditions.
        # This ensures the table exists before concurrent tests start.
        print("Initializing table...")
        await db._get_table(table_type="traces", create_table_if_not_found=True)
        print("Table ready.")
        # Run multiple attempts
        attempts = 5
        tasks_per_attempt = 15
        print(f"\n{'#' * 60}")
        print(f"RUNNING {attempts} ATTEMPTS WITH {tasks_per_attempt} CONCURRENT TASKS EACH")
        print(f"{'#' * 60}")
        for attempt in range(attempts):
            print(f"\n--- Attempt {attempt + 1}/{attempts} ---")
            await run_race_test(db, tasks_per_attempt)
        # Final summary
        print(f"\n{'#' * 60}")
        print("FINAL SUMMARY")
        print(f"{'#' * 60}")
        print(f"Total attempts: {attempts}")
        print(f"Tasks per attempt: {tasks_per_attempt}")
        print("\nNote: Check ERROR logs above for IntegrityError (Duplicate entry) errors.")
        print("If you see ERROR logs, the race condition exists and needs the upsert fix.")
    finally:
        # Cleanup: dispose of the engine even when an attempt raised.
        if db.db_engine:
            await db.db_engine.dispose()


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_trace_upsert.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_postgres/test_trace_upsert.py | """
Test script to reproduce the UniqueViolation race condition in upsert_trace.
This script demonstrates the race condition that occurs when multiple concurrent
calls to upsert_trace() attempt to insert the same trace_id.
The race condition window:
1. Task A: SELECT - finds no existing trace
2. Task B: SELECT - finds no existing trace (before A's INSERT commits)
3. Task A: INSERT - succeeds
4. Task B: INSERT - FAILS with UniqueViolation
"""
import asyncio
import uuid
from datetime import datetime, timezone
from agno.db.postgres import AsyncPostgresDb
from agno.tracing.schemas import Trace
def create_test_trace(trace_id: str, name: str, task_id: int) -> Trace:
    """Build a minimal OK-status Trace whose agent_id encodes the task number."""
    timestamp = datetime.now(timezone.utc)
    fields = dict(
        trace_id=trace_id,
        name=name,
        status="OK",
        start_time=timestamp,
        end_time=timestamp,
        duration_ms=100,
        total_spans=1,
        error_count=0,
        run_id=None,
        session_id=None,
        user_id=None,
        agent_id=f"agent-{task_id}",
        team_id=None,
        workflow_id=None,
        created_at=timestamp,
    )
    return Trace(**fields)
async def concurrent_create_trace(
    db: AsyncPostgresDb,
    trace: Trace,
    task_id: int,
    barrier: asyncio.Barrier,
) -> dict:
    """Attempt one upsert_trace() call after synchronizing on *barrier*.

    Returns a result dict recording whether the call succeeded and, if not,
    the stringified error that was raised.
    """
    outcome = {"task_id": task_id, "success": False, "error": None}
    try:
        # Hold until every task has reached this point.
        print(f" Task {task_id:2d}: Waiting at barrier...")
        await barrier.wait()
        # Every task resumes here at once - this is the race condition window.
        print(f" Task {task_id:2d}: Calling db.upsert_trace()...")
        await db.upsert_trace(trace)
    except Exception as exc:
        message = str(exc)
        outcome["error"] = message
        # Postgres reports the duplicate-key race as a UniqueViolation.
        hit_unique_violation = "UniqueViolation" in message or "duplicate key" in message.lower()
        if hit_unique_violation:
            print(f" Task {task_id:2d}: FAILED - UniqueViolation!")
            banner = "!" * 60
            print(f"\n{banner}")
            print("FULL ERROR (same as user's original error):")
            print(f"{banner}")
            print(f"ERROR Error creating trace: {exc}")
            print(f"{banner}\n")
        else:
            print(f" Task {task_id:2d}: FAILED - {type(exc).__name__}: {message[:100]}")
    else:
        outcome["success"] = True
        print(f" Task {task_id:2d}: SUCCESS")
    return outcome
async def cleanup_trace(db: AsyncPostgresDb, trace_id: str):
    """Best-effort removal of a single trace row; failures are only reported."""
    try:
        table = await db._get_table(table_type="traces", create_table_if_not_found=True)
        if table is None:
            return
        from sqlalchemy import delete

        async with db.async_session_factory() as sess, sess.begin():
            await sess.execute(delete(table).where(table.c.trace_id == trace_id))
    except Exception as e:
        print(f"Cleanup error (can be ignored): {e}")
async def run_race_test(db: AsyncPostgresDb, num_tasks: int = 10):
    """Fire *num_tasks* simultaneous upsert_trace() calls for one trace_id."""
    # A fresh trace_id per run keeps attempts independent.
    trace_id = f"race-test-{uuid.uuid4().hex[:8]}"
    bar = "=" * 60
    print(f"\n{bar}")
    print("RACE CONDITION TEST")
    print(f"{bar}")
    print(f"Trace ID: {trace_id}")
    print(f"Concurrent tasks: {num_tasks}")
    print(f"{bar}\n")

    # The barrier releases every task at the same instant.
    barrier = asyncio.Barrier(num_tasks)
    # All tasks write the SAME trace_id to provoke the duplicate-key race.
    traces = [create_test_trace(trace_id, f"Agent.run-task-{i}", i) for i in range(num_tasks)]
    tasks = [asyncio.create_task(concurrent_create_trace(db, traces[i], i, barrier)) for i in range(num_tasks)]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    print(f"\n{bar}")
    print("RESULTS")
    print(f"{bar}")
    ok = [r for r in results if not isinstance(r, Exception) and r["success"]]
    successes = len(ok)
    failures = len(results) - successes
    print(f"\nSuccesses: {successes}")
    print(f"Failures: {failures}")
    # Cleanup - commented out to see entries in database
    # await cleanup_trace(db, trace_id)
async def main():
    """Run several race-condition attempts against a dedicated Postgres test table."""
    # Database configuration - same as cookbook example
    db_url = "postgresql+psycopg_async://ai:ai@localhost:5532/ai"
    # Mask the credential portion of the DSN before logging it.
    masked = db_url.split("@")[1] if "@" in db_url else db_url
    print(f"Database URL: {masked}")

    # Create AsyncPostgresDb instance (same pattern as cookbook).
    db = AsyncPostgresDb(
        db_url=db_url,
        db_schema="ai",
        traces_table="agno_traces_race_test",
    )
    try:
        # Pre-create/cache the table so concurrent tests don't race on DDL.
        print("Initializing table...")
        await db._get_table(table_type="traces", create_table_if_not_found=True)
        print("Table ready.")

        attempts = 5
        tasks_per_attempt = 15
        hashes = "#" * 60
        print(f"\n{hashes}")
        print(f"RUNNING {attempts} ATTEMPTS WITH {tasks_per_attempt} CONCURRENT TASKS EACH")
        print(f"{hashes}")
        for attempt in range(attempts):
            print(f"\n--- Attempt {attempt + 1}/{attempts} ---")
            await run_race_test(db, tasks_per_attempt)

        print(f"\n{hashes}")
        print("FINAL SUMMARY")
        print(f"{hashes}")
        print(f"Total attempts: {attempts}")
        print(f"Tasks per attempt: {tasks_per_attempt}")
        print("\nNote: Check ERROR logs above for UniqueViolation errors.")
        print("If you see ERROR logs, the race condition exists and needs the upsert fix.")
    finally:
        # Cleanup: dispose of the engine even when an attempt raised.
        if db.db_engine:
            await db.db_engine.dispose()


if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_postgres/test_trace_upsert.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_trace_upsert.py | """
Test script to reproduce the UniqueViolation race condition in upsert_trace (SYNC version).
This script demonstrates the race condition that occurs when multiple concurrent
calls to upsert_trace() attempt to insert the same trace_id using the synchronous
PostgresDb class.
The race condition window:
1. Thread A: SELECT - finds no existing trace
2. Thread B: SELECT - finds no existing trace (before A's INSERT commits)
3. Thread A: INSERT - succeeds
4. Thread B: INSERT - FAILS with UniqueViolation
"""
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timezone
from threading import Barrier
from agno.db.postgres import PostgresDb
from agno.tracing.schemas import Trace
def create_test_trace(trace_id: str, name: str, task_id: int) -> Trace:
    """Build a minimal OK-status Trace whose agent_id encodes the task number."""
    timestamp = datetime.now(timezone.utc)
    fields = dict(
        trace_id=trace_id,
        name=name,
        status="OK",
        start_time=timestamp,
        end_time=timestamp,
        duration_ms=100,
        total_spans=1,
        error_count=0,
        run_id=None,
        session_id=None,
        user_id=None,
        agent_id=f"agent-{task_id}",
        team_id=None,
        workflow_id=None,
        created_at=timestamp,
    )
    return Trace(**fields)
def concurrent_create_trace(
    db: PostgresDb,
    trace: Trace,
    task_id: int,
    barrier: Barrier,
) -> dict:
    """Attempt one upsert_trace() call after synchronizing on *barrier*.

    Returns a result dict recording whether the call succeeded and, if not,
    the stringified error that was raised.
    """
    outcome = {"task_id": task_id, "success": False, "error": None}
    try:
        # Hold until every thread has reached this point.
        print(f" Task {task_id:2d}: Waiting at barrier...")
        barrier.wait()
        # Every thread resumes here at once - this is the race condition window.
        print(f" Task {task_id:2d}: Calling db.upsert_trace()...")
        db.upsert_trace(trace)
    except Exception as exc:
        message = str(exc)
        outcome["error"] = message
        # Postgres reports the duplicate-key race as a UniqueViolation.
        hit_unique_violation = "UniqueViolation" in message or "duplicate key" in message.lower()
        if hit_unique_violation:
            print(f" Task {task_id:2d}: FAILED - UniqueViolation!")
            banner = "!" * 60
            print(f"\n{banner}")
            print("FULL ERROR (same as user's original error):")
            print(f"{banner}")
            print(f"ERROR Error creating trace: {exc}")
            print(f"{banner}\n")
        else:
            print(f" Task {task_id:2d}: FAILED - {type(exc).__name__}: {message[:100]}")
    else:
        outcome["success"] = True
        print(f" Task {task_id:2d}: SUCCESS")
    return outcome
def cleanup_trace(db: PostgresDb, trace_id: str):
    """Best-effort removal of one trace row; failures are reported, never raised."""
    try:
        from sqlalchemy import delete

        traces_table = db._get_table(table_type="traces", create_table_if_not_found=True)
        if traces_table is None:
            return
        with db.session_factory() as sess, sess.begin():
            sess.execute(delete(traces_table).where(traces_table.c.trace_id == trace_id))
    except Exception as exc:
        print(f"Cleanup error (can be ignored): {exc}")
def run_race_test(db: PostgresDb, num_tasks: int = 10):
    """Fire num_tasks threads at db.upsert_trace() using one shared trace_id."""
    separator = "=" * 60
    # Fresh trace_id per run so successive attempts never see leftover rows.
    trace_id = f"race-test-{uuid.uuid4().hex[:8]}"
    print(f"\n{separator}")
    print("RACE CONDITION TEST (SYNC)")
    print(separator)
    print(f"Trace ID: {trace_id}")
    print(f"Concurrent threads: {num_tasks}")
    print(f"{separator}\n")
    start_gate = Barrier(num_tasks)
    # Every payload shares the same trace_id so the inserts collide.
    payloads = [create_test_trace(trace_id, f"Agent.run-task-{i}", i) for i in range(num_tasks)]
    outcomes = []
    with ThreadPoolExecutor(max_workers=num_tasks) as pool:
        pending = [
            pool.submit(concurrent_create_trace, db, payload, idx, start_gate)
            for idx, payload in enumerate(payloads)
        ]
        for done in as_completed(pending):
            try:
                outcomes.append(done.result())
            except Exception as exc:
                outcomes.append({"task_id": -1, "success": False, "error": str(exc)})
    print(f"\n{separator}")
    print("RESULTS")
    print(separator)
    ok_count = sum(1 for entry in outcomes if entry["success"])
    failed_count = len(outcomes) - ok_count
    print(f"\nSuccesses: {ok_count}")
    print(f"Failures: {failed_count}")
    # Cleanup intentionally skipped so the rows stay inspectable in the database:
    # cleanup_trace(db, trace_id)
def main():
    """Entry point: run several race-condition attempts against local Postgres."""
    # Same connection settings as the cookbook example.
    db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
    print(f"Database URL: {db_url.split('@')[1] if '@' in db_url else db_url}")
    db = PostgresDb(
        db_url=db_url,
        db_schema="ai",
        traces_table="agno_traces_race_test_sync",
    )
    try:
        # Create and cache the table up front so the concurrent phase never
        # races on table creation itself - only on the row insert.
        print("Initializing table...")
        db._get_table(table_type="traces", create_table_if_not_found=True)
        print("Table ready.")
        attempts = 5
        tasks_per_attempt = 15
        banner = "#" * 60
        print(f"\n{banner}")
        print(f"RUNNING {attempts} ATTEMPTS WITH {tasks_per_attempt} CONCURRENT THREADS EACH")
        print(banner)
        for attempt_index in range(attempts):
            print(f"\n--- Attempt {attempt_index + 1}/{attempts} ---")
            run_race_test(db, tasks_per_attempt)
        print(f"\n{banner}")
        print("FINAL SUMMARY")
        print(banner)
        print(f"Total attempts: {attempts}")
        print(f"Tasks per attempt: {tasks_per_attempt}")
        print("\nNote: Check ERROR logs above for UniqueViolation errors.")
        print("If you see ERROR logs, the race condition exists and needs the upsert fix.")
    finally:
        # Always release pooled connections, even when an attempt raises.
        if db.db_engine:
            db.db_engine.dispose()


if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_trace_upsert.py",
"license": "Apache License 2.0",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/sqlite/test_upsert_trace.py | """
Test script to reproduce the UniqueViolation race condition in upsert_trace (SYNC version).
This script demonstrates the race condition that occurs when multiple concurrent
calls to upsert_trace() attempt to insert the same trace_id using the synchronous
SqliteDb class.
The race condition window:
1. Thread A: SELECT - finds no existing trace
2. Thread B: SELECT - finds no existing trace (before A's INSERT commits)
3. Thread A: INSERT - succeeds
4. Thread B: INSERT - FAILS with IntegrityError (UNIQUE constraint failed)
"""
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timezone
from threading import Barrier
from agno.db.sqlite import SqliteDb
from agno.tracing.schemas import Trace
def create_test_trace(trace_id: str, name: str, task_id: int) -> Trace:
    """Return a Trace fixture; only the identity fields vary between tasks."""
    moment = datetime.now(timezone.utc)
    # Fields that are intentionally left unset for this test.
    unset_fields = dict(run_id=None, session_id=None, user_id=None, team_id=None, workflow_id=None)
    return Trace(
        trace_id=trace_id,
        name=name,
        status="OK",
        start_time=moment,
        end_time=moment,
        duration_ms=100,
        total_spans=1,
        error_count=0,
        agent_id=f"agent-{task_id}",
        created_at=moment,
        **unset_fields,
    )
def concurrent_create_trace(
    db: SqliteDb,
    trace: Trace,
    task_id: int,
    barrier: Barrier,
) -> dict:
    """Run one synchronized upsert_trace() attempt and report the outcome."""
    report = {"task_id": task_id, "success": False, "error": None}
    try:
        # Block until every worker arrives; all release together, which is
        # exactly the race-condition window this script probes.
        print(f" Task {task_id:2d}: Waiting at barrier...")
        barrier.wait()
        print(f" Task {task_id:2d}: Calling db.upsert_trace()...")
        db.upsert_trace(trace)
        report["success"] = True
        print(f" Task {task_id:2d}: SUCCESS")
    except Exception as err:
        text = str(err)
        report["error"] = text
        if "UNIQUE constraint failed" in text or "IntegrityError" in text:
            # SQLite's equivalent of Postgres' UniqueViolation.
            print(f" Task {task_id:2d}: FAILED - IntegrityError (UNIQUE constraint)!")
            bang_line = "!" * 60
            print(f"\n{bang_line}")
            print("FULL ERROR:")
            print(bang_line)
            print(f"ERROR Error creating trace: {err}")
            print(f"{bang_line}\n")
        else:
            print(f" Task {task_id:2d}: FAILED - {type(err).__name__}: {text[:100]}")
    return report
def cleanup_trace(db: SqliteDb, trace_id: str):
    """Remove one trace row, swallowing (but reporting) any failure."""
    try:
        from sqlalchemy import delete

        traces_table = db._get_table(table_type="traces", create_table_if_not_found=True)
        if traces_table is None:
            return
        with db.session_factory() as sess, sess.begin():
            sess.execute(delete(traces_table).where(traces_table.c.trace_id == trace_id))
    except Exception as err:
        print(f"Cleanup error (can be ignored): {err}")
def run_race_test(db: SqliteDb, num_tasks: int = 10):
    """Launch num_tasks threads that all upsert the same trace_id at once."""
    rule = "=" * 60
    # A unique id per run keeps successive attempts independent.
    trace_id = f"race-test-{uuid.uuid4().hex[:8]}"
    print(f"\n{rule}")
    print("RACE CONDITION TEST (SYNC SQLITE)")
    print(rule)
    print(f"Trace ID: {trace_id}")
    print(f"Concurrent threads: {num_tasks}")
    print(f"{rule}\n")
    gate = Barrier(num_tasks)
    # Identical trace_id across all payloads forces the insert collision.
    shared_traces = [create_test_trace(trace_id, f"Agent.run-task-{i}", i) for i in range(num_tasks)]
    reports = []
    with ThreadPoolExecutor(max_workers=num_tasks) as pool:
        submitted = [
            pool.submit(concurrent_create_trace, db, shared_traces[idx], idx, gate)
            for idx in range(num_tasks)
        ]
        for finished in as_completed(submitted):
            try:
                reports.append(finished.result())
            except Exception as err:
                reports.append({"task_id": -1, "success": False, "error": str(err)})
    print(f"\n{rule}")
    print("RESULTS")
    print(rule)
    succeeded = sum(1 for entry in reports if entry["success"])
    failed = sum(1 for entry in reports if not entry["success"])
    print(f"\nSuccesses: {succeeded}")
    print(f"Failures: {failed}")
    # Cleanup deliberately left disabled so the rows stay visible in the db:
    # cleanup_trace(db, trace_id)
def main():
    """Drive several race-condition attempts against a local SQLite file."""
    db_file = "tmp/race_test_sync.db"
    print(f"Database file: {db_file}")
    db = SqliteDb(
        db_file=db_file,
        traces_table="agno_traces_race_test_sync",
    )
    try:
        # Warm the table cache first so concurrent workers never race on
        # table creation itself - only on the row insert.
        print("Initializing table...")
        db._get_table(table_type="traces", create_table_if_not_found=True)
        print("Table ready.")
        attempts = 5
        tasks_per_attempt = 15
        hash_rule = "#" * 60
        print(f"\n{hash_rule}")
        print(f"RUNNING {attempts} ATTEMPTS WITH {tasks_per_attempt} CONCURRENT THREADS EACH")
        print(hash_rule)
        for attempt_index in range(attempts):
            print(f"\n--- Attempt {attempt_index + 1}/{attempts} ---")
            run_race_test(db, tasks_per_attempt)
        print(f"\n{hash_rule}")
        print("FINAL SUMMARY")
        print(hash_rule)
        print(f"Total attempts: {attempts}")
        print(f"Tasks per attempt: {tasks_per_attempt}")
        print("\nNote: Check ERROR logs above for IntegrityError (UNIQUE constraint) errors.")
        print("If you see ERROR logs, the race condition exists and needs the upsert fix.")
    finally:
        # Dispose of the engine so the SQLite file handle is released.
        if db.db_engine:
            db.db_engine.dispose()


if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/sqlite/test_upsert_trace.py",
"license": "Apache License 2.0",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_readers.py | """Tests for Knowledge.get_readers() method, specifically testing list to dict conversion."""
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.base import Reader
from agno.knowledge.reader.text_reader import TextReader
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """Minimal VectorDb stub for testing.

    Implements every hook of the VectorDb interface as a no-op or fixed
    return value so that Knowledge can be constructed in unit tests without
    a real vector-database backend.
    """

    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    def name_exists(self, name: str) -> bool:
        return False

    # NOTE(review): declared without `async`, unlike the other async_* hooks
    # in this stub - presumably matches the VectorDb interface; confirm.
    def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return False

    def insert(self, content_hash: str, documents, filters=None) -> None:
        pass

    async def async_insert(self, content_hash: str, documents, filters=None) -> None:
        pass

    def upsert(self, content_hash: str, documents, filters=None) -> None:
        pass

    async def async_upsert(self, content_hash: str, documents, filters=None) -> None:
        pass

    def search(self, query: str, limit: int = 5, filters=None):
        # Searches always come back empty; the reader tests never rely on results.
        return []

    async def async_search(self, query: str, limit: int = 5, filters=None):
        return []

    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self):
        return ["vector"]
class CustomReader(Reader):
    """Custom reader for testing.

    A minimal concrete Reader used to verify that get_readers() derives dict
    keys from the reader's class name when no explicit name is provided.
    """

    def __init__(self, name: str = None, **kwargs):
        super().__init__(name=name, **kwargs)

    @classmethod
    def get_supported_chunking_strategies(cls):
        # Imported lazily to keep the test module import-light.
        from agno.knowledge.chunking.strategy import ChunkingStrategyType

        return [ChunkingStrategyType.FIXED_SIZE_CHUNKER]

    @classmethod
    def get_supported_content_types(cls):
        from agno.knowledge.types import ContentType

        return [ContentType.TXT]

    def read(self, obj, name=None):
        # This stub never yields documents; the tests only inspect registration.
        return []
def test_get_readers_with_none():
    """get_readers() must replace a None readers attribute with an empty dict."""
    kb = Knowledge(vector_db=MockVectorDb())
    kb.readers = None
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert not readers_map
    assert kb.readers == {}
def test_get_readers_with_empty_dict():
    """get_readers() must hand back an existing empty dict, same object."""
    kb = Knowledge(vector_db=MockVectorDb())
    kb.readers = {}
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert len(readers_map) == 0
    assert readers_map is kb.readers
def test_get_readers_with_existing_dict():
    """get_readers() must return an already-populated dict unchanged."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = TextReader(name="reader1")
    second = TextReader(name="reader2")
    kb.readers = {"reader1": first, "reader2": second}
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert len(readers_map) == 2
    assert readers_map["reader1"] is first
    assert readers_map["reader2"] is second
def test_get_readers_converts_list_to_dict():
    """get_readers() must convert a list of readers into a name-keyed dict."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = TextReader(name="Custom Reader 1")
    second = TextReader(name="Custom Reader 2")
    kb.readers = [first, second]
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert len(readers_map) == 2
    # Keys are generated from the reader names; values are the Reader objects.
    assert all(isinstance(key, str) for key in readers_map)
    assert all(isinstance(entry, Reader) for entry in readers_map.values())
    assert first in readers_map.values()
    assert second in readers_map.values()
    # The attribute itself must have been converted in place.
    assert isinstance(kb.readers, dict)
def test_get_readers_handles_duplicate_keys():
    """get_readers() must keep keys unique when reader names collide."""
    kb = Knowledge(vector_db=MockVectorDb())
    # Three readers sharing one name force a key collision.
    clones = [TextReader(name="custom_reader") for _ in range(3)]
    kb.readers = list(clones)
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert len(readers_map) == 3
    generated_keys = list(readers_map)
    assert len(generated_keys) == len(set(generated_keys))
    # Every clone must survive the conversion.
    for clone in clones:
        assert clone in readers_map.values()
def test_get_readers_skips_non_reader_objects():
    """get_readers() must drop list entries that are not Reader instances."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = TextReader(name="reader1")
    intruder = "not a reader"
    second = TextReader(name="reader2")
    kb.readers = [first, intruder, second]
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert len(readers_map) == 2
    assert first in readers_map.values()
    assert second in readers_map.values()
    assert intruder not in readers_map.values()
def test_get_readers_handles_empty_list():
    """get_readers() must turn an empty list into an empty dict."""
    kb = Knowledge(vector_db=MockVectorDb())
    kb.readers = []
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert not readers_map
def test_get_readers_resets_unexpected_types():
    """get_readers() must fall back to an empty dict for unsupported types."""
    kb = Knowledge(vector_db=MockVectorDb())
    kb.readers = "not a list or dict"
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert not readers_map
    assert kb.readers == {}
def test_get_readers_with_readers_without_names():
    """get_readers() must derive keys from the class name for unnamed readers."""
    kb = Knowledge(vector_db=MockVectorDb())
    unnamed_text = TextReader()  # no explicit name
    unnamed_custom = CustomReader()  # no explicit name
    kb.readers = [unnamed_text, unnamed_custom]
    readers_map = kb.get_readers()
    assert isinstance(readers_map, dict)
    assert len(readers_map) == 2
    lowered_keys = [key.lower() for key in readers_map]
    assert any("textreader" in key for key in lowered_keys)
    assert any("customreader" in key for key in lowered_keys)
def test_get_readers_preserves_existing_dict_on_multiple_calls():
    """Repeated get_readers() calls must return the very same dict object."""
    kb = Knowledge(vector_db=MockVectorDb())
    first = TextReader(name="reader1")
    second = TextReader(name="reader2")
    kb.readers = {"reader1": first, "reader2": second}
    initial = kb.get_readers()
    repeat = kb.get_readers()
    assert initial is repeat
    assert initial is kb.readers
    assert len(initial) == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_readers.py",
"license": "Apache License 2.0",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.