sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/agno/knowledge/reader/youtube_reader.py | import asyncio
from typing import List, Optional
from agno.knowledge.chunking.recursive import RecursiveChunking
from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyType
from agno.knowledge.document.base import Document
from agno.knowledge.reader.base import Reader
from agno.knowledge.types import ContentType
from agno.utils.log import log_debug, log_error, log_info
try:
from youtube_transcript_api import YouTubeTranscriptApi
except ImportError:
raise ImportError(
"`youtube_transcript_api` not installed. Please install it via `pip install youtube_transcript_api`."
)
class YouTubeReader(Reader):
    """Reader that turns a YouTube video's transcript into Documents."""

    def __init__(self, chunking_strategy: Optional[ChunkingStrategy] = RecursiveChunking(), **kwargs):
        # NOTE(review): the default RecursiveChunking() is evaluated once at import
        # time and shared by every reader relying on the default — confirm the
        # strategy is stateless before reusing across instances.
        super().__init__(chunking_strategy=chunking_strategy, **kwargs)

    @classmethod
    def get_supported_chunking_strategies(cls) -> List[ChunkingStrategyType]:
        """Get the list of supported chunking strategies for YouTube readers."""
        return [
            ChunkingStrategyType.RECURSIVE_CHUNKER,
            ChunkingStrategyType.CODE_CHUNKER,
            ChunkingStrategyType.AGENTIC_CHUNKER,
            ChunkingStrategyType.DOCUMENT_CHUNKER,
            ChunkingStrategyType.SEMANTIC_CHUNKER,
            ChunkingStrategyType.FIXED_SIZE_CHUNKER,
        ]

    @classmethod
    def get_supported_content_types(cls) -> List[ContentType]:
        return [ContentType.YOUTUBE]

    @staticmethod
    def _extract_video_id(url: str) -> str:
        """Extract the video id from common YouTube URL shapes.

        Handles watch URLs (``?v=<id>``), ``youtu.be/<id>`` short links and
        ``/shorts/<id>`` / ``/embed/<id>`` paths. Falls back to the original
        naive ``v=`` split for anything unrecognized, so previous behavior is
        preserved for odd inputs.
        """
        from urllib.parse import parse_qs, urlparse

        parsed = urlparse(url)
        host = (parsed.netloc or "").lower()
        if "youtu.be" in host:
            candidate = parsed.path.lstrip("/").split("/")[0]
            if candidate:
                return candidate
        query_v = parse_qs(parsed.query).get("v")
        if query_v:
            return query_v[0]
        for marker in ("/shorts/", "/embed/"):
            if parsed.path.startswith(marker):
                candidate = parsed.path[len(marker):].split("/")[0]
                if candidate:
                    return candidate
        # Legacy fallback: identical to the previous implementation.
        return url.split("v=")[-1].split("&")[0]

    def read(self, url: str, name: Optional[str] = None) -> List[Document]:
        """Fetch the transcript for *url* and return it as Document(s).

        Returns an empty list on any failure (bad URL, missing transcript, ...).
        """
        try:
            video_id = self._extract_video_id(url)
            log_info(f"Reading transcript for video: {video_id}")

            log_debug(f"Fetching transcript for video: {video_id}")
            # Create an instance of YouTubeTranscriptApi and fetch the timed segments.
            ytt_api = YouTubeTranscriptApi()
            transcript_data = ytt_api.fetch(video_id)

            # Join the timed transcript segments into one block of text.
            transcript_text = " ".join(segment.text for segment in transcript_data)

            documents = [
                Document(
                    name=name or f"youtube_{video_id}",
                    id=f"youtube_{video_id}",
                    meta_data={"video_url": url, "video_id": video_id},
                    content=transcript_text.strip(),
                )
            ]
            if self.chunk:
                chunked_documents = []
                for document in documents:
                    chunked_documents.extend(self.chunk_document(document))
                return chunked_documents
            return documents
        except Exception as e:
            log_error(f"Error reading transcript for {url}: {e}")
            return []

    async def async_read(self, url: str) -> List[Document]:
        """Run the blocking read() in the default executor of the running loop."""
        # get_running_loop() is the supported call inside a coroutine;
        # get_event_loop() is deprecated here since Python 3.10.
        return await asyncio.get_running_loop().run_in_executor(None, self.read, url)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reader/youtube_reader.py",
"license": "Apache License 2.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/remote_content.py | from dataclasses import dataclass
from typing import Optional, Union
from agno.cloud.aws.s3.bucket import S3Bucket
from agno.cloud.aws.s3.object import S3Object
@dataclass
class S3Content:
    """Reference to content stored in an AWS S3 bucket.

    Exactly one of ``bucket_name``/``bucket`` and exactly one of
    ``key``/``object``/``prefix`` must be provided.
    """

    def __init__(
        self,
        bucket_name: Optional[str] = None,
        bucket: Optional[S3Bucket] = None,
        key: Optional[str] = None,
        object: Optional[S3Object] = None,
        prefix: Optional[str] = None,
        config_id: Optional[str] = None,
    ):
        self.bucket_name = bucket_name
        self.bucket = bucket
        self.key = key
        self.object = object
        self.prefix = prefix
        self.config_id = config_id
        # Validate the mutually-exclusive argument combinations.
        if bucket_name is None and bucket is None:
            raise ValueError("Either bucket_name or bucket must be provided")
        if key is None and object is None and prefix is None:
            raise ValueError("Either key, object, or prefix must be provided")
        if bucket_name is not None and bucket is not None:
            raise ValueError("Either bucket_name or bucket must be provided, not both")
        selectors = [value for value in (key, object, prefix) if value is not None]
        if len(selectors) > 1:
            raise ValueError("Only one of key, object, or prefix should be provided")
        # A bare bucket name is promoted to a full S3Bucket handle.
        if self.bucket_name is not None:
            self.bucket = S3Bucket(name=self.bucket_name)

    def get_config(self):
        """Serialize this reference to a plain dict."""
        config = {
            "bucket_name": self.bucket_name,
            "bucket": self.bucket,
            "key": self.key,
            "object": self.object,
            "prefix": self.prefix,
            "config_id": self.config_id,
        }
        return config
@dataclass
class GCSContent:
    """Reference to content stored in a Google Cloud Storage bucket.

    Provide exactly one of ``bucket``/``bucket_name`` and at least one of
    ``blob_name``/``prefix``.
    """

    def __init__(
        self,
        bucket=None,  # Type hint removed to avoid import issues
        bucket_name: Optional[str] = None,
        blob_name: Optional[str] = None,
        prefix: Optional[str] = None,
        config_id: Optional[str] = None,
    ):
        self.bucket = bucket
        self.bucket_name = bucket_name
        self.blob_name = blob_name
        self.prefix = prefix
        self.config_id = config_id
        self._validate()

    def _validate(self) -> None:
        """Reject ambiguous or incomplete bucket/blob combinations."""
        if self.bucket is None and self.bucket_name is None:
            raise ValueError("No bucket or bucket_name provided")
        if self.bucket is not None and self.bucket_name is not None:
            raise ValueError("Provide either bucket or bucket_name")
        if self.blob_name is None and self.prefix is None:
            raise ValueError("Either blob_name or prefix must be provided")

    def get_config(self):
        """Serialize this reference to a plain dict."""
        return {
            "bucket": self.bucket,
            "bucket_name": self.bucket_name,
            "blob_name": self.blob_name,
            "prefix": self.prefix,
            "config_id": self.config_id,
        }
@dataclass
class SharePointContent:
    """Content reference for SharePoint files.

    Exactly one of ``file_path`` or ``folder_path`` must be given.
    """

    def __init__(
        self,
        config_id: str,
        file_path: Optional[str] = None,
        folder_path: Optional[str] = None,
        site_path: Optional[str] = None,
        drive_id: Optional[str] = None,
    ):
        self.config_id = config_id
        self.file_path = file_path
        self.folder_path = folder_path
        self.site_path = site_path
        self.drive_id = drive_id
        # file_path and folder_path are mutually exclusive and one is required.
        if self.file_path is None and self.folder_path is None:
            raise ValueError("Either file_path or folder_path must be provided")
        if self.file_path is not None and self.folder_path is not None:
            raise ValueError("Provide either file_path or folder_path, not both")

    def get_config(self):
        """Serialize this reference to a plain dict."""
        names = ("config_id", "file_path", "folder_path", "site_path", "drive_id")
        return {name: getattr(self, name) for name in names}
@dataclass
class GitHubContent:
    """Content reference for GitHub files.

    Exactly one of ``file_path`` or ``folder_path`` must be given.
    """

    def __init__(
        self,
        config_id: str,
        file_path: Optional[str] = None,
        folder_path: Optional[str] = None,
        branch: Optional[str] = None,
    ):
        self.config_id = config_id
        self.file_path = file_path
        self.folder_path = folder_path
        self.branch = branch
        # Both-missing and both-present are equally invalid.
        if (self.file_path is None) == (self.folder_path is None):
            if self.file_path is None:
                raise ValueError("Either file_path or folder_path must be provided")
            raise ValueError("Provide either file_path or folder_path, not both")

    def get_config(self):
        """Serialize this reference to a plain dict."""
        return {name: getattr(self, name) for name in ("config_id", "file_path", "folder_path", "branch")}
@dataclass
class AzureBlobContent:
    """Content reference for Azure Blob Storage files.

    Used with AzureBlobConfig to load files from Azure Blob Storage containers.
    Supports loading single blobs or entire prefixes (folders); exactly one of
    ``blob_name`` or ``prefix`` must be given.
    """

    def __init__(
        self,
        config_id: str,
        blob_name: Optional[str] = None,
        prefix: Optional[str] = None,
    ):
        self.config_id = config_id
        self.blob_name = blob_name
        self.prefix = prefix
        provided = [v for v in (blob_name, prefix) if v is not None]
        if not provided:
            raise ValueError("Either blob_name or prefix must be provided")
        if len(provided) == 2:
            raise ValueError("Provide either blob_name or prefix, not both")

    def get_config(self):
        """Serialize this reference to a plain dict."""
        return {name: getattr(self, name) for name in ("config_id", "blob_name", "prefix")}
# Type alias covering every supported remote-content reference kind.
RemoteContent = Union[S3Content, GCSContent, SharePointContent, GitHubContent, AzureBlobContent]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/remote_content.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/reranker/base.py | from typing import List
from pydantic import BaseModel, ConfigDict
from agno.knowledge.document import Document
class Reranker(BaseModel):
    """Base class for rerankers.

    Subclasses override ``rerank`` to reorder retrieved documents by
    relevance to *query*.
    """

    # arbitrary_types_allowed lets subclasses hold non-pydantic SDK client objects.
    model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True)

    def rerank(self, query: str, documents: List[Document]) -> List[Document]:
        raise NotImplementedError
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reranker/base.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/reranker/cohere.py | from typing import Any, Dict, List, Optional
from agno.knowledge.document import Document
from agno.knowledge.reranker.base import Reranker
from agno.utils.log import logger
try:
from cohere import Client as CohereClient
except ImportError:
raise ImportError("cohere not installed, please run pip install cohere")
class CohereReranker(Reranker):
    """Reranker backed by the Cohere rerank API."""

    model: str = "rerank-multilingual-v3.0"
    api_key: Optional[str] = None
    cohere_client: Optional[CohereClient] = None
    top_n: Optional[int] = None

    @property
    def client(self) -> CohereClient:
        """Return the configured client, building a new one on demand."""
        if self.cohere_client:
            return self.cohere_client
        client_kwargs: Dict[str, Any] = {"api_key": self.api_key} if self.api_key else {}
        return CohereClient(**client_kwargs)

    def _rerank(self, query: str, documents: List[Document]) -> List[Document]:
        if not documents:
            return []
        # Sanitize top_n: anything non-positive is ignored with a warning.
        limit = self.top_n
        if limit and not (0 < limit):
            logger.warning(f"top_n should be a positive integer, got {self.top_n}, setting top_n to None")
            limit = None
        response = self.client.rerank(query=query, documents=[d.content for d in documents], model=self.model)
        ranked: list[Document] = []
        for result in response.results:
            scored = documents[result.index]
            scored.reranking_score = result.relevance_score
            ranked.append(scored)
        # Highest relevance first; unscored documents sink to the bottom.
        ranked.sort(
            key=lambda d: float("-inf") if d.reranking_score is None else d.reranking_score,
            reverse=True,
        )
        return ranked[:limit] if limit else ranked

    def rerank(self, query: str, documents: List[Document]) -> List[Document]:
        """Rerank defensively: on any failure, return the documents unchanged."""
        try:
            return self._rerank(query=query, documents=documents)
        except Exception as e:
            logger.error(f"Error reranking documents: {e}. Returning original documents")
            return documents
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reranker/cohere.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/reranker/sentence_transformer.py | from typing import Any, Dict, List, Optional
from agno.knowledge.document import Document
from agno.knowledge.reranker.base import Reranker
from agno.utils.log import logger
try:
from sentence_transformers import CrossEncoder
except ImportError:
raise ImportError("`sentence-transformers` not installed, please run `pip install sentence-transformers`")
class SentenceTransformerReranker(Reranker):
    """Reranker backed by a local sentence-transformers CrossEncoder model."""

    model: str = "BAAI/bge-reranker-v2-m3"
    model_kwargs: Optional[Dict[str, Any]] = None
    top_n: Optional[int] = None
    _cross_encoder: Optional[CrossEncoder] = None

    @property
    def client(self) -> CrossEncoder:
        """Lazily construct (and cache) the CrossEncoder instance."""
        if self._cross_encoder is None:
            self._cross_encoder = CrossEncoder(model_name_or_path=self.model, model_kwargs=self.model_kwargs)
        return self._cross_encoder

    def _rerank(self, query: str, documents: List[Document]) -> List[Document]:
        if not documents:
            return []
        # Sanitize top_n: anything non-positive is ignored with a warning.
        limit = self.top_n
        if limit and not (0 < limit):
            logger.warning(f"top_n should be a positive integer, got {self.top_n}, setting top_n to None")
            limit = None
        pairs = [[query, doc.content] for doc in documents]
        scores = self.client.predict(pairs).tolist()
        ranked: list[Document] = []
        for doc, score in zip(documents, scores):
            doc.reranking_score = score
            ranked.append(doc)
        # Highest relevance first; unscored documents sink to the bottom.
        ranked.sort(
            key=lambda d: float("-inf") if d.reranking_score is None else d.reranking_score,
            reverse=True,
        )
        return ranked[:limit] if limit else ranked

    def rerank(self, query: str, documents: List[Document]) -> List[Document]:
        """Rerank defensively: on any failure, return the documents unchanged."""
        try:
            return self._rerank(query=query, documents=documents)
        except Exception as e:
            logger.error(f"Error reranking documents: {e}. Returning original documents")
            return documents
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reranker/sentence_transformer.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/types.py | from enum import Enum
from typing import Any
from pydantic import BaseModel
class ContentType(str, Enum):
    """Enum for content types supported by knowledge readers.

    Values are either symbolic names ("file", "url", ...) or literal file
    extensions (".pdf", ...), so a filename suffix can be matched directly
    against a member value.
    """

    # Generic types
    FILE = "file"
    URL = "url"
    TEXT = "text"
    TOPIC = "topic"
    YOUTUBE = "youtube"
    # Document file extensions
    PDF = ".pdf"
    TXT = ".txt"
    MARKDOWN = ".md"
    DOCX = ".docx"
    DOC = ".doc"
    PPTX = ".pptx"
    JSON = ".json"
    # Spreadsheet file extensions
    CSV = ".csv"
    XLSX = ".xlsx"
    XLS = ".xls"
def get_content_type_enum(content_type_str: str) -> ContentType:
    """Convert a content type string to ContentType enum.

    Raises:
        ValueError: if *content_type_str* is not a valid ContentType value.
    """
    return ContentType(content_type_str)
class KnowledgeFilter(BaseModel):
    """A single key/value filter applied when querying knowledge."""

    key: str  # metadata field name to filter on
    value: Any  # value the field must match
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/types.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/utils.py | from typing import Any, Dict, List, Optional
from agno.knowledge.reader.base import Reader
from agno.knowledge.reader.reader_factory import ReaderFactory
from agno.knowledge.types import ContentType
from agno.utils.log import log_debug
# Reserved metadata namespace for internal agno bookkeeping.
RESERVED_AGNO_KEY = "_agno"


def merge_user_metadata(
    existing: Optional[Dict[str, Any]],
    incoming: Optional[Dict[str, Any]],
) -> Optional[Dict[str, Any]]:
    """Deep-merge two metadata dicts, preserving the ``_agno`` sub-key from both sides.

    Top-level keys from *incoming* overwrite those in *existing* (except
    ``_agno``). Keys inside ``_agno`` are merged individually so that info
    added after the initial source info is not lost. Either argument being
    empty/None short-circuits to the other.
    """
    if not existing:
        return incoming
    if not incoming:
        return existing
    result = {**existing}
    for key, value in incoming.items():
        if key != RESERVED_AGNO_KEY:
            result[key] = value
    if RESERVED_AGNO_KEY in incoming:
        base_agno = result.get(RESERVED_AGNO_KEY) or {}
        patch = incoming[RESERVED_AGNO_KEY]
        if not isinstance(patch, dict):
            patch = {}
        result[RESERVED_AGNO_KEY] = {**base_agno, **patch}
    return result
def set_agno_metadata(
    metadata: Optional[Dict[str, Any]],
    key: str,
    value: Any,
) -> Dict[str, Any]:
    """Set *key* under the reserved ``_agno`` namespace, mutating *metadata* in place.

    A fresh dict is created (and returned) when *metadata* is None.
    """
    target = {} if metadata is None else metadata
    namespace = target.get(RESERVED_AGNO_KEY) or {}
    namespace[key] = value
    target[RESERVED_AGNO_KEY] = namespace
    return target
def get_agno_metadata(
    metadata: Optional[Dict[str, Any]],
    key: str,
) -> Any:
    """Read *key* from the reserved ``_agno`` namespace; None when absent or malformed."""
    agno_meta = (metadata or {}).get(RESERVED_AGNO_KEY)
    return agno_meta.get(key) if isinstance(agno_meta, dict) else None
def strip_agno_metadata(
    metadata: Optional[Dict[str, Any]],
) -> Optional[Dict[str, Any]]:
    """Return a copy of *metadata* without the reserved ``_agno`` key.

    Useful before sending metadata to the vector DB where only
    user-defined fields should be searchable. Falsy input is returned as-is.
    """
    if not metadata:
        return metadata
    cleaned = dict(metadata)
    cleaned.pop(RESERVED_AGNO_KEY, None)
    return cleaned
def _get_chunker_class(strategy_type):
    """Resolve the chunker class for *strategy_type* without instantiating it.

    Raises:
        ValueError: if the strategy type is not recognized.
    """
    from agno.knowledge.chunking.strategy import ChunkingStrategyType

    # Strategy type -> (module path, class name); imported lazily below.
    module_and_class = {
        ChunkingStrategyType.AGENTIC_CHUNKER: ("agno.knowledge.chunking.agentic", "AgenticChunking"),
        ChunkingStrategyType.CODE_CHUNKER: ("agno.knowledge.chunking.code", "CodeChunking"),
        ChunkingStrategyType.DOCUMENT_CHUNKER: ("agno.knowledge.chunking.document", "DocumentChunking"),
        ChunkingStrategyType.RECURSIVE_CHUNKER: ("agno.knowledge.chunking.recursive", "RecursiveChunking"),
        ChunkingStrategyType.SEMANTIC_CHUNKER: ("agno.knowledge.chunking.semantic", "SemanticChunking"),
        ChunkingStrategyType.FIXED_SIZE_CHUNKER: ("agno.knowledge.chunking.fixed", "FixedSizeChunking"),
        ChunkingStrategyType.ROW_CHUNKER: ("agno.knowledge.chunking.row", "RowChunking"),
        ChunkingStrategyType.MARKDOWN_CHUNKER: ("agno.knowledge.chunking.markdown", "MarkdownChunking"),
    }
    if strategy_type not in module_and_class:
        raise ValueError(f"Unknown strategy type: {strategy_type}")
    module_name, class_name = module_and_class[strategy_type]
    return _import_class(module_name, class_name)
def _import_class(module_name: str, class_name: str):
"""Dynamically import a class from a module."""
import importlib
module = importlib.import_module(module_name)
return getattr(module, class_name)
def get_reader_info(reader_key: str) -> Dict:
    """Describe a reader by key without instantiating it.

    Relies on ReaderFactory's static metadata and the reader's classmethods,
    so no reader instance is ever created.

    Raises:
        ValueError: if the reader is unknown or its dependencies are missing.
    """
    try:
        reader_class = ReaderFactory.get_reader_class(reader_key)
        static_meta = ReaderFactory.READER_METADATA.get(reader_key, {})
        strategies = reader_class.get_supported_chunking_strategies()  # type: ignore[attr-defined]
        content_types = reader_class.get_supported_content_types()  # type: ignore[attr-defined]
        return {
            "id": reader_key,
            "name": static_meta.get("name", reader_class.__name__),
            "description": static_meta.get("description", f"{reader_class.__name__} reader"),
            "chunking_strategies": [s.value for s in strategies],
            "content_types": [c.value for c in content_types],
        }
    except ImportError as e:
        # Surface missing optional dependencies as a ValueError the callers filter on.
        raise ValueError(f"Reader '{reader_key}' has missing dependencies: {str(e)}")
    except Exception as e:
        raise ValueError(f"Unknown reader: {reader_key}. Error: {str(e)}")
def get_reader_info_from_instance(reader: Reader, reader_id: str) -> Dict:
    """Describe an already-constructed reader instance.

    Raises:
        ValueError: if the instance cannot be introspected.
    """
    try:
        cls = reader.__class__
        return {
            "id": reader_id,
            "name": getattr(reader, "name", cls.__name__),
            "description": getattr(reader, "description", f"Custom {cls.__name__}"),
            "chunking_strategies": [s.value for s in cls.get_supported_chunking_strategies()],
            "content_types": [c.value for c in cls.get_supported_content_types()],
        }
    except Exception as e:
        raise ValueError(f"Failed to get info for reader '{reader_id}': {str(e)}")
def get_all_readers_info(knowledge_instance: Optional[Any] = None) -> List[Dict]:
    """Get information about all available readers, including custom readers.

    Custom readers registered on *knowledge_instance* are listed first and
    shadow factory readers sharing the same id. Readers that fail to load
    (missing dependencies, unknown keys) are skipped with a debug log.

    Args:
        knowledge_instance: Optional Knowledge instance to include custom readers from.

    Returns:
        List of reader info dictionaries (custom readers first, then factory readers).
    """
    collected: List[Dict] = []
    used_ids: set = set()

    # Custom readers first — they take precedence over factory readers.
    custom = knowledge_instance.get_readers() if knowledge_instance is not None else None
    if isinstance(custom, dict):
        for rid, rdr in custom.items():
            try:
                collected.append(get_reader_info_from_instance(rdr, rid))
                used_ids.add(rid)
            except ValueError as e:
                log_debug(f"Skipping custom reader '{rid}': {e}")

    # Factory readers, skipping ids already claimed by custom readers.
    for key in ReaderFactory.get_all_reader_keys():
        if key in used_ids:
            continue
        try:
            collected.append(get_reader_info(key))
        except ValueError as e:
            log_debug(f"Skipping reader '{key}': {e}")

    return collected
def get_content_types_to_readers_mapping(knowledge_instance: Optional[Any] = None) -> Dict[str, List[str]]:
    """Get mapping of content types to list of reader IDs that support them.

    Args:
        knowledge_instance: Optional Knowledge instance to include custom readers from.

    Returns:
        Dictionary mapping content type strings (ContentType enum values) to reader IDs.
    """
    mapping: Dict[str, List[str]] = {}
    for info in get_all_readers_info(knowledge_instance):
        reader_id = info["id"]
        for content_type in info.get("content_types", []):
            bucket = mapping.setdefault(content_type, [])
            # Avoid listing the same reader twice for one content type.
            if reader_id not in bucket:
                bucket.append(reader_id)
    return mapping
def get_chunker_info(chunker_key: str) -> Dict:
    """Get information about a chunker without instantiating it.

    Returns a dict with the chunker's key, class name, description (from the
    class docstring) and default chunk-size/overlap metadata inferred from the
    class ``__init__`` signature.

    Raises:
        ValueError: if the key is unknown or the chunker's dependencies are
            missing. NOTE(review): an unknown key is raised as
            "Unknown chunker key: ..." by the inner handler and then re-wrapped
            by the outer handler as "Unknown chunker: ... Error: ..." — confirm
            the double wrapping is intentional.
    """
    try:
        # Use chunking strategies directly
        from agno.knowledge.chunking.strategy import ChunkingStrategyType
        try:
            # Use the chunker key directly as the strategy type value
            strategy_type = ChunkingStrategyType.from_string(chunker_key)
            # Get class directly without instantiation
            chunker_class = _get_chunker_class(strategy_type)
            # Extract class information
            class_name = chunker_class.__name__
            docstring = chunker_class.__doc__ or f"{class_name} chunking strategy"
            # Check class __init__ signature for chunk_size and overlap parameters
            metadata = {}
            import inspect
            try:
                sig = inspect.signature(chunker_class.__init__)
                param_names = set(sig.parameters.keys())
                # If class has chunk_size or max_chunk_size parameter, set default chunk_size
                if "chunk_size" in param_names or "max_chunk_size" in param_names:
                    metadata["chunk_size"] = 5000
                # If class has overlap parameter, set default overlap
                if "overlap" in param_names:
                    metadata["chunk_overlap"] = 0
            except Exception:
                # If we can't inspect, skip metadata
                pass
            return {
                "key": chunker_key,
                "class_name": class_name,
                "name": chunker_key,
                "description": docstring.strip(),
                "strategy_type": strategy_type.value,
                "metadata": metadata,
            }
        except ValueError:
            raise ValueError(f"Unknown chunker key: {chunker_key}")
    except ImportError as e:
        # Skip chunkers with missing dependencies
        raise ValueError(f"Chunker '{chunker_key}' has missing dependencies: {str(e)}")
    except Exception as e:
        raise ValueError(f"Unknown chunker: {chunker_key}. Error: {str(e)}")
def get_all_content_types() -> List[ContentType]:
    """Return every ContentType enum member."""
    return [content_type for content_type in ContentType]
def get_all_chunkers_info() -> List[Dict]:
    """Collect info dicts for every chunking strategy, skipping broken ones."""
    from agno.knowledge.chunking.strategy import ChunkingStrategyType

    infos: List[Dict] = []
    for strategy in ChunkingStrategyType:
        key = strategy.value
        try:
            infos.append(get_chunker_info(key))
        except ValueError as e:
            log_debug(f"Skipping chunker '{key}': {e}")
    return infos
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/utils.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/models/metrics.py | """Backward-compatible re-exports from agno.metrics.
All metric classes now live in agno.metrics. This shim keeps
``from agno.models.metrics import Metrics`` working everywhere.
"""
from agno.metrics import ( # noqa: F401
BaseMetrics,
MessageMetrics,
Metrics,
ModelMetrics,
ModelType,
RunMetrics,
SessionMetrics,
ToolCallMetrics,
accumulate_eval_metrics,
accumulate_model_metrics,
merge_background_metrics,
)
# Explicit re-export for type checkers
__all__ = [
"BaseMetrics",
"MessageMetrics",
"Metrics",
"ModelMetrics",
"ModelType",
"RunMetrics",
"SessionMetrics",
"ToolCallMetrics",
"accumulate_eval_metrics",
"accumulate_model_metrics",
"merge_background_metrics",
]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/metrics.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/utils.py | from typing import Optional, Union
from agno.models.base import Model
def _get_model_class(model_id: str, model_provider: str) -> Model:
if model_provider == "aimlapi":
from agno.models.aimlapi import AIMLAPI
return AIMLAPI(id=model_id)
elif model_provider == "anthropic":
from agno.models.anthropic import Claude
return Claude(id=model_id)
elif model_provider == "aws-bedrock":
from agno.models.aws import AwsBedrock
return AwsBedrock(id=model_id)
elif model_provider == "aws-claude":
from agno.models.aws import Claude as AWSClaude
return AWSClaude(id=model_id)
elif model_provider == "azure-ai-foundry":
from agno.models.azure import AzureAIFoundry
return AzureAIFoundry(id=model_id)
elif model_provider == "azure-openai":
from agno.models.azure import AzureOpenAI
return AzureOpenAI(id=model_id)
elif model_provider == "cerebras":
from agno.models.cerebras import Cerebras
return Cerebras(id=model_id)
elif model_provider == "cerebras-openai":
from agno.models.cerebras import CerebrasOpenAI
return CerebrasOpenAI(id=model_id)
elif model_provider == "cohere":
from agno.models.cohere import Cohere
return Cohere(id=model_id)
elif model_provider == "cometapi":
from agno.models.cometapi import CometAPI
return CometAPI(id=model_id)
elif model_provider == "dashscope":
from agno.models.dashscope import DashScope
return DashScope(id=model_id)
elif model_provider == "deepinfra":
from agno.models.deepinfra import DeepInfra
return DeepInfra(id=model_id)
elif model_provider == "deepseek":
from agno.models.deepseek import DeepSeek
return DeepSeek(id=model_id)
elif model_provider == "fireworks":
from agno.models.fireworks import Fireworks
return Fireworks(id=model_id)
elif model_provider == "google":
from agno.models.google import Gemini
return Gemini(id=model_id)
elif model_provider == "groq":
from agno.models.groq import Groq
return Groq(id=model_id)
elif model_provider == "huggingface":
from agno.models.huggingface import HuggingFace
return HuggingFace(id=model_id)
elif model_provider == "ibm":
from agno.models.ibm import WatsonX
return WatsonX(id=model_id)
elif model_provider == "internlm":
from agno.models.internlm import InternLM
return InternLM(id=model_id)
elif model_provider == "langdb":
from agno.models.langdb import LangDB
return LangDB(id=model_id)
elif model_provider == "litellm":
from agno.models.litellm import LiteLLM
return LiteLLM(id=model_id)
elif model_provider == "litellm-openai":
from agno.models.litellm import LiteLLMOpenAI
return LiteLLMOpenAI(id=model_id)
elif model_provider == "llama-cpp":
from agno.models.llama_cpp import LlamaCpp
return LlamaCpp(id=model_id)
elif model_provider == "llama-openai":
from agno.models.meta import LlamaOpenAI
return LlamaOpenAI(id=model_id)
elif model_provider == "lmstudio":
from agno.models.lmstudio import LMStudio
return LMStudio(id=model_id)
elif model_provider == "meta":
from agno.models.meta import Llama
return Llama(id=model_id)
elif model_provider == "mistral":
from agno.models.mistral import MistralChat
return MistralChat(id=model_id)
elif model_provider == "moonshot":
from agno.models.moonshot import MoonShot
return MoonShot(id=model_id)
elif model_provider == "nebius":
from agno.models.nebius import Nebius
return Nebius(id=model_id)
elif model_provider == "neosantara":
from agno.models.neosantara import Neosantara
return Neosantara(id=model_id)
elif model_provider == "nexus":
from agno.models.nexus import Nexus
return Nexus(id=model_id)
elif model_provider == "nvidia":
from agno.models.nvidia import Nvidia
return Nvidia(id=model_id)
elif model_provider == "ollama":
from agno.models.ollama import Ollama
return Ollama(id=model_id)
elif model_provider == "openai":
from agno.models.openai import OpenAIChat
return OpenAIChat(id=model_id)
elif model_provider == "openai-responses":
from agno.models.openai import OpenAIResponses
return OpenAIResponses(id=model_id)
elif model_provider == "openrouter":
from agno.models.openrouter import OpenRouter
return OpenRouter(id=model_id)
elif model_provider == "perplexity":
from agno.models.perplexity import Perplexity
return Perplexity(id=model_id)
elif model_provider == "portkey":
from agno.models.portkey import Portkey
return Portkey(id=model_id)
elif model_provider == "requesty":
from agno.models.requesty import Requesty
return Requesty(id=model_id)
elif model_provider == "sambanova":
from agno.models.sambanova import Sambanova
return Sambanova(id=model_id)
elif model_provider == "siliconflow":
from agno.models.siliconflow import Siliconflow
return Siliconflow(id=model_id)
elif model_provider == "together":
from agno.models.together import Together
return Together(id=model_id)
elif model_provider == "vercel":
from agno.models.vercel import V0
return V0(id=model_id)
elif model_provider == "vertexai-claude":
from agno.models.vertexai.claude import Claude as VertexAIClaude
return VertexAIClaude(id=model_id)
elif model_provider == "vllm":
from agno.models.vllm import VLLM
return VLLM(id=model_id)
elif model_provider == "xai":
from agno.models.xai import xAI
return xAI(id=model_id)
else:
raise ValueError(f"Model provider '{model_provider}' is not supported.")
def _parse_model_string(model_string: str) -> Model:
if not model_string or not isinstance(model_string, str):
raise ValueError(f"Model string must be a non-empty string, got: {model_string}")
if ":" not in model_string:
raise ValueError(
f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
)
parts = model_string.split(":", 1)
if len(parts) != 2:
raise ValueError(
f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
)
model_provider, model_id = parts
model_provider = model_provider.strip().lower()
model_id = model_id.strip()
if not model_provider or not model_id:
raise ValueError(
f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
)
return _get_model_class(model_id, model_provider)
def get_model(model: Union[Model, str, None]) -> Optional[Model]:
    """Normalize *model* to a Model instance (or None).

    Accepts an existing Model, a '<provider>:<model_id>' string, or None.

    Raises:
        ValueError: for any other input type.
    """
    if model is None:
        return None
    if isinstance(model, Model):
        return model
    if isinstance(model, str):
        return _parse_model_string(model)
    raise ValueError("Model must be a Model instance, string, or None")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/utils.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/app.py | from contextlib import asynccontextmanager
from functools import partial
from os import getenv
from typing import Any, Dict, List, Literal, Optional, Union
from uuid import uuid4
from fastapi import APIRouter, FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from fastapi.routing import APIRoute
from httpx import HTTPStatusError
from rich import box
from rich.panel import Panel
from starlette.requests import Request
from agno.agent import Agent, RemoteAgent
from agno.db.base import AsyncBaseDb, BaseDb
from agno.knowledge.knowledge import Knowledge
from agno.os.config import (
AgentOSConfig,
AuthorizationConfig,
DatabaseConfig,
EvalsConfig,
EvalsDomainConfig,
KnowledgeConfig,
KnowledgeDatabaseConfig,
KnowledgeDomainConfig,
KnowledgeInstanceConfig,
MemoryConfig,
MemoryDomainConfig,
MetricsConfig,
MetricsDomainConfig,
SessionConfig,
SessionDomainConfig,
TracesConfig,
TracesDomainConfig,
)
from agno.os.interfaces.base import BaseInterface
from agno.os.router import get_base_router, get_websocket_router
from agno.os.routers.agents import get_agent_router
from agno.os.routers.approvals import get_approval_router
from agno.os.routers.components import get_components_router
from agno.os.routers.database import get_database_router
from agno.os.routers.evals import get_eval_router
from agno.os.routers.health import get_health_router
from agno.os.routers.home import get_home_router
from agno.os.routers.knowledge import get_knowledge_router
from agno.os.routers.memory import get_memory_router
from agno.os.routers.metrics import get_metrics_router
from agno.os.routers.registry import get_registry_router
from agno.os.routers.schedules import get_schedule_router
from agno.os.routers.session import get_session_router
from agno.os.routers.teams import get_team_router
from agno.os.routers.traces import get_traces_router
from agno.os.routers.workflows import get_workflow_router
from agno.os.settings import AgnoAPISettings
from agno.os.utils import (
_generate_knowledge_id,
collect_mcp_tools_from_team,
collect_mcp_tools_from_workflow,
find_conflicting_routes,
load_yaml_config,
resolve_origins,
setup_tracing_for_os,
update_cors_middleware,
)
from agno.registry import Registry
from agno.remote.base import RemoteDb, RemoteKnowledge
from agno.team import RemoteTeam, Team
from agno.utils.log import log_debug, log_error, log_info, log_warning
from agno.utils.string import generate_id, generate_id_from_name
from agno.workflow import RemoteWorkflow, Workflow
@asynccontextmanager
async def mcp_lifespan(_, mcp_tools):
    """Open every MCP tool connection on startup and close them all on shutdown."""
    # Startup: establish a connection for each MCP tool before serving requests.
    for mcp_tool in mcp_tools:
        await mcp_tool.connect()
    yield
    # Shutdown: release each connection in the same order it was opened.
    for mcp_tool in mcp_tools:
        await mcp_tool.close()
@asynccontextmanager
async def http_client_lifespan(_):
    """Manage httpx client lifecycle for proper connection pool cleanup."""
    # Imported lazily so this module loads even when the lifespan is never used.
    from agno.utils.http import aclose_default_clients

    yield
    # On shutdown, close the shared httpx clients to release their connection pools.
    await aclose_default_clients()
@asynccontextmanager
async def db_lifespan(app: FastAPI, agent_os: "AgentOS"):
    """Initializes databases in the event loop and closes them on shutdown."""
    # Provision tables only when auto-provisioning is enabled; async dbs must be
    # initialized inside the running event loop, hence this lifespan.
    if agent_os.auto_provision_dbs:
        agent_os._initialize_sync_databases()
        await agent_os._initialize_async_databases()
    yield
    # Always release every discovered database connection/pool on shutdown.
    await agent_os._close_databases()
@asynccontextmanager
async def scheduler_lifespan(app: FastAPI, agent_os: "AgentOS"):
    """Start and stop the scheduler poller."""
    # Imported lazily: the scheduler package is only needed when enabled.
    from agno.scheduler import ScheduleExecutor, SchedulePoller

    if agent_os._scheduler_base_url is None:
        log_info(
            "scheduler_base_url not set, using default http://127.0.0.1:7777. "
            "If your server is running on a different port, set scheduler_base_url to match."
        )
    base_url = agent_os._scheduler_base_url or "http://127.0.0.1:7777"
    internal_token = agent_os._internal_service_token
    # The executor calls back into the OS over HTTP, so a token is mandatory.
    if internal_token is None:
        raise ValueError("internal_service_token must be set when scheduler is enabled")
    executor = ScheduleExecutor(
        base_url=base_url,
        internal_service_token=internal_token,
    )
    poller = SchedulePoller(
        db=agent_os.db,
        executor=executor,
        poll_interval=agent_os._scheduler_poll_interval,
    )
    # Expose both on app.state so routes/middleware can reach them.
    app.state.scheduler_executor = executor
    app.state.scheduler_poller = poller
    await poller.start()
    yield
    await poller.stop()
def _combine_app_lifespans(lifespans: list) -> Any:
"""Combine multiple FastAPI app lifespan context managers into one."""
if len(lifespans) == 1:
return lifespans[0]
from contextlib import asynccontextmanager
@asynccontextmanager
async def combined_lifespan(app):
async def _run_nested(index: int):
if index >= len(lifespans):
yield
return
async with lifespans[index](app):
async for _ in _run_nested(index + 1):
yield
async for _ in _run_nested(0):
yield
return combined_lifespan
def _get_disabled_feature_router(prefix: str, tag: str, requires: str) -> APIRouter:
    """Return a stub router that returns 503 for a feature that requires a missing dependency."""
    detail = f"{tag} not available: pass a `{requires}` to AgentOS to enable this feature."
    stub_router = APIRouter(tags=[tag])

    async def _disabled() -> None:
        # Every request to this feature fails with a hint on how to enable it.
        raise HTTPException(status_code=503, detail=detail)

    # Cover both the bare prefix and any sub-path under it, for all common methods.
    all_methods = ["GET", "POST", "PUT", "PATCH", "DELETE"]
    stub_router.api_route(prefix, methods=all_methods)(_disabled)
    stub_router.api_route(f"{prefix}/{{path:path}}", methods=all_methods)(_disabled)
    return stub_router
class AgentOS:
    def __init__(
        self,
        id: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        version: Optional[str] = None,
        db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
        agents: Optional[List[Union[Agent, RemoteAgent]]] = None,
        teams: Optional[List[Union[Team, RemoteTeam]]] = None,
        workflows: Optional[List[Union[Workflow, RemoteWorkflow]]] = None,
        knowledge: Optional[List[Knowledge]] = None,
        interfaces: Optional[List[BaseInterface]] = None,
        a2a_interface: bool = False,
        authorization: bool = False,
        authorization_config: Optional[AuthorizationConfig] = None,
        cors_allowed_origins: Optional[List[str]] = None,
        config: Optional[Union[str, AgentOSConfig]] = None,
        settings: Optional[AgnoAPISettings] = None,
        lifespan: Optional[Any] = None,
        enable_mcp_server: bool = False,
        base_app: Optional[FastAPI] = None,
        on_route_conflict: Literal["preserve_agentos", "preserve_base_app", "error"] = "preserve_agentos",
        tracing: bool = False,
        auto_provision_dbs: bool = True,
        run_hooks_in_background: bool = False,
        telemetry: bool = True,
        registry: Optional[Registry] = None,
        scheduler: bool = False,
        scheduler_poll_interval: int = 15,
        scheduler_base_url: Optional[str] = None,
        internal_service_token: Optional[str] = None,
    ):
        """Initialize AgentOS.

        Args:
            id: Unique identifier for this AgentOS instance
            name: Name of the AgentOS instance
            description: Description of the AgentOS instance
            version: Version of the AgentOS instance
            db: Default database for the AgentOS instance. Agents, teams and workflows with no db will use this one.
            agents: List of agents to include in the OS
            teams: List of teams to include in the OS
            workflows: List of workflows to include in the OS
            knowledge: List of knowledge bases to include in the OS
            interfaces: List of interfaces to include in the OS
            a2a_interface: Whether to expose the OS agents and teams in an A2A server
            config: Configuration file path or AgentOSConfig instance
            settings: API settings for the OS
            lifespan: Optional lifespan context manager for the FastAPI app
            enable_mcp_server: Whether to enable MCP (Model Context Protocol)
            base_app: Optional base FastAPI app to use for the AgentOS. All routes and middleware will be added to this app.
            on_route_conflict: What to do when a route conflict is detected in case a custom base_app is provided.
            auto_provision_dbs: Whether to automatically provision databases
            authorization: Whether to enable authorization
            authorization_config: Configuration for the authorization middleware
            cors_allowed_origins: List of allowed CORS origins (will be merged with default Agno domains)
            tracing: If True, enables OpenTelemetry tracing for all agents and teams in the OS
            run_hooks_in_background: If True, run agent/team pre/post hooks as FastAPI background tasks (non-blocking)
            telemetry: Whether to enable telemetry
            registry: Optional registry to use for the AgentOS
            scheduler: Whether to enable the cron scheduler
            scheduler_poll_interval: Seconds between scheduler poll cycles (default: 15)
            scheduler_base_url: Base URL for scheduler HTTP calls (default: http://127.0.0.1:7777)
            internal_service_token: Token for scheduler-to-OS auth (auto-generated if not provided)

        Raises:
            ValueError: If no agents, teams, workflows, knowledge bases or db are provided.
        """
        # An OS with nothing to serve and nothing to store is a configuration error.
        if not agents and not workflows and not teams and not knowledge and not db:
            raise ValueError("Either agents, teams, workflows, knowledge bases or a database must be provided.")
        # A string config is treated as a path to a YAML configuration file.
        self.config = load_yaml_config(config) if isinstance(config, str) else config
        self.agents: Optional[List[Union[Agent, RemoteAgent]]] = agents
        self.workflows: Optional[List[Union[Workflow, RemoteWorkflow]]] = workflows
        self.teams: Optional[List[Union[Team, RemoteTeam]]] = teams
        self.a2a_interface = a2a_interface
        self.knowledge = knowledge
        self.settings: AgnoAPISettings = settings or AgnoAPISettings()
        self.auto_provision_dbs = auto_provision_dbs
        # _app_set records whether the caller supplied their own FastAPI app;
        # get_app() uses it to decide between augmenting and building from scratch.
        self._app_set = False
        if base_app:
            self.base_app: Optional[FastAPI] = base_app
            self._app_set = True
            self.on_route_conflict = on_route_conflict
        else:
            self.base_app = None
            self._app_set = False
            self.on_route_conflict = on_route_conflict
        self.interfaces = interfaces or []
        self.name = name
        self.id = id
        if not self.id:
            # Derive a stable id from the name when possible, else a random UUID.
            self.id = generate_id(self.name) if self.name else str(uuid4())
        self.version = version
        self.description = description
        self.db = db
        self.telemetry = telemetry
        self.tracing = tracing
        self.enable_mcp_server = enable_mcp_server
        self.lifespan = lifespan
        self.registry = registry
        # RBAC
        self.authorization = authorization
        self.authorization_config = authorization_config
        # CORS configuration - merge user-provided origins with defaults from settings
        self.cors_allowed_origins = resolve_origins(cors_allowed_origins, self.settings.cors_origin_list)
        # If True, run agent/team hooks as FastAPI background tasks
        self.run_hooks_in_background = run_hooks_in_background
        # Scheduler configuration
        self._scheduler_enabled = scheduler
        self._scheduler_poll_interval = scheduler_poll_interval
        self._scheduler_base_url = scheduler_base_url
        if self._scheduler_enabled and not internal_service_token:
            import secrets

            # Auto-generate a token so the scheduler can authenticate against the OS.
            internal_service_token = secrets.token_urlsafe(32)
        self._internal_service_token = internal_service_token
        # List of all MCP tools used inside the AgentOS
        self.mcp_tools: List[Any] = []
        self._mcp_app: Optional[Any] = None
        self._initialize_agents()
        self._initialize_teams()
        self._initialize_workflows()
        # Populate registry with code-defined agents/teams
        self._populate_registry()
        # Check for duplicate IDs
        self._raise_if_duplicate_ids()
        if self.tracing:
            self._setup_tracing()
        if self.telemetry:
            from agno.api.os import OSLaunch, log_os_telemetry

            log_os_telemetry(launch=OSLaunch(os_id=self.id, data=self._get_telemetry_data()))
def _add_agent_os_to_lifespan_function(self, lifespan):
"""
Inspect a lifespan function and wrap it to pass agent_os if it accepts it.
Returns:
A wrapped lifespan that passes agent_os if the lifespan function expects it.
"""
# Getting the actual function inside the lifespan
lifespan_function = lifespan
if hasattr(lifespan, "__wrapped__"):
lifespan_function = lifespan.__wrapped__
try:
from inspect import signature
# Inspecting the lifespan function signature to find its parameters
sig = signature(lifespan_function)
params = list(sig.parameters.keys())
# If the lifespan function expects the 'agent_os' parameter, add it
if "agent_os" in params:
return partial(lifespan, agent_os=self)
else:
return lifespan
except (ValueError, TypeError):
return lifespan
    def resync(self, app: FastAPI) -> None:
        """Resync the AgentOS to discover, initialize and configure: agents, teams, workflows, databases and knowledge bases.

        Args:
            app: The FastAPI app whose routes are rebuilt after re-discovery.
        """
        # Re-run the same initialization sequence performed in __init__ ...
        self._initialize_agents()
        self._initialize_teams()
        self._initialize_workflows()
        # Populate registry with code-defined agents/teams
        self._populate_registry()
        # Check for duplicate IDs
        self._raise_if_duplicate_ids()
        # ... then re-discover databases/knowledge so new components are picked up.
        self._auto_discover_databases()
        self._auto_discover_knowledge_instances()
        if self.enable_mcp_server:
            from agno.os.mcp import get_mcp_server

            self._mcp_app = get_mcp_server(self)
        self._reprovision_routers(app=app)
def _reprovision_routers(self, app: FastAPI) -> None:
"""Re-provision all routes for the AgentOS."""
updated_routers = [
get_home_router(self),
get_session_router(dbs=self.dbs),
get_memory_router(dbs=self.dbs),
get_eval_router(dbs=self.dbs, agents=self.agents, teams=self.teams),
get_metrics_router(dbs=self.dbs),
get_knowledge_router(knowledge_instances=self.knowledge_instances),
get_traces_router(dbs=self.dbs),
get_database_router(self, settings=self.settings),
]
# Routes that require a database
if self.db is not None:
if isinstance(self.db, BaseDb):
updated_routers.append(get_components_router(os_db=self.db, registry=self.registry))
else:
updated_routers.append(_get_disabled_feature_router("/components", "Components", "sync db (BaseDb)"))
updated_routers.append(get_schedule_router(os_db=self.db, settings=self.settings))
updated_routers.append(get_approval_router(os_db=self.db, settings=self.settings))
else:
for prefix, tag in [("/components", "Components"), ("/schedules", "Schedules"), ("/approvals", "Approvals")]:
updated_routers.append(_get_disabled_feature_router(prefix, tag, "db"))
# Registry router
if self.registry is not None:
updated_routers.append(get_registry_router(registry=self.registry))
else:
updated_routers.append(_get_disabled_feature_router("/registry", "Registry", "registry"))
# Clear all previously existing routes
app.router.routes = [
route
for route in app.router.routes
if hasattr(route, "path")
and route.path in ["/docs", "/redoc", "/openapi.json", "/docs/oauth2-redirect"]
or route.path.startswith("/mcp") # type: ignore
]
# Add the built-in routes
self._add_built_in_routes(app=app)
# Add the updated routes
for router in updated_routers:
self._add_router(app, router)
# Mount MCP if needed
if self.enable_mcp_server and self._mcp_app:
app.mount("/", self._mcp_app)
    def _add_built_in_routes(self, app: FastAPI) -> None:
        """Add all AgentOS built-in routes to the given app.

        Args:
            app: The FastAPI app that receives the core agent/team/workflow routers.
        """
        # Add the home router if MCP server is not enabled
        if not self.enable_mcp_server:
            self._add_router(app, get_home_router(self))
        self._add_router(app, get_health_router(health_endpoint="/health"))
        self._add_router(app, get_base_router(self, settings=self.settings))
        self._add_router(app, get_agent_router(self, settings=self.settings, registry=self.registry))
        self._add_router(app, get_team_router(self, settings=self.settings, registry=self.registry))
        self._add_router(app, get_workflow_router(self, settings=self.settings))
        self._add_router(app, get_websocket_router(self, settings=self.settings))
        # Add A2A interface if relevant
        has_a2a_interface = False
        for interface in self.interfaces:
            # Class-name check avoids importing the A2A interface unless present.
            if not has_a2a_interface and interface.__class__.__name__ == "A2A":
                has_a2a_interface = True
            interface_router = interface.get_router()
            self._add_router(app, interface_router)
        # When a2a_interface=True and no A2A interface was configured, create one.
        if self.a2a_interface and not has_a2a_interface:
            from agno.os.interfaces.a2a import A2A

            a2a_interface = A2A(agents=self.agents, teams=self.teams, workflows=self.workflows)
            self.interfaces.append(a2a_interface)
            self._add_router(app, a2a_interface.get_router())
def _raise_if_duplicate_ids(self) -> None:
"""Check for duplicate IDs within each entity type.
Raises:
ValueError: If duplicate IDs are found within the same entity type
"""
duplicate_ids: List[str] = []
for entities in [self.agents, self.teams, self.workflows]:
if not entities:
continue
seen_ids: set[str] = set()
for entity in entities:
entity_id = entity.id
if entity_id is None:
continue
if entity_id in seen_ids:
if entity_id not in duplicate_ids:
duplicate_ids.append(entity_id)
else:
seen_ids.add(entity_id)
if duplicate_ids:
raise ValueError(f"Duplicate IDs found in AgentOS: {', '.join(repr(id_) for id_ in duplicate_ids)}")
def _make_app(self, lifespan: Optional[Any] = None) -> FastAPI:
return FastAPI(
title=self.name or "Agno AgentOS",
version=self.version or "1.0.0",
description=self.description or "Your multi-agent operating system.",
docs_url="/docs" if self.settings.docs_enabled else None,
redoc_url="/redoc" if self.settings.docs_enabled else None,
openapi_url="/openapi.json" if self.settings.docs_enabled else None,
lifespan=lifespan,
)
    def _initialize_agents(self) -> None:
        """Initialize and configure all agents for AgentOS usage."""
        if not self.agents:
            return
        for agent in self.agents:
            # Remote agents are managed by another OS; nothing to configure here.
            if isinstance(agent, RemoteAgent):
                continue
            # Set the default db to agents without their own
            if self.db is not None and agent.db is None:
                agent.db = self.db
            # Track all MCP tools to later handle their connection
            if agent.tools and isinstance(agent.tools, list):
                for tool in agent.tools:
                    # Checking if the tool is an instance of MCPTools, MultiMCPTools, or a subclass of those
                    # (MRO name check avoids importing the MCP package here).
                    if hasattr(type(tool), "__mro__"):
                        mro_names = {cls.__name__ for cls in type(tool).__mro__}
                        if mro_names & {"MCPTools", "MultiMCPTools"}:
                            if tool not in self.mcp_tools:
                                self.mcp_tools.append(tool)
            agent.initialize_agent()
            # Required for the built-in routes to work
            agent.store_events = True
            # Propagate run_hooks_in_background setting from AgentOS to agents
            agent._run_hooks_in_background = self.run_hooks_in_background
    def _initialize_teams(self) -> None:
        """Initialize and configure all teams for AgentOS usage."""
        if not self.teams:
            return
        for team in self.teams:
            # Remote teams are managed by another OS; nothing to configure here.
            if isinstance(team, RemoteTeam):
                continue
            # Set the default db to teams without their own
            if self.db is not None and team.db is None:
                team.db = self.db
            # Track all MCP tools recursively
            collect_mcp_tools_from_team(team, self.mcp_tools)
            team.initialize_team()
            if isinstance(team.members, list):
                for member in team.members:
                    if isinstance(member, Agent):
                        # NOTE(review): team_id is cleared before initializing the
                        # member agent — presumably so it is also usable standalone;
                        # confirm against initialize_agent semantics.
                        member.team_id = None
                        member.initialize_agent()
                    elif isinstance(member, Team):
                        member.initialize_team()
            # Required for the built-in routes to work
            team.store_events = True
            # Propagate run_hooks_in_background setting to team and all nested members
            team.propagate_run_hooks_in_background(self.run_hooks_in_background)
    def _initialize_workflows(self) -> None:
        """Initialize and configure all workflows for AgentOS usage."""
        if not self.workflows:
            return
        for workflow in self.workflows:
            # Remote workflows are managed by another OS; nothing to configure here.
            if isinstance(workflow, RemoteWorkflow):
                continue
            # Set the default db to workflows without their own
            if self.db is not None and workflow.db is None:
                workflow.db = self.db
            # Track MCP tools recursively in workflow members
            collect_mcp_tools_from_workflow(workflow, self.mcp_tools)
            if not workflow.id:
                # Workflows need a stable id for routing; derive one from the name.
                workflow.id = generate_id_from_name(workflow.name)
            # Required for the built-in routes to work
            workflow.store_events = True
            # Propagate run_hooks_in_background setting to workflow and all its step agents/teams
            workflow.propagate_run_hooks_in_background(self.run_hooks_in_background)
def _populate_registry(self) -> None:
"""Populate the registry with code-defined agents and teams.
This ensures that workflows loaded from DB can rehydrate their steps
using code-defined agents/teams via the registry.
"""
if self.registry is None:
self.registry = Registry()
if self.agents:
existing_agent_ids = {getattr(a, "id", None) for a in self.registry.agents}
for agent in self.agents:
agent_id = getattr(agent, "id", None)
if not isinstance(agent, RemoteAgent) and agent_id is not None and agent_id not in existing_agent_ids:
self.registry.agents.append(agent)
existing_agent_ids.add(agent_id)
if self.teams:
existing_team_ids = {getattr(t, "id", None) for t in self.registry.teams}
for team in self.teams:
team_id = getattr(team, "id", None)
if not isinstance(team, RemoteTeam) and team_id is not None and team_id not in existing_team_ids:
self.registry.teams.append(team)
existing_team_ids.add(team_id)
def _setup_tracing(self) -> None:
"""Set up OpenTelemetry tracing for this AgentOS.
Uses the AgentOS db if provided, otherwise falls back to the first available
database from agents/teams/workflows.
"""
# Use AgentOS db if explicitly provided
if self.db is not None:
setup_tracing_for_os(db=self.db)
return
# Fall back to finding the first available database
db: Optional[Union[BaseDb, AsyncBaseDb, RemoteDb]] = None
for agent in self.agents or []:
if agent.db:
db = agent.db
break
if db is None:
for team in self.teams or []:
if team.db:
db = team.db
break
if db is None:
for workflow in self.workflows or []:
if workflow.db:
db = workflow.db
break
if db is None:
log_warning(
"tracing=True but no database found. "
"Provide 'db' parameter to AgentOS or to at least one agent/team/workflow."
)
return
setup_tracing_for_os(db=db)
    def get_app(self) -> FastAPI:
        """Build (or augment) and return the FastAPI app for this AgentOS.

        When a base_app was provided, routes/lifespans are merged into it;
        otherwise a fresh app is created. In both cases all built-in and
        domain routers, exception handlers, CORS and middleware are installed.
        """
        if self.base_app:
            fastapi_app = self.base_app
            # Initialize MCP server if enabled
            if self.enable_mcp_server:
                from agno.os.mcp import get_mcp_server

                self._mcp_app = get_mcp_server(self)
            # Collect all lifespans that need to be combined
            lifespans = []
            # The user provided lifespan
            if self.lifespan:
                # Wrap the user lifespan with agent_os parameter
                wrapped_lifespan = self._add_agent_os_to_lifespan_function(self.lifespan)
                lifespans.append(wrapped_lifespan)
            # The provided app's existing lifespan
            if fastapi_app.router.lifespan_context:
                lifespans.append(fastapi_app.router.lifespan_context)
            # The MCP tools lifespan
            if self.mcp_tools:
                lifespans.append(partial(mcp_lifespan, mcp_tools=self.mcp_tools))
            # The /mcp server lifespan
            if self.enable_mcp_server and self._mcp_app:
                lifespans.append(self._mcp_app.lifespan)
            # The async database lifespan
            lifespans.append(partial(db_lifespan, agent_os=self))
            # The scheduler lifespan (after db so tables exist)
            if self._scheduler_enabled and self.db is not None:
                lifespans.append(partial(scheduler_lifespan, agent_os=self))
            # The httpx client cleanup lifespan (should be last to close after other lifespans)
            lifespans.append(http_client_lifespan)
            # Combine lifespans and set them in the app
            if lifespans:
                fastapi_app.router.lifespan_context = _combine_app_lifespans(lifespans)
        else:
            lifespans = []
            # User provided lifespan
            if self.lifespan:
                lifespans.append(self._add_agent_os_to_lifespan_function(self.lifespan))
            # MCP tools lifespan
            if self.mcp_tools:
                lifespans.append(partial(mcp_lifespan, mcp_tools=self.mcp_tools))
            # MCP server lifespan
            if self.enable_mcp_server:
                from agno.os.mcp import get_mcp_server

                self._mcp_app = get_mcp_server(self)
                lifespans.append(self._mcp_app.lifespan)
            # Async database initialization lifespan
            lifespans.append(partial(db_lifespan, agent_os=self))  # type: ignore
            # The scheduler lifespan (after db so tables exist)
            if self._scheduler_enabled and self.db is not None:
                lifespans.append(partial(scheduler_lifespan, agent_os=self))
            # The httpx client cleanup lifespan (should be last to close after other lifespans)
            lifespans.append(http_client_lifespan)
            final_lifespan = _combine_app_lifespans(lifespans) if lifespans else None
            fastapi_app = self._make_app(lifespan=final_lifespan)
        # From here on the flow is shared by both branches.
        self._add_built_in_routes(app=fastapi_app)
        self._auto_discover_databases()
        self._auto_discover_knowledge_instances()
        routers = [
            get_session_router(dbs=self.dbs),
            get_memory_router(dbs=self.dbs),
            get_eval_router(dbs=self.dbs, agents=self.agents, teams=self.teams),
            get_metrics_router(dbs=self.dbs),
            get_knowledge_router(knowledge_instances=self.knowledge_instances),
            get_traces_router(dbs=self.dbs),
            get_database_router(self, settings=self.settings),
        ]
        # Routes that require a database
        if self.db is not None:
            if isinstance(self.db, BaseDb):
                routers.append(get_components_router(os_db=self.db, registry=self.registry))
            else:
                # Components need a sync db; async-only dbs get a 503 stub.
                routers.append(_get_disabled_feature_router("/components", "Components", "sync db (BaseDb)"))
            routers.append(get_schedule_router(os_db=self.db, settings=self.settings))
            routers.append(get_approval_router(os_db=self.db, settings=self.settings))
        else:
            log_debug("Components, Scheduler, and Approval routers not enabled: requires a db to be provided to AgentOS")
            for prefix, tag in [("/components", "Components"), ("/schedules", "Schedules"), ("/approvals", "Approvals")]:
                routers.append(_get_disabled_feature_router(prefix, tag, "db"))
        # Registry router
        if self.registry is not None:
            routers.append(get_registry_router(registry=self.registry))
        else:
            log_debug("Registry router not enabled: requires a registry to be provided to AgentOS")
            routers.append(_get_disabled_feature_router("/registry", "Registry", "registry"))
        for router in routers:
            self._add_router(fastapi_app, router)
        # Mount MCP if needed
        if self.enable_mcp_server and self._mcp_app:
            fastapi_app.mount("/", self._mcp_app)
        # Custom exception handlers are only installed on apps we created ourselves.
        if not self._app_set:

            @fastapi_app.exception_handler(RequestValidationError)
            async def validation_exception_handler(_: Request, exc: RequestValidationError) -> JSONResponse:
                log_error(f"Validation error (422): {exc.errors()}")
                return JSONResponse(
                    status_code=422,
                    content={"detail": exc.errors()},
                )

            @fastapi_app.exception_handler(HTTPException)
            async def http_exception_handler(_, exc: HTTPException) -> JSONResponse:
                log_error(f"HTTP exception: {exc.status_code} {exc.detail}")
                return JSONResponse(
                    status_code=exc.status_code,
                    content={"detail": str(exc.detail)},
                )

            @fastapi_app.exception_handler(HTTPStatusError)
            async def http_status_error_handler(_: Request, exc: HTTPStatusError) -> JSONResponse:
                status_code = exc.response.status_code
                detail = exc.response.text
                log_error(f"Downstream server returned HTTP status error: {status_code} {detail}")
                return JSONResponse(
                    status_code=status_code,
                    content={"detail": detail},
                )

            @fastapi_app.exception_handler(Exception)
            async def general_exception_handler(_: Request, exc: Exception) -> JSONResponse:
                import traceback

                log_error(f"Unhandled exception:\n{traceback.format_exc(limit=5)}")
                return JSONResponse(
                    status_code=getattr(exc, "status_code", 500),
                    content={"detail": str(exc)},
                )

        # Update CORS middleware
        update_cors_middleware(fastapi_app, self.cors_allowed_origins)  # type: ignore
        # Set agent_os_id and cors_allowed_origins on app state
        # This allows middleware (like JWT) to access these values
        fastapi_app.state.agent_os_id = self.id
        fastapi_app.state.cors_allowed_origins = self.cors_allowed_origins
        # Store internal service token for scheduler auth bypass
        if self._internal_service_token:
            fastapi_app.state.internal_service_token = self._internal_service_token
        # Add JWT middleware if authorization is enabled
        if self.authorization:
            # Set authorization_enabled flag on settings so security key validation is skipped
            self.settings.authorization_enabled = True
            jwt_configured = bool(getenv("JWT_VERIFICATION_KEY") or getenv("JWT_JWKS_FILE"))
            security_key_set = bool(self.settings.os_security_key)
            if jwt_configured and security_key_set:
                log_warning(
                    "Both JWT configuration (JWT_VERIFICATION_KEY or JWT_JWKS_FILE) and OS_SECURITY_KEY are set. "
                    "With authorization=True, only JWT authorization will be used. "
                    "Consider removing OS_SECURITY_KEY from your environment."
                )
            self._add_jwt_middleware(fastapi_app)
        # Add trailing slash normalization middleware
        from agno.os.middleware.trailing_slash import TrailingSlashMiddleware

        fastapi_app.add_middleware(TrailingSlashMiddleware)
        return fastapi_app
    def _add_jwt_middleware(self, fastapi_app: FastAPI) -> None:
        """Install JWT authorization middleware on the app.

        Args:
            fastapi_app: The app to attach the JWT middleware and validator to.
        """
        from agno.os.middleware.jwt import JWTMiddleware, JWTValidator

        # Defaults used when no authorization_config is provided.
        verify_audience = False
        jwks_file = None
        verification_keys = None
        algorithm = "RS256"
        if self.authorization_config:
            algorithm = self.authorization_config.algorithm or "RS256"
            verification_keys = self.authorization_config.verification_keys
            jwks_file = self.authorization_config.jwks_file
            verify_audience = self.authorization_config.verify_audience or False
        log_info(f"Adding JWT middleware for authorization (algorithm: {algorithm})")
        # Create validator and store on app.state for WebSocket access
        jwt_validator = JWTValidator(
            verification_keys=verification_keys,
            jwks_file=jwks_file,
            algorithm=algorithm,
        )
        fastapi_app.state.jwt_validator = jwt_validator
        # Add middleware to stack
        fastapi_app.add_middleware(
            JWTMiddleware,
            verification_keys=verification_keys,
            jwks_file=jwks_file,
            algorithm=algorithm,
            authorization=self.authorization,
            verify_audience=verify_audience,
        )
def get_routes(self) -> List[Any]:
"""Retrieve all routes from the FastAPI app.
Returns:
List[Any]: List of routes included in the FastAPI app.
"""
app = self.get_app()
return app.routes
def _add_router(self, fastapi_app: FastAPI, router: APIRouter) -> None:
"""Add a router to the FastAPI app, avoiding route conflicts.
Args:
router: The APIRouter to add
"""
conflicts = find_conflicting_routes(fastapi_app, router)
conflicting_routes = [conflict["route"] for conflict in conflicts]
if conflicts and self._app_set:
if self.on_route_conflict == "preserve_base_app":
# Skip conflicting AgentOS routes, prefer user's existing routes
for conflict in conflicts:
methods_str = ", ".join(conflict["methods"]) # type: ignore
log_debug(
f"Skipping conflicting AgentOS route: {methods_str} {conflict['path']} - "
f"Using existing custom route instead"
)
# Create a new router without the conflicting routes
filtered_router = APIRouter()
for route in router.routes:
if route not in conflicting_routes:
filtered_router.routes.append(route)
# Use the filtered router if it has any routes left
if filtered_router.routes:
fastapi_app.include_router(filtered_router)
elif self.on_route_conflict == "preserve_agentos":
# Log warnings but still add all routes (AgentOS routes will override)
for conflict in conflicts:
methods_str = ", ".join(conflict["methods"]) # type: ignore
log_warning(
f"Route conflict detected: {methods_str} {conflict['path']} - "
f"AgentOS route will override existing custom route"
)
# Remove conflicting routes
for route in fastapi_app.routes:
for conflict in conflicts:
if isinstance(route, APIRoute):
if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]): # type: ignore
fastapi_app.routes.pop(fastapi_app.routes.index(route))
fastapi_app.include_router(router)
elif self.on_route_conflict == "error":
conflicting_paths = [conflict["path"] for conflict in conflicts]
raise ValueError(f"Route conflict detected: {conflicting_paths}")
else:
# No conflicts, add router normally
fastapi_app.include_router(router)
def _get_telemetry_data(self) -> Dict[str, Any]:
"""Get the telemetry data for the OS"""
agent_ids = []
team_ids = []
workflow_ids = []
for agent in self.agents or []:
agent_ids.append(agent.id)
for team in self.teams or []:
team_ids.append(team.id)
for workflow in self.workflows or []:
workflow_ids.append(workflow.id)
return {
"agents": agent_ids,
"teams": team_ids,
"workflows": workflow_ids,
"interfaces": [interface.type for interface in self.interfaces] if self.interfaces else None,
}
    def _auto_discover_databases(self) -> None:
        """Auto-discover and initialize the databases used by all contextual agents, teams and workflows.

        Populates ``self.dbs`` (session/general dbs) and ``self.knowledge_dbs``
        (content dbs) keyed by db id, validating each registration.
        """
        dbs: Dict[str, List[Union[BaseDb, AsyncBaseDb, RemoteDb]]] = {}
        knowledge_dbs: Dict[
            str, List[Union[BaseDb, AsyncBaseDb, RemoteDb]]
        ] = {}  # Track databases specifically used for knowledge
        for agent in self.agents or []:
            if agent.db:
                self._register_db_with_validation(dbs, agent.db)
            # getattr-guarded: knowledge may be None or lack a contents_db.
            agent_contents_db = getattr(agent.knowledge, "contents_db", None) if agent.knowledge else None
            if agent_contents_db:
                self._register_db_with_validation(knowledge_dbs, agent_contents_db)
        for team in self.teams or []:
            if team.db:
                self._register_db_with_validation(dbs, team.db)
            team_contents_db = getattr(team.knowledge, "contents_db", None) if team.knowledge else None
            if team_contents_db:
                self._register_db_with_validation(knowledge_dbs, team_contents_db)
        for workflow in self.workflows or []:
            if workflow.db:
                self._register_db_with_validation(dbs, workflow.db)
        for knowledge_base in self.knowledge or []:
            if knowledge_base.contents_db:
                self._register_db_with_validation(knowledge_dbs, knowledge_base.contents_db)
        for interface in self.interfaces or []:
            if interface.agent and interface.agent.db:
                self._register_db_with_validation(dbs, interface.agent.db)
            elif interface.team and interface.team.db:
                self._register_db_with_validation(dbs, interface.team.db)
        # Register AgentOS db if provided
        if self.db is not None:
            self._register_db_with_validation(dbs, self.db)
        self.dbs = dbs
        self.knowledge_dbs = knowledge_dbs
        # Initialize all discovered databases
        if self.auto_provision_dbs:
            # Actual initialization happens in the db lifespan, inside the event loop.
            self._pending_async_db_init = True
def _initialize_sync_databases(self) -> None:
"""Initialize sync databases."""
from itertools import chain
unique_dbs = list(
{
id(db): db
for db in chain(
chain.from_iterable(self.dbs.values()), chain.from_iterable(self.knowledge_dbs.values())
)
}.values()
)
for db in unique_dbs:
if isinstance(db, AsyncBaseDb):
continue # Skip async dbs
try:
if hasattr(db, "_create_all_tables") and callable(db._create_all_tables):
db._create_all_tables()
except Exception as e:
log_warning(f"Failed to initialize {db.__class__.__name__} (id: {db.id}): {e}")
async def _initialize_async_databases(self) -> None:
"""Initialize async databases."""
from itertools import chain
unique_dbs = list(
{
id(db): db
for db in chain(
chain.from_iterable(self.dbs.values()), chain.from_iterable(self.knowledge_dbs.values())
)
}.values()
)
for db in unique_dbs:
if not isinstance(db, AsyncBaseDb):
continue # Skip sync dbs
try:
if hasattr(db, "_create_all_tables") and callable(db._create_all_tables):
await db._create_all_tables()
except Exception as e:
log_warning(f"Failed to initialize async {db.__class__.__name__} (id: {db.id}): {e}")
async def _close_databases(self) -> None:
"""Close all database connections and release connection pools."""
from itertools import chain
if not hasattr(self, "dbs") or not hasattr(self, "knowledge_dbs"):
return
unique_dbs = list(
{
id(db): db
for db in chain(
chain.from_iterable(self.dbs.values()), chain.from_iterable(self.knowledge_dbs.values())
)
}.values()
)
for db in unique_dbs:
try:
if hasattr(db, "close") and callable(db.close):
if isinstance(db, AsyncBaseDb):
await db.close()
else:
db.close()
except Exception as e:
log_warning(f"Failed to close {db.__class__.__name__} (id: {db.id}): {e}")
def _get_db_table_names(self, db: BaseDb) -> Dict[str, str]:
"""Get the table names for a database"""
table_names = {
"session_table_name": db.session_table_name,
"culture_table_name": db.culture_table_name,
"memory_table_name": db.memory_table_name,
"metrics_table_name": db.metrics_table_name,
"evals_table_name": db.eval_table_name,
"knowledge_table_name": db.knowledge_table_name,
}
return {k: v for k, v in table_names.items() if v is not None}
def _register_db_with_validation(
self,
registered_dbs: Dict[str, List[Union[BaseDb, AsyncBaseDb, RemoteDb]]],
db: Union[BaseDb, AsyncBaseDb, RemoteDb],
) -> None:
"""Register a database in the contextual OS after validating it is not conflicting with registered databases"""
if db.id in registered_dbs:
registered_dbs[db.id].append(db)
else:
registered_dbs[db.id] = [db]
def _auto_discover_knowledge_instances(self) -> None:
"""Auto-discover the knowledge instances used by all contextual agents, teams and workflows."""
seen_instances: set[int] = set() # Track by object identity
knowledge_instances: List[Union[Knowledge, RemoteKnowledge]] = []
def _add_knowledge_if_not_duplicate(knowledge: Any) -> None:
"""Add knowledge instance if it's not already in the list (by object identity)."""
# Only handle Knowledge and RemoteKnowledge instances that have contents_db
contents_db = getattr(knowledge, "contents_db", None)
if not contents_db:
return
# Deduplicate by object identity to allow multiple knowledge instances with the same contents_db
if id(knowledge) in seen_instances:
return
seen_instances.add(id(knowledge))
# Only append if it's a Knowledge or RemoteKnowledge instance
if isinstance(knowledge, (Knowledge, RemoteKnowledge)):
knowledge_instances.append(knowledge)
for agent in self.agents or []:
if agent.knowledge:
_add_knowledge_if_not_duplicate(agent.knowledge)
for team in self.teams or []:
if team.knowledge:
_add_knowledge_if_not_duplicate(team.knowledge)
for knowledge_base in self.knowledge or []:
_add_knowledge_if_not_duplicate(knowledge_base)
self.knowledge_instances = knowledge_instances
# Validate that all knowledge instances have unique names
# Duplicate names cause content isolation issues since linked_to uses the name
self._validate_knowledge_instance_names()
def _validate_knowledge_instance_names(self) -> None:
"""Validate that all knowledge instances have unique names.
Raises:
ValueError: If duplicate knowledge instance names are detected.
"""
# Track seen combinations of (name, db_id, table) to detect true duplicates
# Same name is OK if using different contents_db or different table
seen_combinations: dict[tuple[str, str, str], str] = {} # (name, db_id, table) -> description
duplicates: list[str] = []
for knowledge in self.knowledge_instances:
contents_db = getattr(knowledge, "contents_db", None)
if not contents_db:
continue
db_id = getattr(contents_db, "id", None)
if not db_id:
continue
# Get the name (with fallback)
knowledge_name = getattr(knowledge, "name", None) or f"knowledge_{db_id}"
table_name = getattr(contents_db, "knowledge_table_name", "unknown")
# Create unique key based on name + db + table
key = (knowledge_name, db_id, table_name)
if key in seen_combinations:
duplicates.append(f"'{knowledge_name}' in table '{table_name}'")
else:
seen_combinations[key] = table_name
if duplicates:
unique_duplicates = list(set(duplicates))
error_msg = (
f"Duplicate knowledge instances detected:\n"
f" {', '.join(unique_duplicates)}\n\n"
"Each knowledge instance must have a unique combination of (knowledgename, database, table). "
"To fix this, give each knowledge instance a unique `name` parameter."
)
log_error(error_msg)
raise ValueError(error_msg)
def _get_session_config(self) -> SessionConfig:
session_config = self.config.session if self.config and self.config.session else SessionConfig()
if session_config.dbs is None:
session_config.dbs = []
dbs_with_specific_config = [db.db_id for db in session_config.dbs]
for db_id, dbs in self.dbs.items():
if db_id not in dbs_with_specific_config:
# Collect unique table names from all databases with the same id
unique_tables = list(set(db.session_table_name for db in dbs))
session_config.dbs.append(
DatabaseConfig(
db_id=db_id,
domain_config=SessionDomainConfig(display_name=db_id),
tables=unique_tables,
)
)
return session_config
def _get_memory_config(self) -> MemoryConfig:
memory_config = self.config.memory if self.config and self.config.memory else MemoryConfig()
if memory_config.dbs is None:
memory_config.dbs = []
dbs_with_specific_config = [db.db_id for db in memory_config.dbs]
for db_id, dbs in self.dbs.items():
if db_id not in dbs_with_specific_config:
# Collect unique table names from all databases with the same id
unique_tables = list(set(db.memory_table_name for db in dbs))
memory_config.dbs.append(
DatabaseConfig(
db_id=db_id,
domain_config=MemoryDomainConfig(display_name=db_id),
tables=unique_tables,
)
)
return memory_config
def _get_knowledge_config(self) -> KnowledgeConfig:
knowledge_config = self.config.knowledge if self.config and self.config.knowledge else KnowledgeConfig()
if knowledge_config.dbs is None:
knowledge_config.dbs = []
if knowledge_config.knowledge_instances is None:
knowledge_config.knowledge_instances = []
# Track seen knowledge IDs to deduplicate
seen_knowledge_ids: set[str] = set()
# Build flat list of knowledge instances
for knowledge in self.knowledge_instances:
contents_db = getattr(knowledge, "contents_db", None)
if not contents_db:
continue
db_id = getattr(contents_db, "id", None)
if not db_id:
continue
table_name = getattr(contents_db, "knowledge_table_name", "unknown")
knowledge_name = getattr(knowledge, "name", None) or f"knowledge_{db_id}"
knowledge_id = _generate_knowledge_id(knowledge_name, db_id, table_name)
# Skip if already processed (deduplicate by knowledge_id)
if knowledge_id in seen_knowledge_ids:
continue
seen_knowledge_ids.add(knowledge_id)
instance_config = KnowledgeInstanceConfig(
id=knowledge_id,
name=knowledge_name,
description=getattr(knowledge, "description", None),
db_id=db_id,
table=table_name,
)
knowledge_config.knowledge_instances.append(instance_config)
# Build KnowledgeDatabaseConfig for each db with its tables (as strings)
dbs_with_specific_config = [db.db_id for db in knowledge_config.dbs]
for db_id, dbs in self.knowledge_dbs.items():
if db_id not in dbs_with_specific_config:
# Get all unique table names for this db
unique_tables = list(set(db.knowledge_table_name for db in dbs))
knowledge_config.dbs.append(
KnowledgeDatabaseConfig(
db_id=db_id,
domain_config=KnowledgeDomainConfig(display_name=db_id),
tables=unique_tables,
)
)
return knowledge_config
def _get_metrics_config(self) -> MetricsConfig:
metrics_config = self.config.metrics if self.config and self.config.metrics else MetricsConfig()
if metrics_config.dbs is None:
metrics_config.dbs = []
dbs_with_specific_config = [db.db_id for db in metrics_config.dbs]
for db_id, dbs in self.dbs.items():
if db_id not in dbs_with_specific_config:
# Collect unique table names from all databases with the same id
unique_tables = list(set(db.metrics_table_name for db in dbs))
metrics_config.dbs.append(
DatabaseConfig(
db_id=db_id,
domain_config=MetricsDomainConfig(display_name=db_id),
tables=unique_tables,
)
)
return metrics_config
def _get_evals_config(self) -> EvalsConfig:
evals_config = self.config.evals if self.config and self.config.evals else EvalsConfig()
if evals_config.dbs is None:
evals_config.dbs = []
dbs_with_specific_config = [db.db_id for db in evals_config.dbs]
for db_id, dbs in self.dbs.items():
if db_id not in dbs_with_specific_config:
# Collect unique table names from all databases with the same id
unique_tables = list(set(db.eval_table_name for db in dbs))
evals_config.dbs.append(
DatabaseConfig(
db_id=db_id,
domain_config=EvalsDomainConfig(display_name=db_id),
tables=unique_tables,
)
)
return evals_config
def _get_traces_config(self) -> TracesConfig:
traces_config = self.config.traces if self.config and self.config.traces else TracesConfig()
if traces_config.dbs is None:
traces_config.dbs = []
dbs_with_specific_config = [db.db_id for db in traces_config.dbs]
# If AgentOS db is explicitly set, only use that database for traces
if self.db is not None:
if self.db.id not in dbs_with_specific_config:
traces_config.dbs.append(
DatabaseConfig(
db_id=self.db.id,
domain_config=TracesDomainConfig(display_name=self.db.id),
)
)
else:
# Fall back to all discovered databases
for db_id in self.dbs.keys():
if db_id not in dbs_with_specific_config:
traces_config.dbs.append(
DatabaseConfig(
db_id=db_id,
domain_config=TracesDomainConfig(display_name=db_id),
)
)
return traces_config
def serve(
    self,
    app: Union[str, FastAPI],
    *,
    host: str = "localhost",
    port: int = 7777,
    reload: bool = False,
    reload_includes: Optional[List[str]] = None,
    reload_excludes: Optional[List[str]] = None,
    workers: Optional[int] = None,
    access_log: bool = False,
    **kwargs,
):
    """Serve the AgentOS FastAPI app with uvicorn.

    Args:
        app: Import string or FastAPI instance to serve.
        host: Interface to bind. Defaults to "localhost".
        port: Port to bind. Defaults to 7777.
        reload: Enable uvicorn auto-reload (development only).
        reload_includes: Extra glob patterns to watch for reload.
        reload_excludes: Glob patterns to exclude from reload watching.
        workers: Number of uvicorn worker processes.
        access_log: Enable uvicorn access logging.
        **kwargs: Forwarded verbatim to `uvicorn.run`.
    """
    import uvicorn

    # The control-plane endpoint differs for the staging runtime.
    if getenv("AGNO_API_RUNTIME", "").lower() == "stg":
        public_endpoint = "https://os-stg.agno.com/"
    else:
        public_endpoint = "https://os.agno.com/"

    # Create a terminal panel to announce OS initialization and provide useful info
    from rich.align import Align
    from rich.console import Console, Group

    panel_group = [
        Align.center(f"[bold cyan]{public_endpoint}[/bold cyan]"),
        Align.center(f"\n\n[bold dark_orange]OS running on:[/bold dark_orange] http://{host}:{port}"),
    ]
    if self.authorization:
        panel_group.append(
            Align.center("\n\n[bold chartreuse3]:lock: JWT Authorization Enabled[/bold chartreuse3]")
        )
    elif bool(self.settings.os_security_key):
        panel_group.append(Align.center("\n\n[bold chartreuse3]:lock: Security Key Enabled[/bold chartreuse3]"))
    console = Console()
    console.print(
        Panel(
            Group(*panel_group),
            title="AgentOS",
            expand=False,
            border_style="dark_orange",
            box=box.DOUBLE_EDGE,
            padding=(2, 2),
        )
    )

    # Watch YAML config files too, so the app reloads when the yaml config changes.
    # Fix: the previous condition (`reload_includes is not None`) OVERWROTE any
    # user-provided include patterns and did nothing when none were given.
    # Now the yaml globs are appended whenever reload is enabled.
    if reload:
        reload_includes = [*(reload_includes or []), "*.yaml", "*.yml"]

    uvicorn.run(
        app=app,
        host=host,
        port=port,
        reload=reload,
        reload_includes=reload_includes,
        reload_excludes=reload_excludes,
        workers=workers,
        access_log=access_log,
        lifespan="on",
        **kwargs,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/app.py",
"license": "Apache License 2.0",
"lines": 1148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/auth.py | import asyncio
import hmac
from os import getenv
from typing import Any, List, Optional, Set
from fastapi import Depends, HTTPException, Request
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from agno.os.scopes import get_accessible_resource_ids, has_required_scopes
from agno.os.settings import AgnoAPISettings
# Create a global HTTPBearer instance.
# auto_error=False: missing credentials yield None instead of an immediate
# error response, so each route can decide whether auth is actually required.
security = HTTPBearer(auto_error=False)

# Scopes granted to the internal service token (used by the scheduler executor).
# Shared constant so auth.py and jwt.py stay in sync.
INTERNAL_SERVICE_SCOPES: List[str] = [
    "agents:read",
    "agents:run",
    "teams:read",
    "teams:run",
    "workflows:read",
    "workflows:run",
    "schedules:read",
    "schedules:write",
    "schedules:delete",
]
def get_auth_token_from_request(request: Request) -> Optional[str]:
    """Return the bearer token from the request's Authorization header, if any.

    Used to forward the caller's auth token to remote agents/teams/workflows
    when proxying requests through the gateway.

    Args:
        request: The FastAPI request object.

    Returns:
        The token portion of a "Bearer ..." Authorization header, else None.

    Usage:
        auth_token = get_auth_token_from_request(request)
        if auth_token and isinstance(agent, RemoteAgent):
            await agent.arun(message, auth_token=auth_token)
    """
    header_value = request.headers.get("Authorization")
    if not header_value:
        return None
    # Scheme match is case-insensitive ("Bearer", "bearer", ...).
    if not header_value.lower().startswith("bearer "):
        return None
    return header_value[len("Bearer ") :]
def _is_jwt_configured() -> bool:
"""Check if JWT authentication is configured via environment variables.
This covers cases where JWT middleware is set up manually (not via authorization=True).
"""
return bool(getenv("JWT_VERIFICATION_KEY") or getenv("JWT_JWKS_FILE"))
def get_authentication_dependency(settings: AgnoAPISettings):
    """
    Create an authentication dependency function for FastAPI routes.

    This handles security key authentication (OS_SECURITY_KEY).

    When JWT authorization is enabled (via authorization=True, JWT environment variables,
    or manually added JWT middleware), this dependency is skipped as JWT middleware
    handles authentication.

    Args:
        settings: The API settings containing the security key and authorization flag

    Returns:
        A dependency function that can be used with FastAPI's Depends()
    """

    async def auth_dependency(request: Request, credentials: HTTPAuthorizationCredentials = Depends(security)) -> bool:
        # Checks run from broadest opt-out to strictest: each early `return True`
        # means "this layer does not gate the request".
        # If JWT authorization is enabled via settings (authorization=True on AgentOS)
        if settings and settings.authorization_enabled:
            return True
        # Check if JWT middleware has already handled authentication
        if getattr(request.state, "authenticated", False):
            return True
        # Also skip if JWT is configured via environment variables
        if _is_jwt_configured():
            return True
        # If no security key is set, skip authentication entirely
        if not settings or not settings.os_security_key:
            return True
        # If security is enabled but no authorization header provided, fail
        if not credentials:
            raise HTTPException(status_code=401, detail="Authorization header required")
        token = credentials.credentials
        # Check internal service token (used by scheduler executor).
        # compare_digest performs a constant-time comparison (timing-attack safe).
        internal_token = getattr(request.app.state, "internal_service_token", None)
        if internal_token and hmac.compare_digest(token, internal_token):
            request.state.authenticated = True
            request.state.user_id = "__scheduler__"
            request.state.scopes = list(INTERNAL_SERVICE_SCOPES)
            return True
        # Verify the token against security key.
        # NOTE(review): plain `!=` is not constant-time; consider
        # hmac.compare_digest here as well.
        if token != settings.os_security_key:
            raise HTTPException(status_code=401, detail="Invalid authentication token")
        return True

    return auth_dependency
def validate_websocket_token(token: str, settings: AgnoAPISettings) -> bool:
    """Validate a WebSocket bearer token against the legacy os_security_key.

    Validation is skipped (returns True) when JWT authorization is active —
    via authorization=True or JWT environment variables — or when no security
    key is configured at all.

    Args:
        token: The bearer token presented by the client.
        settings: API settings holding the security key and authorization flag.

    Returns:
        True when the token is valid or authentication is disabled.
    """
    # JWT middleware owns authentication when authorization is enabled.
    if settings and settings.authorization_enabled:
        return True
    # Manual JWT middleware setups configure JWT purely via environment variables.
    if _is_jwt_configured():
        return True
    # No key configured: authentication is effectively off.
    if not settings or not settings.os_security_key:
        return True
    return token == settings.os_security_key
def get_accessible_resources(request: Request, resource_type: str) -> Set[str]:
    """Return the set of resource IDs the caller's scopes grant access to.

    Used to filter lists of agents, teams and workflows by the scopes carried
    in the caller's JWT. A result containing "*" means wildcard access (e.g.
    an admin scope or an agent-os level resource scope).

    Args:
        request: The FastAPI request object (scopes live on request.state).
        resource_type: Type of resource ("agents", "teams", "workflows").

    Returns:
        Set of accessible resource IDs; {"*"} for wildcard access.

    Usage:
        accessible_ids = get_accessible_resources(request, "agents")
        if "*" not in accessible_ids:
            agents = [a for a in agents if a.id in accessible_ids]
    """
    # The JWT middleware caches specific resource ids on the request state when
    # the user lacks a global scope — reuse that computation when present.
    cached = getattr(request.state, "accessible_resource_ids", None)
    if cached is not None:
        return cached

    scopes = getattr(request.state, "scopes", [])
    return get_accessible_resource_ids(user_scopes=scopes, resource_type=resource_type)
def filter_resources_by_access(request: Request, resources: List, resource_type: str) -> List:
    """Return only the resources the caller may access.

    Args:
        request: The FastAPI request object.
        resources: Resource objects (agents, teams or workflows) with an `id` attribute.
        resource_type: Type of resource ("agents", "teams", "workflows").

    Returns:
        The accessible subset; the full list when access is wildcard ("*").

    Usage:
        agents = filter_resources_by_access(request, all_agents, "agents")
    """
    allowed = get_accessible_resources(request, resource_type)
    # Wildcard grants everything — no filtering needed.
    if "*" in allowed:
        return resources
    return [resource for resource in resources if resource.id in allowed]
def check_resource_access(request: Request, resource_id: str, resource_type: str, action: str = "read") -> bool:
    """Return True when the caller may perform *action* on the given resource.

    Args:
        request: The FastAPI request object.
        resource_id: ID of the resource to check.
        resource_type: Type of resource ("agents", "teams", "workflows").
        action: Action to check ("read", "run", etc.).
            NOTE(review): `action` is currently unused — access is resolved from
            the accessible-id set alone. Confirm whether per-action checks are intended.

    Returns:
        True if the caller has access, False otherwise.

    Usage:
        if not check_resource_access(request, agent_id, "agents", "run"):
            raise HTTPException(status_code=403, detail="Access denied")
    """
    allowed = get_accessible_resources(request, resource_type)
    # Wildcard grants all permissions; otherwise require a specific grant.
    return "*" in allowed or resource_id in allowed
def require_resource_access(resource_type: str, action: str, resource_id_param: str):
    """Build a FastAPI dependency enforcing access to one path-addressed resource.

    The returned dependency reads the resource id from the route's path
    parameters and, when authorization is enabled, verifies the caller may
    perform *action* on it (raising 403 otherwise).

    Args:
        resource_type: Type of resource ("agents", "teams", "workflows").
        action: Action to check ("read", "run").
        resource_id_param: Name of the path parameter containing the resource ID.

    Returns:
        A dependency function for use with FastAPI's Depends().

    Usage:
        @router.post("/agents/{agent_id}/runs")
        async def create_agent_run(
            agent_id: str,
            request: Request,
            _: None = Depends(require_resource_access("agents", "run", "agent_id")),
        ):
            ...
    """
    # Singular form for readable error messages ("this agent", "this team", ...).
    singular_by_type = {"agents": "agent", "teams": "team", "workflows": "workflow"}
    resource_singular = singular_by_type.get(resource_type, resource_type.rstrip("s"))

    async def dependency(request: Request):
        # No-op unless the JWT middleware flagged authorization as enabled.
        if not getattr(request.state, "authorization_enabled", False):
            return
        resource_id = request.path_params.get(resource_id_param)
        if resource_id and not check_resource_access(request, resource_id, resource_type, action):
            raise HTTPException(status_code=403, detail=f"Access denied to {action} this {resource_singular}")

    return dependency
def require_approval_resolved(db: Any) -> Any:
    """Build a dependency that blocks run continuation on pending admin approvals.

    Designed to sit alongside ``require_resource_access`` in a route's
    ``dependencies`` list. Pass the OS-level DB adapter at router-creation time
    (the same pattern used by ``get_approval_router``).

    Usage::

        dependencies=[
            Depends(require_resource_access("agents", "run", "agent_id")),
            Depends(require_approval_resolved(os.db)),
        ]
    """

    async def dependency(request: Request) -> None:
        # Mirror require_resource_access: inert when authorization is disabled.
        if not getattr(request.state, "authorization_enabled", False):
            return
        if db is None:
            return
        # Admins (approvals:write) bypass this gate — they may force-continue
        # a run for operational or debugging purposes.
        user_scopes: List[str] = getattr(request.state, "scopes", [])
        if has_required_scopes(user_scopes, ["approvals:write"]):
            return
        run_id: Optional[str] = request.path_params.get("run_id")
        if not run_id:
            return
        get_approvals = getattr(db, "get_approvals", None)
        if get_approvals is None:
            return
        try:
            if asyncio.iscoroutinefunction(get_approvals):
                result = await get_approvals(run_id=run_id, status="pending", approval_type="required")
            else:
                result = get_approvals(run_id=run_id, status="pending", approval_type="required")
            approvals = result[0] if isinstance(result, tuple) else result
            if approvals:
                raise HTTPException(
                    status_code=403,
                    detail="This run requires admin approval before it can be continued",
                )
        except HTTPException:
            raise
        except Exception as exc:
            # Best-effort: databases without approval support (or transient
            # failures) must not block the run.
            from agno.utils.log import log_warning

            log_warning(f"Approval resolution check skipped due to error: {exc}")
            return

    return dependency
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/auth.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/os/config.py | """Schemas related to the AgentOS configuration"""
from typing import Generic, List, Optional, TypeVar
from pydantic import BaseModel, field_validator
class AuthorizationConfig(BaseModel):
    """Configuration for the JWT middleware"""

    # Inline keys used to verify JWT signatures.
    verification_keys: Optional[List[str]] = None
    # Path to a JWKS file — presumably an alternative to inline keys; confirm in middleware.
    jwks_file: Optional[str] = None
    # JWT signing algorithm; None defers to the middleware's default.
    algorithm: Optional[str] = None
    # Whether to validate the token's audience claim; None defers to the middleware.
    verify_audience: Optional[bool] = None


class EvalsDomainConfig(BaseModel):
    """Configuration for the Evals domain of the AgentOS"""

    display_name: Optional[str] = None
    # Model ids selectable when running evals from the UI — TODO confirm consumer.
    available_models: Optional[List[str]] = None


class SessionDomainConfig(BaseModel):
    """Configuration for the Session domain of the AgentOS"""

    display_name: Optional[str] = None


class KnowledgeDomainConfig(BaseModel):
    """Configuration for the Knowledge domain of the AgentOS"""

    display_name: Optional[str] = None


class KnowledgeInstanceConfig(BaseModel):
    """Configuration for a single knowledge instance"""

    # Unique id derived from (name, db, table) — see _generate_knowledge_id usage in app.py.
    id: str
    name: str
    description: Optional[str] = None
    # Id of the contents database backing this instance.
    db_id: str
    # Knowledge table name within that database.
    table: str


class MetricsDomainConfig(BaseModel):
    """Configuration for the Metrics domain of the AgentOS"""

    display_name: Optional[str] = None


class MemoryDomainConfig(BaseModel):
    """Configuration for the Memory domain of the AgentOS"""

    display_name: Optional[str] = None


class TracesDomainConfig(BaseModel):
    """Configuration for the Traces domain of the AgentOS"""

    display_name: Optional[str] = None
# Type of the per-domain config carried by DatabaseConfig (e.g. SessionDomainConfig).
DomainConfigType = TypeVar("DomainConfigType")


class DatabaseConfig(BaseModel, Generic[DomainConfigType]):
    """Configuration for a domain when used with the contextual database"""

    db_id: str
    # Optional domain-specific settings (display name, etc.) for this database.
    domain_config: Optional[DomainConfigType] = None
    # Table names in this database used by the domain.
    tables: Optional[List[str]] = None
class EvalsConfig(EvalsDomainConfig):
    """Configuration for the Evals domain of the AgentOS"""

    dbs: Optional[List[DatabaseConfig[EvalsDomainConfig]]] = None


class SessionConfig(SessionDomainConfig):
    """Configuration for the Session domain of the AgentOS"""

    dbs: Optional[List[DatabaseConfig[SessionDomainConfig]]] = None


class MemoryConfig(MemoryDomainConfig):
    """Configuration for the Memory domain of the AgentOS"""

    dbs: Optional[List[DatabaseConfig[MemoryDomainConfig]]] = None


class KnowledgeDatabaseConfig(BaseModel):
    """Configuration for a knowledge database with its tables"""

    db_id: str
    domain_config: Optional[KnowledgeDomainConfig] = None
    # Mutable default is safe here: pydantic deep-copies field defaults per instance.
    tables: List[str] = []


class KnowledgeConfig(KnowledgeDomainConfig):
    """Configuration for the Knowledge domain of the AgentOS"""

    dbs: Optional[List[KnowledgeDatabaseConfig]] = None
    # Flat list of discovered/configured knowledge instances.
    knowledge_instances: Optional[List[KnowledgeInstanceConfig]] = None


class MetricsConfig(MetricsDomainConfig):
    """Configuration for the Metrics domain of the AgentOS"""

    dbs: Optional[List[DatabaseConfig[MetricsDomainConfig]]] = None


class TracesConfig(TracesDomainConfig):
    """Configuration for the Traces domain of the AgentOS"""

    dbs: Optional[List[DatabaseConfig[TracesDomainConfig]]] = None
class ChatConfig(BaseModel):
    """Configuration for the Chat page of the AgentOS"""

    # Maps an agent/team/workflow key to its list of quick prompts.
    quick_prompts: dict[str, list[str]]

    # Limit the number of quick prompts to 3 (per agent/team/workflow)
    @field_validator("quick_prompts")
    @classmethod
    def limit_lists(cls, v):
        # Reject any entry with more than 3 prompts; pydantic surfaces this
        # as a validation error at model construction time.
        for key, lst in v.items():
            if len(lst) > 3:
                raise ValueError(f"Too many quick prompts for '{key}', maximum allowed is 3")
        return v
class AgentOSConfig(BaseModel):
    """General configuration for an AgentOS instance"""

    # Model ids offered across the OS UI — TODO confirm consumer.
    available_models: Optional[List[str]] = None
    chat: Optional[ChatConfig] = None
    # Per-domain configs; None means "derive defaults from discovered databases".
    evals: Optional[EvalsConfig] = None
    knowledge: Optional[KnowledgeConfig] = None
    memory: Optional[MemoryConfig] = None
    session: Optional[SessionConfig] = None
    metrics: Optional[MetricsConfig] = None
    traces: Optional[TracesConfig] = None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/config.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/interfaces/agui/agui.py | """Main class for the AG-UI app, used to expose an Agno Agent or Team in an AG-UI compatible format."""
from typing import List, Optional, Union
from fastapi.routing import APIRouter
from agno.agent import Agent
from agno.agent.remote import RemoteAgent
from agno.os.interfaces.agui.router import attach_routes
from agno.os.interfaces.base import BaseInterface
from agno.team import Team
from agno.team.remote import RemoteTeam
class AGUI(BaseInterface):
    type = "agui"

    router: APIRouter

    def __init__(
        self,
        agent: Optional[Union[Agent, RemoteAgent]] = None,
        team: Optional[Union[Team, RemoteTeam]] = None,
        prefix: str = "",
        tags: Optional[List[str]] = None,
    ):
        """
        Initialize the AGUI interface.

        Args:
            agent: The agent to expose via AG-UI.
            team: The team to expose via AG-UI.
            prefix: Custom prefix for the router (e.g., "/agui/v1", "/chat/public").
            tags: Custom tags for the router; defaults to ["AGUI"].

        Raises:
            ValueError: When neither an agent nor a team is provided.
        """
        self.agent = agent
        self.team = team
        self.prefix = prefix
        self.tags = tags if tags else ["AGUI"]
        # Exactly one runnable target (agent or team) must be present — fail fast.
        if not (self.agent or self.team):
            raise ValueError("AGUI requires an agent or a team")

    def get_router(self) -> APIRouter:
        """Create the AG-UI router and attach the run routes to it."""
        self.router = APIRouter(prefix=self.prefix, tags=self.tags)  # type: ignore
        self.router = attach_routes(router=self.router, agent=self.agent, team=self.team)
        return self.router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/agui/agui.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/interfaces/agui/router.py | """Async router handling exposing an Agno Agent or Team in an AG-UI compatible format."""
import logging
import uuid
from typing import AsyncIterator, Optional, Union
try:
from ag_ui.core import (
BaseEvent,
EventType,
RunAgentInput,
RunErrorEvent,
RunStartedEvent,
)
from ag_ui.encoder import EventEncoder
except ImportError as e:
raise ImportError("`ag_ui` not installed. Please install it with `pip install -U ag-ui-protocol`") from e
from fastapi import APIRouter
from fastapi.responses import StreamingResponse
from agno.agent import Agent, RemoteAgent
from agno.os.interfaces.agui.utils import (
async_stream_agno_response_as_agui_events,
convert_agui_messages_to_agno_messages,
validate_agui_state,
)
from agno.team.remote import RemoteTeam
from agno.team.team import Team
logger = logging.getLogger(__name__)
async def run_agent(agent: Union[Agent, RemoteAgent], run_input: RunAgentInput) -> AsyncIterator[BaseEvent]:
    """Run the contextual Agent, mapping AG-UI input messages to Agno format, and streaming the response in AG-UI format.

    Args:
        agent: The (possibly remote) Agno Agent to execute.
        run_input: The AG-UI run request (messages, thread_id, state, forwarded_props).

    Yields:
        AG-UI events: a RunStartedEvent first, then the translated Agno event
        stream, or a RunErrorEvent if anything raises.
    """
    # Reuse the caller-provided run_id when present so emitted events correlate with the request
    run_id = run_input.run_id or str(uuid.uuid4())
    try:
        # Preparing the input for the Agent and emitting the run started event
        messages = convert_agui_messages_to_agno_messages(run_input.messages or [])
        yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=run_input.thread_id, run_id=run_id)
        # Look for user_id in run_input.forwarded_props
        user_id = None
        if run_input.forwarded_props and isinstance(run_input.forwarded_props, dict):
            user_id = run_input.forwarded_props.get("user_id")
        # Validating the session state is of the expected type (dict)
        session_state = validate_agui_state(run_input.state, run_input.thread_id)
        # Request streaming response from agent
        response_stream = agent.arun(  # type: ignore
            input=messages,
            session_id=run_input.thread_id,  # AG-UI thread maps onto the Agno session
            stream=True,
            stream_events=True,
            user_id=user_id,
            session_state=session_state,
            run_id=run_id,
        )
        # Stream the response content in AG-UI format
        async for event in async_stream_agno_response_as_agui_events(
            response_stream=response_stream,  # type: ignore
            thread_id=run_input.thread_id,
            run_id=run_id,
        ):
            yield event
    # Emit a RunErrorEvent if any error occurs
    except Exception as e:
        logger.error(f"Error running agent: {e}", exc_info=True)
        yield RunErrorEvent(type=EventType.RUN_ERROR, message=str(e))
async def run_team(team: Union[Team, RemoteTeam], input: RunAgentInput) -> AsyncIterator[BaseEvent]:
    """Run the contextual Team, mapping AG-UI input messages to Agno format, and streaming the response in AG-UI format.

    Args:
        team: The (possibly remote) Agno Team to execute.
        input: The AG-UI run request (messages, thread_id, state, forwarded_props).

    Yields:
        AG-UI events: a RunStartedEvent first, then the translated Agno event
        stream, or a RunErrorEvent if anything raises.
    """
    # Reuse the caller-provided run_id when present so emitted events correlate with the request
    run_id = input.run_id or str(uuid.uuid4())
    try:
        # Prepare the input for the Team and emit the run started event
        messages = convert_agui_messages_to_agno_messages(input.messages or [])
        yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=input.thread_id, run_id=run_id)
        # Look for user_id in input.forwarded_props
        user_id = None
        if input.forwarded_props and isinstance(input.forwarded_props, dict):
            user_id = input.forwarded_props.get("user_id")
        # Validating the session state is of the expected type (dict)
        session_state = validate_agui_state(input.state, input.thread_id)
        # Request streaming response from team
        response_stream = team.arun(  # type: ignore
            input=messages,
            session_id=input.thread_id,  # AG-UI thread maps onto the Agno session
            stream=True,
            # FIX: was `stream_steps=True`, which does not match the parameter the
            # agent path passes; `stream_events=True` mirrors run_agent so team runs
            # also stream intermediate (tool call / reasoning) events.
            stream_events=True,
            user_id=user_id,
            session_state=session_state,
            run_id=run_id,
        )
        # Stream the response content in AG-UI format
        async for event in async_stream_agno_response_as_agui_events(
            response_stream=response_stream, thread_id=input.thread_id, run_id=run_id
        ):
            yield event
    # Emit a RunErrorEvent if any error occurs
    except Exception as e:
        logger.error(f"Error running team: {e}", exc_info=True)
        yield RunErrorEvent(type=EventType.RUN_ERROR, message=str(e))
def attach_routes(
    router: APIRouter, agent: Optional[Union[Agent, RemoteAgent]] = None, team: Optional[Union[Team, RemoteTeam]] = None
) -> APIRouter:
    """Register the AG-UI endpoints (/agui and /status) on the given router.

    Exactly one of ``agent`` or ``team`` should be supplied; the /agui endpoint
    streams the run as server-sent events encoded by the AG-UI EventEncoder.
    """
    if agent is None and team is None:
        raise ValueError("Either agent or team must be provided.")

    encoder = EventEncoder()

    # SSE response headers: disable caching, keep the connection open, and
    # allow cross-origin browser clients to consume the stream.
    sse_headers = {
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST, GET, OPTIONS",
        "Access-Control-Allow-Headers": "*",
    }

    @router.post(
        "/agui",
        name="run_agent",
    )
    async def run_agent_agui(run_input: RunAgentInput):
        async def event_generator():
            # Pick the entity to run; agent takes precedence over team
            source = None
            if agent:
                source = run_agent(agent, run_input)
            elif team:
                source = run_team(team, run_input)
            if source is not None:
                async for raw_event in source:
                    yield encoder.encode(raw_event)

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers=sse_headers,
        )

    @router.get("/status")
    async def get_status():
        return {"status": "available"}

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/agui/router.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/interfaces/agui/utils.py | """Logic used by the AG-UI router."""
import json
import uuid
from collections.abc import Iterator
from dataclasses import asdict, dataclass, is_dataclass
from typing import Any, AsyncIterator, Dict, List, Optional, Set, Tuple, Union
from ag_ui.core import (
BaseEvent,
CustomEvent,
EventType,
RunFinishedEvent,
StepFinishedEvent,
StepStartedEvent,
TextMessageContentEvent,
TextMessageEndEvent,
TextMessageStartEvent,
ToolCallArgsEvent,
ToolCallEndEvent,
ToolCallResultEvent,
ToolCallStartEvent,
)
from ag_ui.core.types import Message as AGUIMessage
from pydantic import BaseModel
from agno.models.message import Message
from agno.run.agent import RunContentEvent, RunEvent, RunOutputEvent, RunPausedEvent
from agno.run.team import RunContentEvent as TeamRunContentEvent
from agno.run.team import TeamRunEvent, TeamRunOutputEvent
from agno.utils.log import log_debug, log_warning
from agno.utils.message import get_text_from_message
def validate_agui_state(state: Any, thread_id: str) -> Optional[Dict[str, Any]]:
    """Coerce the incoming AGUI state into a plain dict.

    Accepts dicts as-is and tries, in order, Pydantic models, dataclasses,
    and objects exposing a callable ``to_dict``. Anything else is dropped
    with a warning and ``None`` is returned.
    """
    if state is None:
        return None
    if isinstance(state, dict):
        return state

    # Pydantic model -> dict
    if isinstance(state, BaseModel):
        try:
            return state.model_dump()
        except Exception:
            pass

    # Dataclass instance -> dict
    if is_dataclass(state):
        try:
            return asdict(state)  # type: ignore
        except Exception:
            pass

    # Duck-typed: object with a callable to_dict() returning a dict
    to_dict = getattr(state, "to_dict", None)
    if callable(to_dict):
        try:
            converted = to_dict()  # type: ignore
        except Exception:
            converted = None
        if isinstance(converted, dict):
            return converted

    log_warning(f"AGUI state must be a dict, got {type(state).__name__}. State will be ignored. Thread: {thread_id}")
    return None
@dataclass
class EventBuffer:
    """Tracks AG-UI event-ordering state while translating an Agno run stream.

    Keeps the set of open/closed tool calls, the current text message ID, a
    pre-generated ID for the next text message, and an optional parent
    message ID that sequential tool calls should share.
    """

    active_tool_call_ids: Set[str]  # tool calls currently open
    ended_tool_call_ids: Set[str]  # tool calls already closed
    current_text_message_id: str = ""  # ID of the active text message context
    next_text_message_id: str = ""  # pre-generated ID for the next text message
    pending_tool_calls_parent_id: str = ""  # parent message ID pinned for upcoming tool calls

    def __init__(self):
        self.active_tool_call_ids = set()
        self.ended_tool_call_ids = set()
        self.current_text_message_id = ""
        # Pre-generate so the next text message's ID is known before it starts
        self.next_text_message_id = str(uuid.uuid4())
        self.pending_tool_calls_parent_id = ""

    def start_tool_call(self, tool_call_id: str) -> None:
        """Mark a tool call as active."""
        self.active_tool_call_ids.add(tool_call_id)

    def end_tool_call(self, tool_call_id: str) -> None:
        """Move a tool call from active to ended."""
        self.active_tool_call_ids.discard(tool_call_id)
        self.ended_tool_call_ids.add(tool_call_id)

    def start_text_message(self) -> str:
        """Promote the pre-generated ID to current, prepare the next one, return the current ID."""
        self.current_text_message_id, self.next_text_message_id = (
            self.next_text_message_id,
            str(uuid.uuid4()),
        )
        return self.current_text_message_id

    def get_parent_message_id_for_tool_call(self) -> str:
        """Return the parent message ID for tool calls: pinned pending ID first, else the current message."""
        return self.pending_tool_calls_parent_id or self.current_text_message_id

    def set_pending_tool_calls_parent_id(self, parent_id: str) -> None:
        """Pin the parent message ID that upcoming tool calls should share."""
        self.pending_tool_calls_parent_id = parent_id

    def clear_pending_tool_calls_parent_id(self) -> None:
        """Drop the pinned parent ID (called when a new text message starts)."""
        self.pending_tool_calls_parent_id = ""
def convert_agui_messages_to_agno_messages(messages: List[AGUIMessage]) -> List[Message]:
    """Translate AG-UI chat messages into Agno Message objects.

    Duplicate tool results are dropped (first occurrence wins), assistant
    tool_calls are narrowed to those with a matching result in the sequence,
    and system messages are skipped (the agent builds its own system message).
    """
    # Pass 1: which tool_call_ids actually have a tool result in this sequence
    resolved_tool_ids: Set[str] = {
        msg.tool_call_id for msg in messages if msg.role == "tool" and msg.tool_call_id
    }

    # Pass 2: convert each message
    converted: List[Message] = []
    emitted_tool_ids: Set[str] = set()
    for msg in messages:
        role = msg.role
        if role == "tool":
            # Deduplicate tool results - keep only first occurrence
            if msg.tool_call_id in emitted_tool_ids:
                log_debug(f"Skipping duplicate AGUI tool result: {msg.tool_call_id}")
                continue
            emitted_tool_ids.add(msg.tool_call_id)
            converted.append(Message(role="tool", tool_call_id=msg.tool_call_id, content=msg.content))
        elif role == "assistant":
            tool_calls = None
            if msg.tool_calls:
                # Keep only tool calls whose results appear in this message sequence
                matched = [call for call in msg.tool_calls if call.id in resolved_tool_ids]
                if matched:
                    tool_calls = [call.model_dump() for call in matched]
            converted.append(Message(role="assistant", content=msg.content, tool_calls=tool_calls))
        elif role == "user":
            converted.append(Message(role="user", content=msg.content))
        elif role == "system":
            # Skip - agent builds its own system message from configuration
            pass
        else:
            log_warning(f"Unknown AGUI message role: {msg.role}")
    return converted
def extract_team_response_chunk_content(response: TeamRunContentEvent) -> str:
    """Given a team response stream chunk, extract its text content.

    Concatenates the chunk's own content with any member responses, each
    prefixed with "Team member: " and joined by newlines.
    """
    # Gather content produced by individual team members, recursing into nested teams
    member_parts = []
    member_responses = getattr(response, "member_responses", None)
    if member_responses:
        for member_resp in member_responses:
            if isinstance(member_resp, RunContentEvent):
                extracted = extract_response_chunk_content(member_resp)
            elif isinstance(member_resp, TeamRunContentEvent):
                extracted = extract_team_response_chunk_content(member_resp)
            else:
                extracted = ""
            if extracted:
                member_parts.append(f"Team member: {extracted}")
    members_response = "\n".join(member_parts) if member_parts else ""

    # Handle structured outputs in the chunk's own content
    main_content = "" if response.content is None else get_text_from_message(response.content)
    return main_content + members_response
def extract_response_chunk_content(response: RunContentEvent) -> str:
    """Given a response stream chunk, extract its text content.

    Prefers the newest assistant message with content; otherwise falls back
    to the chunk's own content field (handling structured outputs).
    """
    # Newest-first scan of attached messages for assistant content
    attached = getattr(response, "messages", None)
    if attached:
        for msg in reversed(attached):
            if getattr(msg, "role", None) == "assistant" and getattr(msg, "content", None):
                # Handle structured outputs from messages
                return get_text_from_message(msg.content)
    # Handle structured outputs
    return "" if response.content is None else get_text_from_message(response.content)
def _create_events_from_chunk(
    chunk: Union[RunOutputEvent, TeamRunOutputEvent],
    message_id: str,
    message_started: bool,
    event_buffer: EventBuffer,
) -> Tuple[List[BaseEvent], bool, str]:
    """
    Process a single chunk and return events to emit + updated message_started state.

    Translates one Agno event into zero or more AG-UI events, preserving the
    AG-UI ordering contract: text messages are opened lazily on first content,
    closed before tool calls start, and tool calls are parented to the text
    message that preceded them (or to a synthetic empty message if none did).

    Args:
        chunk: The event chunk to process
        message_id: Current message identifier
        message_started: Whether a message is currently active
        event_buffer: Event buffer for tracking tool call state

    Returns:
        Tuple of (events_to_emit, new_message_started_state, message_id)
    """
    events_to_emit: List[BaseEvent] = []
    # Extract content if the contextual event is a content event
    if chunk.event == RunEvent.run_content:
        content = extract_response_chunk_content(chunk)  # type: ignore
    elif chunk.event == TeamRunEvent.run_content:
        content = extract_team_response_chunk_content(chunk)  # type: ignore
    else:
        content = None
    # Handle text responses
    if content is not None:
        # Handle the message start event, emitted once per message
        if not message_started:
            message_started = True
            message_id = event_buffer.start_text_message()
            # Clear pending tool calls parent ID when starting new text message
            event_buffer.clear_pending_tool_calls_parent_id()
            start_event = TextMessageStartEvent(
                type=EventType.TEXT_MESSAGE_START,
                message_id=message_id,
                role="assistant",
            )
            events_to_emit.append(start_event)
        # Handle the text content event, emitted once per text chunk
        # (empty deltas are suppressed; AG-UI clients expect non-empty content)
        if content is not None and content != "":
            content_event = TextMessageContentEvent(
                type=EventType.TEXT_MESSAGE_CONTENT,
                message_id=message_id,
                delta=content,
            )
            events_to_emit.append(content_event)  # type: ignore
    # Handle starting a new tool
    elif chunk.event == RunEvent.tool_call_started or chunk.event == TeamRunEvent.tool_call_started:
        if chunk.tool is not None:  # type: ignore
            tool_call = chunk.tool  # type: ignore
            # End current text message and handle for tool calls
            current_message_id = message_id
            if message_started:
                # End the current text message
                end_message_event = TextMessageEndEvent(type=EventType.TEXT_MESSAGE_END, message_id=current_message_id)
                events_to_emit.append(end_message_event)
                # Set this message as the parent for any upcoming tool calls
                # This ensures multiple sequential tool calls all use the same parent
                event_buffer.set_pending_tool_calls_parent_id(current_message_id)
                # Reset message started state and generate new message_id for future messages
                message_started = False
                message_id = str(uuid.uuid4())
            # Get the parent message ID - this will use pending parent if set, ensuring multiple tool calls in sequence have the same parent
            parent_message_id = event_buffer.get_parent_message_id_for_tool_call()
            if not parent_message_id:
                # Create parent message for tool calls without preceding assistant message
                parent_message_id = str(uuid.uuid4())
                # Emit a text message to serve as the parent
                text_start = TextMessageStartEvent(
                    type=EventType.TEXT_MESSAGE_START,
                    message_id=parent_message_id,
                    role="assistant",
                )
                events_to_emit.append(text_start)
                text_end = TextMessageEndEvent(
                    type=EventType.TEXT_MESSAGE_END,
                    message_id=parent_message_id,
                )
                events_to_emit.append(text_end)
                # Set this as the pending parent for subsequent tool calls in this batch
                event_buffer.set_pending_tool_calls_parent_id(parent_message_id)
            start_event = ToolCallStartEvent(
                type=EventType.TOOL_CALL_START,
                tool_call_id=tool_call.tool_call_id,  # type: ignore
                tool_call_name=tool_call.tool_name,  # type: ignore
                parent_message_id=parent_message_id,
            )
            events_to_emit.append(start_event)
            args_event = ToolCallArgsEvent(
                type=EventType.TOOL_CALL_ARGS,
                tool_call_id=tool_call.tool_call_id,  # type: ignore
                delta=json.dumps(tool_call.tool_args),
            )
            events_to_emit.append(args_event)  # type: ignore
    # Handle tool call completion
    elif chunk.event == RunEvent.tool_call_completed or chunk.event == TeamRunEvent.tool_call_completed:
        if chunk.tool is not None:  # type: ignore
            tool_call = chunk.tool  # type: ignore
            # Guard: never emit a second TOOL_CALL_END for an already-ended call
            if tool_call.tool_call_id not in event_buffer.ended_tool_call_ids:
                end_event = ToolCallEndEvent(
                    type=EventType.TOOL_CALL_END,
                    tool_call_id=tool_call.tool_call_id,  # type: ignore
                )
                events_to_emit.append(end_event)
            if tool_call.result is not None:
                result_event = ToolCallResultEvent(
                    type=EventType.TOOL_CALL_RESULT,
                    tool_call_id=tool_call.tool_call_id,  # type: ignore
                    content=str(tool_call.result),
                    role="tool",
                    message_id=str(uuid.uuid4()),
                )
                events_to_emit.append(result_event)
    # Handle reasoning (mapped to AG-UI step events)
    elif chunk.event == RunEvent.reasoning_started:
        step_started_event = StepStartedEvent(type=EventType.STEP_STARTED, step_name="reasoning")
        events_to_emit.append(step_started_event)
    elif chunk.event == RunEvent.reasoning_completed:
        step_finished_event = StepFinishedEvent(type=EventType.STEP_FINISHED, step_name="reasoning")
        events_to_emit.append(step_finished_event)
    # Handle custom events
    elif chunk.event == RunEvent.custom_event:
        # Use the name of the event class if available, otherwise default to the CustomEvent
        try:
            custom_event_name = chunk.__class__.__name__
        except Exception:
            custom_event_name = chunk.event
        # Use the complete Agno event as value if parsing it works, else the event content field
        try:
            custom_event_value = chunk.to_dict()
        except Exception:
            custom_event_value = chunk.content  # type: ignore
        custom_event = CustomEvent(name=custom_event_name, value=custom_event_value)
        events_to_emit.append(custom_event)
    return events_to_emit, message_started, message_id
def _create_completion_events(
    chunk: Union[RunOutputEvent, TeamRunOutputEvent],
    event_buffer: EventBuffer,
    message_started: bool,
    message_id: str,
    thread_id: str,
    run_id: str,
) -> List[BaseEvent]:
    """Create events for run completion.

    Closes any still-open tool calls and text message, emits tool call events
    for tools awaiting external execution when the run is paused, and always
    ends with a RunFinishedEvent.

    Args:
        chunk: The completion chunk (run_completed or run_paused event).
        event_buffer: Buffer tracking which tool calls are still active/ended.
        message_started: Whether a text message is currently open.
        message_id: ID of the currently open text message, if any.
        thread_id: AG-UI thread the run belongs to.
        run_id: Identifier of the finished run.

    Returns:
        The ordered list of AG-UI events to emit.
    """
    events_to_emit: List[BaseEvent] = []
    # End remaining active tool calls if needed
    for tool_call_id in list(event_buffer.active_tool_call_ids):
        if tool_call_id not in event_buffer.ended_tool_call_ids:
            end_event = ToolCallEndEvent(
                type=EventType.TOOL_CALL_END,
                tool_call_id=tool_call_id,
            )
            events_to_emit.append(end_event)
    # End the message and run, denoting the end of the session
    if message_started:
        end_message_event = TextMessageEndEvent(type=EventType.TEXT_MESSAGE_END, message_id=message_id)
        events_to_emit.append(end_message_event)
    # Emit external execution tools (only present on a paused run)
    if isinstance(chunk, RunPausedEvent):
        external_tools = chunk.tools_awaiting_external_execution
        if external_tools:
            # First, emit an assistant message for external tool calls
            # (AG-UI tool calls need a parent message to attach to)
            assistant_message_id = str(uuid.uuid4())
            assistant_start_event = TextMessageStartEvent(
                type=EventType.TEXT_MESSAGE_START,
                message_id=assistant_message_id,
                role="assistant",
            )
            events_to_emit.append(assistant_start_event)
            # Add any text content if present for the assistant message
            if chunk.content:
                content_event = TextMessageContentEvent(
                    type=EventType.TEXT_MESSAGE_CONTENT,
                    message_id=assistant_message_id,
                    delta=str(chunk.content),
                )
                events_to_emit.append(content_event)
            # End the assistant message
            assistant_end_event = TextMessageEndEvent(
                type=EventType.TEXT_MESSAGE_END,
                message_id=assistant_message_id,
            )
            events_to_emit.append(assistant_end_event)
            # Emit tool call events for external execution
            for tool in external_tools:
                # Skip tools that cannot be expressed as AG-UI tool calls
                if tool.tool_call_id is None or tool.tool_name is None:
                    continue
                start_event = ToolCallStartEvent(
                    type=EventType.TOOL_CALL_START,
                    tool_call_id=tool.tool_call_id,
                    tool_call_name=tool.tool_name,
                    parent_message_id=assistant_message_id,  # Use the assistant message as parent
                )
                events_to_emit.append(start_event)
                args_event = ToolCallArgsEvent(
                    type=EventType.TOOL_CALL_ARGS,
                    tool_call_id=tool.tool_call_id,
                    delta=json.dumps(tool.tool_args),
                )
                events_to_emit.append(args_event)
                end_event = ToolCallEndEvent(
                    type=EventType.TOOL_CALL_END,
                    tool_call_id=tool.tool_call_id,
                )
                events_to_emit.append(end_event)
    run_finished_event = RunFinishedEvent(type=EventType.RUN_FINISHED, thread_id=thread_id, run_id=run_id)
    events_to_emit.append(run_finished_event)
    return events_to_emit
def _emit_event_logic(event: BaseEvent, event_buffer: EventBuffer) -> List[BaseEvent]:
    """Record tool-call lifecycle transitions in the buffer and pass the event through."""
    # Track tool-call open/close so completion cleanup knows what is still active
    if event.type in (EventType.TOOL_CALL_START, EventType.TOOL_CALL_END):
        tool_call_id = getattr(event, "tool_call_id", None)
        if tool_call_id:
            if event.type == EventType.TOOL_CALL_START:
                event_buffer.start_tool_call(tool_call_id)
            else:
                event_buffer.end_tool_call(tool_call_id)
    # No reordering/filtering currently happens here; the event is emitted as-is
    return [event]
def stream_agno_response_as_agui_events(
    response_stream: Iterator[Union[RunOutputEvent, TeamRunOutputEvent]], thread_id: str, run_id: str
) -> Iterator[BaseEvent]:
    """Map the Agno response stream to AG-UI format, handling event ordering constraints.

    Completion chunks (run_completed / run_paused) are held back until the
    stream is exhausted so the cleanup events (closing open tool calls and
    text messages, RUN_FINISHED) are always emitted last. If the stream ends
    without a completion chunk, a synthetic one is created so clients still
    receive the final events.

    Args:
        response_stream: Synchronous stream of Agno run events.
        thread_id: AG-UI thread the run belongs to.
        run_id: Identifier of the run being streamed.

    Yields:
        AG-UI events in a protocol-valid order.
    """
    message_id = ""  # Will be set by EventBuffer when text message starts
    message_started = False
    event_buffer = EventBuffer()
    stream_completed = False
    completion_chunk = None
    for chunk in response_stream:
        # Check if this is a completion event
        if (
            chunk.event == RunEvent.run_completed
            or chunk.event == TeamRunEvent.run_completed
            or chunk.event == RunEvent.run_paused
        ):
            # Store completion chunk but don't process it yet
            completion_chunk = chunk
            stream_completed = True
        else:
            # Process regular chunk immediately
            events_from_chunk, message_started, message_id = _create_events_from_chunk(
                chunk, message_id, message_started, event_buffer
            )
            for event in events_from_chunk:
                events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
                for emit_event in events_to_emit:
                    yield emit_event
    # Process ONLY completion cleanup events, not content from completion chunk
    if completion_chunk:
        completion_events = _create_completion_events(
            completion_chunk, event_buffer, message_started, message_id, thread_id, run_id
        )
        for event in completion_events:
            events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
            for emit_event in events_to_emit:
                yield emit_event
    # Ensure completion events are always emitted even when stream ends naturally
    if not stream_completed:
        # Create a synthetic completion event to ensure proper cleanup
        # (local import avoids widening this module's import surface)
        from agno.run.agent import RunCompletedEvent

        synthetic_completion = RunCompletedEvent()
        completion_events = _create_completion_events(
            synthetic_completion, event_buffer, message_started, message_id, thread_id, run_id
        )
        for event in completion_events:
            events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
            for emit_event in events_to_emit:
                yield emit_event
# Async version - mirrors stream_agno_response_as_agui_events for async streams
async def async_stream_agno_response_as_agui_events(
    response_stream: AsyncIterator[Union[RunOutputEvent, TeamRunOutputEvent]],
    thread_id: str,
    run_id: str,
) -> AsyncIterator[BaseEvent]:
    """Map the Agno response stream to AG-UI format, handling event ordering constraints.

    Async counterpart of ``stream_agno_response_as_agui_events``: completion
    chunks (run_completed / run_paused) are deferred until the stream is
    exhausted so cleanup events are emitted last, and a synthetic completion
    is generated if the stream ends without one.

    Args:
        response_stream: Asynchronous stream of Agno run events.
        thread_id: AG-UI thread the run belongs to.
        run_id: Identifier of the run being streamed.

    Yields:
        AG-UI events in a protocol-valid order.
    """
    message_id = ""  # Will be set by EventBuffer when text message starts
    message_started = False
    event_buffer = EventBuffer()
    stream_completed = False
    completion_chunk = None
    async for chunk in response_stream:
        # Check if this is a completion event
        if (
            chunk.event == RunEvent.run_completed
            or chunk.event == TeamRunEvent.run_completed
            or chunk.event == RunEvent.run_paused
        ):
            # Store completion chunk but don't process it yet
            completion_chunk = chunk
            stream_completed = True
        else:
            # Process regular chunk immediately
            events_from_chunk, message_started, message_id = _create_events_from_chunk(
                chunk, message_id, message_started, event_buffer
            )
            for event in events_from_chunk:
                events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
                for emit_event in events_to_emit:
                    yield emit_event
    # Process ONLY completion cleanup events, not content from completion chunk
    if completion_chunk:
        completion_events = _create_completion_events(
            completion_chunk, event_buffer, message_started, message_id, thread_id, run_id
        )
        for event in completion_events:
            events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
            for emit_event in events_to_emit:
                yield emit_event
    # Ensure completion events are always emitted even when stream ends naturally
    if not stream_completed:
        # Create a synthetic completion event to ensure proper cleanup
        # (local import avoids widening this module's import surface)
        from agno.run.agent import RunCompletedEvent

        synthetic_completion = RunCompletedEvent()
        completion_events = _create_completion_events(
            synthetic_completion, event_buffer, message_started, message_id, thread_id, run_id
        )
        for event in completion_events:
            events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
            for emit_event in events_to_emit:
                yield emit_event
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/agui/utils.py",
"license": "Apache License 2.0",
"lines": 476,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/interfaces/base.py | from abc import ABC, abstractmethod
from typing import List, Optional, Union
from fastapi import APIRouter
from agno.agent import Agent, RemoteAgent
from agno.team import RemoteTeam, Team
from agno.workflow import RemoteWorkflow, Workflow
class BaseInterface(ABC):
    """Abstract base for interfaces that expose an Agno entity via a FastAPI router.

    Concrete interfaces set the class attributes below and implement
    ``get_router`` to build and return their routes.
    """

    # Interface discriminator, set by subclasses (e.g. "agui", "slack") -- TODO confirm values
    type: str
    version: str = "1.0"
    # The entity exposed by this interface; presumably subclasses populate one of these -- verify per subclass
    agent: Optional[Union[Agent, RemoteAgent]] = None
    team: Optional[Union[Team, RemoteTeam]] = None
    workflow: Optional[Union[Workflow, RemoteWorkflow]] = None
    # Router configuration: URL prefix and OpenAPI tags
    prefix: str
    tags: List[str]
    # The FastAPI router built by get_router
    router: APIRouter

    @abstractmethod
    def get_router(self, use_async: bool = True, **kwargs) -> APIRouter:
        """Build and return the APIRouter exposing this interface's endpoints."""
        pass
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/base.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/interfaces/slack/router.py | from __future__ import annotations
from ssl import SSLContext
from typing import Any, Dict, List, Literal, Optional, Union
from fastapi import APIRouter, BackgroundTasks, HTTPException, Request
from pydantic import BaseModel, Field
from agno.agent import Agent, RemoteAgent
from agno.os.interfaces.slack.events import process_event
from agno.os.interfaces.slack.helpers import (
download_event_files_async,
extract_event_context,
send_slack_message_async,
should_respond,
upload_response_media_async,
)
from agno.os.interfaces.slack.security import verify_slack_signature
from agno.os.interfaces.slack.state import StreamState
from agno.team import RemoteTeam, Team
from agno.tools.slack import SlackTools
from agno.utils.log import log_error
from agno.workflow import RemoteWorkflow, Workflow
# Slack sends lifecycle events for bots with these subtypes. Without this
# filter the router would try to process its own messages, causing infinite loops.
# (frozenset: immutable, O(1) membership checks)
_IGNORED_SUBTYPES = frozenset(
    {
        "bot_message",
        "bot_add",
        "bot_remove",
        "bot_enable",
        "bot_disable",
        "message_changed",
        "message_deleted",
    }
)
# User-facing error message for failed requests (sent back into the Slack thread)
_ERROR_MESSAGE = "Sorry, there was an error processing your message."
class SlackEventResponse(BaseModel):
    """ACK body returned to Slack after an event is accepted for processing."""

    status: str = Field(default="ok")
class SlackChallengeResponse(BaseModel):
    """Response to Slack's url_verification handshake, echoing the challenge back."""

    challenge: str = Field(description="Challenge string to echo back to Slack")
def attach_routes(
router: APIRouter,
agent: Optional[Union[Agent, RemoteAgent]] = None,
team: Optional[Union[Team, RemoteTeam]] = None,
workflow: Optional[Union[Workflow, RemoteWorkflow]] = None,
reply_to_mentions_only: bool = True,
token: Optional[str] = None,
signing_secret: Optional[str] = None,
streaming: bool = True,
loading_messages: Optional[List[str]] = None,
task_display_mode: str = "plan",
loading_text: str = "Thinking...",
suggested_prompts: Optional[List[Dict[str, str]]] = None,
ssl: Optional[SSLContext] = None,
buffer_size: int = 100,
max_file_size: int = 1_073_741_824, # 1GB
) -> APIRouter:
# Inner functions capture config via closure to keep each instance isolated
entity = agent or team or workflow
# entity_type drives event dispatch (agent vs team vs workflow events)
entity_type: Literal["agent", "team", "workflow"] = "agent" if agent else "team" if team else "workflow"
raw_name = getattr(entity, "name", None)
# entity_name labels task cards; entity_id namespaces session IDs
entity_name = raw_name if isinstance(raw_name, str) else entity_type
# Multiple Slack instances can be mounted on one FastAPI app (e.g. /research
# and /analyst). op_suffix makes each operation_id unique to avoid collisions.
op_suffix = entity_name.lower().replace(" ", "_")
entity_id = getattr(entity, "id", None) or entity_name
slack_tools = SlackTools(token=token, ssl=ssl, max_file_size=max_file_size)
@router.post(
"/events",
operation_id=f"slack_events_{op_suffix}",
name="slack_events",
description="Process incoming Slack events",
response_model=Union[SlackChallengeResponse, SlackEventResponse],
response_model_exclude_none=True,
responses={
200: {"description": "Event processed successfully"},
400: {"description": "Missing Slack headers"},
403: {"description": "Invalid Slack signature"},
},
)
async def slack_events(request: Request, background_tasks: BackgroundTasks):
# ACK immediately, process in background. Slack retries after ~3s if it
# doesn't get a 200, so long-running agent calls must not block the response.
body = await request.body()
timestamp = request.headers.get("X-Slack-Request-Timestamp")
slack_signature = request.headers.get("X-Slack-Signature", "")
if not timestamp or not slack_signature:
raise HTTPException(status_code=400, detail="Missing Slack headers")
if not verify_slack_signature(body, timestamp, slack_signature, signing_secret=signing_secret):
raise HTTPException(status_code=403, detail="Invalid signature")
# Slack retries after ~3s if it doesn't get a 200. Since we ACK
# immediately and process in background, retries are always duplicates.
# Trade-off: if the server crashes mid-processing, the retried event
# carrying the same payload won't be reprocessed — acceptable for chat.
if request.headers.get("X-Slack-Retry-Num"):
return SlackEventResponse(status="ok")
data = await request.json()
if data.get("type") == "url_verification":
return SlackChallengeResponse(challenge=data.get("challenge"))
if "event" in data:
event = data["event"]
event_type = event.get("type")
# setSuggestedPrompts requires "Agents & AI Apps" mode (streaming UX only)
if event_type == "assistant_thread_started" and streaming:
background_tasks.add_task(_handle_thread_started, event)
# Bot self-loop prevention: check bot_id at both the top-level event
# and inside message_changed's nested "message" object. Without the
# nested check, edited bot messages would be reprocessed as new events.
elif (
event.get("bot_id")
or (event.get("message") or {}).get("bot_id")
or event.get("subtype") in _IGNORED_SUBTYPES
):
pass
elif streaming:
background_tasks.add_task(_stream_slack_response, data)
else:
background_tasks.add_task(_process_slack_event, event)
return SlackEventResponse(status="ok")
async def _process_slack_event(event: dict):
if not should_respond(event, reply_to_mentions_only):
return
from slack_sdk.web.async_client import AsyncWebClient
ctx = extract_event_context(event)
# Namespace with entity_id so threads don't collide across mounted interfaces
session_id = f"{entity_id}:{ctx['thread_id']}"
async_client = AsyncWebClient(token=slack_tools.token, ssl=ssl)
try:
await async_client.assistant_threads_setStatus(
channel_id=ctx["channel_id"],
thread_ts=ctx["thread_id"],
status=loading_text,
)
except Exception:
pass
try:
files, images, videos, audio, skipped = await download_event_files_async(
slack_tools.token, event, slack_tools.max_file_size
)
message_text = ctx["message_text"]
if skipped:
notice = "[Skipped files: " + ", ".join(skipped) + "]"
message_text = f"{notice}\n{message_text}"
run_kwargs: Dict[str, Any] = {
# Thread-scoped (not user-scoped) so all participants share context
"user_id": None,
"session_id": session_id,
"files": files or None,
"images": images or None,
"videos": videos or None,
"audio": audio or None,
}
response = await entity.arun(message_text, **run_kwargs) # type: ignore[union-attr]
if response:
if response.status == "ERROR":
log_error(f"Error processing message: {response.content}")
await send_slack_message_async(
async_client,
channel=ctx["channel_id"],
message=f"{_ERROR_MESSAGE} Please try again later.",
thread_ts=ctx["thread_id"],
)
return
if hasattr(response, "reasoning_content") and response.reasoning_content:
rc = str(response.reasoning_content)
formatted = "*Reasoning:*\n> " + rc.replace("\n", "\n> ")
await send_slack_message_async(
async_client,
channel=ctx["channel_id"],
message=formatted,
thread_ts=ctx["thread_id"],
)
content = str(response.content) if response.content else ""
await send_slack_message_async(
async_client,
channel=ctx["channel_id"],
message=content,
thread_ts=ctx["thread_id"],
)
await upload_response_media_async(async_client, response, ctx["channel_id"], ctx["thread_id"])
except Exception as e:
log_error(f"Error processing slack event: {e}")
await send_slack_message_async(
async_client,
channel=ctx["channel_id"],
message=_ERROR_MESSAGE,
thread_ts=ctx["thread_id"],
)
finally:
# Clear "Thinking..." status. In streaming mode stream.stop() handles
# this automatically, but the non-streaming path must clear explicitly.
try:
await async_client.assistant_threads_setStatus(
channel_id=ctx["channel_id"], thread_ts=ctx["thread_id"], status=""
)
except Exception:
pass
async def _stream_slack_response(data: dict):
    """Run the entity with streaming enabled and relay chunks into a Slack thread.

    Flow: show the "Thinking..." status, download attached files, start the
    entity run with event-level streaming, open a Slack chat stream, forward
    text and task-card events, then stop the stream with a terminal status.
    On failure the status is cleared, any open stream is stopped, and an error
    message is posted. NOTE(review): relies on closure variables from the
    enclosing router factory (entity, entity_id, entity_type, entity_name,
    slack_tools, ssl, loading_text, loading_messages, task_display_mode,
    buffer_size, reply_to_mentions_only) — confirm against the full file.
    """
    from slack_sdk.web.async_client import AsyncWebClient

    event = data["event"]
    if not should_respond(event, reply_to_mentions_only):
        return
    ctx = extract_event_context(event)
    # Session is keyed per entity per Slack thread
    session_id = f"{entity_id}:{ctx['thread_id']}"
    # Not consistently placed across Slack event envelope shapes
    team_id = data.get("team_id") or event.get("team")
    # CRITICAL: recipient_user_id must be the HUMAN user, not the bot.
    # event["user"] = human who sent the message. data["authorizations"][0]["user_id"]
    # = the bot's own user ID. Using the bot ID causes Slack to stream content
    # to an invisible recipient, resulting in a blank bubble until stopStream.
    user_id = ctx["user"]
    async_client = AsyncWebClient(token=slack_tools.token, ssl=ssl)
    state = StreamState(entity_type=entity_type, entity_name=entity_name)
    stream = None
    try:
        # Best-effort: show the loading indicator while the run is prepared
        try:
            status_kwargs: Dict[str, Any] = {
                "channel_id": ctx["channel_id"],
                "thread_ts": ctx["thread_id"],
                "status": loading_text,
            }
            if loading_messages:
                status_kwargs["loading_messages"] = loading_messages
            await async_client.assistant_threads_setStatus(**status_kwargs)
        except Exception:
            pass
        files, images, videos, audio, skipped = await download_event_files_async(
            slack_tools.token, event, slack_tools.max_file_size
        )
        message_text = ctx["message_text"]
        if skipped:
            # Surface skipped attachments to the model inside the prompt
            notice = "[Skipped files: " + ", ".join(skipped) + "]"
            message_text = f"{notice}\n{message_text}"
        run_kwargs: Dict[str, Any] = {
            "stream": True,
            # Enables event-level chunks for task card and tool lifecycle rendering
            "stream_events": True,
            # Thread-scoped (not user-scoped) so all participants share context
            "user_id": None,
            "session_id": session_id,
            "files": files or None,
            "images": images or None,
            "videos": videos or None,
            "audio": audio or None,
        }
        response_stream = entity.arun(message_text, **run_kwargs)  # type: ignore[union-attr]
        if response_stream is None:
            # Nothing to stream: clear the status indicator and bail out
            try:
                await async_client.assistant_threads_setStatus(
                    channel_id=ctx["channel_id"], thread_ts=ctx["thread_id"], status=""
                )
            except Exception:
                pass
            return
        # Deferred so "Thinking..." indicator stays visible during file
        # download and agent startup (opening earlier shows a blank bubble)
        stream = await async_client.chat_stream(
            channel=ctx["channel_id"],
            thread_ts=ctx["thread_id"],
            recipient_team_id=team_id,
            recipient_user_id=user_id,
            task_display_mode=task_display_mode,
            buffer_size=buffer_size,
        )
        async for chunk in response_stream:
            state.collect_media(chunk)
            ev = getattr(chunk, "event", None)
            if ev:
                # A truthy return from process_event signals a terminal event
                if await process_event(ev, chunk, state, stream):
                    break
            if state.has_content():
                if not state.title_set:
                    # Set the thread title once, from the user's first message
                    state.title_set = True
                    title = ctx["message_text"][:50].strip() or "New conversation"
                    try:
                        await async_client.assistant_threads_setTitle(
                            channel_id=ctx["channel_id"], thread_ts=ctx["thread_id"], title=title
                        )
                    except Exception:
                        pass
                await stream.append(markdown_text=state.flush())
        # Default to complete when no terminal error/cancel event arrived
        final_status: Literal["in_progress", "complete", "error"] = state.terminal_status or "complete"
        completion_chunks = state.resolve_all_pending(final_status) if state.task_cards else []
        stop_kwargs: Dict[str, Any] = {}
        if state.has_content():
            stop_kwargs["markdown_text"] = state.flush()
        if completion_chunks:
            stop_kwargs["chunks"] = completion_chunks
        await stream.stop(**stop_kwargs)
        await upload_response_media_async(async_client, state, ctx["channel_id"], ctx["thread_id"])
    except Exception as e:
        log_error(
            f"Error streaming slack response: {e} [channel={ctx['channel_id']}, thread={ctx['thread_id']}, user={user_id}]"
        )
        # Clear the loading status so the thread isn't stuck on "Thinking..."
        try:
            await async_client.assistant_threads_setStatus(
                channel_id=ctx["channel_id"], thread_ts=ctx["thread_id"], status=""
            )
        except Exception:
            pass
        # Clean up open stream so Slack doesn't show stuck progress indicators
        if stream is not None:
            try:
                stop_kwargs_err: Dict[str, Any] = {}
                if state.task_cards:
                    stop_kwargs_err["chunks"] = state.resolve_all_pending("error")
                await stream.stop(**stop_kwargs_err)
            except Exception:
                pass
        await send_slack_message_async(
            async_client,
            channel=ctx["channel_id"],
            message=_ERROR_MESSAGE,
            thread_ts=ctx["thread_id"],
        )
async def _handle_thread_started(event: dict):
    """Seed a newly opened assistant thread with suggested starter prompts."""
    from slack_sdk.web.async_client import AsyncWebClient

    client = AsyncWebClient(token=slack_tools.token, ssl=ssl)
    info = event.get("assistant_thread", {})
    channel = info.get("channel_id", "")
    ts = info.get("thread_ts", "")
    if not channel or not ts:
        # Malformed event payload: nothing to attach prompts to
        return
    # Fall back to generic prompts when none were configured
    prompt_list = suggested_prompts or [
        {"title": "Help", "message": "What can you help me with?"},
        {"title": "Search", "message": "Search the web for..."},
    ]
    try:
        await client.assistant_threads_setSuggestedPrompts(
            channel_id=channel, thread_ts=ts, prompts=prompt_list
        )
    except Exception as e:
        log_error(f"Failed to set suggested prompts: {e}")
return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/slack/router.py",
"license": "Apache License 2.0",
"lines": 336,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/interfaces/slack/slack.py | from ssl import SSLContext
from typing import Dict, List, Optional, Union
from fastapi.routing import APIRouter
from agno.agent import Agent, RemoteAgent
from agno.os.interfaces.base import BaseInterface
from agno.os.interfaces.slack.router import attach_routes
from agno.team import RemoteTeam, Team
from agno.workflow import RemoteWorkflow, Workflow
class Slack(BaseInterface):
    """Slack interface for AgentOS.

    Wires Slack event endpoints onto a FastAPI router and forwards incoming
    Slack messages to a single agent, team, or workflow.
    """

    type = "slack"

    router: APIRouter

    def __init__(
        self,
        agent: Optional[Union[Agent, RemoteAgent]] = None,
        team: Optional[Union[Team, RemoteTeam]] = None,
        workflow: Optional[Union[Workflow, RemoteWorkflow]] = None,
        prefix: str = "/slack",
        tags: Optional[List[str]] = None,
        reply_to_mentions_only: bool = True,
        token: Optional[str] = None,
        signing_secret: Optional[str] = None,
        streaming: bool = True,
        loading_messages: Optional[List[str]] = None,
        task_display_mode: str = "plan",
        loading_text: str = "Thinking...",
        suggested_prompts: Optional[List[Dict[str, str]]] = None,
        ssl: Optional[SSLContext] = None,
        buffer_size: int = 100,
        max_file_size: int = 1_073_741_824,  # 1GB
    ):
        """Store the interface configuration.

        Raises:
            ValueError: If none of agent, team, or workflow is provided.
        """
        # Entity that handles incoming messages (exactly one expected)
        self.agent = agent
        self.team = team
        self.workflow = workflow
        # Router configuration
        self.prefix = prefix
        self.tags = tags or ["Slack"]
        # Slack credentials and behavior flags
        self.reply_to_mentions_only = reply_to_mentions_only
        self.token = token
        self.signing_secret = signing_secret
        self.streaming = streaming
        self.loading_messages = loading_messages
        self.task_display_mode = task_display_mode
        self.loading_text = loading_text
        self.suggested_prompts = suggested_prompts
        self.ssl = ssl
        self.buffer_size = buffer_size
        self.max_file_size = max_file_size

        if not (self.agent or self.team or self.workflow):
            raise ValueError("Slack requires an agent, team, or workflow")

    def get_router(self) -> APIRouter:
        """Build and return the FastAPI router with all Slack routes attached."""
        base_router = APIRouter(prefix=self.prefix, tags=self.tags)  # type: ignore
        self.router = attach_routes(
            router=base_router,
            agent=self.agent,
            team=self.team,
            workflow=self.workflow,
            reply_to_mentions_only=self.reply_to_mentions_only,
            token=self.token,
            signing_secret=self.signing_secret,
            streaming=self.streaming,
            loading_messages=self.loading_messages,
            task_display_mode=self.task_display_mode,
            loading_text=self.loading_text,
            suggested_prompts=self.suggested_prompts,
            ssl=self.ssl,
            buffer_size=self.buffer_size,
            max_file_size=self.max_file_size,
        )
        return self.router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/slack/slack.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/interfaces/whatsapp/router.py | import base64
from os import getenv
from typing import Optional, Union
from fastapi import APIRouter, BackgroundTasks, HTTPException, Request
from fastapi.responses import PlainTextResponse
from agno.agent.agent import Agent
from agno.agent.remote import RemoteAgent
from agno.media import Audio, File, Image, Video
from agno.team.remote import RemoteTeam
from agno.team.team import Team
from agno.tools.whatsapp import WhatsAppTools
from agno.utils.log import log_error, log_info, log_warning
from agno.utils.whatsapp import get_media_async, send_image_message_async, typing_indicator_async, upload_media_async
from agno.workflow import RemoteWorkflow, Workflow
from .security import validate_webhook_signature
def attach_routes(
    router: APIRouter,
    agent: Optional[Union[Agent, RemoteAgent]] = None,
    team: Optional[Union[Team, RemoteTeam]] = None,
    workflow: Optional[Union[Workflow, RemoteWorkflow]] = None,
) -> APIRouter:
    """Attach WhatsApp Business webhook endpoints to the given router.

    Registers:
      - GET  /status   -> liveness probe
      - GET  /webhook  -> Meta webhook verification handshake
      - POST /webhook  -> signature-validated message intake; each message is
        processed in a FastAPI background task

    Args:
        router: The APIRouter to attach the endpoints to.
        agent: Optional agent that generates replies.
        team: Optional team that generates replies.
        workflow: Optional workflow that generates replies.

    Returns:
        The same router with the routes attached.

    Raises:
        ValueError: If no agent, team, or workflow is provided.
    """
    if agent is None and team is None and workflow is None:
        raise ValueError("Either agent, team, or workflow must be provided.")

    # Create WhatsApp tools instance once for reuse across requests
    whatsapp_tools = WhatsAppTools(async_mode=True)

    @router.get("/status")
    async def status():
        return {"status": "available"}

    @router.get("/webhook")
    async def verify_webhook(request: Request):
        """Handle WhatsApp webhook verification (Meta's subscribe handshake)."""
        mode = request.query_params.get("hub.mode")
        token = request.query_params.get("hub.verify_token")
        challenge = request.query_params.get("hub.challenge")

        verify_token = getenv("WHATSAPP_VERIFY_TOKEN")
        if not verify_token:
            raise HTTPException(status_code=500, detail="WHATSAPP_VERIFY_TOKEN is not set")

        if mode == "subscribe" and token == verify_token:
            if not challenge:
                raise HTTPException(status_code=400, detail="No challenge received")
            # Meta expects the raw challenge echoed back as plain text
            return PlainTextResponse(content=challenge)

        raise HTTPException(status_code=403, detail="Invalid verify token or mode")

    @router.post("/webhook")
    async def webhook(request: Request, background_tasks: BackgroundTasks):
        """Handle incoming WhatsApp messages."""
        try:
            # Raw payload is required for HMAC signature validation
            payload = await request.body()
            signature = request.headers.get("X-Hub-Signature-256")

            if not validate_webhook_signature(payload, signature):
                log_warning("Invalid webhook signature")
                raise HTTPException(status_code=403, detail="Invalid signature")

            body = await request.json()

            # Ignore webhooks that are not WhatsApp Business events
            if body.get("object") != "whatsapp_business_account":
                log_warning(f"Received non-WhatsApp webhook object: {body.get('object')}")
                return {"status": "ignored"}

            # Process messages in the background so the webhook ACKs quickly
            for entry in body.get("entry", []):
                for change in entry.get("changes", []):
                    messages = change.get("value", {}).get("messages", [])
                    if not messages:
                        continue
                    message = messages[0]
                    background_tasks.add_task(process_message, message, agent, team, workflow)

            return {"status": "processing"}
        except HTTPException:
            # Bug fix: the broad handler below used to swallow the deliberate
            # 403 (invalid signature) and re-raise it as a 500. Let intentional
            # HTTP errors propagate unchanged.
            raise
        except Exception as e:
            log_error(f"Error processing webhook: {str(e)}")
            raise HTTPException(status_code=500, detail=str(e))

    def _coerce_image_bytes(image_content, phone_number: Optional[str]) -> Optional[bytes]:
        """Normalize model image output (raw bytes or base64 str/bytes) to raw bytes.

        Returns None (after logging) when the content type is unexpected.
        """
        if isinstance(image_content, bytes):
            try:
                # Some models return base64 payloads encoded as UTF-8 bytes
                decoded_string = image_content.decode("utf-8")
                return base64.b64decode(decoded_string)
            except UnicodeDecodeError:
                # Already raw image bytes
                return image_content
        if isinstance(image_content, str):
            return base64.b64decode(image_content)
        log_error(f"Unexpected image content type: {type(image_content)} for user {phone_number}")
        return None

    async def process_message(
        message: dict,
        agent: Optional[Union[Agent, RemoteAgent]],
        team: Optional[Union[Team, RemoteTeam]],
        workflow: Optional[Union[Workflow, RemoteWorkflow]] = None,
    ):
        """Process a single WhatsApp message in the background."""
        # Bug fix: extracted up front so the error handler below can always
        # reach the sender; previously a failure before the assignment caused
        # a NameError that masked the original exception.
        phone_number = message.get("from")
        try:
            message_image = None
            message_video = None
            message_audio = None
            message_doc = None

            message_id = message.get("id")
            await typing_indicator_async(message_id)

            message_type = message.get("type")
            if message_type == "text":
                message_text = message["text"]["body"]
            elif message_type == "image":
                try:
                    message_text = message["image"]["caption"]
                except Exception:
                    message_text = "Describe the image"
                message_image = message["image"]["id"]
            elif message_type == "video":
                try:
                    message_text = message["video"]["caption"]
                except Exception:
                    message_text = "Describe the video"
                message_video = message["video"]["id"]
            elif message_type == "audio":
                message_text = "Reply to audio"
                message_audio = message["audio"]["id"]
            elif message_type == "document":
                message_text = "Process the document"
                message_doc = message["document"]["id"]
            else:
                # Unsupported message type (stickers, reactions, ...): ignore
                return

            log_info(f"Processing message from {phone_number}: {message_text}")

            # Download attached media once; the kwargs were identical across
            # the former per-entity branches, so they are built a single time.
            media_kwargs = {
                "images": [Image(content=await get_media_async(message_image))] if message_image else None,
                "files": [File(content=await get_media_async(message_doc))] if message_doc else None,
                "videos": [Video(content=await get_media_async(message_video))] if message_video else None,
                "audio": [Audio(content=await get_media_async(message_audio))] if message_audio else None,
            }

            # Generate and send response (attach_routes guarantees one entity)
            entity = agent or team or workflow
            if entity is None:
                log_error("No agent, team, or workflow available to process the message")
                return
            response = await entity.arun(  # type: ignore[union-attr,misc]
                message_text,
                user_id=phone_number,
                session_id=f"wa:{phone_number}",
                **media_kwargs,
            )

            if response.status == "ERROR":
                await _send_whatsapp_message(
                    phone_number, "Sorry, there was an error processing your message. Please try again later."
                )
                log_error(response.content)
                return

            if response.reasoning_content:
                await _send_whatsapp_message(phone_number, f"Reasoning: \n{response.reasoning_content}", italics=True)

            if response.images:
                number_of_images = len(response.images)
                # Bug fix: the original f-string had a stray "f" and logged
                # "images generated: f3"
                log_info(f"images generated: {number_of_images}")
                for generated_image in response.images:
                    image_content = generated_image.content
                    image_bytes = _coerce_image_bytes(image_content, phone_number)
                    if image_bytes:
                        media_id = await upload_media_async(
                            media_data=image_bytes, mime_type="image/png", filename="image.png"
                        )
                        await send_image_message_async(media_id=media_id, recipient=phone_number, text=response.content)
                    else:
                        log_warning(
                            f"Could not process image content for user {phone_number}. Type: {type(image_content)}"
                        )
                await _send_whatsapp_message(phone_number, response.content)  # type: ignore
            else:
                await _send_whatsapp_message(phone_number, response.content)  # type: ignore
        except Exception as e:
            log_error(f"Error processing message: {str(e)}")
            if phone_number:
                try:
                    await _send_whatsapp_message(
                        phone_number, "Sorry, there was an error processing your message. Please try again later."
                    )
                except Exception as send_error:
                    log_error(f"Error sending error message: {str(send_error)}")

    async def _send_whatsapp_message(recipient: str, message: str, italics: bool = False):
        """Send *message* to *recipient*, batching texts over WhatsApp's limit.

        Messages longer than 4096 chars are split into 4000-char batches with a
        "[i/n] " prefix (headroom below WhatsApp's 4096-char hard limit).
        """

        def _format(text: str) -> str:
            # Italicize line-by-line: WhatsApp's _..._ markup does not span newlines
            return "\n".join([f"_{line}_" for line in text.split("\n")]) if italics else text

        if len(message) <= 4096:
            await whatsapp_tools.send_text_message_async(recipient=recipient, text=_format(message))
            return

        message_batches = [message[i : i + 4000] for i in range(0, len(message), 4000)]
        for i, batch in enumerate(message_batches, 1):
            batch_message = f"[{i}/{len(message_batches)}] {batch}"
            await whatsapp_tools.send_text_message_async(recipient=recipient, text=_format(batch_message))

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/whatsapp/router.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/interfaces/whatsapp/whatsapp.py | from typing import List, Optional, Union
from fastapi.routing import APIRouter
from agno.agent import Agent, RemoteAgent
from agno.os.interfaces.base import BaseInterface
from agno.os.interfaces.whatsapp.router import attach_routes
from agno.team import RemoteTeam, Team
from agno.workflow import RemoteWorkflow, Workflow
class Whatsapp(BaseInterface):
    """WhatsApp Business interface for AgentOS.

    Exposes webhook endpoints that relay incoming WhatsApp messages to a
    single agent, team, or workflow.
    """

    type = "whatsapp"

    router: APIRouter

    def __init__(
        self,
        agent: Optional[Union[Agent, RemoteAgent]] = None,
        team: Optional[Union[Team, RemoteTeam]] = None,
        workflow: Optional[Union[Workflow, RemoteWorkflow]] = None,
        prefix: str = "/whatsapp",
        tags: Optional[List[str]] = None,
    ):
        """Store the configuration.

        Raises:
            ValueError: If none of agent, team, or workflow is provided.
        """
        self.agent = agent
        self.team = team
        self.workflow = workflow
        self.prefix = prefix
        self.tags = tags or ["Whatsapp"]

        if not (self.agent or self.team or self.workflow):
            raise ValueError("Whatsapp requires an agent, team, or workflow")

    def get_router(self) -> APIRouter:
        """Create the router and attach the WhatsApp webhook routes to it."""
        base_router = APIRouter(prefix=self.prefix, tags=self.tags)  # type: ignore
        self.router = attach_routes(router=base_router, agent=self.agent, team=self.team, workflow=self.workflow)
        return self.router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/whatsapp/whatsapp.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/mcp.py | """Router for MCP interface providing Model Context Protocol endpoints."""
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
from uuid import uuid4
from fastmcp import FastMCP
from fastmcp.server.http import (
StarletteWithLifespan,
)
from agno.db.base import AsyncBaseDb, BaseDb, SessionType
from agno.db.schemas import UserMemory
from agno.os.routers.memory.schemas import (
UserMemorySchema,
)
from agno.os.schema import (
AgentSessionDetailSchema,
AgentSummaryResponse,
ConfigResponse,
InterfaceResponse,
RunSchema,
SessionSchema,
TeamRunSchema,
TeamSessionDetailSchema,
TeamSummaryResponse,
WorkflowRunSchema,
WorkflowSessionDetailSchema,
WorkflowSummaryResponse,
)
from agno.os.utils import (
get_agent_by_id,
get_db,
get_team_by_id,
get_workflow_by_id,
)
from agno.remote.base import RemoteDb
from agno.run.agent import RunOutput
from agno.run.team import TeamRunOutput
from agno.run.workflow import WorkflowRunOutput
from agno.session import AgentSession, TeamSession, WorkflowSession
if TYPE_CHECKING:
from agno.os.app import AgentOS
logger = logging.getLogger(__name__)
def get_mcp_server(
os: "AgentOS",
) -> StarletteWithLifespan:
"""Attach MCP routes to the provided router."""
# Create an MCP server
mcp = FastMCP(os.name or "AgentOS")
@mcp.tool(
name="get_agentos_config",
description="Get the configuration of the AgentOS",
tags={"core"},
output_schema=ConfigResponse.model_json_schema(),
) # type: ignore
async def config() -> ConfigResponse:
return ConfigResponse(
os_id=os.id or "AgentOS",
description=os.description,
available_models=os.config.available_models if os.config else [],
databases=[db.id for db_list in os.dbs.values() for db in db_list],
chat=os.config.chat if os.config else None,
session=os._get_session_config(),
memory=os._get_memory_config(),
knowledge=os._get_knowledge_config(),
evals=os._get_evals_config(),
metrics=os._get_metrics_config(),
traces=os._get_traces_config(),
agents=[AgentSummaryResponse.from_agent(agent) for agent in os.agents] if os.agents else [],
teams=[TeamSummaryResponse.from_team(team) for team in os.teams] if os.teams else [],
workflows=[WorkflowSummaryResponse.from_workflow(w) for w in os.workflows] if os.workflows else [],
interfaces=[
InterfaceResponse(type=interface.type, version=interface.version, route=interface.prefix)
for interface in os.interfaces
],
)
# ==================== Core Run Tools ====================
@mcp.tool(name="run_agent", description="Run an agent with a message", tags={"core"}) # type: ignore
async def run_agent(agent_id: str, message: str) -> RunOutput:
agent = get_agent_by_id(agent_id, os.agents)
if agent is None:
raise Exception(f"Agent {agent_id} not found")
return await agent.arun(message) # type: ignore[misc]
@mcp.tool(name="run_team", description="Run a team with a message", tags={"core"}) # type: ignore
async def run_team(team_id: str, message: str) -> TeamRunOutput:
team = get_team_by_id(team_id, os.teams)
if team is None:
raise Exception(f"Team {team_id} not found")
return await team.arun(message) # type: ignore[misc]
@mcp.tool(name="run_workflow", description="Run a workflow with a message", tags={"core"}) # type: ignore
async def run_workflow(workflow_id: str, message: str) -> WorkflowRunOutput:
workflow = get_workflow_by_id(workflow_id, os.workflows)
if workflow is None:
raise Exception(f"Workflow {workflow_id} not found")
return await workflow.arun(message)
# ==================== Session Management Tools ====================
@mcp.tool(
name="get_sessions",
description="Get paginated list of sessions with optional filtering by type, component, user, and name",
tags={"session"},
) # type: ignore
async def get_sessions(
db_id: str,
session_type: str = "agent",
component_id: Optional[str] = None,
user_id: Optional[str] = None,
session_name: Optional[str] = None,
limit: int = 20,
page: int = 1,
sort_by: str = "created_at",
sort_order: str = "desc",
) -> Dict[str, Any]:
db = await get_db(os.dbs, db_id)
session_type_enum = SessionType(session_type)
if isinstance(db, RemoteDb):
result = await db.get_sessions(
session_type=session_type_enum,
component_id=component_id,
user_id=user_id,
session_name=session_name,
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
db_id=db_id,
)
return result.model_dump()
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
sessions, total_count = await db.get_sessions(
session_type=session_type_enum,
component_id=component_id,
user_id=user_id,
session_name=session_name,
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
deserialize=False,
)
else:
sessions, total_count = db.get_sessions(
session_type=session_type_enum,
component_id=component_id,
user_id=user_id,
session_name=session_name,
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
deserialize=False,
)
return {
"data": [SessionSchema.from_dict(session).model_dump() for session in sessions], # type: ignore
"meta": {
"page": page,
"limit": limit,
"total_count": total_count,
"total_pages": (total_count + limit - 1) // limit if limit > 0 else 0, # type: ignore
},
}
@mcp.tool(
name="get_session",
description="Get detailed information about a specific session by ID",
tags={"session"},
) # type: ignore
async def get_session(
session_id: str,
db_id: str,
session_type: str = "agent",
user_id: Optional[str] = None,
) -> Dict[str, Any]:
db = await get_db(os.dbs, db_id)
session_type_enum = SessionType(session_type)
if isinstance(db, RemoteDb):
result = await db.get_session(
session_id=session_id,
session_type=session_type_enum,
user_id=user_id,
db_id=db_id,
)
return result.model_dump()
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
session = await db.get_session(session_id=session_id, session_type=session_type_enum, user_id=user_id)
else:
db = cast(BaseDb, db)
session = db.get_session(session_id=session_id, session_type=session_type_enum, user_id=user_id)
if not session:
raise Exception(f"Session {session_id} not found")
if session_type_enum == SessionType.AGENT:
return AgentSessionDetailSchema.from_session(session).model_dump() # type: ignore
elif session_type_enum == SessionType.TEAM:
return TeamSessionDetailSchema.from_session(session).model_dump() # type: ignore
else:
return WorkflowSessionDetailSchema.from_session(session).model_dump() # type: ignore
@mcp.tool(
name="create_session",
description="Create a new session for an agent, team, or workflow",
tags={"session"},
) # type: ignore
async def create_session(
db_id: str,
session_type: str = "agent",
session_id: Optional[str] = None,
session_name: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
workflow_id: Optional[str] = None,
) -> Dict[str, Any]:
import time
db = await get_db(os.dbs, db_id)
session_type_enum = SessionType(session_type)
# Generate session_id if not provided
session_id = session_id or str(uuid4())
if isinstance(db, RemoteDb):
result = await db.create_session(
session_type=session_type_enum,
session_id=session_id,
session_name=session_name,
session_state=session_state,
metadata=metadata,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
db_id=db_id,
)
return result.model_dump()
# Prepare session_data
session_data: Dict[str, Any] = {}
if session_state is not None:
session_data["session_state"] = session_state
if session_name is not None:
session_data["session_name"] = session_name
current_time = int(time.time())
# Create the appropriate session type
session: Union[AgentSession, TeamSession, WorkflowSession]
if session_type_enum == SessionType.AGENT:
session = AgentSession(
session_id=session_id,
agent_id=agent_id,
user_id=user_id,
session_data=session_data if session_data else None,
metadata=metadata,
created_at=current_time,
updated_at=current_time,
)
elif session_type_enum == SessionType.TEAM:
session = TeamSession(
session_id=session_id,
team_id=team_id,
user_id=user_id,
session_data=session_data if session_data else None,
metadata=metadata,
created_at=current_time,
updated_at=current_time,
)
else:
session = WorkflowSession(
session_id=session_id,
workflow_id=workflow_id,
user_id=user_id,
session_data=session_data if session_data else None,
metadata=metadata,
created_at=current_time,
updated_at=current_time,
)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
created_session = await db.upsert_session(session, deserialize=True)
else:
created_session = db.upsert_session(session, deserialize=True)
if not created_session:
raise Exception("Failed to create session")
if session_type_enum == SessionType.AGENT:
return AgentSessionDetailSchema.from_session(created_session).model_dump() # type: ignore
elif session_type_enum == SessionType.TEAM:
return TeamSessionDetailSchema.from_session(created_session).model_dump() # type: ignore
else:
return WorkflowSessionDetailSchema.from_session(created_session).model_dump() # type: ignore
@mcp.tool(
name="get_session_runs",
description="Get all runs for a specific session",
tags={"session"},
) # type: ignore
async def get_session_runs(
session_id: str,
db_id: str,
session_type: str = "agent",
user_id: Optional[str] = None,
) -> List[Dict[str, Any]]:
db = await get_db(os.dbs, db_id)
session_type_enum = SessionType(session_type)
if isinstance(db, RemoteDb):
result = await db.get_session_runs(
session_id=session_id,
session_type=session_type_enum,
user_id=user_id,
db_id=db_id,
)
return [r.model_dump() for r in result]
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
session = await db.get_session(
session_id=session_id, session_type=session_type_enum, user_id=user_id, deserialize=False
)
else:
session = db.get_session(
session_id=session_id, session_type=session_type_enum, user_id=user_id, deserialize=False
)
if not session:
raise Exception(f"Session {session_id} not found")
runs = session.get("runs") # type: ignore
if not runs:
return []
run_responses: List[Dict[str, Any]] = []
for run in runs:
if session_type_enum == SessionType.AGENT:
run_responses.append(RunSchema.from_dict(run).model_dump())
elif session_type_enum == SessionType.TEAM:
if run.get("agent_id") is not None:
run_responses.append(RunSchema.from_dict(run).model_dump())
else:
run_responses.append(TeamRunSchema.from_dict(run).model_dump())
else:
if run.get("workflow_id") is not None:
run_responses.append(WorkflowRunSchema.from_dict(run).model_dump())
elif run.get("team_id") is not None:
run_responses.append(TeamRunSchema.from_dict(run).model_dump())
else:
run_responses.append(RunSchema.from_dict(run).model_dump())
return run_responses
@mcp.tool(
name="get_session_run",
description="Get a specific run from a session",
tags={"session"},
) # type: ignore
async def get_session_run(
session_id: str,
run_id: str,
db_id: str,
session_type: str = "agent",
user_id: Optional[str] = None,
) -> Dict[str, Any]:
db = await get_db(os.dbs, db_id)
session_type_enum = SessionType(session_type)
if isinstance(db, RemoteDb):
result = await db.get_session_run(
session_id=session_id,
run_id=run_id,
session_type=session_type_enum,
user_id=user_id,
db_id=db_id,
)
return result.model_dump()
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
session = await db.get_session(
session_id=session_id, session_type=session_type_enum, user_id=user_id, deserialize=False
)
else:
session = db.get_session(
session_id=session_id, session_type=session_type_enum, user_id=user_id, deserialize=False
)
if not session:
raise Exception(f"Session {session_id} not found")
runs = session.get("runs") # type: ignore
if not runs:
raise Exception(f"Session {session_id} has no runs")
target_run = None
for run in runs:
if run.get("run_id") == run_id:
target_run = run
break
if not target_run:
raise Exception(f"Run {run_id} not found in session {session_id}")
if target_run.get("workflow_id") is not None:
return WorkflowRunSchema.from_dict(target_run).model_dump()
elif target_run.get("team_id") is not None:
return TeamRunSchema.from_dict(target_run).model_dump()
else:
return RunSchema.from_dict(target_run).model_dump()
@mcp.tool(
name="rename_session",
description="Rename an existing session",
tags={"session"},
) # type: ignore
async def rename_session(
session_id: str,
session_name: str,
db_id: str,
session_type: str = "agent",
user_id: Optional[str] = None,
) -> Dict[str, Any]:
db = await get_db(os.dbs, db_id)
session_type_enum = SessionType(session_type)
if isinstance(db, RemoteDb):
result = await db.rename_session(
session_id=session_id,
session_name=session_name,
session_type=session_type_enum,
db_id=db_id,
)
return result.model_dump()
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
session = await db.rename_session(
session_id=session_id, session_type=session_type_enum, session_name=session_name, user_id=user_id
)
else:
db = cast(BaseDb, db)
session = db.rename_session(
session_id=session_id, session_type=session_type_enum, session_name=session_name, user_id=user_id
)
if not session:
raise Exception(f"Session {session_id} not found")
if session_type_enum == SessionType.AGENT:
return AgentSessionDetailSchema.from_session(session).model_dump() # type: ignore
elif session_type_enum == SessionType.TEAM:
return TeamSessionDetailSchema.from_session(session).model_dump() # type: ignore
else:
return WorkflowSessionDetailSchema.from_session(session).model_dump() # type: ignore
@mcp.tool(
    name="update_session",
    description="Update session properties like name, state, metadata, or summary",
    tags={"session"},
)  # type: ignore
async def update_session(
    session_id: str,
    db_id: str,
    session_type: str = "agent",
    session_name: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    summary: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
) -> Dict[str, Any]:
    """Partially update a session: only the fields passed as non-None are changed.

    Local databases do a read-modify-write (get, mutate in memory, upsert);
    remote databases proxy the whole update in a single call.

    Raises:
        Exception: if the session does not exist or the upsert returns nothing.
    """
    db = await get_db(os.dbs, db_id)
    session_type_enum = SessionType(session_type)
    if isinstance(db, RemoteDb):
        result = await db.update_session(
            session_id=session_id,
            session_type=session_type_enum,
            session_name=session_name,
            session_state=session_state,
            metadata=metadata,
            summary=summary,
            user_id=user_id,
            db_id=db_id,
        )
        return result.model_dump()
    # Get the existing session
    if isinstance(db, AsyncBaseDb):
        db = cast(AsyncBaseDb, db)
        existing_session = await db.get_session(
            session_id=session_id, session_type=session_type_enum, user_id=user_id, deserialize=True
        )
    else:
        existing_session = db.get_session(
            session_id=session_id, session_type=session_type_enum, user_id=user_id, deserialize=True
        )
    if not existing_session:
        raise Exception(f"Session {session_id} not found")
    # Update session properties
    # session_name and session_state live inside session_data, which may be
    # absent on the stored session — create it lazily before writing into it.
    if session_name is not None:
        if existing_session.session_data is None:  # type: ignore
            existing_session.session_data = {}  # type: ignore
        existing_session.session_data["session_name"] = session_name  # type: ignore
    if session_state is not None:
        if existing_session.session_data is None:  # type: ignore
            existing_session.session_data = {}  # type: ignore
        existing_session.session_data["session_state"] = session_state  # type: ignore
    if metadata is not None:
        existing_session.metadata = metadata  # type: ignore
    if summary is not None:
        # Imported lazily, only when a summary update is actually requested.
        from agno.session.summary import SessionSummary
        existing_session.summary = SessionSummary.from_dict(summary)  # type: ignore
    # Upsert the updated session
    if isinstance(db, AsyncBaseDb):
        updated_session = await db.upsert_session(existing_session, deserialize=True)  # type: ignore
    else:
        updated_session = db.upsert_session(existing_session, deserialize=True)  # type: ignore
    if not updated_session:
        raise Exception("Failed to update session")
    # Serialize with the schema matching the session type.
    if session_type_enum == SessionType.AGENT:
        return AgentSessionDetailSchema.from_session(updated_session).model_dump()  # type: ignore
    elif session_type_enum == SessionType.TEAM:
        return TeamSessionDetailSchema.from_session(updated_session).model_dump()  # type: ignore
    else:
        return WorkflowSessionDetailSchema.from_session(updated_session).model_dump()  # type: ignore
@mcp.tool(
    name="delete_session",
    description="Delete a specific session and all its runs",
    tags={"session"},
)  # type: ignore
async def delete_session(
    session_id: str,
    db_id: str,
    user_id: Optional[str] = None,
) -> str:
    """Delete a single session; returns a confirmation string."""
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        # Remote deletion is proxied to the remote AgentOS.
        await database.delete_session(session_id=session_id, db_id=db_id)
    elif isinstance(database, AsyncBaseDb):
        await cast(AsyncBaseDb, database).delete_session(session_id=session_id, user_id=user_id)
    else:
        cast(BaseDb, database).delete_session(session_id=session_id, user_id=user_id)
    return "Session deleted successfully"
@mcp.tool(
    name="delete_sessions",
    description="Delete multiple sessions by their IDs",
    tags={"session"},
)  # type: ignore
async def delete_sessions(
    session_ids: List[str],
    db_id: str,
    session_types: Optional[List[str]] = None,
    user_id: Optional[str] = None,
) -> str:
    """Bulk-delete sessions; returns a confirmation string."""
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        # NOTE: session_types are only forwarded on the remote path; the
        # local delete_sessions calls below are not given them.
        type_enums = [SessionType(st) for st in session_types] if session_types else []
        await database.delete_sessions(session_ids=session_ids, session_types=type_enums, db_id=db_id)
    elif isinstance(database, AsyncBaseDb):
        await cast(AsyncBaseDb, database).delete_sessions(session_ids=session_ids, user_id=user_id)
    else:
        cast(BaseDb, database).delete_sessions(session_ids=session_ids, user_id=user_id)
    return "Sessions deleted successfully"
# ==================== Memory Management Tools ====================
@mcp.tool(name="create_memory", description="Create a new user memory", tags={"memory"})  # type: ignore
async def create_memory(
    db_id: str,
    memory: str,
    user_id: str,
    topics: Optional[List[str]] = None,
) -> UserMemorySchema:
    """Create a user memory with a fresh ID and return it as a schema.

    Raises:
        Exception: if the upsert returns nothing.
    """
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        return await database.create_memory(
            memory=memory,
            topics=topics or [],
            user_id=user_id,
            db_id=db_id,
        )
    # Local path: build the memory object once, then upsert through the
    # async or sync API depending on the database flavour.
    new_memory = UserMemory(
        memory_id=str(uuid4()),
        memory=memory,
        topics=topics or [],
        user_id=user_id,
    )
    if isinstance(database, AsyncBaseDb):
        stored = await cast(AsyncBaseDb, database).upsert_user_memory(memory=new_memory, deserialize=False)
    else:
        stored = cast(BaseDb, database).upsert_user_memory(memory=new_memory, deserialize=False)
    if not stored:
        raise Exception("Failed to create memory")
    return UserMemorySchema.from_dict(stored)  # type: ignore
@mcp.tool(
    name="get_memory",
    description="Get a specific memory by ID",
    tags={"memory"},
)  # type: ignore
async def get_memory(
    memory_id: str,
    db_id: str,
    user_id: Optional[str] = None,
) -> UserMemorySchema:
    """Fetch one memory by ID.

    Raises:
        Exception: if no memory with that ID exists.
    """
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        return await database.get_memory(memory_id=memory_id, user_id=user_id, db_id=db_id)
    if isinstance(database, AsyncBaseDb):
        found = await cast(AsyncBaseDb, database).get_user_memory(
            memory_id=memory_id, user_id=user_id, deserialize=False
        )
    else:
        found = cast(BaseDb, database).get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
    if not found:
        raise Exception(f"Memory {memory_id} not found")
    return UserMemorySchema.from_dict(found)  # type: ignore
@mcp.tool(
    name="get_memories",
    description="Get a paginated list of memories with optional filtering",
    tags={"memory"},
)  # type: ignore
async def get_memories(
    db_id: str,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    topics: Optional[List[str]] = None,
    search_content: Optional[str] = None,
    limit: int = 20,
    page: int = 1,
    sort_by: str = "updated_at",
    sort_order: str = "desc",
) -> Dict[str, Any]:
    """Return one page of memories plus pagination metadata under "meta"."""
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        remote_page = await database.get_memories(
            user_id=user_id or "",
            agent_id=agent_id,
            team_id=team_id,
            topics=topics,
            search_content=search_content,
            limit=limit,
            page=page,
            sort_by=sort_by,
            sort_order=sort_order,
            db_id=db_id,
        )
        return remote_page.model_dump()

    # The async and sync local APIs take the same keyword set.
    query_kwargs = dict(
        limit=limit,
        page=page,
        user_id=user_id,
        agent_id=agent_id,
        team_id=team_id,
        topics=topics,
        search_content=search_content,
        sort_by=sort_by,
        sort_order=sort_order,
        deserialize=False,
    )
    if isinstance(database, AsyncBaseDb):
        raw_memories, total_count = await cast(AsyncBaseDb, database).get_user_memories(**query_kwargs)
    else:
        raw_memories, total_count = cast(BaseDb, database).get_user_memories(**query_kwargs)

    schemas = [UserMemorySchema.from_dict(m) for m in raw_memories]  # type: ignore
    return {
        "data": [s.model_dump() for s in schemas if s is not None],
        "meta": {
            "page": page,
            "limit": limit,
            "total_count": total_count,
            # Ceiling division; a non-positive limit yields zero pages.
            "total_pages": (total_count + limit - 1) // limit if limit > 0 else 0,  # type: ignore
        },
    }
@mcp.tool(name="update_memory", description="Update an existing memory", tags={"memory"})  # type: ignore
async def update_memory(
    db_id: str,
    memory_id: str,
    memory: str,
    user_id: str,
    topics: Optional[List[str]] = None,
) -> UserMemorySchema:
    """Upsert a memory under an existing memory_id and return the result.

    Raises:
        Exception: if the upsert returns nothing.
    """
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        return await database.update_memory(
            memory_id=memory_id,
            memory=memory,
            topics=topics or [],
            user_id=user_id,
            db_id=db_id,
        )
    # Local path: construct the replacement object once, then upsert.
    replacement = UserMemory(
        memory_id=memory_id,
        memory=memory,
        topics=topics or [],
        user_id=user_id,
    )
    if isinstance(database, AsyncBaseDb):
        stored = await cast(AsyncBaseDb, database).upsert_user_memory(memory=replacement, deserialize=False)
    else:
        stored = cast(BaseDb, database).upsert_user_memory(memory=replacement, deserialize=False)
    if not stored:
        raise Exception("Failed to update memory")
    return UserMemorySchema.from_dict(stored)  # type: ignore
@mcp.tool(name="delete_memory", description="Delete a specific memory by ID", tags={"memory"})  # type: ignore
async def delete_memory(
    db_id: str,
    memory_id: str,
    user_id: Optional[str] = None,
) -> str:
    """Delete one memory; returns a confirmation string."""
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        await database.delete_memory(memory_id=memory_id, user_id=user_id, db_id=db_id)
    elif isinstance(database, AsyncBaseDb):
        await cast(AsyncBaseDb, database).delete_user_memory(memory_id=memory_id, user_id=user_id)
    else:
        cast(BaseDb, database).delete_user_memory(memory_id=memory_id, user_id=user_id)
    return "Memory deleted successfully"
@mcp.tool(
    name="delete_memories",
    description="Delete multiple memories by their IDs",
    tags={"memory"},
)  # type: ignore
async def delete_memories(
    memory_ids: List[str],
    db_id: str,
    user_id: Optional[str] = None,
) -> str:
    """Bulk-delete memories; returns a confirmation string."""
    database = await get_db(os.dbs, db_id)
    if isinstance(database, RemoteDb):
        await database.delete_memories(memory_ids=memory_ids, user_id=user_id, db_id=db_id)
    elif isinstance(database, AsyncBaseDb):
        await cast(AsyncBaseDb, database).delete_user_memories(memory_ids=memory_ids, user_id=user_id)
    else:
        cast(BaseDb, database).delete_user_memories(memory_ids=memory_ids, user_id=user_id)
    return "Memories deleted successfully"
# Use http_app for Streamable HTTP transport (modern MCP standard)
mcp_app = mcp.http_app(path="/mcp")
# Add JWT middleware to MCP app if authorization is enabled
if os.authorization and os.authorization_config:
    # Imported here so the middleware dependency is only loaded when needed.
    from agno.os.middleware.jwt import JWTMiddleware
    mcp_app.add_middleware(
        JWTMiddleware,
        verification_keys=os.authorization_config.verification_keys,
        jwks_file=os.authorization_config.jwks_file,
        # Defaults applied when the config leaves these unset.
        algorithm=os.authorization_config.algorithm or "RS256",
        authorization=os.authorization,
        verify_audience=os.authorization_config.verify_audience or False,
    )
return mcp_app
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/mcp.py",
"license": "Apache License 2.0",
"lines": 764,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/router.py | import json
from typing import TYPE_CHECKING, List, cast
from fastapi import (
APIRouter,
Depends,
HTTPException,
WebSocket,
)
from agno.exceptions import RemoteServerUnavailableError
from agno.os.auth import get_authentication_dependency, validate_websocket_token
from agno.os.managers import websocket_manager
from agno.os.routers.workflows.router import handle_workflow_subscription, handle_workflow_via_websocket
from agno.os.schema import (
AgentSummaryResponse,
BadRequestResponse,
ConfigResponse,
InterfaceResponse,
InternalServerErrorResponse,
Model,
NotFoundResponse,
TeamSummaryResponse,
UnauthenticatedResponse,
ValidationErrorResponse,
WorkflowSummaryResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.utils.log import logger
if TYPE_CHECKING:
from agno.os.app import AgentOS
def get_base_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
) -> APIRouter:
    """
    Create the base FastAPI router with comprehensive OpenAPI documentation.

    This router provides endpoints for:
    - Core system operations (health, config, models)
    - Agent management and execution
    - Team collaboration and coordination
    - Workflow automation and orchestration

    All endpoints include detailed documentation, examples, and proper error handling.

    Args:
        os: The AgentOS instance whose agents/teams/workflows back the endpoints.
        settings: API settings; used to build the authentication dependency.

    Returns:
        An APIRouter with the /config and /models endpoints attached.
    """
    router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        responses={
            400: {"description": "Bad Request", "model": BadRequestResponse},
            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
            404: {"description": "Not Found", "model": NotFoundResponse},
            422: {"description": "Validation Error", "model": ValidationErrorResponse},
            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
        },
    )

    # -- Main Routes ---
    @router.get(
        "/config",
        response_model=ConfigResponse,
        response_model_exclude_none=True,
        tags=["Core"],
        operation_id="get_config",
        summary="Get OS Configuration",
        description=(
            "Retrieve the complete configuration of the AgentOS instance, including:\n\n"
            "- Available models and databases\n"
            "- Registered agents, teams, and workflows\n"
            "- Chat, session, memory, knowledge, and evaluation configurations\n"
            "- Available interfaces and their routes"
        ),
        responses={
            200: {
                "description": "OS configuration retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "demo",
                            "description": "Example AgentOS configuration",
                            "available_models": [],
                            "databases": ["9c884dc4-9066-448c-9074-ef49ec7eb73c"],
                            "session": {
                                "dbs": [
                                    {
                                        "db_id": "9c884dc4-9066-448c-9074-ef49ec7eb73c",
                                        "domain_config": {"display_name": "Sessions"},
                                    }
                                ]
                            },
                            "metrics": {
                                "dbs": [
                                    {
                                        "db_id": "9c884dc4-9066-448c-9074-ef49ec7eb73c",
                                        "domain_config": {"display_name": "Metrics"},
                                    }
                                ]
                            },
                            "memory": {
                                "dbs": [
                                    {
                                        "db_id": "9c884dc4-9066-448c-9074-ef49ec7eb73c",
                                        "domain_config": {"display_name": "Memory"},
                                    }
                                ]
                            },
                            "knowledge": {
                                "dbs": [
                                    {
                                        "db_id": "9c884dc4-9066-448c-9074-ef49ec7eb73c",
                                        "domain_config": {"display_name": "Knowledge"},
                                    }
                                ]
                            },
                            "evals": {
                                "dbs": [
                                    {
                                        "db_id": "9c884dc4-9066-448c-9074-ef49ec7eb73c",
                                        "domain_config": {"display_name": "Evals"},
                                    }
                                ]
                            },
                            "agents": [
                                {
                                    "id": "main-agent",
                                    "name": "Main Agent",
                                    "db_id": "9c884dc4-9066-448c-9074-ef49ec7eb73c",
                                }
                            ],
                            "teams": [],
                            "workflows": [],
                            "interfaces": [],
                        }
                    }
                },
            }
        },
    )
    async def config() -> ConfigResponse:
        try:
            # Building summaries may reach out to remote components, so a
            # remote-server failure here is surfaced as a 502 below.
            agent_summaries = [AgentSummaryResponse.from_agent(agent) for agent in os.agents] if os.agents else []
            team_summaries = [TeamSummaryResponse.from_team(team) for team in os.teams] if os.teams else []
            workflow_summaries = (
                [WorkflowSummaryResponse.from_workflow(workflow) for workflow in os.workflows]
                if os.workflows
                else []
            )
        except RemoteServerUnavailableError as e:
            raise HTTPException(
                status_code=502,
                detail=f"Failed to fetch config from remote AgentOS: {e}",
            )

        return ConfigResponse(
            os_id=os.id or "Unnamed OS",
            description=os.description,
            available_models=os.config.available_models if os.config else [],
            os_database=os.db.id if os.db else None,
            # Deduplicate database ids across every registered database list.
            databases=list({db.id for dbs in os.dbs.values() for db in dbs}),
            chat=os.config.chat if os.config else None,
            session=os._get_session_config(),
            memory=os._get_memory_config(),
            knowledge=os._get_knowledge_config(),
            evals=os._get_evals_config(),
            metrics=os._get_metrics_config(),
            agents=agent_summaries,
            teams=team_summaries,
            workflows=workflow_summaries,
            traces=os._get_traces_config(),
            interfaces=[
                InterfaceResponse(type=interface.type, version=interface.version, route=interface.prefix)
                for interface in os.interfaces
            ],
        )

    @router.get(
        "/models",
        response_model=List[Model],
        response_model_exclude_none=True,
        tags=["Core"],
        operation_id="get_models",
        summary="Get Available Models",
        description=(
            "Retrieve a list of all unique models currently used by agents and teams in this OS instance. "
            "This includes the model ID and provider information for each model."
        ),
        responses={
            200: {
                "description": "List of models retrieved successfully",
                "content": {
                    "application/json": {
                        "example": [
                            {"id": "gpt-4", "provider": "openai"},
                            {"id": "claude-3-sonnet", "provider": "anthropic"},
                        ]
                    }
                },
            }
        },
    )
    async def get_models() -> List[Model]:
        """Return the list of all models used by agents and teams in the contextual OS"""
        unique_models = {}
        # Dedupe on (id, provider); agents are scanned before teams so the
        # listing order matches registration order.
        for component in list(os.agents or []) + list(os.teams or []):
            model = cast(Model, component.model)
            if model and model.id is not None and model.provider is not None:
                key = (model.id, model.provider)
                if key not in unique_models:
                    unique_models[key] = Model(id=model.id, provider=model.provider)
        return list(unique_models.values())

    return router
def get_websocket_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
) -> APIRouter:
    """
    Create WebSocket router without HTTP authentication dependencies.
    WebSocket endpoints handle authentication internally via message-based auth.
    """
    ws_router = APIRouter()

    @ws_router.websocket(
        "/workflows/ws",
        name="workflow_websocket",
    )
    async def workflow_websocket_endpoint(websocket: WebSocket):
        """WebSocket endpoint for receiving real-time workflow events.

        Message protocol (JSON, one object per text frame, keyed by "action"):
        - "authenticate": validates message["token"]; required first when an
          os_security_key is configured.
        - "ping": replies with {"event": "pong"}.
        - "start-workflow": starts a workflow run over this socket.
        - "reconnect": re-subscribes to an existing workflow run.
        Any other action gets an {"event": "error", ...} reply.
        """
        # Auth is only enforced when an OS security key is configured.
        requires_auth = bool(settings.os_security_key)
        await websocket_manager.connect(websocket, requires_auth=requires_auth)

        try:
            while True:
                data = await websocket.receive_text()
                message = json.loads(data)
                action = message.get("action")

                # Handle authentication first
                if action == "authenticate":
                    token = message.get("token")
                    if not token:
                        await websocket.send_text(json.dumps({"event": "auth_error", "error": "Token is required"}))
                        continue

                    if validate_websocket_token(token, settings):
                        await websocket_manager.authenticate_websocket(websocket)
                    else:
                        await websocket.send_text(json.dumps({"event": "auth_error", "error": "Invalid token"}))
                    continue

                # Check authentication for all other actions (only when required)
                elif requires_auth and not websocket_manager.is_authenticated(websocket):
                    await websocket.send_text(
                        json.dumps(
                            {
                                "event": "auth_required",
                                "error": "Authentication required. Send authenticate action with valid token.",
                            }
                        )
                    )
                    continue

                # Handle authenticated actions
                elif action == "ping":
                    await websocket.send_text(json.dumps({"event": "pong"}))

                elif action == "start-workflow":
                    # Handle workflow execution directly via WebSocket
                    await handle_workflow_via_websocket(websocket, message, os)

                elif action == "reconnect":
                    # Subscribe/reconnect to an existing workflow run
                    await handle_workflow_subscription(websocket, message, os)

                else:
                    await websocket.send_text(json.dumps({"event": "error", "error": f"Unknown action: {action}"}))

        except Exception as e:
            # 1012 (service restart) / 1001 (going away) are routine
            # disconnects, so they are not logged as errors.
            if "1012" not in str(e) and "1001" not in str(e):
                logger.error(f"WebSocket error: {e}")
        finally:
            # Clean up the websocket connection
            await websocket_manager.disconnect_websocket(websocket)

    return ws_router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/router.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/evals/evals.py | import logging
from copy import deepcopy
from typing import List, Optional, Union, cast
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from agno.agent import Agent, RemoteAgent
from agno.db.base import AsyncBaseDb, BaseDb
from agno.db.schemas.evals import EvalFilterType, EvalType
from agno.models.utils import get_model
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency
from agno.os.routers.evals.schemas import (
DeleteEvalRunsRequest,
EvalRunInput,
EvalSchema,
UpdateEvalRunRequest,
)
from agno.os.routers.evals.utils import (
run_accuracy_eval,
run_agent_as_judge_eval,
run_performance_eval,
run_reliability_eval,
)
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
SortOrder,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import get_agent_by_id, get_db, get_team_by_id
from agno.remote.base import RemoteDb
from agno.team import RemoteTeam, Team
from agno.utils.log import log_warning
logger = logging.getLogger(__name__)
def get_eval_router(
dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]],
agents: Optional[List[Union[Agent, RemoteAgent]]] = None,
teams: Optional[List[Union[Team, RemoteTeam]]] = None,
settings: AgnoAPISettings = AgnoAPISettings(),
) -> APIRouter:
"""Create eval router with comprehensive OpenAPI documentation for agent/team evaluation endpoints."""
router = APIRouter(
dependencies=[Depends(get_authentication_dependency(settings))],
tags=["Evals"],
responses={
400: {"description": "Bad Request", "model": BadRequestResponse},
401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
404: {"description": "Not Found", "model": NotFoundResponse},
422: {"description": "Validation Error", "model": ValidationErrorResponse},
500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
},
)
return attach_routes(router=router, dbs=dbs, agents=agents, teams=teams)
def attach_routes(
router: APIRouter,
dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]],
agents: Optional[List[Union[Agent, RemoteAgent]]] = None,
teams: Optional[List[Union[Team, RemoteTeam]]] = None,
) -> APIRouter:
@router.get(
"/eval-runs",
response_model=PaginatedResponse[EvalSchema],
status_code=200,
operation_id="get_eval_runs",
summary="List Evaluation Runs",
description=(
"Retrieve paginated evaluation runs with filtering and sorting options. "
"Filter by agent, team, workflow, model, or evaluation type."
),
responses={
200: {
"description": "Evaluation runs retrieved successfully",
"content": {
"application/json": {
"example": {
"data": [
{
"id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
"agent_id": "basic-agent",
"model_id": "gpt-4o",
"model_provider": "OpenAI",
"team_id": None,
"workflow_id": None,
"name": "Test ",
"evaluated_component_name": None,
"eval_type": "reliability",
"eval_data": {
"eval_status": "PASSED",
"failed_tool_calls": [],
"passed_tool_calls": ["multiply"],
},
"eval_input": {"expected_tool_calls": ["multiply"]},
"created_at": "2025-08-27T15:41:59Z",
"updated_at": "2025-08-27T15:41:59Z",
}
]
}
}
},
}
},
)
async def get_eval_runs(
request: Request,
agent_id: Optional[str] = Query(default=None, description="Agent ID"),
team_id: Optional[str] = Query(default=None, description="Team ID"),
workflow_id: Optional[str] = Query(default=None, description="Workflow ID"),
model_id: Optional[str] = Query(default=None, description="Model ID"),
filter_type: Optional[EvalFilterType] = Query(default=None, description="Filter type", alias="type"),
eval_types: Optional[List[EvalType]] = Depends(parse_eval_types_filter),
limit: Optional[int] = Query(default=20, description="Number of eval runs to return", ge=1),
page: Optional[int] = Query(default=1, description="Page number", ge=0),
sort_by: Optional[str] = Query(default="created_at", description="Field to sort by"),
sort_order: Optional[SortOrder] = Query(default="desc", description="Sort order (asc or desc)"),
db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
table: Optional[str] = Query(default=None, description="The database table to use"),
) -> PaginatedResponse[EvalSchema]:
db = await get_db(dbs, db_id, table)
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.get_eval_runs(
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order.value if sort_order else None,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
model_id=model_id,
eval_types=eval_types,
filter_type=filter_type.value if filter_type else None,
headers=headers,
)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
eval_runs, total_count = await db.get_eval_runs(
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
model_id=model_id,
eval_type=eval_types,
filter_type=filter_type,
deserialize=False,
)
else:
eval_runs, total_count = db.get_eval_runs( # type: ignore
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
model_id=model_id,
eval_type=eval_types,
filter_type=filter_type,
deserialize=False,
)
return PaginatedResponse(
data=[EvalSchema.from_dict(eval_run) for eval_run in eval_runs], # type: ignore
meta=PaginationInfo(
page=page,
limit=limit,
total_count=total_count, # type: ignore
total_pages=(total_count + limit - 1) // limit if limit is not None and limit > 0 else 0, # type: ignore
),
)
@router.get(
"/eval-runs/{eval_run_id}",
response_model=EvalSchema,
status_code=200,
operation_id="get_eval_run",
summary="Get Evaluation Run",
description="Retrieve detailed results and metrics for a specific evaluation run.",
responses={
200: {
"description": "Evaluation run details retrieved successfully",
"content": {
"application/json": {
"example": {
"id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
"agent_id": "basic-agent",
"model_id": "gpt-4o",
"model_provider": "OpenAI",
"team_id": None,
"workflow_id": None,
"name": "Test ",
"evaluated_component_name": None,
"eval_type": "reliability",
"eval_data": {
"eval_status": "PASSED",
"failed_tool_calls": [],
"passed_tool_calls": ["multiply"],
},
"eval_input": {"expected_tool_calls": ["multiply"]},
"created_at": "2025-08-27T15:41:59Z",
"updated_at": "2025-08-27T15:41:59Z",
}
}
},
},
404: {"description": "Evaluation run not found", "model": NotFoundResponse},
},
)
async def get_eval_run(
request: Request,
eval_run_id: str,
db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
table: Optional[str] = Query(default=None, description="Table to query eval run from"),
) -> EvalSchema:
db = await get_db(dbs, db_id, table)
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.get_eval_run(eval_run_id=eval_run_id, db_id=db_id, table=table, headers=headers)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
eval_run = await db.get_eval_run(eval_run_id=eval_run_id, deserialize=False)
else:
eval_run = db.get_eval_run(eval_run_id=eval_run_id, deserialize=False)
if not eval_run:
raise HTTPException(status_code=404, detail=f"Eval run with id '{eval_run_id}' not found")
return EvalSchema.from_dict(eval_run) # type: ignore
@router.delete(
"/eval-runs",
status_code=204,
operation_id="delete_eval_runs",
summary="Delete Evaluation Runs",
description="Delete multiple evaluation runs by their IDs. This action cannot be undone.",
responses={
204: {},
500: {"description": "Failed to delete evaluation runs", "model": InternalServerErrorResponse},
},
)
async def delete_eval_runs(
http_request: Request,
request: DeleteEvalRunsRequest,
db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
table: Optional[str] = Query(default=None, description="Table to use for deletion"),
) -> None:
try:
db = await get_db(dbs, db_id, table)
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(http_request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.delete_eval_runs(
eval_run_ids=request.eval_run_ids, db_id=db_id, table=table, headers=headers
)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
await db.delete_eval_runs(eval_run_ids=request.eval_run_ids)
else:
db.delete_eval_runs(eval_run_ids=request.eval_run_ids)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to delete eval runs: {e}")
@router.patch(
"/eval-runs/{eval_run_id}",
response_model=EvalSchema,
status_code=200,
operation_id="update_eval_run",
summary="Update Evaluation Run",
description="Update the name or other properties of an existing evaluation run.",
responses={
200: {
"description": "Evaluation run updated successfully",
"content": {
"application/json": {
"example": {
"id": "a03fa2f4-900d-482d-afe0-470d4cd8d1f4",
"agent_id": "basic-agent",
"model_id": "gpt-4o",
"model_provider": "OpenAI",
"team_id": None,
"workflow_id": None,
"name": "Test ",
"evaluated_component_name": None,
"eval_type": "reliability",
"eval_data": {
"eval_status": "PASSED",
"failed_tool_calls": [],
"passed_tool_calls": ["multiply"],
},
"eval_input": {"expected_tool_calls": ["multiply"]},
"created_at": "2025-08-27T15:41:59Z",
"updated_at": "2025-08-27T15:41:59Z",
}
}
},
},
404: {"description": "Evaluation run not found", "model": NotFoundResponse},
500: {"description": "Failed to update evaluation run", "model": InternalServerErrorResponse},
},
)
async def update_eval_run(
http_request: Request,
eval_run_id: str,
request: UpdateEvalRunRequest,
db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
table: Optional[str] = Query(default=None, description="Table to use for rename operation"),
) -> EvalSchema:
try:
db = await get_db(dbs, db_id, table)
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(http_request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.update_eval_run(
eval_run_id=eval_run_id, name=request.name, db_id=db_id, table=table, headers=headers
)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
eval_run = await db.rename_eval_run(eval_run_id=eval_run_id, name=request.name, deserialize=False)
else:
eval_run = db.rename_eval_run(eval_run_id=eval_run_id, name=request.name, deserialize=False)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Failed to rename eval run: {e}")
if not eval_run:
raise HTTPException(status_code=404, detail=f"Eval run with id '{eval_run_id}' not found")
return EvalSchema.from_dict(eval_run) # type: ignore
    @router.post(
        "/eval-runs",
        response_model=EvalSchema,
        status_code=200,
        operation_id="run_eval",
        summary="Execute Evaluation",
        description=(
            "Run evaluation tests on agents or teams. Supports accuracy, agent-as-judge, performance, and reliability evaluations. "
            "Requires either agent_id or team_id, but not both."
        ),
        responses={
            200: {
                "description": "Evaluation executed successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "f2b2d72f-e9e2-4f0e-8810-0a7e1ff58614",
                            "agent_id": "basic-agent",
                            "model_id": "gpt-4o",
                            "model_provider": "OpenAI",
                            "team_id": None,
                            "workflow_id": None,
                            "name": None,
                            "evaluated_component_name": None,
                            "eval_type": "reliability",
                            "eval_data": {
                                "eval_status": "PASSED",
                                "failed_tool_calls": [],
                                "passed_tool_calls": ["multiply"],
                            },
                            "created_at": "2025-08-27T15:41:59Z",
                            "updated_at": "2025-08-27T15:41:59Z",
                        }
                    }
                },
            },
            400: {"description": "Invalid request - provide either agent_id or team_id", "model": BadRequestResponse},
            404: {"description": "Agent or team not found", "model": NotFoundResponse},
        },
    )
    async def run_eval(
        request: Request,
        eval_run_input: EvalRunInput,
        db_id: Optional[str] = Query(default=None, description="Database ID to use for evaluation"),
        table: Optional[str] = Query(default=None, description="Table to use for evaluation"),
    ) -> Optional[EvalSchema]:
        # Resolve the backing database. A RemoteDb means the entire evaluation is
        # delegated to the remote endpoint, before any local validation runs.
        db = await get_db(dbs, db_id, table)
        if isinstance(db, RemoteDb):
            # Forward the caller's bearer token so the remote service can authenticate.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.create_eval_run(
                eval_type=eval_run_input.eval_type,
                input_text=eval_run_input.input,
                agent_id=eval_run_input.agent_id,
                team_id=eval_run_input.team_id,
                model_id=eval_run_input.model_id,
                model_provider=eval_run_input.model_provider,
                expected_output=eval_run_input.expected_output,
                expected_tool_calls=eval_run_input.expected_tool_calls,
                num_iterations=eval_run_input.num_iterations,
                db_id=db_id,
                table=table,
                headers=headers,
            )
        # agent_id and team_id are mutually exclusive; exactly one must be given.
        if eval_run_input.agent_id and eval_run_input.team_id:
            raise HTTPException(status_code=400, detail="Only one of agent_id or team_id must be provided")
        if eval_run_input.agent_id:
            agent = get_agent_by_id(agent_id=eval_run_input.agent_id, agents=agents)
            if not agent:
                raise HTTPException(status_code=404, detail=f"Agent with id '{eval_run_input.agent_id}' not found")
            # Remote agents cannot be evaluated locally; bail out early.
            if isinstance(agent, RemoteAgent):
                log_warning("Evaluation against remote agents are not supported yet")
                return None
            # If both model_id and model_provider are given, temporarily override the
            # agent's model for this eval. The original is saved in default_model and
            # restored by the run_*_eval helpers after the eval completes.
            default_model = None
            if (
                hasattr(agent, "model")
                and agent.model is not None
                and eval_run_input.model_id is not None
                and eval_run_input.model_provider is not None
            ):
                default_model = deepcopy(agent.model)
                if eval_run_input.model_id != agent.model.id or eval_run_input.model_provider != agent.model.provider:
                    model_provider = eval_run_input.model_provider.lower()
                    model_id = eval_run_input.model_id.lower()
                    model_string = f"{model_provider}:{model_id}"
                    model = get_model(model_string)
                    agent.model = model
            team = None
        elif eval_run_input.team_id:
            team = get_team_by_id(team_id=eval_run_input.team_id, teams=teams)
            if not team:
                raise HTTPException(status_code=404, detail=f"Team with id '{eval_run_input.team_id}' not found")
            # Remote teams cannot be evaluated locally; bail out early.
            if isinstance(team, RemoteTeam):
                log_warning("Evaluation against remote teams are not supported yet")
                return None
            # If model_id/model_provider specified, override team's model temporarily
            default_model = None
            if (
                hasattr(team, "model")
                and team.model is not None
                and eval_run_input.model_id is not None
                and eval_run_input.model_provider is not None
            ):
                default_model = deepcopy(team.model)  # Save original
                if eval_run_input.model_id != team.model.id or eval_run_input.model_provider != team.model.provider:
                    model_provider = eval_run_input.model_provider.lower()
                    model_id = eval_run_input.model_id.lower()
                    model_string = f"{model_provider}:{model_id}"
                    model = get_model(model_string)
                    team.model = model  # Override temporarily
            agent = None
        else:
            raise HTTPException(status_code=400, detail="One of agent_id or team_id must be provided")
        # Dispatch to the eval-type-specific runner. Anything that is not
        # accuracy/agent-as-judge/performance falls through to reliability.
        # NOTE(review): the RemoteAgent/RemoteTeam checks below look unreachable —
        # remote components already returned None above — kept pending the TODOs.
        if eval_run_input.eval_type == EvalType.ACCURACY:
            if isinstance(agent, RemoteAgent) or isinstance(team, RemoteTeam):
                # TODO: Handle remote evaluation
                log_warning("Evaluation against remote agents are not supported yet")
                return None
            return await run_accuracy_eval(
                eval_run_input=eval_run_input, db=db, agent=agent, team=team, default_model=default_model
            )
        elif eval_run_input.eval_type == EvalType.AGENT_AS_JUDGE:
            return await run_agent_as_judge_eval(
                eval_run_input=eval_run_input,
                db=db,
                agent=agent,
                team=team,
                default_model=default_model,  # type: ignore
            )
        elif eval_run_input.eval_type == EvalType.PERFORMANCE:
            if isinstance(agent, RemoteAgent) or isinstance(team, RemoteTeam):
                # TODO: Handle remote evaluation
                log_warning("Evaluation against remote agents are not supported yet")
                return None
            return await run_performance_eval(
                eval_run_input=eval_run_input, db=db, agent=agent, team=team, default_model=default_model
            )
        else:
            if isinstance(agent, RemoteAgent) or isinstance(team, RemoteTeam):
                # TODO: Handle remote evaluation
                log_warning("Evaluation against remote agents are not supported yet")
                return None
            return await run_reliability_eval(
                eval_run_input=eval_run_input, db=db, agent=agent, team=team, default_model=default_model
            )
return router
def parse_eval_types_filter(
    eval_types: Optional[str] = Query(
        default=None,
        description="Comma-separated eval types (accuracy,agent_as_judge,performance,reliability)",
        examples=["accuracy,agent_as_judge,performance,reliability"],
    ),
) -> Optional[List[EvalType]]:
    """Convert the comma-separated ``eval_types`` query string into EvalType enums.

    Returns None when no filter was supplied; raises 422 on any unknown type.
    """
    if not eval_types:
        return None
    parsed: List[EvalType] = []
    try:
        for raw_type in eval_types.split(","):
            parsed.append(EvalType(raw_type.strip()))
    except ValueError as e:
        valid_types = ", ".join(t.value for t in EvalType)
        raise HTTPException(status_code=422, detail=f"Invalid eval_type: {e}. Valid types: {valid_types}")
    return parsed
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/evals/evals.py",
"license": "Apache License 2.0",
"lines": 488,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/evals/schemas.py | from dataclasses import asdict
from datetime import datetime
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Field
from agno.db.schemas.evals import EvalType
from agno.eval import AccuracyResult, AgentAsJudgeResult, PerformanceResult, ReliabilityResult
from agno.eval.accuracy import AccuracyEval
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.eval.performance import PerformanceEval
from agno.eval.reliability import ReliabilityEval
from agno.os.utils import to_utc_datetime
class EvalRunInput(BaseModel):
    """Request body for POST /eval-runs.

    Carries the evaluation target (exactly one of agent_id or team_id), an
    optional model override, and the parameters specific to each eval type.
    """

    agent_id: Optional[str] = Field(None, description="Agent ID to evaluate")
    team_id: Optional[str] = Field(None, description="Team ID to evaluate")
    model_id: Optional[str] = Field(None, description="Model ID to use for evaluation")
    model_provider: Optional[str] = Field(None, description="Model provider name")
    eval_type: EvalType = Field(..., description="Type of evaluation to run (accuracy, performance, or reliability)")
    input: str = Field(..., description="Input text/query for the evaluation", min_length=1)
    additional_guidelines: Optional[str] = Field(None, description="Additional guidelines for the evaluation")
    additional_context: Optional[str] = Field(None, description="Additional context for the evaluation")
    num_iterations: int = Field(1, description="Number of times to run the evaluation", ge=1, le=100)
    name: Optional[str] = Field(None, description="Name for this evaluation run")
    # Accuracy eval specific fields
    expected_output: Optional[str] = Field(None, description="Expected output for accuracy evaluation")
    # AgentAsJudge eval specific fields
    criteria: Optional[str] = Field(None, description="Evaluation criteria for agent-as-judge evaluation")
    scoring_strategy: Optional[Literal["numeric", "binary"]] = Field(
        "binary", description="Scoring strategy: 'numeric' (1-10 with threshold) or 'binary' (PASS/FAIL)"
    )
    threshold: Optional[int] = Field(
        7, description="Score threshold for pass/fail (1-10), only used with numeric scoring", ge=1, le=10
    )
    # Performance eval specific fields
    warmup_runs: int = Field(0, description="Number of warmup runs before measuring performance", ge=0, le=10)
    # Reliability eval specific fields
    expected_tool_calls: Optional[List[str]] = Field(None, description="Expected tool calls for reliability evaluation")
class EvalSchema(BaseModel):
    """API representation of a single evaluation run.

    Constructed either from a raw DB row (`from_dict`) or from an in-memory
    eval object paired with its result (`from_*_eval` classmethods).
    """

    id: str = Field(..., description="Unique identifier for the evaluation run")
    agent_id: Optional[str] = Field(None, description="Agent ID that was evaluated")
    model_id: Optional[str] = Field(None, description="Model ID used in evaluation")
    model_provider: Optional[str] = Field(None, description="Model provider name")
    team_id: Optional[str] = Field(None, description="Team ID that was evaluated")
    workflow_id: Optional[str] = Field(None, description="Workflow ID that was evaluated")
    name: Optional[str] = Field(None, description="Name of the evaluation run")
    evaluated_component_name: Optional[str] = Field(None, description="Name of the evaluated component")
    eval_type: EvalType = Field(..., description="Type of evaluation (accuracy, performance, or reliability)")
    eval_data: Dict[str, Any] = Field(..., description="Evaluation results and metrics")
    eval_input: Optional[Dict[str, Any]] = Field(None, description="Input parameters used for the evaluation")
    created_at: Optional[datetime] = Field(None, description="Timestamp when evaluation was created")
    updated_at: Optional[datetime] = Field(None, description="Timestamp when evaluation was last updated")

    @classmethod
    def from_dict(cls, eval_run: Dict[str, Any]) -> "EvalSchema":
        """Build an EvalSchema from a raw eval-run row as returned by the DB layer.

        Requires `run_id`, `eval_type` and `eval_data`; all other keys are optional.
        """
        return cls(
            id=eval_run["run_id"],
            name=eval_run.get("name"),
            agent_id=eval_run.get("agent_id"),
            model_id=eval_run.get("model_id"),
            model_provider=eval_run.get("model_provider"),
            team_id=eval_run.get("team_id"),
            workflow_id=eval_run.get("workflow_id"),
            evaluated_component_name=eval_run.get("evaluated_component_name"),
            eval_type=eval_run["eval_type"],
            eval_data=eval_run["eval_data"],
            eval_input=eval_run.get("eval_input"),
            created_at=to_utc_datetime(eval_run.get("created_at")),
            updated_at=to_utc_datetime(eval_run.get("updated_at")),
        )

    @classmethod
    def from_accuracy_eval(cls, accuracy_eval: AccuracyEval, result: AccuracyResult) -> "EvalSchema":
        """Build an EvalSchema from a completed accuracy evaluation."""
        # Resolve model info from whichever component (agent or team) ran the eval.
        # Both lookups tolerate a missing component or a component without a model;
        # previously model_id assumed agent/team.model was always set and could
        # raise AttributeError.
        model_provider = (
            accuracy_eval.agent.model.provider
            if accuracy_eval.agent and accuracy_eval.agent.model
            else accuracy_eval.team.model.provider
            if accuracy_eval.team and accuracy_eval.team.model
            else None
        )
        model_id = (
            accuracy_eval.agent.model.id
            if accuracy_eval.agent and accuracy_eval.agent.model
            else accuracy_eval.team.model.id
            if accuracy_eval.team and accuracy_eval.team.model
            else None
        )
        return cls(
            id=accuracy_eval.eval_id,
            name=accuracy_eval.name,
            agent_id=accuracy_eval.agent.id if accuracy_eval.agent else None,
            team_id=accuracy_eval.team.id if accuracy_eval.team else None,
            workflow_id=None,
            model_id=model_id,
            model_provider=model_provider,
            eval_type=EvalType.ACCURACY,
            eval_data=asdict(result),
        )

    @classmethod
    def from_agent_as_judge_eval(
        cls,
        agent_as_judge_eval: AgentAsJudgeEval,
        result: AgentAsJudgeResult,
        model_id: Optional[str] = None,
        model_provider: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> "EvalSchema":
        """Build an EvalSchema from a completed agent-as-judge evaluation.

        Note the run id comes from the result, not the eval object.
        """
        return cls(
            id=result.run_id,
            name=agent_as_judge_eval.name,
            agent_id=agent_id,
            team_id=team_id,
            workflow_id=None,
            model_id=model_id,
            model_provider=model_provider,
            eval_type=EvalType.AGENT_AS_JUDGE,
            eval_data=asdict(result),
        )

    @classmethod
    def from_performance_eval(
        cls,
        performance_eval: PerformanceEval,
        result: PerformanceResult,
        model_id: Optional[str] = None,
        model_provider: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> "EvalSchema":
        """Build an EvalSchema from a completed performance evaluation."""
        return cls(
            id=performance_eval.eval_id,
            name=performance_eval.name,
            agent_id=agent_id,
            team_id=team_id,
            workflow_id=None,
            model_id=model_id,
            model_provider=model_provider,
            eval_type=EvalType.PERFORMANCE,
            eval_data=asdict(result),
        )

    @classmethod
    def from_reliability_eval(
        cls,
        reliability_eval: ReliabilityEval,
        result: ReliabilityResult,
        model_id: Optional[str] = None,
        model_provider: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
    ) -> "EvalSchema":
        """Build an EvalSchema from a completed reliability evaluation."""
        return cls(
            id=reliability_eval.eval_id,
            name=reliability_eval.name,
            agent_id=agent_id,
            team_id=team_id,
            workflow_id=None,
            model_id=model_id,
            model_provider=model_provider,
            eval_type=EvalType.RELIABILITY,
            eval_data=asdict(result),
        )
class DeleteEvalRunsRequest(BaseModel):
    """Request body for bulk deletion of evaluation runs; at least one ID is required."""

    eval_run_ids: List[str] = Field(..., description="List of evaluation run IDs to delete", min_length=1)
class UpdateEvalRunRequest(BaseModel):
    """Request body for renaming an evaluation run; the new name must be 1-255 chars."""

    name: str = Field(..., description="New name for the evaluation run", min_length=1, max_length=255)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/evals/schemas.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/evals/utils.py | from typing import Optional, Union
from fastapi import HTTPException
from agno.agent import Agent, RemoteAgent
from agno.db.base import AsyncBaseDb, BaseDb
from agno.eval.accuracy import AccuracyEval
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.eval.performance import PerformanceEval
from agno.eval.reliability import ReliabilityEval
from agno.models.base import Model
from agno.os.routers.evals.schemas import EvalRunInput, EvalSchema
from agno.team import RemoteTeam, Team
async def run_accuracy_eval(
    eval_run_input: EvalRunInput,
    db: Union[BaseDb, AsyncBaseDb],
    agent: Optional[Agent] = None,
    team: Optional[Team] = None,
    default_model: Optional[Model] = None,
) -> EvalSchema:
    """Execute an accuracy evaluation for the given agent or team.

    Requires `expected_output` on the input; restores the component's original
    model (if one was temporarily overridden) before returning.
    """
    if not eval_run_input.expected_output:
        raise HTTPException(status_code=400, detail="expected_output is required for accuracy evaluation")

    evaluation = AccuracyEval(
        db=db,
        agent=agent,
        team=team,
        input=eval_run_input.input,
        expected_output=eval_run_input.expected_output,
        additional_guidelines=eval_run_input.additional_guidelines,
        additional_context=eval_run_input.additional_context,
        num_iterations=eval_run_input.num_iterations or 1,
        name=eval_run_input.name,
        model=default_model,
    )

    eval_result = await evaluation.arun(print_results=False, print_summary=False)
    if not eval_result:
        raise HTTPException(status_code=500, detail="Failed to run accuracy evaluation")

    schema = EvalSchema.from_accuracy_eval(accuracy_eval=evaluation, result=eval_result)

    # Put back the model that run_eval temporarily swapped out, if any.
    if default_model is not None:
        if agent is not None:
            agent.model = default_model
        elif team is not None:
            team.model = default_model
    return schema
async def run_agent_as_judge_eval(
    eval_run_input: EvalRunInput,
    db: Union[BaseDb, AsyncBaseDb],
    agent: Optional[Union[Agent, RemoteAgent]] = None,
    team: Optional[Union[Team, RemoteTeam]] = None,
    default_model: Optional[Model] = None,
) -> EvalSchema:
    """Execute an agent-as-judge evaluation.

    First runs the target component on the input, then has a judge score the
    resulting output against `criteria`. Requires `criteria` on the input.
    """
    if not eval_run_input.criteria:
        raise HTTPException(status_code=400, detail="criteria is required for agent-as-judge evaluation")

    # Step 1: produce the output to be judged by running the target component.
    if agent:
        response = await agent.arun(eval_run_input.input, stream=False)  # type: ignore[misc]
        component_output = str(response.content) if response.content else ""
        component_agent_id, component_team_id = agent.id, None
    elif team:
        response = await team.arun(eval_run_input.input, stream=False)  # type: ignore[misc]
        component_output = str(response.content) if response.content else ""
        component_agent_id, component_team_id = None, team.id
    else:
        raise HTTPException(status_code=400, detail="Either agent_id or team_id must be provided")

    # Step 2: score the output with the judge.
    judge = AgentAsJudgeEval(
        db=db,
        criteria=eval_run_input.criteria,
        scoring_strategy=eval_run_input.scoring_strategy or "binary",
        threshold=eval_run_input.threshold or 7,
        additional_guidelines=eval_run_input.additional_guidelines,
        name=eval_run_input.name,
        model=default_model,
    )
    judge_result = await judge.arun(
        input=eval_run_input.input, output=component_output, print_results=False, print_summary=False
    )
    if not judge_result:
        raise HTTPException(status_code=500, detail="Failed to run agent as judge evaluation")

    # Report the judge's own model, not the evaluated component's.
    judge_model = judge.model
    eval_run = EvalSchema.from_agent_as_judge_eval(
        agent_as_judge_eval=judge,
        result=judge_result,
        agent_id=component_agent_id,
        team_id=component_team_id,
        model_id=judge_model.id if judge_model is not None else None,
        model_provider=judge_model.provider if judge_model is not None else None,
    )

    # Restore a temporarily overridden model; only local Agent/Team instances qualify.
    if default_model is not None:
        if isinstance(agent, Agent):
            agent.model = default_model
        elif isinstance(team, Team):
            team.model = default_model
    return eval_run
async def run_performance_eval(
    eval_run_input: EvalRunInput,
    db: Union[BaseDb, AsyncBaseDb],
    agent: Optional[Agent] = None,
    team: Optional[Team] = None,
    default_model: Optional[Model] = None,
) -> EvalSchema:
    """Run a performance evaluation for the given agent or team.

    Wraps a single `arun` call of the component in a callable and times it via
    PerformanceEval. Restores the component's original model (if one was
    temporarily overridden) before returning.

    Raises:
        HTTPException: 400 if neither agent nor team is provided; 500 if the
            evaluation produces no result.
    """
    if agent:

        async def run_component():  # type: ignore
            return await agent.arun(eval_run_input.input, stream=False)  # type: ignore[misc]

        model_id = agent.model.id if agent and agent.model else None
        model_provider = agent.model.provider if agent and agent.model else None
    elif team:

        async def run_component():  # type: ignore
            return await team.arun(eval_run_input.input, stream=False)  # type: ignore[misc]

        model_id = team.model.id if team and team.model else None
        model_provider = team.model.provider if team and team.model else None
    else:
        # Previously this fell through with run_component/model_id unbound and
        # raised NameError; raise the same 400 as the other eval runners.
        raise HTTPException(status_code=400, detail="Either agent_id or team_id must be provided")

    performance_eval = PerformanceEval(
        db=db,
        name=eval_run_input.name,
        func=run_component,
        num_iterations=eval_run_input.num_iterations or 10,
        warmup_runs=eval_run_input.warmup_runs,
        agent_id=agent.id if agent else None,
        team_id=team.id if team else None,
        model_id=model_id,
        model_provider=model_provider,
    )
    result = await performance_eval.arun(print_results=False, print_summary=False)
    if not result:
        raise HTTPException(status_code=500, detail="Failed to run performance evaluation")

    eval_run = EvalSchema.from_performance_eval(
        performance_eval=performance_eval,
        result=result,
        agent_id=agent.id if agent else None,
        team_id=team.id if team else None,
        model_id=model_id,
        model_provider=model_provider,
    )

    # Restore original model after eval
    if default_model is not None:
        if agent is not None:
            agent.model = default_model
        elif team is not None:
            team.model = default_model
    return eval_run
async def run_reliability_eval(
    eval_run_input: EvalRunInput,
    db: Union[BaseDb, AsyncBaseDb],
    agent: Optional[Agent] = None,
    team: Optional[Team] = None,
    default_model: Optional[Model] = None,
) -> EvalSchema:
    """Run a reliability evaluation for the given agent or team.

    Executes the component once and checks its tool calls against
    `expected_tool_calls`. Restores the component's original model (if one was
    temporarily overridden) before returning.

    Raises:
        HTTPException: 400 if expected_tool_calls is missing or neither agent
            nor team is provided; 500 if the evaluation produces no result.
    """
    if not eval_run_input.expected_tool_calls:
        raise HTTPException(status_code=400, detail="expected_tool_calls is required for reliability evaluations")
    if agent:
        agent_response = await agent.arun(eval_run_input.input, stream=False)  # type: ignore[misc]
        reliability_eval = ReliabilityEval(
            db=db,
            name=eval_run_input.name,
            agent_response=agent_response,
            expected_tool_calls=eval_run_input.expected_tool_calls,
        )
        model_id = agent.model.id if agent and agent.model else None
        model_provider = agent.model.provider if agent and agent.model else None
    elif team:
        team_response = await team.arun(eval_run_input.input, stream=False)  # type: ignore[misc]
        reliability_eval = ReliabilityEval(
            db=db,
            name=eval_run_input.name,
            team_response=team_response,
            expected_tool_calls=eval_run_input.expected_tool_calls,
        )
        model_id = team.model.id if team and team.model else None
        model_provider = team.model.provider if team and team.model else None
    else:
        # Previously this fell through with reliability_eval/model_id unbound and
        # raised UnboundLocalError; raise the same 400 as the other eval runners.
        raise HTTPException(status_code=400, detail="Either agent_id or team_id must be provided")

    result = await reliability_eval.arun(print_results=False)
    if not result:
        raise HTTPException(status_code=500, detail="Failed to run reliability evaluation")

    eval_run = EvalSchema.from_reliability_eval(
        reliability_eval=reliability_eval,
        result=result,
        agent_id=agent.id if agent else None,
        # team_id was previously dropped here, losing the team association on
        # team evals; pass it through like run_performance_eval does.
        team_id=team.id if team else None,
        model_id=model_id,
        model_provider=model_provider,
    )

    # Restore original model after eval
    if default_model is not None:
        if agent is not None:
            agent.model = default_model
        elif team is not None:
            team.model = default_model
    return eval_run
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/evals/utils.py",
"license": "Apache License 2.0",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/knowledge/knowledge.py | import json
import logging
import math
from typing import Any, Dict, List, Optional, Union
from fastapi import APIRouter, BackgroundTasks, Depends, File, Form, HTTPException, Path, Query, Request, UploadFile
from agno.db.base import AsyncBaseDb
from agno.knowledge.content import Content, FileData
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader import ReaderFactory
from agno.knowledge.reader.base import Reader
from agno.knowledge.remote_content.s3 import S3Config
from agno.knowledge.utils import get_all_chunkers_info, get_all_readers_info, get_content_types_to_readers_mapping
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency
from agno.os.routers.knowledge.schemas import (
ChunkerSchema,
ConfigResponseSchema,
ContentResponseSchema,
ContentStatus,
ContentStatusResponse,
ContentUpdateSchema,
ReaderSchema,
RemoteContentSourceSchema,
SourceFileSchema,
SourceFilesResponseSchema,
SourceFolderSchema,
VectorDbSchema,
VectorSearchRequestSchema,
VectorSearchResult,
)
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
SortOrder,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import get_knowledge_instance
from agno.remote.base import RemoteKnowledge
from agno.utils.log import log_debug, log_error, log_info
from agno.utils.string import generate_id
logger = logging.getLogger(__name__)
def get_knowledge_router(
    knowledge_instances: List[Union[Knowledge, RemoteKnowledge]], settings: AgnoAPISettings = AgnoAPISettings()
) -> APIRouter:
    """Create the knowledge router, wiring authentication, shared error responses,
    and all content-management routes."""
    error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    knowledge_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Knowledge"],
        responses=error_responses,
    )
    return attach_routes(router=knowledge_router, knowledge_instances=knowledge_instances)
def attach_routes(router: APIRouter, knowledge_instances: List[Union[Knowledge, RemoteKnowledge]]) -> APIRouter:
    @router.post(
        "/knowledge/content",
        response_model=ContentResponseSchema,
        status_code=202,
        operation_id="upload_content",
        summary="Upload Content",
        description=(
            "Upload content to the knowledge base. Supports file uploads, text content, or URLs. "
            "Content is processed asynchronously in the background. Supports custom readers and chunking strategies."
        ),
        responses={
            202: {
                "description": "Content upload accepted for processing",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "content-123",
                            "name": "example-document.pdf",
                            "description": "Sample document for processing",
                            "metadata": {"category": "documentation", "priority": "high"},
                            "status": "processing",
                        }
                    }
                },
            },
            400: {
                "description": "Invalid request - malformed metadata or missing content",
                "model": BadRequestResponse,
            },
            422: {"description": "Validation error in form data", "model": ValidationErrorResponse},
        },
    )
    async def upload_content(
        request: Request,
        background_tasks: BackgroundTasks,
        name: Optional[str] = Form(None, description="Content name (auto-generated from file/URL if not provided)"),
        description: Optional[str] = Form(None, description="Content description for context"),
        url: Optional[str] = Form(None, description="URL to fetch content from (JSON array or single URL string)"),
        metadata: Optional[str] = Form(None, description="JSON metadata object for additional content properties"),
        file: Optional[UploadFile] = File(None, description="File to upload for processing"),
        text_content: Optional[str] = Form(None, description="Raw text content to process"),
        reader_id: Optional[str] = Form(None, description="ID of the reader to use for content processing"),
        chunker: Optional[str] = Form(None, description="Chunking strategy to apply during processing"),
        chunk_size: Optional[int] = Form(None, description="Chunk size to use for processing"),
        chunk_overlap: Optional[int] = Form(None, description="Chunk overlap to use for processing"),
        db_id: Optional[str] = Query(default=None, description="Database ID to use for content storage"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to upload to"),
    ):
        # Select the knowledge instance matching the requested db/knowledge IDs.
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        # First metadata parse: only used by the RemoteKnowledge forwarding path below.
        parsed_metadata = None
        if metadata:
            try:
                parsed_metadata = json.loads(metadata)
            except json.JSONDecodeError:
                # If it's not valid JSON, treat as a simple key-value pair.
                # The literal "string" is dropped — presumably the Swagger UI
                # placeholder value; TODO confirm.
                parsed_metadata = {"value": metadata} if metadata != "string" else None
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the upload (with the caller's bearer token) to the remote service.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.upload_content(
                name=name,
                description=description,
                url=url,
                metadata=parsed_metadata,
                file=file,
                text_content=text_content,
                reader_id=reader_id,
                chunker=chunker,
                chunk_size=chunk_size,
                chunk_overlap=chunk_overlap,
                headers=headers,
            )
        # Local path: read the raw bytes from the uploaded file or inline text.
        if file:
            content_bytes = await file.read()
        elif text_content:
            content_bytes = text_content.encode("utf-8")
        else:
            content_bytes = None
        # `url` may be a JSON array of URLs or a plain single-URL string.
        parsed_urls = None
        if url and url.strip():
            try:
                parsed_urls = json.loads(url)
                log_debug(f"Parsed URLs: {parsed_urls}")
            except json.JSONDecodeError:
                # If it's not valid JSON, treat as a single URL string
                parsed_urls = url
        # Second metadata parse, used for local processing.
        # NOTE(review): duplicates the parse above but WITHOUT the "string"
        # special case, so the two paths diverge for metadata == "string" — confirm intent.
        parsed_metadata = None
        if metadata:
            try:
                parsed_metadata = json.loads(metadata)
            except json.JSONDecodeError:
                # If it's not valid JSON, treat as a simple key-value pair
                parsed_metadata = {"value": metadata}
        # Wrap the raw bytes in FileData; inline text is tagged as type "manual".
        if text_content:
            file_data = FileData(
                content=content_bytes,
                type="manual",
            )
        elif file:
            file_data = FileData(
                content=content_bytes,
                type=file.content_type if file.content_type else None,
                filename=file.filename,
                size=file.size,
            )
        else:
            file_data = None
        # Default the content name from the filename or the parsed URL(s).
        if not name:
            if file and file.filename:
                name = file.filename
            elif url:
                name = parsed_urls
        content = Content(
            name=name,
            description=description,
            url=parsed_urls,
            metadata=parsed_metadata,
            file_data=file_data,
            # NOTE(review): this chained conditional evaluates to None whenever
            # `file` is falsy, so text uploads never get a size — confirm intent.
            size=file.size if file else None if text_content else None,
        )
        # Content identity is a hash of the content, so identical uploads share an ID.
        content_hash = knowledge._build_content_hash(content)
        content.content_hash = content_hash
        content.id = generate_id(content_hash)
        # Heavy processing runs in the background; the 202 response returns immediately.
        background_tasks.add_task(process_content, knowledge, content, reader_id, chunker, chunk_size, chunk_overlap)
        response = ContentResponseSchema(
            id=content.id,
            name=name,
            description=description,
            metadata=parsed_metadata,
            status=ContentStatus.PROCESSING,
        )
        return response
    @router.post(
        "/knowledge/remote-content",
        response_model=ContentResponseSchema,
        status_code=202,
        operation_id="upload_remote_content",
        summary="Upload Remote Content",
        description=(
            "Upload content from a remote source (S3, GCS, SharePoint, GitHub) to the knowledge base. "
            "Content is processed asynchronously in the background. "
        ),
        responses={
            202: {
                "description": "Remote content upload accepted for processing",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "content-456",
                            "name": "reports/q1-2024.pdf",
                            "description": "Q1 Report from S3",
                            "metadata": {"source": "s3-docs"},
                            "status": "processing",
                        }
                    }
                },
            },
            400: {
                "description": "Invalid request - unknown config or missing path",
                "model": BadRequestResponse,
            },
            422: {"description": "Validation error in request body", "model": ValidationErrorResponse},
        },
    )
    async def upload_remote_content(
        request: Request,
        background_tasks: BackgroundTasks,
        config_id: str = Form(..., description="ID of the configured remote content source (from /knowledge/config)"),
        path: str = Form(..., description="Path to file or folder in the remote source"),
        name: Optional[str] = Form(None, description="Content name (auto-generated if not provided)"),
        description: Optional[str] = Form(None, description="Content description"),
        metadata: Optional[str] = Form(None, description="JSON metadata object"),
        reader_id: Optional[str] = Form(None, description="ID of the reader to use for processing"),
        chunker: Optional[str] = Form(None, description="Chunking strategy to apply"),
        chunk_size: Optional[int] = Form(None, description="Chunk size for processing"),
        chunk_overlap: Optional[int] = Form(None, description="Chunk overlap for processing"),
        db_id: Optional[str] = Query(default=None, description="Database ID to use for content storage"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to upload to"),
    ):
        # Select the knowledge instance matching the requested db/knowledge IDs.
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            # TODO: Forward to remote knowledge instance
            raise HTTPException(status_code=501, detail="Remote content upload not yet supported for RemoteKnowledge")
        # Validate that the config_id exists in configured sources
        config = knowledge._get_remote_config_by_id(config_id)
        if config is None:
            raise HTTPException(
                status_code=400,
                detail=f"Unknown content source: {config_id}. Check /knowledge/config for available sources.",
            )
        # Parse metadata if provided; non-JSON strings become a simple key-value pair.
        parsed_metadata = None
        if metadata:
            try:
                parsed_metadata = json.loads(metadata)
            except json.JSONDecodeError:
                parsed_metadata = {"value": metadata}
        # Use the config's factory methods to create the remote content object
        # If path ends with '/', treat as folder, otherwise treat as file
        is_folder = path.endswith("/")
        if is_folder:
            if hasattr(config, "folder"):
                remote_content = config.folder(path.rstrip("/"))
            else:
                raise HTTPException(status_code=400, detail=f"Config {config_id} does not support folder uploads")
        else:
            if hasattr(config, "file"):
                remote_content = config.file(path)
            else:
                raise HTTPException(status_code=400, detail=f"Config {config_id} does not support file uploads")
        # Set name from path if not provided
        content_name = name or path
        content = Content(
            name=content_name,
            description=description,
            metadata=parsed_metadata,
            remote_content=remote_content,
        )
        # Content identity is a hash of the content, so identical uploads share an ID.
        content_hash = knowledge._build_content_hash(content)
        content.content_hash = content_hash
        content.id = generate_id(content_hash)
        # Fetching and indexing runs in the background; the 202 response returns immediately.
        background_tasks.add_task(process_content, knowledge, content, reader_id, chunker, chunk_size, chunk_overlap)
        response = ContentResponseSchema(
            id=content_name and ContentResponseSchema and content.id or content.id,
            name=content_name,
            description=description,
            metadata=parsed_metadata,
            status=ContentStatus.PROCESSING,
        )
        return response
    @router.patch(
        "/knowledge/content/{content_id}",
        response_model=ContentResponseSchema,
        status_code=200,
        operation_id="update_content",
        summary="Update Content",
        description=(
            "Update content properties such as name, description, metadata, or processing configuration. "
            "Allows modification of existing content without re-uploading."
        ),
        responses={
            200: {
                "description": "Content updated successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "3c2fc685-d451-4d47-b0c0-b9a544c672b7",
                            "name": "example.pdf",
                            "description": "",
                            "type": "application/pdf",
                            "size": "251261",
                            "linked_to": None,
                            "metadata": {},
                            "access_count": 1,
                            "status": "completed",
                            "status_message": "",
                            "created_at": "2025-09-08T15:22:53Z",
                            "updated_at": "2025-09-08T15:22:54Z",
                        }
                    }
                },
            },
            400: {
                "description": "Invalid request - malformed metadata or invalid reader_id",
                "model": BadRequestResponse,
            },
            404: {"description": "Content not found", "model": NotFoundResponse},
        },
    )
    async def update_content(
        request: Request,
        content_id: str = Path(..., description="Content ID"),
        name: Optional[str] = Form(None, description="Content name"),
        description: Optional[str] = Form(None, description="Content description"),
        metadata: Optional[str] = Form(None, description="Content metadata as JSON string"),
        reader_id: Optional[str] = Form(None, description="ID of the reader to use for processing"),
        db_id: Optional[str] = Query(default=None, description="Database ID to use"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
    ) -> Optional[ContentResponseSchema]:
        """Patch an existing content item's name, description, metadata and/or reader.

        Proxies the update to the remote service when the resolved knowledge base is a
        RemoteKnowledge instance; otherwise patches the local contents DB directly.
        Raises 400 for malformed metadata or an unknown reader_id, 404 when the
        content id has no row, and 500 when the DB patch itself fails.
        """
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        # Parse metadata JSON string if provided
        parsed_metadata = None
        if metadata and metadata.strip():
            try:
                parsed_metadata = json.loads(metadata)
            except json.JSONDecodeError:
                raise HTTPException(status_code=400, detail="Invalid JSON format for metadata")
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the caller's bearer token so the remote service can authorize the request.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.update_content(
                content_id=content_id,
                name=name,
                description=description,
                metadata=parsed_metadata,
                reader_id=reader_id,
                headers=headers,
            )
        # Create ContentUpdateSchema object from form data.
        # Blank/whitespace-only form values are normalized to None so they don't clobber existing fields.
        update_data = ContentUpdateSchema(
            name=name if name and name.strip() else None,
            description=description if description and description.strip() else None,
            metadata=parsed_metadata,
            reader_id=reader_id if reader_id and reader_id.strip() else None,
        )
        content = Content(
            id=content_id,
            name=update_data.name,
            description=update_data.description,
            metadata=update_data.metadata,
        )
        if update_data.reader_id:
            # Only readers registered on this knowledge instance are accepted here.
            if knowledge.readers and update_data.reader_id in knowledge.readers:
                content.reader = knowledge.readers[update_data.reader_id]
            else:
                raise HTTPException(status_code=400, detail=f"Invalid reader_id: {update_data.reader_id}")
        # Use async patch method if contents_db is an AsyncBaseDb, otherwise use sync patch method
        updated_content_dict = None
        try:
            if knowledge.contents_db is not None and isinstance(knowledge.contents_db, AsyncBaseDb):
                updated_content_dict = await knowledge.apatch_content(content)
            else:
                updated_content_dict = knowledge.patch_content(content)
        except Exception as e:
            log_error(f"Error updating content: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Error updating content: {str(e)}")
        if not updated_content_dict:
            # An empty result means the contents DB had no row for this id.
            raise HTTPException(status_code=404, detail=f"Content not found: {content_id}")
        return ContentResponseSchema.from_dict(updated_content_dict)
    @router.get(
        "/knowledge/content",
        response_model=PaginatedResponse[ContentResponseSchema],
        status_code=200,
        operation_id="get_content",
        summary="List Content",
        description=(
            "Retrieve paginated list of all content in the knowledge base with filtering and sorting options. "
            "Filter by status, content type, or metadata properties."
        ),
        responses={
            200: {
                "description": "Content list retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "data": [
                                {
                                    "id": "3c2fc685-d451-4d47-b0c0-b9a544c672b7",
                                    "name": "example.pdf",
                                    "description": "",
                                    "type": "application/pdf",
                                    "size": "251261",
                                    "linked_to": None,
                                    "metadata": {},
                                    "access_count": 1,
                                    "status": "completed",
                                    "status_message": "",
                                    "created_at": "2025-09-08T15:22:53Z",
                                    "updated_at": "2025-09-08T15:22:54Z",
                                },
                            ],
                            "meta": {"page": 1, "limit": 20, "total_pages": 1, "total_count": 2},
                        }
                    }
                },
            }
        },
    )
    async def get_content(
        request: Request,
        limit: Optional[int] = Query(default=20, description="Number of content entries to return", ge=1),
        page: Optional[int] = Query(default=1, description="Page number", ge=0),
        sort_by: Optional[str] = Query(default="created_at", description="Field to sort by"),
        sort_order: Optional[SortOrder] = Query(default="desc", description="Sort order (asc or desc)"),
        db_id: Optional[str] = Query(default=None, description="Database ID to use"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
    ) -> PaginatedResponse[ContentResponseSchema]:
        """Return a paginated, sorted list of content entries for the resolved knowledge base.

        Proxies to the remote service for RemoteKnowledge; otherwise reads from the
        local contents DB and wraps the rows in a PaginatedResponse.
        """
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the caller's bearer token so the remote service can authorize the request.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.get_content(
                limit=limit,
                page=page,
                sort_by=sort_by,
                # The remote client expects the plain string value, not the SortOrder enum.
                sort_order=sort_order.value if sort_order else None,
                headers=headers,
            )
        contents, count = await knowledge.aget_content(limit=limit, page=page, sort_by=sort_by, sort_order=sort_order)
        return PaginatedResponse(
            data=[
                ContentResponseSchema.from_dict(
                    {
                        "id": content.id,
                        "name": content.name,
                        "description": content.description,
                        "file_type": content.file_type,
                        "size": content.size,
                        "metadata": content.metadata,
                        "status": content.status,
                        "status_message": content.status_message,
                        "created_at": content.created_at,
                        "updated_at": content.updated_at,
                    }
                )
                for content in contents
            ],
            meta=PaginationInfo(
                page=page,
                limit=limit,
                total_count=count,
                # Guard against a None/zero limit to avoid ZeroDivisionError when computing pages.
                total_pages=math.ceil(count / limit) if limit is not None and limit > 0 else 0,
            ),
        )
    @router.get(
        "/knowledge/content/{content_id}",
        response_model=ContentResponseSchema,
        status_code=200,
        operation_id="get_content_by_id",
        summary="Get Content by ID",
        description="Retrieve detailed information about a specific content item including processing status and metadata.",
        responses={
            200: {
                "description": "Content details retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "3c2fc685-d451-4d47-b0c0-b9a544c672b7",
                            "name": "example.pdf",
                            "description": "",
                            "type": "application/pdf",
                            "size": "251261",
                            "linked_to": None,
                            "metadata": {},
                            "access_count": 1,
                            "status": "completed",
                            "status_message": "",
                            "created_at": "2025-09-08T15:22:53Z",
                            "updated_at": "2025-09-08T15:22:54Z",
                        }
                    }
                },
            },
            404: {"description": "Content not found", "model": NotFoundResponse},
        },
    )
    async def get_content_by_id(
        request: Request,
        content_id: str,
        db_id: Optional[str] = Query(default=None, description="Database ID to use"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
    ) -> ContentResponseSchema:
        """Return the details of a single content item, or 404 when the id is unknown.

        Proxies to the remote service for RemoteKnowledge instances.
        """
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the caller's bearer token so the remote service can authorize the request.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.get_content_by_id(content_id=content_id, headers=headers)
        content = await knowledge.aget_content_by_id(content_id=content_id)
        if not content:
            raise HTTPException(status_code=404, detail=f"Content not found: {content_id}")
        response = ContentResponseSchema.from_dict(
            {
                "id": content_id,
                "name": content.name,
                "description": content.description,
                "file_type": content.file_type,
                # Size is recomputed from the raw bytes when present; 0 when no file data is attached.
                "size": len(content.file_data.content) if content.file_data and content.file_data.content else 0,
                "metadata": content.metadata,
                "status": content.status,
                "status_message": content.status_message,
                "created_at": content.created_at,
                "updated_at": content.updated_at,
            }
        )
        return response
@router.delete(
"/knowledge/content/{content_id}",
response_model=ContentResponseSchema,
status_code=200,
response_model_exclude_none=True,
operation_id="delete_content_by_id",
summary="Delete Content by ID",
description="Permanently remove a specific content item from the knowledge base. This action cannot be undone.",
responses={
200: {},
404: {"description": "Content not found", "model": NotFoundResponse},
500: {"description": "Failed to delete content", "model": InternalServerErrorResponse},
},
)
async def delete_content_by_id(
request: Request,
content_id: str,
db_id: Optional[str] = Query(default=None, description="Database ID to use"),
knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
) -> ContentResponseSchema:
knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
if isinstance(knowledge, RemoteKnowledge):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
await knowledge.delete_content_by_id(content_id=content_id, headers=headers)
else:
await knowledge.aremove_content_by_id(content_id=content_id)
return ContentResponseSchema(
id=content_id,
)
@router.delete(
"/knowledge/content",
status_code=200,
operation_id="delete_all_content",
summary="Delete All Content",
description=(
"Permanently remove all content from the knowledge base. This is a destructive operation that "
"cannot be undone. Use with extreme caution."
),
responses={
200: {},
500: {"description": "Failed to delete all content", "model": InternalServerErrorResponse},
},
)
async def delete_all_content(
request: Request,
db_id: Optional[str] = Query(default=None, description="Database ID to use"),
knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
):
knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
if isinstance(knowledge, RemoteKnowledge):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await knowledge.delete_all_content(headers=headers)
await knowledge.aremove_all_content()
return "success"
    @router.get(
        "/knowledge/content/{content_id}/status",
        status_code=200,
        response_model=ContentStatusResponse,
        operation_id="get_content_status",
        summary="Get Content Status",
        description=(
            "Retrieve the current processing status of a content item. Useful for monitoring "
            "asynchronous content processing progress and identifying any processing errors."
        ),
        responses={
            200: {
                "description": "Content status retrieved successfully",
                "content": {
                    "application/json": {
                        "examples": {
                            "completed": {
                                "summary": "Example completed content status",
                                "value": {
                                    "status": "completed",
                                    "status_message": "",
                                },
                            }
                        }
                    }
                },
            },
            404: {"description": "Content not found", "model": NotFoundResponse},
        },
    )
    async def get_content_status(
        request: Request,
        content_id: str,
        db_id: Optional[str] = Query(default=None, description="Database ID to use"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
    ) -> ContentStatusResponse:
        """Return the processing status (and any status message) for one content item.

        Normalizes whatever the knowledge layer reports (enum, string, legacy value)
        into the schema-level ContentStatus enum before responding.
        """
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the caller's bearer token so the remote service can authorize the request.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.get_content_status(content_id=content_id, headers=headers)
        knowledge_status, status_message = await knowledge.aget_content_status(content_id=content_id)
        # Handle the case where content is not found
        if knowledge_status is None:
            return ContentStatusResponse(
                id=content_id, status=ContentStatus.FAILED, status_message=status_message or "Content not found"
            )
        # Convert knowledge ContentStatus to schema ContentStatus (they have same values)
        if hasattr(knowledge_status, "value"):
            status_value = knowledge_status.value
        else:
            status_value = str(knowledge_status)
        # Convert string status to ContentStatus enum if needed (for backward compatibility and mocks)
        if isinstance(status_value, str):
            try:
                status = ContentStatus(status_value.lower())
            except ValueError:
                # Handle legacy or unknown statuses gracefully
                if "failed" in status_value.lower():
                    status = ContentStatus.FAILED
                elif "completed" in status_value.lower():
                    status = ContentStatus.COMPLETED
                else:
                    # Anything unrecognized is treated as still in flight.
                    status = ContentStatus.PROCESSING
        else:
            status = ContentStatus.PROCESSING
        return ContentStatusResponse(id=content_id, status=status, status_message=status_message or "")
    @router.post(
        "/knowledge/search",
        status_code=200,
        operation_id="search_knowledge",
        summary="Search Knowledge",
        description="Search the knowledge base for relevant documents using query, filters and search type.",
        response_model=PaginatedResponse[VectorSearchResult],
        responses={
            200: {
                "description": "Search results retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "data": [
                                {
                                    "id": "doc_123",
                                    "content": "Jordan Mitchell - Software Engineer with skills in JavaScript, React, Python",
                                    "name": "cv_1",
                                    "meta_data": {"page": 1, "chunk": 1},
                                    "usage": {"total_tokens": 14},
                                    "reranking_score": 0.95,
                                    "content_id": "content_456",
                                }
                            ],
                            "meta": {"page": 1, "limit": 20, "total_pages": 2, "total_count": 35},
                        }
                    }
                },
            },
            400: {"description": "Invalid search parameters"},
            404: {"description": "No documents found"},
        },
    )
    async def search_knowledge(
        http_request: Request, request: VectorSearchRequestSchema
    ) -> PaginatedResponse[VectorSearchResult]:
        """Run a vector/keyword/hybrid search against the knowledge base and paginate the results.

        The full result set is fetched from the vector DB and sliced in memory
        according to the request's pagination metadata.
        """
        import time
        start_time = time.time()
        knowledge = get_knowledge_instance(knowledge_instances, request.db_id, request.knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the caller's bearer token so the remote service can authorize the request.
            auth_token = get_auth_token_from_request(http_request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.search_knowledge(
                query=request.query,
                max_results=request.max_results,
                filters=request.filters,
                search_type=request.search_type,
                headers=headers,
            )
        # For now, validate the vector db ids exist in the knowledge base
        # We will add more logic around this once we have multi vectordb support
        # If vector db ids are provided, check if any of them match the knowledge's vector db
        if request.vector_db_ids:
            if knowledge.vector_db and knowledge.vector_db.id:
                if knowledge.vector_db.id not in request.vector_db_ids:
                    raise HTTPException(
                        status_code=400,
                        detail=f"None of the provided Vector DB IDs {request.vector_db_ids} match the knowledge base Vector DB ID {knowledge.vector_db.id}",
                    )
            else:
                raise HTTPException(status_code=400, detail="Knowledge base has no vector database configured")
        # Calculate pagination parameters
        meta = request.meta
        limit = meta.limit if meta and meta.limit is not None else 20
        page = meta.page if meta and meta.page is not None else 1
        # Use max_results if specified, otherwise use a higher limit for search then paginate
        search_limit = request.max_results
        results = await knowledge.asearch(
            query=request.query, max_results=search_limit, filters=request.filters, search_type=request.search_type
        )
        # Calculate pagination
        total_results = len(results)
        start_idx = (page - 1) * limit
        # Ensure start_idx doesn't exceed the total results
        if start_idx >= total_results and total_results > 0:
            # If page is beyond available results, return empty results
            paginated_results = []
        else:
            end_idx = min(start_idx + limit, total_results)
            paginated_results = results[start_idx:end_idx]
        # Wall-clock search latency, reported back to the client in the pagination meta.
        search_time_ms = (time.time() - start_time) * 1000
        # Convert Document objects to serializable format
        document_results = [VectorSearchResult.from_document(doc) for doc in paginated_results]
        # Calculate pagination info
        total_pages = (total_results + limit - 1) // limit  # Ceiling division
        return PaginatedResponse(
            data=document_results,
            meta=PaginationInfo(
                page=page,
                limit=limit,
                total_pages=total_pages,
                total_count=total_results,
                search_time_ms=search_time_ms,
            ),
        )
    @router.get(
        "/knowledge/config",
        status_code=200,
        operation_id="get_knowledge_config",
        summary="Get Config",
        description=(
            "Retrieve available readers, chunkers, and configuration options for content processing. "
            "This endpoint provides metadata about supported file types, processing strategies, and filters."
        ),
        responses={
            200: {
                "description": "Knowledge configuration retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "readers": {
                                "website": {
                                    "id": "website",
                                    "name": "WebsiteReader",
                                    "description": "Reads website files",
                                    "chunkers": [
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                        "FixedSizeChunker",
                                    ],
                                },
                                "firecrawl": {
                                    "id": "firecrawl",
                                    "name": "FirecrawlReader",
                                    "description": "Reads firecrawl files",
                                    "chunkers": [
                                        "SemanticChunker",
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                    ],
                                },
                                "youtube": {
                                    "id": "youtube",
                                    "name": "YoutubeReader",
                                    "description": "Reads youtube files",
                                    "chunkers": [
                                        "RecursiveChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "SemanticChunker",
                                        "FixedSizeChunker",
                                    ],
                                },
                                "web_search": {
                                    "id": "web_search",
                                    "name": "WebSearchReader",
                                    "description": "Reads web_search files",
                                    "chunkers": [
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                        "FixedSizeChunker",
                                    ],
                                },
                                "arxiv": {
                                    "id": "arxiv",
                                    "name": "ArxivReader",
                                    "description": "Reads arxiv files",
                                    "chunkers": [
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                    ],
                                },
                                "csv": {
                                    "id": "csv",
                                    "name": "CsvReader",
                                    "description": "Reads csv files",
                                    "chunkers": [
                                        "RowChunker",
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                    ],
                                },
                                "docx": {
                                    "id": "docx",
                                    "name": "DocxReader",
                                    "description": "Reads docx files",
                                    "chunkers": [
                                        "DocumentChunker",
                                        "FixedSizeChunker",
                                        "SemanticChunker",
                                        "AgenticChunker",
                                        "RecursiveChunker",
                                    ],
                                },
                                "gcs": {
                                    "id": "gcs",
                                    "name": "GcsReader",
                                    "description": "Reads gcs files",
                                    "chunkers": [
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                    ],
                                },
                                "json": {
                                    "id": "json",
                                    "name": "JsonReader",
                                    "description": "Reads json files",
                                    "chunkers": [
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                    ],
                                },
                                "markdown": {
                                    "id": "markdown",
                                    "name": "MarkdownReader",
                                    "description": "Reads markdown files",
                                    "chunkers": [
                                        "MarkdownChunker",
                                        "DocumentChunker",
                                        "AgenticChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                        "FixedSizeChunker",
                                    ],
                                },
                                "pdf": {
                                    "id": "pdf",
                                    "name": "PdfReader",
                                    "description": "Reads pdf files",
                                    "chunkers": [
                                        "DocumentChunker",
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "SemanticChunker",
                                        "RecursiveChunker",
                                    ],
                                },
                                "text": {
                                    "id": "text",
                                    "name": "TextReader",
                                    "description": "Reads text files",
                                    "chunkers": [
                                        "CodeChunker",
                                        "FixedSizeChunker",
                                        "AgenticChunker",
                                        "DocumentChunker",
                                        "RecursiveChunker",
                                        "SemanticChunker",
                                    ],
                                },
                            },
                            "readersForType": {
                                "url": [
                                    "url",
                                    "website",
                                    "firecrawl",
                                    "youtube",
                                    "web_search",
                                    "gcs",
                                ],
                                "youtube": ["youtube"],
                                "text": ["web_search"],
                                "topic": ["arxiv"],
                                "file": ["csv", "gcs"],
                                ".csv": ["csv", "field_labeled_csv"],
                                ".xlsx": ["excel"],
                                ".xls": ["excel"],
                                "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ["excel"],
                                "application/vnd.ms-excel": ["excel"],
                                ".docx": ["docx"],
                                ".doc": ["docx"],
                                ".json": ["json"],
                                ".md": ["markdown"],
                                ".pdf": ["pdf"],
                                ".txt": ["text"],
                            },
                            "chunkers": {
                                "AgenticChunker": {
                                    "key": "AgenticChunker",
                                    "name": "AgenticChunker",
                                    "description": "Chunking strategy that uses an LLM to determine natural breakpoints in the text",
                                    "metadata": {"chunk_size": 5000},
                                },
                                "CodeChunker": {
                                    "key": "CodeChunker",
                                    "name": "CodeChunker",
                                    "description": "The CodeChunker splits code into chunks based on its structure, leveraging Abstract Syntax Trees (ASTs) to create contextually relevant segments",
                                    "metadata": {"chunk_size": 2048},
                                },
                                "DocumentChunker": {
                                    "key": "DocumentChunker",
                                    "name": "DocumentChunker",
                                    "description": "A chunking strategy that splits text based on document structure like paragraphs and sections",
                                    "metadata": {
                                        "chunk_size": 5000,
                                        "chunk_overlap": 0,
                                    },
                                },
                                "FixedSizeChunker": {
                                    "key": "FixedSizeChunker",
                                    "name": "FixedSizeChunker",
                                    "description": "Chunking strategy that splits text into fixed-size chunks with optional overlap",
                                    "metadata": {
                                        "chunk_size": 5000,
                                        "chunk_overlap": 0,
                                    },
                                },
                                "MarkdownChunker": {
                                    "key": "MarkdownChunker",
                                    "name": "MarkdownChunker",
                                    "description": "A chunking strategy that splits markdown based on structure like headers, paragraphs and sections",
                                    "metadata": {
                                        "chunk_size": 5000,
                                        "chunk_overlap": 0,
                                    },
                                },
                                "RecursiveChunker": {
                                    "key": "RecursiveChunker",
                                    "name": "RecursiveChunker",
                                    "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
                                    "metadata": {
                                        "chunk_size": 5000,
                                        "chunk_overlap": 0,
                                    },
                                },
                                "RowChunker": {
                                    "key": "RowChunker",
                                    "name": "RowChunker",
                                    "description": "RowChunking chunking strategy",
                                    "metadata": {},
                                },
                                "SemanticChunker": {
                                    "key": "SemanticChunker",
                                    "name": "SemanticChunker",
                                    "description": "Chunking strategy that splits text into semantic chunks using chonkie",
                                    "metadata": {"chunk_size": 5000},
                                },
                            },
                            "vector_dbs": [
                                {
                                    "id": "vector_db_1",
                                    "name": "Vector DB 1",
                                    "description": "Vector DB 1 description",
                                    "search_types": ["vector", "keyword", "hybrid"],
                                }
                            ],
                            "filters": ["filter_tag_1", "filter_tag2"],
                        }
                    }
                },
            }
        },
    )
    async def get_config(
        request: Request,
        db_id: Optional[str] = Query(default=None, description="Database ID to use"),
        knowledge_id: Optional[str] = Query(default=None, description="Knowledge base ID to use"),
    ) -> ConfigResponseSchema:
        """Assemble the knowledge base's processing configuration.

        Collects readers (factory + custom), the content-type-to-reader mapping,
        available chunkers, vector DB descriptors, valid filters and any remote
        content sources into a single ConfigResponseSchema.
        """
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            # Forward the caller's bearer token and let the remote service build the config.
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await knowledge.get_config(headers=headers)
        # Get factory readers info (including custom readers from this knowledge instance)
        readers_info = get_all_readers_info(knowledge)
        reader_schemas = {}
        # Add factory readers
        for reader_info in readers_info:
            reader_schemas[reader_info["id"]] = ReaderSchema(
                id=reader_info["id"],
                name=reader_info["name"],
                description=reader_info.get("description"),
                chunkers=reader_info.get("chunking_strategies", []),
            )
        # Add custom readers from knowledge.readers
        readers_result: Any = knowledge.get_readers() or {}
        # Ensure readers_dict is a dictionary (defensive check)
        if not isinstance(readers_result, dict):
            readers_dict: Dict[str, Reader] = {}
        else:
            readers_dict = readers_result
        if readers_dict:
            for reader_id, reader in readers_dict.items():
                # Get chunking strategies from the reader
                chunking_strategies = []
                try:
                    strategies = reader.get_supported_chunking_strategies()
                    chunking_strategies = [strategy.value for strategy in strategies]
                except Exception:
                    # A reader that can't report its strategies is still listed, just without chunkers.
                    chunking_strategies = []
                # Check if this reader ID already exists in factory readers
                if reader_id not in reader_schemas:
                    reader_schemas[reader_id] = ReaderSchema(
                        id=reader_id,
                        name=getattr(reader, "name", reader.__class__.__name__),
                        description=getattr(reader, "description", f"Custom {reader.__class__.__name__}"),
                        chunkers=chunking_strategies,
                    )
        # Get content types to readers mapping (including custom readers from this knowledge instance)
        types_of_readers = get_content_types_to_readers_mapping(knowledge)
        chunkers_list = get_all_chunkers_info()
        # Convert chunkers list to dictionary format expected by schema
        chunkers_dict = {}
        for chunker_info in chunkers_list:
            chunker_key = chunker_info.get("key")
            if chunker_key:
                chunkers_dict[chunker_key] = ChunkerSchema(
                    key=chunker_key,
                    name=chunker_info.get("name"),
                    description=chunker_info.get("description"),
                    metadata=chunker_info.get("metadata", {}),
                )
        vector_dbs = []
        if knowledge.vector_db:
            search_types = knowledge.vector_db.get_supported_search_types()
            name = knowledge.vector_db.name
            db_id = knowledge.vector_db.id
            vector_dbs.append(
                VectorDbSchema(
                    id=db_id,
                    name=name,
                    description=knowledge.vector_db.description,
                    search_types=search_types,
                )
            )
        filters = await knowledge.aget_valid_filters()
        # Get remote content sources if available
        remote_content_sources = None
        if hasattr(knowledge, "_get_remote_configs") and callable(knowledge._get_remote_configs):
            remote_configs = knowledge._get_remote_configs()
            if remote_configs:
                from agno.os.routers.knowledge.schemas import RemoteContentSourceSchema
                remote_content_sources = [
                    RemoteContentSourceSchema(
                        id=config.id,
                        name=config.name,
                        # Derive the source type from the config class name, e.g. "S3Config" -> "s3".
                        type=config.__class__.__name__.replace("Config", "").lower(),
                        metadata=config.metadata,
                    )
                    for config in remote_configs
                ]
        return ConfigResponseSchema(
            readers=reader_schemas,
            vector_dbs=vector_dbs,
            readersForType=types_of_readers,
            chunkers=chunkers_dict,
            filters=filters,
            remote_content_sources=remote_content_sources,
        )
@router.get(
"/knowledge/{knowledge_id}/sources",
response_model=List[RemoteContentSourceSchema],
status_code=200,
operation_id="list_content_sources",
summary="List Content Sources",
description="List all registered content sources (S3, GCS, SharePoint, GitHub) for the knowledge base.",
responses={
200: {
"description": "Content sources retrieved successfully",
"content": {
"application/json": {
"example": [
{
"id": "company-s3",
"name": "Company Documents",
"type": "s3",
"prefix": "documents/",
}
]
}
},
},
404: {"description": "Knowledge base not found", "model": NotFoundResponse},
},
)
async def list_sources(
request: Request,
knowledge_id: str = Path(..., description="ID of the knowledge base"),
db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
) -> List[RemoteContentSourceSchema]:
knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
if isinstance(knowledge, RemoteKnowledge):
raise HTTPException(status_code=501, detail="Source listing not yet supported for RemoteKnowledge")
if not hasattr(knowledge, "_get_remote_configs") or not callable(knowledge._get_remote_configs):
return []
remote_configs = knowledge._get_remote_configs()
if not remote_configs:
return []
return [
RemoteContentSourceSchema(
id=config.id,
name=config.name,
type=config.__class__.__name__.replace("Config", "").lower(),
metadata=config.metadata,
prefix=getattr(config, "prefix", None),
)
for config in remote_configs
]
    @router.get(
        "/knowledge/{knowledge_id}/sources/{source_id}/files",
        response_model=SourceFilesResponseSchema,
        status_code=200,
        operation_id="list_source_files",
        summary="List Files in Source",
        description=(
            "List available files and folders in a specific content source. Supports pagination and folder navigation."
        ),
        responses={
            200: {
                "description": "Files listed successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "source_id": "company-s3",
                            "source_name": "Company Documents",
                            "prefix": "reports/",
                            "folders": [{"prefix": "reports/2024/", "name": "2024", "is_empty": False}],
                            "files": [
                                {
                                    "key": "reports/annual-summary.pdf",
                                    "name": "annual-summary.pdf",
                                    "size": 102400,
                                    "last_modified": "2024-01-15T10:30:00Z",
                                    "content_type": "application/pdf",
                                }
                            ],
                            "meta": {"page": 1, "limit": 100, "total_pages": 1, "total_count": 1},
                        }
                    }
                },
            },
            404: {"description": "Knowledge base or content source not found", "model": NotFoundResponse},
            400: {"description": "Unsupported source type", "model": BadRequestResponse},
        },
    )
    async def list_source_files(
        request: Request,
        knowledge_id: str = Path(..., description="ID of the knowledge base"),
        source_id: str = Path(..., description="ID of the content source"),
        prefix: Optional[str] = Query(default=None, description="Path prefix to filter files"),
        limit: int = Query(default=100, ge=1, le=1000, description="Number of files per page"),
        page: int = Query(default=1, ge=1, description="Page number (1-indexed)"),
        delimiter: str = Query(default="/", description="Folder delimiter (enables folder grouping)"),
        db_id: Optional[str] = Query(default=None, description="The ID of the database to use"),
    ) -> SourceFilesResponseSchema:
        """List files and folders inside one remote content source (currently S3 only).

        Translates common S3 failures into HTTP errors: missing bucket -> 404,
        bad/missing credentials -> 401, anything else -> 500.
        """
        knowledge = get_knowledge_instance(knowledge_instances, db_id, knowledge_id)
        if isinstance(knowledge, RemoteKnowledge):
            raise HTTPException(status_code=501, detail="Source file listing not yet supported for RemoteKnowledge")
        # Get the config for this source
        config = knowledge._get_remote_config_by_id(source_id)
        if config is None:
            raise HTTPException(status_code=404, detail=f"Content source not found: {source_id}")
        # Only S3 sources support file listing
        if not isinstance(config, S3Config):
            raise HTTPException(
                status_code=400,
                detail=f"Source type '{type(config).__name__}' does not support file listing.",
            )
        try:
            result = await config.alist_files(
                prefix=prefix,
                delimiter=delimiter,
                limit=limit,
                page=page,
            )
        except ImportError as e:
            # Missing optional dependency (e.g. the S3 client library) surfaces as a server error.
            raise HTTPException(status_code=500, detail=str(e))
        except Exception as e:
            # Map well-known S3 error signatures in the message text to specific HTTP statuses.
            error_str = str(e)
            if "NoSuchBucket" in error_str:
                raise HTTPException(
                    status_code=404,
                    detail=f"Bucket '{config.bucket_name}' does not exist",
                )
            if "NoCredentials" in error_str or "InvalidAccessKeyId" in error_str:
                raise HTTPException(
                    status_code=401,
                    detail="Invalid or missing AWS credentials for this source",
                )
            log_error(f"Error listing files from {type(config).__name__}: {e}")
            raise HTTPException(status_code=500, detail=f"Failed to list files: {error_str}")
        return SourceFilesResponseSchema(
            source_id=source_id,
            source_name=config.name,
            prefix=prefix or "",
            folders=[
                SourceFolderSchema(
                    prefix=folder["prefix"],
                    name=folder["name"],
                    is_empty=folder["is_empty"],
                )
                for folder in result.folders
            ],
            files=[
                SourceFileSchema(
                    key=file["key"],
                    name=file["name"],
                    size=file["size"],
                    last_modified=file["last_modified"],
                    content_type=file["content_type"],
                )
                for file in result.files
            ],
            meta=PaginationInfo(
                page=result.page,
                limit=result.limit,
                total_count=result.total_count,
                total_pages=result.total_pages,
            ),
        )
return router
async def process_content(
    knowledge: Knowledge,
    content: Content,
    reader_id: Optional[str] = None,
    chunker: Optional[str] = None,
    chunk_size: Optional[int] = None,
    chunk_overlap: Optional[int] = None,
):
    """Background task to process the content.

    Resolves the requested reader and chunking strategy, then loads the content
    into the knowledge base. On failure the content row is marked FAILED in the
    contents DB so clients polling the status endpoint see the error.

    Args:
        knowledge: Knowledge instance that owns the content.
        content: The content to process (reader may be set on it here).
        reader_id: Optional id of a custom or factory reader to use.
        chunker: Optional chunking strategy name to apply on the reader.
        chunk_size: Optional chunk size forwarded to the chunking strategy.
        chunk_overlap: Optional chunk overlap forwarded to the chunking strategy.
    """
    try:
        if reader_id:
            reader = _resolve_reader(knowledge, reader_id)
            if reader:
                content.reader = reader
            else:
                log_debug(f"Could not resolve reader with id: {reader_id}")
        if chunker and content.reader:
            # Set the chunker name on the reader - let the reader handle it internally
            content.reader.set_chunking_strategy_from_string(chunker, chunk_size=chunk_size, overlap=chunk_overlap)
            log_debug(f"Set chunking strategy: {chunker}")
        log_debug(f"Using reader: {content.reader.__class__.__name__}")
        await knowledge._aload_content(content, upsert=False, skip_if_exists=True)
        log_info(f"Content {content.id} processed successfully")
    except Exception as e:
        # Log at error level (not info): this is a genuine processing failure,
        # consistent with how the rest of this module reports errors.
        log_error(f"Error processing content: {e}")
        # Mark content as failed in the contents DB
        try:
            from agno.knowledge.content import ContentStatus as KnowledgeContentStatus
            content.status = KnowledgeContentStatus.FAILED
            content.status_message = str(e)
            # Use async patch method if contents_db is an AsyncBaseDb, otherwise use sync patch method
            if knowledge.contents_db is not None and isinstance(knowledge.contents_db, AsyncBaseDb):
                await knowledge.apatch_content(content)
            else:
                knowledge.patch_content(content)
        except Exception as patch_error:
            # Swallow secondary errors to avoid crashing the background task,
            # but leave a trace instead of failing completely silently.
            log_error(f"Failed to mark content {content.id} as failed: {patch_error}")
def _resolve_reader(knowledge: Knowledge, reader_id: str) -> Optional[Reader]:
    """Resolve a reader by id: custom readers on the knowledge instance first, then the ReaderFactory."""
    # Use get_readers() to ensure we get a dict (handles list conversion)
    custom_readers = knowledge.get_readers()
    if custom_readers and reader_id in custom_readers:
        reader = custom_readers[reader_id]
        log_debug(f"Found custom reader: {reader.__class__.__name__}")
        return reader
    # Normalize the id (e.g. "PDF Reader" -> "pdf_reader") and also try it without a "reader" suffix.
    key = reader_id.lower().strip().replace("-", "_").replace(" ", "_")
    candidates = [key] + ([key[:-6]] if key.endswith("reader") else [])
    for cand in candidates:
        try:
            reader = ReaderFactory.create_reader(cand)
            log_debug(f"Resolved reader from factory: {reader.__class__.__name__}")
            return reader
        except Exception:
            # Unknown factory key for this candidate; try the next one.
            continue
    return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/knowledge/knowledge.py",
"license": "Apache License 2.0",
"lines": 1333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/knowledge/schemas.py | from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from agno.os.schema import PaginationInfo
class ContentStatus(str, Enum):
    """Enumeration of possible content processing statuses."""
    PROCESSING = "processing"  # content accepted; read/chunk/embed pipeline still running
    COMPLETED = "completed"  # processing finished successfully
    FAILED = "failed"  # processing raised an error; details carried in status_message
class ContentStatusResponse(BaseModel):
    """Response model for content status endpoint."""
    # Content ID is optional so the model can also represent not-found/failed lookups.
    id: Optional[str] = Field(None, description="Content ID")
    status: ContentStatus = Field(..., description="Current processing status of the content")
    # Empty string (not None) by default so clients always get a string here.
    status_message: str = Field("", description="Status message or error details")
class ContentResponseSchema(BaseModel):
    """Response model describing a single knowledge content entry."""
    id: str = Field(..., description="Unique identifier for the content")
    name: Optional[str] = Field(None, description="Name of the content")
    description: Optional[str] = Field(None, description="Description of the content")
    type: Optional[str] = Field(None, description="MIME type of the content")
    size: Optional[str] = Field(None, description="Size of the content in bytes")
    linked_to: Optional[str] = Field(None, description="ID of related content if linked")
    metadata: Optional[dict] = Field(None, description="Additional metadata as key-value pairs")
    access_count: Optional[int] = Field(None, description="Number of times content has been accessed", ge=0)
    status: Optional[ContentStatus] = Field(None, description="Processing status of the content")
    status_message: Optional[str] = Field(None, description="Status message or error details")
    created_at: Optional[datetime] = Field(None, description="Timestamp when content was created")
    updated_at: Optional[datetime] = Field(None, description="Timestamp when content was last updated")
    @classmethod
    def from_dict(cls, content: Dict[str, Any]) -> "ContentResponseSchema":
        """Build a ContentResponseSchema from a raw content dict, normalizing status and timestamps.

        Unknown status strings are coerced by substring match; unparseable
        timestamps become None rather than raising.
        """
        status = content.get("status")
        if isinstance(status, str):
            try:
                # Fast path: the string is an exact (case-insensitive) enum value.
                status = ContentStatus(status.lower())
            except ValueError:
                # Handle legacy or unknown statuses gracefully
                if "failed" in status.lower():
                    status = ContentStatus.FAILED
                elif "completed" in status.lower():
                    status = ContentStatus.COMPLETED
                else:
                    status = ContentStatus.PROCESSING
        elif status is None:
            status = ContentStatus.PROCESSING  # Default for None values
        # Helper function to safely parse timestamps
        def parse_timestamp(timestamp_value):
            # Accepts datetime, ISO-8601 string, or numeric epoch seconds; returns None on failure.
            if timestamp_value is None:
                return None
            try:
                # If it's already a datetime object, return it
                if isinstance(timestamp_value, datetime):
                    return timestamp_value
                # If it's a string, try to parse it as ISO format first
                if isinstance(timestamp_value, str):
                    try:
                        # "Z" suffix is not accepted by fromisoformat on older Pythons; normalize it.
                        return datetime.fromisoformat(timestamp_value.replace("Z", "+00:00"))
                    except ValueError:
                        # Try to parse as float/int timestamp
                        timestamp_value = float(timestamp_value)
                # If it's a number, use fromtimestamp
                return datetime.fromtimestamp(timestamp_value, tz=timezone.utc)
            except (ValueError, TypeError, OSError):
                # If all parsing fails, return None
                return None
        return cls(
            id=content.get("id"),  # type: ignore
            name=content.get("name"),
            description=content.get("description"),
            type=content.get("file_type"),
            # Falsy sizes (None, 0, "") are reported as the string "0".
            size=str(content.get("size")) if content.get("size") else "0",
            metadata=content.get("metadata"),
            status=status,
            status_message=content.get("status_message"),
            created_at=parse_timestamp(content.get("created_at")),
            # Falls back to created_at (then epoch 0) only when the key is absent,
            # not when it is explicitly None — TODO confirm that is intended.
            updated_at=parse_timestamp(content.get("updated_at", content.get("created_at", 0))),
            # TODO: These fields are not available in the Content class. Fix the inconsistency
            access_count=None,
            linked_to=None,
        )
class ContentUpdateSchema(BaseModel):
    """Schema for updating content. All fields are optional; only provided fields are updated."""
    name: Optional[str] = Field(None, description="Content name", min_length=1, max_length=255)
    description: Optional[str] = Field(None, description="Content description", max_length=1000)
    metadata: Optional[Dict[str, Any]] = Field(None, description="Content metadata as key-value pairs")
    reader_id: Optional[str] = Field(None, description="ID of the reader to use for processing", min_length=1)
class ReaderSchema(BaseModel):
    """Describes a content reader and the chunking strategies it supports."""
    id: str = Field(..., description="Unique identifier for the reader")
    name: Optional[str] = Field(None, description="Name of the reader")
    description: Optional[str] = Field(None, description="Description of the reader's capabilities")
    chunkers: Optional[List[str]] = Field(None, description="List of supported chunking strategies")
class ChunkerSchema(BaseModel):
    """Describes a chunking strategy available for content processing."""
    key: str  # Unique key identifying the chunker
    name: Optional[str] = None  # Human-readable display name
    description: Optional[str] = None  # Short description of the strategy
    metadata: Optional[Dict[str, Any]] = None  # Extra strategy configuration/details
class VectorDbSchema(BaseModel):
    """Describes a configured vector database and its supported search types."""
    id: str = Field(..., description="Unique identifier for the vector database")
    name: Optional[str] = Field(None, description="Name of the vector database")
    description: Optional[str] = Field(None, description="Description of the vector database")
    search_types: Optional[List[str]] = Field(
        None, description="List of supported search types (vector, keyword, hybrid)"
    )
class VectorSearchResult(BaseModel):
    """Schema for search result documents."""
    id: str = Field(..., description="Unique identifier for the search result document")
    content: str = Field(..., description="Content text of the document")
    name: Optional[str] = Field(None, description="Name of the document")
    meta_data: Optional[Dict[str, Any]] = Field(None, description="Metadata associated with the document")
    usage: Optional[Dict[str, Any]] = Field(None, description="Usage statistics (e.g., token counts)")
    reranking_score: Optional[float] = Field(None, description="Reranking score for relevance", ge=0.0, le=1.0)
    content_id: Optional[str] = Field(None, description="ID of the source content")
    content_origin: Optional[str] = Field(None, description="Origin URL or source of the content")
    size: Optional[int] = Field(None, description="Size of the content in bytes", ge=0)
    @classmethod
    def from_document(cls, document) -> "VectorSearchResult":
        """Convert a Document object to a serializable VectorSearchResult.

        Only ``id`` and ``content`` are required on the document; every other
        attribute is read with a None fallback.
        """
        optional_fields = (
            "name",
            "meta_data",
            "usage",
            "reranking_score",
            "content_id",
            "content_origin",
            "size",
        )
        extras = {field_name: getattr(document, field_name, None) for field_name in optional_fields}
        return cls(id=document.id, content=document.content, **extras)
class VectorSearchRequestSchema(BaseModel):
    """Schema for vector search request."""
    class Meta(BaseModel):
        """Inline metadata schema for pagination."""
        limit: int = Field(20, description="Number of results per page", ge=1)
        page: int = Field(1, description="Page number", ge=1)
    query: str = Field(..., description="The search query text")
    db_id: Optional[str] = Field(None, description="Database ID to search in")
    knowledge_id: Optional[str] = Field(None, description="Knowledge base ID to search in")
    vector_db_ids: Optional[List[str]] = Field(None, description="List of vector database IDs to search in")
    search_type: Optional[str] = Field(None, description="The type of search to perform (vector, keyword, hybrid)")
    max_results: Optional[int] = Field(None, description="The maximum number of results to return", ge=1, le=1000)
    filters: Optional[Dict[str, Any]] = Field(None, description="Filters to apply to the search results")
    # Pagination (meta) is applied on top of max_results — meta pages a result set, max_results caps it.
    meta: Optional[Meta] = Field(
        None, description="Pagination metadata. Limit and page number to return a subset of results."
    )
class RemoteContentSourceSchema(BaseModel):
    """Schema for remote content source configuration."""
    id: str = Field(..., description="Unique identifier for the content source")
    name: str = Field(..., description="Display name for the content source")
    type: str = Field(..., description="Type of content source (s3, gcs, sharepoint, github, azureblob)")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Custom metadata for the content source")
    prefix: Optional[str] = Field(None, description="Default path prefix for this source")
class SourceFileSchema(BaseModel):
    """Schema for a file in a content source."""
    key: str = Field(..., description="Full path/key of the file")
    name: str = Field(..., description="Display name (filename)")
    size: Optional[int] = Field(None, description="File size in bytes")
    last_modified: Optional[datetime] = Field(None, description="ISO 8601 timestamp of last modification")
    content_type: Optional[str] = Field(None, description="MIME type of the file")
class SourceFolderSchema(BaseModel):
    """Schema for a folder in a content source."""
    prefix: str = Field(..., description="Full prefix to use for navigating into this folder")
    name: str = Field(..., description="Display name of the folder")
    is_empty: bool = Field(False, description="Whether the folder contains any files")
class SourceFilesResponseSchema(BaseModel):
    """Response schema for listing files in a content source."""
    source_id: str = Field(..., description="ID of the content source")
    source_name: str = Field(..., description="Name of the content source")
    prefix: Optional[str] = Field(None, description="Prefix filter that was applied")
    folders: List[SourceFolderSchema] = Field(default_factory=list, description="Subfolders at this level")
    files: List[SourceFileSchema] = Field(default_factory=list, description="List of files at this level")
    meta: PaginationInfo = Field(..., description="Pagination metadata")
class ConfigResponseSchema(BaseModel):
    """Aggregate knowledge configuration: readers, chunkers, filters, vector DBs and remote sources."""
    readers: Optional[Dict[str, ReaderSchema]] = Field(None, description="Available content readers")
    # NOTE(review): camelCase field name breaks the file's snake_case convention — presumably
    # required by an existing API consumer; confirm before renaming.
    readersForType: Optional[Dict[str, List[str]]] = Field(None, description="Mapping of content types to reader IDs")
    chunkers: Optional[Dict[str, ChunkerSchema]] = Field(None, description="Available chunking strategies")
    filters: Optional[List[str]] = Field(None, description="Available filter tags")
    vector_dbs: Optional[List[VectorDbSchema]] = Field(None, description="Configured vector databases")
    remote_content_sources: Optional[List[RemoteContentSourceSchema]] = Field(
        None, description="Configured remote content sources (S3, GCS, SharePoint, GitHub)"
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/knowledge/schemas.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/memory/memory.py | import logging
import math
from typing import List, Optional, Union, cast
from uuid import uuid4
from fastapi import Depends, HTTPException, Path, Query, Request
from fastapi.routing import APIRouter
from agno.db.base import AsyncBaseDb, BaseDb
from agno.db.schemas import UserMemory
from agno.models.utils import get_model
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency
from agno.os.routers.memory.schemas import (
DeleteMemoriesRequest,
OptimizeMemoriesRequest,
OptimizeMemoriesResponse,
UserMemoryCreateSchema,
UserMemorySchema,
UserStatsSchema,
)
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
SortOrder,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import get_db
from agno.remote.base import RemoteDb
logger = logging.getLogger(__name__)
def get_memory_router(
    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs
) -> APIRouter:
    """Build the user-memory APIRouter and register all memory management endpoints on it."""
    # Default error responses advertised for every endpoint on this router.
    default_error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    memory_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Memory"],
        responses=default_error_responses,
    )
    return attach_routes(router=memory_router, dbs=dbs)
def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]]) -> APIRouter:
    @router.post(
        "/memories",
        response_model=UserMemorySchema,
        status_code=200,
        operation_id="create_memory",
        summary="Create Memory",
        description=(
            "Create a new user memory with content and associated topics. "
            "Memories are used to store contextual information for users across conversations."
        ),
        responses={
            200: {
                "description": "Memory created successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "memory_id": "mem-123",
                            "memory": "User prefers technical explanations with code examples",
                            "topics": ["preferences", "communication_style", "technical"],
                            "user_id": "user-456",
                            "created_at": "2024-01-15T10:30:00Z",
                            "updated_at": "2024-01-15T10:30:00Z",
                        }
                    }
                },
            },
            400: {"description": "Invalid request data", "model": BadRequestResponse},
            422: {"description": "Validation error in payload", "model": ValidationErrorResponse},
            500: {"description": "Failed to create memory", "model": InternalServerErrorResponse},
        },
    )
    async def create_memory(
        request: Request,
        payload: UserMemoryCreateSchema,
        db_id: Optional[str] = Query(default=None, description="Database ID to use for memory storage"),
        table: Optional[str] = Query(default=None, description="Table to use for memory storage"),
    ) -> UserMemorySchema:
        # An authenticated user_id on the request state always overrides the payload's user_id.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
            payload.user_id = user_id
        if payload.user_id is None:
            raise HTTPException(status_code=400, detail="User ID is required")
        db = await get_db(dbs, db_id, table)
        # Remote databases proxy the whole operation, forwarding the caller's bearer token.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.create_memory(
                memory=payload.memory,
                topics=payload.topics or [],
                user_id=payload.user_id,
                db_id=db_id,
                table=table,
                headers=headers,
            )
        # Local databases: a fresh UUID makes the upsert an insert.
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            user_memory = await db.upsert_user_memory(
                memory=UserMemory(
                    memory_id=str(uuid4()),
                    memory=payload.memory,
                    topics=payload.topics or [],
                    user_id=payload.user_id,
                ),
                deserialize=False,
            )
        else:
            user_memory = db.upsert_user_memory(
                memory=UserMemory(
                    memory_id=str(uuid4()),
                    memory=payload.memory,
                    topics=payload.topics or [],
                    user_id=payload.user_id,
                ),
                deserialize=False,
            )
        if not user_memory:
            raise HTTPException(status_code=500, detail="Failed to create memory")
        return UserMemorySchema.from_dict(user_memory)  # type: ignore
    @router.delete(
        "/memories/{memory_id}",
        status_code=204,
        operation_id="delete_memory",
        summary="Delete Memory",
        description="Permanently delete a specific user memory. This action cannot be undone.",
        responses={
            204: {"description": "Memory deleted successfully"},
            404: {"description": "Memory not found", "model": NotFoundResponse},
            500: {"description": "Failed to delete memory", "model": InternalServerErrorResponse},
        },
    )
    async def delete_memory(
        request: Request,
        memory_id: str = Path(description="Memory ID to delete"),
        user_id: Optional[str] = Query(default=None, description="User ID to delete memory for"),
        db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
        table: Optional[str] = Query(default=None, description="Table to use for deletion"),
    ) -> None:
        db = await get_db(dbs, db_id, table)
        # An authenticated user_id on the request state overrides the query parameter.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
        # Remote databases proxy the deletion, forwarding the caller's bearer token.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.delete_memory(
                memory_id=memory_id,
                user_id=user_id,
                db_id=db_id,
                table=table,
                headers=headers,
            )
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            await db.delete_user_memory(memory_id=memory_id, user_id=user_id)
        else:
            db.delete_user_memory(memory_id=memory_id, user_id=user_id)
    @router.delete(
        "/memories",
        status_code=204,
        operation_id="delete_memories",
        summary="Delete Multiple Memories",
        description=(
            "Delete multiple user memories by their IDs in a single operation. "
            "This action cannot be undone and all specified memories will be permanently removed."
        ),
        responses={
            204: {"description": "Memories deleted successfully"},
            400: {"description": "Invalid request - empty memory_ids list", "model": BadRequestResponse},
            500: {"description": "Failed to delete memories", "model": InternalServerErrorResponse},
        },
    )
    async def delete_memories(
        http_request: Request,
        request: DeleteMemoriesRequest,
        db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
        table: Optional[str] = Query(default=None, description="Table to use for deletion"),
    ) -> None:
        db = await get_db(dbs, db_id, table)
        # An authenticated user_id on the request state overrides the payload's user_id.
        if hasattr(http_request.state, "user_id") and http_request.state.user_id is not None:
            request.user_id = http_request.state.user_id
        # Remote databases proxy the bulk deletion, forwarding the caller's bearer token.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(http_request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.delete_memories(
                memory_ids=request.memory_ids,
                user_id=request.user_id,
                db_id=db_id,
                table=table,
                headers=headers,
            )
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            await db.delete_user_memories(memory_ids=request.memory_ids, user_id=request.user_id)
        else:
            db.delete_user_memories(memory_ids=request.memory_ids, user_id=request.user_id)
    @router.get(
        "/memories",
        response_model=PaginatedResponse[UserMemorySchema],
        status_code=200,
        operation_id="get_memories",
        summary="List Memories",
        description=(
            "Retrieve paginated list of user memories with filtering and search capabilities. "
            "Filter by user, agent, team, topics, or search within memory content."
        ),
        responses={
            200: {
                "description": "Memories retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "data": [
                                {
                                    "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
                                    "memory": "User likes coffee.",
                                    "topics": ["preferences"],
                                    "agent_id": None,
                                    "team_id": None,
                                    "user_id": "123",
                                    "updated_at": "2025-09-01T07:53:17Z",
                                }
                            ]
                        }
                    }
                },
            }
        },
    )
    async def get_memories(
        request: Request,
        user_id: Optional[str] = Query(default=None, description="Filter memories by user ID"),
        agent_id: Optional[str] = Query(default=None, description="Filter memories by agent ID"),
        team_id: Optional[str] = Query(default=None, description="Filter memories by team ID"),
        topics: Optional[List[str]] = Depends(parse_topics),
        search_content: Optional[str] = Query(default=None, description="Fuzzy search within memory content"),
        limit: Optional[int] = Query(default=20, description="Number of memories to return per page", ge=1),
        page: Optional[int] = Query(default=1, description="Page number for pagination", ge=0),
        sort_by: Optional[str] = Query(default="updated_at", description="Field to sort memories by"),
        sort_order: Optional[SortOrder] = Query(default="desc", description="Sort order (asc or desc)"),
        db_id: Optional[str] = Query(default=None, description="Database ID to query memories from"),
        table: Optional[str] = Query(default=None, description="The database table to use"),
    ) -> PaginatedResponse[UserMemorySchema]:
        db = await get_db(dbs, db_id, table)
        # An authenticated user_id on the request state overrides the query parameter.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
        # Remote databases proxy the whole query (pagination included), forwarding the bearer token.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.get_memories(
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                topics=topics,
                search_content=search_content,
                limit=limit,
                page=page,
                sort_by=sort_by,
                sort_order=sort_order.value if sort_order else "desc",
                db_id=db_id,
                table=table,
                headers=headers,
            )
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            user_memories, total_count = await db.get_user_memories(
                limit=limit,
                page=page,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                topics=topics,
                search_content=search_content,
                sort_by=sort_by,
                sort_order=sort_order,
                deserialize=False,
            )
        else:
            user_memories, total_count = db.get_user_memories(  # type: ignore
                limit=limit,
                page=page,
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                topics=topics,
                search_content=search_content,
                sort_by=sort_by,
                sort_order=sort_order,
                deserialize=False,
            )
        memories = [UserMemorySchema.from_dict(user_memory) for user_memory in user_memories]  # type: ignore
        return PaginatedResponse(
            data=[memory for memory in memories if memory is not None],
            meta=PaginationInfo(
                page=page,
                limit=limit,
                total_count=total_count,  # type: ignore
                total_pages=math.ceil(total_count / limit) if limit is not None and limit > 0 else 0,  # type: ignore
            ),
        )
    @router.get(
        "/memories/{memory_id}",
        response_model=UserMemorySchema,
        status_code=200,
        operation_id="get_memory",
        summary="Get Memory by ID",
        description="Retrieve detailed information about a specific user memory by its ID.",
        responses={
            200: {
                "description": "Memory retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
                            "memory": "User likes coffee.",
                            "topics": ["preferences"],
                            "agent_id": None,
                            "team_id": None,
                            "user_id": "123",
                            "updated_at": "2025-09-01T07:53:17Z",
                        }
                    }
                },
            },
            404: {"description": "Memory not found", "model": NotFoundResponse},
        },
    )
    async def get_memory(
        request: Request,
        memory_id: str = Path(description="Memory ID to retrieve"),
        user_id: Optional[str] = Query(default=None, description="User ID to query memory for"),
        db_id: Optional[str] = Query(default=None, description="Database ID to query memory from"),
        table: Optional[str] = Query(default=None, description="Table to query memory from"),
    ) -> UserMemorySchema:
        db = await get_db(dbs, db_id, table)
        # An authenticated user_id on the request state overrides the query parameter.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
        # Remote databases proxy the lookup, forwarding the caller's bearer token.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.get_memory(
                memory_id=memory_id,
                user_id=user_id,
                db_id=db_id,
                table=table,
                headers=headers,
            )
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            user_memory = await db.get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
        else:
            user_memory = db.get_user_memory(memory_id=memory_id, user_id=user_id, deserialize=False)
        if not user_memory:
            raise HTTPException(status_code=404, detail=f"Memory with ID {memory_id} not found")
        return UserMemorySchema.from_dict(user_memory)  # type: ignore
    @router.get(
        "/memory_topics",
        response_model=List[str],
        status_code=200,
        operation_id="get_memory_topics",
        summary="Get Memory Topics",
        description=(
            "Retrieve all unique topics associated with memories in the system. "
            "Useful for filtering and categorizing memories by topic."
        ),
        responses={
            200: {
                "description": "Memory topics retrieved successfully",
                "content": {
                    "application/json": {
                        "example": [
                            "preferences",
                            "communication_style",
                            "technical",
                            "industry",
                            "compliance",
                            "code_examples",
                            "requirements",
                            "healthcare",
                            "finance",
                        ]
                    }
                },
            }
        },
    )
    async def get_topics(
        request: Request,
        user_id: Optional[str] = Query(default=None, description="User ID to filter topics for"),
        db_id: Optional[str] = Query(default=None, description="Database ID to query topics from"),
        table: Optional[str] = Query(default=None, description="Table to query topics from"),
    ) -> List[str]:
        db = await get_db(dbs, db_id, table)
        # An authenticated user_id on the request state overrides the query parameter.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            # NOTE(review): unlike the local branches below, user_id is not forwarded here,
            # so remote results are not filtered per user — confirm whether that is intended.
            return await db.get_memory_topics(
                db_id=db_id,
                table=table,
                headers=headers,
            )
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            return await db.get_all_memory_topics(user_id=user_id)
        else:
            return db.get_all_memory_topics(user_id=user_id)
    @router.patch(
        "/memories/{memory_id}",
        response_model=UserMemorySchema,
        status_code=200,
        operation_id="update_memory",
        summary="Update Memory",
        description=(
            "Update an existing user memory's content and topics. "
            "Replaces the entire memory content and topic list with the provided values."
        ),
        responses={
            200: {
                "description": "Memory updated successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
                            "memory": "User likes coffee.",
                            "topics": ["preferences"],
                            "agent_id": None,
                            "team_id": None,
                            "user_id": "123",
                            "updated_at": "2025-09-01T07:53:17Z",
                        }
                    }
                },
            },
            400: {"description": "Invalid request data", "model": BadRequestResponse},
            404: {"description": "Memory not found", "model": NotFoundResponse},
            422: {"description": "Validation error in payload", "model": ValidationErrorResponse},
            500: {"description": "Failed to update memory", "model": InternalServerErrorResponse},
        },
    )
    async def update_memory(
        request: Request,
        payload: UserMemoryCreateSchema,
        memory_id: str = Path(description="Memory ID to update"),
        db_id: Optional[str] = Query(default=None, description="Database ID to use for update"),
        table: Optional[str] = Query(default=None, description="Table to use for update"),
    ) -> UserMemorySchema:
        # An authenticated user_id on the request state overrides the payload's user_id.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
            payload.user_id = user_id
        if payload.user_id is None:
            raise HTTPException(status_code=400, detail="User ID is required")
        db = await get_db(dbs, db_id, table)
        # Remote databases proxy the update, forwarding the caller's bearer token.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.update_memory(
                memory_id=memory_id,
                user_id=payload.user_id,
                memory=payload.memory,
                topics=payload.topics or [],
                db_id=db_id,
                table=table,
                headers=headers,
            )
        # Local databases: reusing the path's memory_id makes the upsert an update.
        if isinstance(db, AsyncBaseDb):
            db = cast(AsyncBaseDb, db)
            user_memory = await db.upsert_user_memory(
                memory=UserMemory(
                    memory_id=memory_id,
                    memory=payload.memory,
                    topics=payload.topics or [],
                    user_id=payload.user_id,
                ),
                deserialize=False,
            )
        else:
            user_memory = db.upsert_user_memory(
                memory=UserMemory(
                    memory_id=memory_id,
                    memory=payload.memory,
                    topics=payload.topics or [],
                    user_id=payload.user_id,
                ),
                deserialize=False,
            )
        if not user_memory:
            raise HTTPException(status_code=500, detail="Failed to update memory")
        return UserMemorySchema.from_dict(user_memory)  # type: ignore
    @router.get(
        "/user_memory_stats",
        response_model=PaginatedResponse[UserStatsSchema],
        status_code=200,
        operation_id="get_user_memory_stats",
        summary="Get User Memory Statistics",
        description=(
            "Retrieve paginated statistics about memory usage by user. "
            "Provides insights into user engagement and memory distribution across users."
        ),
        responses={
            200: {
                "description": "User memory statistics retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "data": [
                                {
                                    "user_id": "123",
                                    "total_memories": 3,
                                    "last_memory_updated_at": "2025-09-01T07:53:17Z",
                                }
                            ]
                        }
                    }
                },
            },
            500: {"description": "Failed to retrieve user statistics", "model": InternalServerErrorResponse},
        },
    )
    async def get_user_memory_stats(
        request: Request,
        limit: Optional[int] = Query(default=20, description="Number of user statistics to return per page", ge=1),
        page: Optional[int] = Query(default=1, description="Page number for pagination", ge=0),
        user_id: Optional[str] = Query(default=None, description="User ID to filter statistics for"),
        db_id: Optional[str] = Query(default=None, description="Database ID to query statistics from"),
        table: Optional[str] = Query(default=None, description="Table to query statistics from"),
    ) -> PaginatedResponse[UserStatsSchema]:
        db = await get_db(dbs, db_id, table)
        # An authenticated user_id on the request state overrides the query parameter.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            # NOTE(review): user_id is not forwarded here, unlike the local branches — confirm intent.
            return await db.get_user_memory_stats(
                limit=limit,
                page=page,
                db_id=db_id,
                table=table,
                headers=headers,
            )
        try:
            # Ensure limit and page are integers
            limit = int(limit) if limit is not None else 20
            page = int(page) if page is not None else 1
            if isinstance(db, AsyncBaseDb):
                db = cast(AsyncBaseDb, db)
                user_stats, total_count = await db.get_user_memory_stats(
                    limit=limit,
                    page=page,
                    user_id=user_id,
                )
            else:
                user_stats, total_count = db.get_user_memory_stats(
                    limit=limit,
                    page=page,
                    user_id=user_id,
                )
            return PaginatedResponse(
                data=[UserStatsSchema.from_dict(stats) for stats in user_stats],
                meta=PaginationInfo(
                    page=page,
                    limit=limit,
                    total_count=total_count,
                    # Integer ceiling division: equivalent to math.ceil(total_count / limit).
                    total_pages=(total_count + limit - 1) // limit if limit is not None and limit > 0 else 0,
                ),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Failed to get user statistics: {str(e)}")
    @router.post(
        "/optimize-memories",
        response_model=OptimizeMemoriesResponse,
        status_code=200,
        operation_id="optimize_memories",
        summary="Optimize User Memories",
        description=(
            "Optimize all memories for a given user using the default summarize strategy. "
            "This operation combines all memories into a single comprehensive summary, "
            "achieving maximum token reduction while preserving all key information. "
            "To use a custom model, specify the model parameter in 'provider:model_id' format "
            "(e.g., 'openai:gpt-4o-mini', 'anthropic:claude-3-5-sonnet-20241022'). "
            "If not specified, uses MemoryManager's default model (gpt-4o). "
            "Set apply=false to preview optimization results without saving to database."
        ),
        responses={
            200: {
                "description": "Memories optimized successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "memories": [
                                {
                                    "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
                                    "memory": "User has a 3-year-old golden retriever named Max who loves fetch and walks. Lives in San Francisco's Mission district, works as a product manager in tech. Enjoys hiking Bay Area trails, trying new restaurants (especially Japanese, Thai, Mexican), and learning piano for 1.5 years.",
                                    "topics": ["pets", "location", "work", "hobbies", "food_preferences"],
                                    "user_id": "user2",
                                    "updated_at": "2025-11-18T10:30:00Z",
                                }
                            ],
                            "memories_before": 4,
                            "memories_after": 1,
                            "tokens_before": 450,
                            "tokens_after": 180,
                            "tokens_saved": 270,
                            "reduction_percentage": 60.0,
                        }
                    }
                },
            },
            400: {
                "description": "Bad request - User ID is required or invalid model string format",
                "model": BadRequestResponse,
            },
            404: {"description": "No memories found for user", "model": NotFoundResponse},
            500: {"description": "Failed to optimize memories", "model": InternalServerErrorResponse},
        },
    )
    async def optimize_memories(
        http_request: Request,
        request: OptimizeMemoriesRequest,
        db_id: Optional[str] = Query(default=None, description="Database ID to use for optimization"),
        table: Optional[str] = Query(default=None, description="Table to use for optimization"),
    ) -> OptimizeMemoriesResponse:
        """Optimize user memories using the default summarize strategy."""
        # Local imports — presumably deferred to avoid a module-load cost or an import
        # cycle with agno.memory; confirm before hoisting to the top of the file.
        from agno.memory import MemoryManager
        from agno.memory.strategies.types import MemoryOptimizationStrategyType
        # An authenticated user_id on the request state overrides the payload's user_id.
        if hasattr(http_request.state, "user_id") and http_request.state.user_id is not None:
            request.user_id = http_request.state.user_id
        try:
            # Get database instance
            db = await get_db(dbs, db_id, table)
            # Remote databases proxy the whole optimization, forwarding the caller's bearer token.
            if isinstance(db, RemoteDb):
                auth_token = get_auth_token_from_request(http_request)
                headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
                return await db.optimize_memories(
                    user_id=request.user_id,
                    model=request.model,
                    apply=request.apply,
                    db_id=db_id,
                    table=table,
                    headers=headers,
                )
            # Create memory manager with optional model
            if request.model:
                try:
                    # Parses 'provider:model_id' strings; an invalid format raises ValueError.
                    model_instance = get_model(request.model)
                except ValueError as e:
                    raise HTTPException(status_code=400, detail=str(e))
                memory_manager = MemoryManager(model=model_instance, db=db)
            else:
                # No model specified - use MemoryManager's default
                memory_manager = MemoryManager(db=db)
            # Get current memories to count tokens before optimization
            if isinstance(db, AsyncBaseDb):
                memories_before = await memory_manager.aget_user_memories(user_id=request.user_id)
            else:
                memories_before = memory_manager.get_user_memories(user_id=request.user_id)
            if not memories_before:
                raise HTTPException(status_code=404, detail=f"No memories found for user {request.user_id}")
            # Count tokens before optimization
            from agno.memory.strategies.summarize import SummarizeStrategy
            strategy = SummarizeStrategy()
            tokens_before = strategy.count_tokens(memories_before)
            memories_before_count = len(memories_before)
            # Optimize memories with default SUMMARIZE strategy
            if isinstance(db, AsyncBaseDb):
                optimized_memories = await memory_manager.aoptimize_memories(
                    user_id=request.user_id,
                    strategy=MemoryOptimizationStrategyType.SUMMARIZE,
                    apply=request.apply,
                )
            else:
                optimized_memories = memory_manager.optimize_memories(
                    user_id=request.user_id,
                    strategy=MemoryOptimizationStrategyType.SUMMARIZE,
                    apply=request.apply,
                )
            # Count tokens after optimization
            tokens_after = strategy.count_tokens(optimized_memories)
            memories_after_count = len(optimized_memories)
            # Calculate statistics
            tokens_saved = tokens_before - tokens_after
            reduction_percentage = (tokens_saved / tokens_before * 100.0) if tokens_before > 0 else 0.0
            # Convert to schema objects
            optimized_memory_schemas = [
                UserMemorySchema(
                    memory_id=mem.memory_id or "",
                    memory=mem.memory or "",
                    topics=mem.topics,
                    agent_id=mem.agent_id,
                    team_id=mem.team_id,
                    user_id=mem.user_id,
                    updated_at=mem.updated_at,
                )
                for mem in optimized_memories
            ]
            return OptimizeMemoriesResponse(
                memories=optimized_memory_schemas,
                memories_before=memories_before_count,
                memories_after=memories_after_count,
                tokens_before=tokens_before,
                tokens_after=tokens_after,
                tokens_saved=tokens_saved,
                reduction_percentage=reduction_percentage,
            )
        except HTTPException:
            # Re-raise HTTP errors (400/404) untouched so they are not wrapped as 500s below.
            raise
        except Exception as e:
            logger.error(f"Failed to optimize memories for user {request.user_id}: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Failed to optimize memories: {str(e)}")
    return router
def parse_topics(
    topics: Optional[List[str]] = Query(
        default=None,
        description="Comma-separated list of topics to filter by",
        examples=["preferences,technical,communication_style"],
    ),
) -> Optional[List[str]]:
    """Parse comma-separated topics into a list for filtering memories by topic.

    FastAPI collects every repeated `?topics=` query parameter into the list, so
    all occurrences are parsed (previously only `topics[0]` was read and any
    additional occurrences were silently ignored). Each occurrence may itself be
    a comma-separated list; empty entries are dropped.

    Returns:
        A flat list of stripped topic names, or None when no topics were given.

    Raises:
        HTTPException: 422 when the input cannot be parsed.
    """
    if not topics:
        return None
    try:
        return [topic.strip() for raw in topics for topic in raw.split(",") if topic.strip()]
    except Exception as e:
        raise HTTPException(status_code=422, detail=f"Invalid topics format: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/memory/memory.py",
"license": "Apache License 2.0",
"lines": 732,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/memory/schemas.py | import json
from datetime import datetime
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from agno.os.utils import to_utc_datetime
class DeleteMemoriesRequest(BaseModel):
    """Request payload for bulk-deleting user memories."""

    # min_length=1: an empty delete request is rejected at validation time.
    memory_ids: List[str] = Field(..., description="List of memory IDs to delete", min_length=1)
    user_id: Optional[str] = Field(None, description="User ID to filter memories for deletion")
class UserMemorySchema(BaseModel):
    """API representation of a single user memory record."""

    memory_id: str = Field(..., description="Unique identifier for the memory")
    memory: str = Field(..., description="Memory content text")
    topics: Optional[List[str]] = Field(None, description="Topics or tags associated with the memory")
    agent_id: Optional[str] = Field(None, description="Agent ID associated with this memory")
    team_id: Optional[str] = Field(None, description="Team ID associated with this memory")
    user_id: Optional[str] = Field(None, description="User ID who owns this memory")
    updated_at: Optional[datetime] = Field(None, description="Timestamp when memory was last updated")

    @classmethod
    def from_dict(cls, memory_dict: Dict[str, Any]) -> Optional["UserMemorySchema"]:
        """Build a schema from a raw memory dict; returns None for empty memory content."""
        if memory_dict["memory"] == "":
            return None

        # Handle nested memory content (relevant for some memories migrated from v1)
        if isinstance(memory_dict["memory"], dict):
            if memory_dict["memory"].get("memory") is not None:
                memory = str(memory_dict["memory"]["memory"])
            else:
                try:
                    memory = json.dumps(memory_dict["memory"])
                except (TypeError, ValueError):
                    # Bug fix: json.dumps raises TypeError (unserializable values) or
                    # ValueError (e.g. circular references) — it never raises
                    # JSONDecodeError, which only json.loads raises, so the previous
                    # `except json.JSONDecodeError` handler was unreachable and a
                    # non-serializable dict would crash this method.
                    memory = str(memory_dict["memory"])
        else:
            memory = memory_dict["memory"]

        return cls(
            memory_id=memory_dict["memory_id"],
            user_id=str(memory_dict["user_id"]),
            agent_id=memory_dict.get("agent_id"),
            team_id=memory_dict.get("team_id"),
            memory=memory,
            topics=memory_dict.get("topics", []),
            updated_at=memory_dict["updated_at"],
        )
class UserMemoryCreateSchema(BaseModel):
    """Define the payload expected for creating a new user memory"""

    # Content must be non-empty and is capped at 5000 characters.
    memory: str = Field(..., description="Memory content text", min_length=1, max_length=5000)
    user_id: Optional[str] = Field(None, description="User ID who owns this memory")
    topics: Optional[List[str]] = Field(None, description="Topics or tags to categorize the memory")
class UserStatsSchema(BaseModel):
    """Schema for user memory statistics"""

    user_id: str = Field(..., description="User ID")
    total_memories: int = Field(..., description="Total number of memories for this user", ge=0)
    last_memory_updated_at: Optional[datetime] = Field(None, description="Timestamp of the most recent memory update")

    @classmethod
    def from_dict(cls, user_stats_dict: Dict[str, Any]) -> "UserStatsSchema":
        """Build a UserStatsSchema from a raw stats dict, normalizing the timestamp to UTC."""
        return cls(
            user_id=str(user_stats_dict["user_id"]),
            total_memories=user_stats_dict["total_memories"],
            last_memory_updated_at=to_utc_datetime(user_stats_dict.get("last_memory_updated_at")),
        )
class OptimizeMemoriesRequest(BaseModel):
    """Schema for memory optimization request"""

    user_id: str = Field(..., description="User ID to optimize memories for")
    # Resolved server-side into a model instance; a malformed string yields HTTP 400.
    model: Optional[str] = Field(
        default=None,
        description="Model to use for optimization in format 'provider:model_id' (e.g., 'openai:gpt-4o-mini', 'anthropic:claude-3-5-sonnet-20241022', 'google:gemini-2.0-flash-exp'). If not specified, uses MemoryManager's default model (gpt-4o).",
    )
    # apply=False gives a dry-run: the optimized result is returned but not persisted.
    apply: bool = Field(
        default=True,
        description="If True, apply optimization changes to database. If False, return preview only without saving.",
    )
class OptimizeMemoriesResponse(BaseModel):
    """Schema for memory optimization response"""

    memories: List[UserMemorySchema] = Field(..., description="List of optimized memory objects")
    memories_before: int = Field(..., description="Number of memories before optimization", ge=0)
    memories_after: int = Field(..., description="Number of memories after optimization", ge=0)
    tokens_before: int = Field(..., description="Token count before optimization", ge=0)
    tokens_after: int = Field(..., description="Token count after optimization", ge=0)
    tokens_saved: int = Field(..., description="Number of tokens saved through optimization", ge=0)
    # tokens_saved / tokens_before * 100; 0.0 when there were no tokens to start with.
    reduction_percentage: float = Field(..., description="Percentage of token reduction achieved", ge=0.0, le=100.0)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/memory/schemas.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/metrics/metrics.py | import logging
from datetime import date
from typing import List, Optional, Union, cast
from fastapi import Depends, HTTPException, Query, Request
from fastapi.routing import APIRouter
from agno.db.base import AsyncBaseDb, BaseDb
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency
from agno.os.routers.metrics.schemas import DayAggregatedMetrics, MetricsResponse
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import get_db, to_utc_datetime
from agno.remote.base import RemoteDb
logger = logging.getLogger(__name__)
def get_metrics_router(
    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs
) -> APIRouter:
    """Build the metrics router (auth-protected, documented error responses) and attach its routes."""
    shared_error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    metrics_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Metrics"],
        responses=shared_error_responses,
    )
    return attach_routes(router=metrics_router, dbs=dbs)
def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]]) -> APIRouter:
    """Register the metrics endpoints (GET /metrics, POST /metrics/refresh) on *router* and return it."""

    @router.get(
        "/metrics",
        response_model=MetricsResponse,
        status_code=200,
        operation_id="get_metrics",
        summary="Get AgentOS Metrics",
        description=(
            "Retrieve AgentOS metrics and analytics data for a specified date range. "
            "If no date range is specified, returns all available metrics."
        ),
        responses={
            200: {
                "description": "Metrics retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "metrics": [
                                {
                                    "id": "7bf39658-a00a-484c-8a28-67fd8a9ddb2a",
                                    "agent_runs_count": 5,
                                    "agent_sessions_count": 5,
                                    "team_runs_count": 0,
                                    "team_sessions_count": 0,
                                    "workflow_runs_count": 0,
                                    "workflow_sessions_count": 0,
                                    "users_count": 1,
                                    "token_metrics": {
                                        "input_tokens": 448,
                                        "output_tokens": 148,
                                        "total_tokens": 596,
                                        "audio_tokens": 0,
                                        "input_audio_tokens": 0,
                                        "output_audio_tokens": 0,
                                        "cached_tokens": 0,
                                        "cache_write_tokens": 0,
                                        "reasoning_tokens": 0,
                                    },
                                    "model_metrics": [{"model_id": "gpt-4o", "model_provider": "OpenAI", "count": 5}],
                                    "date": "2025-07-31T00:00:00Z",
                                    "created_at": "2025-07-31T12:38:52Z",
                                    "updated_at": "2025-07-31T12:49:01Z",
                                }
                            ]
                        }
                    }
                },
            },
            400: {"description": "Invalid date range parameters", "model": BadRequestResponse},
            500: {"description": "Failed to retrieve metrics", "model": InternalServerErrorResponse},
        },
    )
    async def get_metrics(
        request: Request,
        starting_date: Optional[date] = Query(
            default=None, description="Starting date for metrics range (YYYY-MM-DD format)"
        ),
        ending_date: Optional[date] = Query(
            default=None, description="Ending date for metrics range (YYYY-MM-DD format)"
        ),
        db_id: Optional[str] = Query(default=None, description="Database ID to query metrics from"),
        table: Optional[str] = Query(default=None, description="The database table to use"),
    ) -> MetricsResponse:
        """Return aggregated daily metrics, optionally restricted to a date range."""
        try:
            db = await get_db(dbs, db_id, table)

            # Remote databases serve metrics themselves; forward the caller's bearer token.
            if isinstance(db, RemoteDb):
                auth_token = get_auth_token_from_request(request)
                headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
                return await db.get_metrics(
                    starting_date=starting_date, ending_date=ending_date, db_id=db_id, table=table, headers=headers
                )

            # Local databases: dispatch on sync vs async implementation.
            if isinstance(db, AsyncBaseDb):
                db = cast(AsyncBaseDb, db)
                metrics, latest_updated_at = await db.get_metrics(starting_date=starting_date, ending_date=ending_date)
            else:
                metrics, latest_updated_at = db.get_metrics(starting_date=starting_date, ending_date=ending_date)

            return MetricsResponse(
                metrics=[DayAggregatedMetrics.from_dict(metric) for metric in metrics],
                updated_at=to_utc_datetime(latest_updated_at),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error getting metrics: {str(e)}")

    @router.post(
        "/metrics/refresh",
        response_model=List[DayAggregatedMetrics],
        status_code=200,
        operation_id="refresh_metrics",
        summary="Refresh Metrics",
        description=(
            "Manually trigger recalculation of system metrics from raw data. "
            "This operation analyzes system activity logs and regenerates aggregated metrics. "
            "Useful for ensuring metrics are up-to-date or after system maintenance."
        ),
        responses={
            200: {
                "description": "Metrics refreshed successfully",
                "content": {
                    "application/json": {
                        "example": [
                            {
                                "id": "e77c9531-818b-47a5-99cd-59fed61e5403",
                                "agent_runs_count": 2,
                                "agent_sessions_count": 2,
                                "team_runs_count": 0,
                                "team_sessions_count": 0,
                                "workflow_runs_count": 0,
                                "workflow_sessions_count": 0,
                                "users_count": 1,
                                "token_metrics": {
                                    "input_tokens": 256,
                                    "output_tokens": 441,
                                    "total_tokens": 697,
                                    "audio_total_tokens": 0,
                                    "audio_input_tokens": 0,
                                    "audio_output_tokens": 0,
                                    "cache_read_tokens": 0,
                                    "cache_write_tokens": 0,
                                    "reasoning_tokens": 0,
                                },
                                "model_metrics": [{"model_id": "gpt-4o", "model_provider": "OpenAI", "count": 2}],
                                "date": "2025-08-12T00:00:00Z",
                                "created_at": "2025-08-12T08:01:47Z",
                                "updated_at": "2025-08-12T08:01:47Z",
                            }
                        ]
                    }
                },
            },
            500: {"description": "Failed to refresh metrics", "model": InternalServerErrorResponse},
        },
    )
    async def calculate_metrics(
        request: Request,
        db_id: Optional[str] = Query(default=None, description="Database ID to use for metrics calculation"),
        table: Optional[str] = Query(default=None, description="Table to use for metrics calculation"),
    ) -> List[DayAggregatedMetrics]:
        """Recalculate aggregated metrics from raw activity data and return the result."""
        try:
            db = await get_db(dbs, db_id, table)

            # Remote databases refresh metrics themselves; forward the caller's bearer token.
            if isinstance(db, RemoteDb):
                auth_token = get_auth_token_from_request(request)
                headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
                return await db.refresh_metrics(db_id=db_id, table=table, headers=headers)

            # Local databases: dispatch on sync vs async implementation.
            if isinstance(db, AsyncBaseDb):
                db = cast(AsyncBaseDb, db)
                result = await db.calculate_metrics()
            else:
                result = db.calculate_metrics()

            # calculate_metrics may return None when there is nothing to aggregate.
            if result is None:
                return []

            return [DayAggregatedMetrics.from_dict(metric) for metric in result]
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error refreshing metrics: {str(e)}")

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/metrics/metrics.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/metrics/schemas.py | from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from agno.os.utils import to_utc_datetime
class DayAggregatedMetrics(BaseModel):
    """Aggregated metrics for a given day"""

    id: str = Field(..., description="Unique identifier for the metrics record")
    agent_runs_count: int = Field(..., description="Total number of agent runs", ge=0)
    agent_sessions_count: int = Field(..., description="Total number of agent sessions", ge=0)
    team_runs_count: int = Field(..., description="Total number of team runs", ge=0)
    team_sessions_count: int = Field(..., description="Total number of team sessions", ge=0)
    workflow_runs_count: int = Field(..., description="Total number of workflow runs", ge=0)
    workflow_sessions_count: int = Field(..., description="Total number of workflow sessions", ge=0)
    users_count: int = Field(..., description="Total number of unique users", ge=0)
    token_metrics: Dict[str, Any] = Field(..., description="Token usage metrics (input, output, cached, etc.)")
    model_metrics: List[Dict[str, Any]] = Field(..., description="Metrics grouped by model (model_id, provider, count)")
    date: datetime = Field(..., description="Date for which these metrics are aggregated")
    created_at: datetime = Field(..., description="Timestamp when metrics were created")
    updated_at: datetime = Field(..., description="Timestamp when metrics were last updated")

    @classmethod
    def from_dict(cls, metrics_dict: Dict[str, Any]) -> "DayAggregatedMetrics":
        """Build a DayAggregatedMetrics from a raw metrics dict, filling sensible defaults.

        Timestamps are normalized to UTC; a missing created_at falls back to now,
        and a missing updated_at falls back to created_at.
        """
        created_at = to_utc_datetime(metrics_dict.get("created_at")) or datetime.now(timezone.utc)
        updated_at = to_utc_datetime(metrics_dict.get("updated_at", created_at)) or created_at
        return cls(
            agent_runs_count=metrics_dict.get("agent_runs_count", 0),
            agent_sessions_count=metrics_dict.get("agent_sessions_count", 0),
            date=metrics_dict.get("date", datetime.now(timezone.utc)),
            id=metrics_dict.get("id", ""),
            # Bug fix: the field is declared List[Dict[str, Any]], so the fallback for a
            # missing key must be a list — the previous default of `{}` would fail
            # pydantic validation whenever "model_metrics" was absent.
            model_metrics=metrics_dict.get("model_metrics", []),
            team_runs_count=metrics_dict.get("team_runs_count", 0),
            team_sessions_count=metrics_dict.get("team_sessions_count", 0),
            token_metrics=metrics_dict.get("token_metrics", {}),
            created_at=created_at,
            updated_at=updated_at,
            users_count=metrics_dict.get("users_count", 0),
            workflow_runs_count=metrics_dict.get("workflow_runs_count", 0),
            workflow_sessions_count=metrics_dict.get("workflow_sessions_count", 0),
        )
class MetricsResponse(BaseModel):
    """Response wrapper pairing daily aggregated metrics with their last-refresh timestamp."""

    metrics: List[DayAggregatedMetrics] = Field(..., description="List of daily aggregated metrics")
    updated_at: Optional[datetime] = Field(None, description="Timestamp of the most recent metrics update")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/metrics/schemas.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/routers/session/session.py | import logging
import time
from typing import Any, List, Optional, Union, cast
from uuid import uuid4
from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request
from agno.db.base import AsyncBaseDb, BaseDb, SessionType
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency
from agno.os.schema import (
AgentSessionDetailSchema,
BadRequestResponse,
CreateSessionRequest,
DeleteSessionRequest,
InternalServerErrorResponse,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
RunSchema,
SessionSchema,
SortOrder,
TeamRunSchema,
TeamSessionDetailSchema,
UnauthenticatedResponse,
UpdateSessionRequest,
ValidationErrorResponse,
WorkflowRunSchema,
WorkflowSessionDetailSchema,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import get_db
from agno.remote.base import RemoteDb
from agno.session import AgentSession, TeamSession, WorkflowSession
logger = logging.getLogger(__name__)
def get_session_router(
    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]], settings: AgnoAPISettings = AgnoAPISettings()
) -> APIRouter:
    """Build the sessions router (auth-protected, documented error responses) and attach its routes."""
    shared_error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    sessions_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Sessions"],
        responses=shared_error_responses,
    )
    return attach_routes(router=sessions_router, dbs=dbs)
def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]]) -> APIRouter:
@router.get(
"/sessions",
response_model=PaginatedResponse[SessionSchema],
status_code=200,
operation_id="get_sessions",
summary="List Sessions",
description=(
"Retrieve paginated list of sessions with filtering and sorting options. "
"Supports filtering by session type (agent, team, workflow), component, user, and name. "
"Sessions represent conversation histories and execution contexts."
),
response_model_exclude_none=True,
responses={
200: {
"description": "Sessions retrieved successfully",
"content": {
"application/json": {
"example": {
"session_example": {
"summary": "Example session response",
"value": {
"data": [
{
"session_id": "6f6cfbfd-9643-479a-ae47-b8f32eb4d710",
"session_name": "What tools do you have?",
"session_state": {},
"created_at": "2025-09-05T16:02:09Z",
"updated_at": "2025-09-05T16:02:09Z",
}
]
},
}
}
}
},
},
400: {"description": "Invalid session type or filter parameters", "model": BadRequestResponse},
404: {"description": "Not found", "model": NotFoundResponse},
422: {"description": "Validation error in query parameters", "model": ValidationErrorResponse},
},
)
async def get_sessions(
request: Request,
session_type: SessionType = Query(
default=SessionType.AGENT,
alias="type",
description="Type of sessions to retrieve (agent, team, or workflow)",
),
component_id: Optional[str] = Query(
default=None, description="Filter sessions by component ID (agent/team/workflow ID)"
),
user_id: Optional[str] = Query(default=None, description="Filter sessions by user ID"),
session_name: Optional[str] = Query(default=None, description="Filter sessions by name (partial match)"),
limit: Optional[int] = Query(default=20, description="Number of sessions to return per page", ge=1),
page: Optional[int] = Query(default=1, description="Page number for pagination", ge=0),
sort_by: Optional[str] = Query(default="created_at", description="Field to sort sessions by"),
sort_order: Optional[SortOrder] = Query(default="desc", description="Sort order (asc or desc)"),
db_id: Optional[str] = Query(default=None, description="Database ID to query sessions from"),
table: Optional[str] = Query(default=None, description="The database table to use"),
) -> PaginatedResponse[SessionSchema]:
try:
db = await get_db(dbs, db_id, table)
except Exception as e:
raise HTTPException(status_code=404, detail=f"{e}")
if hasattr(request.state, "user_id") and request.state.user_id is not None:
user_id = request.state.user_id
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.get_sessions(
session_type=session_type,
component_id=component_id,
user_id=user_id,
session_name=session_name,
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order.value if sort_order else None,
db_id=db_id,
table=table,
headers=headers,
)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
sessions, total_count = await db.get_sessions(
session_type=session_type,
component_id=component_id,
user_id=user_id,
session_name=session_name,
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
deserialize=False,
)
else:
sessions, total_count = db.get_sessions( # type: ignore
session_type=session_type,
component_id=component_id,
user_id=user_id,
session_name=session_name,
limit=limit,
page=page,
sort_by=sort_by,
sort_order=sort_order,
deserialize=False,
)
return PaginatedResponse(
data=[SessionSchema.from_dict(session) for session in sessions], # type: ignore
meta=PaginationInfo(
page=page,
limit=limit,
total_count=total_count, # type: ignore
total_pages=(total_count + limit - 1) // limit if limit is not None and limit > 0 else 0, # type: ignore
),
)
@router.post(
"/sessions",
response_model=Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema],
status_code=201,
operation_id="create_session",
summary="Create New Session",
description=(
"Create a new empty session with optional configuration. "
"Useful for pre-creating sessions with specific session_state, metadata, or other properties "
"before running any agent/team/workflow interactions. "
"The session can later be used by providing its session_id in run requests."
),
response_model_exclude_none=True,
responses={
201: {
"description": "Session created successfully",
"content": {
"application/json": {
"examples": {
"agent_session_example": {
"summary": "Example created agent session",
"value": {
"user_id": "user-123",
"agent_session_id": "new-session-id",
"session_id": "new-session-id",
"session_name": "New Session",
"session_state": {"key": "value"},
"metadata": {"key": "value"},
"agent_id": "agent-1",
"created_at": "2025-10-21T12:00:00Z",
"updated_at": "2025-10-21T12:00:00Z",
},
}
}
}
},
},
400: {"description": "Invalid request parameters", "model": BadRequestResponse},
422: {"description": "Validation error", "model": ValidationErrorResponse},
500: {"description": "Failed to create session", "model": InternalServerErrorResponse},
},
)
async def create_session(
request: Request,
session_type: SessionType = Query(
default=SessionType.AGENT, alias="type", description="Type of session to create (agent, team, or workflow)"
),
create_session_request: CreateSessionRequest = Body(
default=CreateSessionRequest(), description="Session configuration data"
),
db_id: Optional[str] = Query(default=None, description="Database ID to create session in"),
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
db = await get_db(dbs, db_id)
# Get user_id from request state if available (from auth middleware)
user_id = create_session_request.user_id
if hasattr(request.state, "user_id") and request.state.user_id is not None:
user_id = request.state.user_id
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.create_session(
session_type=session_type,
session_id=create_session_request.session_id,
session_name=create_session_request.session_name,
session_state=create_session_request.session_state,
metadata=create_session_request.metadata,
user_id=user_id,
agent_id=create_session_request.agent_id,
team_id=create_session_request.team_id,
workflow_id=create_session_request.workflow_id,
db_id=db_id,
headers=headers,
)
# Generate session_id if not provided
session_id = create_session_request.session_id or str(uuid4())
# Prepare session_data with session_state and session_name
session_data: dict[str, Any] = {}
if create_session_request.session_state is not None:
session_data["session_state"] = create_session_request.session_state
if create_session_request.session_name is not None:
session_data["session_name"] = create_session_request.session_name
current_time = int(time.time())
# Create the appropriate session type
session: Union[AgentSession, TeamSession, WorkflowSession]
if session_type == SessionType.AGENT:
session = AgentSession(
session_id=session_id,
agent_id=create_session_request.agent_id,
user_id=user_id,
session_data=session_data if session_data else None,
metadata=create_session_request.metadata,
created_at=current_time,
updated_at=current_time,
)
elif session_type == SessionType.TEAM:
session = TeamSession(
session_id=session_id,
team_id=create_session_request.team_id,
user_id=user_id,
session_data=session_data if session_data else None,
metadata=create_session_request.metadata,
created_at=current_time,
updated_at=current_time,
)
elif session_type == SessionType.WORKFLOW:
session = WorkflowSession(
session_id=session_id,
workflow_id=create_session_request.workflow_id,
user_id=user_id,
session_data=session_data if session_data else None,
metadata=create_session_request.metadata,
created_at=current_time,
updated_at=current_time,
)
else:
raise HTTPException(status_code=400, detail=f"Invalid session type: {session_type}")
# Upsert the session to the database
try:
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
created_session = await db.upsert_session(session, deserialize=True)
else:
created_session = db.upsert_session(session, deserialize=True)
if not created_session:
raise HTTPException(status_code=500, detail="Failed to create session")
# Return appropriate schema based on session type
if session_type == SessionType.AGENT:
return AgentSessionDetailSchema.from_session(created_session) # type: ignore
elif session_type == SessionType.TEAM:
return TeamSessionDetailSchema.from_session(created_session) # type: ignore
else:
return WorkflowSessionDetailSchema.from_session(created_session) # type: ignore
except Exception as e:
logger.error(f"Error creating session: {e}")
raise HTTPException(status_code=500, detail=f"Failed to create session: {str(e)}")
@router.get(
"/sessions/{session_id}",
response_model=Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema],
status_code=200,
operation_id="get_session_by_id",
summary="Get Session by ID",
description=(
"Retrieve detailed information about a specific session including metadata, configuration, "
"and run history. Response schema varies based on session type (agent, team, or workflow)."
),
response_model_exclude_none=True,
responses={
200: {
"description": "Session details retrieved successfully",
"content": {
"application/json": {
"examples": {
"agent_session_example": {
"summary": "Example agent session response",
"value": {
"user_id": "123",
"agent_session_id": "6f6cfbfd-9643-479a-ae47-b8f32eb4d710",
"session_id": "6f6cfbfd-9643-479a-ae47-b8f32eb4d710",
"session_name": "What tools do you have?",
"session_summary": {
"summary": "The user and assistant engaged in a conversation about the tools the agent has available.",
"updated_at": "2025-09-05T18:02:12.269392",
},
"session_state": {},
"agent_id": "basic-agent",
"total_tokens": 160,
"agent_data": {
"name": "Basic Agent",
"agent_id": "basic-agent",
"model": {"provider": "OpenAI", "name": "OpenAIChat", "id": "gpt-4o"},
},
"metrics": {
"input_tokens": 134,
"output_tokens": 26,
"total_tokens": 160,
"audio_input_tokens": 0,
"audio_output_tokens": 0,
"audio_total_tokens": 0,
"cache_read_tokens": 0,
"cache_write_tokens": 0,
"reasoning_tokens": 0,
"timer": None,
"time_to_first_token": None,
"duration": None,
"provider_metrics": None,
"additional_metrics": None,
},
"chat_history": [
{
"content": "<additional_information>\n- Use markdown to format your answers.\n- The current time is 2025-09-05 18:02:09.171627.\n</additional_information>\n\nYou have access to memories from previous interactions with the user that you can use:\n\n<memories_from_previous_interactions>\n- User really likes Digimon and Japan.\n- User really likes Japan.\n- User likes coffee.\n</memories_from_previous_interactions>\n\nNote: this information is from previous interactions and may be updated in this conversation. You should always prefer information from this conversation over the past memories.",
"from_history": False,
"stop_after_tool_call": False,
"role": "system",
"created_at": 1757088129,
},
{
"content": "What tools do you have?",
"from_history": False,
"stop_after_tool_call": False,
"role": "user",
"created_at": 1757088129,
},
{
"content": "I don't have access to external tools or the internet. However, I can assist you with a wide range of topics by providing information, answering questions, and offering suggestions based on the knowledge I've been trained on. If there's anything specific you need help with, feel free to ask!",
"from_history": False,
"stop_after_tool_call": False,
"role": "assistant",
"metrics": {"input_tokens": 134, "output_tokens": 26, "total_tokens": 160},
"created_at": 1757088129,
},
],
"created_at": "2025-09-05T16:02:09Z",
"updated_at": "2025-09-05T16:02:09Z",
},
}
}
}
},
},
404: {"description": "Session not found", "model": NotFoundResponse},
422: {"description": "Invalid session type", "model": ValidationErrorResponse},
},
)
async def get_session_by_id(
request: Request,
session_id: str = Path(description="Session ID to retrieve"),
session_type: SessionType = Query(
default=SessionType.AGENT, description="Session type (agent, team, or workflow)", alias="type"
),
user_id: Optional[str] = Query(default=None, description="User ID to query session from"),
db_id: Optional[str] = Query(default=None, description="Database ID to query session from"),
table: Optional[str] = Query(default=None, description="Table to query session from"),
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
db = await get_db(dbs, db_id, table)
if hasattr(request.state, "user_id") and request.state.user_id is not None:
user_id = request.state.user_id
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.get_session(
session_id=session_id,
session_type=session_type,
user_id=user_id,
db_id=db_id,
table=table,
headers=headers,
)
if isinstance(db, AsyncBaseDb):
db = cast(AsyncBaseDb, db)
session = await db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)
else:
session = db.get_session(session_id=session_id, session_type=session_type, user_id=user_id)
if not session:
raise HTTPException(
status_code=404, detail=f"{session_type.value.title()} Session with id '{session_id}' not found"
)
if session_type == SessionType.AGENT:
return AgentSessionDetailSchema.from_session(session) # type: ignore
elif session_type == SessionType.TEAM:
return TeamSessionDetailSchema.from_session(session) # type: ignore
else:
return WorkflowSessionDetailSchema.from_session(session) # type: ignore
@router.get(
"/sessions/{session_id}/runs",
response_model=List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]],
status_code=200,
operation_id="get_session_runs",
summary="Get Session Runs",
description=(
"Retrieve all runs (executions) for a specific session with optional timestamp filtering. "
"Runs represent individual interactions or executions within a session. "
"Response schema varies based on session type."
),
response_model_exclude_none=True,
responses={
200: {
"description": "Session runs retrieved successfully",
"content": {
"application/json": {
"examples": {
"completed_run": {
"summary": "Example completed run",
"value": {
"run_id": "fcdf50f0-7c32-4593-b2ef-68a558774340",
"parent_run_id": "80056af0-c7a5-4d69-b6a2-c3eba9f040e0",
"agent_id": "basic-agent",
"user_id": "",
"run_input": "Which tools do you have access to?",
"content": "I don't have access to external tools or the internet. However, I can assist you with a wide range of topics by providing information, answering questions, and offering suggestions based on the knowledge I've been trained on. If there's anything specific you need help with, feel free to ask!",
"run_response_format": "text",
"reasoning_content": "",
"metrics": {
"input_tokens": 82,
"output_tokens": 56,
"total_tokens": 138,
"time_to_first_token": 0.047505500027909875,
"duration": 4.840060166025069,
},
"messages": [
{
"content": "<additional_information>\n- Use markdown to format your answers.\n- The current time is 2025-09-08 17:52:10.101003.\n</additional_information>\n\nYou have the capability to retain memories from previous interactions with the user, but have not had any interactions with the user yet.",
"from_history": False,
"stop_after_tool_call": False,
"role": "system",
"created_at": 1757346730,
},
{
"content": "Which tools do you have access to?",
"from_history": False,
"stop_after_tool_call": False,
"role": "user",
"created_at": 1757346730,
},
{
"content": "I don't have access to external tools or the internet. However, I can assist you with a wide range of topics by providing information, answering questions, and offering suggestions based on the knowledge I've been trained on. If there's anything specific you need help with, feel free to ask!",
"from_history": False,
"stop_after_tool_call": False,
"role": "assistant",
"metrics": {"input_tokens": 82, "output_tokens": 56, "total_tokens": 138},
"created_at": 1757346730,
},
],
"tools": None,
"events": [
{
"created_at": 1757346730,
"event": "RunStarted",
"agent_id": "basic-agent",
"agent_name": "Basic Agent",
"run_id": "fcdf50f0-7c32-4593-b2ef-68a558774340",
"session_id": "80056af0-c7a5-4d69-b6a2-c3eba9f040e0",
"model": "gpt-4o",
"model_provider": "OpenAI",
},
{
"created_at": 1757346733,
"event": "MemoryUpdateStarted",
"agent_id": "basic-agent",
"agent_name": "Basic Agent",
"run_id": "fcdf50f0-7c32-4593-b2ef-68a558774340",
"session_id": "80056af0-c7a5-4d69-b6a2-c3eba9f040e0",
},
{
"created_at": 1757346734,
"event": "MemoryUpdateCompleted",
"agent_id": "basic-agent",
"agent_name": "Basic Agent",
"run_id": "fcdf50f0-7c32-4593-b2ef-68a558774340",
"session_id": "80056af0-c7a5-4d69-b6a2-c3eba9f040e0",
},
{
"created_at": 1757346734,
"event": "RunCompleted",
"agent_id": "basic-agent",
"agent_name": "Basic Agent",
"run_id": "fcdf50f0-7c32-4593-b2ef-68a558774340",
"session_id": "80056af0-c7a5-4d69-b6a2-c3eba9f040e0",
"content": "I don't have access to external tools or the internet. However, I can assist you with a wide range of topics by providing information, answering questions, and offering suggestions based on the knowledge I've been trained on. If there's anything specific you need help with, feel free to ask!",
"content_type": "str",
"metrics": {
"input_tokens": 82,
"output_tokens": 56,
"total_tokens": 138,
"time_to_first_token": 0.047505500027909875,
"duration": 4.840060166025069,
},
},
],
"created_at": "2025-09-08T15:52:10Z",
},
}
}
}
},
},
404: {"description": "Session not found or has no runs", "model": NotFoundResponse},
422: {"description": "Invalid session type", "model": ValidationErrorResponse},
},
)
async def get_session_runs(
    request: Request,
    session_id: str = Path(description="Session ID to get runs from"),
    session_type: SessionType = Query(
        default=SessionType.AGENT, description="Session type (agent, team, or workflow)", alias="type"
    ),
    user_id: Optional[str] = Query(default=None, description="User ID to query runs from"),
    created_after: Optional[int] = Query(
        default=None,
        description="Filter runs created after this Unix timestamp (epoch time in seconds)",
    ),
    created_before: Optional[int] = Query(
        default=None,
        description="Filter runs created before this Unix timestamp (epoch time in seconds)",
    ),
    db_id: Optional[str] = Query(default=None, description="Database ID to query runs from"),
    table: Optional[str] = Query(default=None, description="Table to query runs from"),
) -> List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]]:
    """Return all runs of a session, optionally filtered by creation time window."""
    db = await get_db(dbs, db_id, table)
    # A user id attached to request.state (e.g. by auth middleware) overrides the query param.
    if getattr(request.state, "user_id", None) is not None:
        user_id = request.state.user_id
    if isinstance(db, RemoteDb):
        # Remote databases proxy the whole call; forward the caller's bearer token if present.
        token = get_auth_token_from_request(request)
        headers = {"Authorization": f"Bearer {token}"} if token else None
        return await db.get_session_runs(
            session_id=session_id,
            session_type=session_type,
            user_id=user_id,
            created_after=created_after,
            created_before=created_before,
            db_id=db_id,
            table=table,
            headers=headers,
        )
    # Load the raw (non-deserialized) session so runs can be handled as plain dicts.
    if isinstance(db, AsyncBaseDb):
        session = await cast(AsyncBaseDb, db).get_session(
            session_id=session_id, session_type=session_type, user_id=user_id, deserialize=False
        )
    else:
        session = db.get_session(
            session_id=session_id, session_type=session_type, user_id=user_id, deserialize=False
        )
    if not session:
        raise HTTPException(status_code=404, detail=f"Session with ID {session_id} not found")
    runs = session.get("runs")  # type: ignore
    if not runs:
        return []
    # Apply the epoch-second timestamp window. Runs without a truthy created_at are
    # kept regardless of filters (best-effort, matching existing behavior).
    # TODO: Move this filtering into the DB layer
    if created_after or created_before:
        filtered_runs = []
        for candidate in runs:
            ts = candidate.get("created_at")
            if ts:
                if created_after and ts < created_after:
                    continue
                if created_before and ts > created_before:
                    continue
            filtered_runs.append(candidate)
    else:
        filtered_runs = list(runs)
    if not filtered_runs:
        return []
    if session_type == SessionType.AGENT:
        return [RunSchema.from_dict(run) for run in filtered_runs]
    collected: List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]] = []
    if session_type == SessionType.TEAM:
        for run in filtered_runs:
            # Member-agent runs serialize as RunSchema; the team's own runs as TeamRunSchema.
            if run.get("agent_id") is not None:
                collected.append(RunSchema.from_dict(run))
            elif run.get("team_id") is not None:
                collected.append(TeamRunSchema.from_dict(run))
        return collected
    if session_type == SessionType.WORKFLOW:
        for run in filtered_runs:
            if run.get("workflow_id") is not None:
                collected.append(WorkflowRunSchema.from_dict(run))
            elif run.get("team_id") is not None:
                collected.append(TeamRunSchema.from_dict(run))
            else:
                collected.append(RunSchema.from_dict(run))
        return collected
    raise HTTPException(status_code=400, detail=f"Invalid session type: {session_type}")
@router.get(
"/sessions/{session_id}/runs/{run_id}",
response_model=Union[RunSchema, TeamRunSchema, WorkflowRunSchema],
status_code=200,
operation_id="get_session_run",
summary="Get Run by ID",
description=(
"Retrieve a specific run by its ID from a session. Response schema varies based on the "
"run type (agent run, team run, or workflow run)."
),
responses={
200: {
"description": "Run retrieved successfully",
"content": {
"application/json": {
"examples": {
"agent_run": {
"summary": "Example agent run",
"value": {
"run_id": "fcdf50f0-7c32-4593-b2ef-68a558774340",
"parent_run_id": "80056af0-c7a5-4d69-b6a2-c3eba9f040e0",
"agent_id": "basic-agent",
"user_id": "user_123",
"run_input": "Which tools do you have access to?",
"content": "I don't have access to external tools.",
"created_at": 1728499200,
},
}
}
}
},
},
404: {"description": "Session or run not found", "model": NotFoundResponse},
422: {"description": "Invalid session type", "model": ValidationErrorResponse},
},
)
async def get_session_run(
    request: Request,
    session_id: str = Path(description="Session ID to get run from"),
    run_id: str = Path(description="Run ID to retrieve"),
    session_type: SessionType = Query(
        default=SessionType.AGENT, description="Session type (agent, team, or workflow)", alias="type"
    ),
    user_id: Optional[str] = Query(default=None, description="User ID to query run from"),
    db_id: Optional[str] = Query(default=None, description="Database ID to query run from"),
    table: Optional[str] = Query(default=None, description="Table to query run from"),
) -> Union[RunSchema, TeamRunSchema, WorkflowRunSchema]:
    """Return a single run from a session, serialized according to its run type.

    Raises a 404 when the session does not exist, has no runs, or does not
    contain the requested run id.
    """
    # Fix: forward `table` so the declared query parameter actually participates in
    # DB resolution, consistent with the other session endpoints (was get_db(dbs, db_id)).
    db = await get_db(dbs, db_id, table)
    # A user id attached to request.state (e.g. by auth middleware) overrides the query param.
    if hasattr(request.state, "user_id") and request.state.user_id is not None:
        user_id = request.state.user_id
    if isinstance(db, RemoteDb):
        # Remote databases proxy the whole call; forward the caller's bearer token if present.
        auth_token = get_auth_token_from_request(request)
        headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
        return await db.get_session_run(
            session_id=session_id,
            run_id=run_id,
            session_type=session_type,
            user_id=user_id,
            db_id=db_id,
            table=table,
            headers=headers,
        )
    # Load the raw (non-deserialized) session so runs can be handled as plain dicts.
    if isinstance(db, AsyncBaseDb):
        db = cast(AsyncBaseDb, db)
        session = await db.get_session(
            session_id=session_id, session_type=session_type, user_id=user_id, deserialize=False
        )
    else:
        session = db.get_session(
            session_id=session_id, session_type=session_type, user_id=user_id, deserialize=False
        )
    if not session:
        raise HTTPException(status_code=404, detail=f"Session with ID {session_id} not found")
    runs = session.get("runs")  # type: ignore
    if not runs:
        raise HTTPException(status_code=404, detail=f"Session with ID {session_id} has no runs")
    # TODO: Move this filtering into the DB layer
    target_run = next((run for run in runs if run.get("run_id") == run_id), None)
    if not target_run:
        raise HTTPException(status_code=404, detail=f"Run with ID {run_id} not found in session {session_id}")
    # Workflow runs take precedence, then team runs; agent runs are the default.
    if target_run.get("workflow_id") is not None:
        return WorkflowRunSchema.from_dict(target_run)
    elif target_run.get("team_id") is not None:
        return TeamRunSchema.from_dict(target_run)
    else:
        return RunSchema.from_dict(target_run)
@router.delete(
"/sessions/{session_id}",
status_code=204,
operation_id="delete_session",
summary="Delete Session",
description=(
"Permanently delete a specific session and all its associated runs. "
"This action cannot be undone and will remove all conversation history."
),
responses={
204: {},
500: {"description": "Failed to delete session", "model": InternalServerErrorResponse},
},
)
async def delete_session(
    request: Request,
    session_id: str = Path(description="Session ID to delete"),
    user_id: Optional[str] = Query(default=None, description="User ID to scope deletion to"),
    db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
    table: Optional[str] = Query(default=None, description="Table to use for deletion"),
) -> None:
    """Permanently delete a single session from the resolved database."""
    db = await get_db(dbs, db_id, table)
    # A user id attached to request.state (e.g. by auth middleware) overrides the query param.
    if getattr(request.state, "user_id", None) is not None:
        user_id = request.state.user_id
    if isinstance(db, RemoteDb):
        # Remote databases proxy the whole call; forward the caller's bearer token if present.
        token = get_auth_token_from_request(request)
        headers = {"Authorization": f"Bearer {token}"} if token else None
        await db.delete_session(session_id=session_id, db_id=db_id, table=table, headers=headers, user_id=user_id)
    elif isinstance(db, AsyncBaseDb):
        await cast(AsyncBaseDb, db).delete_session(session_id=session_id, user_id=user_id)
    else:
        db.delete_session(session_id=session_id, user_id=user_id)
@router.delete(
"/sessions",
status_code=204,
operation_id="delete_sessions",
summary="Delete Multiple Sessions",
description=(
"Delete multiple sessions by their IDs in a single operation. "
"This action cannot be undone and will permanently remove all specified sessions and their runs."
),
responses={
204: {"description": "Sessions deleted successfully"},
400: {
"description": "Invalid request - session IDs and types length mismatch",
"model": BadRequestResponse,
},
500: {"description": "Failed to delete sessions", "model": InternalServerErrorResponse},
},
)
async def delete_sessions(
    http_request: Request,
    request: DeleteSessionRequest,
    user_id: Optional[str] = Query(default=None, description="User ID to scope deletion to"),
    db_id: Optional[str] = Query(default=None, description="Database ID to use for deletion"),
    table: Optional[str] = Query(default=None, description="Table to use for deletion"),
) -> None:
    """Bulk-delete sessions; `session_ids` and `session_types` are parallel lists."""
    if len(request.session_ids) != len(request.session_types):
        raise HTTPException(status_code=400, detail="Session IDs and session types must have the same length")
    db = await get_db(dbs, db_id, table)
    # A user id attached to request.state (e.g. by auth middleware) overrides the query param.
    if getattr(http_request.state, "user_id", None) is not None:
        user_id = http_request.state.user_id
    if isinstance(db, RemoteDb):
        # Remote databases proxy the whole call; forward the caller's bearer token if present.
        token = get_auth_token_from_request(http_request)
        headers = {"Authorization": f"Bearer {token}"} if token else None
        await db.delete_sessions(
            session_ids=request.session_ids,
            session_types=request.session_types,
            db_id=db_id,
            table=table,
            headers=headers,
            user_id=user_id,
        )
    elif isinstance(db, AsyncBaseDb):
        await cast(AsyncBaseDb, db).delete_sessions(session_ids=request.session_ids, user_id=user_id)
    else:
        db.delete_sessions(session_ids=request.session_ids, user_id=user_id)
@router.post(
"/sessions/{session_id}/rename",
response_model=Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema],
status_code=200,
operation_id="rename_session",
summary="Rename Session",
description=(
"Update the name of an existing session. Useful for organizing and categorizing "
"sessions with meaningful names for better identification and management."
),
responses={
200: {
"description": "Session renamed successfully",
"content": {
"application/json": {
"examples": {
"agent_session_example": {
"summary": "Example agent session response",
"value": {
"user_id": "123",
"agent_session_id": "6f6cfbfd-9643-479a-ae47-b8f32eb4d710",
"session_id": "6f6cfbfd-9643-479a-ae47-b8f32eb4d710",
"session_name": "What tools do you have?",
"session_summary": {
"summary": "The user and assistant engaged in a conversation about the tools the agent has available.",
"updated_at": "2025-09-05T18:02:12.269392",
},
"session_state": {},
"agent_id": "basic-agent",
"total_tokens": 160,
"agent_data": {
"name": "Basic Agent",
"agent_id": "basic-agent",
"model": {"provider": "OpenAI", "name": "OpenAIChat", "id": "gpt-4o"},
},
"metrics": {
"input_tokens": 134,
"output_tokens": 26,
"total_tokens": 160,
"audio_input_tokens": 0,
"audio_output_tokens": 0,
"audio_total_tokens": 0,
"cache_read_tokens": 0,
"cache_write_tokens": 0,
"reasoning_tokens": 0,
"timer": None,
"time_to_first_token": None,
"duration": None,
"provider_metrics": None,
"additional_metrics": None,
},
"chat_history": [
{
"content": "<additional_information>\n- Use markdown to format your answers.\n- The current time is 2025-09-05 18:02:09.171627.\n</additional_information>\n\nYou have access to memories from previous interactions with the user that you can use:\n\n<memories_from_previous_interactions>\n- User really likes Digimon and Japan.\n- User really likes Japan.\n- User likes coffee.\n</memories_from_previous_interactions>\n\nNote: this information is from previous interactions and may be updated in this conversation. You should always prefer information from this conversation over the past memories.",
"from_history": False,
"stop_after_tool_call": False,
"role": "system",
"created_at": 1757088129,
},
{
"content": "What tools do you have?",
"from_history": False,
"stop_after_tool_call": False,
"role": "user",
"created_at": 1757088129,
},
{
"content": "I don't have access to external tools or the internet. However, I can assist you with a wide range of topics by providing information, answering questions, and offering suggestions based on the knowledge I've been trained on. If there's anything specific you need help with, feel free to ask!",
"from_history": False,
"stop_after_tool_call": False,
"role": "assistant",
"metrics": {"input_tokens": 134, "output_tokens": 26, "total_tokens": 160},
"created_at": 1757088129,
},
],
"created_at": "2025-09-05T16:02:09Z",
"updated_at": "2025-09-05T16:02:09Z",
},
}
}
}
},
},
400: {"description": "Invalid session name", "model": BadRequestResponse},
404: {"description": "Session not found", "model": NotFoundResponse},
422: {"description": "Invalid session type or validation error", "model": ValidationErrorResponse},
},
)
async def rename_session(
    request: Request,
    session_id: str = Path(description="Session ID to rename"),
    session_type: SessionType = Query(
        default=SessionType.AGENT, description="Session type (agent, team, or workflow)", alias="type"
    ),
    session_name: str = Body(embed=True, description="New name for the session"),
    user_id: Optional[str] = Query(default=None, description="User ID to scope rename to"),
    db_id: Optional[str] = Query(default=None, description="Database ID to use for rename operation"),
    table: Optional[str] = Query(default=None, description="Table to use for rename operation"),
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
    """Set a new display name on a session and return the updated session detail."""
    db = await get_db(dbs, db_id, table)
    # A user id attached to request.state (e.g. by auth middleware) overrides the query param.
    if getattr(request.state, "user_id", None) is not None:
        user_id = request.state.user_id
    if isinstance(db, RemoteDb):
        # Remote databases proxy the whole call; forward the caller's bearer token if present.
        token = get_auth_token_from_request(request)
        headers = {"Authorization": f"Bearer {token}"} if token else None
        return await db.rename_session(
            session_id=session_id,
            session_name=session_name,
            session_type=session_type,
            db_id=db_id,
            table=table,
            headers=headers,
            user_id=user_id,
        )
    if isinstance(db, AsyncBaseDb):
        session = await cast(AsyncBaseDb, db).rename_session(
            session_id=session_id, session_type=session_type, session_name=session_name, user_id=user_id
        )
    else:
        session = db.rename_session(
            session_id=session_id, session_type=session_type, session_name=session_name, user_id=user_id
        )
    if not session:
        raise HTTPException(status_code=404, detail=f"Session with id '{session_id}' not found")
    # Pick the detail schema matching the session type; workflow is the fallback.
    detail_schema = {
        SessionType.AGENT: AgentSessionDetailSchema,
        SessionType.TEAM: TeamSessionDetailSchema,
    }.get(session_type, WorkflowSessionDetailSchema)
    return detail_schema.from_session(session)  # type: ignore
@router.patch(
"/sessions/{session_id}",
response_model=Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema],
status_code=200,
operation_id="update_session",
summary="Update Session",
description=(
"Update session properties such as session_name, session_state, metadata, or summary. "
"Use this endpoint to modify the session name, update state, add metadata, or update the session summary."
),
responses={
200: {
"description": "Session updated successfully",
"content": {
"application/json": {
"examples": {
"update_summary": {
"summary": "Update session summary",
"value": {
"summary": {
"summary": "The user discussed project planning with the agent.",
"updated_at": "2025-10-21T14:30:00Z",
}
},
},
"update_metadata": {
"summary": "Update session metadata",
"value": {
"metadata": {
"tags": ["planning", "project"],
"priority": "high",
}
},
},
"update_session_name": {
"summary": "Update session name",
"value": {"session_name": "Updated Session Name"},
},
"update_session_state": {
"summary": "Update session state",
"value": {
"session_state": {
"step": "completed",
"context": "Project planning finished",
"progress": 100,
}
},
},
}
}
},
},
404: {"description": "Session not found", "model": NotFoundResponse},
422: {"description": "Invalid request", "model": ValidationErrorResponse},
500: {"description": "Failed to update session", "model": InternalServerErrorResponse},
},
)
async def update_session(
    request: Request,
    session_id: str = Path(description="Session ID to update"),
    session_type: SessionType = Query(
        default=SessionType.AGENT, description="Session type (agent, team, or workflow)", alias="type"
    ),
    update_data: UpdateSessionRequest = Body(description="Session update data"),
    user_id: Optional[str] = Query(default=None, description="User ID"),
    db_id: Optional[str] = Query(default=None, description="Database ID to use for update operation"),
    table: Optional[str] = Query(default=None, description="Table to use for update operation"),
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
    """Partially update a session (name, state, metadata, summary) and return the result.

    Only the fields present (non-None) on ``update_data`` are applied; everything
    else on the stored session is left untouched. The modified session is then
    upserted back to the database.
    """
    # Fix: forward `table` so the declared query parameter actually participates in
    # DB resolution, consistent with the other session endpoints (was get_db(dbs, db_id)).
    db = await get_db(dbs, db_id, table)
    # A user id attached to request.state (e.g. by auth middleware) overrides the query param.
    if hasattr(request.state, "user_id") and request.state.user_id is not None:
        user_id = request.state.user_id
    if isinstance(db, RemoteDb):
        # Remote databases proxy the whole call; forward the caller's bearer token if present.
        auth_token = get_auth_token_from_request(request)
        headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
        return await db.update_session(
            session_id=session_id,
            session_type=session_type,
            session_name=update_data.session_name,
            session_state=update_data.session_state,
            metadata=update_data.metadata,
            summary=update_data.summary,
            user_id=user_id,
            db_id=db_id,
            table=table,
            headers=headers,
        )
    # Fetch the existing session as a deserialized session object.
    if isinstance(db, AsyncBaseDb):
        db = cast(AsyncBaseDb, db)
        existing_session = await db.get_session(
            session_id=session_id, session_type=session_type, user_id=user_id, deserialize=True
        )
    else:
        existing_session = db.get_session(
            session_id=session_id, session_type=session_type, user_id=user_id, deserialize=True
        )
    if not existing_session:
        raise HTTPException(status_code=404, detail=f"Session with id '{session_id}' not found")
    # session_name and session_state both live inside the session_data dict.
    if update_data.session_name is not None:
        if existing_session.session_data is None:  # type: ignore
            existing_session.session_data = {}  # type: ignore
        existing_session.session_data["session_name"] = update_data.session_name  # type: ignore
    if update_data.session_state is not None:
        if existing_session.session_data is None:  # type: ignore
            existing_session.session_data = {}  # type: ignore
        existing_session.session_data["session_state"] = update_data.session_state  # type: ignore
    if update_data.metadata is not None:
        existing_session.metadata = update_data.metadata  # type: ignore
    if update_data.summary is not None:
        # Imported lazily here (presumably to avoid a module-level import cycle) — confirm before hoisting.
        from agno.session.summary import SessionSummary

        existing_session.summary = SessionSummary.from_dict(update_data.summary)  # type: ignore
    # Persist the modified session.
    if isinstance(db, AsyncBaseDb):
        db = cast(AsyncBaseDb, db)
        updated_session = await db.upsert_session(existing_session, deserialize=True)  # type: ignore
    else:
        updated_session = db.upsert_session(existing_session, deserialize=True)  # type: ignore
    if not updated_session:
        raise HTTPException(status_code=500, detail="Failed to update session")
    # Serialize according to the session type; workflow is the fallback.
    if session_type == SessionType.AGENT:
        return AgentSessionDetailSchema.from_session(updated_session)  # type: ignore
    elif session_type == SessionType.TEAM:
        return TeamSessionDetailSchema.from_session(updated_session)  # type: ignore
    else:
        return WorkflowSessionDetailSchema.from_session(updated_session)  # type: ignore
return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/session/session.py",
"license": "Apache License 2.0",
"lines": 1065,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/schema.py | from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, Generic, List, Optional, TypeVar, Union
from pydantic import BaseModel, ConfigDict, Field
from agno.agent import Agent
from agno.agent.remote import RemoteAgent
from agno.db.base import SessionType
from agno.os.config import (
ChatConfig,
EvalsConfig,
KnowledgeConfig,
MemoryConfig,
MetricsConfig,
SessionConfig,
TracesConfig,
)
from agno.os.utils import extract_input_media, get_run_input, get_session_name, to_utc_datetime
from agno.session import AgentSession, TeamSession, WorkflowSession
from agno.team.remote import RemoteTeam
from agno.team.team import Team
from agno.workflow.remote import RemoteWorkflow
from agno.workflow.workflow import Workflow
class BadRequestResponse(BaseModel):
    """Standard error payload for 400 Bad Request responses."""
    model_config = ConfigDict(json_schema_extra={"example": {"detail": "Bad request", "error_code": "BAD_REQUEST"}})
    detail: str = Field(..., description="Error detail message")
    error_code: Optional[str] = Field(None, description="Error code for categorization")
class NotFoundResponse(BaseModel):
    """Standard error payload for 404 Not Found responses."""
    model_config = ConfigDict(json_schema_extra={"example": {"detail": "Not found", "error_code": "NOT_FOUND"}})
    detail: str = Field(..., description="Error detail message")
    error_code: Optional[str] = Field(None, description="Error code for categorization")
class UnauthorizedResponse(BaseModel):
    """Standard error payload for 403-style unauthorized-access responses."""
    model_config = ConfigDict(
        json_schema_extra={"example": {"detail": "Unauthorized access", "error_code": "UNAUTHORIZED"}}
    )
    detail: str = Field(..., description="Error detail message")
    error_code: Optional[str] = Field(None, description="Error code for categorization")
class UnauthenticatedResponse(BaseModel):
    """Standard error payload for 401-style unauthenticated-access responses."""
    model_config = ConfigDict(
        json_schema_extra={"example": {"detail": "Unauthenticated access", "error_code": "UNAUTHENTICATED"}}
    )
    detail: str = Field(..., description="Error detail message")
    error_code: Optional[str] = Field(None, description="Error code for categorization")
class ValidationErrorResponse(BaseModel):
    """Standard error payload for 422 validation-error responses."""
    model_config = ConfigDict(
        json_schema_extra={"example": {"detail": "Validation error", "error_code": "VALIDATION_ERROR"}}
    )
    detail: str = Field(..., description="Error detail message")
    error_code: Optional[str] = Field(None, description="Error code for categorization")
class InternalServerErrorResponse(BaseModel):
    """Standard error payload for 500 Internal Server Error responses."""
    model_config = ConfigDict(
        json_schema_extra={"example": {"detail": "Internal server error", "error_code": "INTERNAL_SERVER_ERROR"}}
    )
    detail: str = Field(..., description="Error detail message")
    error_code: Optional[str] = Field(None, description="Error code for categorization")
class HealthResponse(BaseModel):
    """Payload returned by the health-check endpoint."""
    model_config = ConfigDict(
        json_schema_extra={"example": {"status": "ok", "instantiated_at": "2025-06-10T12:00:00Z"}}
    )
    status: str = Field(..., description="Health status of the service")
    instantiated_at: datetime = Field(..., description="Timestamp when service was instantiated")
class InterfaceResponse(BaseModel):
    """Describes one interface exposed by the OS instance (type, version, route)."""
    type: str = Field(..., description="Type of the interface")
    version: str = Field(..., description="Version of the interface")
    route: str = Field(..., description="API route path")
class ManagerResponse(BaseModel):
    """Describes one manager registered on the OS instance (type, name, version, route)."""
    type: str = Field(..., description="Type of the manager")
    name: str = Field(..., description="Name of the manager")
    version: str = Field(..., description="Version of the manager")
    route: str = Field(..., description="API route path")
class AgentSummaryResponse(BaseModel):
    """Compact agent listing entry: identity plus the database it is wired to."""
    id: Optional[str] = Field(None, description="Unique identifier for the agent")
    name: Optional[str] = Field(None, description="Name of the agent")
    description: Optional[str] = Field(None, description="Description of the agent")
    db_id: Optional[str] = Field(None, description="Database identifier")
    @classmethod
    def from_agent(cls, agent: Union[Agent, RemoteAgent]) -> "AgentSummaryResponse":
        """Build a summary from a (local or remote) Agent; db_id is None when the agent has no db."""
        return cls(id=agent.id, name=agent.name, description=agent.description, db_id=agent.db.id if agent.db else None)
class TeamSummaryResponse(BaseModel):
    """Compact team listing entry: identity, database, and execution mode."""
    id: Optional[str] = Field(None, description="Unique identifier for the team")
    name: Optional[str] = Field(None, description="Name of the team")
    description: Optional[str] = Field(None, description="Description of the team")
    db_id: Optional[str] = Field(None, description="Database identifier")
    mode: Optional[str] = Field(None, description="Team execution mode (coordinate, route, broadcast, tasks)")
    @classmethod
    def from_team(cls, team: Union[Team, RemoteTeam]) -> "TeamSummaryResponse":
        """Build a summary from a (local or remote) Team."""
        db_id = team.db.id if team.db else None
        # hasattr guard: RemoteTeam may not expose `mode` — treat missing/falsy mode as None.
        mode = team.mode.value if hasattr(team, "mode") and team.mode else None
        return cls(id=team.id, name=team.name, description=team.description, db_id=db_id, mode=mode)
class WorkflowSummaryResponse(BaseModel):
    """Compact workflow listing entry, including Builder/versioning metadata."""
    id: Optional[str] = Field(None, description="Unique identifier for the workflow")
    name: Optional[str] = Field(None, description="Name of the workflow")
    description: Optional[str] = Field(None, description="Description of the workflow")
    db_id: Optional[str] = Field(None, description="Database identifier")
    is_component: bool = Field(False, description="Whether this workflow was created via Builder")
    current_version: Optional[int] = Field(None, description="Current published version number")
    stage: Optional[str] = Field(None, description="Stage of the loaded config (draft/published)")
    @classmethod
    def from_workflow(
        cls,
        workflow: Union[Workflow, RemoteWorkflow],
        is_component: bool = False,
    ) -> "WorkflowSummaryResponse":
        """Build a summary from a (local or remote) Workflow.

        `_version`/`_stage` are read defensively via getattr since not all
        workflow objects carry these private attributes.
        """
        db_id = workflow.db.id if workflow.db else None
        return cls(
            id=workflow.id,
            name=workflow.name,
            description=workflow.description,
            db_id=db_id,
            is_component=is_component,
            current_version=getattr(workflow, "_version", None),
            stage=getattr(workflow, "_stage", None),
        )
class ConfigResponse(BaseModel):
    """Response schema for the general config endpoint.

    Aggregates the OS instance identity, its databases, per-feature
    configuration sections, and summaries of all registered agents, teams,
    workflows, and interfaces.
    """
    os_id: str = Field(..., description="Unique identifier for the OS instance")
    name: Optional[str] = Field(None, description="Name of the OS instance")
    description: Optional[str] = Field(None, description="Description of the OS instance")
    available_models: Optional[List[str]] = Field(None, description="List of available models")
    os_database: Optional[str] = Field(None, description="ID of the database used for the OS instance")
    databases: List[str] = Field(..., description="List of database IDs used by the components of the OS instance")
    chat: Optional[ChatConfig] = Field(None, description="Chat configuration")
    session: Optional[SessionConfig] = Field(None, description="Session configuration")
    metrics: Optional[MetricsConfig] = Field(None, description="Metrics configuration")
    memory: Optional[MemoryConfig] = Field(None, description="Memory configuration")
    knowledge: Optional[KnowledgeConfig] = Field(None, description="Knowledge configuration")
    evals: Optional[EvalsConfig] = Field(None, description="Evaluations configuration")
    traces: Optional[TracesConfig] = Field(None, description="Traces configuration")
    agents: List[AgentSummaryResponse] = Field(..., description="List of registered agents")
    teams: List[TeamSummaryResponse] = Field(..., description="List of registered teams")
    workflows: List[WorkflowSummaryResponse] = Field(..., description="List of registered workflows")
    interfaces: List[InterfaceResponse] = Field(..., description="List of available interfaces")
class Model(BaseModel):
    """Minimal model reference: identifier plus provider."""
    id: Optional[str] = Field(None, description="Model identifier")
    provider: Optional[str] = Field(None, description="Model provider name")
class ModelResponse(BaseModel):
    """Model description as returned by API endpoints (name, identifier, provider)."""
    name: Optional[str] = Field(None, description="Name of the model")
    model: Optional[str] = Field(None, description="Model identifier")
    provider: Optional[str] = Field(None, description="Model provider name")
class WorkflowRunRequest(BaseModel):
    """Request body for starting a workflow run."""
    input: Dict[str, Any] = Field(..., description="Input parameters for the workflow run")
    user_id: Optional[str] = Field(None, description="User identifier for the workflow run")
    session_id: Optional[str] = Field(None, description="Session identifier for context persistence")
class SessionSchema(BaseModel):
    """Lightweight session representation used in session list responses."""
    session_id: str = Field(..., description="Unique identifier for the session")
    session_name: str = Field(..., description="Human-readable name for the session")
    session_state: Optional[dict] = Field(None, description="Current state data of the session")
    created_at: Optional[datetime] = Field(None, description="Timestamp when session was created")
    updated_at: Optional[datetime] = Field(None, description="Timestamp when session was last updated")
    @classmethod
    def from_dict(cls, session: Dict[str, Any]) -> "SessionSchema":
        """Build a SessionSchema from a raw session dict as stored in the DB."""
        session_name = session.get("session_name")
        if not session_name:
            # No stored name: fall back to the helper-derived session name.
            session_name = get_session_name(session)
        # session_data may be absent or explicitly None; normalize to an empty dict.
        session_data = session.get("session_data", {}) or {}
        # Missing created_at defaults to epoch 0; missing updated_at falls back to created_at.
        created_at = to_utc_datetime(session.get("created_at", 0))
        updated_at = to_utc_datetime(session.get("updated_at", created_at))
        return cls(
            session_id=session.get("session_id", ""),
            session_name=session_name,
            session_state=session_data.get("session_state", None),
            created_at=created_at,
            updated_at=updated_at,
        )
class DeleteSessionRequest(BaseModel):
    """Request body for bulk-deleting sessions; both lists must be non-empty."""

    session_ids: List[str] = Field(..., description="List of session IDs to delete", min_length=1)
    session_types: List[SessionType] = Field(..., description="Types of sessions to delete", min_length=1)
class CreateSessionRequest(BaseModel):
    """Request body for creating a session, optionally bound to an agent, team, or workflow."""

    session_id: Optional[str] = Field(None, description="Optional session ID (generated if not provided)")
    session_name: Optional[str] = Field(None, description="Name for the session")
    session_state: Optional[Dict[str, Any]] = Field(None, description="Initial session state")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")
    user_id: Optional[str] = Field(None, description="User ID associated with the session")
    agent_id: Optional[str] = Field(None, description="Agent ID if this is an agent session")
    team_id: Optional[str] = Field(None, description="Team ID if this is a team session")
    workflow_id: Optional[str] = Field(None, description="Workflow ID if this is a workflow session")
class UpdateSessionRequest(BaseModel):
    """Request body for partially updating a session; None fields are left unchanged."""

    session_name: Optional[str] = Field(None, description="Updated session name")
    session_state: Optional[Dict[str, Any]] = Field(None, description="Updated session state")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Updated metadata")
    summary: Optional[Dict[str, Any]] = Field(None, description="Session summary")
class AgentSessionDetailSchema(BaseModel):
    """Detailed view of a single agent session, including chat history and metrics."""

    user_id: Optional[str] = Field(None, description="User ID associated with the session")
    agent_session_id: str = Field(..., description="Unique agent session identifier")
    session_id: str = Field(..., description="Session identifier")
    session_name: str = Field(..., description="Human-readable session name")
    session_summary: Optional[dict] = Field(None, description="Summary of session interactions")
    session_state: Optional[dict] = Field(None, description="Current state of the session")
    agent_id: Optional[str] = Field(None, description="Agent ID used in this session")
    total_tokens: Optional[int] = Field(None, description="Total tokens used in this session")
    agent_data: Optional[dict] = Field(None, description="Agent-specific data")
    metrics: Optional[dict] = Field(None, description="Session metrics")
    metadata: Optional[dict] = Field(None, description="Additional metadata")
    chat_history: Optional[List[dict]] = Field(None, description="Complete chat history")
    created_at: Optional[datetime] = Field(None, description="Session creation timestamp")
    updated_at: Optional[datetime] = Field(None, description="Last update timestamp")

    @classmethod
    def from_session(cls, session: AgentSession) -> "AgentSessionDetailSchema":
        """Build the schema from an AgentSession, resolving display name and UTC timestamps."""
        session_name = get_session_name({**session.to_dict(), "session_type": "agent"})
        # Epoch timestamps -> aware UTC datetimes; updated_at falls back to created_at.
        created_at = datetime.fromtimestamp(session.created_at, tz=timezone.utc) if session.created_at else None
        updated_at = datetime.fromtimestamp(session.updated_at, tz=timezone.utc) if session.updated_at else created_at
        return cls(
            user_id=session.user_id,
            agent_session_id=session.session_id,
            session_id=session.session_id,
            session_name=session_name,
            session_summary=session.summary.to_dict() if session.summary else None,
            session_state=session.session_data.get("session_state", None) if session.session_data else None,
            agent_id=session.agent_id if session.agent_id else None,
            # total_tokens is read out of session_data["session_metrics"] when session_data exists.
            total_tokens=session.session_data.get("session_metrics", {}).get("total_tokens")
            if session.session_data
            else None,
            metrics=session.session_data.get("session_metrics", {}) if session.session_data else None,  # type: ignore
            metadata=session.metadata,
            chat_history=[message.to_dict() for message in session.get_chat_history()],
            created_at=to_utc_datetime(created_at),
            updated_at=to_utc_datetime(updated_at),
        )
class TeamSessionDetailSchema(BaseModel):
    """Detailed view of a single team session, including chat history and metrics."""

    session_id: str = Field(..., description="Unique session identifier")
    session_name: str = Field(..., description="Human-readable session name")
    user_id: Optional[str] = Field(None, description="User ID associated with the session")
    team_id: Optional[str] = Field(None, description="Team ID used in this session")
    session_summary: Optional[dict] = Field(None, description="Summary of team interactions")
    session_state: Optional[dict] = Field(None, description="Current state of the session")
    metrics: Optional[dict] = Field(None, description="Session metrics")
    team_data: Optional[dict] = Field(None, description="Team-specific data")
    metadata: Optional[dict] = Field(None, description="Additional metadata")
    chat_history: Optional[List[dict]] = Field(None, description="Complete chat history")
    created_at: Optional[datetime] = Field(None, description="Session creation timestamp")
    updated_at: Optional[datetime] = Field(None, description="Last update timestamp")
    total_tokens: Optional[int] = Field(None, description="Total tokens used in this session")

    @classmethod
    def from_session(cls, session: TeamSession) -> "TeamSessionDetailSchema":
        """Build the schema from a TeamSession, resolving display name and UTC timestamps."""
        session_dict = session.to_dict()
        session_name = get_session_name({**session_dict, "session_type": "team"})
        # Epoch timestamps -> aware UTC datetimes; updated_at falls back to created_at.
        created_at = datetime.fromtimestamp(session.created_at, tz=timezone.utc) if session.created_at else None
        updated_at = datetime.fromtimestamp(session.updated_at, tz=timezone.utc) if session.updated_at else created_at
        return cls(
            session_id=session.session_id,
            team_id=session.team_id,
            session_name=session_name,
            session_summary=session_dict.get("summary") if session_dict.get("summary") else None,
            user_id=session.user_id,
            team_data=session.team_data,
            session_state=session.session_data.get("session_state", None) if session.session_data else None,
            # total_tokens is read out of session_data["session_metrics"] when session_data exists.
            total_tokens=session.session_data.get("session_metrics", {}).get("total_tokens")
            if session.session_data
            else None,
            metrics=session.session_data.get("session_metrics", {}) if session.session_data else None,
            metadata=session.metadata,
            chat_history=[message.to_dict() for message in session.get_chat_history()],
            created_at=to_utc_datetime(created_at),
            updated_at=to_utc_datetime(updated_at),
        )
class WorkflowSessionDetailSchema(BaseModel):
    """Detailed view of a single workflow session."""

    user_id: Optional[str] = Field(None, description="User ID associated with the session")
    workflow_id: Optional[str] = Field(None, description="Workflow ID used in this session")
    workflow_name: Optional[str] = Field(None, description="Name of the workflow")
    session_id: str = Field(..., description="Unique session identifier")
    session_name: str = Field(..., description="Human-readable session name")
    session_data: Optional[dict] = Field(None, description="Complete session data")
    session_state: Optional[dict] = Field(None, description="Current workflow state")
    workflow_data: Optional[dict] = Field(None, description="Workflow-specific data")
    metadata: Optional[dict] = Field(None, description="Additional metadata")
    created_at: Optional[datetime] = Field(None, description="Session creation timestamp")
    updated_at: Optional[datetime] = Field(None, description="Last update timestamp")

    @classmethod
    def from_session(cls, session: WorkflowSession) -> "WorkflowSessionDetailSchema":
        """Build the schema from a WorkflowSession, resolving display name and UTC timestamps."""
        session_dict = session.to_dict()
        session_name = get_session_name({**session_dict, "session_type": "workflow"})
        # Epoch timestamps -> aware UTC datetimes; updated_at falls back to created_at.
        created_at = datetime.fromtimestamp(session.created_at, tz=timezone.utc) if session.created_at else None
        updated_at = datetime.fromtimestamp(session.updated_at, tz=timezone.utc) if session.updated_at else created_at
        return cls(
            session_id=session.session_id,
            user_id=session.user_id,
            workflow_id=session.workflow_id,
            workflow_name=session.workflow_name,
            session_name=session_name,
            session_data=session.session_data,
            session_state=session.session_data.get("session_state", None) if session.session_data else None,
            workflow_data=session.workflow_data,
            metadata=session.metadata,
            created_at=to_utc_datetime(created_at),
            updated_at=to_utc_datetime(updated_at),
        )
class RunSchema(BaseModel):
    """Serialized view of a single agent run, including media, reasoning and events."""

    run_id: str = Field(..., description="Unique identifier for the run")
    parent_run_id: Optional[str] = Field(None, description="Parent run ID if this is a nested run")
    agent_id: Optional[str] = Field(None, description="Agent ID that executed this run")
    user_id: Optional[str] = Field(None, description="User ID associated with the run")
    status: Optional[str] = Field(None, description="Run status (PENDING, RUNNING, COMPLETED, ERROR, etc.)")
    run_input: Optional[str] = Field(None, description="Input provided to the run")
    content: Optional[Union[str, dict]] = Field(None, description="Output content from the run")
    run_response_format: Optional[str] = Field(None, description="Format of the response (text/json)")
    reasoning_content: Optional[str] = Field(None, description="Reasoning content if reasoning was enabled")
    reasoning_steps: Optional[List[dict]] = Field(None, description="List of reasoning steps")
    metrics: Optional[dict] = Field(None, description="Performance and usage metrics")
    messages: Optional[List[dict]] = Field(None, description="Message history for the run")
    tools: Optional[List[dict]] = Field(None, description="Tools used in the run")
    events: Optional[List[dict]] = Field(None, description="Events generated during the run")
    created_at: Optional[datetime] = Field(None, description="Run creation timestamp")
    references: Optional[List[dict]] = Field(None, description="References cited in the run")
    citations: Optional[Dict[str, Any]] = Field(
        None, description="Citations from the model (e.g., from Gemini grounding/search)"
    )
    reasoning_messages: Optional[List[dict]] = Field(None, description="Reasoning process messages")
    session_state: Optional[dict] = Field(None, description="Session state at the end of the run")
    images: Optional[List[dict]] = Field(None, description="Images included in the run")
    videos: Optional[List[dict]] = Field(None, description="Videos included in the run")
    audio: Optional[List[dict]] = Field(None, description="Audio files included in the run")
    files: Optional[List[dict]] = Field(None, description="Files included in the run")
    response_audio: Optional[dict] = Field(None, description="Audio response if generated")
    input_media: Optional[Dict[str, Any]] = Field(None, description="Input media attachments")

    @classmethod
    def from_dict(cls, run_dict: Dict[str, Any]) -> "RunSchema":
        """Build a RunSchema from a raw run dict.

        run_response_format is derived from content_type: "str" (the default)
        maps to "text", anything else to "json".
        """
        run_input = get_run_input(run_dict)
        run_response_format = "text" if run_dict.get("content_type", "str") == "str" else "json"
        return cls(
            run_id=run_dict.get("run_id", ""),
            parent_run_id=run_dict.get("parent_run_id", ""),
            agent_id=run_dict.get("agent_id", ""),
            user_id=run_dict.get("user_id", ""),
            status=run_dict.get("status"),
            run_input=run_input,
            content=run_dict.get("content", ""),
            run_response_format=run_response_format,
            reasoning_content=run_dict.get("reasoning_content", ""),
            reasoning_steps=run_dict.get("reasoning_steps", []),
            metrics=run_dict.get("metrics", {}),
            # messages/tools/events collapse to None (not []) when absent or empty.
            messages=[message for message in run_dict.get("messages", [])] if run_dict.get("messages") else None,
            tools=[tool for tool in run_dict.get("tools", [])] if run_dict.get("tools") else None,
            events=[event for event in run_dict["events"]] if run_dict.get("events") else None,
            references=run_dict.get("references", []),
            citations=run_dict.get("citations", None),
            reasoning_messages=run_dict.get("reasoning_messages", []),
            session_state=run_dict.get("session_state"),
            images=run_dict.get("images", []),
            videos=run_dict.get("videos", []),
            audio=run_dict.get("audio", []),
            files=run_dict.get("files", []),
            response_audio=run_dict.get("response_audio", None),
            input_media=extract_input_media(run_dict),
            created_at=to_utc_datetime(run_dict.get("created_at")),
        )
class TeamRunSchema(BaseModel):
    """Serialized view of a single team run, including media, reasoning and events."""

    run_id: str = Field(..., description="Unique identifier for the team run")
    parent_run_id: Optional[str] = Field(None, description="Parent run ID if this is a nested run")
    team_id: Optional[str] = Field(None, description="Team ID that executed this run")
    status: Optional[str] = Field(None, description="Run status (PENDING, RUNNING, COMPLETED, ERROR, etc.)")
    content: Optional[Union[str, dict]] = Field(None, description="Output content from the team run")
    reasoning_content: Optional[str] = Field(None, description="Reasoning content if reasoning was enabled")
    reasoning_steps: Optional[List[dict]] = Field(None, description="List of reasoning steps")
    run_input: Optional[str] = Field(None, description="Input provided to the run")
    run_response_format: Optional[str] = Field(None, description="Format of the response (text/json)")
    metrics: Optional[dict] = Field(None, description="Performance and usage metrics")
    tools: Optional[List[dict]] = Field(None, description="Tools used in the run")
    messages: Optional[List[dict]] = Field(None, description="Message history for the run")
    events: Optional[List[dict]] = Field(None, description="Events generated during the run")
    created_at: Optional[datetime] = Field(None, description="Run creation timestamp")
    references: Optional[List[dict]] = Field(None, description="References cited in the run")
    citations: Optional[Dict[str, Any]] = Field(
        None, description="Citations from the model (e.g., from Gemini grounding/search)"
    )
    reasoning_messages: Optional[List[dict]] = Field(None, description="Reasoning process messages")
    session_state: Optional[dict] = Field(None, description="Session state at the end of the run")
    input_media: Optional[Dict[str, Any]] = Field(None, description="Input media attachments")
    images: Optional[List[dict]] = Field(None, description="Images included in the run")
    videos: Optional[List[dict]] = Field(None, description="Videos included in the run")
    audio: Optional[List[dict]] = Field(None, description="Audio files included in the run")
    files: Optional[List[dict]] = Field(None, description="Files included in the run")
    response_audio: Optional[dict] = Field(None, description="Audio response if generated")

    @classmethod
    def from_dict(cls, run_dict: Dict[str, Any]) -> "TeamRunSchema":
        """Build a TeamRunSchema from a raw run dict.

        run_response_format is derived from content_type: "str" (the default)
        maps to "text", anything else to "json".
        """
        run_input = get_run_input(run_dict)
        run_response_format = "text" if run_dict.get("content_type", "str") == "str" else "json"
        return cls(
            run_id=run_dict.get("run_id", ""),
            parent_run_id=run_dict.get("parent_run_id", ""),
            team_id=run_dict.get("team_id", ""),
            status=run_dict.get("status"),
            run_input=run_input,
            content=run_dict.get("content", ""),
            run_response_format=run_response_format,
            reasoning_content=run_dict.get("reasoning_content", ""),
            reasoning_steps=run_dict.get("reasoning_steps", []),
            metrics=run_dict.get("metrics", {}),
            # messages/tools/events collapse to None (not []) when absent or empty.
            messages=[message for message in run_dict.get("messages", [])] if run_dict.get("messages") else None,
            tools=[tool for tool in run_dict.get("tools", [])] if run_dict.get("tools") else None,
            events=[event for event in run_dict["events"]] if run_dict.get("events") else None,
            created_at=to_utc_datetime(run_dict.get("created_at")),
            references=run_dict.get("references", []),
            citations=run_dict.get("citations", None),
            reasoning_messages=run_dict.get("reasoning_messages", []),
            session_state=run_dict.get("session_state"),
            images=run_dict.get("images", []),
            videos=run_dict.get("videos", []),
            audio=run_dict.get("audio", []),
            files=run_dict.get("files", []),
            response_audio=run_dict.get("response_audio", None),
            input_media=extract_input_media(run_dict),
        )
class WorkflowRunSchema(BaseModel):
    """Serialized view of a single workflow run, including step results and media."""

    run_id: str = Field(..., description="Unique identifier for the workflow run")
    run_input: Optional[str] = Field(None, description="Input provided to the workflow")
    events: Optional[List[dict]] = Field(None, description="Events generated during the workflow")
    workflow_id: Optional[str] = Field(None, description="Workflow ID that was executed")
    user_id: Optional[str] = Field(None, description="User ID associated with the run")
    content: Optional[Union[str, dict]] = Field(None, description="Output content from the workflow")
    content_type: Optional[str] = Field(None, description="Type of content returned")
    status: Optional[str] = Field(None, description="Status of the workflow run")
    step_results: Optional[list[dict]] = Field(None, description="Results from each workflow step")
    step_executor_runs: Optional[list[dict]] = Field(None, description="Executor runs for each step")
    metrics: Optional[dict] = Field(None, description="Performance and usage metrics")
    created_at: Optional[datetime] = Field(None, description="Run creation timestamp")
    reasoning_content: Optional[str] = Field(None, description="Reasoning content if reasoning was enabled")
    reasoning_steps: Optional[List[dict]] = Field(None, description="List of reasoning steps")
    references: Optional[List[dict]] = Field(None, description="References cited in the workflow")
    citations: Optional[Dict[str, Any]] = Field(
        None, description="Citations from the model (e.g., from Gemini grounding/search)"
    )
    reasoning_messages: Optional[List[dict]] = Field(None, description="Reasoning process messages")
    images: Optional[List[dict]] = Field(None, description="Images included in the workflow")
    videos: Optional[List[dict]] = Field(None, description="Videos included in the workflow")
    audio: Optional[List[dict]] = Field(None, description="Audio files included in the workflow")
    files: Optional[List[dict]] = Field(None, description="Files included in the workflow")
    response_audio: Optional[dict] = Field(None, description="Audio response if generated")

    @classmethod
    def from_dict(cls, run_response: Dict[str, Any]) -> "WorkflowRunSchema":
        """Build a WorkflowRunSchema from a raw run-response dict."""
        run_input = get_run_input(run_response, is_workflow_run=True)
        return cls(
            run_id=run_response.get("run_id", ""),
            run_input=run_input,
            events=run_response.get("events", []),
            workflow_id=run_response.get("workflow_id", ""),
            user_id=run_response.get("user_id", ""),
            content=run_response.get("content", ""),
            content_type=run_response.get("content_type", ""),
            status=run_response.get("status", ""),
            metrics=run_response.get("metrics", {}),
            step_results=run_response.get("step_results", []),
            step_executor_runs=run_response.get("step_executor_runs", []),
            created_at=to_utc_datetime(run_response.get("created_at")),
            reasoning_content=run_response.get("reasoning_content", ""),
            reasoning_steps=run_response.get("reasoning_steps", []),
            references=run_response.get("references", []),
            citations=run_response.get("citations", None),
            reasoning_messages=run_response.get("reasoning_messages", []),
            images=run_response.get("images", []),
            videos=run_response.get("videos", []),
            audio=run_response.get("audio", []),
            files=run_response.get("files", []),
            response_audio=run_response.get("response_audio", None),
        )
T = TypeVar("T")
class SortOrder(str, Enum):
    """Sort direction for list endpoints; str-valued so it serializes as "asc"/"desc"."""

    ASC = "asc"
    DESC = "desc"
class PaginationInfo(BaseModel):
    """Pagination metadata attached to paginated responses."""

    page: int = Field(0, description="Current page number (0-indexed)", ge=0)
    limit: int = Field(20, description="Number of items per page", ge=1)
    total_pages: int = Field(0, description="Total number of pages", ge=0)
    total_count: int = Field(0, description="Total count of items", ge=0)
    search_time_ms: float = Field(0, description="Search execution time in milliseconds", ge=0)
class PaginatedResponse(BaseModel, Generic[T]):
    """Generic wrapper that adds PaginationInfo to any list response model."""

    data: List[T] = Field(..., description="List of items for the current page")
    meta: PaginationInfo = Field(..., description="Pagination metadata")
class ComponentType(str, Enum):
    """Kind of component a record refers to: agent, team, or workflow."""

    AGENT = "agent"
    TEAM = "team"
    WORKFLOW = "workflow"
class ComponentCreate(BaseModel):
    """Request body for creating a component, optionally with an initial config version."""

    name: str = Field(..., description="Display name")
    component_id: Optional[str] = Field(
        None, description="Unique identifier for the entity. Auto-generated from name if not provided."
    )
    component_type: ComponentType = Field(..., description="Type of entity: agent, team, or workflow")
    description: Optional[str] = Field(None, description="Optional description")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Optional metadata")
    # Config parameters are optional, but if provided, they will be used to create the initial config
    config: Optional[Dict[str, Any]] = Field(None, description="Optional configuration")
    label: Optional[str] = Field(None, description="Optional label (e.g., 'stable')")
    stage: str = Field("draft", description="Stage: 'draft' or 'published'")
    notes: Optional[str] = Field(None, description="Optional notes")
    set_current: bool = Field(True, description="Set as current version")
class ComponentResponse(BaseModel):
    """Response payload describing a stored component and its current version."""

    component_id: str
    component_type: ComponentType
    name: Optional[str] = None
    description: Optional[str] = None
    current_version: Optional[int] = None
    metadata: Optional[Dict[str, Any]] = None
    # Unix-epoch timestamps (ints), unlike the datetime fields on session schemas.
    created_at: int
    updated_at: Optional[int] = None
class ConfigCreate(BaseModel):
    """Request body for creating a new config version for a component."""

    config: Dict[str, Any] = Field(..., description="The configuration data")
    version: Optional[int] = Field(None, description="Optional version number")
    label: Optional[str] = Field(None, description="Optional label (e.g., 'stable')")
    stage: str = Field("draft", description="Stage: 'draft' or 'published'")
    notes: Optional[str] = Field(None, description="Optional notes")
    links: Optional[List[Dict[str, Any]]] = Field(None, description="Optional links to child components")
    set_current: bool = Field(True, description="Set as current version")
class ComponentConfigResponse(BaseModel):
    """Response payload describing one config version of a component."""

    component_id: str
    version: int
    label: Optional[str] = None
    stage: str
    config: Dict[str, Any]
    notes: Optional[str] = None
    # Unix-epoch timestamps (ints).
    created_at: int
    updated_at: Optional[int] = None
class ComponentUpdate(BaseModel):
    """Request body for partially updating a component; None fields are left unchanged."""

    name: Optional[str] = None
    description: Optional[str] = None
    component_type: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
    current_version: Optional[int] = None
class ConfigUpdate(BaseModel):
    """Request body for partially updating a config version; None fields are left unchanged."""

    config: Optional[Dict[str, Any]] = None
    label: Optional[str] = None
    stage: Optional[str] = None
    notes: Optional[str] = None
    links: Optional[List[Dict[str, Any]]] = None
class RegistryResourceType(str, Enum):
    """Types of resources that can be stored in a registry."""

    TOOL = "tool"
    MODEL = "model"
    DB = "db"
    VECTOR_DB = "vector_db"
    SCHEMA = "schema"
    FUNCTION = "function"
    AGENT = "agent"
    TEAM = "team"
class CallableMetadata(BaseModel):
    """Common metadata for callable components (tools, functions)."""

    name: str = Field(..., description="Callable name")
    description: Optional[str] = Field(None, description="Callable description")
    class_path: str = Field(..., description="Full module path to the class/function")
    module: Optional[str] = Field(None, description="Module where the callable is defined")
    qualname: Optional[str] = Field(None, description="Qualified name of the callable")
    has_entrypoint: bool = Field(..., description="Whether the callable has an executable entrypoint")
    parameters: Dict[str, Any] = Field(default_factory=dict, description="JSON schema of parameters")
    requires_confirmation: Optional[bool] = Field(None, description="Whether execution requires user confirmation")
    external_execution: Optional[bool] = Field(None, description="Whether execution happens externally")
    signature: Optional[str] = Field(None, description="Function signature string")
    return_annotation: Optional[str] = Field(None, description="Return type annotation")
class ToolMetadata(BaseModel):
    """Metadata for tool registry components.

    Covers both toolkits (is_toolkit=True, with a list of member functions)
    and single callables (the flat optional fields below).
    """

    class_path: str = Field(..., description="Full module path to the tool class")
    is_toolkit: bool = Field(False, description="Whether this is a toolkit containing multiple functions")
    functions: Optional[List[CallableMetadata]] = Field(
        None, description="Functions in the toolkit (if is_toolkit=True)"
    )
    # Fields for non-toolkit tools (Function or raw callable)
    module: Optional[str] = Field(None, description="Module where the callable is defined")
    qualname: Optional[str] = Field(None, description="Qualified name of the callable")
    has_entrypoint: Optional[bool] = Field(None, description="Whether the tool has an executable entrypoint")
    parameters: Optional[Dict[str, Any]] = Field(None, description="JSON schema of parameters")
    requires_confirmation: Optional[bool] = Field(None, description="Whether execution requires user confirmation")
    external_execution: Optional[bool] = Field(None, description="Whether execution happens externally")
    signature: Optional[str] = Field(None, description="Function signature string")
    return_annotation: Optional[str] = Field(None, description="Return type annotation")
class ModelMetadata(BaseModel):
    """Metadata for model registry components."""

    class_path: str = Field(..., description="Full module path to the model class")
    provider: Optional[str] = Field(None, description="Model provider (e.g., openai, anthropic)")
    model_id: Optional[str] = Field(None, description="Model identifier")
class DbMetadata(BaseModel):
    """Metadata for database registry components."""

    class_path: str = Field(..., description="Full module path to the database class")
    db_id: Optional[str] = Field(None, description="Database identifier")
class VectorDbMetadata(BaseModel):
    """Metadata for vector database registry components."""

    class_path: str = Field(..., description="Full module path to the vector database class")
    vector_db_id: Optional[str] = Field(None, description="Vector database identifier")
    collection: Optional[str] = Field(None, description="Collection name")
    table_name: Optional[str] = Field(None, description="Table name (for SQL-based vector stores)")
class SchemaMetadata(BaseModel):
    """Metadata for schema registry components."""

    # Trailing underscore avoids shadowing BaseModel internals; serialized as "schema" via the alias.
    schema_: Optional[Dict[str, Any]] = Field(None, alias="schema", description="JSON schema definition")
    schema_error: Optional[str] = Field(None, description="Error message if schema generation failed")
    class_path: str = Field(..., description="Full module path to the schema class")
class FunctionMetadata(CallableMetadata):
    """Metadata for function registry components (workflow conditions, selectors, etc.).

    Identical shape to CallableMetadata; exists as a distinct type for the
    RegistryMetadata union.
    """

    pass
# Union of all metadata types for type hints
RegistryMetadata = Union[
    ToolMetadata,
    ModelMetadata,
    DbMetadata,
    VectorDbMetadata,
    SchemaMetadata,
    FunctionMetadata,
]
class RegistryContentResponse(BaseModel):
    """Response payload describing one entry stored in a registry."""

    name: str
    type: RegistryResourceType
    id: Optional[str] = None
    description: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/schema.py",
"license": "Apache License 2.0",
"lines": 601,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/settings.py | from __future__ import annotations
from typing import List, Optional
from pydantic import Field, field_validator
from pydantic_settings import BaseSettings
class AgnoAPISettings(BaseSettings):
    """App settings for API-based apps that can be set using environment variables.

    Reference: https://pydantic-docs.helpmanual.io/usage/settings/
    """

    env: str = "dev"

    # Set to False to disable docs server at /docs and /redoc
    docs_enabled: bool = True

    # Authentication settings
    os_security_key: Optional[str] = Field(default=None, description="Bearer token for API authentication")

    # Authorization flag - when True, JWT middleware handles auth and security key validation is skipped
    authorization_enabled: bool = Field(default=False, description="Whether JWT authorization is enabled")

    # Cors origin list to allow requests from.
    # This list is set using the set_cors_origin_list validator
    cors_origin_list: Optional[List[str]] = Field(default=None, validate_default=True)

    @field_validator("cors_origin_list", mode="before")
    def set_cors_origin_list(cls, cors_origin_list):
        """Append the Agno-hosted origins to any user-provided CORS origins.

        Returns a new list: the previous implementation extended the incoming
        list in place, mutating the caller's object and appending duplicate
        Agno domains if validation ran more than once on the same list.
        """
        # Copy instead of aliasing so the caller's list is never mutated.
        valid_cors = list(cors_origin_list) if cors_origin_list else []
        # Add Agno domains to cors origin list
        valid_cors.extend(
            [
                "http://localhost:3000",
                "https://agno.com",
                "https://www.agno.com",
                "https://app.agno.com",
                "https://os-stg.agno.com",
                "https://os.agno.com",
            ]
        )
        return valid_cors
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/settings.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/utils.py | import json
from datetime import datetime, timezone
from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
from fastapi import FastAPI, HTTPException, Request, UploadFile
from fastapi.routing import APIRoute, APIRouter
from pydantic import BaseModel, create_model
from starlette.middleware.cors import CORSMiddleware
from agno.agent import Agent, RemoteAgent
from agno.db.base import AsyncBaseDb, BaseDb
from agno.knowledge.knowledge import Knowledge
from agno.media import Audio, Image, Video
from agno.media import File as FileMedia
from agno.models.message import Message
from agno.os.config import AgentOSConfig
from agno.registry import Registry
from agno.remote.base import RemoteDb, RemoteKnowledge
from agno.run.agent import RunOutputEvent
from agno.run.team import TeamRunOutputEvent
from agno.run.workflow import WorkflowRunOutputEvent
from agno.team import RemoteTeam, Team
from agno.tools import Function, Toolkit
from agno.utils.log import log_warning, logger
from agno.workflow import RemoteWorkflow, Workflow
def to_utc_datetime(value: Optional[Union[str, int, float, datetime]]) -> Optional[datetime]:
    """Convert a timestamp, ISO 8601 string, or datetime to a UTC datetime.

    Args:
        value: A Unix timestamp in seconds (int/float), an ISO 8601 string
            (a trailing "Z" is accepted as UTC), or a datetime. None passes
            through unchanged.

    Returns:
        A timezone-aware datetime, or None when value is None or the string
        cannot be parsed. Naive datetimes — and naive ISO strings — are
        assumed to already be in UTC.
    """
    if value is None:
        return None
    if isinstance(value, datetime):
        # If already a datetime, make sure the timezone is UTC
        if value.tzinfo is None:
            return value.replace(tzinfo=timezone.utc)
        return value
    if isinstance(value, str):
        try:
            if value.endswith("Z"):
                value = value[:-1] + "+00:00"
            parsed = datetime.fromisoformat(value)
        except (ValueError, TypeError):
            return None
        # Fix: naive ISO strings previously leaked out as naive datetimes,
        # breaking the "UTC datetime" contract. Assume UTC, mirroring the
        # datetime branch above.
        if parsed.tzinfo is None:
            return parsed.replace(tzinfo=timezone.utc)
        return parsed
    return datetime.fromtimestamp(value, tz=timezone.utc)
async def get_request_kwargs(request: Request, endpoint_func: Callable) -> Dict[str, Any]:
"""Given a Request and an endpoint function, return a dictionary with all extra form data fields.
Args:
request: The FastAPI Request object
endpoint_func: The function exposing the endpoint that received the request
Supported form parameters:
- session_state: JSON string of session state dict
- dependencies: JSON string of dependencies dict
- metadata: JSON string of metadata dict
- knowledge_filters: JSON string of knowledge filters
- output_schema: JSON schema string (converted to Pydantic model by default)
- use_json_schema: If "true", keeps output_schema as dict instead of converting to Pydantic model
Returns:
A dictionary of kwargs to pass to Agent/Team run methods
"""
import inspect
form_data = await request.form()
sig = inspect.signature(endpoint_func)
known_fields = set(sig.parameters.keys())
kwargs: Dict[str, Any] = {key: value for key, value in form_data.items() if key not in known_fields}
# Handle JSON parameters. They are passed as strings and need to be deserialized.
if session_state := kwargs.get("session_state"):
try:
if isinstance(session_state, str):
session_state_dict = json.loads(session_state) # type: ignore
kwargs["session_state"] = session_state_dict
except json.JSONDecodeError:
kwargs.pop("session_state")
log_warning(f"Invalid session_state parameter couldn't be loaded: {session_state}")
if dependencies := kwargs.get("dependencies"):
try:
if isinstance(dependencies, str):
dependencies_dict = json.loads(dependencies) # type: ignore
kwargs["dependencies"] = dependencies_dict
except json.JSONDecodeError:
kwargs.pop("dependencies")
log_warning(f"Invalid dependencies parameter couldn't be loaded: {dependencies}")
if metadata := kwargs.get("metadata"):
try:
if isinstance(metadata, str):
metadata_dict = json.loads(metadata) # type: ignore
kwargs["metadata"] = metadata_dict
except json.JSONDecodeError:
kwargs.pop("metadata")
log_warning(f"Invalid metadata parameter couldn't be loaded: {metadata}")
if knowledge_filters := kwargs.get("knowledge_filters"):
try:
if isinstance(knowledge_filters, str):
knowledge_filters_dict = json.loads(knowledge_filters) # type: ignore
# Try to deserialize FilterExpr objects
from agno.filters import from_dict
# Check if it's a single FilterExpr dict or a list of FilterExpr dicts
if isinstance(knowledge_filters_dict, dict) and "op" in knowledge_filters_dict:
# Single FilterExpr - convert to list format
kwargs["knowledge_filters"] = [from_dict(knowledge_filters_dict)]
elif isinstance(knowledge_filters_dict, list):
# List of FilterExprs or mixed content
deserialized = []
for item in knowledge_filters_dict:
if isinstance(item, dict) and "op" in item:
deserialized.append(from_dict(item))
else:
# Keep non-FilterExpr items as-is
deserialized.append(item)
kwargs["knowledge_filters"] = deserialized
else:
# Regular dict filter
kwargs["knowledge_filters"] = knowledge_filters_dict
except json.JSONDecodeError:
kwargs.pop("knowledge_filters")
log_warning(f"Invalid knowledge_filters parameter couldn't be loaded: {knowledge_filters}")
except ValueError as e:
# Filter deserialization failed
kwargs.pop("knowledge_filters")
log_warning(f"Invalid FilterExpr in knowledge_filters: {e}")
# Handle output_schema - convert JSON schema to Pydantic model or keep as dict
# use_json_schema is a control flag consumed here (not passed to Agent/Team)
# When true, output_schema stays as dict for direct JSON output
use_json_schema = kwargs.pop("use_json_schema", False)
if isinstance(use_json_schema, str):
use_json_schema = use_json_schema.lower() == "true"
if output_schema := kwargs.get("output_schema"):
try:
if isinstance(output_schema, str):
schema_dict = json.loads(output_schema)
if use_json_schema:
# Keep as dict schema for direct JSON output
kwargs["output_schema"] = schema_dict
else:
# Convert to Pydantic model (default behavior)
dynamic_model = json_schema_to_pydantic_model(schema_dict)
kwargs["output_schema"] = dynamic_model
except json.JSONDecodeError:
kwargs.pop("output_schema")
log_warning(f"Invalid output_schema JSON: {output_schema}")
except Exception as e:
kwargs.pop("output_schema")
log_warning(f"Failed to create output_schema model: {e}")
# Parse boolean and null values
for key, value in kwargs.items():
if isinstance(value, str) and value.lower() in ["true", "false"]:
kwargs[key] = value.lower() == "true"
elif isinstance(value, str) and value.lower() in ["null", "none"]:
kwargs[key] = None
return kwargs
def format_sse_event(event: Union[RunOutputEvent, TeamRunOutputEvent, WorkflowRunOutputEvent]) -> str:
    """Render a run event as a Server-Sent-Events message.

    Produces::

        event: <event name>
        data: <compact JSON payload>

    Falls back to the generic "message" event name when JSON handling fails.

    Args:
        event: The run event to serialize

    Returns:
        SSE-formatted string terminated by a blank line
    """
    try:
        name = event.event or "message"
        # Compact separators and no indent keep the payload on a single data: line
        payload = event.to_json(separators=(",", ":"), indent=None)
        return f"event: {name}\ndata: {payload}\n\n"
    except json.JSONDecodeError:
        payload = event.to_json(separators=(",", ":"), indent=None)
        return f"event: message\ndata: {payload}\n\n"
async def get_db(
    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]], db_id: Optional[str] = None, table: Optional[str] = None
) -> Union[BaseDb, AsyncBaseDb, RemoteDb]:
    """Return the database with the given ID and/or table, or the first database if no ID/table is provided.

    Args:
        dbs: Mapping of database ID to the list of database instances registered under that ID
        db_id: Optional database ID to select; required when `dbs` holds more than one entry or when `table` is given
        table: Optional table name used to pick, among databases sharing `db_id`, the one holding that table

    Raises:
        HTTPException: 400 when `table` is given without `db_id`, or when multiple databases exist without a `db_id`
        HTTPException: 404 when no database matches the given `db_id` (and `table`, if provided)
    """
    if table and not db_id:
        raise HTTPException(status_code=400, detail="The db_id query parameter is required when passing a table")
    async def _has_table(db: Union[BaseDb, AsyncBaseDb, RemoteDb], table_name: str) -> bool:
        """Check if this database has the specified table (configured and actually exists)."""
        # First check if table name is configured
        # (matching any of the known table-name attributes counts as configured)
        is_configured = (
            hasattr(db, "session_table_name")
            and db.session_table_name == table_name
            or hasattr(db, "memory_table_name")
            and db.memory_table_name == table_name
            or hasattr(db, "metrics_table_name")
            and db.metrics_table_name == table_name
            or hasattr(db, "eval_table_name")
            and db.eval_table_name == table_name
            or hasattr(db, "knowledge_table_name")
            and db.knowledge_table_name == table_name
        )
        if not is_configured:
            return False
        if isinstance(db, RemoteDb):
            # We have to assume remote DBs are always configured and exist
            return True
        # Then check if table actually exists in the database
        try:
            if isinstance(db, AsyncBaseDb):
                # For async databases, await the check
                return await db.table_exists(table_name)
            else:
                # For sync databases, call directly
                return db.table_exists(table_name)
        except (NotImplementedError, AttributeError):
            # If table_exists not implemented, fall back to configuration check
            return is_configured
    # If db_id is provided, first find the database with that ID
    if db_id:
        target_db_list = dbs.get(db_id)
        if not target_db_list:
            raise HTTPException(status_code=404, detail=f"No database found with id '{db_id}'")
        # If table is also specified, search through all databases with this ID to find one with the table
        if table:
            for db in target_db_list:
                if await _has_table(db, table):
                    return db
            raise HTTPException(status_code=404, detail=f"No database with id '{db_id}' has table '{table}'")
        # If no table specified, return the first database with this ID
        return target_db_list[0]
    # Raise if multiple databases are provided but no db_id is provided
    if len(dbs) > 1:
        raise HTTPException(
            status_code=400, detail="The db_id query parameter is required when using multiple databases"
        )
    # Return the first (and only) database
    # (the generator flattens the single remaining {id: [dbs]} entry)
    return next(db for dbs in dbs.values() for db in dbs)
def _generate_knowledge_id(name: str, db_id: str, table_name: str) -> str:
"""Generate a deterministic unique ID for a knowledge instance.
Uses db_id, table_name, and name to ensure uniqueness across all knowledge instances.
"""
import hashlib
id_seed = f"{db_id}:{table_name}:{name}"
# Use SHA256 instead of MD5 for FIPS compliance
hash_hex = hashlib.sha256(id_seed.encode()).hexdigest()
return f"{hash_hex[:8]}-{hash_hex[8:12]}-{hash_hex[12:16]}-{hash_hex[16:20]}-{hash_hex[20:32]}"
def get_knowledge_instance(
    knowledge_instances: List[Union[Knowledge, RemoteKnowledge]],
    db_id: Optional[str] = None,
    knowledge_id: Optional[str] = None,
) -> Union[Knowledge, RemoteKnowledge]:
    """Return the knowledge instance matching the given criteria.

    Args:
        knowledge_instances: List of knowledge instances to search
        db_id: Database ID to filter by (for backward compatibility)
        knowledge_id: Unique generated ID to filter by (preferred)

    Returns:
        The matching knowledge instance

    Raises:
        HTTPException: If no matching instance is found or parameters are invalid
    """

    def _generated_id(knowledge: Union[Knowledge, RemoteKnowledge]) -> str:
        # Deterministic ID for an instance with a contents_db; mirrors what
        # the API hands out, so lookups and error listings stay consistent.
        name = getattr(knowledge, "name", None) or f"knowledge_{knowledge.contents_db.id}"
        table_name = knowledge.contents_db.knowledge_table_name or "unknown"
        return _generate_knowledge_id(name, knowledge.contents_db.id, table_name)

    # If only one instance and no specific identifier requested, return it (backwards compatible)
    if len(knowledge_instances) == 1 and not knowledge_id and not db_id:
        return next(iter(knowledge_instances))
    # If knowledge_id provided, find by unique ID (preferred)
    if knowledge_id:
        for knowledge in knowledge_instances:
            if not knowledge.contents_db:
                continue
            if _generated_id(knowledge) == knowledge_id:
                return knowledge
        raise HTTPException(status_code=404, detail=f"Knowledge base '{knowledge_id}' not found")
    # If db_id provided, find by database ID (backward compatible)
    if db_id:
        matches = [k for k in knowledge_instances if k.contents_db and k.contents_db.id == db_id]
        if not matches:
            raise HTTPException(status_code=404, detail=f"Knowledge instance with db_id '{db_id}' not found")
        if len(matches) == 1:
            return matches[0]
        # Multiple matches - recommend using knowledge_id
        knowledge_ids = [_generated_id(k) for k in matches if k.contents_db]
        raise HTTPException(
            status_code=400,
            detail=f"Multiple knowledge instances found for db_id '{db_id}'. "
            f"Please specify knowledge_id parameter. Available IDs: {knowledge_ids}",
        )
    # No identifiers provided - list available IDs
    knowledge_ids = [_generated_id(k) for k in knowledge_instances if k.contents_db]
    raise HTTPException(
        status_code=400,
        detail=f"db_id or knowledge_id query parameter is required when using multiple knowledge bases. "
        f"Available IDs: {knowledge_ids}",
    )
def get_run_input(run_dict: Dict[str, Any], is_workflow_run: bool = False) -> str:
    """Get the run input from the given run dictionary.

    Uses the RunInput/TeamRunInput object which stores the original user input.
    """
    raw_input = run_dict.get("input")
    if not is_workflow_run:
        # Agent/team runs store the original user input under input.input_content
        if isinstance(raw_input, dict) and raw_input.get("input_content") is not None:
            return stringify_input_content(raw_input["input_content"])
    else:
        # Workflow runs store the input directly on the run
        if raw_input is not None:
            return stringify_input_content(raw_input)
        # Fall back to the first step executor run's last user message
        executor_runs = run_dict.get("step_executor_runs", [])
        if executor_runs:
            for message in reversed(executor_runs[0].get("messages", [])):
                if message.get("role") == "user":
                    return message.get("content", "")
    # Final fallback: scan this run's own messages for the last user message
    messages = run_dict.get("messages")
    if messages is not None:
        for message in reversed(messages):
            if message.get("role") == "user":
                return message.get("content", "")
    return ""
def get_session_name(session: Dict[str, Any]) -> str:
    """Get the session name from the given session dictionary.

    Falls back, in order, to: the explicit session_data.session_name, the
    workflow input (for workflow sessions), the first user message across
    runs, or the raw run input. Returns "" when nothing usable is found.
    """
    # If session_data.session_name is set, return that
    session_data = session.get("session_data")
    if session_data is not None and session_data.get("session_name") is not None:
        return session_data["session_name"]
    runs = session.get("runs", []) or []
    session_type = session.get("session_type")
    # Handle workflows separately
    if session_type == "workflow":
        if not runs:
            return ""
        workflow_run = runs[0]
        workflow_input = workflow_run.get("input")
        if isinstance(workflow_input, str):
            return workflow_input
        elif isinstance(workflow_input, dict):
            try:
                return json.dumps(workflow_input)
            except (TypeError, ValueError):
                pass
        # Guard against "workflow_data" being present but explicitly None
        workflow_name = (session.get("workflow_data") or {}).get("name")
        return f"New {workflow_name} Session" if workflow_name else ""
    # For team, filter to team runs (runs without agent_id); for agents, use all runs
    if session_type == "team":
        runs_to_check = [r for r in runs if not r.get("agent_id")]
    else:
        runs_to_check = runs
    # Find the first user message across runs
    for r in runs_to_check:
        if r is None:
            continue
        run_dict = r if isinstance(r, dict) else r.to_dict()
        for message in run_dict.get("messages") or []:
            if message.get("role") == "user" and message.get("content"):
                return message["content"]
        # Use the normalized dict: `r` may be a run object without .get()
        run_input = run_dict.get("input")
        if run_input is not None:
            return stringify_input_content(run_input)
    return ""
def extract_input_media(run_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Collect the media attachments (images/videos/audios/files) from a run's input payload.

    Always returns all four keys; each maps to a (possibly empty) list.
    """
    media: Dict[str, List[Any]] = {key: [] for key in ("images", "videos", "audios", "files")}
    payload = run_dict.get("input", {})
    # Non-dict inputs (e.g. plain strings) carry no media
    if isinstance(payload, dict):
        for key in media:
            media[key].extend(payload.get(key, []))
    return media
def process_image(file: UploadFile) -> Image:
    """Read an uploaded file and wrap its bytes as an Image.

    Raises:
        HTTPException: 400 when the upload is empty
    """
    data = file.file.read()
    if not data:
        raise HTTPException(status_code=400, detail="Empty file")
    return Image(content=data, format=extract_format(file), mime_type=file.content_type)
def process_audio(file: UploadFile) -> Audio:
    """Read an uploaded file and wrap its bytes as an Audio artifact.

    Raises:
        HTTPException: 400 when the upload is empty
    """
    data = file.file.read()
    if not data:
        raise HTTPException(status_code=400, detail="Empty file")
    return Audio(content=data, format=extract_format(file), mime_type=file.content_type)
def process_video(file: UploadFile) -> Video:
    """Read an uploaded file and wrap its bytes as a Video artifact.

    Raises:
        HTTPException: 400 when the upload is empty
    """
    data = file.file.read()
    if not data:
        raise HTTPException(status_code=400, detail="Empty file")
    return Video(content=data, format=extract_format(file), mime_type=file.content_type)
def process_document(file: UploadFile) -> Optional[FileMedia]:
    """Read an uploaded document into a FileMedia; returns None when processing fails."""
    try:
        data = file.file.read()
        if not data:
            # NOTE(review): this HTTPException is caught by the except below,
            # so empty documents yield None rather than a 400 — confirm intended
            raise HTTPException(status_code=400, detail="Empty file")
        return FileMedia(
            content=data, filename=file.filename, format=extract_format(file), mime_type=file.content_type
        )
    except Exception as e:
        logger.error(f"Error processing document {file.filename}: {e}")
        return None
def extract_format(file: UploadFile) -> Optional[str]:
    """Extract the File format from file name or content_type."""
    filename = file.filename
    # Prefer the filename extension when one is present
    if filename and "." in filename:
        return filename.rsplit(".", 1)[-1].lower()
    # Otherwise derive it from the MIME content type (e.g. "image/png" -> "png")
    content_type = file.content_type
    if content_type:
        return content_type.strip().split("/")[-1]
    return None
def get_agent_by_id(
    agent_id: str,
    agents: Optional[List[Union[Agent, RemoteAgent]]] = None,
    db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
    registry: Optional[Registry] = None,
    version: Optional[int] = None,
    create_fresh: bool = False,
) -> Optional[Union[Agent, RemoteAgent]]:
    """Get an agent by ID, optionally creating a fresh instance for request isolation.

    When create_fresh=True, a new agent instance is made via deep_copy() so mutable
    state is not shared between concurrent requests; heavy resources (db, model,
    MCP tools) remain shared.

    Args:
        agent_id: The agent ID to look up
        agents: List of agents to search
        db: Optional database used as a fallback lookup
        registry: Optional registry passed to the database loader
        version: Optional agent version to load from the database
        create_fresh: If True, creates a new instance using deep_copy()

    Returns:
        The agent instance (shared or fresh copy based on create_fresh), or None
    """
    if agent_id is None:
        return None
    # In-memory lookup first
    for candidate in agents or []:
        if candidate.id != agent_id:
            continue
        if create_fresh and isinstance(candidate, Agent):
            isolated = candidate.deep_copy()
            # Clear team/workflow context — this is a standalone agent copy
            isolated.team_id = None
            isolated.workflow_id = None
            return isolated
        return candidate
    # Fall back to the database (sync databases only)
    if db and isinstance(db, BaseDb):
        from agno.agent.agent import get_agent_by_id as get_agent_by_id_db

        try:
            return get_agent_by_id_db(db=db, id=agent_id, version=version, registry=registry)
        except Exception as e:
            logger.error(f"Error getting agent {agent_id} from database: {e}")
            return None
    return None
def get_team_by_id(
    team_id: str,
    teams: Optional[List[Union[Team, RemoteTeam]]] = None,
    create_fresh: bool = False,
    db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
    version: Optional[int] = None,
    registry: Optional[Registry] = None,
) -> Optional[Union[Team, RemoteTeam]]:
    """Get a team by ID, optionally creating a fresh instance for request isolation.

    When create_fresh=True, a new team instance is made via deep_copy(); member
    agents are copied as part of it.

    Args:
        team_id: The team ID to look up
        teams: List of teams to search
        create_fresh: If True, creates a new instance using deep_copy()
        db: Optional database used as a fallback lookup
        version: Optional team version to load from the database
        registry: Optional registry passed to the database loader

    Returns:
        The team instance (shared or fresh copy based on create_fresh), or None
    """
    if team_id is None:
        return None
    # In-memory lookup first
    for candidate in teams or []:
        if candidate.id == team_id:
            if create_fresh and isinstance(candidate, Team):
                return candidate.deep_copy()
            return candidate
    # Fall back to the database (sync databases only)
    if db and isinstance(db, BaseDb):
        from agno.team.team import get_team_by_id as get_team_by_id_db

        try:
            return get_team_by_id_db(db=db, id=team_id, version=version, registry=registry)
        except Exception as e:
            logger.error(f"Error getting team {team_id} from database: {e}")
            return None
    return None
def get_workflow_by_id(
    workflow_id: str,
    workflows: Optional[List[Union[Workflow, RemoteWorkflow]]] = None,
    create_fresh: bool = False,
    db: Optional[Union[BaseDb, AsyncBaseDb]] = None,
    version: Optional[int] = None,
    registry: Optional[Registry] = None,
) -> Optional[Union[Workflow, RemoteWorkflow]]:
    """Get a workflow by ID, optionally creating a fresh instance for request isolation.

    When create_fresh=True, a new workflow instance is made via deep_copy();
    steps containing agents/teams are copied as part of it.

    Args:
        workflow_id: The workflow ID to look up
        workflows: List of workflows to search
        create_fresh: If True, creates a new instance using deep_copy()
        db: Optional database interface
        version: Workflow version, if needed
        registry: Optional Registry instance

    Returns:
        The workflow instance (shared or fresh copy based on create_fresh), or None
    """
    if workflow_id is None:
        return None
    # In-memory lookup first
    for candidate in workflows or []:
        if candidate.id == workflow_id:
            if create_fresh and isinstance(candidate, Workflow):
                return candidate.deep_copy()
            return candidate
    # Fall back to the database (sync databases only)
    if db and isinstance(db, BaseDb):
        from agno.workflow.workflow import get_workflow_by_id as get_workflow_by_id_db

        try:
            return get_workflow_by_id_db(db=db, id=workflow_id, version=version, registry=registry)
        except Exception as e:
            logger.error(f"Error getting workflow {workflow_id} from database: {e}")
            return None
    return None
def resolve_origins(user_origins: Optional[List[str]] = None, default_origins: Optional[List[str]] = None) -> List[str]:
    """
    Get CORS origins - user-provided origins override defaults.

    Args:
        user_origins: Optional list of user-provided CORS origins
        default_origins: Optional list used when no user origins are provided

    Returns:
        List of allowed CORS origins (user-provided if set, otherwise defaults)
    """
    # A non-empty user list wins outright
    if user_origins:
        return user_origins
    if default_origins:
        return default_origins
    # Built-in Agno domains
    return [
        "http://localhost:3000",
        "https://agno.com",
        "https://www.agno.com",
        "https://app.agno.com",
        "https://os-stg.agno.com",
        "https://os.agno.com",
    ]
def update_cors_middleware(app: FastAPI, new_origins: list):
    """Re-register the app's CORS middleware with `new_origins` merged into any previously-configured origins.

    Args:
        app: The FastAPI application whose middleware stack is rewritten in place
        new_origins: Origins to add to whatever the existing CORS middleware allowed
    """
    existing_origins: List[str] = []
    # TODO: Allow more options where CORS is properly merged and user can disable this behaviour
    # Extract existing origins from current CORS middleware
    for middleware in app.user_middleware:
        if middleware.cls == CORSMiddleware:
            if hasattr(middleware, "kwargs"):
                origins_value = middleware.kwargs.get("allow_origins", [])
                if isinstance(origins_value, list):
                    existing_origins = origins_value
                else:
                    existing_origins = []
            break
    # Merge origins (set union deduplicates; ordering is not preserved)
    merged_origins = list(set(new_origins + existing_origins))
    # Drop any wildcard — presumably because allow_credentials=True below is
    # incompatible with "*" origins; confirm against Starlette CORS semantics
    final_origins = [origin for origin in merged_origins if origin != "*"]
    # Remove existing CORS
    app.user_middleware = [m for m in app.user_middleware if m.cls != CORSMiddleware]
    # Force the middleware stack to be rebuilt on next request
    app.middleware_stack = None
    # Add updated CORS
    app.add_middleware(
        CORSMiddleware,  # type: ignore
        allow_origins=final_origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
        expose_headers=["*"],
    )
def get_existing_route_paths(fastapi_app: FastAPI) -> Dict[str, List[str]]:
    """Get all existing route paths and methods from the FastAPI app.

    Returns:
        Dict[str, List[str]]: Dictionary mapping paths to list of HTTP methods
    """
    paths_to_methods: Dict[str, Any] = {}
    for route in fastapi_app.routes:
        if not isinstance(route, APIRoute):
            continue
        methods = list(route.methods) if route.methods else []
        # Accumulate methods for paths registered more than once
        paths_to_methods.setdefault(route.path, []).extend(methods)
    return paths_to_methods
def find_conflicting_routes(fastapi_app: FastAPI, router: APIRouter) -> List[Dict[str, Any]]:
    """Find conflicting routes in the FastAPI app.

    Args:
        fastapi_app: The FastAPI app with all existing routes
        router: The APIRouter to add

    Returns:
        List[Dict[str, Any]]: List of conflicting routes
    """
    existing = get_existing_route_paths(fastapi_app)
    conflicts: List[Dict[str, Any]] = []
    for candidate in router.routes:
        if not isinstance(candidate, APIRoute):
            continue
        candidate_methods = set(candidate.methods) if candidate.methods else set()
        # A conflict is the same path with at least one overlapping HTTP method
        overlapping: Set[str] = candidate_methods & set(existing.get(candidate.path, []))
        if overlapping:
            conflicts.append({"path": candidate.path, "methods": list(overlapping), "route": candidate})
    return conflicts
def load_yaml_config(config_file_path: str) -> AgentOSConfig:
    """Load a YAML config file and return the configuration as an AgentOSConfig instance."""
    from pathlib import Path

    import yaml

    # Reject anything that is not a YAML file by extension
    if Path(config_file_path).suffix.lower() not in (".yaml", ".yml"):
        raise ValueError(f"Config file must have a .yaml or .yml extension, got: {config_file_path}")
    # Parse the YAML and validate it against the AgentOSConfig schema
    with open(config_file_path, "r") as config_file:
        return AgentOSConfig.model_validate(yaml.safe_load(config_file))
def collect_mcp_tools_from_team(team: Team, mcp_tools: List[Any]) -> None:
    """Recursively collect MCP tool instances from a team and all of its members.

    Appends to ``mcp_tools`` in place, skipping duplicates.

    Args:
        team: The team whose tools (and members' tools) are scanned
        mcp_tools: Accumulator list mutated in place
    """

    def _is_mcp_tool(tool: Any) -> bool:
        # Name-based MRO check avoids importing MCPTools/MultiMCPTools here
        return hasattr(type(tool), "__mro__") and any(
            c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
        )

    def _collect_from_tools(tools: Any) -> None:
        # Gather MCP tools from a tools list, preserving order and uniqueness
        if tools and isinstance(tools, list):
            for tool in tools:
                if _is_mcp_tool(tool) and tool not in mcp_tools:
                    mcp_tools.append(tool)

    # Check the team tools
    _collect_from_tools(team.tools)
    # Recursively check team members
    if team.members and isinstance(team.members, list):
        for member in team.members:
            if isinstance(member, Agent):
                _collect_from_tools(member.tools)
            elif isinstance(member, Team):
                # Recursively check nested team
                collect_mcp_tools_from_team(member, mcp_tools)
def collect_mcp_tools_from_workflow(workflow: Workflow, mcp_tools: List[Any]) -> None:
    """Recursively collect MCP tools from a workflow and its steps."""
    from agno.workflow.steps import Steps

    steps_attr = workflow.steps
    if not steps_attr:
        return
    if isinstance(steps_attr, list):
        # Plain list of steps
        for workflow_step in steps_attr:
            collect_mcp_tools_from_workflow_step(workflow_step, mcp_tools)
    elif isinstance(steps_attr, Steps):
        # Steps container: recurse into its inner list
        inner_steps = steps_attr.steps
        if inner_steps:
            for workflow_step in inner_steps:
                collect_mcp_tools_from_workflow_step(workflow_step, mcp_tools)
    elif callable(steps_attr):
        # Callable step factories are not scanned
        pass
def collect_mcp_tools_from_workflow_step(step: Any, mcp_tools: List[Any]) -> None:
    """Collect MCP tools from a single workflow step.

    Handles plain Steps, step containers (Steps/Parallel/Loop/Condition/Router),
    and agents/teams/workflows placed directly in a workflow's steps list.
    Appends to ``mcp_tools`` in place, skipping duplicates.
    """
    from agno.workflow.condition import Condition
    from agno.workflow.loop import Loop
    from agno.workflow.parallel import Parallel
    from agno.workflow.router import Router
    from agno.workflow.step import Step
    from agno.workflow.steps import Steps

    def _is_mcp_tool(tool: Any) -> bool:
        # Name-based MRO check avoids importing MCPTools/MultiMCPTools here
        return hasattr(type(tool), "__mro__") and any(
            c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
        )

    def _collect_from_tools(tools: Any) -> None:
        # Gather MCP tools from a tools list, skipping duplicates
        if tools and isinstance(tools, list):
            for tool in tools:
                if _is_mcp_tool(tool) and tool not in mcp_tools:
                    mcp_tools.append(tool)

    if isinstance(step, Step):
        # Check step's agent
        if step.agent:
            _collect_from_tools(step.agent.tools)
        # Check step's team
        if step.team:
            collect_mcp_tools_from_team(step.team, mcp_tools)
    elif isinstance(step, Steps):
        # Step container: recurse into each contained step
        # (use a distinct loop variable; the original shadowed `step` here)
        if inner_steps := step.steps:
            for inner_step in inner_steps:
                collect_mcp_tools_from_workflow_step(inner_step, mcp_tools)
    elif isinstance(step, (Parallel, Loop, Condition, Router)):
        # These contain other steps - recursively check them
        if hasattr(step, "steps") and step.steps:
            for sub_step in step.steps:
                collect_mcp_tools_from_workflow_step(sub_step, mcp_tools)
    elif isinstance(step, Agent):
        # Direct agent in workflow steps
        _collect_from_tools(step.tools)
    elif isinstance(step, Team):
        # Direct team in workflow steps
        collect_mcp_tools_from_team(step, mcp_tools)
    elif isinstance(step, Workflow):
        # Nested workflow
        collect_mcp_tools_from_workflow(step, mcp_tools)
def _get_python_type_from_json_schema(field_schema: Dict[str, Any], field_name: str = "NestedModel") -> Type:
"""Map JSON schema type to Python type with recursive handling.
Args:
field_schema: JSON schema dictionary for a single field
field_name: Name of the field (used for nested model naming)
Returns:
Python type corresponding to the JSON schema type
"""
if not isinstance(field_schema, dict):
return Any
json_type = field_schema.get("type")
# Handle basic types
if json_type == "string":
return str
elif json_type == "integer":
return int
elif json_type == "number":
return float
elif json_type == "boolean":
return bool
elif json_type == "null":
return type(None)
elif json_type == "array":
# Handle arrays with item type specification
items_schema = field_schema.get("items")
if items_schema and isinstance(items_schema, dict):
item_type = _get_python_type_from_json_schema(items_schema, f"{field_name}Item")
return List[item_type] # type: ignore
else:
# No item type specified - use generic list
return List[Any]
elif json_type == "object":
# Recursively create nested Pydantic model
nested_properties = field_schema.get("properties", {})
nested_required = field_schema.get("required", [])
nested_title = field_schema.get("title", field_name)
# Build field definitions for nested model
nested_fields = {}
for nested_field_name, nested_field_schema in nested_properties.items():
nested_field_type = _get_python_type_from_json_schema(nested_field_schema, nested_field_name)
if nested_field_name in nested_required:
nested_fields[nested_field_name] = (nested_field_type, ...)
else:
nested_fields[nested_field_name] = (Optional[nested_field_type], None) # type: ignore[assignment]
# Create nested model if it has fields
if nested_fields:
return create_model(nested_title, **nested_fields) # type: ignore
else:
# Empty object schema - use generic dict
return Dict[str, Any]
else:
# Unknown or unspecified type - fallback to Any
if json_type:
logger.warning(f"Unknown JSON schema type '{json_type}' for field '{field_name}', using Any")
return Any # type: ignore
def json_schema_to_pydantic_model(schema: Dict[str, Any]) -> Type[BaseModel]:
    """Convert a JSON schema dictionary to a Pydantic BaseModel class.

    This function dynamically creates a Pydantic model from a JSON schema specification,
    handling nested objects, arrays, and optional fields.

    Args:
        schema: JSON schema dictionary with 'properties', 'required', 'type', etc.

    Returns:
        Dynamically created Pydantic BaseModel class
    """
    import copy

    # Work on a copy so the caller's schema is never mutated
    schema = copy.deepcopy(schema)
    model_name = schema.get("title", "DynamicModel")
    properties = schema.get("properties", {})
    required_fields = schema.get("required", [])
    if not properties:
        logger.warning(f"JSON schema '{model_name}' has no properties, creating empty model")
    field_definitions = {}
    for field_name, field_schema in properties.items():
        try:
            python_type = _get_python_type_from_json_schema(field_schema, field_name)
            if field_name in required_fields:
                # Required field: (type, ...)
                field_definitions[field_name] = (python_type, ...)
            else:
                # Optional field: (Optional[type], None)
                field_definitions[field_name] = (Optional[python_type], None)  # type: ignore[assignment]
        except Exception as e:
            logger.warning(f"Failed to process field '{field_name}' in schema '{model_name}': {e}")
            # Skip problematic fields rather than failing entirely
            continue
    try:
        return create_model(model_name, **field_definitions)  # type: ignore
    except Exception as e:
        logger.error(f"Failed to create dynamic model '{model_name}': {e}")
        # Return a minimal model as fallback
        return create_model(model_name)
def setup_tracing_for_os(db: Union[BaseDb, AsyncBaseDb, RemoteDb]) -> None:
    """Enable OpenTelemetry tracing backed by the given database, warning (not raising) on failure."""
    try:
        from agno.tracing import setup_tracing

        setup_tracing(db=db)
    except ImportError:
        # OpenTelemetry extras are optional; tell the user how to install them
        logger.warning(
            "tracing=True but OpenTelemetry packages not installed. "
            "Install with: pip install opentelemetry-api opentelemetry-sdk openinference-instrumentation-agno"
        )
    except Exception as exc:
        logger.warning(f"Failed to enable tracing: {exc}")
def format_duration_ms(duration_ms: Optional[int]) -> str:
    """Format a duration in milliseconds to a human-readable string.

    Args:
        duration_ms: Duration in milliseconds

    Returns:
        Formatted string like "150ms" or "1.50s"
    """
    # Durations of a second or more render in seconds with two decimals
    if duration_ms is not None and duration_ms >= 1000:
        return f"{duration_ms / 1000:.2f}s"
    # Sub-second (or missing) durations render in milliseconds
    return f"{duration_ms or 0}ms"
def timestamp_to_datetime(datetime_str: str, param_name: str = "datetime") -> "datetime":
    """Parse an ISO 8601 datetime string and convert to UTC.

    Args:
        datetime_str: ISO 8601 formatted datetime string (e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30')
        param_name: Name of the parameter for error messages

    Returns:
        datetime object in UTC timezone

    Raises:
        HTTPException: If the datetime string is invalid
    """
    # fromisoformat on older Pythons does not accept a trailing 'Z'; normalize it
    normalized = datetime_str.replace("Z", "+00:00")
    try:
        parsed = datetime.fromisoformat(normalized)
    except ValueError as e:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid {param_name} format. Use ISO 8601 format (e.g., '2025-11-19T10:00:00Z' or '2025-11-19T10:00:00+05:30'): {e}",
        )
    # Naive datetimes are assumed to already be UTC
    if parsed.tzinfo is None:
        return parsed.replace(tzinfo=timezone.utc)
    return parsed.astimezone(timezone.utc)
def format_team_tools(team_tools: List[Union[Function, dict]]):
    """Normalize a team's tools into a list of plain dicts.

    Dicts pass through unchanged; Function objects are serialized via to_dict().
    Returns an empty list when team_tools is None.
    """
    serialized: List[Dict] = []
    if team_tools is None:
        return serialized
    for tool in team_tools:
        if isinstance(tool, dict):
            serialized.append(tool)
        elif isinstance(tool, Function):
            serialized.append(tool.to_dict())
    return serialized
def format_tools(agent_tools: List[Union[Dict[str, Any], Toolkit, Function, Callable]]):
    """Normalize an agent's tools (dicts, Toolkits, Functions, callables) into plain dicts.

    Unknown tool types are skipped with a warning. Returns [] when agent_tools is None.
    """
    serialized: List[Dict] = []
    if agent_tools is None:
        return serialized
    for tool in agent_tools:
        if isinstance(tool, dict):
            serialized.append(tool)
        elif isinstance(tool, Toolkit):
            # A toolkit contributes every function it contains
            serialized.extend(f.to_dict() for f in tool.functions.values())
        elif isinstance(tool, Function):
            serialized.append(tool.to_dict())
        elif callable(tool):
            serialized.append(Function.from_callable(tool).to_dict())
        else:
            logger.warning(f"Unknown tool type: {type(tool)}")
    return serialized
def stringify_input_content(input_content: Union[str, Dict[str, Any], List[Any], BaseModel]) -> str:
    """Convert any given input_content into its string representation.

    This handles both serialized (dict) and live (object) input_content formats.
    """
    import json

    if isinstance(input_content, str):
        return input_content
    if isinstance(input_content, Message):
        return json.dumps(input_content.to_dict())
    if isinstance(input_content, dict):
        return json.dumps(input_content, indent=2, default=str)
    if isinstance(input_content, list):
        if not input_content:
            return str(input_content)
        first = input_content[0]
        # Handle live Message objects
        if isinstance(first, Message):
            return json.dumps([m.to_dict() for m in input_content])
        # Handle serialized Message dicts
        if isinstance(first, dict) and first.get("role") == "user":
            return first.get("content", str(input_content))
        return str(input_content)
    return str(input_content)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/utils.py",
"license": "Apache License 2.0",
"lines": 896,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/run/agent.py | from dataclasses import asdict, dataclass, field
from enum import Enum
from time import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.message import Citations, Message
from agno.models.metrics import RunMetrics
from agno.models.response import ToolExecution
from agno.reasoning.step import ReasoningStep
from agno.run.base import BaseRunOutputEvent, MessageReferences, RunStatus
from agno.run.requirement import RunRequirement
from agno.utils.log import logger
from agno.utils.media import (
reconstruct_audio_list,
reconstruct_files,
reconstruct_images,
reconstruct_response_audio,
reconstruct_videos,
)
if TYPE_CHECKING:
from agno.session.summary import SessionSummary
@dataclass
class RunInput:
    """Container for the raw input data passed to Agent.run().
    This captures the original input exactly as provided by the user,
    separate from the processed messages that go to the model.
    Attributes:
        input_content: The literal input message/content passed to run()
        images: Images directly passed to run()
        videos: Videos directly passed to run()
        audios: Audio files directly passed to run()
        files: Files directly passed to run()
    """
    input_content: Union[str, List, Dict, Message, BaseModel, List[Message]]
    images: Optional[Sequence[Image]] = None
    videos: Optional[Sequence[Video]] = None
    audios: Optional[Sequence[Audio]] = None
    files: Optional[Sequence[File]] = None
    def input_content_string(self) -> str:
        """Render input_content as a plain string (JSON for structured inputs)."""
        import json
        if isinstance(self.input_content, (str)):
            return self.input_content
        elif isinstance(self.input_content, BaseModel):
            return self.input_content.model_dump_json(exclude_none=True)
        elif isinstance(self.input_content, Message):
            return json.dumps(self.input_content.to_dict())
        elif isinstance(self.input_content, list):
            # Reuse to_dict() so list items (Messages, models, media dicts)
            # are serialized consistently; fall back to str() if any item is
            # not JSON-serializable.
            try:
                return json.dumps(self.to_dict().get("input_content"))
            except Exception:
                return str(self.input_content)
        else:
            return str(self.input_content)
    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary representation"""
        result: Dict[str, Any] = {}
        if self.input_content is not None:
            if isinstance(self.input_content, (str)):
                result["input_content"] = self.input_content
            elif isinstance(self.input_content, BaseModel):
                result["input_content"] = self.input_content.model_dump(exclude_none=True)
            elif isinstance(self.input_content, Message):
                result["input_content"] = self.input_content.to_dict()
            elif isinstance(self.input_content, list):
                serialized_items: List[Any] = []
                for item in self.input_content:
                    if isinstance(item, Message):
                        serialized_items.append(item.to_dict())
                    elif isinstance(item, BaseModel):
                        serialized_items.append(item.model_dump(exclude_none=True))
                    elif isinstance(item, dict):
                        # Shallow-copy so media objects inside the caller's
                        # dict are not replaced in place by their dict forms.
                        content = dict(item)
                        if content.get("images"):
                            content["images"] = [
                                img.to_dict() if isinstance(img, Image) else img for img in content["images"]
                            ]
                        if content.get("videos"):
                            content["videos"] = [
                                vid.to_dict() if isinstance(vid, Video) else vid for vid in content["videos"]
                            ]
                        if content.get("audios"):
                            content["audios"] = [
                                aud.to_dict() if isinstance(aud, Audio) else aud for aud in content["audios"]
                            ]
                        if content.get("files"):
                            content["files"] = [
                                file.to_dict() if isinstance(file, File) else file for file in content["files"]
                            ]
                        serialized_items.append(content)
                    else:
                        serialized_items.append(item)
                result["input_content"] = serialized_items
            else:
                result["input_content"] = self.input_content
        if self.images:
            result["images"] = [img.to_dict() for img in self.images]
        if self.videos:
            result["videos"] = [vid.to_dict() for vid in self.videos]
        if self.audios:
            result["audios"] = [aud.to_dict() for aud in self.audios]
        if self.files:
            result["files"] = [file.to_dict() for file in self.files]
        return result
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "RunInput":
        """Create RunInput from dictionary"""
        images = reconstruct_images(data.get("images"))
        videos = reconstruct_videos(data.get("videos"))
        audios = reconstruct_audio_list(data.get("audios"))
        files = reconstruct_files(data.get("files"))
        return cls(
            input_content=data.get("input_content", ""), images=images, videos=videos, audios=audios, files=files
        )
class RunEvent(str, Enum):
    """Events that can be sent by the run() functions"""
    # Run lifecycle
    run_started = "RunStarted"
    run_content = "RunContent"
    run_content_completed = "RunContentCompleted"
    run_intermediate_content = "RunIntermediateContent"
    run_completed = "RunCompleted"
    run_error = "RunError"
    run_cancelled = "RunCancelled"
    run_paused = "RunPaused"
    run_continued = "RunContinued"
    # Pre/post-run hooks
    pre_hook_started = "PreHookStarted"
    pre_hook_completed = "PreHookCompleted"
    post_hook_started = "PostHookStarted"
    post_hook_completed = "PostHookCompleted"
    # Tool calls
    tool_call_started = "ToolCallStarted"
    tool_call_completed = "ToolCallCompleted"
    tool_call_error = "ToolCallError"
    # Reasoning phase
    reasoning_started = "ReasoningStarted"
    reasoning_step = "ReasoningStep"
    reasoning_content_delta = "ReasoningContentDelta"
    reasoning_completed = "ReasoningCompleted"
    # Memory / session summary
    memory_update_started = "MemoryUpdateStarted"
    memory_update_completed = "MemoryUpdateCompleted"
    session_summary_started = "SessionSummaryStarted"
    session_summary_completed = "SessionSummaryCompleted"
    # Auxiliary model passes (parser/output models, raw model requests)
    parser_model_response_started = "ParserModelResponseStarted"
    parser_model_response_completed = "ParserModelResponseCompleted"
    output_model_response_started = "OutputModelResponseStarted"
    output_model_response_completed = "OutputModelResponseCompleted"
    model_request_started = "ModelRequestStarted"
    model_request_completed = "ModelRequestCompleted"
    # Tool-result compression
    compression_started = "CompressionStarted"
    compression_completed = "CompressionCompleted"
    # User-defined
    custom_event = "CustomEvent"
@dataclass
class BaseAgentRunEvent(BaseRunOutputEvent):
    """Common fields shared by every agent run event (identity, timing, step context)."""
    created_at: int = field(default_factory=lambda: int(time()))
    event: str = ""
    agent_id: str = ""
    agent_name: str = ""
    run_id: Optional[str] = None
    parent_run_id: Optional[str] = None
    session_id: Optional[str] = None
    # Step context for workflow execution
    workflow_id: Optional[str] = None
    workflow_run_id: Optional[str] = None
    step_id: Optional[str] = None
    step_name: Optional[str] = None
    step_index: Optional[int] = None
    tools: Optional[List[ToolExecution]] = None
    # For backwards compatibility
    content: Optional[Any] = None
    @property
    def tools_requiring_confirmation(self):
        """Tool executions flagged as requiring user confirmation."""
        return [t for t in self.tools if t.requires_confirmation] if self.tools else []
    @property
    def tools_requiring_user_input(self):
        """Tool executions flagged as requiring additional user input."""
        return [t for t in self.tools if t.requires_user_input] if self.tools else []
    @property
    def tools_awaiting_external_execution(self):
        """Tool executions flagged for execution outside the agent loop."""
        return [t for t in self.tools if t.external_execution_required] if self.tools else []
@dataclass
class RunStartedEvent(BaseAgentRunEvent):
    """Event sent when the run starts"""
    event: str = RunEvent.run_started.value
    model: str = ""
    model_provider: str = ""
@dataclass
class RunContentEvent(BaseAgentRunEvent):
    """Main event for each delta of the RunOutput"""
    event: str = RunEvent.run_content.value
    content: Optional[Any] = None
    workflow_agent: bool = (
        False  # Used by consumers of the events to distinguish between workflow agent and regular agent
    )
    content_type: str = "str"
    reasoning_content: Optional[str] = None
    model_provider_data: Optional[Dict[str, Any]] = None
    citations: Optional[Citations] = None
    response_audio: Optional[Audio] = None  # Model audio response
    image: Optional[Image] = None  # Image attached to the response
    references: Optional[List[MessageReferences]] = None
    additional_input: Optional[List[Message]] = None
    reasoning_steps: Optional[List[ReasoningStep]] = None
    reasoning_messages: Optional[List[Message]] = None
@dataclass
class RunContentCompletedEvent(BaseAgentRunEvent):
    """Event sent when streaming of run content has finished."""
    event: str = RunEvent.run_content_completed.value
@dataclass
class IntermediateRunContentEvent(BaseAgentRunEvent):
    """Event carrying intermediate (non-final) content produced during the run."""
    event: str = RunEvent.run_intermediate_content.value
    content: Optional[Any] = None
    content_type: str = "str"
@dataclass
class RunCompletedEvent(BaseAgentRunEvent):
    """Event sent when the run completes, carrying the final output and artifacts."""
    event: str = RunEvent.run_completed.value
    content: Optional[Any] = None
    content_type: str = "str"
    reasoning_content: Optional[str] = None
    citations: Optional[Citations] = None
    model_provider_data: Optional[Dict[str, Any]] = None
    images: Optional[List[Image]] = None  # Images attached to the response
    videos: Optional[List[Video]] = None  # Videos attached to the response
    audio: Optional[List[Audio]] = None  # Audio attached to the response
    response_audio: Optional[Audio] = None  # Model audio response
    references: Optional[List[MessageReferences]] = None
    additional_input: Optional[List[Message]] = None
    reasoning_steps: Optional[List[ReasoningStep]] = None
    reasoning_messages: Optional[List[Message]] = None
    metadata: Optional[Dict[str, Any]] = None
    metrics: Optional[RunMetrics] = None
    session_state: Optional[Dict[str, Any]] = None
@dataclass
class RunPausedEvent(BaseAgentRunEvent):
    """Event sent when the run pauses awaiting user action (HITL)."""
    event: str = RunEvent.run_paused.value
    tools: Optional[List[ToolExecution]] = None
    requirements: Optional[List[RunRequirement]] = None
    @property
    def is_paused(self):
        """Always True for a paused event."""
        return True
    @property
    def active_requirements(self) -> List[RunRequirement]:
        """Requirements that are not yet resolved."""
        if not self.requirements:
            return []
        return [requirement for requirement in self.requirements if not requirement.is_resolved()]
@dataclass
class RunContinuedEvent(BaseAgentRunEvent):
    """Event sent when a paused run is resumed."""
    event: str = RunEvent.run_continued.value
@dataclass
class RunErrorEvent(BaseAgentRunEvent):
    """Event sent when the run fails with an error."""
    event: str = RunEvent.run_error.value
    content: Optional[str] = None
    # From exceptions
    error_type: Optional[str] = None
    error_id: Optional[str] = None
    additional_data: Optional[Dict[str, Any]] = None
@dataclass
class RunCancelledEvent(BaseAgentRunEvent):
    """Event sent when the run is cancelled."""
    event: str = RunEvent.run_cancelled.value
    reason: Optional[str] = None
    @property
    def is_cancelled(self):
        """Always True for a cancelled event."""
        return True
@dataclass
class PreHookStartedEvent(BaseAgentRunEvent):
    """Event sent when a pre-run hook begins."""
    event: str = RunEvent.pre_hook_started.value
    pre_hook_name: Optional[str] = None
    run_input: Optional[RunInput] = None
@dataclass
class PreHookCompletedEvent(BaseAgentRunEvent):
    """Event sent when a pre-run hook finishes (run_input may have been modified)."""
    event: str = RunEvent.pre_hook_completed.value
    pre_hook_name: Optional[str] = None
    run_input: Optional[RunInput] = None
@dataclass
class PostHookStartedEvent(BaseAgentRunEvent):
    """Event sent when a post-run hook begins."""
    event: str = RunEvent.post_hook_started.value
    post_hook_name: Optional[str] = None
@dataclass
class PostHookCompletedEvent(BaseAgentRunEvent):
    """Event sent when a post-run hook finishes."""
    event: str = RunEvent.post_hook_completed.value
    post_hook_name: Optional[str] = None
@dataclass
class MemoryUpdateStartedEvent(BaseAgentRunEvent):
    """Event sent when a memory update begins."""
    event: str = RunEvent.memory_update_started.value
@dataclass
class MemoryUpdateCompletedEvent(BaseAgentRunEvent):
    """Event sent when a memory update finishes."""
    event: str = RunEvent.memory_update_completed.value
    memories: Optional[List[Any]] = None
@dataclass
class SessionSummaryStartedEvent(BaseAgentRunEvent):
    """Event sent when session summarization begins."""
    event: str = RunEvent.session_summary_started.value
@dataclass
class SessionSummaryCompletedEvent(BaseAgentRunEvent):
    """Event sent when session summarization finishes."""
    event: str = RunEvent.session_summary_completed.value
    session_summary: Optional["SessionSummary"] = None
@dataclass
class ReasoningStartedEvent(BaseAgentRunEvent):
    """Event sent when the reasoning phase begins."""
    event: str = RunEvent.reasoning_started.value
@dataclass
class ReasoningStepEvent(BaseAgentRunEvent):
    """Event carrying one completed reasoning step."""
    event: str = RunEvent.reasoning_step.value
    content: Optional[Any] = None
    content_type: str = "str"
    reasoning_content: str = ""
@dataclass
class ReasoningContentDeltaEvent(BaseAgentRunEvent):
    """Event for streaming reasoning content chunks as they arrive."""
    event: str = RunEvent.reasoning_content_delta.value
    reasoning_content: str = ""  # The delta/chunk of reasoning content
@dataclass
class ReasoningCompletedEvent(BaseAgentRunEvent):
    """Event sent when the reasoning phase finishes."""
    event: str = RunEvent.reasoning_completed.value
    content: Optional[Any] = None
    content_type: str = "str"
@dataclass
class ToolCallStartedEvent(BaseAgentRunEvent):
    """Event sent when a tool call begins."""
    event: str = RunEvent.tool_call_started.value
    tool: Optional[ToolExecution] = None
@dataclass
class ToolCallCompletedEvent(BaseAgentRunEvent):
    """Event sent when a tool call finishes, carrying its result and any media."""
    event: str = RunEvent.tool_call_completed.value
    tool: Optional[ToolExecution] = None
    content: Optional[Any] = None
    images: Optional[List[Image]] = None  # Images produced by the tool call
    videos: Optional[List[Video]] = None  # Videos produced by the tool call
    audio: Optional[List[Audio]] = None  # Audio produced by the tool call
@dataclass
class ToolCallErrorEvent(BaseAgentRunEvent):
    """Event sent when a tool call raises an error."""
    event: str = RunEvent.tool_call_error.value
    tool: Optional[ToolExecution] = None
    error: Optional[str] = None
@dataclass
class ParserModelResponseStartedEvent(BaseAgentRunEvent):
    """Event sent when the parser model starts producing a response."""
    event: str = RunEvent.parser_model_response_started.value
@dataclass
class ParserModelResponseCompletedEvent(BaseAgentRunEvent):
    """Event sent when the parser model finishes its response."""
    event: str = RunEvent.parser_model_response_completed.value
@dataclass
class OutputModelResponseStartedEvent(BaseAgentRunEvent):
    """Event sent when the output model starts producing a response."""
    event: str = RunEvent.output_model_response_started.value
@dataclass
class OutputModelResponseCompletedEvent(BaseAgentRunEvent):
    """Event sent when the output model finishes its response."""
    event: str = RunEvent.output_model_response_completed.value
@dataclass
class ModelRequestStartedEvent(BaseAgentRunEvent):
    """Event sent when a model request is about to be made"""
    event: str = RunEvent.model_request_started.value
    model: Optional[str] = None
    model_provider: Optional[str] = None
@dataclass
class ModelRequestCompletedEvent(BaseAgentRunEvent):
    """Event sent when a model request has completed"""
    event: str = RunEvent.model_request_completed.value
    model: Optional[str] = None
    model_provider: Optional[str] = None
    input_tokens: Optional[int] = None
    output_tokens: Optional[int] = None
    total_tokens: Optional[int] = None
    time_to_first_token: Optional[float] = None
    reasoning_tokens: Optional[int] = None
    cache_read_tokens: Optional[int] = None
    cache_write_tokens: Optional[int] = None
@dataclass
class CompressionStartedEvent(BaseAgentRunEvent):
    """Event sent when tool result compression is about to start"""
    event: str = RunEvent.compression_started.value
@dataclass
class CompressionCompletedEvent(BaseAgentRunEvent):
    """Event sent when tool result compression has completed"""
    event: str = RunEvent.compression_completed.value
    tool_results_compressed: Optional[int] = None
    original_size: Optional[int] = None
    compressed_size: Optional[int] = None
@dataclass
class CustomEvent(BaseAgentRunEvent):
    """User-defined event; accepts and stores arbitrary keyword attributes."""
    event: str = RunEvent.custom_event.value
    # tool_call_id for ToolExecution
    tool_call_id: Optional[str] = None
    def __init__(self, **kwargs):
        # Overrides the dataclass-generated __init__: every kwarg is stored as
        # an instance attribute; unset fields fall back to class-level defaults.
        # Store arbitrary attributes directly on the instance
        for key, value in kwargs.items():
            setattr(self, key, value)
# Union of every concrete event type an agent run can emit.
RunOutputEvent = Union[
    RunStartedEvent,
    RunContentEvent,
    IntermediateRunContentEvent,
    RunContentCompletedEvent,
    RunCompletedEvent,
    RunErrorEvent,
    RunCancelledEvent,
    RunPausedEvent,
    RunContinuedEvent,
    PreHookStartedEvent,
    PreHookCompletedEvent,
    PostHookStartedEvent,
    PostHookCompletedEvent,
    ReasoningStartedEvent,
    ReasoningStepEvent,
    ReasoningContentDeltaEvent,
    ReasoningCompletedEvent,
    MemoryUpdateStartedEvent,
    MemoryUpdateCompletedEvent,
    SessionSummaryStartedEvent,
    SessionSummaryCompletedEvent,
    ToolCallStartedEvent,
    ToolCallCompletedEvent,
    ToolCallErrorEvent,
    ParserModelResponseStartedEvent,
    ParserModelResponseCompletedEvent,
    OutputModelResponseStartedEvent,
    OutputModelResponseCompletedEvent,
    ModelRequestStartedEvent,
    ModelRequestCompletedEvent,
    CompressionStartedEvent,
    CompressionCompletedEvent,
    CustomEvent,
]
# Map event string to dataclass (used by run_output_event_from_dict)
RUN_EVENT_TYPE_REGISTRY = {
    RunEvent.run_started.value: RunStartedEvent,
    RunEvent.run_content.value: RunContentEvent,
    RunEvent.run_content_completed.value: RunContentCompletedEvent,
    RunEvent.run_intermediate_content.value: IntermediateRunContentEvent,
    RunEvent.run_completed.value: RunCompletedEvent,
    RunEvent.run_error.value: RunErrorEvent,
    RunEvent.run_cancelled.value: RunCancelledEvent,
    RunEvent.run_paused.value: RunPausedEvent,
    RunEvent.run_continued.value: RunContinuedEvent,
    RunEvent.pre_hook_started.value: PreHookStartedEvent,
    RunEvent.pre_hook_completed.value: PreHookCompletedEvent,
    RunEvent.post_hook_started.value: PostHookStartedEvent,
    RunEvent.post_hook_completed.value: PostHookCompletedEvent,
    RunEvent.reasoning_started.value: ReasoningStartedEvent,
    RunEvent.reasoning_step.value: ReasoningStepEvent,
    RunEvent.reasoning_content_delta.value: ReasoningContentDeltaEvent,
    RunEvent.reasoning_completed.value: ReasoningCompletedEvent,
    RunEvent.memory_update_started.value: MemoryUpdateStartedEvent,
    RunEvent.memory_update_completed.value: MemoryUpdateCompletedEvent,
    RunEvent.session_summary_started.value: SessionSummaryStartedEvent,
    RunEvent.session_summary_completed.value: SessionSummaryCompletedEvent,
    RunEvent.tool_call_started.value: ToolCallStartedEvent,
    RunEvent.tool_call_completed.value: ToolCallCompletedEvent,
    RunEvent.tool_call_error.value: ToolCallErrorEvent,
    RunEvent.parser_model_response_started.value: ParserModelResponseStartedEvent,
    RunEvent.parser_model_response_completed.value: ParserModelResponseCompletedEvent,
    RunEvent.output_model_response_started.value: OutputModelResponseStartedEvent,
    RunEvent.output_model_response_completed.value: OutputModelResponseCompletedEvent,
    RunEvent.model_request_started.value: ModelRequestStartedEvent,
    RunEvent.model_request_completed.value: ModelRequestCompletedEvent,
    RunEvent.compression_started.value: CompressionStartedEvent,
    RunEvent.compression_completed.value: CompressionCompletedEvent,
    RunEvent.custom_event.value: CustomEvent,
}
def run_output_event_from_dict(data: dict) -> BaseRunOutputEvent:
    """Rehydrate a serialized run event via RUN_EVENT_TYPE_REGISTRY.

    Raises:
        ValueError: if the ``event`` key maps to no registered event class.
    """
    event_type = data.get("event", "")
    cls = RUN_EVENT_TYPE_REGISTRY.get(event_type)
    if cls is None:
        raise ValueError(f"Unknown event type: {event_type}")
    return cls.from_dict(data)  # type: ignore
@dataclass
class RunOutput:
    """Response returned by Agent.run() or Workflow.run() functions"""
    run_id: Optional[str] = None
    agent_id: Optional[str] = None
    agent_name: Optional[str] = None
    session_id: Optional[str] = None
    parent_run_id: Optional[str] = None
    workflow_id: Optional[str] = None
    user_id: Optional[str] = None
    # Input media and messages from user
    input: Optional[RunInput] = None
    content: Optional[Any] = None
    content_type: str = "str"
    reasoning_content: Optional[str] = None
    reasoning_steps: Optional[List[ReasoningStep]] = None
    reasoning_messages: Optional[List[Message]] = None
    model_provider_data: Optional[Dict[str, Any]] = None
    model: Optional[str] = None
    model_provider: Optional[str] = None
    messages: Optional[List[Message]] = None
    metrics: Optional[RunMetrics] = None
    additional_input: Optional[List[Message]] = None
    tools: Optional[List[ToolExecution]] = None
    images: Optional[List[Image]] = None  # Images attached to the response
    videos: Optional[List[Video]] = None  # Videos attached to the response
    audio: Optional[List[Audio]] = None  # Audio attached to the response
    files: Optional[List[File]] = None  # Files attached to the response
    response_audio: Optional[Audio] = None  # Model audio response
    citations: Optional[Citations] = None
    references: Optional[List[MessageReferences]] = None
    metadata: Optional[Dict[str, Any]] = None
    session_state: Optional[Dict[str, Any]] = None
    created_at: int = field(default_factory=lambda: int(time()))
    events: Optional[List[RunOutputEvent]] = None
    status: RunStatus = RunStatus.running
    # User control flow (HITL) requirements to continue a run when paused, in order of arrival
    requirements: Optional[list[RunRequirement]] = None
    # === FOREIGN KEY RELATIONSHIPS ===
    # These fields establish relationships to parent workflow/step structures
    # and should be treated as foreign keys for data integrity
    workflow_step_id: Optional[str] = None  # FK: Points to StepOutput.step_id
    @property
    def active_requirements(self) -> list[RunRequirement]:
        """Requirements that are not yet resolved."""
        if not self.requirements:
            return []
        return [requirement for requirement in self.requirements if not requirement.is_resolved()]
    @property
    def is_paused(self):
        """True when the run is paused awaiting user action."""
        return self.status == RunStatus.paused
    @property
    def is_cancelled(self):
        """True when the run was cancelled."""
        return self.status == RunStatus.cancelled
    @property
    def tools_requiring_confirmation(self):
        """Tool executions flagged as requiring user confirmation."""
        return [t for t in self.tools if t.requires_confirmation] if self.tools else []
    @property
    def tools_requiring_user_input(self):
        """Tool executions flagged as requiring additional user input."""
        return [t for t in self.tools if t.requires_user_input] if self.tools else []
    @property
    def tools_awaiting_external_execution(self):
        """Tool executions flagged for execution outside the agent loop."""
        return [t for t in self.tools if t.external_execution_required] if self.tools else []
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict.

        Fields whose elements need custom serialization are excluded from the
        initial asdict() pass and re-added explicitly below.
        """
        _dict = {
            k: v
            for k, v in asdict(self).items()
            if v is not None
            and k
            not in [
                "messages",
                "metrics",
                "tools",
                "metadata",
                "images",
                "videos",
                "audio",
                "files",
                "response_audio",
                "input",
                "citations",
                "events",
                "additional_input",
                "reasoning_steps",
                "reasoning_messages",
                "references",
                "requirements",
            ]
        }
        if self.metrics is not None:
            _dict["metrics"] = self.metrics.to_dict() if isinstance(self.metrics, RunMetrics) else self.metrics
        if self.events is not None:
            _dict["events"] = [e.to_dict() for e in self.events]
        if self.status is not None:
            _dict["status"] = self.status.value if isinstance(self.status, RunStatus) else self.status
        if self.messages is not None:
            _dict["messages"] = [m.to_dict() for m in self.messages]
        if self.metadata is not None:
            _dict["metadata"] = self.metadata
        if self.additional_input is not None:
            _dict["additional_input"] = [m.to_dict() for m in self.additional_input]
        if self.reasoning_messages is not None:
            _dict["reasoning_messages"] = [m.to_dict() for m in self.reasoning_messages]
        if self.reasoning_steps is not None:
            _dict["reasoning_steps"] = [rs.model_dump() for rs in self.reasoning_steps]
        if self.references is not None:
            _dict["references"] = [r.model_dump() for r in self.references]
        # Media lists may contain already-serialized dicts; only live objects
        # are converted via to_dict().
        if self.images is not None:
            _dict["images"] = []
            for img in self.images:
                if isinstance(img, Image):
                    _dict["images"].append(img.to_dict())
                else:
                    _dict["images"].append(img)
        if self.videos is not None:
            _dict["videos"] = []
            for vid in self.videos:
                if isinstance(vid, Video):
                    _dict["videos"].append(vid.to_dict())
                else:
                    _dict["videos"].append(vid)
        if self.audio is not None:
            _dict["audio"] = []
            for aud in self.audio:
                if isinstance(aud, Audio):
                    _dict["audio"].append(aud.to_dict())
                else:
                    _dict["audio"].append(aud)
        if self.files is not None:
            _dict["files"] = []
            for file in self.files:
                if isinstance(file, File):
                    _dict["files"].append(file.to_dict())
                else:
                    _dict["files"].append(file)
        if self.response_audio is not None:
            if isinstance(self.response_audio, Audio):
                _dict["response_audio"] = self.response_audio.to_dict()
            else:
                _dict["response_audio"] = self.response_audio
        if self.citations is not None:
            if isinstance(self.citations, Citations):
                _dict["citations"] = self.citations.model_dump(exclude_none=True)
            else:
                _dict["citations"] = self.citations
        if self.content and isinstance(self.content, BaseModel):
            _dict["content"] = self.content.model_dump(exclude_none=True, mode="json")
        if self.tools is not None:
            _dict["tools"] = []
            for tool in self.tools:
                if isinstance(tool, ToolExecution):
                    _dict["tools"].append(tool.to_dict())
                else:
                    _dict["tools"].append(tool)
        if self.requirements is not None:
            _dict["requirements"] = [req.to_dict() if hasattr(req, "to_dict") else req for req in self.requirements]
        if self.input is not None:
            _dict["input"] = self.input.to_dict()
        return _dict
    def to_json(self, separators=(", ", ": "), indent: Optional[int] = 2) -> str:
        """Serialize to a JSON string; ``indent=None`` produces compact output."""
        import json
        try:
            _dict = self.to_dict()
        except Exception:
            logger.error("Failed to convert response to json", exc_info=True)
            raise
        if indent is None:
            return json.dumps(_dict, separators=separators)
        else:
            return json.dumps(_dict, indent=indent, separators=separators)
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "RunOutput":
        """Reconstruct a RunOutput from its serialized dict form."""
        # Some payloads nest everything under a "run" key; unwrap it first.
        if "run" in data:
            data = data.pop("run")
        events = data.pop("events", None)
        final_events = []
        for event in events or []:
            if "agent_id" in event:
                event = run_output_event_from_dict(event)
            else:
                # No agent_id means this is a team event; use the team factory
                from agno.run.team import team_run_output_event_from_dict
                event = team_run_output_event_from_dict(event)
            final_events.append(event)
        events = final_events
        messages = data.pop("messages", None)
        messages = [Message.from_dict(message) for message in messages] if messages else None
        citations = data.pop("citations", None)
        citations = Citations.model_validate(citations) if citations else None
        tools = data.pop("tools", [])
        tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
        # Handle requirements
        requirements_data = data.pop("requirements", None)
        requirements: Optional[List[RunRequirement]] = None
        if requirements_data is not None:
            requirements_list: List[RunRequirement] = []
            for item in requirements_data:
                if isinstance(item, RunRequirement):
                    requirements_list.append(item)
                elif isinstance(item, dict):
                    requirements_list.append(RunRequirement.from_dict(item))
            requirements = requirements_list if requirements_list else None
        images = reconstruct_images(data.pop("images", []))
        videos = reconstruct_videos(data.pop("videos", []))
        audio = reconstruct_audio_list(data.pop("audio", []))
        files = reconstruct_files(data.pop("files", []))
        response_audio = reconstruct_response_audio(data.pop("response_audio", None))
        input_data = data.pop("input", None)
        input_obj = None
        if input_data:
            input_obj = RunInput.from_dict(input_data)
        metrics = data.pop("metrics", None)
        if metrics:
            metrics = RunMetrics.from_dict(metrics)
        additional_input = data.pop("additional_input", None)
        if additional_input is not None:
            additional_input = [Message.from_dict(message) for message in additional_input]
        reasoning_steps = data.pop("reasoning_steps", None)
        if reasoning_steps is not None:
            reasoning_steps = [ReasoningStep.model_validate(step) for step in reasoning_steps]
        reasoning_messages = data.pop("reasoning_messages", None)
        if reasoning_messages is not None:
            reasoning_messages = [Message.from_dict(message) for message in reasoning_messages]
        references = data.pop("references", None)
        if references is not None:
            references = [MessageReferences.model_validate(reference) for reference in references]
        # Filter data to only include fields that are actually defined in the RunOutput dataclass
        from dataclasses import fields
        supported_fields = {f.name for f in fields(cls)}
        filtered_data = {k: v for k, v in data.items() if k in supported_fields}
        return cls(
            messages=messages,
            metrics=metrics,
            citations=citations,
            tools=tools,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            response_audio=response_audio,
            input=input_obj,
            events=events,
            additional_input=additional_input,
            reasoning_steps=reasoning_steps,
            reasoning_messages=reasoning_messages,
            references=references,
            requirements=requirements,
            **filtered_data,
        )
    def get_content_as_string(self, **kwargs) -> str:
        """Return content as a string: passthrough for str, JSON otherwise."""
        import json
        from pydantic import BaseModel
        if isinstance(self.content, str):
            return self.content
        elif isinstance(self.content, BaseModel):
            return self.content.model_dump_json(exclude_none=True, **kwargs)
        else:
            return json.dumps(self.content, **kwargs)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/agent.py",
"license": "Apache License 2.0",
"lines": 711,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/run/cancel.py | """Run cancellation management."""
from typing import Dict
from agno.run.cancellation_management.base import BaseRunCancellationManager
from agno.run.cancellation_management.in_memory_cancellation_manager import InMemoryRunCancellationManager
from agno.utils.log import logger
# Global cancellation manager instance
_cancellation_manager: BaseRunCancellationManager = InMemoryRunCancellationManager()
# All module-level functions below are thin wrappers that delegate to the
# currently configured cancellation manager, so callers never touch the
# global directly.
def set_cancellation_manager(manager: BaseRunCancellationManager) -> None:
    """Set a custom cancellation manager.
    Args:
        manager: A BaseRunCancellationManager instance or subclass.
    Example:
        ```python
        class MyCustomManager(BaseRunCancellationManager):
            ....
        set_cancellation_manager(MyCustomManager())
        ```
    """
    global _cancellation_manager
    _cancellation_manager = manager
    logger.info(f"Cancellation manager set to {type(manager).__name__}")
def get_cancellation_manager() -> BaseRunCancellationManager:
    """Get the current cancellation manager instance."""
    return _cancellation_manager
def register_run(run_id: str) -> None:
    """Register a new run for cancellation tracking."""
    _cancellation_manager.register_run(run_id)
async def aregister_run(run_id: str) -> None:
    """Register a new run for cancellation tracking (async version)."""
    await _cancellation_manager.aregister_run(run_id)
def cancel_run(run_id: str) -> bool:
    """Cancel a run."""
    return _cancellation_manager.cancel_run(run_id)
async def acancel_run(run_id: str) -> bool:
    """Cancel a run (async version)."""
    return await _cancellation_manager.acancel_run(run_id)
def is_cancelled(run_id: str) -> bool:
    """Check if a run is cancelled."""
    return _cancellation_manager.is_cancelled(run_id)
async def ais_cancelled(run_id: str) -> bool:
    """Check if a run is cancelled (async version)."""
    return await _cancellation_manager.ais_cancelled(run_id)
def cleanup_run(run_id: str) -> None:
    """Clean up cancellation tracking for a completed run."""
    _cancellation_manager.cleanup_run(run_id)
async def acleanup_run(run_id: str) -> None:
    """Clean up cancellation tracking for a completed run (async version)."""
    await _cancellation_manager.acleanup_run(run_id)
def raise_if_cancelled(run_id: str) -> None:
    """Check if a run should be cancelled and raise exception if so."""
    # NOTE(review): the exception type raised is defined by the manager
    # implementation — confirm in BaseRunCancellationManager.
    _cancellation_manager.raise_if_cancelled(run_id)
async def araise_if_cancelled(run_id: str) -> None:
    """Check if a run should be cancelled and raise exception if so (async version)."""
    await _cancellation_manager.araise_if_cancelled(run_id)
def get_active_runs() -> Dict[str, bool]:
    """Get all currently tracked runs and their cancellation status."""
    return _cancellation_manager.get_active_runs()
async def aget_active_runs() -> Dict[str, bool]:
    """Get all currently tracked runs and their cancellation status (async version)."""
    return await _cancellation_manager.aget_active_runs()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/cancel.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/session/agent.py | from __future__ import annotations
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Mapping, Optional, Union
from agno.models.message import Message
from agno.run.agent import RunOutput
from agno.run.base import RunStatus
from agno.run.team import TeamRunOutput
from agno.session.summary import SessionSummary
from agno.utils.log import log_debug, log_warning
@dataclass
class AgentSession:
"""Agent Session that is stored in the database"""
# Session UUID
session_id: str
# ID of the agent that this session is associated with
agent_id: Optional[str] = None
# ID of the team that this session is associated with
team_id: Optional[str] = None
    # ID of the user interacting with this agent
user_id: Optional[str] = None
# ID of the workflow that this session is associated with
workflow_id: Optional[str] = None
# Session Data: session_name, session_state, images, videos, audio
session_data: Optional[Dict[str, Any]] = None
# Metadata stored with this agent
metadata: Optional[Dict[str, Any]] = None
# Agent Data: agent_id, name and model
agent_data: Optional[Dict[str, Any]] = None
# List of all runs in the session
runs: Optional[List[Union[RunOutput, TeamRunOutput]]] = None
# Summary of the session
summary: Optional["SessionSummary"] = None
# The unix timestamp when this session was created
created_at: Optional[int] = None
# The unix timestamp when this session was last updated
updated_at: Optional[int] = None
def to_dict(self) -> Dict[str, Any]:
session_dict = asdict(self)
session_dict["runs"] = [run.to_dict() for run in self.runs] if self.runs else None
session_dict["summary"] = self.summary.to_dict() if self.summary else None
return session_dict
@classmethod
def from_dict(cls, data: Mapping[str, Any]) -> Optional[AgentSession]:
if data is None or data.get("session_id") is None:
log_warning("AgentSession is missing session_id")
return None
runs = data.get("runs")
serialized_runs: List[Union[RunOutput, TeamRunOutput]] = []
if runs is not None and isinstance(runs[0], dict):
for run in runs:
if "agent_id" in run:
serialized_runs.append(RunOutput.from_dict(run))
elif "team_id" in run:
serialized_runs.append(TeamRunOutput.from_dict(run))
summary = data.get("summary")
if summary is not None and isinstance(summary, dict):
summary = SessionSummary.from_dict(summary)
metadata = data.get("metadata")
return cls(
session_id=data.get("session_id"), # type: ignore
agent_id=data.get("agent_id"),
user_id=data.get("user_id"),
workflow_id=data.get("workflow_id"),
team_id=data.get("team_id"),
agent_data=data.get("agent_data"),
session_data=data.get("session_data"),
metadata=metadata,
created_at=data.get("created_at"),
updated_at=data.get("updated_at"),
runs=serialized_runs,
summary=summary,
)
def upsert_run(self, run: RunOutput):
"""Adds a RunOutput, together with some calculated data, to the runs list."""
messages = run.messages
for m in messages or []:
if m.metrics is not None and hasattr(m.metrics, "timer"):
m.metrics.timer = None
if not self.runs:
self.runs = []
for i, existing_run in enumerate(self.runs or []):
if existing_run.run_id == run.run_id:
self.runs[i] = run
break
else:
self.runs.append(run)
log_debug("Added RunOutput to Agent Session")
def get_run(self, run_id: str) -> Optional[Union[RunOutput, TeamRunOutput]]:
for run in self.runs or []:
if run.run_id == run_id:
return run
return None
def get_messages(
self,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
last_n_runs: Optional[int] = None,
limit: Optional[int] = None,
skip_roles: Optional[List[str]] = None,
skip_statuses: Optional[List[RunStatus]] = None,
skip_history_messages: bool = True,
) -> List[Message]:
"""Returns the messages belonging to the session that fit the given criteria.
Args:
agent_id: The id of the agent to get the messages from.
team_id: The id of the team to get the messages from.
last_n_runs: The number of runs to return messages from, counting from the latest. Defaults to all runs.
last_n_messages: The number of messages to return, counting from the latest. Defaults to all messages.
skip_roles: Skip messages with these roles.
skip_statuses: Skip messages with these statuses.
skip_history_messages: Skip messages that were tagged as history in previous runs.
Returns:
A list of Messages belonging to the session.
"""
def _should_skip_message(
message: Message, skip_roles: Optional[List[str]] = None, skip_history_messages: bool = True
) -> bool:
"""Logic to determine if a message should be skipped"""
# Skip messages that were tagged as history in previous runs
if hasattr(message, "from_history") and message.from_history and skip_history_messages:
return True
# Skip messages with specified role
if skip_roles and message.role in skip_roles:
return True
return False
if not self.runs:
return []
if skip_statuses is None:
skip_statuses = [RunStatus.paused, RunStatus.cancelled, RunStatus.error]
runs = self.runs
# Filter by agent_id and team_id
if agent_id:
runs = [run for run in runs if hasattr(run, "agent_id") and run.agent_id == agent_id] # type: ignore
if team_id:
runs = [run for run in runs if hasattr(run, "team_id") and run.team_id == team_id] # type: ignore
# Skip any messages that might be part of members of teams (for session re-use)
runs = [run for run in runs if run.parent_run_id is None] # type: ignore
# Filter by status
runs = [run for run in runs if hasattr(run, "status") and run.status not in skip_statuses] # type: ignore
messages_from_history = []
system_message = None
# Limit the number of messages returned if limit is set
if limit is not None:
for run_response in runs:
if not run_response or not run_response.messages:
continue
for message in run_response.messages or []:
if _should_skip_message(message, skip_roles, skip_history_messages):
continue
if message.role == "system":
# Only add the system message once
if system_message is None:
system_message = message
else:
messages_from_history.append(message)
if system_message:
messages_from_history = [system_message] + messages_from_history[
-(limit - 1) :
] # Grab one less message then add the system message
else:
messages_from_history = messages_from_history[-limit:]
# Remove tool result messages that don't have an associated assistant message with tool calls
while len(messages_from_history) > 0 and messages_from_history[0].role == "tool":
messages_from_history.pop(0)
# If limit is not set, return all messages
else:
runs_to_process = runs[-last_n_runs:] if last_n_runs is not None else runs
for run_response in runs_to_process:
if not run_response or not run_response.messages:
continue
for message in run_response.messages or []:
if _should_skip_message(message, skip_roles, skip_history_messages):
continue
if message.role == "system":
# Only add the system message once
if system_message is None:
system_message = message
messages_from_history.append(system_message)
else:
messages_from_history.append(message)
log_debug(f"Getting messages from previous runs: {len(messages_from_history)}")
return messages_from_history
def get_chat_history(self, last_n_runs: Optional[int] = None) -> List[Message]:
"""Return the chat history (user and assistant messages) for the session.
Use get_messages() for more filtering options.
Args:
last_n_runs: Number of recent runs to include. If None, all runs will be considered.
Returns:
A list of user and assistant Messages belonging to the session.
"""
return self.get_messages(skip_roles=["system", "tool"], last_n_runs=last_n_runs)
def get_tool_calls(self, num_calls: Optional[int] = None) -> List[Dict[str, Any]]:
"""Returns a list of tool calls from the messages"""
tool_calls = []
if self.runs:
session_runs = self.runs
for run_response in session_runs[::-1]:
if run_response and run_response.messages:
for message in run_response.messages or []:
if message.tool_calls:
for tool_call in message.tool_calls:
tool_calls.append(tool_call)
if num_calls and len(tool_calls) >= num_calls:
return tool_calls
return tool_calls
def get_session_summary(self) -> Optional[SessionSummary]:
"""Get the session summary for the session"""
if self.summary is None:
return None
return self.summary
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/session/agent.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/session/summary.py | from dataclasses import dataclass
from datetime import datetime
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel, Field
from agno.models.base import Model
from agno.models.utils import get_model
from agno.run.agent import Message
from agno.utils.log import log_debug, log_warning
# TODO: Look into moving all managers into a separate dir
if TYPE_CHECKING:
from agno.metrics import RunMetrics
from agno.session import Session
from agno.session.agent import AgentSession
from agno.session.team import TeamSession
@dataclass
class SessionSummary:
    """Model for Session Summary."""

    # Free-text summary of the conversation
    summary: str
    # Topics discussed in the session
    topics: Optional[List[str]] = None
    # When the summary was last generated
    updated_at: Optional[datetime] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict, dropping None values; updated_at becomes an ISO string."""
        _dict = {
            "summary": self.summary,
            "topics": self.topics,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }
        return {k: v for k, v in _dict.items() if v is not None}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SessionSummary":
        """Deserialize from a dict produced by to_dict().

        Works on a shallow copy so the caller's dict is never mutated, and only
        parses updated_at when it is an ISO string (an already-parsed datetime
        is kept as-is).
        """
        payload = dict(data)
        updated_at = payload.get("updated_at")
        if updated_at and isinstance(updated_at, str):
            payload["updated_at"] = datetime.fromisoformat(updated_at)
        return cls(**payload)
class SessionSummaryResponse(BaseModel):
    """Structured output schema that the summary model is asked to produce.

    The Field descriptions double as guidance that is surfaced to the model
    through the JSON schema / structured-output machinery.
    """

    # Required free-text summary of the session
    summary: str = Field(
        ...,
        description="Summary of the session. Be concise and focus on only important information. Do not make anything up.",
    )
    # Optional list of discussed topics
    topics: Optional[List[str]] = Field(None, description="Topics discussed in the session.")

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dict, omitting None fields."""
        return self.model_dump(exclude_none=True)

    def to_json(self) -> str:
        """Serialize to an indented JSON string, omitting None fields."""
        return self.model_dump_json(exclude_none=True, indent=2)
@dataclass
class SessionSummaryManager:
    """Session Summary Manager.

    Builds the prompt for summarizing a session's conversation, invokes the
    configured model (sync or async), and parses the response into a
    SessionSummary that is stored back on the session.
    """

    # Model used for session summary generation
    model: Optional[Model] = None
    # Prompt used for session summary generation
    session_summary_prompt: Optional[str] = None
    # User message prompt for requesting the summary
    summary_request_message: str = "Provide the summary of the conversation."
    # Whether session summaries were created in the last run
    summaries_updated: bool = False

    def get_response_format(self, model: "Model") -> Union[Dict[str, Any], Type[BaseModel]]:  # type: ignore
        """Pick the richest response format the given model supports.

        Preference order: native structured outputs > JSON schema > plain JSON object.
        """
        if model.supports_native_structured_outputs:
            return SessionSummaryResponse
        elif model.supports_json_schema_outputs:
            return {
                "type": "json_schema",
                "json_schema": {
                    "name": SessionSummaryResponse.__name__,
                    "schema": SessionSummaryResponse.model_json_schema(),
                },
            }
        else:
            # Fallback: ask for a generic JSON object (schema appended to prompt later)
            return {"type": "json_object"}

    def get_system_message(
        self,
        conversation: List[Message],
        response_format: Union[Dict[str, Any], Type[BaseModel]],
    ) -> Message:
        """Build the system message containing the instructions plus the
        conversation transcript wrapped in <conversation> tags.

        Args:
            conversation: Messages to transcribe into the prompt.
            response_format: Output of get_response_format(); when it is the
                plain json_object fallback, an explicit JSON output prompt is appended.

        Returns:
            A system-role Message ready to send to the summary model.
        """
        if self.session_summary_prompt is not None:
            system_prompt = self.session_summary_prompt
        else:
            system_prompt = dedent("""\
            Analyze the following conversation between a user and an assistant, and extract the following details:
            - Summary (str): Provide a concise summary of the session, focusing on important information that would be helpful for future interactions.
            - Topics (Optional[List[str]]): List the topics discussed in the session.
            Keep the summary concise and to the point. Only include relevant information.
            """)

        conversation_messages = []
        system_prompt += "<conversation>"
        for message in conversation:
            if message.role == "user":
                # Handle empty user messages with media - note what media was provided
                if not message.content or (isinstance(message.content, str) and message.content.strip() == ""):
                    media_types = []
                    if hasattr(message, "images") and message.images:
                        media_types.append(f"{len(message.images)} image(s)")
                    if hasattr(message, "videos") and message.videos:
                        media_types.append(f"{len(message.videos)} video(s)")
                    if hasattr(message, "audio") and message.audio:
                        media_types.append(f"{len(message.audio)} audio file(s)")
                    if hasattr(message, "files") and message.files:
                        media_types.append(f"{len(message.files)} file(s)")
                    if media_types:
                        conversation_messages.append(f"User: [Provided {', '.join(media_types)}]")
                    # Skip empty messages with no media
                else:
                    conversation_messages.append(f"User: {message.content}")
            elif message.role in ["assistant", "model"]:
                conversation_messages.append(f"Assistant: {message.content}\n")
        system_prompt += "\n".join(conversation_messages)
        system_prompt += "</conversation>"

        # json_object mode has no schema enforcement, so spell the schema out in the prompt
        if response_format == {"type": "json_object"}:
            from agno.utils.prompts import get_json_output_prompt

            system_prompt += "\n" + get_json_output_prompt(SessionSummaryResponse)  # type: ignore

        return Message(role="system", content=system_prompt)

    def _prepare_summary_messages(
        self,
        session: Optional["Session"] = None,
    ) -> Optional[List[Message]]:
        """Prepare messages for session summary generation. Returns None if no meaningful messages to summarize."""
        if not session:
            return None
        self.model = get_model(self.model)
        if self.model is None:
            return None
        response_format = self.get_response_format(self.model)
        system_message = self.get_system_message(
            conversation=session.get_messages(),  # type: ignore
            response_format=response_format,
        )
        if system_message is None:
            return None
        return [
            system_message,
            Message(role="user", content=self.summary_request_message),
        ]

    def _process_summary_response(self, summary_response, session_summary_model: "Model") -> Optional[SessionSummary]:  # type: ignore
        """Process the model response into a SessionSummary.

        Handles two shapes: a natively-parsed SessionSummaryResponse (structured
        outputs) or a raw string body that must be parsed. Returns None on any
        parse failure.
        """
        from datetime import datetime

        if summary_response is None:
            return None

        # Handle native structured outputs
        if (
            session_summary_model.supports_native_structured_outputs
            and summary_response.parsed is not None
            and isinstance(summary_response.parsed, SessionSummaryResponse)
        ):
            session_summary = SessionSummary(
                summary=summary_response.parsed.summary,
                topics=summary_response.parsed.topics,
                updated_at=datetime.now(),
            )
            # NOTE(review): `summary` is not a declared field of SessionSummaryManager;
            # this sets an ad-hoc attribute — confirm whether anything reads it.
            self.summary = session_summary
            log_debug("Session summary created", center=True)
            return session_summary

        # Handle string responses
        if isinstance(summary_response.content, str):
            try:
                from agno.utils.string import parse_response_model_str

                parsed_summary: SessionSummaryResponse = parse_response_model_str(  # type: ignore
                    summary_response.content, SessionSummaryResponse
                )
                if parsed_summary is not None:
                    session_summary = SessionSummary(
                        summary=parsed_summary.summary, topics=parsed_summary.topics, updated_at=datetime.now()
                    )
                    # NOTE(review): same ad-hoc attribute assignment as above
                    self.summary = session_summary
                    log_debug("Session summary created", center=True)
                    return session_summary
                else:
                    log_warning("Failed to parse session summary response")
            except Exception as e:
                log_warning(f"Failed to parse session summary response: {e}")
        return None

    def create_session_summary(
        self,
        session: Union["AgentSession", "TeamSession"],
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[SessionSummary]:
        """Creates a summary of the session.

        Args:
            session: The session whose conversation should be summarized.
            run_metrics: Optional metrics accumulator; summary-model usage is added to it.

        Returns:
            The created SessionSummary (also stored on session.summary), or None.
        """
        log_debug("Creating session summary", center=True)
        self.model = get_model(self.model)
        if self.model is None:
            return None
        messages = self._prepare_summary_messages(session)
        # Skip summary generation if there are no meaningful messages
        if messages is None:
            log_debug("No meaningful messages to summarize, skipping session summary")
            return None
        response_format = self.get_response_format(self.model)
        summary_response = self.model.response(messages=messages, response_format=response_format)

        # Accumulate session summary model metrics
        if run_metrics is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(summary_response, self.model, ModelType.SESSION_SUMMARY_MODEL, run_metrics)

        session_summary = self._process_summary_response(summary_response, self.model)
        if session is not None and session_summary is not None:
            session.summary = session_summary
            self.summaries_updated = True
        return session_summary

    async def acreate_session_summary(
        self,
        session: Union["AgentSession", "TeamSession"],
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[SessionSummary]:
        """Creates a summary of the session.

        Async variant of create_session_summary(); uses model.aresponse().
        """
        log_debug("Creating session summary", center=True)
        self.model = get_model(self.model)
        if self.model is None:
            return None
        messages = self._prepare_summary_messages(session)
        # Skip summary generation if there are no meaningful messages
        if messages is None:
            log_debug("No meaningful messages to summarize, skipping session summary")
            return None
        response_format = self.get_response_format(self.model)
        summary_response = await self.model.aresponse(messages=messages, response_format=response_format)

        # Accumulate session summary model metrics
        if run_metrics is not None:
            from agno.metrics import ModelType, accumulate_model_metrics

            accumulate_model_metrics(summary_response, self.model, ModelType.SESSION_SUMMARY_MODEL, run_metrics)

        session_summary = self._process_summary_response(summary_response, self.model)
        if session is not None and session_summary is not None:
            session.summary = session_summary
            self.summaries_updated = True
        return session_summary
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/session/summary.py",
"license": "Apache License 2.0",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/session/team.py | from __future__ import annotations
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from pydantic import BaseModel
from agno.models.message import Message
from agno.run.agent import RunOutput, RunStatus
from agno.run.team import TeamRunOutput
from agno.session.summary import SessionSummary
from agno.utils.log import log_debug, log_warning
@dataclass
class TeamSession:
    """Team Session that is stored in the database.

    Holds the runs (team-level and member-agent), summary, and assorted
    metadata for a single conversation involving a team.
    """

    # Session UUID
    session_id: str
    # ID of the team that this session is associated with
    team_id: Optional[str] = None
    # ID of the user interacting with this team
    user_id: Optional[str] = None
    # ID of the workflow that this session is associated with
    workflow_id: Optional[str] = None
    # Team Data: agent_id, name and model
    team_data: Optional[Dict[str, Any]] = None
    # Session Data: session_name, session_state, images, videos, audio
    session_data: Optional[Dict[str, Any]] = None
    # Metadata stored with this team
    metadata: Optional[Dict[str, Any]] = None
    # List of all runs in the session
    runs: Optional[list[Union[TeamRunOutput, RunOutput]]] = None
    # Summary of the session
    summary: Optional[SessionSummary] = None
    # The unix timestamp when this session was created
    created_at: Optional[int] = None
    # The unix timestamp when this session was last updated
    updated_at: Optional[int] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the session, including runs and summary, to a plain dict."""
        session_dict = asdict(self)
        session_dict["runs"] = [run.to_dict() for run in self.runs] if self.runs else None
        session_dict["summary"] = self.summary.to_dict() if self.summary else None
        return session_dict

    @classmethod
    def from_dict(cls, data: Mapping[str, Any]) -> Optional[TeamSession]:
        """Deserialize a TeamSession from a dict.

        Returns None (with a warning) when session_id is missing. The input
        mapping is never mutated.
        """
        if data is None or data.get("session_id") is None:
            log_warning("TeamSession is missing session_id")
            return None

        # Deserialize the summary into a local; do not write back into the caller's mapping
        summary = data.get("summary")
        if summary is not None and isinstance(summary, dict):
            summary = SessionSummary.from_dict(summary)

        runs = data.get("runs")
        serialized_runs: List[Union[TeamRunOutput, RunOutput]] = []
        # Guard against an empty runs list before probing runs[0]
        if runs:
            if isinstance(runs[0], dict):
                for run in runs:
                    # Runs carrying an agent_id belong to a member agent, team_id to the team
                    if "agent_id" in run:
                        serialized_runs.append(RunOutput.from_dict(run))
                    elif "team_id" in run:
                        serialized_runs.append(TeamRunOutput.from_dict(run))
            else:
                # Runs were already deserialized upstream; keep them as-is
                serialized_runs = list(runs)

        return cls(
            session_id=data.get("session_id"),  # type: ignore
            team_id=data.get("team_id"),
            user_id=data.get("user_id"),
            workflow_id=data.get("workflow_id"),
            team_data=data.get("team_data"),
            session_data=data.get("session_data"),
            metadata=data.get("metadata"),
            created_at=data.get("created_at"),
            updated_at=data.get("updated_at"),
            runs=serialized_runs,
            summary=summary,
        )

    def get_run(self, run_id: str) -> Optional[Union[TeamRunOutput, RunOutput]]:
        """Return the run with the given run_id, or None if not present."""
        for run in self.runs or []:
            if run.run_id == run_id:
                return run
        return None

    def upsert_run(self, run_response: Union[TeamRunOutput, RunOutput]) -> None:
        """Add a RunOutput to the runs list, replacing any run with the same run_id."""
        messages = run_response.messages
        # Clear message timer before storage
        for m in messages or []:
            if m.metrics is not None and hasattr(m.metrics, "timer"):
                m.metrics.timer = None
        if not self.runs:
            self.runs = []
        for i, existing_run in enumerate(self.runs or []):
            if existing_run.run_id == run_response.run_id:
                self.runs[i] = run_response
                break
        else:
            # No existing run matched: append as a new run
            self.runs.append(run_response)
        log_debug("Added RunOutput to Team Session")

    def get_messages(
        self,
        team_id: Optional[str] = None,
        member_ids: Optional[List[str]] = None,
        last_n_runs: Optional[int] = None,
        limit: Optional[int] = None,
        skip_roles: Optional[List[str]] = None,
        skip_statuses: Optional[List[RunStatus]] = None,
        skip_history_messages: bool = True,
        skip_member_messages: bool = True,
    ) -> List[Message]:
        """Returns the messages belonging to the session that fit the given criteria.

        Args:
            team_id: The id of the team to get the messages from.
            member_ids: The ids of the members to get the messages from.
            last_n_runs: The number of runs to return messages from, counting from the latest. Defaults to all runs.
            limit: The number of messages to return, counting from the latest. Defaults to all messages.
            skip_roles: Skip messages with these roles.
            skip_statuses: Skip messages with these statuses.
            skip_history_messages: Skip messages that were tagged as history in previous runs.
            skip_member_messages: Skip messages created by members of the team.

        Returns:
            A list of Messages belonging to the session.
        """

        def _should_skip_message(
            message: Message, skip_roles: Optional[List[str]] = None, skip_history_messages: bool = True
        ) -> bool:
            """Processes a message for history"""
            # Skip messages that were tagged as history in previous runs
            if hasattr(message, "from_history") and message.from_history and skip_history_messages:
                return True
            # Skip messages with specified role
            if skip_roles and message.role in skip_roles:
                return True
            return False

        # Explicit member filtering overrides the skip_member_messages flag
        if member_ids is not None and skip_member_messages:
            log_debug("Member IDs to filter by were provided. The skip_member_messages flag will be ignored.")
            skip_member_messages = False

        if not self.runs:
            return []

        if skip_statuses is None:
            skip_statuses = [RunStatus.paused, RunStatus.cancelled, RunStatus.error]

        session_runs = self.runs
        # Filter by team_id and member_ids
        if team_id:
            session_runs = [run for run in session_runs if hasattr(run, "team_id") and run.team_id == team_id]  # type: ignore
        if member_ids:
            filtered_runs = []
            for run in session_runs:
                if hasattr(run, "agent_id") and run.agent_id in member_ids:  # type: ignore
                    filtered_runs.append(run)
                elif hasattr(run, "member_responses"):
                    for member_run in run.member_responses:
                        if hasattr(member_run, "agent_id") and member_run.agent_id in member_ids:  # type: ignore
                            filtered_runs.append(member_run)
            session_runs = filtered_runs
        if skip_member_messages:
            # Filter for the top-level runs (main team runs or agent runs when sharing session)
            session_runs = [run for run in session_runs if run.parent_run_id is None]  # type: ignore
        # Filter by status
        session_runs = [run for run in session_runs if hasattr(run, "status") and run.status not in skip_statuses]  # type: ignore

        messages_from_history = []
        system_message = None
        # Limit the number of messages returned if limit is set
        if limit is not None:
            for run_response in session_runs:
                if not run_response or not run_response.messages:
                    continue
                for message in run_response.messages or []:
                    if _should_skip_message(message, skip_roles, skip_history_messages):
                        continue
                    if message.role == "system":
                        # Only add the system message once
                        if system_message is None:
                            system_message = message
                    else:
                        messages_from_history.append(message)
            if system_message:
                messages_from_history = [system_message] + messages_from_history[
                    -(limit - 1) :
                ]  # Grab one less message then add the system message
            else:
                messages_from_history = messages_from_history[-limit:]
            # Remove tool result messages that don't have an associated assistant message with tool calls
            while len(messages_from_history) > 0 and messages_from_history[0].role == "tool":
                messages_from_history.pop(0)
        else:
            # Filter by last_n runs
            runs_to_process = session_runs[-last_n_runs:] if last_n_runs is not None else session_runs
            for run_response in runs_to_process:
                if not (run_response and run_response.messages):
                    continue
                for message in run_response.messages or []:
                    if _should_skip_message(message, skip_roles, skip_history_messages):
                        continue
                    if message.role == "system":
                        # Only add the system message once
                        if system_message is None:
                            system_message = message
                            messages_from_history.append(system_message)
                    else:
                        messages_from_history.append(message)

        log_debug(f"Getting messages from previous runs: {len(messages_from_history)}")
        return messages_from_history

    def get_chat_history(self, last_n_runs: Optional[int] = None) -> List[Message]:
        """Return the chat history (user and assistant messages) for the session.

        Use get_messages() for more filtering options.

        Args:
            last_n_runs: Number of recent runs to include. If None, all runs will be considered.

        Returns:
            A list of user and assistant Messages belonging to the session.
        """
        return self.get_messages(skip_roles=["system", "tool"], skip_member_messages=True, last_n_runs=last_n_runs)

    def get_tool_calls(self, num_calls: Optional[int] = None) -> List[Dict[str, Any]]:
        """Returns a list of tool calls from the messages, newest runs first."""
        tool_calls = []
        session_runs = self.runs
        if session_runs is None:
            return []
        # Walk runs newest-first so the most recent tool calls come back first
        for run_response in session_runs[::-1]:
            if run_response and run_response.messages:
                for message in run_response.messages or []:
                    if message.tool_calls:
                        for tool_call in message.tool_calls:
                            tool_calls.append(tool_call)
                            if num_calls and len(tool_calls) >= num_calls:
                                return tool_calls
        return tool_calls

    def get_team_history(self, num_runs: Optional[int] = None) -> List[Tuple[str, str]]:
        """Get team history as structured data (input, response pairs) -> This is the history of the team leader, not the members.

        Args:
            num_runs: Number of recent runs to include. If None, returns all available history.
        """
        if not self.runs:
            return []

        from agno.run.base import RunStatus

        # Get completed runs only (exclude current/pending run)
        completed_runs = [run for run in self.runs if run.status == RunStatus.completed and run.parent_run_id is None]
        if num_runs is not None and len(completed_runs) > num_runs:
            recent_runs = completed_runs[-num_runs:]
        else:
            recent_runs = completed_runs

        if not recent_runs:
            return []

        # Return structured data as list of (input, response) tuples
        history_data = []
        for run in recent_runs:
            # Get input
            input_str = ""
            if run.input:
                input_str = run.input.input_content_string()
            # Get response
            response_str = ""
            if run.content:
                response_str = (
                    run.content.model_dump_json(indent=2, exclude_none=True)
                    if isinstance(run.content, BaseModel)
                    else str(run.content)
                )
            history_data.append((input_str, response_str))
        return history_data

    def get_team_history_context(self, num_runs: Optional[int] = None) -> Optional[str]:
        """Get formatted team history context for steps

        Args:
            num_runs: Number of recent runs to include. If None, returns all available history.
        """
        history_data = self.get_team_history(num_runs)
        if not history_data:
            return None

        # Format as team history context using the structured data
        context_parts = ["<team_history_context>"]
        for i, (input_str, response_str) in enumerate(history_data, 1):
            context_parts.append(f"[run-{i}]")
            if input_str:
                context_parts.append(f"input: {input_str}")
            if response_str:
                context_parts.append(f"response: {response_str}")
            context_parts.append("")  # Empty line between runs
        context_parts.append("</team_history_context>")
        context_parts.append("")  # Empty line before current input
        return "\n".join(context_parts)

    def get_session_summary(self) -> Optional[SessionSummary]:
        """Get the session summary for the session"""
        if self.summary is None:
            return None
        return self.summary  # type: ignore
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/session/team.py",
"license": "Apache License 2.0",
"lines": 275,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/session/workflow.py | from __future__ import annotations
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from pydantic import BaseModel
from agno.models.message import Message
from agno.run.agent import RunOutput
from agno.run.base import RunStatus
from agno.run.team import TeamRunOutput
from agno.run.workflow import WorkflowRunOutput
from agno.utils.log import log_debug, logger
@dataclass
class WorkflowSession:
"""Workflow Session for pipeline-based workflows"""
# Session UUID - this is the workflow_session_id that gets set on agents/teams
session_id: str
# ID of the user interacting with this workflow
user_id: Optional[str] = None
# ID of the workflow that this session is associated with
workflow_id: Optional[str] = None
# Workflow name
workflow_name: Optional[str] = None
# Workflow runs - stores WorkflowRunOutput objects in memory
runs: Optional[List[WorkflowRunOutput]] = None
# Session Data: session_name, session_state, images, videos, audio
session_data: Optional[Dict[str, Any]] = None
# Workflow configuration and metadata
workflow_data: Optional[Dict[str, Any]] = None
# Metadata stored with this workflow session
metadata: Optional[Dict[str, Any]] = None
# The unix timestamp when this session was created
created_at: Optional[int] = None
# The unix timestamp when this session was last updated
updated_at: Optional[int] = None
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary for storage, serializing runs to dicts"""
runs_data = None
if self.runs:
runs_data = []
for run in self.runs:
try:
runs_data.append(run.to_dict())
except Exception as e:
raise ValueError(f"Serialization failed: {str(e)}")
return {
"session_id": self.session_id,
"user_id": self.user_id,
"workflow_id": self.workflow_id,
"workflow_name": self.workflow_name,
"runs": runs_data,
"session_data": self.session_data,
"workflow_data": self.workflow_data,
"metadata": self.metadata,
"created_at": self.created_at,
"updated_at": self.updated_at,
}
@classmethod
def from_dict(cls, data: Mapping[str, Any]) -> Optional[WorkflowSession]:
"""Create WorkflowSession from dictionary, deserializing runs from dicts"""
if data is None or data.get("session_id") is None:
logger.warning("WorkflowSession is missing session_id")
return None
# Deserialize runs from dictionaries back to WorkflowRunOutput objects
runs_data = data.get("runs")
runs: Optional[List[WorkflowRunOutput]] = None
if runs_data is not None:
runs = []
for run_item in runs_data:
if isinstance(run_item, WorkflowRunOutput):
# Already a WorkflowRunOutput object (from deserialize_session_json_fields)
runs.append(run_item)
elif isinstance(run_item, dict):
# Still a dictionary, needs to be converted
runs.append(WorkflowRunOutput.from_dict(run_item))
else:
logger.warning(f"Unexpected run item type: {type(run_item)}")
return cls(
session_id=data.get("session_id"), # type: ignore
user_id=data.get("user_id"),
workflow_id=data.get("workflow_id"),
workflow_name=data.get("workflow_name"),
runs=runs,
session_data=data.get("session_data"),
workflow_data=data.get("workflow_data"),
metadata=data.get("metadata"),
created_at=data.get("created_at"),
updated_at=data.get("updated_at"),
)
def __post_init__(self):
if self.runs is None:
self.runs = []
# Ensure session_data, workflow_data, and metadata are dictionaries, not None
if self.session_data is None:
self.session_data = {}
if self.workflow_data is None:
self.workflow_data = {}
if self.metadata is None:
self.metadata = {}
# Set timestamps if they're not already set
current_time = int(time.time())
if self.created_at is None:
self.created_at = current_time
if self.updated_at is None:
self.updated_at = current_time
def get_run(self, run_id: str) -> Optional[WorkflowRunOutput]:
for run in self.runs or []:
if run.run_id == run_id:
return run
return None
def upsert_run(self, run: WorkflowRunOutput) -> None:
"""Add or update a workflow run (upsert behavior)"""
if self.runs is None:
self.runs = []
# Find existing run and update it, or append new one
for i, existing_run in enumerate(self.runs):
if existing_run.run_id == run.run_id:
self.runs[i] = run
break
else:
self.runs.append(run)
def get_workflow_history(self, num_runs: Optional[int] = None) -> List[Tuple[str, str]]:
"""Get workflow history as structured data (input, response pairs)
Args:
num_runs: Number of recent runs to include. If None, returns all available history.
"""
if not self.runs:
return []
# Get completed runs only (exclude current/pending run)
completed_runs = [run for run in self.runs if run.status == RunStatus.completed]
if num_runs is not None and len(completed_runs) > num_runs:
recent_runs = completed_runs[-num_runs:]
else:
recent_runs = completed_runs
if not recent_runs:
return []
# Return structured data as list of (input, response) tuples
history_data = []
for run in recent_runs:
# Get input
input_str = ""
if run.input:
input_str = str(run.input) if not isinstance(run.input, str) else run.input
# Get response
response_str = ""
if run.content:
response_str = str(run.content) if not isinstance(run.content, str) else run.content
history_data.append((input_str, response_str))
return history_data
def get_workflow_history_context(self, num_runs: Optional[int] = None) -> Optional[str]:
"""Get formatted workflow history context for steps
Args:
num_runs: Number of recent runs to include. If None, returns all available history.
"""
history_data = self.get_workflow_history(num_runs)
if not history_data:
return None
# Format as workflow context using the structured data
context_parts = ["<workflow_history_context>"]
for i, (input_str, response_str) in enumerate(history_data, 1):
context_parts.append(f"[Workflow Run-{i}]")
if input_str:
context_parts.append(f"User input: {input_str}")
if response_str:
context_parts.append(f"Workflow output: {response_str}")
context_parts.append("") # Empty line between runs
context_parts.append("</workflow_history_context>")
context_parts.append("") # Empty line before current input
return "\n".join(context_parts)
    def get_messages_from_agent_runs(
        self,
        runs: List[RunOutput],
        last_n_runs: Optional[int] = None,
        limit: Optional[int] = None,
        skip_roles: Optional[List[str]] = None,
        skip_statuses: Optional[List[RunStatus]] = None,
        skip_history_messages: bool = True,
    ) -> List[Message]:
        """Return the messages belonging to the given agent runs that fit the given criteria.

        Args:
            runs: The list of agent runs to get the messages from.
            last_n_runs: Number of recent runs to include. If None, all runs will be considered.
                NOTE: ignored when ``limit`` is set — see the limit branch below.
            limit: Number of messages to include. If None, all messages will be included.
            skip_roles: Roles to skip.
            skip_statuses: Statuses to skip.
            skip_history_messages: Whether to skip history messages.

        Returns:
            A list of messages from the given agent runs.
        """
        def _should_skip_message(
            message: Message, skip_roles: Optional[List[str]] = None, skip_history_messages: bool = True
        ) -> bool:
            """Logic to determine if a message should be skipped"""
            # Skip messages that were tagged as history in previous runs
            if hasattr(message, "from_history") and message.from_history and skip_history_messages:
                return True
            # Skip messages with specified role
            if skip_roles and message.role in skip_roles:
                return True
            return False
        # Filter by status
        if skip_statuses:
            runs = [run for run in runs if hasattr(run, "status") and run.status not in skip_statuses]  # type: ignore
        messages_from_history: List[Message] = []
        # At most one system message is kept across all runs
        system_message: Optional[Message] = None
        # Limit the number of messages returned if limit is set
        # (last_n_runs is NOT applied in this branch: all runs are scanned and the
        # collected messages are trimmed to the last `limit` entries instead)
        if limit is not None:
            for run_response in runs:
                if not run_response or not run_response.messages:
                    continue
                for message in run_response.messages or []:
                    if _should_skip_message(message, skip_roles, skip_history_messages):
                        continue
                    if message.role == "system":
                        # Only add the system message once
                        if system_message is None:
                            system_message = message
                    else:
                        messages_from_history.append(message)
            if system_message:
                messages_from_history = [system_message] + messages_from_history[
                    -(limit - 1) :
                ]  # Grab one less message then add the system message
            else:
                messages_from_history = messages_from_history[-limit:]
            # Remove tool result messages that don't have an associated assistant message with tool calls
            # (trimming may have cut the assistant message that issued the tool call)
            while len(messages_from_history) > 0 and messages_from_history[0].role == "tool":
                messages_from_history.pop(0)
        # If limit is not set, return all messages
        else:
            # Honor last_n_runs only in this branch
            runs_to_process = runs[-last_n_runs:] if last_n_runs is not None else runs
            for run_response in runs_to_process:
                if not run_response or not run_response.messages:
                    continue
                for message in run_response.messages or []:
                    if _should_skip_message(message, skip_roles, skip_history_messages):
                        continue
                    if message.role == "system":
                        # Only add the system message once
                        if system_message is None:
                            system_message = message
                            messages_from_history.append(system_message)
                    else:
                        messages_from_history.append(message)
        log_debug(f"Getting messages from previous runs: {len(messages_from_history)}")
        return messages_from_history
def get_messages_from_team_runs(
self,
team_id: str,
runs: List[TeamRunOutput],
last_n_runs: Optional[int] = None,
limit: Optional[int] = None,
skip_roles: Optional[List[str]] = None,
skip_statuses: Optional[List[RunStatus]] = None,
skip_history_messages: bool = True,
skip_member_messages: bool = True,
) -> List[Message]:
"""Return the messages in the given team runs that fit the given criteria.
Args:
team_id: The ID of the contextual team.
runs: The list of team runs to get the messages from.
last_n_runs: Number of recent runs to include. If None, all runs will be considered.
limit: Number of messages to include. If None, all messages will be included.
skip_roles: Roles to skip.
skip_statuses: Statuses to skip.
skip_history_messages: Whether to skip history messages.
skip_member_messages: Whether to skip messages from members of the team.
Returns:
A list of messages from the given team runs.
"""
def _should_skip_message(
message: Message, skip_roles: Optional[List[str]] = None, skip_history_messages: bool = True
) -> bool:
"""Logic to determine if a message should be skipped"""
# Skip messages that were tagged as history in previous runs
if hasattr(message, "from_history") and message.from_history and skip_history_messages:
return True
# Skip messages with specified role
if skip_roles and message.role in skip_roles:
return True
return False
# Filter for top-level runs (main team runs or agent runs when sharing session)
if skip_member_messages:
session_runs = [run for run in runs if run.team_id == team_id]
# Filter runs by status
if skip_statuses:
session_runs = [run for run in session_runs if hasattr(run, "status") and run.status not in skip_statuses]
messages_from_history = []
system_message = None
# Limit the number of messages returned if limit is set
if limit is not None:
for run_response in session_runs:
if not run_response or not run_response.messages:
continue
for message in run_response.messages or []:
if _should_skip_message(message, skip_roles, skip_history_messages):
continue
if message.role == "system":
# Only add the system message once
if system_message is None:
system_message = message
else:
messages_from_history.append(message)
if system_message:
messages_from_history = [system_message] + messages_from_history[
-(limit - 1) :
] # Grab one less message then add the system message
else:
messages_from_history = messages_from_history[-limit:]
# Remove tool result messages that don't have an associated assistant message with tool calls
while len(messages_from_history) > 0 and messages_from_history[0].role == "tool":
messages_from_history.pop(0)
else:
# Filter by last_n runs
runs_to_process = session_runs[-last_n_runs:] if last_n_runs is not None else session_runs
for run_response in runs_to_process:
if not (run_response and run_response.messages):
continue
for message in run_response.messages or []:
if _should_skip_message(message, skip_roles, skip_history_messages):
continue
if message.role == "system":
# Only add the system message once
if system_message is None:
system_message = message
messages_from_history.append(system_message)
else:
messages_from_history.append(message)
log_debug(f"Getting messages from previous runs: {len(messages_from_history)}")
return messages_from_history
def get_messages(
self,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
last_n_runs: Optional[int] = None,
limit: Optional[int] = None,
skip_roles: Optional[List[str]] = None,
skip_statuses: Optional[List[RunStatus]] = None,
skip_history_messages: bool = True,
skip_member_messages: bool = True,
) -> List[Message]:
"""Return the messages belonging to the session that fit the given criteria.
Args:
agent_id: The ID of the agent to get the messages for.
team_id: The ID of the team to get the messages for.
last_n_runs: Number of recent runs to include. If None, all runs will be considered.
limit: Number of messages to include. If None, all messages will be included.
skip_roles: Roles to skip.
skip_statuses: Statuses to skip.
skip_history_messages: Whether to skip history messages.
skip_member_messages: Whether to skip messages from members of the team.
Returns:
A list of messages from the session.
"""
if agent_id and team_id:
raise ValueError("agent_id and team_id cannot be used together")
if not self.runs:
return []
if agent_id:
agent_runs: List[RunOutput] = []
for run in self.runs:
if run.step_executor_runs:
for executor_run in run.step_executor_runs:
if isinstance(executor_run, RunOutput) and executor_run.agent_id == agent_id:
agent_runs.append(executor_run)
return self.get_messages_from_agent_runs(
runs=agent_runs,
last_n_runs=last_n_runs,
limit=limit,
skip_roles=skip_roles,
skip_statuses=skip_statuses,
skip_history_messages=skip_history_messages,
)
elif team_id:
team_runs: List[TeamRunOutput] = []
for run in self.runs:
if run.step_executor_runs:
for executor_run in run.step_executor_runs:
if isinstance(executor_run, TeamRunOutput) and executor_run.team_id == team_id:
team_runs.append(executor_run)
return self.get_messages_from_team_runs(
team_id=team_id,
runs=team_runs,
last_n_runs=last_n_runs,
limit=limit,
skip_roles=skip_roles,
skip_statuses=skip_statuses,
skip_history_messages=skip_history_messages,
skip_member_messages=skip_member_messages,
)
else:
raise ValueError("agent_id or team_id must be provided")
def get_chat_history(self, last_n_runs: Optional[int] = None) -> List[WorkflowChatInteraction]:
"""Return a list of dictionaries containing the input and output for each run in the session.
Args:
last_n_runs: Number of recent runs to include. If None, all runs will be considered.
Returns:
A list of WorkflowChatInteraction objects.
"""
if not self.runs:
return []
runs = self.runs
if last_n_runs is not None:
runs = self.runs[-last_n_runs:]
return [
WorkflowChatInteraction(input=run.input, output=run.content) for run in runs if run.input and run.content
]
@dataclass
class WorkflowChatInteraction:
    """A single user-input / workflow-output pair from one workflow run."""

    # Raw input the run was invoked with (string, dict, list, or pydantic model)
    input: Union[str, Dict[str, Any], List[Any], BaseModel]
    # Content produced by the run; type depends on the workflow's output
    output: Any
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/session/workflow.py",
"license": "Apache License 2.0",
"lines": 410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/tools/clickup.py | import json
import re
from os import getenv
from typing import Any, Dict, List, Optional
from agno.tools import Toolkit
from agno.utils.log import log_debug, logger
try:
import requests
except ImportError:
raise ImportError("`requests` not installed. Please install using `pip install requests`")
class ClickUpTools(Toolkit):
    """Toolkit exposing ClickUp space/list/task operations over the v2 REST API.

    All public methods return JSON strings; API failures are surfaced as
    ``{"error": "..."}`` payloads rather than raised exceptions.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        master_space_id: Optional[str] = None,
        **kwargs,
    ):
        """Initialize the ClickUp toolkit.

        Args:
            api_key: ClickUp API key. Defaults to the CLICKUP_API_KEY env var.
            master_space_id: ClickUp team (workspace) id. Defaults to the
                MASTER_SPACE_ID env var.
            **kwargs: Extra arguments forwarded to the Toolkit base class.

        Raises:
            ValueError: If the API key or master space id cannot be resolved.
        """
        self.api_key = api_key or getenv("CLICKUP_API_KEY")
        self.master_space_id = master_space_id or getenv("MASTER_SPACE_ID")
        self.base_url = "https://api.clickup.com/api/v2"
        self.headers = {"Authorization": self.api_key}
        if not self.api_key:
            raise ValueError("CLICKUP_API_KEY not set. Please set the CLICKUP_API_KEY environment variable.")
        if not self.master_space_id:
            raise ValueError("MASTER_SPACE_ID not set. Please set the MASTER_SPACE_ID environment variable.")
        tools: List[Any] = [
            self.list_tasks,
            self.create_task,
            self.get_task,
            self.update_task,
            self.delete_task,
            self.list_spaces,
            self.list_lists,
        ]
        super().__init__(name="clickup", tools=tools, **kwargs)

    def _make_request(
        self, method: str, endpoint: str, params: Optional[Dict] = None, data: Optional[Dict] = None
    ) -> Dict[str, Any]:
        """Make a request to the ClickUp API and return the decoded JSON body.

        On any request failure this returns ``{"error": "<message>"}`` instead
        of raising, so callers can surface the error as JSON.
        """
        url = f"{self.base_url}/{endpoint}"
        try:
            response = requests.request(method=method, url=url, headers=self.headers, params=params, json=data)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            logger.error(f"Error making request to {url}: {e}")
            return {"error": str(e)}

    def _find_by_name(self, items: List[Dict[str, Any]], name: str) -> Optional[Dict[str, Any]]:
        """Find an item in a list by name using exact match or regex pattern.

        The first item matching either exactly (case-insensitive) or by regex
        search wins; a later exact match does not override an earlier regex hit.

        Args:
            items: List of items to search through
            name: Name to search for (also treated as a case-insensitive regex)

        Returns:
            Matching item or None if not found
        """
        if not name:
            return items[0] if items else None
        # An arbitrary name may not be a valid regex (e.g. "[urgent"); fall back
        # to exact matching only instead of raising re.error.
        try:
            pattern = re.compile(name, re.IGNORECASE)
        except re.error:
            pattern = None
        for item in items:
            # Try exact match first (case-insensitive)
            if item["name"].lower() == name.lower():
                return item
            # Then try regex pattern match
            if pattern is not None and pattern.search(item["name"]):
                return item
        return None

    def _get_space(self, space_name: str) -> Dict[str, Any]:
        """Resolve a space by name; returns the space dict or an error dict."""
        spaces = self._make_request("GET", f"team/{self.master_space_id}/space")
        if "error" in spaces:
            return spaces
        spaces_list = spaces.get("spaces", [])
        if not spaces_list:
            return {"error": "No spaces found"}
        space = self._find_by_name(spaces_list, space_name)
        if not space:
            return {"error": f"Space '{space_name}' not found"}
        return space

    def _get_list(self, space_id: str, list_name: str) -> Dict[str, Any]:
        """Resolve a list by name within a space; returns the list dict or an error dict."""
        lists = self._make_request("GET", f"space/{space_id}/list")
        if "error" in lists:
            return lists
        lists_data = lists.get("lists", [])
        if not lists_data:
            return {"error": "No lists found in space"}
        list_item = self._find_by_name(lists_data, list_name)
        if not list_item:
            return {"error": f"List '{list_name}' not found"}
        return list_item

    def _get_tasks(self, list_id: str) -> List[Dict[str, Any]]:
        """Return the tasks of a list; an API error yields an empty list."""
        tasks = self._make_request("GET", f"list/{list_id}/task")
        if "error" in tasks:
            return []
        return tasks.get("tasks", [])

    def list_tasks(self, space_name: str) -> str:
        """List all tasks in a space.

        Args:
            space_name (str): Name of the space to list tasks from

        Returns:
            str: JSON string containing tasks, or an error payload
        """
        # Get space
        space = self._get_space(space_name)
        if "error" in space:
            return json.dumps(space, indent=2)
        # Get lists
        lists = self._make_request("GET", f"space/{space['id']}/list")
        # BUGFIX: surface API errors instead of mis-reporting them as "No lists found"
        if "error" in lists:
            return json.dumps(lists, indent=2)
        lists_data = lists.get("lists", [])
        if not lists_data:
            return json.dumps({"error": f"No lists found in space '{space_name}'"}, indent=2)
        # Get tasks from all lists
        all_tasks = []
        for list_info in lists_data:
            tasks = self._get_tasks(list_info["id"])
            for task in tasks:
                task["list_name"] = list_info["name"]  # Add list name for context
            all_tasks.extend(tasks)
        return json.dumps({"tasks": all_tasks}, indent=2)

    def create_task(self, space_name: str, task_name: str, task_description: str) -> str:
        """Create a new task in the first list of a space.

        Args:
            space_name (str): Name of the space to create task in
            task_name (str): Name of the task
            task_description (str): Description of the task

        Returns:
            str: JSON string containing created task details, or an error payload
        """
        # Get space
        space = self._get_space(space_name)
        if "error" in space:
            return json.dumps(space, indent=2)
        # Get first list in space
        response = self._make_request("GET", f"space/{space['id']}/list")
        log_debug(f"Lists: {response}")
        # BUGFIX: surface API errors instead of mis-reporting them as "No lists found"
        if "error" in response:
            return json.dumps(response, indent=2)
        lists_data = response.get("lists", [])
        if not lists_data:
            return json.dumps({"error": f"No lists found in space '{space_name}'"}, indent=2)
        list_info = lists_data[0]  # Use first list
        # Create task
        data = {"name": task_name, "description": task_description}
        task = self._make_request("POST", f"list/{list_info['id']}/task", data=data)
        return json.dumps(task, indent=2)

    def list_spaces(self) -> str:
        """List all spaces in the workspace.

        Returns:
            str: JSON string containing list of spaces
        """
        spaces = self._make_request("GET", f"team/{self.master_space_id}/space")
        return json.dumps(spaces, indent=2)

    def list_lists(self, space_name: str) -> str:
        """List all lists in a space.

        Args:
            space_name (str): Name of the space to list lists from

        Returns:
            str: JSON string containing list of lists
        """
        # Get space
        space = self._get_space(space_name)
        if "error" in space:
            return json.dumps(space, indent=2)
        # Get lists (an API error payload is dumped as-is)
        lists = self._make_request("GET", f"space/{space['id']}/list")
        return json.dumps(lists, indent=2)

    def get_task(self, task_id: str) -> str:
        """Get details of a specific task.

        Args:
            task_id (str): The ID of the task

        Returns:
            str: JSON string containing task details
        """
        task = self._make_request("GET", f"task/{task_id}")
        return json.dumps(task, indent=2)

    def update_task(self, task_id: str, **kwargs) -> str:
        """Update a specific task.

        Args:
            task_id (str): The ID of the task
            **kwargs: Task fields to update (name, description, status, priority, etc.)

        Returns:
            str: JSON string containing updated task details
        """
        task = self._make_request("PUT", f"task/{task_id}", data=kwargs)
        return json.dumps(task, indent=2)

    def delete_task(self, task_id: str) -> str:
        """Delete a specific task.

        Args:
            task_id (str): The ID of the task

        Returns:
            str: JSON string containing deletion status
        """
        result = self._make_request("DELETE", f"task/{task_id}")
        if "error" not in result:
            result = {"success": True, "message": f"Task {task_id} deleted successfully"}
        return json.dumps(result, indent=2)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/clickup.py",
"license": "Apache License 2.0",
"lines": 196,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/knowledge.py | from typing import Any, Dict, List, Optional, Union
from agno.filters import FilterExpr
from agno.utils.log import log_info
def get_agentic_or_user_search_filters(
    filters: Optional[Dict[str, Any]], effective_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]]
) -> Dict[str, Any]:
    """Helper function to determine the final filters to use for the search.

    User-supplied filters take priority over agentic ones when both are present.

    Args:
        filters: Filters passed by the agent.
        effective_filters: Filters passed by user.

    Returns:
        Dict[str, Any]: The final filters to use for the search.

    Raises:
        ValueError: If agentic filters would need merging with a list of FilterExpr.
    """
    search_filters = None
    if filters:
        if not effective_filters:
            # Only agentic filters exist: use them as-is
            search_filters = filters
        elif isinstance(effective_filters, dict):
            # Both exist: the user's manual filters override the agentic ones
            search_filters = effective_filters
        elif isinstance(effective_filters, list):
            # Merging a dict with a List[FilterExpr] is unsupported
            raise ValueError(
                "Merging dict and list of filters is not supported; effective_filters should be a dict for search compatibility."
            )
    log_info(f"Filters used by Agent: {search_filters}")
    return search_filters or {}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/knowledge.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/utils/print_response/agent.py | import json
from collections.abc import Set
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union, cast, get_args
from pydantic import BaseModel
from rich.console import Group
from rich.json import JSON
from rich.live import Live
from rich.markdown import Markdown
from rich.status import Status
from rich.text import Text
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.reasoning.step import ReasoningStep
from agno.run.agent import RunEvent, RunOutput, RunOutputEvent, RunPausedEvent
from agno.utils.log import log_warning
from agno.utils.message import get_text_from_message
from agno.utils.response import create_panel, create_paused_run_output_panel, escape_markdown_tags, format_tool_calls
from agno.utils.timer import Timer
if TYPE_CHECKING:
from agno.agent.agent import Agent
def print_response_stream(
    agent: "Agent",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    debug_mode: Optional[bool] = None,
    markdown: bool = False,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
    console: Optional[Any] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """Run the agent with streaming enabled and render the response as live rich panels.

    Synchronous counterpart of ``aprint_response_stream``: the user message, tool
    calls, reasoning steps, and streamed content are redrawn in place on every
    event; memory/session-summary notices are appended after the run completes.
    Returns early (panels left on screen) when the run pauses.
    """
    _response_content: str = ""
    _response_reasoning_content: str = ""
    response_content_batch: Union[str, JSON, Markdown] = ""
    reasoning_steps: List[ReasoningStep] = []
    accumulated_tool_calls: List = []
    with Live(console=console) as live_log:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_log.update(status)
        response_timer = Timer()
        response_timer.start()
        # Flag which indicates if the panels should be rendered
        render = False
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            render = True
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)
        if render:
            live_log.update(Group(*panels))
        input_content = get_text_from_message(input)
        for response_event in agent.run(
            input=input,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            run_id=run_id,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            stream=True,
            knowledge_filters=knowledge_filters,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            dependencies=dependencies,
            metadata=metadata,
            **kwargs,
        ):
            if isinstance(response_event, tuple(get_args(RunOutputEvent))):
                if response_event.is_paused:  # type: ignore
                    response_event = cast(RunPausedEvent, response_event)  # type: ignore
                    response_panel = create_paused_run_output_panel(response_event)  # type: ignore
                    if response_panel is not None:
                        panels.append(response_panel)
                        live_log.update(Group(*panels))
                    return
                if response_event.event == RunEvent.pre_hook_completed:  # type: ignore
                    if response_event.run_input is not None:  # type: ignore
                        input_content = get_text_from_message(response_event.run_input.input_content)  # type: ignore
                if (
                    response_event.event == RunEvent.tool_call_started  # type: ignore
                    and hasattr(response_event, "tool")
                    and response_event.tool is not None
                ):
                    accumulated_tool_calls.append(response_event.tool)
                if response_event.event == RunEvent.run_content:  # type: ignore
                    if hasattr(response_event, "content"):
                        if isinstance(response_event.content, str):
                            # Don't accumulate text content, parser_model will replace it
                            if not (agent.parser_model is not None and agent.output_schema is not None):
                                _response_content += response_event.content
                        elif agent.output_schema is not None and isinstance(response_event.content, BaseModel):
                            try:
                                response_content_batch = JSON(  # type: ignore
                                    response_event.content.model_dump_json(exclude_none=True), indent=2
                                )
                            except Exception as e:
                                log_warning(f"Failed to convert response to JSON: {e}")
                        elif agent.output_schema is not None and isinstance(response_event.content, dict):
                            try:
                                response_content_batch = JSON(json.dumps(response_event.content), indent=2)  # type: ignore
                            except Exception as e:
                                log_warning(f"Failed to convert response to JSON: {e}")
                        else:
                            try:
                                response_content_batch = JSON(json.dumps(response_event.content), indent=4)
                            except Exception as e:
                                log_warning(f"Failed to convert response to JSON: {e}")
                    if hasattr(response_event, "reasoning_content") and response_event.reasoning_content is not None:  # type: ignore
                        _response_reasoning_content += response_event.reasoning_content  # type: ignore
                # Handle streaming reasoning content delta events
                if response_event.event == RunEvent.reasoning_content_delta:  # type: ignore
                    if hasattr(response_event, "reasoning_content") and response_event.reasoning_content is not None:  # type: ignore
                        _response_reasoning_content += response_event.reasoning_content  # type: ignore
                if hasattr(response_event, "reasoning_steps") and response_event.reasoning_steps is not None:  # type: ignore
                    reasoning_steps = response_event.reasoning_steps  # type: ignore
            # Escape special tags before markdown conversion
            if markdown:
                escaped_content = escape_markdown_tags(_response_content, tags_to_include_in_markdown)  # type: ignore
                response_content_batch = Markdown(escaped_content)
            response_content_stream: str = _response_content
            # Check if we have any response content to display
            if response_content_stream and not markdown:
                response_content = response_content_stream
            else:
                response_content = response_content_batch  # type: ignore
            # Sanitize empty Markdown content
            if isinstance(response_content, Markdown):
                if not (response_content.markup and response_content.markup.strip()):
                    response_content = None  # type: ignore
            panels = [status]
            # CONSISTENCY FIX: guard on input_content as well (as the async variant
            # does) so an empty Message panel is never rendered for empty input.
            if input_content and show_message:
                # Convert message to a panel
                message_panel = create_panel(
                    content=Text(input_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)
            additional_panels = build_panels_stream(
                response_content=response_content,
                response_event=response_event,  # type: ignore
                response_timer=response_timer,
                response_reasoning_content_buffer=_response_reasoning_content,
                reasoning_steps=reasoning_steps,
                show_reasoning=show_reasoning,
                show_full_reasoning=show_full_reasoning,
                accumulated_tool_calls=accumulated_tool_calls,
                compression_manager=agent.compression_manager,
            )
            panels.extend(additional_panels)
            if panels:
                live_log.update(Group(*panels))
        if agent.memory_manager is not None and agent.memory_manager.memories_updated:
            memory_panel = create_panel(
                content=Text("Memories updated"),
                title="Memories",
                border_style="green",
            )
            panels.append(memory_panel)
            live_log.update(Group(*panels))
            agent.memory_manager.memories_updated = False
        if agent.session_summary_manager is not None and agent.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            live_log.update(Group(*panels))
            agent.session_summary_manager.summaries_updated = False
        # Clear compression stats after final display
        if agent.compression_manager is not None:
            agent.compression_manager.stats.clear()
        response_timer.stop()
        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]
        live_log.update(Group(*panels))
async def aprint_response_stream(
    agent: "Agent",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    debug_mode: Optional[bool] = None,
    markdown: bool = False,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
    console: Optional[Any] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """Async variant of ``print_response_stream``: consume ``agent.arun(stream=True)``
    and redraw the message, tool-call, reasoning, and response panels live on each
    event; append memory/session-summary notices once the stream ends.
    """
    _response_content: str = ""
    _response_reasoning_content: str = ""
    reasoning_steps: List[ReasoningStep] = []
    response_content_batch: Union[str, JSON, Markdown] = ""
    accumulated_tool_calls: List = []
    with Live(console=console) as live_log:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_log.update(status)
        response_timer = Timer()
        response_timer.start()
        # Flag which indicates if the panels should be rendered
        render = False
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            render = True
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)
        if render:
            live_log.update(Group(*panels))
        result = agent.arun(
            input=input,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            run_id=run_id,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            stream=True,
            knowledge_filters=knowledge_filters,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            dependencies=dependencies,
            metadata=metadata,
            **kwargs,
        )
        input_content = get_text_from_message(input)
        async for resp in result:  # type: ignore
            if isinstance(resp, tuple(get_args(RunOutputEvent))):
                # Paused run: show the paused panel and stop consuming the stream
                if resp.is_paused:
                    response_panel = create_paused_run_output_panel(resp)  # type: ignore
                    if response_panel is not None:
                        panels.append(response_panel)
                        live_log.update(Group(*panels))
                    break
                if (
                    resp.event == RunEvent.tool_call_started  # type: ignore
                    and hasattr(resp, "tool")
                    and resp.tool is not None
                ):
                    accumulated_tool_calls.append(resp.tool)
                # A pre-hook may rewrite the run input; refresh the message panel text
                if resp.event == RunEvent.pre_hook_completed:  # type: ignore
                    if resp.run_input is not None:  # type: ignore
                        input_content = get_text_from_message(resp.run_input.input_content)  # type: ignore
                if resp.event == RunEvent.run_content:  # type: ignore
                    if isinstance(resp.content, str):
                        # Don't accumulate text content, parser_model will replace it
                        if not (agent.parser_model is not None and agent.output_schema is not None):
                            _response_content += resp.content
                    elif agent.output_schema is not None and isinstance(resp.content, BaseModel):
                        try:
                            response_content_batch = JSON(resp.content.model_dump_json(exclude_none=True), indent=2)  # type: ignore
                        except Exception as e:
                            log_warning(f"Failed to convert response to JSON: {e}")
                    elif agent.output_schema is not None and isinstance(resp.content, dict):
                        try:
                            response_content_batch = JSON(json.dumps(resp.content), indent=2)  # type: ignore
                        except Exception as e:
                            log_warning(f"Failed to convert response to JSON: {e}")
                    else:
                        try:
                            response_content_batch = JSON(json.dumps(resp.content), indent=4)
                        except Exception as e:
                            log_warning(f"Failed to convert response to JSON: {e}")
                    if resp.reasoning_content is not None:  # type: ignore
                        _response_reasoning_content += resp.reasoning_content  # type: ignore
                # Handle streaming reasoning content delta events
                if resp.event == RunEvent.reasoning_content_delta:  # type: ignore
                    if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None:  # type: ignore
                        _response_reasoning_content += resp.reasoning_content  # type: ignore
                if hasattr(resp, "reasoning_steps") and resp.reasoning_steps is not None:  # type: ignore
                    reasoning_steps = resp.reasoning_steps  # type: ignore
            response_content_stream: str = _response_content
            # Escape special tags before markdown conversion
            if markdown:
                escaped_content = escape_markdown_tags(_response_content, tags_to_include_in_markdown)  # type: ignore
                response_content_batch = Markdown(escaped_content)
            # Check if we have any response content to display
            if response_content_stream and not markdown:
                response_content = response_content_stream
            else:
                response_content = response_content_batch  # type: ignore
            # Sanitize empty Markdown content
            if isinstance(response_content, Markdown):
                if not (response_content.markup and response_content.markup.strip()):
                    response_content = None  # type: ignore
            # Rebuild the full panel stack from scratch for this event
            panels = [status]
            if input_content and show_message:
                render = True
                # Convert message to a panel
                message_panel = create_panel(
                    content=Text(input_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)
            additional_panels = build_panels_stream(
                response_content=response_content,
                response_event=resp,  # type: ignore
                response_timer=response_timer,
                response_reasoning_content_buffer=_response_reasoning_content,
                reasoning_steps=reasoning_steps,
                show_reasoning=show_reasoning,
                show_full_reasoning=show_full_reasoning,
                accumulated_tool_calls=accumulated_tool_calls,
                compression_manager=agent.compression_manager,
            )
            panels.extend(additional_panels)
            if panels:
                live_log.update(Group(*panels))
        if agent.memory_manager is not None and agent.memory_manager.memories_updated:
            memory_panel = create_panel(
                content=Text("Memories updated"),
                title="Memories",
                border_style="green",
            )
            panels.append(memory_panel)
            live_log.update(Group(*panels))
            agent.memory_manager.memories_updated = False
        if agent.session_summary_manager is not None and agent.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            live_log.update(Group(*panels))
            agent.session_summary_manager.summaries_updated = False
        # Clear compression stats after final display
        if agent.compression_manager is not None:
            agent.compression_manager.stats.clear()
        response_timer.stop()
        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]
        live_log.update(Group(*panels))
def build_panels_stream(
    response_content: Union[str, JSON, Markdown],
    response_event: RunOutputEvent,
    response_timer: Timer,
    response_reasoning_content_buffer: str,
    reasoning_steps: List[ReasoningStep],
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    accumulated_tool_calls: Optional[List] = None,
    compression_manager: Optional[Any] = None,
):
    """Assemble the Rich panels shown while a run is streaming.

    Panels are appended in display order: one per reasoning step, the buffered
    reasoning ("thinking") text, the accumulated tool calls (with optional
    compression stats), the response content, and finally any citations carried
    by the event. Returns the list of panels; the caller renders them.
    """
    panels = []
    # One panel per reasoning step, optionally with full reasoning details.
    if show_reasoning and reasoning_steps:
        for idx, reasoning_step in enumerate(reasoning_steps, 1):
            details = Text.assemble()
            if reasoning_step.title is not None:
                details.append(f"{reasoning_step.title}\n", "bold")
            if reasoning_step.action is not None:
                details.append(Text.from_markup(f"[bold]Action:[/bold] {reasoning_step.action}\n", style="dim"))
            if reasoning_step.result is not None:
                details.append(Text.from_markup(reasoning_step.result, style="dim"))
            # Extra detail is only rendered when explicitly requested.
            if show_full_reasoning and reasoning_step.reasoning is not None:
                details.append(Text.from_markup(f"\n[bold]Reasoning:[/bold] {reasoning_step.reasoning}", style="dim"))
            if show_full_reasoning and reasoning_step.confidence is not None:
                details.append(
                    Text.from_markup(f"\n[bold]Confidence:[/bold] {reasoning_step.confidence}", style="dim")
                )
            panels.append(create_panel(content=details, title=f"Reasoning step {idx}", border_style="green"))
    # Streamed reasoning ("thinking") text, if any has been buffered so far.
    if show_reasoning and response_reasoning_content_buffer:
        panels.append(
            create_panel(
                content=Text(response_reasoning_content_buffer),
                title=f"Thinking ({response_timer.elapsed:.1f}s)",
                border_style="green",
            )
        )
    # Tool calls accumulated across the whole stream, not just the current event.
    if accumulated_tool_calls:
        bullets = "".join(f"• {call}\n" for call in format_tool_calls(accumulated_tool_calls))
        tool_calls_text = bullets.rstrip()
        # Append compression stats if present; the caller clears them after the final render.
        if compression_manager is not None and compression_manager.stats:
            stats = compression_manager.stats
            saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
            orig = stats.get("original_size", 1)
            if stats.get("tool_results_compressed", 0) > 0:
                tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
        panels.append(
            create_panel(
                content=tool_calls_text,
                title="Tool Calls",
                border_style="yellow",
            )
        )
    # The (possibly partial) response content.
    if response_content:
        panels.append(
            create_panel(
                content=response_content,
                title=f"Response ({response_timer.elapsed:.1f}s)",
                border_style="blue",
            )
        )
    # Citations, when the event type carries them and they are populated.
    has_citations = (
        isinstance(response_event, tuple(get_args(RunOutputEvent)))
        and hasattr(response_event, "citations")
        and response_event.citations is not None
        and response_event.citations.urls is not None
    )
    if has_citations:
        md_lines = []
        # Search queries first, separated from the URL list by a blank line.
        if response_event.citations.search_queries:
            md_lines.append("**Search Queries:**")
            md_lines.extend(f"- {query}" for query in response_event.citations.search_queries)
            md_lines.append("")  # Empty line before URLs
        md_lines.extend(
            f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
            for i, citation in enumerate(response_event.citations.urls)
            if citation.url  # Only include citations with valid URLs
        )
        md_content = "\n".join(md_lines)
        if md_content:  # Only create panel if there are citations
            panels.append(
                create_panel(
                    content=Markdown(md_content),
                    title="Citations",
                    border_style="green",
                )
            )
    return panels
def print_response(
    agent: "Agent",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    debug_mode: Optional[bool] = None,
    markdown: bool = False,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
    console: Optional[Any] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """Run `agent.run` synchronously (non-streaming) and pretty-print the result.

    Renders a live Rich display with, in order: the input message panel,
    the reasoning / tool-call / response / citation panels built by
    `build_panels`, and one-shot "Memories updated" / "Session summary
    updated" notifications (their flags are reset after being shown).
    Finally the "Working..." spinner is removed from the display.

    All run-related keyword arguments are forwarded to `agent.run`.

    NOTE: the mutable default for `tags_to_include_in_markdown` is only
    read, never mutated, so the shared-default pitfall does not apply here.
    """
    with Live(console=console) as live_log:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_log.update(status)
        response_timer = Timer()
        response_timer.start()
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)  # type: ignore
            live_log.update(Group(*panels))
        # Run the agent (stream=False: we render everything once the run completes)
        run_response = agent.run(
            input=input,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            run_id=run_id,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            stream=False,
            stream_events=True,
            knowledge_filters=knowledge_filters,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            dependencies=dependencies,
            metadata=metadata,
            **kwargs,
        )
        response_timer.stop()
        if run_response.input is not None and run_response.input.input_content != input:
            # Input was modified during the run: rebuild the display so the
            # message panel shows the input the agent actually received.
            panels = [status]
            if show_message:
                # Convert message to a panel
                message_content = get_text_from_message(run_response.input.input_content)
                message_panel = create_panel(
                    content=Text(message_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)  # type: ignore
                live_log.update(Group(*panels))
        # Reasoning / tool-call / response / citation panels for the finished run.
        additional_panels = build_panels(
            run_response=run_response,
            output_schema=agent.output_schema,  # type: ignore
            response_timer=response_timer,
            show_reasoning=show_reasoning,
            show_full_reasoning=show_full_reasoning,
            tags_to_include_in_markdown=tags_to_include_in_markdown,
            markdown=markdown,
            compression_manager=agent.compression_manager,
        )
        panels.extend(additional_panels)
        # One-shot notification panels; flags are reset after display.
        if agent.memory_manager is not None and agent.memory_manager.memories_updated:
            memory_panel = create_panel(
                content=Text("Memories updated"),
                title="Memories",
                border_style="green",
            )
            panels.append(memory_panel)
            live_log.update(Group(*panels))
            agent.memory_manager.memories_updated = False
        if agent.session_summary_manager is not None and agent.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            live_log.update(Group(*panels))
            agent.session_summary_manager.summaries_updated = False
        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]
        live_log.update(Group(*panels))
async def aprint_response(
    agent: "Agent",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    debug_mode: Optional[bool] = None,
    markdown: bool = False,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    tags_to_include_in_markdown: Set[str] = {"think", "thinking"},
    console: Optional[Any] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
):
    """Async counterpart of `print_response`: await `agent.arun` and pretty-print the result.

    Renders a live Rich display with, in order: the input message panel,
    the reasoning / tool-call / response / citation panels built by
    `build_panels`, and one-shot "Memories updated" / "Session summary
    updated" notifications (their flags are reset after being shown).
    All run-related keyword arguments are forwarded to `agent.arun`.

    Fix: the session-summary check previously used
    `summaries_updated is not None`, which is truthy even when the flag is
    False, so the "Session summary updated" panel was shown on every run
    once the manager existed. It now tests the flag's truthiness and resets
    it after display, matching the synchronous `print_response`.
    """
    with Live(console=console) as live_log:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_log.update(status)
        response_timer = Timer()
        response_timer.start()
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)
            live_log.update(Group(*panels))
        # Run the agent (stream=False: everything is rendered once the run completes)
        run_response = await agent.arun(  # type: ignore[misc]
            input=input,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            run_id=run_id,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            stream=False,
            stream_events=True,
            knowledge_filters=knowledge_filters,
            debug_mode=debug_mode,
            add_history_to_context=add_history_to_context,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            dependencies=dependencies,
            metadata=metadata,
            **kwargs,
        )
        response_timer.stop()
        if run_response.input is not None and run_response.input.input_content != input:
            # Input was modified during the run: rebuild the display so the
            # message panel shows the input the agent actually received.
            panels = [status]
            if show_message:
                # Convert message to a panel
                message_content = get_text_from_message(run_response.input.input_content)
                message_panel = create_panel(
                    content=Text(message_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)  # type: ignore
                live_log.update(Group(*panels))
        # Reasoning / tool-call / response / citation panels for the finished run.
        additional_panels = build_panels(
            run_response=run_response,
            output_schema=agent.output_schema,  # type: ignore
            response_timer=response_timer,
            show_reasoning=show_reasoning,
            show_full_reasoning=show_full_reasoning,
            tags_to_include_in_markdown=tags_to_include_in_markdown,
            markdown=markdown,
            compression_manager=agent.compression_manager,
        )
        panels.extend(additional_panels)
        # One-shot notification panels; flags are reset after display.
        if agent.memory_manager is not None and agent.memory_manager.memories_updated:
            memory_panel = create_panel(
                content=Text("Memories updated"),
                title="Memories",
                border_style="green",
            )
            panels.append(memory_panel)
            live_log.update(Group(*panels))
            agent.memory_manager.memories_updated = False
        # BUGFIX: test the flag itself (not `is not None`) so the panel only shows
        # when a summary was actually updated — consistent with print_response.
        if agent.session_summary_manager is not None and agent.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            live_log.update(Group(*panels))
            agent.session_summary_manager.summaries_updated = False
        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]
        live_log.update(Group(*panels))
def build_panels(
    run_response: RunOutput,
    response_timer: Timer,
    output_schema: Optional[BaseModel] = None,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    markdown: bool = False,
    compression_manager: Optional[Any] = None,
):
    """Build the Rich panels for a completed (non-streaming) run.

    Panel order: paused-run panel (which short-circuits everything else),
    reasoning steps, reasoning ("thinking") content, tool calls (with
    optional compression stats, cleared here after use), the response
    content, and citations. Returns the list of panels for the caller to
    render.

    NOTE(review): `output_schema` is only checked for None-ness here to
    select JSON rendering of structured content; it is annotated
    `Optional[BaseModel]` but callers pass the agent's schema — presumably
    a `BaseModel` subclass rather than an instance (TODO confirm).
    """
    panels = []
    reasoning_steps = []
    # A paused run renders a single dedicated panel and nothing else.
    if isinstance(run_response, RunOutput) and run_response.is_paused:
        response_panel = create_paused_run_output_panel(run_response)
        if response_panel is not None:
            panels.append(response_panel)
        return panels
    if isinstance(run_response, RunOutput) and run_response.reasoning_steps is not None:
        reasoning_steps = run_response.reasoning_steps
    if len(reasoning_steps) > 0 and show_reasoning:
        # Create panels for reasoning steps
        for i, step in enumerate(reasoning_steps, 1):
            # Build step content
            step_content = Text.assemble()
            if step.title is not None:
                step_content.append(f"{step.title}\n", "bold")
            if step.action is not None:
                step_content.append(Text.from_markup(f"[bold]Action:[/bold] {step.action}\n", style="dim"))
            if step.result is not None:
                step_content.append(Text.from_markup(step.result, style="dim"))
            if show_full_reasoning:
                # Add detailed reasoning information if available
                if step.reasoning is not None:
                    step_content.append(Text.from_markup(f"\n[bold]Reasoning:[/bold] {step.reasoning}", style="dim"))
                if step.confidence is not None:
                    step_content.append(Text.from_markup(f"\n[bold]Confidence:[/bold] {step.confidence}", style="dim"))
            reasoning_panel = create_panel(content=step_content, title=f"Reasoning step {i}", border_style="green")
            panels.append(reasoning_panel)
    if isinstance(run_response, RunOutput) and run_response.reasoning_content is not None and show_reasoning:
        # Create panel for thinking
        thinking_panel = create_panel(
            content=Text(run_response.reasoning_content),
            title=f"Thinking ({response_timer.elapsed:.1f}s)",
            border_style="green",
        )
        panels.append(thinking_panel)
    # Add tool calls panel if available
    if isinstance(run_response, RunOutput) and run_response.tools:
        # Create bullet points for each tool call
        tool_calls_content = Text()
        formatted_tool_calls = format_tool_calls(run_response.tools)
        for formatted_tool_call in formatted_tool_calls:
            tool_calls_content.append(f"• {formatted_tool_call}\n")
        tool_calls_text = tool_calls_content.plain.rstrip()
        # Add compression stats if available (this is the batch path, so the
        # stats are cleared here rather than by the caller)
        if compression_manager is not None and compression_manager.stats:
            stats = compression_manager.stats
            saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
            orig = stats.get("original_size", 1)
            if stats.get("tool_results_compressed", 0) > 0:
                tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
            compression_manager.stats.clear()
        tool_calls_panel = create_panel(
            content=tool_calls_text,
            title="Tool Calls",
            border_style="yellow",
        )
        panels.append(tool_calls_panel)
    # Pick the response rendering: Markdown for plain text (when requested),
    # JSON for structured content, plain string otherwise.
    response_content_batch: Union[str, JSON, Markdown] = ""  # type: ignore
    if isinstance(run_response, RunOutput):
        if isinstance(run_response.content, str):
            if markdown:
                escaped_content = escape_markdown_tags(run_response.content, tags_to_include_in_markdown)  # type: ignore
                response_content_batch = Markdown(escaped_content)
            else:
                response_content_batch = run_response.get_content_as_string(indent=4)
        elif output_schema is not None and isinstance(run_response.content, BaseModel):
            try:
                response_content_batch = JSON(run_response.content.model_dump_json(exclude_none=True), indent=2)
            except Exception as e:
                # Fall through with the empty-string default; the panel is still rendered.
                log_warning(f"Failed to convert response to JSON: {e}")
        elif output_schema is not None and isinstance(run_response.content, dict):
            try:
                response_content_batch = JSON(json.dumps(run_response.content), indent=2)
            except Exception as e:
                log_warning(f"Failed to convert response to JSON: {e}")
        else:
            try:
                response_content_batch = JSON(json.dumps(run_response.content), indent=4)
            except Exception as e:
                log_warning(f"Failed to convert response to JSON: {e}")
    # Create panel for response
    response_panel = create_panel(
        content=response_content_batch,
        title=f"Response ({response_timer.elapsed:.1f}s)",
        border_style="blue",
    )
    panels.append(response_panel)
    if (
        isinstance(run_response, RunOutput)
        and run_response.citations is not None
        and run_response.citations.urls is not None
    ):
        md_lines = []
        # Add search queries if present
        if run_response.citations.search_queries:
            md_lines.append("**Search Queries:**")
            for query in run_response.citations.search_queries:
                md_lines.append(f"- {query}")
            md_lines.append("")  # Empty line before URLs
        # Add URL citations
        md_lines.extend(
            f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
            for i, citation in enumerate(run_response.citations.urls)
            if citation.url  # Only include citations with valid URLs
        )
        md_content = "\n".join(md_lines)
        if md_content:  # Only create panel if there are citations
            citations_panel = create_panel(
                content=Markdown(md_content),
                title="Citations",
                border_style="green",
            )
            panels.append(citations_panel)
    return panels
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/print_response/agent.py",
"license": "Apache License 2.0",
"lines": 831,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/print_response/team.py | import json
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Set, Union, get_args
from pydantic import BaseModel
from agno.filters import FilterExpr
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.models.response import ToolExecution
from agno.reasoning.step import ReasoningStep
from agno.run.agent import RunOutput
from agno.run.team import TeamRunEvent, TeamRunOutput, TeamRunOutputEvent
from agno.utils.log import log_warning
from agno.utils.message import get_text_from_message
from agno.utils.response import build_reasoning_step_panel, create_panel, escape_markdown_tags, format_tool_calls
from agno.utils.timer import Timer
if TYPE_CHECKING:
from agno.team.team import Team
def print_response(
    team: "Team",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    console: Optional[Any] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    show_member_responses: Optional[bool] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    markdown: bool = False,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    **kwargs: Any,
) -> None:
    """Run `team.run` synchronously (non-streaming) and pretty-print the result.

    Renders a live Rich display with, in order: the input message panel,
    team-level reasoning panels, per-member reasoning/tool-call/response/
    citation panels (when `show_member_responses` is truthy), team-level
    tool calls (with optional compression stats), the team response,
    team-level citations, and "Memories updated" / "Session summary
    updated" notifications.

    Markdown rendering is decided per output: disabled for the team or a
    member when an `output_schema` is set (structured output is rendered
    as JSON/string by `_parse_response_content` instead).
    """
    import textwrap
    from rich.console import Group
    from rich.json import JSON
    from rich.live import Live
    from rich.markdown import Markdown
    from rich.status import Status
    from rich.text import Text
    from agno.utils.response import format_tool_calls
    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}
    with Live(console=console) as live_console:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_console.update(status)
        response_timer = Timer()
        response_timer.start()
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)
            live_console.update(Group(*panels))
        # Run the agent
        run_response: TeamRunOutput = team.run(  # type: ignore
            input=input,
            run_id=run_id,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            stream=False,
            stream_events=True,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            knowledge_filters=knowledge_filters,
            add_history_to_context=add_history_to_context,
            dependencies=dependencies,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            metadata=metadata,
            debug_mode=debug_mode,
            **kwargs,
        )
        response_timer.stop()
        if run_response.input is not None and run_response.input.input_content != input:
            # Input was modified during the run: rebuild the display so the
            # message panel shows the input the team actually received.
            panels = [status]
            if show_message:
                # Convert message to a panel
                message_content = get_text_from_message(run_response.input.input_content)
                message_panel = create_panel(
                    content=Text(message_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)  # type: ignore
                live_console.update(Group(*panels))
        # Decide Markdown rendering per output: an output_schema disables it
        # because structured output is rendered as JSON/string instead.
        team_markdown = False
        member_markdown = {}
        if markdown:
            if isinstance(team.members, list):
                for member in team.members:
                    if member.id is not None:
                        member_markdown[member.id] = True
            team_markdown = True
        if team.output_schema is not None:
            team_markdown = False
        if isinstance(team.members, list):
            for member in team.members:
                if member.output_schema is not None and member.id is not None:
                    member_markdown[member.id] = False  # type: ignore
        # Handle reasoning
        reasoning_steps = []
        if isinstance(run_response, TeamRunOutput) and run_response.reasoning_steps is not None:
            reasoning_steps = run_response.reasoning_steps
        if len(reasoning_steps) > 0 and show_reasoning:
            # Create panels for reasoning steps
            for i, step in enumerate(reasoning_steps, 1):
                reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
                panels.append(reasoning_panel)
            live_console.update(Group(*panels))
        if isinstance(run_response, TeamRunOutput) and run_response.reasoning_content is not None and show_reasoning:
            # Create panel for thinking
            thinking_panel = create_panel(
                content=Text(run_response.reasoning_content),
                title=f"Thinking ({response_timer.elapsed:.1f}s)",
                border_style="green",
            )
            panels.append(thinking_panel)
            live_console.update(Group(*panels))
        if isinstance(run_response, TeamRunOutput):
            # Handle member responses
            if show_member_responses:
                for member_response in run_response.member_responses:
                    # Handle member reasoning
                    reasoning_steps = []
                    if isinstance(member_response, RunOutput) and member_response.reasoning_steps is not None:
                        reasoning_steps.extend(member_response.reasoning_steps)
                    if len(reasoning_steps) > 0 and show_reasoning:
                        # Create panels for reasoning steps (magenta marks member-level panels)
                        for i, step in enumerate(reasoning_steps, 1):
                            member_reasoning_panel = build_reasoning_step_panel(
                                i, step, show_full_reasoning, color="magenta"
                            )
                            panels.append(member_reasoning_panel)
                    # Add tool calls panel for member if available
                    if hasattr(member_response, "tools") and member_response.tools:
                        member_name = None
                        if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                            member_name = team._get_member_name(member_response.agent_id)
                        elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                            member_name = team._get_member_name(member_response.team_id)
                        if member_name:
                            formatted_calls = format_tool_calls(member_response.tools)
                            if formatted_calls:
                                console_width = console.width if console else 80
                                # NOTE(review): width is console width *plus* 30 — presumably to
                                # let Rich do the final wrapping inside the panel; confirm intent.
                                panel_width = console_width + 30
                                lines = []
                                for call in formatted_calls:
                                    wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="  ")
                                    lines.append(wrapped_call)
                                tool_calls_text = "\n\n".join(lines)
                                member_tool_calls_panel = create_panel(
                                    content=tool_calls_text,
                                    title=f"{member_name} Tool Calls",
                                    border_style="yellow",
                                )
                                panels.append(member_tool_calls_panel)
                                live_console.update(Group(*panels))
                    show_markdown = False
                    if member_markdown:
                        if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                            show_markdown = member_markdown.get(member_response.agent_id, False)
                        elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                            show_markdown = member_markdown.get(member_response.team_id, False)
                    member_response_content: Union[str, JSON, Markdown] = _parse_response_content(  # type: ignore
                        member_response,
                        tags_to_include_in_markdown,
                        show_markdown=show_markdown,
                    )
                    # Create panel for member response
                    # NOTE(review): if neither branch matches (no agent_id/team_id),
                    # `member_response_panel` is unbound and the append below would
                    # raise NameError — confirm whether that case can occur.
                    if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                        member_response_panel = create_panel(
                            content=member_response_content,
                            title=f"{team._get_member_name(member_response.agent_id)} Response",
                            border_style="magenta",
                        )
                    elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                        member_response_panel = create_panel(
                            content=member_response_content,
                            title=f"{team._get_member_name(member_response.team_id)} Response",
                            border_style="magenta",
                        )
                    panels.append(member_response_panel)
                    if member_response.citations is not None and member_response.citations.urls is not None:
                        md_lines = []
                        # Add search queries if present
                        if member_response.citations.search_queries:
                            md_lines.append("**Search Queries:**")
                            for query in member_response.citations.search_queries:
                                md_lines.append(f"- {query}")
                            md_lines.append("")  # Empty line before URLs
                        # Add URL citations
                        md_lines.extend(
                            f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                            for i, citation in enumerate(member_response.citations.urls)
                            if citation.url  # Only include citations with valid URLs
                        )
                        md_content = "\n".join(md_lines)
                        if md_content:  # Only create panel if there are citations
                            citations_panel = create_panel(
                                content=Markdown(md_content),
                                title="Citations",
                                border_style="magenta",
                            )
                            panels.append(citations_panel)
                            live_console.update(Group(*panels))
            # Add team level tool calls panel if available
            if run_response.tools:
                formatted_calls = format_tool_calls(run_response.tools)
                if formatted_calls:
                    console_width = console.width if console else 80
                    # Allow for panel borders and padding
                    panel_width = console_width + 30
                    lines = []
                    for call in formatted_calls:
                        wrapped_call = textwrap.fill(
                            f"• {call}", width=panel_width, subsequent_indent="  "
                        )  # Indent continuation lines
                        lines.append(wrapped_call)
                    # Join with blank lines between items
                    tool_calls_text = "\n\n".join(lines)
                    # Add compression stats at end of tool calls
                    if team.compression_manager is not None and team.compression_manager.stats:
                        stats = team.compression_manager.stats
                        saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
                        orig = stats.get("original_size", 1)
                        if stats.get("tool_results_compressed", 0) > 0:
                            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
                        team.compression_manager.stats.clear()
                    team_tool_calls_panel = create_panel(
                        content=tool_calls_text,
                        title="Team Tool Calls",
                        border_style="yellow",
                    )
                    panels.append(team_tool_calls_panel)
                    live_console.update(Group(*panels))
            response_content_batch: Union[str, JSON, Markdown] = _parse_response_content(  # type: ignore
                run_response, tags_to_include_in_markdown, show_markdown=team_markdown
            )
            # Create panel for response
            response_panel = create_panel(
                content=response_content_batch,
                title=f"Response ({response_timer.elapsed:.1f}s)",
                border_style="blue",
            )
            panels.append(response_panel)
            # Add citations
            if run_response.citations is not None and run_response.citations.urls is not None:
                md_lines = []
                # Add search queries if present
                if run_response.citations.search_queries:
                    md_lines.append("**Search Queries:**")
                    for query in run_response.citations.search_queries:
                        md_lines.append(f"- {query}")
                    md_lines.append("")  # Empty line before URLs
                # Add URL citations
                md_lines.extend(
                    f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                    for i, citation in enumerate(run_response.citations.urls)
                    if citation.url  # Only include citations with valid URLs
                )
                md_content = "\n".join(md_lines)
                if md_content:  # Only create panel if there are citations
                    citations_panel = create_panel(
                        content=Markdown(md_content),
                        title="Citations",
                        border_style="green",
                    )
                    panels.append(citations_panel)
        if team.memory_manager is not None:
            if team.memory_manager.memories_updated:
                memory_panel = create_panel(
                    content=Text("Memories updated"),
                    title="Memories",
                    border_style="green",
                )
                panels.append(memory_panel)
                # NOTE(review): unlike the agent-level printer, `memories_updated`
                # is not reset to False here — confirm whether that is intentional.
        if team.session_summary_manager is not None and team.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            team.session_summary_manager.summaries_updated = False
        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]
        live_console.update(Group(*panels))
def print_response_stream(
    team: "Team",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    console: Optional[Any] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    show_member_responses: Optional[bool] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    markdown: bool = False,
    stream_events: bool = False,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    **kwargs: Any,
) -> None:
    """Run the team with ``stream=True`` and render the response live in the terminal.

    Panels (message, reasoning, member tool calls / responses, team tool calls,
    response, citations, memory/session-summary notices) are re-created on every
    streamed chunk and pushed to a ``rich.live.Live`` console; after the stream
    ends, the panels are rebuilt once more from the final run output so they
    appear in canonical order.

    Args:
        team: The team to run.
        input: The user input (message, messages, dict, or model).
        console: Optional rich console; defaults to rich's global console.
        show_message: Render the input message panel.
        show_reasoning: Render reasoning steps / thinking content.
        show_full_reasoning: Show full (unabridged) reasoning step content.
        show_member_responses: Render individual member responses and tool calls.
        tags_to_include_in_markdown: Tags escaped before markdown rendering
            (defaults to ``{"think", "thinking"}``).
        markdown: Render the response content as markdown (disabled automatically
            when the team has an ``output_schema``).
        stream_events: Forced to True — intermediate events are required to
            render progress.
        **kwargs: Forwarded to ``team.run``.

    Returns:
        None. All output goes to the console.
    """
    import textwrap

    from rich.console import Group
    from rich.json import JSON
    from rich.live import Live
    from rich.markdown import Markdown
    from rich.status import Status
    from rich.text import Text

    from agno.utils.response import format_tool_calls

    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}

    stream_events = True  # With streaming print response, we need to stream intermediate steps

    _response_content: str = ""
    _response_reasoning_content: str = ""
    reasoning_steps: List[ReasoningStep] = []

    # Track tool calls by member and team
    member_tool_calls = {}  # type: ignore
    team_tool_calls = []  # type: ignore
    # Track processed tool calls to avoid duplicates
    processed_tool_calls = set()

    with Live(console=console) as live_console:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_console.update(status)
        response_timer = Timer()
        response_timer.start()
        # Flag which indicates if the panels should be rendered
        render = False
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            render = True
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)
        if render:
            live_console.update(Group(*panels))

        # Get response from the team
        stream_resp = team.run(  # type: ignore
            input=input,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            stream=True,
            stream_events=stream_events,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            run_id=run_id,
            knowledge_filters=knowledge_filters,
            add_history_to_context=add_history_to_context,
            dependencies=dependencies,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            metadata=metadata,
            debug_mode=debug_mode,
            yield_run_output=True,
            **kwargs,
        )

        input_content = get_text_from_message(input)
        team_markdown = None
        member_markdown = {}
        # Dict to track member response panels by member_id
        member_response_panels = {}
        final_run_response = None
        # FIX: pre-bind the loop variable — the post-loop citation blocks below
        # reference `resp`, which would raise NameError if the stream is empty.
        resp = None
        for resp in stream_resp:
            if team_markdown is None:
                if markdown:
                    team_markdown = True
                else:
                    team_markdown = False
                # Structured output is rendered as JSON, never markdown
                if team.output_schema is not None:
                    team_markdown = False

            if isinstance(resp, TeamRunOutput):
                # Final aggregated output (yielded because yield_run_output=True)
                final_run_response = resp
                continue

            if isinstance(resp, tuple(get_args(TeamRunOutputEvent))):
                if resp.event == TeamRunEvent.run_content:
                    if isinstance(resp.content, str):
                        _response_content += resp.content
                    elif team.output_schema is not None and isinstance(resp.content, BaseModel):
                        try:
                            _response_content = JSON(resp.content.model_dump_json(exclude_none=True), indent=2)  # type: ignore
                        except Exception as e:
                            log_warning(f"Failed to convert response to JSON: {e}")
                    elif team.output_schema is not None and isinstance(resp.content, dict):
                        try:
                            _response_content = JSON(json.dumps(resp.content), indent=2)  # type: ignore
                        except Exception as e:
                            log_warning(f"Failed to convert response to JSON: {e}")
                if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None:  # type: ignore
                    _response_reasoning_content += resp.reasoning_content  # type: ignore
                if hasattr(resp, "reasoning_steps") and resp.reasoning_steps is not None:  # type: ignore
                    reasoning_steps = resp.reasoning_steps  # type: ignore

                if resp.event == TeamRunEvent.pre_hook_completed:  # type: ignore
                    # A pre-hook may rewrite the input; show the effective input
                    if resp.run_input is not None:  # type: ignore
                        input_content = get_text_from_message(resp.run_input.input_content)  # type: ignore

                # Collect team tool calls, avoiding duplicates
                if resp.event == TeamRunEvent.tool_call_completed and resp.tool:  # type: ignore
                    tool = resp.tool  # type: ignore
                    # Generate a unique ID for this tool call
                    if tool.tool_call_id:
                        tool_id = tool.tool_call_id
                    else:
                        tool_id = str(hash(str(tool)))
                    if tool_id not in processed_tool_calls:
                        processed_tool_calls.add(tool_id)
                        team_tool_calls.append(tool)

                # Collect member tool calls, avoiding duplicates
                if show_member_responses and hasattr(resp, "member_responses") and resp.member_responses:
                    for member_response in resp.member_responses:
                        member_id = None
                        if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                            member_id = member_response.agent_id
                        elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                            member_id = member_response.team_id
                        if member_id and hasattr(member_response, "tools") and member_response.tools:
                            if member_id not in member_tool_calls:
                                member_tool_calls[member_id] = []
                            for tool in member_response.tools:
                                # Generate a unique ID for this tool call
                                if tool.tool_call_id:
                                    tool_id = tool.tool_call_id
                                else:
                                    tool_id = str(hash(str(tool)))
                                if tool_id not in processed_tool_calls:
                                    processed_tool_calls.add(tool_id)
                                    member_tool_calls[member_id].append(tool)

            response_content_stream: Union[str, Markdown] = _response_content
            # Escape special tags before markdown conversion
            if team_markdown:
                escaped_content = escape_markdown_tags(_response_content, tags_to_include_in_markdown)
                response_content_stream = Markdown(escaped_content)

            # Create new panels for each chunk
            panels = []

            if input_content and show_message:
                render = True
                # Convert message to a panel
                message_panel = create_panel(
                    content=Text(input_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)

            if len(reasoning_steps) > 0 and show_reasoning:
                render = True
                # Create panels for reasoning steps
                for i, step in enumerate(reasoning_steps, 1):
                    reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
                    panels.append(reasoning_panel)

            if len(_response_reasoning_content) > 0 and show_reasoning:
                render = True
                # Create panel for thinking
                thinking_panel = create_panel(
                    content=Text(_response_reasoning_content),
                    title=f"Thinking ({response_timer.elapsed:.1f}s)",
                    border_style="green",
                )
                panels.append(thinking_panel)
            elif _response_content == "":
                # Keep showing status if no content yet
                panels.append(status)

            # Process member responses and their tool calls
            for member_response in (
                resp.member_responses if show_member_responses and hasattr(resp, "member_responses") else []
            ):
                member_id = None
                member_name = "Team Member"
                if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                    member_id = member_response.agent_id
                    member_name = team._get_member_name(member_id)
                elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                    member_id = member_response.team_id
                    member_name = team._get_member_name(member_id)

                # If we have tool calls for this member, display them
                if member_id in member_tool_calls and member_tool_calls[member_id]:
                    formatted_calls = format_tool_calls(member_tool_calls[member_id])
                    if formatted_calls:
                        console_width = console.width if console else 80
                        panel_width = console_width + 30
                        lines = []
                        for call in formatted_calls:
                            wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="    ")
                            lines.append(wrapped_call)
                        tool_calls_text = "\n\n".join(lines)
                        member_tool_calls_panel = create_panel(
                            content=tool_calls_text,
                            title=f"{member_name} Tool Calls",
                            border_style="yellow",
                        )
                        panels.append(member_tool_calls_panel)

                # Process member response content
                if show_member_responses and member_id is not None:
                    show_markdown = False
                    if markdown:
                        show_markdown = True
                    member_response_content = _parse_response_content(
                        member_response,
                        tags_to_include_in_markdown,
                        show_markdown=show_markdown,
                    )
                    member_response_panel = create_panel(
                        content=member_response_content,
                        title=f"{member_name} Response",
                        border_style="magenta",
                    )
                    panels.append(member_response_panel)
                    # Store for reference
                    if member_id is not None:
                        member_response_panels[member_id] = member_response_panel

            # Add team tool calls panel if available (before the team response)
            if team_tool_calls:
                formatted_calls = format_tool_calls(team_tool_calls)
                if formatted_calls:
                    console_width = console.width if console else 80
                    panel_width = console_width + 30
                    lines = []
                    # Create a set to track already added calls by their string representation
                    added_calls = set()
                    for call in formatted_calls:
                        if call not in added_calls:
                            added_calls.add(call)
                            # Wrap the call text to fit within the panel
                            wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="    ")
                            lines.append(wrapped_call)
                    # Join with blank lines between items
                    tool_calls_text = "\n\n".join(lines)
                    # Add compression stats if available (don't clear - will be cleared in final_panels)
                    if team.compression_manager is not None and team.compression_manager.stats:
                        stats = team.compression_manager.stats
                        saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
                        orig = stats.get("original_size", 1)
                        if stats.get("tool_results_compressed", 0) > 0:
                            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
                    team_tool_calls_panel = create_panel(
                        content=tool_calls_text,
                        title="Team Tool Calls",
                        border_style="yellow",
                    )
                    panels.append(team_tool_calls_panel)

            # Add the team response panel at the end
            if response_content_stream:
                render = True
                # Create panel for response
                response_panel = create_panel(
                    content=response_content_stream,
                    title=f"Response ({response_timer.elapsed:.1f}s)",
                    border_style="blue",
                )
                panels.append(response_panel)
            if render or len(panels) > 0:
                live_console.update(Group(*panels))

        response_timer.stop()

        run_response = final_run_response

        # Add citations (from the last streamed event, if any)
        if resp is not None and hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
            md_lines = []
            # Add search queries if present
            if resp.citations.search_queries:
                md_lines.append("**Search Queries:**")
                for query in resp.citations.search_queries:
                    md_lines.append(f"- {query}")
                md_lines.append("")  # Empty line before URLs
            # Add URL citations
            md_lines.extend(
                f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                for i, citation in enumerate(resp.citations.urls)
                if citation.url  # Only include citations with valid URLs
            )
            md_content = "\n".join(md_lines)
            if md_content:  # Only create panel if there are citations
                citations_panel = create_panel(
                    content=Markdown(md_content),
                    title="Citations",
                    border_style="green",
                )
                panels.append(citations_panel)
                live_console.update(Group(*panels))

        if team.memory_manager is not None:
            if team.memory_manager.memories_updated:
                memory_panel = create_panel(
                    content=Text("Memories updated"),
                    title="Memories",
                    border_style="green",
                )
                panels.append(memory_panel)
                live_console.update(Group(*panels))

        if team.session_summary_manager is not None and team.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            live_console.update(Group(*panels))
            team.session_summary_manager.summaries_updated = False

        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]

        if markdown:
            if isinstance(team.members, list):
                for member in team.members:
                    if member.id is not None:
                        member_markdown[member.id] = True
        if isinstance(team.members, list):
            for member in team.members:
                # Members with structured output never render as markdown
                if member.output_schema is not None and member.id is not None:
                    member_markdown[member.id] = False  # type: ignore

        # Final panels assembly - we'll recreate the panels from scratch to ensure correct order
        final_panels = []

        # Start with the message
        if input_content and show_message:
            message_panel = create_panel(
                content=Text(input_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            final_panels.append(message_panel)

        # Add reasoning steps
        if reasoning_steps and show_reasoning:
            for i, step in enumerate(reasoning_steps, 1):
                reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
                final_panels.append(reasoning_panel)

        # Add thinking panel if available
        if _response_reasoning_content and show_reasoning:
            thinking_panel = create_panel(
                content=Text(_response_reasoning_content),
                title=f"Thinking ({response_timer.elapsed:.1f}s)",
                border_style="green",
            )
            final_panels.append(thinking_panel)

        # Add member tool calls and responses in correct order
        if show_member_responses and run_response is not None and hasattr(run_response, "member_responses"):
            for i, member_response in enumerate(run_response.member_responses):  # type: ignore
                member_id = None
                if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                    member_id = member_response.agent_id
                elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                    member_id = member_response.team_id
                if member_id:
                    # First add tool calls if any
                    if member_id in member_tool_calls and member_tool_calls[member_id]:
                        formatted_calls = format_tool_calls(member_tool_calls[member_id])
                        if formatted_calls:
                            console_width = console.width if console else 80
                            panel_width = console_width + 30
                            lines = []
                            for call in formatted_calls:
                                wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="    ")
                                lines.append(wrapped_call)
                            tool_calls_text = "\n\n".join(lines)
                            member_name = team._get_member_name(member_id)
                            member_tool_calls_panel = create_panel(
                                content=tool_calls_text,
                                title=f"{member_name} Tool Calls",
                                border_style="yellow",
                            )
                            final_panels.append(member_tool_calls_panel)

                    # Add reasoning steps if any
                    reasoning_steps = []
                    if member_response.reasoning_steps is not None:
                        reasoning_steps = member_response.reasoning_steps
                    if reasoning_steps and show_reasoning:
                        for j, step in enumerate(reasoning_steps, 1):
                            member_reasoning_panel = build_reasoning_step_panel(
                                j, step, show_full_reasoning, color="magenta"
                            )
                            final_panels.append(member_reasoning_panel)

                    # Then add response
                    show_markdown = False
                    if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                        show_markdown = member_markdown.get(member_response.agent_id, False)
                    elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                        show_markdown = member_markdown.get(member_response.team_id, False)

                    member_response_content = _parse_response_content(  # type: ignore
                        member_response,
                        tags_to_include_in_markdown,
                        show_markdown=show_markdown,
                    )
                    member_name = "Team Member"
                    if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                        member_name = team._get_member_name(member_response.agent_id)
                    elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                        member_name = team._get_member_name(member_response.team_id)
                    member_response_panel = create_panel(
                        content=member_response_content,
                        title=f"{member_name} Response",
                        border_style="magenta",
                    )
                    final_panels.append(member_response_panel)

                    # Add citations if any
                    if member_response.citations is not None and member_response.citations.urls is not None:
                        md_lines = []
                        # Add search queries if present
                        if member_response.citations.search_queries:
                            md_lines.append("**Search Queries:**")
                            for query in member_response.citations.search_queries:
                                md_lines.append(f"- {query}")
                            md_lines.append("")  # Empty line before URLs
                        # Add URL citations
                        md_lines.extend(
                            f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                            for i, citation in enumerate(member_response.citations.urls)
                            if citation.url  # Only include citations with valid URLs
                        )
                        md_content = "\n".join(md_lines)
                        if md_content:  # Only create panel if there are citations
                            citations_panel = create_panel(
                                content=Markdown(md_content),
                                title="Citations",
                                border_style="magenta",
                            )
                            final_panels.append(citations_panel)

        # Add team tool calls before team response
        if team_tool_calls:
            formatted_calls = format_tool_calls(team_tool_calls)
            if formatted_calls:
                console_width = console.width if console else 80
                panel_width = console_width + 30
                lines = []
                # Create a set to track already added calls by their string representation
                added_calls = set()
                for call in formatted_calls:
                    if call not in added_calls:
                        added_calls.add(call)
                        # Wrap the call text to fit within the panel
                        wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="    ")
                        lines.append(wrapped_call)
                tool_calls_text = "\n\n".join(lines)
                # Add compression stats at end of tool calls
                if team.compression_manager is not None and team.compression_manager.stats:
                    stats = team.compression_manager.stats
                    saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
                    orig = stats.get("original_size", 1)
                    if stats.get("tool_results_compressed", 0) > 0:
                        tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
                    team.compression_manager.stats.clear()
                team_tool_calls_panel = create_panel(
                    content=tool_calls_text,
                    title="Team Tool Calls",
                    border_style="yellow",
                )
                final_panels.append(team_tool_calls_panel)

        # Add team response
        if _response_content:
            response_content_stream = _response_content
            if team_markdown:
                escaped_content = escape_markdown_tags(_response_content, tags_to_include_in_markdown)
                response_content_stream = Markdown(escaped_content)
            response_panel = create_panel(
                content=response_content_stream,
                title=f"Response ({response_timer.elapsed:.1f}s)",
                border_style="blue",
            )
            final_panels.append(response_panel)

        # Add team citations (from the last streamed event, if any)
        if resp is not None and hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
            md_lines = []
            # Add search queries if present
            if resp.citations.search_queries:
                md_lines.append("**Search Queries:**")
                for query in resp.citations.search_queries:
                    md_lines.append(f"- {query}")
                md_lines.append("")  # Empty line before URLs
            # Add URL citations
            md_lines.extend(
                f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                for i, citation in enumerate(resp.citations.urls)
                if citation.url  # Only include citations with valid URLs
            )
            md_content = "\n".join(md_lines)
            if md_content:  # Only create panel if there are citations
                citations_panel = create_panel(
                    content=Markdown(md_content),
                    title="Citations",
                    border_style="green",
                )
                final_panels.append(citations_panel)

        # Final update with correctly ordered panels
        live_console.update(Group(*final_panels))
async def aprint_response(
    team: "Team",
    input: Union[List, Dict, str, Message, BaseModel, List[Message]],
    console: Optional[Any] = None,
    show_message: bool = True,
    show_reasoning: bool = True,
    show_full_reasoning: bool = False,
    show_member_responses: Optional[bool] = None,
    tags_to_include_in_markdown: Optional[Set[str]] = None,
    session_id: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    run_id: Optional[str] = None,
    audio: Optional[Sequence[Audio]] = None,
    images: Optional[Sequence[Image]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[File]] = None,
    markdown: bool = False,
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    add_history_to_context: Optional[bool] = None,
    dependencies: Optional[Dict[str, Any]] = None,
    add_dependencies_to_context: Optional[bool] = None,
    add_session_state_to_context: Optional[bool] = None,
    metadata: Optional[Dict[str, Any]] = None,
    debug_mode: Optional[bool] = None,
    **kwargs: Any,
) -> None:
    """Run the team asynchronously (non-streaming) and render the full response.

    Shows a "Working..." spinner while awaiting ``team.arun``, then renders the
    message, reasoning, member tool calls and responses, team tool calls, the
    team response, citations, and memory/session-summary notices as rich panels.

    Args:
        team: The team to run.
        input: The user input (message, messages, dict, or model).
        console: Optional rich console; defaults to rich's global console.
        show_message: Render the input message panel.
        show_reasoning: Render reasoning steps / thinking content.
        show_full_reasoning: Show full (unabridged) reasoning step content.
        show_member_responses: Render individual member responses and tool calls.
        tags_to_include_in_markdown: Tags escaped before markdown rendering
            (defaults to ``{"think", "thinking"}``).
        markdown: Render response content as markdown (disabled automatically
            when the team or a member has an ``output_schema``).
        **kwargs: Forwarded to ``team.arun``.

    Returns:
        None. All output goes to the console.
    """
    import textwrap

    from rich.console import Group
    from rich.json import JSON
    from rich.live import Live
    from rich.markdown import Markdown
    from rich.status import Status
    from rich.text import Text

    from agno.utils.response import format_tool_calls

    if not tags_to_include_in_markdown:
        tags_to_include_in_markdown = {"think", "thinking"}

    with Live(console=console) as live_console:
        status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
        live_console.update(status)
        response_timer = Timer()
        response_timer.start()
        # Panels to be rendered
        panels = [status]
        # First render the message panel if the message is not None
        if input and show_message:
            # Convert message to a panel
            message_content = get_text_from_message(input)
            message_panel = create_panel(
                content=Text(message_content, style="green"),
                title="Message",
                border_style="cyan",
            )
            panels.append(message_panel)
        live_console.update(Group(*panels))

        # Run the agent
        run_response: TeamRunOutput = await team.arun(  # type: ignore
            input=input,
            run_id=run_id,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            stream=False,
            stream_events=True,
            session_id=session_id,
            session_state=session_state,
            user_id=user_id,
            knowledge_filters=knowledge_filters,
            add_history_to_context=add_history_to_context,
            dependencies=dependencies,
            add_dependencies_to_context=add_dependencies_to_context,
            add_session_state_to_context=add_session_state_to_context,
            metadata=metadata,
            debug_mode=debug_mode,
            **kwargs,
        )
        response_timer.stop()

        if run_response.input is not None and run_response.input.input_content != input:
            # Input was modified during the run (e.g. by a pre-hook); re-render it
            panels = [status]
            if show_message:
                # Convert message to a panel
                message_content = get_text_from_message(run_response.input.input_content)
                message_panel = create_panel(
                    content=Text(message_content, style="green"),
                    title="Message",
                    border_style="cyan",
                )
                panels.append(message_panel)  # type: ignore
            live_console.update(Group(*panels))

        team_markdown = False
        member_markdown = {}
        if markdown:
            if isinstance(team.members, list):
                for member in team.members:
                    if member.id is not None:
                        member_markdown[member.id] = True
            team_markdown = True
        # Structured output is rendered as JSON, never markdown
        if team.output_schema is not None:
            team_markdown = False
        if isinstance(team.members, list):
            for member in team.members:
                if member.output_schema is not None and member.id is not None:
                    member_markdown[member.id] = False  # type: ignore

        # Handle reasoning
        reasoning_steps = []
        if isinstance(run_response, TeamRunOutput) and run_response.reasoning_steps is not None:
            reasoning_steps = run_response.reasoning_steps
        if len(reasoning_steps) > 0 and show_reasoning:
            # Create panels for reasoning steps
            for i, step in enumerate(reasoning_steps, 1):
                reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
                panels.append(reasoning_panel)
            live_console.update(Group(*panels))

        if isinstance(run_response, TeamRunOutput) and run_response.reasoning_content is not None and show_reasoning:
            # Create panel for thinking
            thinking_panel = create_panel(
                content=Text(run_response.reasoning_content),
                title=f"Thinking ({response_timer.elapsed:.1f}s)",
                border_style="green",
            )
            panels.append(thinking_panel)
            live_console.update(Group(*panels))

        if isinstance(run_response, TeamRunOutput):
            # Handle member responses
            if show_member_responses:
                for member_response in run_response.member_responses:
                    # Handle member reasoning
                    reasoning_steps = []
                    if isinstance(member_response, RunOutput) and member_response.reasoning_steps is not None:
                        reasoning_steps.extend(member_response.reasoning_steps)
                    if len(reasoning_steps) > 0 and show_reasoning:
                        # Create panels for reasoning steps
                        for i, step in enumerate(reasoning_steps, 1):
                            member_reasoning_panel = build_reasoning_step_panel(
                                i, step, show_full_reasoning, color="magenta"
                            )
                            panels.append(member_reasoning_panel)

                    # Add tool calls panel for member if available
                    if hasattr(member_response, "tools") and member_response.tools:
                        member_name = None
                        if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                            member_name = team._get_member_name(member_response.agent_id)
                        elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                            member_name = team._get_member_name(member_response.team_id)
                        if member_name:
                            # Format tool calls
                            formatted_calls = format_tool_calls(member_response.tools)
                            if formatted_calls:
                                console_width = console.width if console else 80
                                panel_width = console_width + 30
                                lines = []
                                for call in formatted_calls:
                                    wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="    ")
                                    lines.append(wrapped_call)
                                tool_calls_text = "\n\n".join(lines)
                                member_tool_calls_panel = create_panel(
                                    content=tool_calls_text,
                                    title=f"{member_name} Tool Calls",
                                    border_style="yellow",
                                )
                                panels.append(member_tool_calls_panel)
                                live_console.update(Group(*panels))

                    show_markdown = False
                    if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                        show_markdown = member_markdown.get(member_response.agent_id, False)
                    elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                        show_markdown = member_markdown.get(member_response.team_id, False)

                    member_response_content: Union[str, JSON, Markdown] = _parse_response_content(  # type: ignore
                        member_response,
                        tags_to_include_in_markdown,
                        show_markdown=show_markdown,
                    )
                    # Create panel for member response.
                    # FIX: initialize to None each iteration — previously, a member
                    # response with no agent_id/team_id raised UnboundLocalError on the
                    # first iteration, or re-appended the previous member's panel.
                    member_response_panel = None
                    if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
                        member_response_panel = create_panel(
                            content=member_response_content,
                            title=f"{team._get_member_name(member_response.agent_id)} Response",
                            border_style="magenta",
                        )
                    elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
                        member_response_panel = create_panel(
                            content=member_response_content,
                            title=f"{team._get_member_name(member_response.team_id)} Response",
                            border_style="magenta",
                        )
                    if member_response_panel is not None:
                        panels.append(member_response_panel)

                    if member_response.citations is not None and member_response.citations.urls is not None:
                        md_lines = []
                        # Add search queries if present
                        if member_response.citations.search_queries:
                            md_lines.append("**Search Queries:**")
                            for query in member_response.citations.search_queries:
                                md_lines.append(f"- {query}")
                            md_lines.append("")  # Empty line before URLs
                        # Add URL citations
                        md_lines.extend(
                            f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                            for i, citation in enumerate(member_response.citations.urls)
                            if citation.url  # Only include citations with valid URLs
                        )
                        md_content = "\n".join(md_lines)
                        if md_content:
                            citations_panel = create_panel(
                                content=Markdown(md_content),
                                title="Citations",
                                border_style="magenta",
                            )
                            panels.append(citations_panel)
                    live_console.update(Group(*panels))

            # Add team level tool calls panel if available
            if run_response.tools:
                formatted_calls = format_tool_calls(run_response.tools)
                if formatted_calls:
                    console_width = console.width if console else 80
                    # Allow for panel borders and padding
                    panel_width = console_width + 30
                    lines = []
                    for call in formatted_calls:
                        # Wrap the call text to fit within the panel
                        wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent="    ")
                        lines.append(wrapped_call)
                    tool_calls_text = "\n\n".join(lines)
                    # Add compression stats at end of tool calls
                    if team.compression_manager is not None and team.compression_manager.stats:
                        stats = team.compression_manager.stats
                        saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
                        orig = stats.get("original_size", 1)
                        if stats.get("tool_results_compressed", 0) > 0:
                            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
                        team.compression_manager.stats.clear()
                    team_tool_calls_panel = create_panel(
                        content=tool_calls_text,
                        title="Team Tool Calls",
                        border_style="yellow",
                    )
                    panels.append(team_tool_calls_panel)
                    live_console.update(Group(*panels))

            response_content_batch: Union[str, JSON, Markdown] = _parse_response_content(  # type: ignore
                run_response, tags_to_include_in_markdown, show_markdown=team_markdown
            )

            # Create panel for response
            response_panel = create_panel(
                content=response_content_batch,
                title=f"Response ({response_timer.elapsed:.1f}s)",
                border_style="blue",
            )
            panels.append(response_panel)

            # Add citations
            if run_response.citations is not None and run_response.citations.urls is not None:
                md_lines = []
                # Add search queries if present
                if run_response.citations.search_queries:
                    md_lines.append("**Search Queries:**")
                    for query in run_response.citations.search_queries:
                        md_lines.append(f"- {query}")
                    md_lines.append("")  # Empty line before URLs
                # Add URL citations
                md_lines.extend(
                    f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
                    for i, citation in enumerate(run_response.citations.urls)
                    if citation.url  # Only include citations with valid URLs
                )
                md_content = "\n".join(md_lines)
                if md_content:  # Only create panel if there are citations
                    citations_panel = create_panel(
                        content=Markdown(md_content),
                        title="Citations",
                        border_style="green",
                    )
                    panels.append(citations_panel)

        if team.memory_manager is not None:
            if team.memory_manager.memories_updated:
                memory_panel = create_panel(
                    content=Text("Memories updated"),
                    title="Memories",
                    border_style="green",
                )
                panels.append(memory_panel)

        if team.session_summary_manager is not None and team.session_summary_manager.summaries_updated:
            summary_panel = create_panel(
                content=Text("Session summary updated"),
                title="Session Summary",
                border_style="green",
            )
            panels.append(summary_panel)
            team.session_summary_manager.summaries_updated = False

        # Final update to remove the "Working..." status
        panels = [p for p in panels if not isinstance(p, Status)]
        live_console.update(Group(*panels))
async def aprint_response_stream(
team: "Team",
input: Union[List, Dict, str, Message, BaseModel, List[Message]],
console: Optional[Any] = None,
show_message: bool = True,
show_reasoning: bool = True,
show_full_reasoning: bool = False,
show_member_responses: Optional[bool] = None,
tags_to_include_in_markdown: Optional[Set[str]] = None,
session_id: Optional[str] = None,
session_state: Optional[Dict[str, Any]] = None,
user_id: Optional[str] = None,
run_id: Optional[str] = None,
audio: Optional[Sequence[Audio]] = None,
images: Optional[Sequence[Image]] = None,
videos: Optional[Sequence[Video]] = None,
files: Optional[Sequence[File]] = None,
markdown: bool = False,
stream_events: bool = False,
knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
add_history_to_context: Optional[bool] = None,
dependencies: Optional[Dict[str, Any]] = None,
add_dependencies_to_context: Optional[bool] = None,
add_session_state_to_context: Optional[bool] = None,
metadata: Optional[Dict[str, Any]] = None,
debug_mode: Optional[bool] = None,
**kwargs: Any,
) -> None:
import textwrap
from rich.console import Group
from rich.json import JSON
from rich.live import Live
from rich.markdown import Markdown
from rich.status import Status
from rich.text import Text
if not tags_to_include_in_markdown:
tags_to_include_in_markdown = {"think", "thinking"}
stream_events = True # With streaming print response, we need to stream intermediate steps
_response_content: str = ""
_response_reasoning_content: str = ""
reasoning_steps: List[ReasoningStep] = []
# Track tool calls by member and team
member_tool_calls = {} # type: ignore
team_tool_calls: List[ToolExecution] = []
# Track processed tool calls to avoid duplicates
processed_tool_calls = set()
# Initialize final_panels here
final_panels = [] # type: ignore
with Live(console=console) as live_console:
status = Status("Working...", spinner="aesthetic", speed=0.4, refresh_per_second=10)
live_console.update(status)
response_timer = Timer()
response_timer.start()
# Flag which indicates if the panels should be rendered
render = False
# Panels to be rendered
panels = [status]
# First render the message panel if the message is not None
if input and show_message:
render = True
# Convert message to a panel
message_content = get_text_from_message(input)
message_panel = create_panel(
content=Text(message_content, style="green"),
title="Message",
border_style="cyan",
)
panels.append(message_panel)
if render:
live_console.update(Group(*panels))
# Get response from the team
team_markdown = None
member_markdown = {}
# Dict to track member response panels by member_id
member_response_panels = {}
input_content = get_text_from_message(input)
final_run_response = None
async for resp in team.arun( # type: ignore
input=input,
audio=audio,
images=images,
videos=videos,
files=files,
stream=True,
stream_events=stream_events,
session_id=session_id,
session_state=session_state,
user_id=user_id,
run_id=run_id,
knowledge_filters=knowledge_filters,
add_history_to_context=add_history_to_context,
add_dependencies_to_context=add_dependencies_to_context,
add_session_state_to_context=add_session_state_to_context,
dependencies=dependencies,
metadata=metadata,
debug_mode=debug_mode,
yield_run_output=True,
**kwargs,
):
if team_markdown is None:
if markdown:
team_markdown = True
else:
team_markdown = False
if team.output_schema is not None:
team_markdown = False
if isinstance(resp, TeamRunOutput):
final_run_response = resp
continue
if isinstance(resp, tuple(get_args(TeamRunOutputEvent))):
if resp.event == TeamRunEvent.run_content:
if isinstance(resp.content, str):
_response_content += resp.content
elif team.output_schema is not None and isinstance(resp.content, BaseModel):
try:
_response_content = JSON(resp.content.model_dump_json(exclude_none=True), indent=2) # type: ignore
except Exception as e:
log_warning(f"Failed to convert response to JSON: {e}")
elif team.output_schema is not None and isinstance(resp.content, dict):
try:
_response_content = JSON(json.dumps(resp.content), indent=2) # type: ignore
except Exception as e:
log_warning(f"Failed to convert response to JSON: {e}")
if hasattr(resp, "reasoning_content") and resp.reasoning_content is not None: # type: ignore
_response_reasoning_content += resp.reasoning_content # type: ignore
if hasattr(resp, "reasoning_steps") and resp.reasoning_steps is not None: # type: ignore
reasoning_steps = resp.reasoning_steps # type: ignore
if resp.event == TeamRunEvent.pre_hook_completed: # type: ignore
if resp.run_input is not None: # type: ignore
input_content = get_text_from_message(resp.run_input.input_content) # type: ignore
# Collect team tool calls, avoiding duplicates
if resp.event == TeamRunEvent.tool_call_completed and resp.tool: # type: ignore
tool = resp.tool # type: ignore
# Generate a unique ID for this tool call
if tool.tool_call_id is not None:
tool_id = tool.tool_call_id
else:
tool_id = str(hash(str(tool)))
if tool_id not in processed_tool_calls:
processed_tool_calls.add(tool_id)
team_tool_calls.append(tool)
# Collect member tool calls, avoiding duplicates
if show_member_responses and hasattr(resp, "member_responses") and resp.member_responses:
for member_response in resp.member_responses:
member_id = None
if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
member_id = member_response.agent_id
elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
member_id = member_response.team_id
if member_id and hasattr(member_response, "tools") and member_response.tools:
if member_id not in member_tool_calls:
member_tool_calls[member_id] = []
for tool in member_response.tools:
if tool.tool_call_id is not None:
tool_id = tool.tool_call_id
else:
tool_id = str(hash(str(tool)))
if tool_id not in processed_tool_calls:
processed_tool_calls.add(tool_id)
member_tool_calls[member_id].append(tool)
response_content_stream: Union[str, Markdown] = _response_content
# Escape special tags before markdown conversion
if team_markdown:
escaped_content = escape_markdown_tags(_response_content, tags_to_include_in_markdown)
response_content_stream = Markdown(escaped_content)
# Create new panels for each chunk
panels = []
if input_content and show_message:
render = True
# Convert message to a panel
message_panel = create_panel(
content=Text(input_content, style="green"),
title="Message",
border_style="cyan",
)
panels.append(message_panel)
if len(reasoning_steps) > 0 and show_reasoning:
render = True
# Create panels for reasoning steps
for i, step in enumerate(reasoning_steps, 1):
reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
panels.append(reasoning_panel)
if len(_response_reasoning_content) > 0 and show_reasoning:
render = True
# Create panel for thinking
thinking_panel = create_panel(
content=Text(_response_reasoning_content),
title=f"Thinking ({response_timer.elapsed:.1f}s)",
border_style="green",
)
panels.append(thinking_panel)
elif _response_content == "":
# Keep showing status if no content yet
panels.append(status)
# Process member responses and their tool calls
for member_response in (
resp.member_responses if show_member_responses and hasattr(resp, "member_responses") else []
):
member_id = None
member_name = "Team Member"
if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
member_id = member_response.agent_id
member_name = team._get_member_name(member_id)
elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
member_id = member_response.team_id
member_name = team._get_member_name(member_id)
# If we have tool calls for this member, display them
if member_id in member_tool_calls and member_tool_calls[member_id]:
formatted_calls = format_tool_calls(member_tool_calls[member_id])
if formatted_calls:
console_width = console.width if console else 80
panel_width = console_width + 30
lines = []
for call in formatted_calls:
wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent=" ")
lines.append(wrapped_call)
tool_calls_text = "\n\n".join(lines)
member_tool_calls_panel = create_panel(
content=tool_calls_text,
title=f"{member_name} Tool Calls",
border_style="yellow",
)
panels.append(member_tool_calls_panel)
# Process member response content
if show_member_responses and member_id is not None:
show_markdown = False
if markdown:
show_markdown = True
member_response_content = _parse_response_content(
member_response,
tags_to_include_in_markdown,
show_markdown=show_markdown,
)
member_response_panel = create_panel(
content=member_response_content,
title=f"{member_name} Response",
border_style="magenta",
)
panels.append(member_response_panel)
# Store for reference
if member_id is not None:
member_response_panels[member_id] = member_response_panel
# Add team tool calls panel if available (before the team response)
if team_tool_calls:
formatted_calls = format_tool_calls(team_tool_calls)
if formatted_calls:
console_width = console.width if console else 80
panel_width = console_width + 30
lines = []
# Create a set to track already added calls by their string representation
added_calls = set()
for call in formatted_calls:
if call not in added_calls:
added_calls.add(call)
# Wrap the call text to fit within the panel
wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent=" ")
lines.append(wrapped_call)
# Join with blank lines between items
tool_calls_text = "\n\n".join(lines)
# Add compression stats if available (don't clear - will be cleared in final_panels)
if team.compression_manager is not None and team.compression_manager.stats:
stats = team.compression_manager.stats
saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
orig = stats.get("original_size", 1)
if stats.get("tool_results_compressed", 0) > 0:
tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
team_tool_calls_panel = create_panel(
content=tool_calls_text,
title="Team Tool Calls",
border_style="yellow",
)
panels.append(team_tool_calls_panel)
# Add the team response panel at the end
if response_content_stream:
render = True
# Create panel for response
response_panel = create_panel(
content=response_content_stream,
title=f"Response ({response_timer.elapsed:.1f}s)",
border_style="blue",
)
panels.append(response_panel)
if render or len(panels) > 0:
live_console.update(Group(*panels))
response_timer.stop()
run_response = final_run_response
# Add citations
if hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
md_lines = []
# Add search queries if present
if resp.citations.search_queries:
md_lines.append("**Search Queries:**")
for query in resp.citations.search_queries:
md_lines.append(f"- {query}")
md_lines.append("") # Empty line before URLs
# Add URL citations
md_lines.extend(
f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
for i, citation in enumerate(resp.citations.urls)
if citation.url # Only include citations with valid URLs
)
md_content = "\n".join(md_lines)
if md_content: # Only create panel if there are citations
citations_panel = create_panel(
content=Markdown(md_content),
title="Citations",
border_style="green",
)
panels.append(citations_panel)
live_console.update(Group(*panels))
if team.memory_manager is not None:
if team.memory_manager.memories_updated:
memory_panel = create_panel(
content=Text("Memories updated"),
title="Memories",
border_style="green",
)
panels.append(memory_panel)
live_console.update(Group(*panels))
if team.session_summary_manager is not None and team.session_summary_manager.summaries_updated:
summary_panel = create_panel(
content=Text("Session summary updated"),
title="Session Summary",
border_style="green",
)
panels.append(summary_panel)
live_console.update(Group(*panels))
team.session_summary_manager.summaries_updated = False
# Final update to remove the "Working..." status
panels = [p for p in panels if not isinstance(p, Status)]
if markdown:
if isinstance(team.members, list):
for member in team.members:
if member.id is not None:
member_markdown[member.id] = True # type: ignore
if isinstance(team.members, list):
for member in team.members:
if member.output_schema is not None and member.id is not None:
member_markdown[member.id] = False # type: ignore
# Final panels assembly - we'll recreate the panels from scratch to ensure correct order
final_panels = []
# Start with the message
if input_content and show_message:
message_panel = create_panel(
content=Text(input_content, style="green"),
title="Message",
border_style="cyan",
)
final_panels.append(message_panel)
# Add reasoning steps
if reasoning_steps and show_reasoning:
for i, step in enumerate(reasoning_steps, 1):
reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
final_panels.append(reasoning_panel)
# Add thinking panel if available
if _response_reasoning_content and show_reasoning:
thinking_panel = create_panel(
content=Text(_response_reasoning_content),
title=f"Thinking ({response_timer.elapsed:.1f}s)",
border_style="green",
)
final_panels.append(thinking_panel)
# Add member tool calls and responses in correct order
if show_member_responses and run_response is not None and hasattr(run_response, "member_responses"):
for i, member_response in enumerate(run_response.member_responses):
member_id = None
if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
member_id = member_response.agent_id
elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
member_id = member_response.team_id
# Print tool calls
if member_id:
# First add tool calls if any
if member_id in member_tool_calls and member_tool_calls[member_id]:
formatted_calls = format_tool_calls(member_tool_calls[member_id])
if formatted_calls:
console_width = console.width if console else 80
panel_width = console_width + 30
lines = []
# Create a set to track already added calls by their string representation
added_calls = set()
for call in formatted_calls:
if call not in added_calls:
added_calls.add(call)
# Wrap the call text to fit within the panel
wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent=" ")
lines.append(wrapped_call)
tool_calls_text = "\n\n".join(lines)
member_name = team._get_member_name(member_id)
member_tool_calls_panel = create_panel(
content=tool_calls_text,
title=f"{member_name} Tool Calls",
border_style="yellow",
)
final_panels.append(member_tool_calls_panel)
# Add reasoning steps if any
reasoning_steps = []
if member_response.reasoning_steps is not None:
reasoning_steps = member_response.reasoning_steps
if reasoning_steps and show_reasoning:
for j, step in enumerate(reasoning_steps, 1):
member_reasoning_panel = build_reasoning_step_panel(
j, step, show_full_reasoning, color="magenta"
)
final_panels.append(member_reasoning_panel)
# Add reasoning steps if any
reasoning_steps = []
if hasattr(member_response, "reasoning_steps") and member_response.reasoning_steps is not None:
reasoning_steps = member_response.reasoning_steps
if reasoning_steps and show_reasoning:
for j, step in enumerate(reasoning_steps, 1):
member_reasoning_panel = build_reasoning_step_panel(
j, step, show_full_reasoning, color="magenta"
)
final_panels.append(member_reasoning_panel)
# Then add response
show_markdown = False
if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
show_markdown = member_markdown.get(member_response.agent_id, False)
elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
show_markdown = member_markdown.get(member_response.team_id, False)
member_response_content = _parse_response_content( # type: ignore
member_response,
tags_to_include_in_markdown,
show_markdown=show_markdown,
)
member_name = "Team Member"
if isinstance(member_response, RunOutput) and member_response.agent_id is not None:
member_name = team._get_member_name(member_response.agent_id)
elif isinstance(member_response, TeamRunOutput) and member_response.team_id is not None:
member_name = team._get_member_name(member_response.team_id)
member_response_panel = create_panel(
content=member_response_content,
title=f"{member_name} Response",
border_style="magenta",
)
final_panels.append(member_response_panel)
# Add citations if any
if member_response.citations is not None and member_response.citations.urls is not None:
md_lines = []
# Add search queries if present
if member_response.citations.search_queries:
md_lines.append("**Search Queries:**")
for query in member_response.citations.search_queries:
md_lines.append(f"- {query}")
md_lines.append("") # Empty line before URLs
# Add URL citations
md_lines.extend(
f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
for i, citation in enumerate(member_response.citations.urls)
if citation.url # Only include citations with valid URLs
)
md_content = "\n".join(md_lines)
if md_content: # Only create panel if there are citations
citations_panel = create_panel(
content=Markdown(md_content),
title="Citations",
border_style="magenta",
)
final_panels.append(citations_panel)
# Add team tool calls before team response
if team_tool_calls:
formatted_calls = format_tool_calls(team_tool_calls)
if formatted_calls:
console_width = console.width if console else 80
panel_width = console_width + 30
lines = []
# Create a set to track already added calls by their string representation
added_calls = set()
for call in formatted_calls:
if call not in added_calls:
added_calls.add(call)
# Wrap the call text to fit within the panel
wrapped_call = textwrap.fill(f"• {call}", width=panel_width, subsequent_indent=" ")
lines.append(wrapped_call)
tool_calls_text = "\n\n".join(lines)
# Add compression stats at end of tool calls
if team.compression_manager is not None and team.compression_manager.stats:
stats = team.compression_manager.stats
saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
orig = stats.get("original_size", 1)
if stats.get("tool_results_compressed", 0) > 0:
tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
team.compression_manager.stats.clear()
team_tool_calls_panel = create_panel(
content=tool_calls_text,
title="Team Tool Calls",
border_style="yellow",
)
final_panels.append(team_tool_calls_panel)
# Add team response
if _response_content:
response_content_stream = _response_content
if team_markdown:
escaped_content = escape_markdown_tags(_response_content, tags_to_include_in_markdown)
response_content_stream = Markdown(escaped_content)
response_panel = create_panel(
content=response_content_stream,
title=f"Response ({response_timer.elapsed:.1f}s)",
border_style="blue",
)
final_panels.append(response_panel)
# Add team citations
if hasattr(resp, "citations") and resp.citations is not None and resp.citations.urls is not None:
md_lines = []
# Add search queries if present
if resp.citations.search_queries:
md_lines.append("**Search Queries:**")
for query in resp.citations.search_queries:
md_lines.append(f"- {query}")
md_lines.append("") # Empty line before URLs
# Add URL citations
md_lines.extend(
f"{i + 1}. [{citation.title or citation.url}]({citation.url})"
for i, citation in enumerate(resp.citations.urls)
if citation.url # Only include citations with valid URLs
)
md_content = "\n".join(md_lines)
if md_content: # Only create panel if there are citations
citations_panel = create_panel(
content=Markdown(md_content),
title="Citations",
border_style="green",
)
final_panels.append(citations_panel)
# Final update with correctly ordered panels
live_console.update(Group(*final_panels))
def _parse_response_content(
    run_response: Union[TeamRunOutput, RunOutput],
    tags_to_include_in_markdown: Set[str],
    show_markdown: bool = True,
) -> Any:
    """Convert a run response's content into a rich renderable for display.

    String content is rendered as Markdown (with the given special tags
    escaped first) when ``show_markdown`` is True, otherwise returned as
    plain text. Pydantic models and other JSON-serializable values are
    rendered as pretty-printed JSON. Falls through to an implicit ``None``
    (after logging a warning) when JSON serialization fails.
    """
    from rich.json import JSON
    from rich.markdown import Markdown

    content = run_response.content
    if isinstance(content, str):
        if not show_markdown:
            return run_response.get_content_as_string(indent=4)
        # Escape tags like <think> so Markdown rendering keeps them visible.
        return Markdown(escape_markdown_tags(content, tags_to_include_in_markdown))
    elif isinstance(content, BaseModel):
        try:
            return JSON(content.model_dump_json(exclude_none=True), indent=2)
        except Exception as e:
            log_warning(f"Failed to convert response to JSON: {e}")
    else:
        import json

        try:
            return JSON(json.dumps(content), indent=4)
        except Exception as e:
            log_warning(f"Failed to convert response to JSON: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/print_response/team.py",
"license": "Apache License 2.0",
"lines": 1663,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/print_response/workflow.py | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from pydantic import BaseModel
from rich.console import Group
from rich.live import Live
from rich.markdown import Markdown
from rich.status import Status
from rich.text import Text
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.run.workflow import (
ConditionExecutionCompletedEvent,
ConditionExecutionStartedEvent,
LoopExecutionCompletedEvent,
LoopExecutionStartedEvent,
LoopIterationCompletedEvent,
LoopIterationStartedEvent,
ParallelExecutionCompletedEvent,
ParallelExecutionStartedEvent,
RouterExecutionCompletedEvent,
RouterExecutionStartedEvent,
StepCompletedEvent,
StepOutputEvent,
StepsExecutionCompletedEvent,
StepsExecutionStartedEvent,
StepStartedEvent,
WorkflowAgentCompletedEvent,
WorkflowAgentStartedEvent,
WorkflowCompletedEvent,
WorkflowErrorEvent,
WorkflowRunOutput,
WorkflowRunOutputEvent,
WorkflowStartedEvent,
)
from agno.utils.response import create_panel
from agno.utils.timer import Timer
from agno.workflow.types import StepOutput
if TYPE_CHECKING:
from agno.workflow.workflow import Workflow
def print_response(
    workflow: "Workflow",
    input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    additional_data: Optional[Dict[str, Any]] = None,
    audio: Optional[List[Audio]] = None,
    images: Optional[List[Image]] = None,
    videos: Optional[List[Video]] = None,
    files: Optional[List[File]] = None,
    markdown: bool = True,
    show_time: bool = True,
    show_step_details: bool = True,
    console: Optional[Any] = None,
    **kwargs: Any,
) -> None:
    """Execute a workflow synchronously and pretty-print the result with rich.

    Prints a "Workflow Information" panel describing the workflow and its
    input, runs ``workflow.run(...)`` (non-streaming), then prints either the
    workflow-agent direct response, the per-step results, or — for callable
    workflows — the raw content, followed by an optional execution summary
    and the elapsed time.

    Args:
        workflow: The workflow to execute.
        input: Message or structured input forwarded to ``workflow.run``.
        user_id: Optional user identifier shown in the info panel.
        session_id: Optional session identifier shown in the info panel.
        additional_data: Extra data forwarded to ``workflow.run``.
        audio: Optional audio inputs forwarded to the run.
        images: Optional image inputs forwarded to the run.
        videos: Optional video inputs forwarded to the run.
        files: Optional file inputs forwarded to the run.
        markdown: Render panel contents as Markdown when True.
        show_time: Print the elapsed time after completion.
        show_step_details: Print a panel per executed step.
        console: Optional rich Console; a new one is created when None.
        **kwargs: Passed through to ``workflow.run``.
    """
    # NOTE: Live, Markdown, Status, Text, create_panel and Timer are already
    # imported at module level; the previous function-local re-imports were
    # redundant and have been removed. A dead, never-used ``media_info`` list
    # was removed as well.
    if console is None:
        from rich.console import Console

        console = Console()

    # Build the workflow description blob shown in the information panel.
    workflow_info = f"""**Workflow:** {workflow.name}"""
    if workflow.description:
        workflow_info += f"""\n\n**Description:** {workflow.description}"""
    workflow_info += f"""\n\n**Steps:** {workflow._get_step_count()} steps"""
    if input:
        if isinstance(input, str):
            workflow_info += f"""\n\n**Message:** {input}"""
        else:
            # Structured input: serialize to a JSON-ish display string.
            if isinstance(input, BaseModel):
                data_display = input.model_dump_json(indent=2, exclude_none=True)
            elif isinstance(input, (dict, list)):
                import json

                data_display = json.dumps(input, indent=2, default=str)
            else:
                data_display = str(input)
            workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
    if user_id:
        workflow_info += f"""\n\n**User ID:** {user_id}"""
    if session_id:
        workflow_info += f"""\n\n**Session ID:** {session_id}"""
    workflow_info = workflow_info.strip()

    workflow_panel = create_panel(
        content=Markdown(workflow_info) if markdown else workflow_info,
        title="Workflow Information",
        border_style="cyan",
    )
    console.print(workflow_panel)  # type: ignore

    # Time the whole run so we can report elapsed time at the end.
    response_timer = Timer()
    response_timer.start()

    with Live(console=console) as live_log:
        status = Status("Starting workflow...", spinner="dots")
        live_log.update(status)
        try:
            # Execute workflow and get the response directly (non-streaming).
            workflow_response: WorkflowRunOutput = workflow.run(
                input=input,
                user_id=user_id,
                session_id=session_id,
                additional_data=additional_data,
                audio=audio,
                images=images,
                videos=videos,
                files=files,
                **kwargs,
            )  # type: ignore

            response_timer.stop()

            if workflow_response.workflow_agent_run is not None and not workflow_response.workflow_agent_run.tools:
                # The workflow agent answered directly from history without
                # executing any workflow steps.
                agent_response_panel = create_panel(
                    content=Markdown(str(workflow_response.content)) if markdown else str(workflow_response.content),
                    title="Workflow Agent Response",
                    border_style="green",
                )
                console.print(agent_response_panel)  # type: ignore
            elif show_step_details and workflow_response.step_results:
                for i, step_output in enumerate(workflow_response.step_results):
                    print_step_output_recursive(step_output, i + 1, markdown, console)  # type: ignore
            elif show_step_details and callable(workflow.steps) and workflow_response.content:
                # Callable workflows have no step_results; show the content directly.
                step_panel = create_panel(
                    content=Markdown(workflow_response.content) if markdown else workflow_response.content,  # type: ignore
                    title="Custom Function (Completed)",
                    border_style="orange3",
                )
                console.print(step_panel)  # type: ignore

            # Show final summary when run metadata is present.
            if workflow_response.metadata:
                # Use a distinct name so the Status widget above is not shadowed.
                run_status = workflow_response.status.value  # type: ignore
                summary_content = ""
                summary_content += f"""\n\n**Status:** {run_status}"""
                summary_content += f"""\n\n**Steps Completed:** {len(workflow_response.step_results) if workflow_response.step_results else 0}"""
                summary_content = summary_content.strip()
                summary_panel = create_panel(
                    content=Markdown(summary_content) if markdown else summary_content,
                    title="Execution Summary",
                    border_style="blue",
                )
                console.print(summary_panel)  # type: ignore

            # Clear the "Starting workflow..." spinner.
            live_log.update("")

            if show_time:
                completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
                console.print(completion_text)  # type: ignore
        except Exception as e:
            import traceback

            traceback.print_exc()
            response_timer.stop()
            error_panel = create_panel(
                content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
            )
            console.print(error_panel)  # type: ignore
def print_response_stream(
workflow: "Workflow",
input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
user_id: Optional[str] = None,
session_id: Optional[str] = None,
additional_data: Optional[Dict[str, Any]] = None,
audio: Optional[List[Audio]] = None,
images: Optional[List[Image]] = None,
videos: Optional[List[Video]] = None,
files: Optional[List[File]] = None,
stream_events: bool = False,
markdown: bool = True,
show_time: bool = True,
show_step_details: bool = True,
console: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Print workflow execution with clean streaming"""
if console is None:
from rich.console import Console
console = Console()
stream_events = True # With streaming print response, we need to stream intermediate steps
# Show workflow info (same as before)
media_info = []
if audio:
media_info.append(f"Audio files: {len(audio)}")
if images:
media_info.append(f"Images: {len(images)}")
if videos:
media_info.append(f"Videos: {len(videos)}")
if files:
media_info.append(f"Files: {len(files)}")
workflow_info = f"""**Workflow:** {workflow.name}"""
if workflow.description:
workflow_info += f"""\n\n**Description:** {workflow.description}"""
workflow_info += f"""\n\n**Steps:** {workflow._get_step_count()} steps"""
if input:
if isinstance(input, str):
workflow_info += f"""\n\n**Message:** {input}"""
else:
# Handle structured input message
if isinstance(input, BaseModel):
data_display = input.model_dump_json(indent=2, exclude_none=True)
elif isinstance(input, (dict, list)):
import json
data_display = json.dumps(input, indent=2, default=str)
else:
data_display = str(input)
workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
if user_id:
workflow_info += f"""\n\n**User ID:** {user_id}"""
if session_id:
workflow_info += f"""\n\n**Session ID:** {session_id}"""
workflow_info = workflow_info.strip()
workflow_panel = create_panel(
content=Markdown(workflow_info) if markdown else workflow_info,
title="Workflow Information",
border_style="cyan",
)
console.print(workflow_panel) # type: ignore
# Start timer
response_timer = Timer()
response_timer.start()
# Streaming execution variables with smart step tracking
current_step_content = ""
current_step_name = ""
current_step_index = 0
step_results = []
step_started_printed = False
is_callable_function = callable(workflow.steps)
workflow_started = False # Track if workflow has actually started
is_workflow_agent_response = False # Track if this is a workflow agent direct response
# Smart step hierarchy tracking
current_primitive_context = None # Current primitive being executed (parallel, loop, etc.)
step_display_cache = {} # type: ignore
# Parallel-aware tracking for simultaneous steps
parallel_step_states: Dict[
Any, Dict[str, Any]
] = {} # track state of each parallel step: {step_index: {"name": str, "content": str, "started": bool, "completed": bool}}
    def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
        """Generate clean two-level step numbering: x.y format only.

        Maps a workflow step index to a human-readable label such as
        ``"Step 2"`` or ``"Step 2.1"``. Child steps arrive as tuples of
        ``(parent_index, sub_index)``; parent_index may itself be nested,
        in which case the outermost integer is extracted.

        Reads the enclosing function's ``current_primitive_context`` closure
        variable to decide formatting for parallel/loop children.

        NOTE(review): ``step_name`` is accepted but unused, and despite the
        "cached value" comments at call sites this function never consults
        ``step_display_cache`` — presumably leftovers from an earlier design;
        confirm before relying on caching behavior.
        """
        # Handle tuple format for child steps
        if isinstance(step_index, tuple):
            if len(step_index) >= 2:
                parent_idx, sub_idx = step_index[0], step_index[1]
                # Extract base parent index if it's nested
                if isinstance(parent_idx, tuple):
                    base_idx = parent_idx[0] if len(parent_idx) > 0 else 0
                    # Unwrap arbitrarily deep tuple nesting down to an int
                    while isinstance(base_idx, tuple) and len(base_idx) > 0:
                        base_idx = base_idx[0]
                else:
                    base_idx = parent_idx
                # Check context for parallel special case
                if current_primitive_context and current_primitive_context["type"] == "parallel":
                    # For parallel child steps, all get the same number based on their actual step_index
                    return f"Step {base_idx + 1}.{sub_idx + 1}"
                elif current_primitive_context and current_primitive_context["type"] == "loop":
                    # Loop children additionally show which iteration produced them
                    iteration = current_primitive_context.get("current_iteration", 1)
                    return f"Step {base_idx + 1}.{sub_idx + 1} (Iteration {iteration})"
                else:
                    # Regular child step numbering
                    return f"Step {base_idx + 1}.{sub_idx + 1}"  # type: ignore
            else:
                # Single element tuple - treat as main step
                return f"Step {step_index[0] + 1}"

        # Handle integer step_index - main step
        if not current_primitive_context:
            # Regular main step
            return f"Step {step_index + 1}"
        else:
            # This shouldn't happen with the new logic, but fallback
            return f"Step {step_index + 1}"
with Live(console=console, refresh_per_second=10) as live_log:
status = Status("Starting workflow...", spinner="dots")
live_log.update(status)
try:
for response in workflow.run(
input=input,
user_id=user_id,
session_id=session_id,
additional_data=additional_data,
audio=audio,
images=images,
videos=videos,
files=files,
stream=True,
stream_events=stream_events,
**kwargs,
): # type: ignore
# Handle the new event types
if isinstance(response, WorkflowStartedEvent):
workflow_started = True
status.update("Workflow started...")
if is_callable_function:
current_step_name = "Custom Function"
current_step_index = 0
live_log.update(status)
elif isinstance(response, WorkflowAgentStartedEvent):
# Workflow agent is starting to process
status.update("Workflow agent processing...")
live_log.update(status)
continue
elif isinstance(response, WorkflowAgentCompletedEvent):
# Workflow agent has completed
status.update("Workflow agent completed")
live_log.update(status)
continue
elif isinstance(response, StepStartedEvent):
step_name = response.step_name or "Unknown"
step_index = response.step_index or 0 # type: ignore
current_step_name = step_name
current_step_index = step_index # type: ignore
current_step_content = ""
step_started_printed = False
# Generate smart step number
step_display = get_step_display_number(current_step_index, current_step_name)
status.update(f"Starting {step_display}: {current_step_name}...")
live_log.update(status)
elif isinstance(response, StepCompletedEvent):
step_name = response.step_name or "Unknown"
step_index = response.step_index or 0
# Skip parallel sub-step completed events - they're handled in ParallelExecutionCompletedEvent (avoid duplication)
if (
current_primitive_context
and current_primitive_context["type"] == "parallel"
and isinstance(step_index, tuple)
):
continue
# Generate smart step number for completion (will use cached value)
step_display = get_step_display_number(step_index, step_name)
status.update(f"Completed {step_display}: {step_name}")
if response.content:
step_results.append(
{
"step_name": step_name,
"step_index": step_index,
"content": response.content,
"event": response.event,
}
)
# Print the final step result in orange (only once)
if show_step_details and current_step_content and not step_started_printed:
live_log.update(status, refresh=True)
final_step_panel = create_panel(
content=Markdown(current_step_content) if markdown else current_step_content,
title=f"{step_display}: {step_name} (Completed)",
border_style="orange3",
)
console.print(final_step_panel) # type: ignore
step_started_printed = True
elif isinstance(response, LoopExecutionStartedEvent):
current_step_name = response.step_name or "Loop"
current_step_index = response.step_index or 0 # type: ignore
current_step_content = ""
step_started_printed = False
# Set up loop context
current_primitive_context = {
"type": "loop",
"step_index": current_step_index,
"sub_step_counter": 0,
"current_iteration": 1,
"max_iterations": response.max_iterations,
}
# Initialize parallel step tracking - clear previous states
parallel_step_states.clear()
step_display_cache.clear()
status.update(f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)...")
live_log.update(status)
elif isinstance(response, LoopIterationStartedEvent):
if current_primitive_context and current_primitive_context["type"] == "loop":
current_primitive_context["current_iteration"] = response.iteration
current_primitive_context["sub_step_counter"] = 0 # Reset for new iteration
# Clear cache for new iteration
step_display_cache.clear()
status.update(
f"Loop iteration {response.iteration}/{response.max_iterations}: {response.step_name}..."
)
live_log.update(status)
elif isinstance(response, LoopIterationCompletedEvent):
status.update(
f"Completed iteration {response.iteration}/{response.max_iterations}: {response.step_name}"
)
elif isinstance(response, LoopExecutionCompletedEvent):
step_name = response.step_name or "Loop"
step_index = response.step_index or 0
status.update(f"Completed loop: {step_name} ({response.total_iterations} iterations)")
live_log.update(status, refresh=True)
# Print loop summary
if show_step_details:
summary_content = "**Loop Summary:**\n\n"
summary_content += (
f"- Total iterations: {response.total_iterations}/{response.max_iterations}\n"
)
summary_content += (
f"- Total steps executed: {sum(len(iteration) for iteration in response.all_results)}\n"
)
loop_summary_panel = create_panel(
content=Markdown(summary_content) if markdown else summary_content,
title=f"Loop {step_name} (Completed)",
border_style="yellow",
)
console.print(loop_summary_panel) # type: ignore
# Reset context
current_primitive_context = None
step_display_cache.clear()
step_started_printed = True
elif isinstance(response, ParallelExecutionStartedEvent):
current_step_name = response.step_name or "Parallel Steps"
current_step_index = response.step_index or 0 # type: ignore
current_step_content = ""
step_started_printed = False
# Set up parallel context
current_primitive_context = {
"type": "parallel",
"step_index": current_step_index,
"sub_step_counter": 0,
"total_steps": response.parallel_step_count,
}
# Initialize parallel step tracking - clear previous states
parallel_step_states.clear()
step_display_cache.clear()
# Print parallel execution summary panel
live_log.update(status, refresh=True)
parallel_summary = f"**Parallel Steps:** {response.parallel_step_count}"
# Use get_step_display_number for consistent numbering
step_display = get_step_display_number(current_step_index, current_step_name)
parallel_panel = create_panel(
content=Markdown(parallel_summary) if markdown else parallel_summary,
title=f"{step_display}: {current_step_name}",
border_style="cyan",
)
console.print(parallel_panel) # type: ignore
status.update(
f"Starting parallel execution: {current_step_name} ({response.parallel_step_count} steps)..."
)
live_log.update(status)
elif isinstance(response, ParallelExecutionCompletedEvent):
step_name = response.step_name or "Parallel Steps"
step_index = response.step_index or 0
status.update(f"Completed parallel execution: {step_name}")
# Display individual parallel step results immediately
if show_step_details and response.step_results:
live_log.update(status, refresh=True)
# Get the parallel container's display number for consistent numbering
parallel_step_display = get_step_display_number(step_index, step_name)
# Show each parallel step with the same number (1.1, 1.1)
for step_result in response.step_results:
if step_result.content:
step_result_name = step_result.step_name or "Parallel Step"
formatted_content = format_step_content_for_display(step_result.content) # type: ignore
# All parallel sub-steps get the same number
parallel_step_panel = create_panel(
content=Markdown(formatted_content) if markdown else formatted_content,
title=f"{parallel_step_display}: {step_result_name} (Completed)",
border_style="orange3",
)
console.print(parallel_step_panel) # type: ignore
# Reset context
current_primitive_context = None
parallel_step_states.clear()
step_display_cache.clear()
elif isinstance(response, ConditionExecutionStartedEvent):
current_step_name = response.step_name or "Condition"
current_step_index = response.step_index or 0 # type: ignore
current_step_content = ""
step_started_printed = False
# Set up condition context
current_primitive_context = {
"type": "condition",
"step_index": current_step_index,
"sub_step_counter": 0,
"condition_result": response.condition_result,
}
# Initialize parallel step tracking - clear previous states
parallel_step_states.clear()
step_display_cache.clear()
condition_text = "met" if response.condition_result else "not met"
status.update(f"Starting condition: {current_step_name} (condition {condition_text})...")
live_log.update(status)
elif isinstance(response, ConditionExecutionCompletedEvent):
step_name = response.step_name or "Condition"
step_index = response.step_index or 0
status.update(f"Completed condition: {step_name}")
# Reset context
current_primitive_context = None
step_display_cache.clear()
elif isinstance(response, RouterExecutionStartedEvent):
current_step_name = response.step_name or "Router"
current_step_index = response.step_index or 0 # type: ignore
current_step_content = ""
step_started_printed = False
# Set up router context
current_primitive_context = {
"type": "router",
"step_index": current_step_index,
"sub_step_counter": 0,
"selected_steps": response.selected_steps,
}
# Initialize parallel step tracking - clear previous states
parallel_step_states.clear()
step_display_cache.clear()
selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
status.update(f"Starting router: {current_step_name} (selected: {selected_steps_text})...")
live_log.update(status)
elif isinstance(response, RouterExecutionCompletedEvent):
step_name = response.step_name or "Router"
step_index = response.step_index or 0
status.update(f"Completed router: {step_name}")
# Print router summary
if show_step_details:
selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
summary_content = "**Router Summary:**\n\n"
summary_content += f"- Selected steps: {selected_steps_text}\n"
summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
router_summary_panel = create_panel(
content=Markdown(summary_content) if markdown else summary_content,
title=f"Router {step_name} (Completed)",
border_style="purple",
)
console.print(router_summary_panel) # type: ignore
# Reset context
current_primitive_context = None
step_display_cache.clear()
step_started_printed = True
elif isinstance(response, StepsExecutionStartedEvent):
current_step_name = response.step_name or "Steps"
current_step_index = response.step_index or 0 # type: ignore
current_step_content = ""
step_started_printed = False
status.update(f"Starting steps: {current_step_name} ({response.steps_count} steps)...")
live_log.update(status)
elif isinstance(response, StepsExecutionCompletedEvent):
step_name = response.step_name or "Steps"
step_index = response.step_index or 0
status.update(f"Completed steps: {step_name}")
# Add results from executed steps to step_results
if response.step_results:
for i, step_result in enumerate(response.step_results):
# Use the same numbering system as other primitives
step_display_number = get_step_display_number(step_index, step_result.step_name or "")
step_results.append(
{
"step_name": f"{step_display_number}: {step_result.step_name}",
"step_index": step_index,
"content": step_result.content,
"event": "StepsStepResult",
}
)
# Print steps summary
if show_step_details:
summary_content = "**Steps Summary:**\n\n"
summary_content += f"- Total steps: {response.steps_count or 0}\n"
summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
steps_summary_panel = create_panel(
content=Markdown(summary_content) if markdown else summary_content,
title=f"Steps {step_name} (Completed)",
border_style="yellow",
)
console.print(steps_summary_panel) # type: ignore
step_started_printed = True
elif isinstance(response, WorkflowCompletedEvent):
status.update("Workflow completed!")
# Check if this is an agent direct response
if response.metadata and response.metadata.get("agent_direct_response"):
is_workflow_agent_response = True
# Print the agent's direct response from history
if show_step_details:
live_log.update(status, refresh=True)
agent_response_panel = create_panel(
content=Markdown(str(response.content)) if markdown else str(response.content),
title="Workflow Agent Response",
border_style="green",
)
console.print(agent_response_panel) # type: ignore
step_started_printed = True
# For callable functions, print the final content block here since there are no step events
elif (
is_callable_function and show_step_details and current_step_content and not step_started_printed
):
final_step_panel = create_panel(
content=Markdown(current_step_content) if markdown else current_step_content,
title="Custom Function (Completed)",
border_style="orange3",
)
console.print(final_step_panel) # type: ignore
step_started_printed = True
live_log.update(status, refresh=True)
# Show final summary (skip for agent responses)
if response.metadata and not is_workflow_agent_response:
status = response.status
summary_content = ""
summary_content += f"""\n\n**Status:** {status}"""
summary_content += (
f"""\n\n**Steps Completed:** {len(response.step_results) if response.step_results else 0}"""
)
summary_content = summary_content.strip()
summary_panel = create_panel(
content=Markdown(summary_content) if markdown else summary_content,
title="Execution Summary",
border_style="blue",
)
console.print(summary_panel) # type: ignore
else:
# Handle streaming content
if isinstance(response, str):
response_str = response
elif isinstance(response, StepOutputEvent):
response_str = response.content or "" # type: ignore
else:
from agno.run.agent import RunContentEvent
from agno.run.team import RunContentEvent as TeamRunContentEvent
current_step_executor_type = None
# Handle both integer and tuple step indices for parallel execution
actual_step_index = current_step_index
if isinstance(current_step_index, tuple):
# For tuple indices, use the first element (parent step index)
actual_step_index = current_step_index[0]
# If it's nested tuple, keep extracting until we get an integer
while isinstance(actual_step_index, tuple) and len(actual_step_index) > 0:
actual_step_index = actual_step_index[0]
if not is_callable_function and workflow.steps and actual_step_index < len(workflow.steps): # type: ignore
step = workflow.steps[actual_step_index] # type: ignore
if hasattr(step, "executor_type"):
current_step_executor_type = step.executor_type
# Check if this is a streaming content event from agent or team
if isinstance(response, (TeamRunContentEvent, WorkflowRunOutputEvent)): # type: ignore
# Check if this is a team's final structured output
is_structured_output = (
isinstance(response, TeamRunContentEvent)
and hasattr(response, "content_type")
and response.content_type != "str"
and response.content_type != ""
)
response_str = response.content # type: ignore
if isinstance(response, RunContentEvent) and not workflow_started:
is_workflow_agent_response = True
continue
elif isinstance(response, RunContentEvent) and current_step_executor_type != "team":
response_str = response.content # type: ignore
# If we get RunContentEvent BEFORE workflow starts, it's an agent direct response
if not workflow_started and not is_workflow_agent_response:
is_workflow_agent_response = True
else:
continue
# Use the unified formatting function for consistency
response_str = format_step_content_for_display(response_str) # type: ignore
# Skip streaming content from parallel sub-steps - they're handled in ParallelExecutionCompletedEvent
if (
current_primitive_context
and current_primitive_context["type"] == "parallel"
and isinstance(current_step_index, tuple)
):
continue
# Filter out empty responses and add to current step content
if response_str and response_str.strip():
# If it's a structured output from a team, replace the content instead of appending
if "is_structured_output" in locals() and is_structured_output:
current_step_content = response_str
else:
current_step_content += response_str
# Live update the step panel with streaming content (skip for workflow agent responses)
if show_step_details and not step_started_printed and not is_workflow_agent_response:
# Generate smart step number for streaming title (will use cached value)
step_display = get_step_display_number(current_step_index, current_step_name)
title = f"{step_display}: {current_step_name} (Streaming...)"
if is_callable_function:
title = "Custom Function (Streaming...)"
# Show the streaming content live in orange panel
live_step_panel = create_panel(
content=Markdown(current_step_content) if markdown else current_step_content,
title=title,
border_style="orange3",
)
# Create group with status and current step content
group = Group(status, live_step_panel)
live_log.update(group)
response_timer.stop()
live_log.update("")
# Final completion message (skip for agent responses)
if show_time and not is_workflow_agent_response:
completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
console.print(completion_text) # type: ignore
except Exception as e:
import traceback
traceback.print_exc()
response_timer.stop()
error_panel = create_panel(
content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
)
console.print(error_panel) # type: ignore
def print_step_output_recursive(
    step_output: StepOutput, step_number: int, markdown: bool, console, depth: int = 0
) -> None:
    """Render one step's result panel, then recurse into its nested steps.

    Top-level steps (depth 0) get a numbered ``Step N: ...`` title; nested
    steps get an indented branch marker instead. Steps without content are
    silently skipped, but their children are still visited.
    """
    from rich.markdown import Markdown

    from agno.utils.response import create_panel

    if step_output.content:
        body = format_step_content_for_display(step_output)

        # Title style depends on nesting level.
        title = (
            f"Step {step_number}: {step_output.step_name} (Completed)"
            if depth == 0
            else f"{' ' * depth}└─ {step_output.step_name} (Completed)"
        )

        panel = create_panel(
            content=Markdown(body) if markdown else body,
            title=title,
            border_style="orange3",
        )
        console.print(panel)

    # Recurse into children, re-numbering them 1..N within this parent.
    for position, child in enumerate(step_output.steps or []):
        print_step_output_recursive(child, position + 1, markdown, console, depth + 1)
def format_step_content_for_display(step_output: StepOutput) -> str:
    """Format content for display, handling structured outputs.

    Accepts either a ``StepOutput``-like object (anything with a ``content``
    attribute) or raw content. Strings pass through unchanged; pydantic
    models and dict/list values are rendered as a fenced JSON block; empty
    content yields ``""``; anything else falls back to ``str()``.
    """
    # Unwrap StepOutput-like objects down to their raw content.
    content = step_output.content if hasattr(step_output, "content") else step_output

    if not content:
        return ""

    if isinstance(content, str):
        return content

    # Structured outputs are shown as pretty-printed JSON in a code fence.
    if isinstance(content, BaseModel):
        payload = content.model_dump_json(indent=2, exclude_none=True)
        return f"**Structured Output:**\n\n```json\n{payload}\n```"

    if isinstance(content, (dict, list)):
        import json

        payload = json.dumps(content, indent=2, default=str)
        return f"**Structured Output:**\n\n```json\n{payload}\n```"

    # Fallback to string conversion for anything else.
    return str(content)
async def aprint_response(
    workflow: "Workflow",
    input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
    additional_data: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    session_id: Optional[str] = None,
    audio: Optional[List[Audio]] = None,
    images: Optional[List[Image]] = None,
    videos: Optional[List[Video]] = None,
    files: Optional[List[File]] = None,
    markdown: bool = True,
    show_time: bool = True,
    show_step_details: bool = True,
    console: Optional[Any] = None,
    **kwargs: Any,
) -> None:
    """Print workflow execution with rich formatting (non-streaming).

    Awaits ``workflow.arun(...)`` once and renders the finished run as rich
    panels: a "Workflow Information" panel up front, then either the workflow
    agent's direct response, per-step result panels (recursively, via
    ``print_step_output_recursive``), or the raw content when the workflow's
    steps are a single callable, followed by an "Execution Summary" panel and
    an elapsed-time line. On any exception the traceback is printed and an
    "Execution Error" panel is shown instead.

    Args:
        workflow: Workflow to execute.
        input: Message or structured input forwarded to ``workflow.arun``;
            pydantic models and dict/list inputs are echoed as JSON in the
            info panel.
        additional_data: Extra data forwarded to the run.
        user_id: Optional user id, shown in the info panel.
        session_id: Optional session id, shown in the info panel.
        audio: Audio attachments forwarded to the run.
        images: Image attachments forwarded to the run.
        videos: Video attachments forwarded to the run.
        files: File attachments forwarded to the run.
        markdown: Render panel bodies as Markdown when True.
        show_time: Print a "Completed in Xs" line after success.
        show_step_details: Print per-step result panels.
        console: rich Console to print to; a fresh one is created when None.
        **kwargs: Passed through to ``workflow.arun``.
    """
    from rich.live import Live
    from rich.markdown import Markdown
    from rich.status import Status
    from rich.text import Text

    from agno.utils.response import create_panel
    from agno.utils.timer import Timer

    if console is None:
        from rich.console import Console

        console = Console()

    # Show workflow info
    # NOTE(review): media_info is collected here but never appended to
    # workflow_info or printed — presumably it was meant to appear in the
    # info panel; confirm intended behavior before removing.
    media_info = []
    if audio:
        media_info.append(f"Audio files: {len(audio)}")
    if images:
        media_info.append(f"Images: {len(images)}")
    if videos:
        media_info.append(f"Videos: {len(videos)}")
    if files:
        media_info.append(f"Files: {len(files)}")

    workflow_info = f"""**Workflow:** {workflow.name}"""
    if workflow.description:
        workflow_info += f"""\n\n**Description:** {workflow.description}"""
    workflow_info += f"""\n\n**Steps:** {workflow._get_step_count()} steps"""
    if input:
        if isinstance(input, str):
            workflow_info += f"""\n\n**Message:** {input}"""
        else:
            # Handle structured input message
            if isinstance(input, BaseModel):
                data_display = input.model_dump_json(indent=2, exclude_none=True)
            elif isinstance(input, (dict, list)):
                import json

                data_display = json.dumps(input, indent=2, default=str)
            else:
                data_display = str(input)
            workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
    if user_id:
        workflow_info += f"""\n\n**User ID:** {user_id}"""
    if session_id:
        workflow_info += f"""\n\n**Session ID:** {session_id}"""

    workflow_info = workflow_info.strip()

    workflow_panel = create_panel(
        content=Markdown(workflow_info) if markdown else workflow_info,
        title="Workflow Information",
        border_style="cyan",
    )
    console.print(workflow_panel)  # type: ignore

    # Start timer
    response_timer = Timer()
    response_timer.start()

    with Live(console=console) as live_log:
        status = Status("Starting async workflow...\n", spinner="dots")
        live_log.update(status)

        try:
            # Execute workflow and get the response directly
            workflow_response: WorkflowRunOutput = await workflow.arun(
                input=input,
                user_id=user_id,
                session_id=session_id,
                additional_data=additional_data,
                audio=audio,
                images=images,
                videos=videos,
                files=files,
                **kwargs,
            )  # type: ignore

            response_timer.stop()

            # Check if this is a workflow agent direct response
            if workflow_response.workflow_agent_run is not None and not workflow_response.workflow_agent_run.tools:
                # Agent answered directly from history without executing workflow
                agent_response_panel = create_panel(
                    content=Markdown(str(workflow_response.content)) if markdown else str(workflow_response.content),
                    title="Workflow Agent Response",
                    border_style="green",
                )
                console.print(agent_response_panel)  # type: ignore
            elif show_step_details and workflow_response.step_results:
                # One numbered panel per top-level step; nested steps recurse.
                for i, step_output in enumerate(workflow_response.step_results):
                    print_step_output_recursive(step_output, i + 1, markdown, console)  # type: ignore

            # For callable functions, show the content directly since there are no step_results
            elif show_step_details and callable(workflow.steps) and workflow_response.content:
                step_panel = create_panel(
                    content=Markdown(workflow_response.content) if markdown else workflow_response.content,  # type: ignore
                    title="Custom Function (Completed)",
                    border_style="orange3",
                )
                console.print(step_panel)  # type: ignore

            # Show final summary
            if workflow_response.metadata:
                # NOTE: rebinds `status` from the rich Status object to the
                # run-status string for display in the summary panel.
                status = workflow_response.status.value  # type: ignore
                summary_content = ""
                summary_content += f"""\n\n**Status:** {status}"""
                summary_content += f"""\n\n**Steps Completed:** {len(workflow_response.step_results) if workflow_response.step_results else 0}"""
                summary_content = summary_content.strip()

                summary_panel = create_panel(
                    content=Markdown(summary_content) if markdown else summary_content,
                    title="Execution Summary",
                    border_style="blue",
                )
                console.print(summary_panel)  # type: ignore

            # Clear the live spinner before the final completion line
            live_log.update("")

            # Final completion message
            if show_time:
                completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
                console.print(completion_text)  # type: ignore

        except Exception as e:
            import traceback

            traceback.print_exc()
            response_timer.stop()
            error_panel = create_panel(
                content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
            )
            console.print(error_panel)  # type: ignore
async def aprint_response_stream(
workflow: "Workflow",
input: Union[str, Dict[str, Any], List[Any], BaseModel, List[Message]],
additional_data: Optional[Dict[str, Any]] = None,
user_id: Optional[str] = None,
session_id: Optional[str] = None,
audio: Optional[List[Audio]] = None,
images: Optional[List[Image]] = None,
videos: Optional[List[Video]] = None,
files: Optional[List[File]] = None,
stream_events: bool = False,
markdown: bool = True,
show_time: bool = True,
show_step_details: bool = True,
console: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Print workflow execution with clean streaming - orange step blocks displayed once"""
if console is None:
from rich.console import Console
console = Console()
stream_events = True # With streaming print response, we need to stream intermediate steps
# Show workflow info (same as before)
media_info = []
if audio:
media_info.append(f"Audio files: {len(audio)}")
if images:
media_info.append(f"Images: {len(images)}")
if videos:
media_info.append(f"Videos: {len(videos)}")
if files:
media_info.append(f"Files: {len(files)}")
workflow_info = f"""**Workflow:** {workflow.name}"""
if workflow.description:
workflow_info += f"""\n\n**Description:** {workflow.description}"""
workflow_info += f"""\n\n**Steps:** {workflow._get_step_count()} steps"""
if input:
if isinstance(input, str):
workflow_info += f"""\n\n**Message:** {input}"""
else:
# Handle structured input message
if isinstance(input, BaseModel):
data_display = input.model_dump_json(indent=2, exclude_none=True)
elif isinstance(input, (dict, list)):
import json
data_display = json.dumps(input, indent=2, default=str)
else:
data_display = str(input)
workflow_info += f"""\n\n**Structured Input:**\n```json\n{data_display}\n```"""
if user_id:
workflow_info += f"""\n\n**User ID:** {user_id}"""
if session_id:
workflow_info += f"""\n\n**Session ID:** {session_id}"""
workflow_info = workflow_info.strip()
workflow_panel = create_panel(
content=Markdown(workflow_info) if markdown else workflow_info,
title="Workflow Information",
border_style="cyan",
)
console.print(workflow_panel) # type: ignore
# Start timer
response_timer = Timer()
response_timer.start()
# Streaming execution variables
current_step_content = ""
current_step_name = ""
current_step_index = 0
step_results = []
step_started_printed = False
is_callable_function = callable(workflow.steps)
workflow_started = False # Track if workflow has actually started
is_workflow_agent_response = False # Track if this is a workflow agent direct response
# Smart step hierarchy tracking
current_primitive_context = None # Current primitive being executed (parallel, loop, etc.)
step_display_cache = {} # type: ignore
# Parallel-aware tracking for simultaneous steps
parallel_step_states: Dict[
Any, Dict[str, Any]
] = {} # track state of each parallel step: {step_index: {"name": str, "content": str, "started": bool, "completed": bool}}
    def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
        """Generate clean two-level step numbering: x.y format only.

        Maps a workflow step index to a label such as ``"Step 2"`` or
        ``"Step 2.1"``. Tuple indices denote child steps as
        ``(parent_index, sub_index)``; a nested parent index is unwrapped to
        its outermost integer. Formatting of parallel/loop children is driven
        by the enclosing function's ``current_primitive_context`` closure
        variable.

        NOTE(review): ``step_name`` is unused and ``step_display_cache`` is
        never read here despite the "cached value" comments at call sites —
        likely leftovers from an earlier design; confirm.
        """
        # Handle tuple format for child steps
        if isinstance(step_index, tuple):
            if len(step_index) >= 2:
                parent_idx, sub_idx = step_index[0], step_index[1]
                # Extract base parent index if it's nested
                if isinstance(parent_idx, tuple):
                    base_idx = parent_idx[0] if len(parent_idx) > 0 else 0
                    # Unwrap arbitrarily deep tuple nesting down to an int
                    while isinstance(base_idx, tuple) and len(base_idx) > 0:
                        base_idx = base_idx[0]
                else:
                    base_idx = parent_idx
                # Check context for parallel special case
                if current_primitive_context and current_primitive_context["type"] == "parallel":
                    # For parallel child steps, all get the same number based on their actual step_index
                    return f"Step {base_idx + 1}.{sub_idx + 1}"
                elif current_primitive_context and current_primitive_context["type"] == "loop":
                    # Loop children additionally show which iteration produced them
                    iteration = current_primitive_context.get("current_iteration", 1)
                    return f"Step {base_idx + 1}.{sub_idx + 1} (Iteration {iteration})"
                else:
                    # Regular child step numbering
                    return f"Step {base_idx + 1}.{sub_idx + 1}"  # type: ignore
            else:
                # Single element tuple - treat as main step
                return f"Step {step_index[0] + 1}"

        # Handle integer step_index - main step
        if not current_primitive_context:
            # Regular main step
            return f"Step {step_index + 1}"
        else:
            # This shouldn't happen with the new logic, but fallback
            return f"Step {step_index + 1}"
# Consume the async event stream from workflow.arun() and render live progress.
# NOTE(review): relies on names bound earlier in the enclosing function
# (console, step_results, response_timer, show_step_details/show_time,
# is_callable_function, markdown, and the get_step_display_number /
# format_step_content_for_display helpers) — confirm against the full
# function when editing.
with Live(console=console, refresh_per_second=10) as live_log:
    status = Status("Starting async workflow...", spinner="dots")
    live_log.update(status)
    try:
        async for response in workflow.arun(
            input=input,
            additional_data=additional_data,
            user_id=user_id,
            session_id=session_id,
            audio=audio,
            images=images,
            videos=videos,
            files=files,
            stream=True,
            stream_events=stream_events,
            **kwargs,
        ): # type: ignore
            # Handle the new event types
            if isinstance(response, WorkflowStartedEvent):
                workflow_started = True
                status.update("Workflow started...")
                if is_callable_function:
                    current_step_name = "Custom Function"
                    current_step_index = 0
                live_log.update(status)
            elif isinstance(response, WorkflowAgentStartedEvent):
                # Workflow agent is starting to process
                status.update("Workflow agent processing...")
                live_log.update(status)
                continue
            elif isinstance(response, WorkflowAgentCompletedEvent):
                # Workflow agent has completed
                status.update("Workflow agent completed")
                live_log.update(status)
                continue
            elif isinstance(response, StepStartedEvent):
                # Skip step events if workflow hasn't started (agent direct response)
                if not workflow_started:
                    continue
                step_name = response.step_name or "Unknown"
                step_index = response.step_index or 0 # type: ignore
                current_step_name = step_name
                current_step_index = step_index # type: ignore
                current_step_content = ""
                step_started_printed = False
                # Generate smart step number
                step_display = get_step_display_number(current_step_index, current_step_name)
                status.update(f"Starting {step_display}: {current_step_name}...")
                live_log.update(status)
            elif isinstance(response, StepCompletedEvent):
                step_name = response.step_name or "Unknown"
                step_index = response.step_index or 0
                # Skip parallel sub-step completed events - they're handled in ParallelExecutionCompletedEvent (avoid duplication)
                if (
                    current_primitive_context
                    and current_primitive_context["type"] == "parallel"
                    and isinstance(step_index, tuple)
                ):
                    continue
                # Generate smart step number for completion (will use cached value)
                step_display = get_step_display_number(step_index, step_name)
                status.update(f"Completed {step_display}: {step_name}")
                if response.content:
                    step_results.append(
                        {
                            "step_name": step_name,
                            "step_index": step_index,
                            "content": response.content,
                            "event": response.event,
                        }
                    )
                # Print the final step result in orange (only once)
                if show_step_details and current_step_content and not step_started_printed:
                    live_log.update(status, refresh=True)
                    final_step_panel = create_panel(
                        content=Markdown(current_step_content) if markdown else current_step_content,
                        title=f"{step_display}: {step_name} (Completed)",
                        border_style="orange3",
                    )
                    console.print(final_step_panel) # type: ignore
                    step_started_printed = True
            elif isinstance(response, LoopExecutionStartedEvent):
                current_step_name = response.step_name or "Loop"
                current_step_index = response.step_index or 0 # type: ignore
                current_step_content = ""
                step_started_printed = False
                # Set up loop context
                current_primitive_context = {
                    "type": "loop",
                    "step_index": current_step_index,
                    "sub_step_counter": 0,
                    "current_iteration": 1,
                    "max_iterations": response.max_iterations,
                }
                # Initialize parallel step tracking - clear previous states
                parallel_step_states.clear()
                step_display_cache.clear()
                status.update(f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)...")
                live_log.update(status)
            elif isinstance(response, LoopIterationStartedEvent):
                if current_primitive_context and current_primitive_context["type"] == "loop":
                    current_primitive_context["current_iteration"] = response.iteration
                    current_primitive_context["sub_step_counter"] = 0 # Reset for new iteration
                    # Clear cache for new iteration
                    step_display_cache.clear()
                status.update(
                    f"Loop iteration {response.iteration}/{response.max_iterations}: {response.step_name}..."
                )
                live_log.update(status)
            elif isinstance(response, LoopIterationCompletedEvent):
                status.update(
                    f"Completed iteration {response.iteration}/{response.max_iterations}: {response.step_name}"
                )
            elif isinstance(response, LoopExecutionCompletedEvent):
                step_name = response.step_name or "Loop"
                step_index = response.step_index or 0
                status.update(f"Completed loop: {step_name} ({response.total_iterations} iterations)")
                live_log.update(status, refresh=True)
                # Print loop summary
                if show_step_details:
                    summary_content = "**Loop Summary:**\n\n"
                    summary_content += (
                        f"- Total iterations: {response.total_iterations}/{response.max_iterations}\n"
                    )
                    summary_content += (
                        f"- Total steps executed: {sum(len(iteration) for iteration in response.all_results)}\n"
                    )
                    loop_summary_panel = create_panel(
                        content=Markdown(summary_content) if markdown else summary_content,
                        title=f"Loop {step_name} (Completed)",
                        border_style="yellow",
                    )
                    console.print(loop_summary_panel) # type: ignore
                # Reset context
                current_primitive_context = None
                step_display_cache.clear()
                step_started_printed = True
            elif isinstance(response, ParallelExecutionStartedEvent):
                current_step_name = response.step_name or "Parallel Steps"
                current_step_index = response.step_index or 0 # type: ignore
                current_step_content = ""
                step_started_printed = False
                # Set up parallel context
                current_primitive_context = {
                    "type": "parallel",
                    "step_index": current_step_index,
                    "sub_step_counter": 0,
                    "total_steps": response.parallel_step_count,
                }
                # Initialize parallel step tracking - clear previous states
                parallel_step_states.clear()
                step_display_cache.clear()
                # Print parallel execution summary panel
                live_log.update(status, refresh=True)
                parallel_summary = f"**Parallel Steps:** {response.parallel_step_count}"
                # Use get_step_display_number for consistent numbering
                step_display = get_step_display_number(current_step_index, current_step_name)
                parallel_panel = create_panel(
                    content=Markdown(parallel_summary) if markdown else parallel_summary,
                    title=f"{step_display}: {current_step_name}",
                    border_style="cyan",
                )
                console.print(parallel_panel) # type: ignore
                status.update(
                    f"Starting parallel execution: {current_step_name} ({response.parallel_step_count} steps)..."
                )
                live_log.update(status)
            elif isinstance(response, ParallelExecutionCompletedEvent):
                step_name = response.step_name or "Parallel Steps"
                step_index = response.step_index or 0
                status.update(f"Completed parallel execution: {step_name}")
                # Display individual parallel step results immediately
                if show_step_details and response.step_results:
                    live_log.update(status, refresh=True)
                    # Get the parallel container's display number for consistent numbering
                    parallel_step_display = get_step_display_number(step_index, step_name)
                    # Show each parallel step with the same number (1.1, 1.1)
                    for step_result in response.step_results:
                        if step_result.content:
                            step_result_name = step_result.step_name or "Parallel Step"
                            formatted_content = format_step_content_for_display(step_result.content) # type: ignore
                            # All parallel sub-steps get the same number
                            parallel_step_panel = create_panel(
                                content=Markdown(formatted_content) if markdown else formatted_content,
                                title=f"{parallel_step_display}: {step_result_name} (Completed)",
                                border_style="orange3",
                            )
                            console.print(parallel_step_panel) # type: ignore
                # Reset context
                current_primitive_context = None
                parallel_step_states.clear()
                step_display_cache.clear()
            elif isinstance(response, ConditionExecutionStartedEvent):
                current_step_name = response.step_name or "Condition"
                current_step_index = response.step_index or 0 # type: ignore
                current_step_content = ""
                step_started_printed = False
                # Set up condition context
                current_primitive_context = {
                    "type": "condition",
                    "step_index": current_step_index,
                    "sub_step_counter": 0,
                    "condition_result": response.condition_result,
                }
                # Initialize parallel step tracking - clear previous states
                parallel_step_states.clear()
                step_display_cache.clear()
                condition_text = "met" if response.condition_result else "not met"
                status.update(f"Starting condition: {current_step_name} (condition {condition_text})...")
                live_log.update(status)
            elif isinstance(response, ConditionExecutionCompletedEvent):
                step_name = response.step_name or "Condition"
                step_index = response.step_index or 0
                status.update(f"Completed condition: {step_name}")
                # Reset context
                current_primitive_context = None
                step_display_cache.clear()
            elif isinstance(response, RouterExecutionStartedEvent):
                current_step_name = response.step_name or "Router"
                current_step_index = response.step_index or 0 # type: ignore
                current_step_content = ""
                step_started_printed = False
                # Set up router context
                current_primitive_context = {
                    "type": "router",
                    "step_index": current_step_index,
                    "sub_step_counter": 0,
                    "selected_steps": response.selected_steps,
                }
                # Initialize parallel step tracking - clear previous states
                parallel_step_states.clear()
                step_display_cache.clear()
                selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
                status.update(f"Starting router: {current_step_name} (selected: {selected_steps_text})...")
                live_log.update(status)
            elif isinstance(response, RouterExecutionCompletedEvent):
                step_name = response.step_name or "Router"
                step_index = response.step_index or 0
                status.update(f"Completed router: {step_name}")
                # Print router summary
                if show_step_details:
                    selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
                    summary_content = "**Router Summary:**\n\n"
                    summary_content += f"- Selected steps: {selected_steps_text}\n"
                    summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
                    router_summary_panel = create_panel(
                        content=Markdown(summary_content) if markdown else summary_content,
                        title=f"Router {step_name} (Completed)",
                        border_style="purple",
                    )
                    console.print(router_summary_panel) # type: ignore
                # Reset context
                current_primitive_context = None
                step_display_cache.clear()
                step_started_printed = True
            elif isinstance(response, StepsExecutionStartedEvent):
                current_step_name = response.step_name or "Steps"
                current_step_index = response.step_index or 0 # type: ignore
                current_step_content = ""
                step_started_printed = False
                status.update(f"Starting steps: {current_step_name} ({response.steps_count} steps)...")
                live_log.update(status)
            elif isinstance(response, StepsExecutionCompletedEvent):
                step_name = response.step_name or "Steps"
                step_index = response.step_index or 0
                status.update(f"Completed steps: {step_name}")
                # Add results from executed steps to step_results
                if response.step_results:
                    for i, step_result in enumerate(response.step_results):
                        # Use the same numbering system as other primitives
                        step_display_number = get_step_display_number(step_index, step_result.step_name or "")
                        step_results.append(
                            {
                                "step_name": f"{step_display_number}: {step_result.step_name}",
                                "step_index": step_index,
                                "content": step_result.content,
                                "event": "StepsStepResult",
                            }
                        )
                # Print steps summary
                if show_step_details:
                    summary_content = "**Steps Summary:**\n\n"
                    summary_content += f"- Total steps: {response.steps_count or 0}\n"
                    summary_content += f"- Executed steps: {response.executed_steps or 0}\n"
                    steps_summary_panel = create_panel(
                        content=Markdown(summary_content) if markdown else summary_content,
                        title=f"Steps {step_name} (Completed)",
                        border_style="yellow",
                    )
                    console.print(steps_summary_panel) # type: ignore
                step_started_printed = True
            elif isinstance(response, WorkflowCompletedEvent):
                status.update("Workflow completed!")
                # Check if this is an agent direct response
                if response.metadata and response.metadata.get("agent_direct_response"):
                    is_workflow_agent_response = True
                    # Print the agent's direct response from history
                    if show_step_details:
                        live_log.update(status, refresh=True)
                        agent_response_panel = create_panel(
                            content=Markdown(str(response.content)) if markdown else str(response.content),
                            title="Workflow Agent Response",
                            border_style="green",
                        )
                        console.print(agent_response_panel) # type: ignore
                        step_started_printed = True
                # For callable functions, print the final content block here since there are no step events
                elif (
                    is_callable_function and show_step_details and current_step_content and not step_started_printed
                ):
                    final_step_panel = create_panel(
                        content=Markdown(current_step_content) if markdown else current_step_content,
                        title="Custom Function (Completed)",
                        border_style="orange3",
                    )
                    console.print(final_step_panel) # type: ignore
                    step_started_printed = True
                live_log.update(status, refresh=True)
                # Show final summary (skip for agent responses)
                if response.metadata and not is_workflow_agent_response:
                    # NOTE(review): this rebinds `status` from the rich Status
                    # spinner to the workflow's status value; safe only because
                    # no further status.update() happens after this event.
                    status = response.status
                    summary_content = ""
                    summary_content += f"""\n\n**Status:** {status}"""
                    summary_content += (
                        f"""\n\n**Steps Completed:** {len(response.step_results) if response.step_results else 0}"""
                    )
                    summary_content = summary_content.strip()
                    summary_panel = create_panel(
                        content=Markdown(summary_content) if markdown else summary_content,
                        title="Execution Summary",
                        border_style="blue",
                    )
                    console.print(summary_panel) # type: ignore
            else:
                # Streaming content (strings, step outputs, agent/team content events)
                if isinstance(response, str):
                    response_str = response
                elif isinstance(response, StepOutputEvent):
                    # Handle StepOutputEvent objects yielded from workflow
                    response_str = response.content or "" # type: ignore
                else:
                    from agno.run.agent import RunContentEvent
                    from agno.run.team import RunContentEvent as TeamRunContentEvent

                    current_step_executor_type = None
                    # Handle both integer and tuple step indices for parallel execution
                    actual_step_index = current_step_index
                    if isinstance(current_step_index, tuple):
                        # For tuple indices, use the first element (parent step index)
                        actual_step_index = current_step_index[0]
                        # If it's nested tuple, keep extracting until we get an integer
                        while isinstance(actual_step_index, tuple) and len(actual_step_index) > 0:
                            actual_step_index = actual_step_index[0]
                    # Check if this is a streaming content event from agent or team
                    if isinstance(
                        response,
                        (RunContentEvent, TeamRunContentEvent, WorkflowRunOutputEvent), # type: ignore
                    ): # type: ignore
                        # Handle WorkflowErrorEvent specifically
                        if isinstance(response, WorkflowErrorEvent): # type: ignore
                            response_str = response.error or "Workflow execution error" # type: ignore
                        else:
                            # Extract the content from the streaming event
                            response_str = response.content # type: ignore
                        # If we get RunContentEvent BEFORE workflow starts, it's an agent direct response
                        if isinstance(response, RunContentEvent) and not workflow_started:
                            is_workflow_agent_response = True
                            continue # Skip ALL agent direct response content
                        # Check if this is a team's final structured output
                        is_structured_output = (
                            isinstance(response, TeamRunContentEvent)
                            and hasattr(response, "content_type")
                            and response.content_type != "str"
                            and response.content_type != ""
                        )
                    elif isinstance(response, RunContentEvent) and current_step_executor_type != "team":
                        response_str = response.content # type: ignore
                        # If we get RunContentEvent BEFORE workflow starts, it's an agent direct response
                        if not workflow_started and not is_workflow_agent_response:
                            is_workflow_agent_response = True
                    else:
                        continue
                # Use the unified formatting function for consistency
                response_str = format_step_content_for_display(response_str) # type: ignore
                # Skip streaming content from parallel sub-steps - they're handled in ParallelExecutionCompletedEvent
                if (
                    current_primitive_context
                    and current_primitive_context["type"] == "parallel"
                    and isinstance(current_step_index, tuple)
                ):
                    continue
                # Filter out empty responses and add to current step content
                if response_str and response_str.strip():
                    # If it's a structured output from a team, replace the content instead of appending
                    if "is_structured_output" in locals() and is_structured_output:
                        current_step_content = response_str
                    else:
                        current_step_content += response_str
                    # Live update the step panel with streaming content (skip for workflow agent responses)
                    if show_step_details and not step_started_printed and not is_workflow_agent_response:
                        # Generate smart step number for streaming title (will use cached value)
                        step_display = get_step_display_number(current_step_index, current_step_name)
                        title = f"{step_display}: {current_step_name} (Streaming...)"
                        if is_callable_function:
                            title = "Custom Function (Streaming...)"
                        # Show the streaming content live in orange panel
                        live_step_panel = create_panel(
                            content=Markdown(current_step_content) if markdown else current_step_content,
                            title=title,
                            border_style="orange3",
                        )
                        # Create group with status and current step content
                        group = Group(status, live_step_panel)
                        live_log.update(group)
        response_timer.stop()
        live_log.update("")
        if show_time and not is_workflow_agent_response:
            completion_text = Text(f"Completed in {response_timer.elapsed:.1f}s", style="bold green")
            console.print(completion_text) # type: ignore
    except Exception as e:
        import traceback

        traceback.print_exc()
        response_timer.stop()
        error_panel = create_panel(
            content=f"Workflow execution failed: {str(e)}", title="Execution Error", border_style="red"
        )
        console.print(error_panel) # type: ignore
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/print_response/workflow.py",
"license": "Apache License 2.0",
"lines": 1410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/reasoning.py | from typing import TYPE_CHECKING, List, Optional, Tuple, Union
from agno.metrics import MessageMetrics
from agno.models.message import Message
from agno.reasoning.step import ReasoningStep
if TYPE_CHECKING:
from agno.run.agent import RunOutput
from agno.team.team import TeamRunOutput
def extract_thinking_content(content: str) -> Tuple[Optional[str], str]:
    """Extract thinking content from response text between <think> tags.

    Returns a ``(reasoning, remainder)`` tuple. When *content* is empty or has
    no closing ``</think>`` tag, the reasoning part is ``None`` and the text is
    returned untouched. A missing opening ``<think>`` tag means everything
    before the closing tag is treated as reasoning.
    """
    closing = "</think>"
    opening = "<think>"
    if not content or closing not in content:
        return None, content
    close_at = content.find(closing)
    open_at = content.find(opening)
    if open_at == -1:
        # No opening tag: reasoning runs from the start of the text
        reasoning = content[:close_at].strip()
    else:
        reasoning = content[open_at + len(opening) : close_at].strip()
    remainder = content[close_at + len(closing) :].strip()
    return reasoning, remainder
def append_to_reasoning_content(run_response: Union["RunOutput", "TeamRunOutput"], content: str) -> None:
    """Append *content* to the response's reasoning_content, creating it when absent or empty."""
    existing = getattr(run_response, "reasoning_content", None)
    run_response.reasoning_content = (existing + content) if existing else content  # type: ignore
def add_reasoning_step_to_metadata(
    run_response: Union["RunOutput", "TeamRunOutput"], reasoning_step: "ReasoningStep"
) -> None:
    """Append *reasoning_step* to the response's reasoning_steps, creating the list on first use."""
    steps = run_response.reasoning_steps
    if steps is None:
        steps = []
        run_response.reasoning_steps = steps
    steps.append(reasoning_step)
def add_reasoning_metrics_to_metadata(
    run_response: Union["RunOutput", "TeamRunOutput"], reasoning_time_taken: float
) -> None:
    """Record reasoning timing on the response as an assistant message with metrics.

    Best-effort: any failure is logged and swallowed so reasoning metrics can
    never break a run.
    """
    try:
        # Initialize reasoning_messages if it doesn't exist
        if run_response.reasoning_messages is None:
            run_response.reasoning_messages = []
        # Wrap the accumulated reasoning text plus the elapsed duration
        run_response.reasoning_messages.append(
            Message(
                role="assistant",
                content=run_response.reasoning_content,
                metrics=MessageMetrics(duration=reasoning_time_taken),
            )
        )
    except Exception as e:
        # Log the error but don't crash
        from agno.utils.log import log_error

        log_error(f"Failed to add reasoning metrics to metadata: {str(e)}")
def update_run_output_with_reasoning(
    run_response: Union["RunOutput", "TeamRunOutput"],
    reasoning_steps: List["ReasoningStep"],
    reasoning_agent_messages: List["Message"],
) -> None:
    """Merge reasoning steps and messages into the response and rebuild reasoning_content.

    Steps and messages are appended to any existing lists; reasoning_content is
    re-rendered from *reasoning_steps* only (one markdown section per step).
    """
    # Accumulate steps
    if run_response.reasoning_steps is None:
        run_response.reasoning_steps = reasoning_steps
    else:
        run_response.reasoning_steps.extend(reasoning_steps)
    # Accumulate messages
    if run_response.reasoning_messages is None:
        run_response.reasoning_messages = reasoning_agent_messages
    else:
        run_response.reasoning_messages.extend(reasoning_agent_messages)
    # Render reasoning_content: skip falsy fields, one blank line between steps
    parts: List[str] = []
    for step in reasoning_steps:
        if step.title:
            parts.append(f"## {step.title}\n")
        if step.reasoning:
            parts.append(f"{step.reasoning}\n")
        if step.action:
            parts.append(f"Action: {step.action}\n")
        if step.result:
            parts.append(f"Result: {step.result}\n")
        parts.append("\n")
    run_response.reasoning_content = "".join(parts)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/reasoning.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/streamlit.py | from datetime import datetime
from typing import Any, Callable, Dict, List, Optional
try:
from agno.agent import Agent
from agno.db.base import SessionType
from agno.models.anthropic import Claude
from agno.models.google import Gemini
from agno.models.openai import OpenAIChat
from agno.utils.log import logger
except ImportError:
raise ImportError("`agno` not installed. Please install using `pip install agno`")
try:
import streamlit as st
except ImportError:
raise ImportError("`streamlit` not installed. Please install using `pip install streamlit`")
def add_message(role: str, content: str, tool_calls: Optional[List[Dict[str, Any]]] = None) -> None:
    """Append a chat message (and optional tool calls) to st.session_state["messages"]."""
    # session_state is a MutableMapping, so setdefault creates the list once
    history = st.session_state.setdefault("messages", [])
    record: Dict[str, Any] = {"role": role, "content": content}
    if tool_calls:
        record["tool_calls"] = tool_calls
    history.append(record)
def display_tool_calls(container, tools: List[Any]):
    """Render each tool invocation as a collapsible expander inside *container*."""
    if not tools:
        return

    def _fields(call: Any):
        # Tool calls arrive either as objects or as plain dicts
        if hasattr(call, "tool_name"):
            return call.tool_name or "Tool", call.tool_args or {}, call.result or ""
        return (
            call.get("tool_name") or call.get("name") or "Tool",
            call.get("tool_args") or call.get("args") or {},
            call.get("result") or call.get("content") or "",
        )

    with container.container():
        for call in tools:
            name, args, result = _fields(call)
            with st.expander(f"🛠️ {name.replace('_', ' ')}", expanded=False):
                if args:
                    st.markdown("**Arguments:**")
                    st.json(args)
                if result:
                    st.markdown("**Result:**")
                    st.json(result)
def session_selector_widget(agent: Agent, model_id: str, agent_creation_callback: Callable[[str, str], Agent]) -> None:
    """Sidebar widget to pick, switch, and rename persisted agent sessions.

    Lists sessions from the agent's database (newest first), pre-selects the
    current one, loads a different session when selected, and offers an
    inline rename flow for the active session.
    """
    if not agent.db:
        st.sidebar.info("💡 Database not configured. Sessions will not be saved.")
        return
    try:
        sessions = agent.db.get_sessions(
            session_type=SessionType.AGENT,
            deserialize=True,
            sort_by="created_at",
            sort_order="desc",
        )
    except Exception as e:
        logger.error(f"Error fetching sessions: {e}")
        st.sidebar.error("Could not load sessions")
        return
    if not sessions:
        st.sidebar.info("🆕 New Chat - Start your conversation!")
        return
    # Filter session data: build parallel display-name list and name->id map
    session_options = []
    session_dict = {}
    for session in sessions:  # type: ignore
        if not hasattr(session, "session_id") or not session.session_id:
            continue
        session_id = session.session_id
        session_name = None
        # Extract session name from session_data
        if hasattr(session, "session_data") and session.session_data:
            session_name = session.session_data.get("session_name")
        name = session_name or session_id
        session_options.append(name)
        session_dict[name] = session_id
    current_session_id = st.session_state.get("session_id")
    current_selection = None
    # A session_id not yet in the DB listing means a brand-new session:
    # prepend it so the selectbox can show it as the active entry.
    if current_session_id and current_session_id not in [s_id for s_id in session_dict.values()]:
        logger.info(f"New session: {current_session_id}")
        if agent.get_session_name():
            current_display_name = agent.get_session_name()
        else:
            current_display_name = f"{current_session_id[:8]}..."
        session_options.insert(0, current_display_name)
        session_dict[current_display_name] = current_session_id
        current_selection = current_display_name
        st.session_state["is_new_session"] = True
    # Resolve the display name of the currently-active session
    for display_name, session_id in session_dict.items():
        if session_id == current_session_id:
            current_selection = display_name
            break
    display_options = session_options
    selected_index = (
        session_options.index(current_selection)
        if current_selection and current_selection in session_options
        else 0
        if session_options
        else None
    )
    if not display_options:
        st.sidebar.info("🆕 Start your first conversation!")
        return
    selected = st.sidebar.selectbox(
        label="Session",
        options=display_options,
        index=selected_index,
        help="Select a session to continue",
    )
    if selected and selected in session_dict:
        selected_session_id = session_dict[selected]
        if selected_session_id != current_session_id:
            # is_new_session guards against immediately "loading" the session
            # we just created on this rerun
            if not st.session_state.get("is_new_session", False):
                st.session_state["is_loading_session"] = True
                try:
                    _load_session(selected_session_id, model_id, agent_creation_callback)
                finally:
                    # Always clear the loading flag, even if there's an error
                    st.session_state["is_loading_session"] = False
            else:
                # Clear the new session flag since we're done with initialization
                st.session_state["is_new_session"] = False
    # Rename session
    if agent.session_id:
        if "session_edit_mode" not in st.session_state:
            st.session_state.session_edit_mode = False
        current_name = agent.get_session_name() or agent.session_id
        if not st.session_state.session_edit_mode:
            # Read-only view: session name plus an edit toggle
            col1, col2 = st.sidebar.columns([3, 1])
            with col1:
                st.write(f"**Session:** {current_name}")
            with col2:
                if st.button("✎", help="Rename session", key="rename_session_button"):
                    st.session_state.session_edit_mode = True
                    st.rerun()
        else:
            # Edit view: text input with save/cancel buttons
            new_name = st.sidebar.text_input("Enter new name:", value=current_name, key="session_name_input")
            col1, col2 = st.sidebar.columns([1, 1])
            with col1:
                if st.button(
                    "💾 Save",
                    type="primary",
                    use_container_width=True,
                    key="save_session_name",
                ):
                    if new_name and new_name.strip():
                        try:
                            result = agent.set_session_name(session_name=new_name.strip())
                            if result:
                                logger.info(f"Session renamed to: {new_name.strip()}")
                                # Clear any cached session data to ensure fresh reload
                                if hasattr(agent, "_agent_session") and agent._agent_session:
                                    agent._agent_session = None
                                st.session_state.session_edit_mode = False
                                st.sidebar.success("Session renamed!")
                                st.rerun()
                        except Exception as e:
                            logger.error(f"Error renaming session: {e}")
                            st.sidebar.error(f"Error: {str(e)}")
                    else:
                        st.sidebar.error("Please enter a valid name")
            with col2:
                if st.button("❌ Cancel", use_container_width=True, key="cancel_session_rename"):
                    st.session_state.session_edit_mode = False
                    st.rerun()
def _load_session(session_id: str, model_id: str, agent_creation_callback: Callable[[str, str], Agent]):
    """Recreate the agent for *session_id* and rebuild chat history from the DB.

    Replaces the agent/session/messages entries in st.session_state, then
    replays the stored runs into the message list and reruns the app.
    Chat-history failures are logged but non-fatal; agent-creation failures
    surface in the sidebar.
    """
    try:
        logger.info(f"Creating agent with session_id: {session_id}")
        new_agent = agent_creation_callback(model_id, session_id)
        st.session_state["agent"] = new_agent
        st.session_state["session_id"] = session_id
        st.session_state["messages"] = []
        st.session_state["current_model"] = model_id  # Keep current_model in sync
        try:
            if new_agent.db:
                selected_session = new_agent.db.get_session(
                    session_id=session_id, session_type=SessionType.AGENT, deserialize=True
                )
            else:
                selected_session = None
            # Recreate the chat history
            if selected_session:
                if hasattr(selected_session, "runs") and selected_session.runs:
                    for run_idx, run in enumerate(selected_session.runs):
                        messages = getattr(run, "messages", None)
                        if messages:
                            # NOTE(review): only the last user and last assistant
                            # message per run survive this collapse — confirm
                            # that is intended for multi-message runs.
                            user_msg = None
                            assistant_msg = None
                            tool_calls = []
                            for msg_idx, message in enumerate(messages):
                                if not hasattr(message, "role") or not hasattr(message, "content"):
                                    continue
                                role = message.role
                                content = str(message.content) if message.content else ""
                                if role == "user":
                                    if content and content.strip():
                                        user_msg = content.strip()
                                elif role == "assistant":
                                    # Skip empty or literal "None" assistant content
                                    if content and content.strip() and content.strip().lower() != "none":
                                        assistant_msg = content
                            # Display tool calls for this run
                            if hasattr(run, "tools") and run.tools:
                                tool_calls = run.tools
                            # Add messages to chat history
                            if user_msg:
                                add_message("user", user_msg)
                            if assistant_msg:
                                add_message("assistant", assistant_msg, tool_calls)
            else:
                logger.warning(f"No session found in database for session_id: {session_id}")
        except Exception as e:
            logger.warning(f"Could not load chat history: {e}")
        st.rerun()
    except Exception as e:
        logger.error(f"Error loading session: {e}")
        st.sidebar.error(f"Error loading session: {str(e)}")
def display_response(agent: Agent, question: str) -> None:
    """Run the agent on *question*, streaming the answer and tool calls into the chat.

    Accumulates streamed content into one assistant message, rendering tool
    calls in expanders as they arrive, and stores the final message in
    session state.
    """
    with st.chat_message("assistant"):
        tool_calls_container = st.empty()
        resp_container = st.empty()
        with st.spinner("🤔 Thinking..."):
            response = ""
            try:
                # Run the agent and stream the response
                run_response = agent.run(question, stream=True)
                for resp_chunk in run_response:
                    try:
                        # Display tool calls if available
                        if hasattr(resp_chunk, "tool") and resp_chunk.tool:
                            display_tool_calls(tool_calls_container, [resp_chunk.tool])
                    except Exception as tool_error:
                        logger.warning(f"Error displaying tool calls: {tool_error}")
                    if resp_chunk.content is not None:
                        content = str(resp_chunk.content)
                        # NOTE(review): `and` binds tighter than `or`, so this
                        # reads as: endswith("completed in") OR ("completed in"
                        # in content AND "s." in content). It filters timing
                        # footer chunks — confirm the precedence is intended.
                        if not (
                            content.strip().endswith("completed in") or "completed in" in content and "s." in content
                        ):
                            response += content
                            resp_container.markdown(response)
                # Persist the full assistant message, with tool calls when available
                try:
                    if hasattr(agent, "run_response") and agent.run_response and hasattr(agent.run_response, "tools"):
                        add_message("assistant", response, agent.run_response.tools)
                    else:
                        add_message("assistant", response)
                except Exception as add_msg_error:
                    logger.warning(f"Error adding message with tools: {add_msg_error}")
                    add_message("assistant", response)
            except Exception as e:
                st.error(f"Sorry, I encountered an error: {str(e)}")
def display_chat_messages() -> None:
    """Render every stored user/assistant message (and its tool calls) in the chat."""
    if "messages" not in st.session_state:
        return
    for msg in st.session_state["messages"]:
        if msg["role"] not in ["user", "assistant"]:
            continue
        body = msg["content"]
        with st.chat_message(msg["role"]):
            # Tool calls render above the message text
            if msg.get("tool_calls"):
                display_tool_calls(st.container(), msg["tool_calls"])
            # Skip empty bodies and literal "None" placeholders
            if body is not None and str(body).strip() and str(body).strip().lower() != "none":
                st.markdown(body)
def initialize_agent(model_id: str, agent_creation_callback: Callable[[str, Optional[str]], Agent]) -> Agent:
    """Return the cached agent, creating and caching one on first use."""
    cached = st.session_state.get("agent")
    if cached is not None:
        return cached
    # First time initialization - reuse an existing session_id if any
    agent = agent_creation_callback(model_id, st.session_state.get("session_id"))
    st.session_state["agent"] = agent
    st.session_state["current_model"] = model_id
    return agent
def reset_session_state(agent: Agent) -> None:
    """Sync Streamlit session state with the agent's current session.

    Stores the agent's session_id (when set) and makes sure the message list
    exists so later widgets can append to it.
    """
    # Fix: this was a stray debug print(); use the module logger like the
    # rest of this file.
    logger.info(f"Resetting session state for agent: {agent.session_id}")
    if agent.session_id is not None:
        st.session_state["session_id"] = agent.session_id
    if "messages" not in st.session_state:
        st.session_state["messages"] = []
def knowledge_base_info_widget(agent: Agent) -> None:
    """Sidebar widget showing the knowledge base's document count (or why it is unavailable)."""
    knowledge = agent.knowledge
    if not knowledge:
        st.sidebar.info("No knowledge base configured")
        return
    vector_db = getattr(knowledge, "vector_db", None)
    if not vector_db:
        st.sidebar.info("No vector db configured")
        return
    try:
        count = vector_db.get_count()
        if count == 0:
            st.sidebar.info("💡 Upload documents to populate the knowledge base")
        else:
            st.sidebar.metric("Documents Loaded", count)
    except Exception as e:
        logger.error(f"Error getting knowledge base info: {e}")
        st.sidebar.warning("Could not retrieve knowledge base information")
def export_chat_history(app_name: str = "Chat") -> str:
    """Export the current chat history from session state as a markdown document."""
    messages = st.session_state.get("messages") or []
    if not messages:
        return "# Chat History\n\n*No messages to export*"
    # Title defaults to the app name; the first non-empty user message
    # (truncated to 100 chars) overrides it.
    title = f"{app_name} Chat History"
    for msg in messages:
        if msg.get("role") == "user" and msg.get("content"):
            snippet = msg["content"][:100]
            title = snippet + "..." if len(msg["content"]) > 100 else snippet
            break
    parts = [
        f"# {title}\n\n",
        f"**Exported:** {datetime.now().strftime('%B %d, %Y at %I:%M %p')}\n\n",
        "---\n\n",
    ]
    for msg in messages:
        content = msg.get("content", "")
        # Skip empty bodies and literal "None" placeholders
        if not content or str(content).strip().lower() == "none":
            continue
        heading = "## 🙋 User" if msg.get("role", "") == "user" else "## 🤖 Assistant"
        parts.append(f"{heading}\n\n{content}\n\n---\n\n")
    return "".join(parts)
def get_model_from_id(model_id: str):
    """Resolve a "provider:model" ID string into a model instance.

    Recognized prefixes are "openai:", "anthropic:" and "google:"; any
    unrecognized ID falls back to OpenAIChat with id "gpt-4o".
    """
    provider_table = (
        ("openai:", OpenAIChat),
        ("anthropic:", Claude),
        ("google:", Gemini),
    )
    for prefix, model_cls in provider_table:
        if model_id.startswith(prefix):
            return model_cls(id=model_id.split(prefix)[1])
    # Unknown provider prefix: default to OpenAI's gpt-4o.
    return OpenAIChat(id="gpt-4o")
def get_model_with_provider(model_name: str):
    """Get a model instance by inferring the correct provider from the model name.

    Args:
        model_name: Model name (e.g., "gpt-4o", "claude-4-sonnet", "gemini-2.5-pro")

    Returns:
        Model instance with correct provider
    """
    # An explicit "provider:model" ID takes precedence over inference.
    if ":" in model_name:
        return get_model_from_id(model_name)

    lowered = model_name.lower()
    # Check OpenAI-style names first (matches the original precedence).
    if any(token in lowered for token in ("gpt", "o1", "o3")):
        provider = "openai"
    elif "claude" in lowered:
        provider = "anthropic"
    elif "gemini" in lowered:
        provider = "google"
    else:
        # Anything unrecognized is routed to OpenAI.
        provider = "openai"
    return get_model_from_id(f"{provider}:{model_name}")
def about_section(description: str):
    """Render an "About" section in the Streamlit sidebar.

    Args:
        description: Markdown text displayed above the "Built with" credits.
    """
    st.sidebar.markdown("---")
    st.sidebar.markdown("### ℹ️ About")
    st.sidebar.markdown(f"""
    {description}
    Built with:
    - 🚀 Agno
    - 💫 Streamlit
    """)
# Known model names spanning OpenAI, Anthropic, and Google providers;
# the provider is inferred from the name (see get_model_with_provider).
MODELS = [
    "gpt-4o",
    "o3-mini",
    "gpt-5",
    "claude-sonnet-4-5-20250929",
    "gemini-2.5-pro",
]
COMMON_CSS = """
<style>
.main-title {
text-align: center;
background: linear-gradient(45deg, #FF4B2B, #FF416C);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
font-size: 3em;
font-weight: bold;
padding: 1em 0;
}
.subtitle {
text-align: center;
color: #666;
margin-bottom: 2em;
}
.stButton button {
width: 100%;
border-radius: 20px;
margin: 0.2em 0;
transition: all 0.3s ease;
}
.stButton button:hover {
transform: translateY(-2px);
box-shadow: 0 5px 15px rgba(0,0,0,0.1);
}
</style>
"""
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/streamlit.py",
"license": "Apache License 2.0",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/team.py | from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from agno.agent import Agent
from agno.media import Audio, File, Image, Video
from agno.run.agent import RunOutput
from agno.run.team import TeamRunOutput
from agno.utils.log import log_debug
from agno.utils.string import is_valid_uuid, url_safe_string
if TYPE_CHECKING:
from agno.team.team import Team
def format_member_agent_task(
task_description: str,
team_member_interactions_str: Optional[str] = None,
team_history_str: Optional[str] = None,
) -> str:
member_task_str = ""
if team_member_interactions_str:
member_task_str += f"{team_member_interactions_str}\n\n"
if team_history_str:
member_task_str += f"{team_history_str}\n\n"
member_task_str += f"{task_description}"
return member_task_str
def get_member_id(member: Union[Agent, "Team"]) -> Optional[str]:
"""
Get the ID of a member
Priority order:
1. If the member has an explicitly provided id, use it (UUID or not)
2. If the member has a name, convert that to a URL safe string
3. Otherwise, return None
"""
from agno.team.team import Team
# First priority: Use the ID if explicitly provided
if isinstance(member, Agent) and member.id is not None:
url_safe_member_id = member.id if is_valid_uuid(member.id) else url_safe_string(member.id)
elif isinstance(member, Team) and member.id is not None:
url_safe_member_id = member.id if is_valid_uuid(member.id) else url_safe_string(member.id)
# Second priority: Use the name if available
elif member.name is not None:
url_safe_member_id = url_safe_string(member.name)
else:
url_safe_member_id = None
return url_safe_member_id
def add_interaction_to_team_run_context(
team_run_context: Dict[str, Any],
member_name: str,
task: str,
run_response: Optional[Union[RunOutput, TeamRunOutput]],
) -> None:
if "member_responses" not in team_run_context:
team_run_context["member_responses"] = []
team_run_context["member_responses"].append(
{
"member_name": member_name,
"task": task,
"run_response": run_response,
}
)
log_debug(f"Updated team run context with member name: {member_name}")
def get_team_member_interactions_str(
team_run_context: Dict[str, Any],
max_interactions: Optional[int] = None,
) -> str:
"""
Build a string representation of member interactions from the team run context.
Args:
team_run_context: The context containing member responses
max_interactions: Maximum number of recent interactions to include.
None means include all interactions.
If set, only the most recent N interactions are included.
Returns:
A formatted string with member interactions
"""
if not team_run_context:
return ""
team_member_interactions_str = ""
if "member_responses" in team_run_context:
member_responses = team_run_context["member_responses"]
# If max_interactions is set, only include the most recent N interactions
if max_interactions is not None and len(member_responses) > max_interactions:
member_responses = member_responses[-max_interactions:]
if not member_responses:
return ""
team_member_interactions_str += (
"<member_interaction_context>\nSee below interactions with other team members.\n"
)
for interaction in member_responses:
response_dict = interaction["run_response"].to_dict()
response_content = (
response_dict.get("content")
or ",".join([tool.get("content", "") for tool in response_dict.get("tools", [])])
or ""
)
team_member_interactions_str += f"Member: {interaction['member_name']}\n"
team_member_interactions_str += f"Task: {interaction['task']}\n"
team_member_interactions_str += f"Response: {response_content}\n"
team_member_interactions_str += "\n"
team_member_interactions_str += "</member_interaction_context>\n"
return team_member_interactions_str
def get_team_run_context_images(
team_run_context: Dict[str, Any],
max_interactions: Optional[int] = None,
) -> List[Image]:
if not team_run_context:
return []
images = []
if "member_responses" in team_run_context:
member_responses = team_run_context["member_responses"]
if max_interactions is not None and len(member_responses) > max_interactions:
member_responses = member_responses[-max_interactions:]
for interaction in member_responses:
if interaction["run_response"].images:
images.extend(interaction["run_response"].images)
return images
def get_team_run_context_videos(
team_run_context: Dict[str, Any],
max_interactions: Optional[int] = None,
) -> List[Video]:
if not team_run_context:
return []
videos = []
if "member_responses" in team_run_context:
member_responses = team_run_context["member_responses"]
if max_interactions is not None and len(member_responses) > max_interactions:
member_responses = member_responses[-max_interactions:]
for interaction in member_responses:
if interaction["run_response"].videos:
videos.extend(interaction["run_response"].videos)
return videos
def get_team_run_context_audio(
team_run_context: Dict[str, Any],
max_interactions: Optional[int] = None,
) -> List[Audio]:
if not team_run_context:
return []
audio = []
if "member_responses" in team_run_context:
member_responses = team_run_context["member_responses"]
if max_interactions is not None and len(member_responses) > max_interactions:
member_responses = member_responses[-max_interactions:]
for interaction in member_responses:
if interaction["run_response"].audio:
audio.extend(interaction["run_response"].audio)
return audio
def get_team_run_context_files(
team_run_context: Dict[str, Any],
max_interactions: Optional[int] = None,
) -> List[File]:
if not team_run_context:
return []
files = []
if "member_responses" in team_run_context:
member_responses = team_run_context["member_responses"]
if max_interactions is not None and len(member_responses) > max_interactions:
member_responses = member_responses[-max_interactions:]
for interaction in member_responses:
if interaction["run_response"].files:
files.extend(interaction["run_response"].files)
return files
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/team.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/vectordb/langchaindb/langchaindb.py | from typing import Any, Dict, List, Optional, Union
from agno.filters import FilterExpr
from agno.knowledge.document import Document
from agno.utils.log import log_debug, log_warning, logger
from agno.vectordb.base import VectorDb
class LangChainVectorDb(VectorDb):
def __init__(
self,
vectorstore: Optional[Any] = None,
search_kwargs: Optional[dict] = None,
knowledge_retriever: Optional[Any] = None,
name: Optional[str] = None,
description: Optional[str] = None,
):
"""
Initialize LangChainVectorDb.
Args:
vectorstore: The LangChain vectorstore instance
name (Optional[str]): Name of the vector database.
description (Optional[str]): Description of the vector database.
search_kwargs: Additional search parameters for the retriever
knowledge_retriever: An optional LangChain retriever instance
"""
self.vectorstore = vectorstore
# Initialize base class with name and description
super().__init__(name=name, description=description)
self.search_kwargs = search_kwargs
self.knowledge_retriever = knowledge_retriever
def create(self) -> None:
raise NotImplementedError
async def async_create(self) -> None:
raise NotImplementedError
def name_exists(self, name: str) -> bool:
raise NotImplementedError
def async_name_exists(self, name: str) -> bool:
raise NotImplementedError
def id_exists(self, id: str) -> bool:
raise NotImplementedError
def content_hash_exists(self, content_hash: str) -> bool:
raise NotImplementedError
def delete_by_content_id(self, content_id: str) -> None:
raise NotImplementedError
def insert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
logger.warning("LangChainKnowledgeBase.insert() not supported - please check the vectorstore manually.")
raise NotImplementedError
async def async_insert(
self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
) -> None:
logger.warning("LangChainKnowledgeBase.async_insert() not supported - please check the vectorstore manually.")
raise NotImplementedError
def upsert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
logger.warning("LangChainKnowledgeBase.upsert() not supported - please check the vectorstore manually.")
raise NotImplementedError
async def async_upsert(self, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
logger.warning("LangChainKnowledgeBase.async_upsert() not supported - please check the vectorstore manually.")
raise NotImplementedError
def search(
self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
"""Returns relevant documents matching the query"""
if isinstance(filters, List):
log_warning(
"Filter Expressions are not supported in LangChainDB. No filters will be applied. Use filters as a dictionary."
)
filters = None
try:
from langchain_core.documents import Document as LangChainDocument
from langchain_core.retrievers import BaseRetriever
except ImportError:
raise ImportError(
"The `langchain` package is not installed. Please install it via `pip install langchain`."
)
if self.vectorstore is not None and self.knowledge_retriever is None:
log_debug("Creating knowledge retriever")
if self.search_kwargs is None:
self.search_kwargs = {"k": limit}
if filters is not None:
self.search_kwargs.update(filters)
self.knowledge_retriever = self.vectorstore.as_retriever(search_kwargs=self.search_kwargs)
if self.knowledge_retriever is None:
logger.error("No knowledge retriever provided")
return []
if not isinstance(self.knowledge_retriever, BaseRetriever):
raise ValueError(f"Knowledge retriever is not of type BaseRetriever: {self.knowledge_retriever}")
log_debug(f"Getting {limit} relevant documents for query: {query}")
lc_documents: List[LangChainDocument] = self.knowledge_retriever.invoke(input=query)
documents = []
for lc_doc in lc_documents:
documents.append(
Document(
content=lc_doc.page_content,
meta_data=lc_doc.metadata,
)
)
return documents
async def async_search(
self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
return self.search(query, limit, filters)
def drop(self) -> None:
raise NotImplementedError
async def async_drop(self) -> None:
raise NotImplementedError
async def async_exists(self) -> bool:
raise NotImplementedError
def delete(self) -> bool:
raise NotImplementedError
def delete_by_id(self, id: str) -> bool:
raise NotImplementedError
def delete_by_name(self, name: str) -> bool:
raise NotImplementedError
def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
raise NotImplementedError
def exists(self) -> bool:
logger.warning("LangChainKnowledgeBase.exists() not supported - please check the vectorstore manually.")
return True
def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
"""
Update the metadata for documents with the given content_id.
Not implemented for LangChain wrapper.
Args:
content_id (str): The content ID to update
metadata (Dict[str, Any]): The metadata to update
"""
raise NotImplementedError("update_metadata not supported for LangChain vectorstores")
def get_supported_search_types(self) -> List[str]:
"""Get the supported search types for this vector database."""
return [] # LangChainVectorDb doesn't use SearchType enum
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/langchaindb/langchaindb.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/vectordb/lightrag/lightrag.py | import asyncio
from typing import Any, Dict, List, Optional, Union
import httpx
from agno.filters import FilterExpr
from agno.knowledge.document import Document
from agno.utils.log import log_debug, log_error, log_info, log_warning
from agno.vectordb.base import VectorDb
DEFAULT_SERVER_URL = "http://localhost:9621"
class LightRag(VectorDb):
"""
LightRAG VectorDB implementation
"""
def __init__(
self,
server_url: str = DEFAULT_SERVER_URL,
api_key: Optional[str] = None,
auth_header_name: str = "X-API-KEY",
auth_header_format: str = "{api_key}",
name: Optional[str] = None,
description: Optional[str] = None,
):
self.server_url = server_url
self.api_key = api_key
# Initialize base class with name and description
super().__init__(name=name, description=description)
self.auth_header_name = auth_header_name
self.auth_header_format = auth_header_format
def _get_headers(self) -> Dict[str, str]:
"""Get headers with optional API key authentication."""
headers = {"Content-Type": "application/json"}
if self.api_key:
headers[self.auth_header_name] = self.auth_header_format.format(api_key=self.api_key)
return headers
def _get_auth_headers(self) -> Dict[str, str]:
"""Get minimal headers with just authentication (for file uploads)."""
headers = {}
if self.api_key:
headers[self.auth_header_name] = self.auth_header_format.format(api_key=self.api_key)
return headers
def create(self) -> None:
"""Create the vector database"""
pass
async def async_create(self) -> None:
"""Async create the vector database"""
pass
def name_exists(self, name: str) -> bool:
"""Check if a document with the given name exists"""
return False
async def async_name_exists(self, name: str) -> bool:
"""Async check if a document with the given name exists"""
return False
def id_exists(self, id: str) -> bool:
"""Check if a document with the given ID exists"""
return False
def content_hash_exists(self, content_hash: str) -> bool:
"""Check if content with the given hash exists"""
return False
def insert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
"""Insert documents into the vector database"""
pass
async def async_insert(
self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
) -> None:
"""Async insert documents into the vector database"""
pass
def upsert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
"""Upsert documents into the vector database"""
pass
def delete_by_content_id(self, content_id: str) -> None:
"""Delete documents by content ID"""
pass
async def async_upsert(self, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
"""Async upsert documents into the vector database"""
pass
def search(
self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
result = asyncio.run(self.async_search(query, limit=limit, filters=filters))
return result if result is not None else []
async def async_search(
self, query: str, limit: Optional[int] = None, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> Optional[List[Document]]:
mode: str = "hybrid" # Default mode, can be "local", "global", or "hybrid"
if filters is not None:
log_warning("Filters are not supported in LightRAG. No filters will be applied.")
try:
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(
f"{self.server_url}/query",
json={"query": query, "mode": "hybrid", "include_references": True},
headers=self._get_headers(),
)
response.raise_for_status()
result = response.json()
return self._format_lightrag_response(result, query, mode)
except httpx.RequestError as e:
log_error(f"HTTP Request Error: {type(e).__name__}: {str(e)}")
return []
except httpx.HTTPStatusError as e:
log_error(f"HTTP Status Error: {e.response.status_code} - {e.response.text}")
return []
except Exception as e:
log_error(f"Unexpected error during LightRAG server search: {type(e).__name__}: {str(e)}")
import traceback
log_error(f"Full traceback: {traceback.format_exc()}")
return None
def drop(self) -> None:
"""Drop the vector database"""
asyncio.run(self.async_drop())
async def async_drop(self) -> None:
"""Async drop the vector database"""
async with httpx.AsyncClient(timeout=30.0) as client:
await client.delete(f"{self.server_url}/documents", headers=self._get_headers())
async with httpx.AsyncClient(timeout=30.0) as client:
await client.post(
f"{self.server_url}/documents/clear_cache",
json={"modes": ["default", "naive"]},
headers=self._get_headers(),
)
def exists(self) -> bool:
"""Check if the vector database exists"""
return False
async def async_exists(self) -> bool:
"""Async check if the vector database exists"""
return False
def delete(self) -> bool:
"""Delete all documents from the vector database"""
return False
def delete_by_id(self, id: str) -> bool:
"""Delete documents by ID"""
return False
def delete_by_name(self, name: str) -> bool:
"""Delete documents by name"""
return False
def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
"""Delete documents by metadata"""
return False
def delete_by_external_id(self, external_id: str) -> bool:
"""Delete documents by external ID (sync wrapper)"""
import asyncio
try:
return asyncio.run(self.async_delete_by_external_id(external_id))
except Exception as e:
log_error(f"Error in sync delete_by_external_id: {e}")
return False
async def async_delete_by_external_id(self, external_id: str) -> bool:
"""Delete documents by external ID"""
try:
payload = {"doc_ids": [external_id], "delete_file": False}
async with httpx.AsyncClient() as client:
response = await client.request(
method="DELETE",
url=f"{self.server_url}/documents/delete_document",
headers=self._get_headers(),
json=payload,
)
response.raise_for_status()
return True
except Exception as e:
log_error(f"Error deleting document {external_id}: {e}")
return False
# We use this method when content is coming from unsupported file types that LightRAG can't process
# For these we process the content in Agno and then insert it into LightRAG using text
async def _insert_text(self, text: str) -> Dict[str, Any]:
"""Insert text into the LightRAG server."""
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.server_url}/documents/text",
json={"text": text},
headers=self._get_headers(),
)
response.raise_for_status()
result = response.json()
log_debug(f"Text insertion result: {result}")
return result
async def insert_file_bytes(
self,
file_content: bytes,
filename: Optional[str] = None,
content_type: Optional[str] = None,
send_metadata: bool = False,
skip_if_exists: bool = False,
) -> Optional[str]:
"""Insert file from raw bytes into the LightRAG server."""
if not file_content:
log_warning("File content is empty.")
return None
if send_metadata and filename and content_type:
# Send with filename and content type (full UploadFile format)
files = {"file": (filename, file_content, content_type)}
else:
files = {"file": file_content} # type: ignore
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.server_url}/documents/upload",
files=files,
headers=self._get_auth_headers(),
)
response.raise_for_status()
result = response.json()
log_info(f"File insertion result: {result}")
track_id = result["track_id"]
log_info(f"Track ID: {track_id}")
result = await self._get_document_id(track_id) # type: ignore
log_info(f"Document ID: {result}")
return result
async def insert_text(self, file_source: str, text: str) -> Optional[str]:
"""Insert text into the LightRAG server."""
import httpx
async with httpx.AsyncClient() as client:
response = await client.post(
f"{self.server_url}/documents/text",
json={"file_source": file_source, "text": text},
headers=self._get_headers(),
)
response.raise_for_status()
result = response.json()
log_info(f"Text insertion result: {result}")
track_id = result["track_id"]
log_info(f"Track ID: {track_id}")
result = await self._get_document_id(track_id) # type: ignore
log_info(f"Document ID: {result}")
return result
async def _get_document_id(self, track_id: str) -> Optional[str]:
"""Get the document ID from the upload ID."""
async with httpx.AsyncClient() as client:
response = await client.get(
f"{self.server_url}/documents/track_status/{track_id}",
headers=self._get_headers(),
)
response.raise_for_status()
result = response.json()
log_debug(f"Document ID result: {result}")
# Extract document ID from the documents array
if "documents" in result and len(result["documents"]) > 0:
document_id = result["documents"][0]["id"]
return document_id
else:
log_error(f"No documents found in track response: {result}")
return None
def _is_valid_url(self, url: str) -> bool:
"""Helper to check if URL is valid."""
# TODO: Define supported extensions or implement proper URL validation
return True
async def lightrag_knowledge_retriever(
self,
query: str,
) -> Optional[List[Document]]:
"""
Custom knowledge retriever function to search the LightRAG server for relevant documents.
Args:
query: The search query string
num_documents: Number of documents to retrieve (currently unused by LightRAG)
mode: Query mode - "local", "global", or "hybrid"
lightrag_server_url: URL of the LightRAG server
Returns:
List of retrieved documents or None if search fails
"""
mode: str = "hybrid" # Default mode, can be "local", "global", or "hybrid"
try:
import httpx
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(
f"{self.server_url}/query",
json={"query": query, "mode": "hybrid", "include_references": True},
headers=self._get_headers(),
)
response.raise_for_status()
result = response.json()
return self._format_lightrag_response(result, query, mode)
except httpx.RequestError as e:
log_error(f"HTTP Request Error: {type(e).__name__}: {str(e)}")
return None
except httpx.HTTPStatusError as e:
log_error(f"HTTP Status Error: {e.response.status_code} - {e.response.text}")
return None
except Exception as e:
log_error(f"Unexpected error during LightRAG server search: {type(e).__name__}: {str(e)}")
import traceback
log_error(f"Full traceback: {traceback.format_exc()}")
return None
def _format_lightrag_response(self, result: Any, query: str, mode: str) -> List[Document]:
"""Format LightRAG server response to expected document format."""
# LightRAG server returns a dict with 'response' key, but we expect a list of documents
# Convert the response to the expected format
if isinstance(result, dict) and "response" in result:
meta_data = {"source": "lightrag", "query": query, "mode": mode}
# Preserve references from LightRAG response for document citations
if "references" in result:
meta_data["references"] = result["references"]
return [Document(content=result["response"], meta_data=meta_data)]
elif isinstance(result, list):
# Convert list items to Document objects
documents = []
for item in result:
if isinstance(item, dict) and "content" in item:
documents.append(
Document(
content=item["content"],
meta_data=item.get("metadata", {"source": "lightrag", "query": query, "mode": mode}),
)
)
else:
documents.append(
Document(content=str(item), meta_data={"source": "lightrag", "query": query, "mode": mode})
)
return documents
else:
# If it's a string or other format, wrap it in a Document
return [Document(content=str(result), meta_data={"source": "lightrag", "query": query, "mode": mode})]
def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
"""
Update metadata is not supported for LightRag as it manages its own graph structure.
Args:
content_id (str): The content ID to update
metadata (Dict[str, Any]): The metadata to update
"""
raise NotImplementedError("update_metadata not supported for LightRag - use LightRag's native methods")
def get_supported_search_types(self) -> List[str]:
"""Get the supported search types for this vector database."""
return [] # LightRag doesn't use SearchType enum
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/lightrag/lightrag.py",
"license": "Apache License 2.0",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/vectordb/llamaindex/llamaindexdb.py | from typing import Any, Callable, Dict, List, Optional, Union
from agno.filters import FilterExpr
from agno.knowledge.document import Document
from agno.utils.log import log_warning, logger
from agno.vectordb.base import VectorDb
try:
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import NodeWithScore
except ImportError:
raise ImportError(
"The `llama-index-core` package is not installed. Please install it via `pip install llama-index-core`."
)
class LlamaIndexVectorDb(VectorDb):
knowledge_retriever: BaseRetriever
loader: Optional[Callable] = None
def __init__(
self,
knowledge_retriever: BaseRetriever,
loader: Optional[Callable] = None,
name: Optional[str] = None,
description: Optional[str] = None,
**kwargs,
):
super().__init__(**kwargs)
# Initialize base class with name and description
super().__init__(name=name, description=description)
self.knowledge_retriever = knowledge_retriever
self.loader = loader
def create(self) -> None:
raise NotImplementedError
async def async_create(self) -> None:
raise NotImplementedError
def name_exists(self, name: str) -> bool:
raise NotImplementedError
def async_name_exists(self, name: str) -> bool:
raise NotImplementedError
def id_exists(self, id: str) -> bool:
raise NotImplementedError
def content_hash_exists(self, content_hash: str) -> bool:
raise NotImplementedError
def insert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
logger.warning("LlamaIndexVectorDb.insert() not supported - please check the vectorstore manually.")
raise NotImplementedError
async def async_insert(
self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
) -> None:
logger.warning("LlamaIndexVectorDb.async_insert() not supported - please check the vectorstore manually.")
raise NotImplementedError
def upsert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
logger.warning("LlamaIndexVectorDb.upsert() not supported - please check the vectorstore manually.")
raise NotImplementedError
async def async_upsert(self, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
logger.warning("LlamaIndexVectorDb.async_upsert() not supported - please check the vectorstore manually.")
raise NotImplementedError
def search(
self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
"""
Returns relevant documents matching the query.
Args:
query (str): The query string to search for.
limit (int): The maximum number of documents to return. Defaults to 5.
filters (Optional[Dict[str, Any]]): Filters to apply to the search. Defaults to None.
Returns:
List[Document]: A list of relevant documents matching the query.
Raises:
ValueError: If the knowledge retriever is not of type BaseRetriever.
"""
if filters is not None:
log_warning("Filters are not supported in LlamaIndex. No filters will be applied.")
if not isinstance(self.knowledge_retriever, BaseRetriever):
raise ValueError(f"Knowledge retriever is not of type BaseRetriever: {self.knowledge_retriever}")
lc_documents: List[NodeWithScore] = self.knowledge_retriever.retrieve(query)
if limit is not None:
lc_documents = lc_documents[:limit]
documents = []
for lc_doc in lc_documents:
documents.append(
Document(
content=lc_doc.text,
meta_data=lc_doc.metadata,
)
)
return documents
async def async_search(
self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
) -> List[Document]:
return self.search(query, limit, filters)
def drop(self) -> None:
raise NotImplementedError
async def async_drop(self) -> None:
raise NotImplementedError
async def async_exists(self) -> bool:
raise NotImplementedError
def delete(self) -> bool:
raise NotImplementedError
def delete_by_id(self, id: str) -> bool:
raise NotImplementedError
def delete_by_name(self, name: str) -> bool:
raise NotImplementedError
def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
raise NotImplementedError
def exists(self) -> bool:
logger.warning("LlamaIndexKnowledgeBase.exists() not supported - please check the vectorstore manually.")
return True
def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
"""
Update the metadata for documents with the given content_id.
Not implemented for LlamaIndex wrapper.
Args:
content_id (str): The content ID to update
metadata (Dict[str, Any]): The metadata to update
"""
raise NotImplementedError("update_metadata not supported for LlamaIndex vectorstores")
def delete_by_content_id(self, content_id: str) -> bool:
"""
Delete documents by content ID.
Not implemented for LlamaIndex wrapper.
Args:
content_id (str): The content ID to delete
Returns:
bool: False as this operation is not supported
"""
logger.warning(
"LlamaIndexVectorDb.delete_by_content_id() not supported - please check the vectorstore manually."
)
return False
def get_supported_search_types(self) -> List[str]:
"""Get the supported search types for this vector database."""
return [] # LlamaIndexVectorDb doesn't use SearchType enum
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/llamaindex/llamaindexdb.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/workflow/router.py | import inspect
from dataclasses import dataclass, field
from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
from uuid import uuid4
from agno.registry import Registry
from agno.run.agent import RunOutputEvent
from agno.run.base import RunContext
from agno.run.team import TeamRunOutputEvent
from agno.run.workflow import (
RouterExecutionCompletedEvent,
RouterExecutionStartedEvent,
WorkflowRunOutput,
WorkflowRunOutputEvent,
)
from agno.session.workflow import WorkflowSession
from agno.utils.log import log_debug, logger
from agno.workflow.cel import CEL_AVAILABLE, evaluate_cel_router_selector, is_cel_expression
from agno.workflow.step import Step
from agno.workflow.types import (
OnReject,
StepInput,
StepOutput,
StepRequirement,
StepType,
UserInputField,
)
# Type alias for the step forms a workflow/router accepts as choices: plain
# callables (sync or async, optionally yielding StepOutput as a stream) or any
# of the composite step primitives. The composite names are string forward
# references to avoid circular imports between the workflow modules.
WorkflowSteps = List[
    Union[
        Callable[
            [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
        ],
        Step,
        "Steps",  # type: ignore # noqa: F821
        "Loop",  # type: ignore # noqa: F821
        "Parallel",  # type: ignore # noqa: F821
        "Condition",  # type: ignore # noqa: F821
        "Router",  # type: ignore # noqa: F821
    ]
]
@dataclass
class Router:
    """A router that dynamically selects which step(s) to execute based on input.

    The Router can operate in three modes:
    1. Programmatic selection: Use a `selector` function to determine which steps to execute
    2. CEL expression: Use a CEL expression string that returns a step name
    3. HITL selection: Set `requires_user_input=True` to pause and let the user choose

    The selector can be:
    - A callable function that takes StepInput and returns step(s)
    - A CEL (Common Expression Language) expression string that returns a step name

    CEL expressions for selector have access to (same as Condition, plus step_choices):
    - input: The workflow input as a string
    - previous_step_content: Content from the previous step
    - previous_step_outputs: Map of step name to content string from all previous steps
    - additional_data: Map of additional data passed to the workflow
    - session_state: Map of session state values
    - step_choices: List of step names available to the selector

    CEL expressions must return the name of a step from choices.

    Example CEL expressions:
    - 'input.contains("video") ? "video_step" : "image_step"'
    - 'additional_data.route'
    - 'previous_step_outputs.classifier.contains("billing") ? "Billing" : "Support"'

    When using HITL mode:
    - Set `requires_user_input=True`
    - Optionally provide `user_input_message` for the prompt
    - The workflow will pause and present the user with the available `choices`
    - User selects one or more choices by name
    - The Router then executes the selected steps
    """

    # Available steps that can be selected
    choices: WorkflowSteps

    # Router function or CEL expression that selects step(s) to execute (optional if using HITL)
    selector: Optional[
        Union[
            Callable[[StepInput], Union[WorkflowSteps, List[WorkflowSteps]]],
            Callable[[StepInput], Awaitable[Union[WorkflowSteps, List[WorkflowSteps]]]],
            str,  # CEL expression returning step name
        ]
    ] = None

    # Human-readable identity; used in logs, events, and serialized payloads.
    name: Optional[str] = None
    description: Optional[str] = None

    # HITL parameters for user-driven routing (selection mode)
    requires_user_input: bool = False
    user_input_message: Optional[str] = None
    allow_multiple_selections: bool = False  # If True, user can select multiple choices
    user_input_schema: Optional[List[Dict[str, Any]]] = field(default=None)  # Custom schema if needed

    # HITL parameters for confirmation mode
    # If True, the router will pause and ask for confirmation before executing selected steps
    # User confirms -> execute the selected steps from selector
    # User rejects -> skip the router entirely
    requires_confirmation: bool = False
    confirmation_message: Optional[str] = None
    on_reject: Union[OnReject, str] = OnReject.skip
def to_dict(self) -> Dict[str, Any]:
    """Serialize this router, its HITL configuration, and nested choices to a plain dict.

    Raises:
        ValueError: If the selector is neither None, a callable, nor a string.
    """
    # Determine the selector representation up front (None / function name / CEL source).
    if self.selector is None:
        selector_repr, selector_kind = None, None
    elif callable(self.selector):
        selector_repr, selector_kind = self.selector.__name__, "function"
    elif isinstance(self.selector, str):
        selector_repr, selector_kind = self.selector, "cel"
    else:
        raise ValueError(f"Invalid selector type: {type(self.selector).__name__}")

    payload: Dict[str, Any] = {
        "type": "Router",
        "name": self.name,
        "description": self.description,
        "choices": [choice.to_dict() for choice in self.choices if hasattr(choice, "to_dict")],
        "requires_user_input": self.requires_user_input,
        "user_input_message": self.user_input_message,
        "allow_multiple_selections": self.allow_multiple_selections,
        "selector": selector_repr,
        "selector_type": selector_kind,
    }

    # The user-input schema is only emitted when present.
    if self.user_input_schema:
        payload["user_input_schema"] = self.user_input_schema

    # Confirmation-mode HITL fields are always present in the payload.
    payload["requires_confirmation"] = self.requires_confirmation
    payload["confirmation_message"] = self.confirmation_message
    payload["on_reject"] = str(self.on_reject)
    return payload
def create_step_requirement(
    self,
    step_index: int,
    step_input: StepInput,
    for_route_selection: bool = False,
) -> StepRequirement:
    """Build the StepRequirement used to pause this router for HITL.

    Two flavours exist:
    1. Confirmation (``for_route_selection=False``): the user approves execution
       of the steps already picked by the selector.
    2. Route selection (``for_route_selection=True``): the user picks which of
       the available routes should run.

    Args:
        step_index: Index of the router in the workflow.
        step_input: The prepared input for the router.
        for_route_selection: Selects which flavour of requirement to build.

    Returns:
        StepRequirement configured for this router's HITL needs.
    """
    display_name = self.name or f"router_{step_index + 1}"

    if not for_route_selection:
        # Confirmation flavour - ask the user to approve before running selected steps.
        reject_action = self.on_reject.value if isinstance(self.on_reject, OnReject) else str(self.on_reject)
        return StepRequirement(
            step_id=str(uuid4()),
            step_name=display_name,
            step_index=step_index,
            step_type="Router",
            requires_confirmation=self.requires_confirmation,
            confirmation_message=self.confirmation_message
            or f"Execute router '{self.name or 'router'}' with selected steps?",
            on_reject=reject_action,
            requires_user_input=False,
            step_input=step_input,
        )

    # Route-selection flavour - present the available choices and let the user pick.
    route_names = self._get_choice_names()
    # A custom schema is optional and only used for display; route selection
    # itself is driven by available_choices.
    schema = None
    if self.user_input_schema:
        schema = [UserInputField.from_dict(f) if isinstance(f, dict) else f for f in self.user_input_schema]
    return StepRequirement(
        step_id=str(uuid4()),
        step_name=display_name,
        step_index=step_index,
        step_type="Router",
        requires_route_selection=True,
        user_input_message=self.user_input_message or f"Select a route from: {', '.join(route_names)}",
        user_input_schema=schema,
        available_choices=route_names,
        allow_multiple_selections=self.allow_multiple_selections,
        step_input=step_input,
    )
@classmethod
def from_dict(
    cls,
    data: Dict[str, Any],
    registry: Optional["Registry"] = None,
    db: Optional[Any] = None,
    links: Optional[List[Dict[str, Any]]] = None,
) -> "Router":
    """Reconstruct a Router from its serialized dict form (see ``to_dict``).

    Args:
        data: Serialized router payload.
        registry: Registry used to resolve a selector function name back to a callable.
        db: Passed through to nested step deserialization.
        links: Passed through to nested step deserialization.

    Returns:
        A new Router instance.

    Raises:
        ValueError: If the selector function name cannot be resolved (missing
            from the registry, or no registry supplied), or if the selector
            payload has an unexpected type.
    """
    from agno.workflow.condition import Condition
    from agno.workflow.loop import Loop
    from agno.workflow.parallel import Parallel
    from agno.workflow.steps import Steps

    def deserialize_step(step_data: Dict[str, Any]) -> Any:
        # Dispatch on the serialized "type" tag; plain Step is the fallback.
        step_type = step_data.get("type", "Step")
        if step_type == "Loop":
            return Loop.from_dict(step_data, registry=registry, db=db, links=links)
        elif step_type == "Parallel":
            return Parallel.from_dict(step_data, registry=registry, db=db, links=links)
        elif step_type == "Steps":
            return Steps.from_dict(step_data, registry=registry, db=db, links=links)
        elif step_type == "Condition":
            return Condition.from_dict(step_data, registry=registry, db=db, links=links)
        elif step_type == "Router":
            # Nested routers recurse through this same classmethod.
            return cls.from_dict(step_data, registry=registry, db=db, links=links)
        else:
            return Step.from_dict(step_data, registry=registry, db=db, links=links)

    # Deserialize selector
    selector_data = data.get("selector")
    selector_type = data.get("selector_type")
    selector: Any = None
    if selector_data is None:
        # Selector is optional when using HITL
        selector = None
    elif isinstance(selector_data, str):
        # Determine if this is a CEL expression or a function name.
        # Older payloads may lack selector_type, so fall back to sniffing the string.
        if selector_type == "cel" or (selector_type is None and is_cel_expression(selector_data)):
            # CEL expression - use as-is
            selector = selector_data
        else:
            # Function name - look up in registry
            if registry:
                func = registry.get_function(selector_data)
                if func is None:
                    raise ValueError(f"Selector function '{selector_data}' not found in registry")
                selector = func
            else:
                raise ValueError(f"Registry required to deserialize selector function '{selector_data}'")
    else:
        raise ValueError(f"Invalid selector type in data: {type(selector_data).__name__}")

    return cls(
        selector=selector,
        choices=[deserialize_step(step) for step in data.get("choices", [])],
        name=data.get("name"),
        description=data.get("description"),
        requires_user_input=data.get("requires_user_input", False),
        user_input_message=data.get("user_input_message"),
        allow_multiple_selections=data.get("allow_multiple_selections", False),
        user_input_schema=data.get("user_input_schema"),
        requires_confirmation=data.get("requires_confirmation", False),
        confirmation_message=data.get("confirmation_message"),
        on_reject=data.get("on_reject", OnReject.skip),
    )
def _prepare_single_step(self, step: Any) -> Any:
    """Normalize one routing choice into an executable Step-like object.

    Raises:
        ValueError: If the choice is not a callable, Agent, Team, or step primitive.
    """
    from agno.agent.agent import Agent
    from agno.team.team import Team
    from agno.workflow.condition import Condition
    from agno.workflow.loop import Loop
    from agno.workflow.parallel import Parallel
    from agno.workflow.step import Step
    from agno.workflow.steps import Steps

    # Bare functions are wrapped in a Step named after the function itself.
    if callable(step) and hasattr(step, "__name__"):
        return Step(name=step.__name__, description="User-defined callable step", executor=step)
    # Agents and teams are wrapped so they expose the Step interface.
    if isinstance(step, Agent):
        return Step(name=step.name, description=step.description, agent=step)
    if isinstance(step, Team):
        return Step(name=step.name, description=step.description, team=step)
    # Composite step primitives are already executable as-is.
    if isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
        return step
    raise ValueError(f"Invalid step type: {type(step).__name__}")
def _prepare_steps(self):
    """Normalize ``self.choices`` into ``self.steps`` and build the name lookup.

    Mirrors the workflow's own preparation logic: nested lists of steps are
    wrapped in a Steps container with a generated name.
    """
    from agno.workflow.steps import Steps

    normalized: WorkflowSteps = []
    for choice in self.choices:
        if isinstance(choice, list):
            # A nested list becomes a single Steps container with a synthetic name.
            inner = [self._prepare_single_step(item) for item in choice]
            normalized.append(Steps(name=f"steps_group_{len(normalized)}", steps=inner))
        else:
            normalized.append(self._prepare_single_step(choice))
    self.steps = normalized

    # Name -> step map used by CEL expressions and string-returning selectors.
    self._step_name_map: Dict[str, Any] = {}
    for prepared in self.steps:
        if hasattr(prepared, "name") and prepared.name:
            self._step_name_map[prepared.name] = prepared
def _get_choice_names(self) -> List[str]:
    """Return the display names of all prepared choices (used in HITL prompts)."""
    if not hasattr(self, "steps"):
        self._prepare_steps()
    # Unnamed steps are omitted, matching the name-map behavior.
    return [label for label in (getattr(s, "name", None) for s in self.steps) if label]
def _get_step_by_name(self, name: str) -> Optional[Step]:
    """Return the first prepared step whose name matches, or None."""
    if not hasattr(self, "steps"):
        self._prepare_steps()
    return next(  # type: ignore[return-value]
        (candidate for candidate in self.steps if getattr(candidate, "name", None) == name),
        None,
    )
def _get_steps_from_user_selection(self, selection: Union[str, List[str]]) -> List[Step]:
    """Resolve the user's selected choice name(s) to prepared steps.

    Unknown names are logged and skipped rather than raising.
    """
    names = [selection] if isinstance(selection, str) else selection
    resolved: List[Step] = []
    for raw_name in names:
        match = self._get_step_by_name(raw_name.strip())
        if match is None:
            logger.warning(f"Router: Unknown choice '{raw_name}', skipping")
        else:
            resolved.append(match)
    return resolved
@property
def requires_hitl(self) -> bool:
    """Check if this router requires any form of HITL.

    NOTE(review): only ``requires_user_input`` (route-selection mode) is
    reflected here; ``requires_confirmation`` is not included — confirm
    whether callers are expected to check confirmation mode separately.
    """
    return self.requires_user_input
def _update_step_input_from_outputs(
    self,
    step_input: StepInput,
    step_outputs: Union[StepOutput, List[StepOutput]],
    router_step_outputs: Optional[Dict[str, StepOutput]] = None,
) -> StepInput:
    """Fold executed step output(s) into a fresh StepInput for the next step.

    Media from the outputs is appended to the media already on the input, the
    last output's content becomes previous_step_content, and the per-step
    output map is merged. Mirrors the Loop chaining logic.
    """
    # Normalize to a list; a single output behaves like a one-element list.
    outputs = step_outputs if isinstance(step_outputs, list) else [step_outputs]

    gathered_images = [img for out in outputs for img in (out.images or [])]
    gathered_videos = [vid for out in outputs for vid in (out.videos or [])]
    gathered_audio = [aud for out in outputs for aud in (out.audio or [])]
    latest_content = outputs[-1].content if outputs else None

    merged_outputs: Dict[str, StepOutput] = {}
    merged_outputs.update(step_input.previous_step_outputs or {})
    merged_outputs.update(router_step_outputs or {})

    return StepInput(
        input=step_input.input,
        previous_step_content=latest_content,
        previous_step_outputs=merged_outputs,
        additional_data=step_input.additional_data,
        images=(step_input.images or []) + gathered_images,
        videos=(step_input.videos or []) + gathered_videos,
        audio=(step_input.audio or []) + gathered_audio,
    )
def _resolve_selector_result(self, result: Any) -> List[Any]:
    """Resolve selector result to a list of steps, handling strings, Steps, and lists.

    This unified resolver handles:
    - String step names (from CEL expressions or callable selectors)
    - Step objects directly returned by callable selectors
    - Lists of strings or Steps (resolved recursively and flattened)

    Returns:
        A flat list of executable steps; empty when the result is None,
        an unknown name, or an unexpected type (each case is logged).
    """
    from agno.workflow.condition import Condition
    from agno.workflow.loop import Loop
    from agno.workflow.parallel import Parallel
    from agno.workflow.steps import Steps

    if result is None:
        return []

    # Handle string - look up by name in the step_name_map
    if isinstance(result, str):
        if result in self._step_name_map:
            return [self._step_name_map[result]]
        else:
            available_steps = list(self._step_name_map.keys())
            logger.warning(
                f"Router '{self.name}' selector returned unknown step name: '{result}'. "
                f"Available step names are: {available_steps}. "
                f"Make sure the selector returns one of the available step names."
            )
            return []

    # Handle step types (Step, Steps, Loop, Parallel, Condition, Router)
    if isinstance(result, (Step, Steps, Loop, Parallel, Condition, Router)):
        # Validate that the returned step is in the router's choices; steps
        # outside the choices are still executed but flagged as suspicious.
        step_name = getattr(result, "name", None)
        if step_name and step_name not in self._step_name_map:
            available_steps = list(self._step_name_map.keys())
            logger.warning(
                f"Router '{self.name}' selector returned a Step '{step_name}' that is not in choices. "
                f"Available step names are: {available_steps}. "
                f"The step will still be executed, but this may indicate a configuration error."
            )
        return [result]

    # Handle list of results (could be strings, Steps, or mixed) - recurse per item
    if isinstance(result, list):
        resolved = []
        for item in result:
            resolved.extend(self._resolve_selector_result(item))
        return resolved

    logger.warning(f"Router selector returned unexpected type: {type(result)}")
    return []
def _selector_has_step_choices_param(self) -> bool:
    """Return True if the callable selector declares a ``step_choices`` parameter."""
    if not callable(self.selector):
        return False
    try:
        params = inspect.signature(self.selector).parameters
    except Exception:
        # Some callables (e.g. builtins) do not expose a signature.
        return False
    return "step_choices" in params
def _route_steps(self, step_input: StepInput, session_state: Optional[Dict[str, Any]] = None) -> List[Step]:  # type: ignore[return-value]
    """Route to the appropriate steps based on input.

    Evaluates the configured selector (CEL string or callable) against the
    prepared steps. Returns an empty list when there is no usable selector,
    CEL support is missing, or evaluation raises (errors are logged).
    """
    # Handle CEL expression selector
    if isinstance(self.selector, str):
        if not CEL_AVAILABLE:
            logger.error(
                "CEL expression used but cel-python is not installed. Install with: pip install cel-python"
            )
            return []
        try:
            step_names = list(self._step_name_map.keys())
            step_name = evaluate_cel_router_selector(
                self.selector, step_input, session_state, step_choices=step_names
            )
            return self._resolve_selector_result(step_name)
        except Exception as e:
            logger.error(f"Router CEL evaluation failed: {e}")
            return []

    # Handle callable selector
    if callable(self.selector):
        has_session_state = session_state is not None and self._selector_has_session_state_param()
        has_step_choices = self._selector_has_step_choices_param()

        # Build kwargs based on what parameters the selector accepts, so
        # selectors with simpler signatures still work unchanged.
        kwargs: Dict[str, Any] = {}
        if has_session_state:
            kwargs["session_state"] = session_state
        if has_step_choices:
            kwargs["step_choices"] = self.steps

        result = self.selector(step_input, **kwargs)  # type: ignore[call-arg]
        return self._resolve_selector_result(result)

    return []
async def _aroute_steps(self, step_input: StepInput, session_state: Optional[Dict[str, Any]] = None) -> List[Step]:  # type: ignore[return-value]
    """Async version of step routing.

    Same contract as ``_route_steps``, but awaits coroutine selectors.
    CEL evaluation itself is synchronous.
    """
    # Handle CEL expression selector (CEL evaluation is synchronous)
    if isinstance(self.selector, str):
        if not CEL_AVAILABLE:
            logger.error(
                "CEL expression used but cel-python is not installed. Install with: pip install cel-python"
            )
            return []
        try:
            step_names = list(self._step_name_map.keys())
            step_name = evaluate_cel_router_selector(
                self.selector, step_input, session_state, step_choices=step_names
            )
            return self._resolve_selector_result(step_name)
        except Exception as e:
            logger.error(f"Router CEL evaluation failed: {e}")
            return []

    # Handle callable selector
    if callable(self.selector):
        has_session_state = session_state is not None and self._selector_has_session_state_param()
        has_step_choices = self._selector_has_step_choices_param()

        # Build kwargs based on what parameters the selector accepts
        kwargs: Dict[str, Any] = {}
        if has_session_state:
            kwargs["session_state"] = session_state
        if has_step_choices:
            kwargs["step_choices"] = self.steps

        # Await async selectors; call sync ones directly.
        if inspect.iscoroutinefunction(self.selector):
            result = await self.selector(step_input, **kwargs)  # type: ignore[call-arg]
        else:
            result = self.selector(step_input, **kwargs)  # type: ignore[call-arg]
        return self._resolve_selector_result(result)

    return []
def _selector_has_session_state_param(self) -> bool:
    """Return True if the callable selector declares a ``session_state`` parameter."""
    if not callable(self.selector):
        return False
    try:
        params = inspect.signature(self.selector).parameters
    except Exception:
        # Some callables (e.g. builtins) do not expose a signature.
        return False
    return "session_state" in params
def execute(
    self,
    step_input: StepInput,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    workflow_run_response: Optional[WorkflowRunOutput] = None,
    run_context: Optional[RunContext] = None,
    session_state: Optional[Dict[str, Any]] = None,
    store_executor_outputs: bool = True,
    workflow_session: Optional[WorkflowSession] = None,
    add_workflow_history_to_steps: Optional[bool] = False,
    num_history_runs: int = 3,
    background_tasks: Optional[Any] = None,
) -> StepOutput:
    """Execute the router and its selected steps with sequential chaining.

    Routes via the configured selector, then runs the selected steps one after
    another, feeding each step's outputs into the next step's input. Stops
    early when a step raises or sets ``stop``.

    Returns:
        A StepOutput summarizing the run; per-step results are in ``steps``.
    """
    log_debug(f"Router Start: {self.name}", center=True, symbol="-")
    router_step_id = str(uuid4())
    self._prepare_steps()

    # Route to appropriate steps; the run_context's session_state takes
    # precedence over the session_state argument when both are set.
    if run_context is not None and run_context.session_state is not None:
        steps_to_execute = self._route_steps(step_input, session_state=run_context.session_state)
    else:
        steps_to_execute = self._route_steps(step_input, session_state=session_state)
    log_debug(f"Router {self.name}: Selected {len(steps_to_execute)} steps to execute")

    if not steps_to_execute:
        # Nothing selected: report success with zero results.
        return StepOutput(
            step_name=self.name,
            step_id=router_step_id,
            step_type=StepType.ROUTER,
            content=f"Router {self.name} completed with 0 results (no steps selected)",
            success=True,
        )

    all_results: List[StepOutput] = []
    current_step_input = step_input
    router_step_outputs = {}

    for i, step in enumerate(steps_to_execute):
        try:
            step_output = step.execute(
                current_step_input,
                session_id=session_id,
                user_id=user_id,
                workflow_run_response=workflow_run_response,
                store_executor_outputs=store_executor_outputs,
                run_context=run_context,
                session_state=session_state,
                workflow_session=workflow_session,
                add_workflow_history_to_steps=add_workflow_history_to_steps,
                num_history_runs=num_history_runs,
                background_tasks=background_tasks,
            )

            # Handle both single StepOutput and List[StepOutput]
            if isinstance(step_output, list):
                all_results.extend(step_output)
                if step_output:
                    step_name = getattr(step, "name", f"step_{i}")
                    # Only the last output is recorded as this step's result in the map.
                    router_step_outputs[step_name] = step_output[-1]
                    if any(output.stop for output in step_output):
                        logger.info(f"Early termination requested by step {step_name}")
                        break
            else:
                all_results.append(step_output)
                step_name = getattr(step, "name", f"step_{i}")
                router_step_outputs[step_name] = step_output
                if step_output.stop:
                    logger.info(f"Early termination requested by step {step_name}")
                    break

            # Chain: fold this step's outputs into the next step's input.
            current_step_input = self._update_step_input_from_outputs(
                current_step_input, step_output, router_step_outputs
            )
        except Exception as e:
            # A failed step produces an error StepOutput and halts the chain.
            step_name = getattr(step, "name", f"step_{i}")
            logger.error(f"Router step {step_name} failed: {e}")
            error_output = StepOutput(
                step_name=step_name,
                content=f"Step {step_name} failed: {str(e)}",
                success=False,
                error=str(e),
            )
            all_results.append(error_output)
            break

    log_debug(f"Router End: {self.name} ({len(all_results)} results)", center=True, symbol="-")
    return StepOutput(
        step_name=self.name,
        step_id=router_step_id,
        step_type=StepType.ROUTER,
        content=f"Router {self.name} completed with {len(all_results)} results",
        success=all(result.success for result in all_results) if all_results else True,
        stop=any(result.stop for result in all_results) if all_results else False,
        steps=all_results,
    )
def execute_stream(
    self,
    step_input: StepInput,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    run_context: Optional[RunContext] = None,
    session_state: Optional[Dict[str, Any]] = None,
    stream_events: bool = False,
    stream_executor_events: bool = True,
    workflow_run_response: Optional[WorkflowRunOutput] = None,
    step_index: Optional[Union[int, tuple]] = None,
    store_executor_outputs: bool = True,
    parent_step_id: Optional[str] = None,
    workflow_session: Optional[WorkflowSession] = None,
    add_workflow_history_to_steps: Optional[bool] = False,
    num_history_runs: int = 3,
    background_tasks: Optional[Any] = None,
) -> Iterator[Union[WorkflowRunOutputEvent, StepOutput]]:
    """Execute the router with streaming support.

    Yields router started/completed events (when ``stream_events`` is set and a
    ``workflow_run_response`` is available), relays every non-StepOutput event
    produced by the selected steps, and finishes by yielding the router's own
    summary StepOutput. Selected steps are chained sequentially, as in
    ``execute``.
    """
    log_debug(f"Router Start: {self.name}", center=True, symbol="-")
    self._prepare_steps()

    router_step_id = str(uuid4())

    # Route to appropriate steps; run_context's session_state takes precedence.
    if run_context is not None and run_context.session_state is not None:
        steps_to_execute = self._route_steps(step_input, session_state=run_context.session_state)
    else:
        steps_to_execute = self._route_steps(step_input, session_state=session_state)
    log_debug(f"Router {self.name}: Selected {len(steps_to_execute)} steps to execute")

    if stream_events and workflow_run_response:
        # Yield router started event
        yield RouterExecutionStartedEvent(
            run_id=workflow_run_response.run_id or "",
            workflow_name=workflow_run_response.workflow_name or "",
            workflow_id=workflow_run_response.workflow_id or "",
            session_id=workflow_run_response.session_id or "",
            step_name=self.name,
            step_index=step_index,
            selected_steps=[getattr(step, "name", f"step_{i}") for i, step in enumerate(steps_to_execute)],
            step_id=router_step_id,
            parent_step_id=parent_step_id,
        )

    if not steps_to_execute:
        # Yield router completed event for empty case
        if stream_events and workflow_run_response:
            yield RouterExecutionCompletedEvent(
                run_id=workflow_run_response.run_id or "",
                workflow_name=workflow_run_response.workflow_name or "",
                workflow_id=workflow_run_response.workflow_id or "",
                session_id=workflow_run_response.session_id or "",
                step_name=self.name,
                step_index=step_index,
                selected_steps=[],
                executed_steps=0,
                step_results=[],
                step_id=router_step_id,
                parent_step_id=parent_step_id,
            )
        return

    all_results = []
    current_step_input = step_input
    router_step_outputs = {}

    for i, step in enumerate(steps_to_execute):
        try:
            step_outputs_for_step = []

            # Stream step execution: collect StepOutputs, relay everything else.
            for event in step.execute_stream(
                current_step_input,
                session_id=session_id,
                user_id=user_id,
                stream_events=stream_events,
                stream_executor_events=stream_executor_events,
                workflow_run_response=workflow_run_response,
                step_index=step_index,
                store_executor_outputs=store_executor_outputs,
                run_context=run_context,
                session_state=session_state,
                parent_step_id=router_step_id,
                workflow_session=workflow_session,
                add_workflow_history_to_steps=add_workflow_history_to_steps,
                num_history_runs=num_history_runs,
                background_tasks=background_tasks,
            ):
                if isinstance(event, StepOutput):
                    step_outputs_for_step.append(event)
                    all_results.append(event)
                else:
                    # Yield other events (streaming content, step events, etc.)
                    yield event

            step_name = getattr(step, "name", f"step_{i}")
            log_debug(f"Router step {step_name} streaming completed")

            if step_outputs_for_step:
                if len(step_outputs_for_step) == 1:
                    router_step_outputs[step_name] = step_outputs_for_step[0]

                    if step_outputs_for_step[0].stop:
                        logger.info(f"Early termination requested by step {step_name}")
                        break

                    current_step_input = self._update_step_input_from_outputs(
                        current_step_input, step_outputs_for_step[0], router_step_outputs
                    )
                else:
                    # Use last output as this step's recorded result.
                    router_step_outputs[step_name] = step_outputs_for_step[-1]

                    if any(output.stop for output in step_outputs_for_step):
                        logger.info(f"Early termination requested by step {step_name}")
                        break

                    current_step_input = self._update_step_input_from_outputs(
                        current_step_input, step_outputs_for_step, router_step_outputs
                    )
        except Exception as e:
            # A failed step produces an error StepOutput and halts the chain.
            step_name = getattr(step, "name", f"step_{i}")
            logger.error(f"Router step {step_name} streaming failed: {e}")
            error_output = StepOutput(
                step_name=step_name,
                content=f"Step {step_name} failed: {str(e)}",
                success=False,
                error=str(e),
            )
            all_results.append(error_output)
            break

    log_debug(f"Router End: {self.name} ({len(all_results)} results)", center=True, symbol="-")

    if stream_events and workflow_run_response:
        # Yield router completed event
        yield RouterExecutionCompletedEvent(
            run_id=workflow_run_response.run_id or "",
            workflow_name=workflow_run_response.workflow_name or "",
            workflow_id=workflow_run_response.workflow_id or "",
            session_id=workflow_run_response.session_id or "",
            step_name=self.name,
            step_index=step_index,
            selected_steps=[getattr(step, "name", f"step_{i}") for i, step in enumerate(steps_to_execute)],
            executed_steps=len(steps_to_execute),
            step_results=all_results,
            step_id=router_step_id,
            parent_step_id=parent_step_id,
        )

    yield StepOutput(
        step_name=self.name,
        step_id=router_step_id,
        step_type=StepType.ROUTER,
        content=f"Router {self.name} completed with {len(all_results)} results",
        success=all(result.success for result in all_results) if all_results else True,
        stop=any(result.stop for result in all_results) if all_results else False,
        steps=all_results,
    )
async def aexecute(
    self,
    step_input: StepInput,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    workflow_run_response: Optional[WorkflowRunOutput] = None,
    run_context: Optional[RunContext] = None,
    session_state: Optional[Dict[str, Any]] = None,
    store_executor_outputs: bool = True,
    workflow_session: Optional[WorkflowSession] = None,
    add_workflow_history_to_steps: Optional[bool] = False,
    num_history_runs: int = 3,
    background_tasks: Optional[Any] = None,
) -> StepOutput:
    """Async execute the router and its selected steps with sequential chaining.

    Async counterpart of ``execute``: routes via ``_aroute_steps`` (which can
    await coroutine selectors), then awaits each selected step in order,
    chaining outputs into the next step's input. Stops early when a step
    raises or sets ``stop``.

    Returns:
        A StepOutput summarizing the run; per-step results are in ``steps``.
    """
    log_debug(f"Router Start: {self.name}", center=True, symbol="-")
    router_step_id = str(uuid4())
    self._prepare_steps()

    # Route to appropriate steps; run_context's session_state takes precedence.
    if run_context is not None and run_context.session_state is not None:
        steps_to_execute = await self._aroute_steps(step_input, session_state=run_context.session_state)
    else:
        steps_to_execute = await self._aroute_steps(step_input, session_state=session_state)
    log_debug(f"Router {self.name} selected: {len(steps_to_execute)} steps to execute")

    if not steps_to_execute:
        # Nothing selected: report success with zero results.
        return StepOutput(
            step_name=self.name,
            step_id=router_step_id,
            step_type=StepType.ROUTER,
            content=f"Router {self.name} completed with 0 results (no steps selected)",
            success=True,
        )

    # Chain steps sequentially like Loop does
    all_results: List[StepOutput] = []
    current_step_input = step_input
    router_step_outputs = {}

    for i, step in enumerate(steps_to_execute):
        try:
            step_output = await step.aexecute(
                current_step_input,
                session_id=session_id,
                user_id=user_id,
                workflow_run_response=workflow_run_response,
                store_executor_outputs=store_executor_outputs,
                run_context=run_context,
                session_state=session_state,
                workflow_session=workflow_session,
                add_workflow_history_to_steps=add_workflow_history_to_steps,
                num_history_runs=num_history_runs,
                background_tasks=background_tasks,
            )

            # Handle both single StepOutput and List[StepOutput]
            if isinstance(step_output, list):
                all_results.extend(step_output)
                if step_output:
                    step_name = getattr(step, "name", f"step_{i}")
                    # Only the last output is recorded as this step's result in the map.
                    router_step_outputs[step_name] = step_output[-1]
                    if any(output.stop for output in step_output):
                        logger.info(f"Early termination requested by step {step_name}")
                        break
            else:
                all_results.append(step_output)
                step_name = getattr(step, "name", f"step_{i}")
                router_step_outputs[step_name] = step_output
                if step_output.stop:
                    logger.info(f"Early termination requested by step {step_name}")
                    break

            step_name = getattr(step, "name", f"step_{i}")
            log_debug(f"Router step {step_name} async completed")

            # Chain: fold this step's outputs into the next step's input.
            current_step_input = self._update_step_input_from_outputs(
                current_step_input, step_output, router_step_outputs
            )
        except Exception as e:
            # A failed step produces an error StepOutput and halts the chain.
            step_name = getattr(step, "name", f"step_{i}")
            logger.error(f"Router step {step_name} async failed: {e}")
            error_output = StepOutput(
                step_name=step_name,
                content=f"Step {step_name} failed: {str(e)}",
                success=False,
                error=str(e),
            )
            all_results.append(error_output)
            break  # Stop on first error

    log_debug(f"Router End: {self.name} ({len(all_results)} results)", center=True, symbol="-")
    return StepOutput(
        step_name=self.name,
        step_id=router_step_id,
        step_type=StepType.ROUTER,
        content=f"Router {self.name} completed with {len(all_results)} results",
        success=all(result.success for result in all_results) if all_results else True,
        stop=any(result.stop for result in all_results) if all_results else False,
        steps=all_results,
    )
async def aexecute_stream(
self,
step_input: StepInput,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
run_context: Optional[RunContext] = None,
session_state: Optional[Dict[str, Any]] = None,
stream_events: bool = False,
stream_executor_events: bool = True,
workflow_run_response: Optional[WorkflowRunOutput] = None,
step_index: Optional[Union[int, tuple]] = None,
store_executor_outputs: bool = True,
parent_step_id: Optional[str] = None,
workflow_session: Optional[WorkflowSession] = None,
add_workflow_history_to_steps: Optional[bool] = False,
num_history_runs: int = 3,
background_tasks: Optional[Any] = None,
) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
"""Async execute the router with streaming support"""
log_debug(f"Router Start: {self.name}", center=True, symbol="-")
self._prepare_steps()
router_step_id = str(uuid4())
# Route to appropriate steps
if run_context is not None and run_context.session_state is not None:
steps_to_execute = await self._aroute_steps(step_input, session_state=run_context.session_state)
else:
steps_to_execute = await self._aroute_steps(step_input, session_state=session_state)
log_debug(f"Router {self.name} selected: {len(steps_to_execute)} steps to execute")
if stream_events and workflow_run_response:
# Yield router started event
yield RouterExecutionStartedEvent(
run_id=workflow_run_response.run_id or "",
workflow_name=workflow_run_response.workflow_name or "",
workflow_id=workflow_run_response.workflow_id or "",
session_id=workflow_run_response.session_id or "",
step_name=self.name,
step_index=step_index,
selected_steps=[getattr(step, "name", f"step_{i}") for i, step in enumerate(steps_to_execute)],
step_id=router_step_id,
parent_step_id=parent_step_id,
)
if not steps_to_execute:
if stream_events and workflow_run_response:
# Yield router completed event for empty case
yield RouterExecutionCompletedEvent(
run_id=workflow_run_response.run_id or "",
workflow_name=workflow_run_response.workflow_name or "",
workflow_id=workflow_run_response.workflow_id or "",
session_id=workflow_run_response.session_id or "",
step_name=self.name,
step_index=step_index,
selected_steps=[],
executed_steps=0,
step_results=[],
step_id=router_step_id,
parent_step_id=parent_step_id,
)
return
# Chain steps sequentially like Loop does
all_results = []
current_step_input = step_input
router_step_outputs = {}
for i, step in enumerate(steps_to_execute):
try:
step_outputs_for_step = []
# Stream step execution - mirroring Loop logic
async for event in step.aexecute_stream(
current_step_input,
session_id=session_id,
user_id=user_id,
stream_events=stream_events,
stream_executor_events=stream_executor_events,
workflow_run_response=workflow_run_response,
step_index=step_index,
store_executor_outputs=store_executor_outputs,
run_context=run_context,
session_state=session_state,
parent_step_id=router_step_id,
workflow_session=workflow_session,
add_workflow_history_to_steps=add_workflow_history_to_steps,
num_history_runs=num_history_runs,
background_tasks=background_tasks,
):
if isinstance(event, StepOutput):
step_outputs_for_step.append(event)
all_results.append(event)
else:
# Yield other events (streaming content, step events, etc.)
yield event
step_name = getattr(step, "name", f"step_{i}")
log_debug(f"Router step {step_name} async streaming completed")
if step_outputs_for_step:
if len(step_outputs_for_step) == 1:
router_step_outputs[step_name] = step_outputs_for_step[0]
if step_outputs_for_step[0].stop:
logger.info(f"Early termination requested by step {step_name}")
break
current_step_input = self._update_step_input_from_outputs(
current_step_input, step_outputs_for_step[0], router_step_outputs
)
else:
# Use last output
router_step_outputs[step_name] = step_outputs_for_step[-1]
if any(output.stop for output in step_outputs_for_step):
logger.info(f"Early termination requested by step {step_name}")
break
current_step_input = self._update_step_input_from_outputs(
current_step_input, step_outputs_for_step, router_step_outputs
)
except Exception as e:
step_name = getattr(step, "name", f"step_{i}")
logger.error(f"Router step {step_name} async streaming failed: {e}")
error_output = StepOutput(
step_name=step_name,
content=f"Step {step_name} failed: {str(e)}",
success=False,
error=str(e),
)
all_results.append(error_output)
break # Stop on first error
log_debug(f"Router End: {self.name} ({len(all_results)} results)", center=True, symbol="-")
if stream_events and workflow_run_response:
# Yield router completed event
yield RouterExecutionCompletedEvent(
run_id=workflow_run_response.run_id or "",
workflow_name=workflow_run_response.workflow_name or "",
workflow_id=workflow_run_response.workflow_id or "",
session_id=workflow_run_response.session_id or "",
step_name=self.name,
step_index=step_index,
selected_steps=[getattr(step, "name", f"step_{i}") for i, step in enumerate(steps_to_execute)],
executed_steps=len(steps_to_execute),
step_results=all_results,
step_id=router_step_id,
parent_step_id=parent_step_id,
)
yield StepOutput(
step_name=self.name,
step_id=router_step_id,
step_type=StepType.ROUTER,
content=f"Router {self.name} completed with {len(all_results)} results",
success=all(result.success for result in all_results) if all_results else True,
error=None,
stop=any(result.stop for result in all_results) if all_results else False,
steps=all_results,
)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/router.py",
"license": "Apache License 2.0",
"lines": 946,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/workflow/steps.py | from dataclasses import dataclass
from typing import Any, AsyncIterator, Awaitable, Callable, Dict, Iterator, List, Optional, Union
from uuid import uuid4
from agno.registry import Registry
from agno.run.agent import RunOutputEvent
from agno.run.base import RunContext
from agno.run.team import TeamRunOutputEvent
from agno.run.workflow import (
StepsExecutionCompletedEvent,
StepsExecutionStartedEvent,
WorkflowRunOutput,
WorkflowRunOutputEvent,
)
from agno.session.workflow import WorkflowSession
from agno.utils.log import log_debug, logger
from agno.workflow.step import Step
from agno.workflow.types import OnReject, StepInput, StepOutput, StepRequirement, StepType
# Type alias for the contents of a workflow pipeline: a list mixing plain
# callables (sync or async, optionally yielding StepOutput for streaming),
# concrete Step objects, and the composite containers. The container types are
# given as string forward references because importing them here would create
# circular imports (each of those modules imports from this one).
WorkflowSteps = List[
    Union[
        Callable[
            [StepInput], Union[StepOutput, Awaitable[StepOutput], Iterator[StepOutput], AsyncIterator[StepOutput]]
        ],
        Step,
        "Steps",  # type: ignore # noqa: F821
        "Loop",  # type: ignore # noqa: F821
        "Parallel",  # type: ignore # noqa: F821
        "Condition",  # type: ignore # noqa: F821
        "Router",  # type: ignore # noqa: F821
    ]
]
@dataclass
class Steps:
    """A pipeline of steps that execute in order.

    HITL Mode:
    When `requires_confirmation=True`, the workflow pauses before executing
    the steps pipeline and asks the user to confirm:
    - User confirms -> execute all steps in the pipeline
    - User rejects -> skip the entire pipeline
    """

    # Steps to execute (heterogeneous; normalized into Step objects by _prepare_steps)
    steps: WorkflowSteps

    # Pipeline identification
    name: Optional[str] = None
    description: Optional[str] = None

    # Human-in-the-loop (HITL) configuration
    # If True, the steps pipeline will pause before execution and require user confirmation
    requires_confirmation: bool = False
    # Prompt shown to the user; create_step_requirement supplies a default when None
    confirmation_message: Optional[str] = None
    # Action taken when the user rejects the confirmation (OnReject member or its string value)
    on_reject: Union[OnReject, str] = OnReject.skip
def __init__(
self,
name: Optional[str] = None,
description: Optional[str] = None,
steps: Optional[List[Any]] = None,
requires_confirmation: bool = False,
confirmation_message: Optional[str] = None,
on_reject: Union[OnReject, str] = OnReject.skip,
):
self.name = name
self.description = description
self.steps = steps if steps else []
self.requires_confirmation = requires_confirmation
self.confirmation_message = confirmation_message
self.on_reject = on_reject
def to_dict(self) -> Dict[str, Any]:
return {
"type": "Steps",
"name": self.name,
"description": self.description,
"steps": [step.to_dict() for step in self.steps if hasattr(step, "to_dict")],
"requires_confirmation": self.requires_confirmation,
"confirmation_message": self.confirmation_message,
"on_reject": str(self.on_reject),
}
def create_step_requirement(
self,
step_index: int,
step_input: StepInput,
) -> StepRequirement:
"""Create a StepRequirement for HITL pause (confirmation).
Args:
step_index: Index of the steps pipeline in the workflow.
step_input: The prepared input for the steps.
Returns:
StepRequirement configured for this steps pipeline's HITL needs.
"""
return StepRequirement(
step_id=str(uuid4()),
step_name=self.name or f"steps_{step_index + 1}",
step_index=step_index,
step_type="Steps",
requires_confirmation=self.requires_confirmation,
confirmation_message=self.confirmation_message or f"Execute steps pipeline '{self.name or 'steps'}'?",
on_reject=self.on_reject.value if isinstance(self.on_reject, OnReject) else str(self.on_reject),
requires_user_input=False,
step_input=step_input,
)
@classmethod
def from_dict(
cls,
data: Dict[str, Any],
registry: Optional["Registry"] = None,
db: Optional[Any] = None,
links: Optional[List[Dict[str, Any]]] = None,
) -> "Steps":
from agno.workflow.condition import Condition
from agno.workflow.loop import Loop
from agno.workflow.parallel import Parallel
from agno.workflow.router import Router
def deserialize_step(step_data: Dict[str, Any]) -> Any:
step_type = step_data.get("type", "Step")
if step_type == "Loop":
return Loop.from_dict(step_data, registry=registry, db=db, links=links)
elif step_type == "Parallel":
return Parallel.from_dict(step_data, registry=registry, db=db, links=links)
elif step_type == "Steps":
return cls.from_dict(step_data, registry=registry, db=db, links=links)
elif step_type == "Condition":
return Condition.from_dict(step_data, registry=registry, db=db, links=links)
elif step_type == "Router":
return Router.from_dict(step_data, registry=registry, db=db, links=links)
else:
return Step.from_dict(step_data, registry=registry, db=db, links=links)
return cls(
name=data.get("name"),
description=data.get("description"),
steps=[deserialize_step(step) for step in data.get("steps", [])],
requires_confirmation=data.get("requires_confirmation", False),
confirmation_message=data.get("confirmation_message"),
on_reject=data.get("on_reject", OnReject.skip),
)
def _prepare_steps(self):
"""Prepare the steps for execution - mirrors workflow logic"""
from agno.agent.agent import Agent
from agno.team.team import Team
from agno.workflow.condition import Condition
from agno.workflow.loop import Loop
from agno.workflow.parallel import Parallel
from agno.workflow.router import Router
from agno.workflow.step import Step
prepared_steps: WorkflowSteps = []
for step in self.steps:
if callable(step) and hasattr(step, "__name__"):
prepared_steps.append(Step(name=step.__name__, description="User-defined callable step", executor=step))
elif isinstance(step, Agent):
prepared_steps.append(Step(name=step.name, description=step.description, agent=step))
elif isinstance(step, Team):
prepared_steps.append(Step(name=step.name, description=step.description, team=step))
elif isinstance(step, (Step, Steps, Loop, Parallel, Condition, Router)):
prepared_steps.append(step)
else:
raise ValueError(f"Invalid step type: {type(step).__name__}")
self.steps = prepared_steps
def _update_step_input_from_outputs(
self,
step_input: StepInput,
step_outputs: Union[StepOutput, List[StepOutput]],
steps_step_outputs: Optional[Dict[str, StepOutput]] = None,
) -> StepInput:
"""Helper to update step input from step outputs - mirrors Condition/Router logic"""
current_images = step_input.images or []
current_videos = step_input.videos or []
current_audio = step_input.audio or []
if isinstance(step_outputs, list):
step_images = sum([out.images or [] for out in step_outputs], [])
step_videos = sum([out.videos or [] for out in step_outputs], [])
step_audio = sum([out.audio or [] for out in step_outputs], [])
# Use the last output's content for chaining
previous_step_content = step_outputs[-1].content if step_outputs else None
else:
# Single output
step_images = step_outputs.images or []
step_videos = step_outputs.videos or []
step_audio = step_outputs.audio or []
previous_step_content = step_outputs.content
updated_previous_step_outputs = {}
if step_input.previous_step_outputs:
updated_previous_step_outputs.update(step_input.previous_step_outputs)
if steps_step_outputs:
updated_previous_step_outputs.update(steps_step_outputs)
return StepInput(
input=step_input.input,
previous_step_content=previous_step_content,
previous_step_outputs=updated_previous_step_outputs,
additional_data=step_input.additional_data,
images=current_images + step_images,
videos=current_videos + step_videos,
audio=current_audio + step_audio,
)
    def execute(
        self,
        step_input: StepInput,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        workflow_run_response: Optional[WorkflowRunOutput] = None,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        store_executor_outputs: bool = True,
        workflow_session: Optional[WorkflowSession] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
        background_tasks: Optional[Any] = None,
    ) -> StepOutput:
        """Execute all steps in sequence and return the final result.

        Inputs are chained: each step receives the previous step's content and
        the accumulated media/outputs (via _update_step_input_from_outputs).
        A step reporting ``stop=True`` ends the pipeline early, and the break
        happens before the input-chaining update. Exceptions are caught and
        returned as a failed StepOutput rather than propagated.

        Returns:
            StepOutput: aggregate result; ``steps`` holds every collected
            output, ``success`` is True only if all of them succeeded.
        """
        log_debug(f"Steps Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="-")
        steps_id = str(uuid4())
        self._prepare_steps()

        if not self.steps:
            return StepOutput(step_name=self.name or "Steps", content="No steps to execute")

        # Track outputs and pass data between steps - following Condition/Router pattern
        all_results: List[StepOutput] = []
        current_step_input = step_input
        steps_step_outputs = {}  # latest output per step name, used for chaining

        try:
            for i, step in enumerate(self.steps):
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Steps {self.name}: Executing step {i + 1}/{len(self.steps)} - {step_name}")

                # Execute step
                step_output = step.execute(  # type: ignore
                    current_step_input,
                    session_id=session_id,
                    user_id=user_id,
                    workflow_run_response=workflow_run_response,
                    store_executor_outputs=store_executor_outputs,
                    run_context=run_context,
                    session_state=session_state,
                    workflow_session=workflow_session,
                    add_workflow_history_to_steps=add_workflow_history_to_steps,
                    num_history_runs=num_history_runs,
                    background_tasks=background_tasks,
                )

                # Handle both single StepOutput and List[StepOutput] (from Loop/Condition/Router steps)
                if isinstance(step_output, list):
                    all_results.extend(step_output)
                    if step_output:
                        # Track only the last output under this step's name
                        steps_step_outputs[step_name] = step_output[-1]
                        if any(output.stop for output in step_output):
                            logger.info(f"Early termination requested by step {step_name}")
                            break
                else:
                    all_results.append(step_output)
                    steps_step_outputs[step_name] = step_output
                    if step_output.stop:
                        logger.info(f"Early termination requested by step {step_name}")
                        break

                log_debug(f"Steps {self.name}: Step {step_name} completed successfully")

                # Update input for next step with proper chaining
                current_step_input = self._update_step_input_from_outputs(
                    current_step_input, step_output, steps_step_outputs
                )

            log_debug(f"Steps End: {self.name} ({len(all_results)} results)", center=True, symbol="-")

            return StepOutput(
                step_name=self.name,
                step_id=steps_id,
                step_type=StepType.STEPS,
                content=f"Steps {self.name} completed with {len(all_results)} results",
                success=all(result.success for result in all_results) if all_results else True,
                stop=any(result.stop for result in all_results) if all_results else False,
                steps=all_results,
            )
        except Exception as e:
            logger.error(f"Steps execution failed: {e}")
            return StepOutput(
                step_name=self.name or "Steps",
                content=f"Steps execution failed: {str(e)}",
                success=False,
                error=str(e),
            )
    def execute_stream(
        self,
        step_input: StepInput,
        workflow_run_response: WorkflowRunOutput,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        stream_events: bool = False,
        stream_executor_events: bool = True,
        step_index: Optional[Union[int, tuple]] = None,
        store_executor_outputs: bool = True,
        parent_step_id: Optional[str] = None,
        workflow_session: Optional[WorkflowSession] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
        background_tasks: Optional[Any] = None,
    ) -> Iterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
        """Execute all steps in sequence with streaming support.

        Non-StepOutput events from child steps are yielded through as they
        arrive; StepOutput events are collected for input chaining and
        re-emitted only inside the final aggregate StepOutput. When
        ``stream_events`` is True, Steps started/completed marker events are
        also yielded. Exceptions become a failed StepOutput, not a raise.
        """
        log_debug(f"Steps Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="-")
        steps_id = str(uuid4())
        self._prepare_steps()

        if stream_events:
            # Yield steps execution started event
            yield StepsExecutionStartedEvent(
                run_id=workflow_run_response.run_id or "",
                workflow_name=workflow_run_response.workflow_name or "",
                workflow_id=workflow_run_response.workflow_id or "",
                session_id=workflow_run_response.session_id or "",
                step_name=self.name,
                step_index=step_index,
                steps_count=len(self.steps),
                step_id=steps_id,
                parent_step_id=parent_step_id,
            )

        if not self.steps:
            yield StepOutput(step_name=self.name or "Steps", content="No steps to execute")
            return

        # Track outputs and pass data between steps - following Condition/Router pattern
        all_results = []
        current_step_input = step_input
        steps_step_outputs = {}

        try:
            for i, step in enumerate(self.steps):
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Steps {self.name}: Executing step {i + 1}/{len(self.steps)} - {step_name}")

                step_outputs_for_step = []

                # Build a hierarchical index so nested events can be attributed.
                if step_index is None or isinstance(step_index, int):
                    # Steps is a main step - child steps get x.1, x.2, x.3 format
                    child_step_index = (step_index if step_index is not None else 1, i)  # Use i, not i+1
                else:
                    # Steps is already a child step - child steps get parent.1, parent.2, parent.3
                    child_step_index = step_index + (i,)  # Extend the tuple

                # Stream step execution
                for event in step.execute_stream(  # type: ignore
                    current_step_input,
                    session_id=session_id,
                    user_id=user_id,
                    run_context=run_context,
                    session_state=session_state,
                    stream_events=stream_events,
                    stream_executor_events=stream_executor_events,
                    workflow_run_response=workflow_run_response,
                    step_index=child_step_index,
                    store_executor_outputs=store_executor_outputs,
                    parent_step_id=steps_id,
                    workflow_session=workflow_session,
                    add_workflow_history_to_steps=add_workflow_history_to_steps,
                    num_history_runs=num_history_runs,
                    background_tasks=background_tasks,
                ):
                    if isinstance(event, StepOutput):
                        step_outputs_for_step.append(event)
                        all_results.append(event)
                    else:
                        # Yield other events (streaming content, step events, etc.)
                        yield event

                # Update step outputs tracking and prepare input for next step
                if step_outputs_for_step:
                    if len(step_outputs_for_step) == 1:
                        steps_step_outputs[step_name] = step_outputs_for_step[0]

                        # Break happens before the chaining update on early stop.
                        if step_outputs_for_step[0].stop:
                            logger.info(f"Early termination requested by step {step_name}")
                            break

                        current_step_input = self._update_step_input_from_outputs(
                            current_step_input, step_outputs_for_step[0], steps_step_outputs
                        )
                    else:
                        # Use last output
                        steps_step_outputs[step_name] = step_outputs_for_step[-1]

                        if any(output.stop for output in step_outputs_for_step):
                            logger.info(f"Early termination requested by step {step_name}")
                            break

                        current_step_input = self._update_step_input_from_outputs(
                            current_step_input, step_outputs_for_step, steps_step_outputs
                        )

            log_debug(f"Steps End: {self.name} ({len(all_results)} results)", center=True, symbol="-")

            if stream_events:
                # Yield steps execution completed event
                yield StepsExecutionCompletedEvent(
                    run_id=workflow_run_response.run_id or "",
                    workflow_name=workflow_run_response.workflow_name or "",
                    workflow_id=workflow_run_response.workflow_id or "",
                    session_id=workflow_run_response.session_id or "",
                    step_name=self.name,
                    step_index=step_index,
                    steps_count=len(self.steps),
                    executed_steps=len(all_results),
                    step_results=all_results,
                    step_id=steps_id,
                    parent_step_id=parent_step_id,
                )

            yield StepOutput(
                step_name=self.name,
                step_id=steps_id,
                step_type=StepType.STEPS,
                content=f"Steps {self.name} completed with {len(all_results)} results",
                success=all(result.success for result in all_results) if all_results else True,
                stop=any(result.stop for result in all_results) if all_results else False,
                steps=all_results,
            )
        except Exception as e:
            logger.error(f"Steps streaming failed: {e}")
            error_result = StepOutput(
                step_name=self.name or "Steps",
                content=f"Steps execution failed: {str(e)}",
                success=False,
                error=str(e),
            )
            yield error_result
    async def aexecute(
        self,
        step_input: StepInput,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        workflow_run_response: Optional[WorkflowRunOutput] = None,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        store_executor_outputs: bool = True,
        workflow_session: Optional[WorkflowSession] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
        background_tasks: Optional[Any] = None,
    ) -> StepOutput:
        """Execute all steps in sequence asynchronously and return the final result.

        Async counterpart of ``execute``: inputs are chained between steps,
        ``stop=True`` ends the pipeline early (before the chaining update),
        and exceptions are converted into a failed StepOutput.
        """
        log_debug(f"Steps Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="-")
        steps_id = str(uuid4())
        self._prepare_steps()

        if not self.steps:
            return StepOutput(step_name=self.name or "Steps", content="No steps to execute")

        # Track outputs and pass data between steps - following Condition/Router pattern
        all_results: List[StepOutput] = []
        current_step_input = step_input
        steps_step_outputs = {}  # latest output per step name, used for chaining

        try:
            for i, step in enumerate(self.steps):
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Steps {self.name}: Executing async step {i + 1}/{len(self.steps)} - {step_name}")

                # Execute step
                step_output = await step.aexecute(  # type: ignore
                    current_step_input,
                    session_id=session_id,
                    user_id=user_id,
                    workflow_run_response=workflow_run_response,
                    store_executor_outputs=store_executor_outputs,
                    run_context=run_context,
                    session_state=session_state,
                    workflow_session=workflow_session,
                    add_workflow_history_to_steps=add_workflow_history_to_steps,
                    num_history_runs=num_history_runs,
                    background_tasks=background_tasks,
                )

                # Handle both single StepOutput and List[StepOutput] (from Loop/Condition/Router steps)
                if isinstance(step_output, list):
                    all_results.extend(step_output)
                    if step_output:
                        # Track only the last output under this step's name
                        steps_step_outputs[step_name] = step_output[-1]
                        if any(output.stop for output in step_output):
                            logger.info(f"Early termination requested by step {step_name}")
                            break
                else:
                    all_results.append(step_output)
                    steps_step_outputs[step_name] = step_output
                    if step_output.stop:
                        logger.info(f"Early termination requested by step {step_name}")
                        break

                # Update input for next step with proper chaining
                current_step_input = self._update_step_input_from_outputs(
                    current_step_input, step_output, steps_step_outputs
                )

            log_debug(f"Steps End: {self.name} ({len(all_results)} results)", center=True, symbol="-")

            return StepOutput(
                step_name=self.name,
                step_id=steps_id,
                step_type=StepType.STEPS,
                content=f"Steps {self.name} completed with {len(all_results)} results",
                success=all(result.success for result in all_results) if all_results else True,
                stop=any(result.stop for result in all_results) if all_results else False,
                steps=all_results,
            )
        except Exception as e:
            logger.error(f"Async steps execution failed: {e}")
            return StepOutput(
                step_name=self.name or "Steps",
                content=f"Steps execution failed: {str(e)}",
                success=False,
                error=str(e),
            )
    async def aexecute_stream(
        self,
        step_input: StepInput,
        workflow_run_response: WorkflowRunOutput,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        stream_events: bool = False,
        stream_executor_events: bool = True,
        step_index: Optional[Union[int, tuple]] = None,
        store_executor_outputs: bool = True,
        parent_step_id: Optional[str] = None,
        workflow_session: Optional[WorkflowSession] = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
        background_tasks: Optional[Any] = None,
    ) -> AsyncIterator[Union[WorkflowRunOutputEvent, TeamRunOutputEvent, RunOutputEvent, StepOutput]]:
        """Execute all steps in sequence with async streaming support.

        Async counterpart of ``execute_stream``: non-StepOutput events from
        child steps are yielded through as they arrive; StepOutput events are
        collected for input chaining and re-emitted only inside the final
        aggregate StepOutput. Exceptions become a failed StepOutput.
        """
        log_debug(f"Steps Start: {self.name} ({len(self.steps)} steps)", center=True, symbol="-")
        steps_id = str(uuid4())
        self._prepare_steps()

        if stream_events:
            # Yield steps execution started event
            yield StepsExecutionStartedEvent(
                run_id=workflow_run_response.run_id or "",
                workflow_name=workflow_run_response.workflow_name or "",
                workflow_id=workflow_run_response.workflow_id or "",
                session_id=workflow_run_response.session_id or "",
                step_name=self.name,
                step_index=step_index,
                steps_count=len(self.steps),
                step_id=steps_id,
                parent_step_id=parent_step_id,
            )

        if not self.steps:
            yield StepOutput(step_name=self.name or "Steps", content="No steps to execute")
            return

        # Track outputs and pass data between steps - following Condition/Router pattern
        all_results = []
        current_step_input = step_input
        steps_step_outputs = {}

        try:
            for i, step in enumerate(self.steps):
                step_name = getattr(step, "name", f"step_{i + 1}")
                log_debug(f"Steps {self.name}: Executing async step {i + 1}/{len(self.steps)} - {step_name}")

                step_outputs_for_step = []

                # Build a hierarchical index so nested events can be attributed.
                if step_index is None or isinstance(step_index, int):
                    # Steps is a main step - child steps get x.1, x.2, x.3 format
                    child_step_index = (step_index if step_index is not None else 1, i)  # Use i, not i+1
                else:
                    # Steps is already a child step - child steps get parent.1, parent.2, parent.3
                    child_step_index = step_index + (i,)  # Extend the tuple

                # Stream step execution
                async for event in step.aexecute_stream(  # type: ignore
                    current_step_input,
                    session_id=session_id,
                    user_id=user_id,
                    run_context=run_context,
                    session_state=session_state,
                    stream_events=stream_events,
                    stream_executor_events=stream_executor_events,
                    workflow_run_response=workflow_run_response,
                    step_index=child_step_index,
                    store_executor_outputs=store_executor_outputs,
                    parent_step_id=steps_id,
                    workflow_session=workflow_session,
                    add_workflow_history_to_steps=add_workflow_history_to_steps,
                    num_history_runs=num_history_runs,
                    background_tasks=background_tasks,
                ):
                    if isinstance(event, StepOutput):
                        step_outputs_for_step.append(event)
                        all_results.append(event)
                    else:
                        # Yield other events (streaming content, step events, etc.)
                        yield event

                # Update step outputs tracking and prepare input for next step
                if step_outputs_for_step:
                    if len(step_outputs_for_step) == 1:
                        steps_step_outputs[step_name] = step_outputs_for_step[0]

                        # Break happens before the chaining update on early stop.
                        if step_outputs_for_step[0].stop:
                            logger.info(f"Early termination requested by step {step_name}")
                            break

                        current_step_input = self._update_step_input_from_outputs(
                            current_step_input, step_outputs_for_step[0], steps_step_outputs
                        )
                    else:
                        # Use last output
                        steps_step_outputs[step_name] = step_outputs_for_step[-1]

                        if any(output.stop for output in step_outputs_for_step):
                            logger.info(f"Early termination requested by step {step_name}")
                            break

                        current_step_input = self._update_step_input_from_outputs(
                            current_step_input, step_outputs_for_step, steps_step_outputs
                        )

            log_debug(f"Steps End: {self.name} ({len(all_results)} results)", center=True, symbol="-")

            # Yield steps execution completed event
            if stream_events:
                yield StepsExecutionCompletedEvent(
                    run_id=workflow_run_response.run_id or "",
                    workflow_name=workflow_run_response.workflow_name or "",
                    workflow_id=workflow_run_response.workflow_id or "",
                    session_id=workflow_run_response.session_id or "",
                    step_name=self.name,
                    step_index=step_index,
                    steps_count=len(self.steps),
                    executed_steps=len(all_results),
                    step_results=all_results,
                    step_id=steps_id,
                    parent_step_id=parent_step_id,
                )

            yield StepOutput(
                step_name=self.name,
                step_id=steps_id,
                step_type=StepType.STEPS,
                content=f"Steps {self.name} completed with {len(all_results)} results",
                success=all(result.success for result in all_results) if all_results else True,
                stop=any(result.stop for result in all_results) if all_results else False,
                steps=all_results,
            )
        except Exception as e:
            logger.error(f"Async steps streaming failed: {e}")
            error_result = StepOutput(
                step_name=self.name or "Steps",
                content=f"Steps execution failed: {str(e)}",
                success=False,
                error=str(e),
            )
            yield error_result
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/steps.py",
"license": "Apache License 2.0",
"lines": 606,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/workflow/types.py | from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.metrics import RunMetrics
from agno.session.workflow import WorkflowSession
from agno.utils.media import (
reconstruct_audio_list,
reconstruct_files,
reconstruct_images,
reconstruct_videos,
)
from agno.utils.timer import Timer
class OnReject(str, Enum):
    """Action to take when a step requiring confirmation is rejected.

    Attributes:
        skip: Skip the rejected step and continue with the next step in the workflow.
        cancel: Cancel the entire workflow when the step is rejected.
        else_branch: For Condition only - execute the else_steps branch when rejected.
    """

    skip = "skip"
    cancel = "cancel"
    # Member is named else_branch because "else" is a reserved word; the
    # serialized value remains "else".
    else_branch = "else"
class OnError(str, Enum):
    """Action to take when a step encounters an error during execution.

    Attributes:
        fail: Fail the workflow immediately when an error occurs (default).
        skip: Skip the failed step and continue with the next step.
        pause: Pause the workflow and allow the user to decide (retry or skip) via HITL.
    """

    fail = "fail"
    skip = "skip"
    pause = "pause"
@dataclass
class WorkflowExecutionInput:
    """Input data for a step execution"""

    # Primary input: free text, structured data, or a pydantic model.
    input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None
    additional_data: Optional[Dict[str, Any]] = None

    # Media inputs
    images: Optional[List[Image]] = None
    videos: Optional[List[Video]] = None
    audio: Optional[List[Audio]] = None
    files: Optional[List[File]] = None

    def get_input_as_string(self) -> Optional[str]:
        """Render the input as a human-readable string (None when unset)."""
        if self.input is None:
            return None
        if isinstance(self.input, BaseModel):
            return self.input.model_dump_json(indent=2, exclude_none=True)
        if isinstance(self.input, (dict, list)):
            import json

            return json.dumps(self.input, indent=2, default=str)
        if isinstance(self.input, str):
            return self.input
        return str(self.input)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; media lists become lists of dicts."""

        def media_list(items):
            # None stays None so absent media is distinguishable from empty.
            return [item.to_dict() for item in items] if items else None

        serialized_input: Optional[Union[str, Dict[str, Any], List[Any]]] = None
        if self.input is not None:
            if isinstance(self.input, BaseModel):
                serialized_input = self.input.model_dump(exclude_none=True)
            elif isinstance(self.input, (dict, list)):
                serialized_input = self.input
            else:
                serialized_input = str(self.input)

        return {
            "input": serialized_input,
            "additional_data": self.additional_data,
            "images": media_list(self.images),
            "videos": media_list(self.videos),
            "audio": media_list(self.audio),
            "files": media_list(self.files),
        }
@dataclass
class StepInput:
    """Input data for a step execution"""

    # Primary input for the step: free text, structured data, or a pydantic model.
    input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None
    # Content of the immediately preceding step (used for chaining).
    previous_step_content: Optional[Any] = None
    # All prior outputs keyed by step name; insertion order follows execution order.
    previous_step_outputs: Optional[Dict[str, "StepOutput"]] = None
    # Arbitrary extra data passed through the workflow unchanged.
    additional_data: Optional[Dict[str, Any]] = None

    # Media inputs
    images: Optional[List[Image]] = None
    videos: Optional[List[Video]] = None
    audio: Optional[List[Audio]] = None
    files: Optional[List[File]] = None

    # Session handle used to expose workflow history to custom function steps.
    workflow_session: Optional["WorkflowSession"] = None
def get_input_as_string(self) -> Optional[str]:
"""Convert input to string representation"""
if self.input is None:
return None
if isinstance(self.input, str):
return self.input
elif isinstance(self.input, BaseModel):
return self.input.model_dump_json(indent=2, exclude_none=True)
elif isinstance(self.input, (dict, list)):
import json
return json.dumps(self.input, indent=2, default=str)
else:
return str(self.input)
def get_step_output(self, step_name: str) -> Optional["StepOutput"]:
"""Get output from a specific previous step by name
Searches recursively through nested steps (Parallel, Condition, Router, Loop, Steps)
to find step outputs at any depth.
"""
if not self.previous_step_outputs:
return None
# First try direct lookup
direct = self.previous_step_outputs.get(step_name)
if direct:
return direct
# Search recursively in nested steps
return self._search_nested_steps(step_name)
def _search_nested_steps(self, step_name: str) -> Optional["StepOutput"]:
"""Recursively search for a step output in nested steps (Parallel, Condition, etc.)"""
if not self.previous_step_outputs:
return None
for step_output in self.previous_step_outputs.values():
result = self._search_in_step_output(step_output, step_name)
if result:
return result
return None
def _search_in_step_output(self, step_output: "StepOutput", step_name: str) -> Optional["StepOutput"]:
"""Helper to recursively search within a single StepOutput"""
if not step_output.steps:
return None
for nested_step in step_output.steps:
if nested_step.step_name == step_name:
return nested_step
# Recursively search deeper
result = self._search_in_step_output(nested_step, step_name)
if result:
return result
return None
    def get_step_content(self, step_name: str) -> Optional[Union[str, Dict[str, str]]]:
        """Get content from a specific previous step by name.

        For parallel steps, if you ask for the parallel step name, returns a dict
        with {step_name: content} for each sub-step.
        For other nested steps (Condition, Router, Loop, Steps), returns the deepest content.
        Returns None when no step with that name is found.
        """
        step_output = self.get_step_output(step_name)
        if not step_output:
            return None

        # Check if this is a parallel step with nested steps
        if step_output.step_type == "Parallel" and step_output.steps:
            # Return dict with {step_name: content} for each sub-step
            parallel_content = {}
            for sub_step in step_output.steps:
                if sub_step.step_name and sub_step.content:
                    # Check if this sub-step has its own nested steps (like Condition -> Research Step)
                    if sub_step.steps and len(sub_step.steps) > 0:
                        # This is a composite step (like Condition) - get content from its nested steps
                        for nested_step in sub_step.steps:
                            if nested_step.step_name and nested_step.content:
                                parallel_content[nested_step.step_name] = str(nested_step.content)
                    else:
                        # This is a direct step - use its content
                        parallel_content[sub_step.step_name] = str(sub_step.content)

            # Fall back to the parallel step's own content if no sub-step contributed
            return parallel_content if parallel_content else str(step_output.content)

        # For other nested step types (Condition, Router, Loop, Steps), get the deepest content
        elif step_output.steps and len(step_output.steps) > 0:
            # This is a nested step structure - recursively get the deepest content
            return self._get_deepest_step_content(step_output.steps[-1])

        # Regular step, return content directly
        return step_output.content  # type: ignore[return-value]
def _get_deepest_step_content(self, step_output: "StepOutput") -> Optional[Union[str, Dict[str, str]]]:
"""Helper method to recursively extract deepest content from nested steps"""
# If this step has nested steps, go deeper
if step_output.steps and len(step_output.steps) > 0:
return self._get_deepest_step_content(step_output.steps[-1])
# Return the content of this step
return step_output.content # type: ignore[return-value]
def get_all_previous_content(self) -> str:
    """Concatenate the content of every previous step, each labelled by its step name."""
    outputs = self.previous_step_outputs or {}
    sections = [f"=== {name} ===\n{out.content}" for name, out in outputs.items() if out.content]
    return "\n\n".join(sections)
def get_last_step_content(self) -> Optional[str]:
    """Return the (deepest) content of the most recently recorded step.

    Kept for backward compatibility; returns None when no previous step
    outputs exist.
    """
    outputs = self.previous_step_outputs
    if not outputs:
        return None
    latest = list(outputs.values())[-1]
    if not latest:
        return None
    # Delegate so nested structures resolve to their deepest content
    return self._get_deepest_step_content(latest)  # type: ignore[return-value]
def get_workflow_history(self, num_runs: Optional[int] = None) -> List[Tuple[str, str]]:
    """Structured workflow conversation history for custom function steps.

    Args:
        num_runs: Number of recent runs to include; None means all available history.
    """
    session = self.workflow_session
    # No session attached means no history to report
    return session.get_workflow_history(num_runs=num_runs) if session else []
def get_workflow_history_context(self, num_runs: Optional[int] = None) -> Optional[str]:
    """Formatted workflow conversation history context for custom function steps.

    Args:
        num_runs: Number of recent runs to include; None means all available history.
    """
    session = self.workflow_session
    # Without a session there is no context to format
    return session.get_workflow_history_context(num_runs=num_runs) if session else None
def to_dict(self) -> Dict[str, Any]:
    """Serialize this StepInput to a JSON-friendly dictionary."""
    # Normalize the unified input field: pydantic models -> dict,
    # dict/list pass through, anything else is stringified.
    serialized_input: Optional[Union[str, Dict[str, Any], List[Any]]] = None
    if self.input is not None:
        if isinstance(self.input, BaseModel):
            serialized_input = self.input.model_dump(exclude_none=True, mode="json")
        elif isinstance(self.input, (dict, list)):
            serialized_input = self.input
        else:
            serialized_input = str(self.input)
    # previous_step_content is always flattened to a string (or None)
    prev_content: Optional[str] = None
    if isinstance(self.previous_step_content, BaseModel):
        prev_content = self.previous_step_content.model_dump_json(indent=2, exclude_none=True)
    elif isinstance(self.previous_step_content, dict):
        import json

        prev_content = json.dumps(self.previous_step_content, indent=2, default=str)
    elif self.previous_step_content:
        prev_content = str(self.previous_step_content)
    prev_outputs = {name: out.to_dict() for name, out in (self.previous_step_outputs or {}).items()}
    return {
        "input": serialized_input,
        "previous_step_outputs": prev_outputs,
        "previous_step_content": prev_content,
        "additional_data": self.additional_data,
        "images": [img.to_dict() for img in self.images] if self.images else None,
        "videos": [vid.to_dict() for vid in self.videos] if self.videos else None,
        "audio": [aud.to_dict() for aud in self.audio] if self.audio else None,
        # NOTE(review): files are passed through unserialized here, unlike
        # StepOutput.to_dict which calls f.to_dict() — confirm intentional.
        "files": list(self.files) if self.files else None,
    }
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "StepInput":
    """Rebuild a StepInput from its dictionary representation."""
    # Reconstruct previous step outputs, if any were serialized
    raw_outputs = data.get("previous_step_outputs")
    step_outputs = (
        {name: StepOutput.from_dict(payload) for name, payload in raw_outputs.items()} if raw_outputs else None
    )
    return cls(
        input=data.get("input"),
        previous_step_content=data.get("previous_step_content"),
        previous_step_outputs=step_outputs,
        additional_data=data.get("additional_data"),
        # Media artifacts are rebuilt from their serialized forms
        images=reconstruct_images(data.get("images")),
        videos=reconstruct_videos(data.get("videos")),
        audio=reconstruct_audio_list(data.get("audio")),
        files=reconstruct_files(data.get("files")),
    )
@dataclass
class StepOutput:
    """Output data from a single step execution, possibly containing nested step outputs."""

    step_name: Optional[str] = None
    step_id: Optional[str] = None
    step_type: Optional[str] = None
    executor_type: Optional[str] = None
    executor_name: Optional[str] = None
    # Primary output
    content: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, Any]] = None
    # Link to the run ID of the step execution
    step_run_id: Optional[str] = None
    # Media outputs
    images: Optional[List[Image]] = None
    videos: Optional[List[Video]] = None
    audio: Optional[List[Audio]] = None
    files: Optional[List[File]] = None
    # Metrics for this step execution
    metrics: Optional[RunMetrics] = None
    success: bool = True
    error: Optional[str] = None
    stop: bool = False
    steps: Optional[List["StepOutput"]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this StepOutput (including any nested steps) to a plain dict."""
        # Normalize content: pydantic models -> dict, dict/list pass
        # through, anything else is stringified.
        serialized_content: Optional[Union[str, Dict[str, Any], List[Any]]] = None
        if self.content is not None:
            if isinstance(self.content, BaseModel):
                serialized_content = self.content.model_dump(exclude_none=True, mode="json")
            elif isinstance(self.content, (dict, list)):
                serialized_content = self.content
            else:
                serialized_content = str(self.content)
        payload = {
            "content": serialized_content,
            "step_name": self.step_name,
            "step_id": self.step_id,
            "step_type": self.step_type,
            "executor_type": self.executor_type,
            "executor_name": self.executor_name,
            "step_run_id": self.step_run_id,
            "images": [img.to_dict() for img in self.images] if self.images else None,
            "videos": [vid.to_dict() for vid in self.videos] if self.videos else None,
            "audio": [aud.to_dict() for aud in self.audio] if self.audio else None,
            "files": [f.to_dict() for f in self.files] if self.files else None,
            "metrics": self.metrics.to_dict() if self.metrics else None,
            "success": self.success,
            "error": self.error,
            "stop": self.stop,
        }
        # Only composite outputs carry a "steps" key
        if self.steps:
            payload["steps"] = [child.to_dict() for child in self.steps]
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "StepOutput":
        """Rebuild a StepOutput (and any nested steps) from a dictionary."""
        raw_metrics = data.get("metrics")
        metrics = None
        if raw_metrics:
            # Accept either a serialized dict or an already-constructed object
            metrics = RunMetrics.from_dict(raw_metrics) if isinstance(raw_metrics, dict) else raw_metrics
        raw_steps = data.get("steps")
        nested_steps = [cls.from_dict(item) for item in raw_steps] if raw_steps else None
        return cls(
            step_name=data.get("step_name"),
            step_id=data.get("step_id"),
            step_type=data.get("step_type"),
            executor_type=data.get("executor_type"),
            executor_name=data.get("executor_name"),
            content=data.get("content"),
            step_run_id=data.get("step_run_id"),
            images=reconstruct_images(data.get("images")),
            videos=reconstruct_videos(data.get("videos")),
            audio=reconstruct_audio_list(data.get("audio")),
            files=reconstruct_files(data.get("files")),
            metrics=metrics,
            success=data.get("success", True),
            error=data.get("error"),
            stop=data.get("stop", False),
            steps=nested_steps,
        )
@dataclass
class StepMetrics:
    """Metrics recorded for a single step execution."""

    step_name: str
    executor_type: str  # "agent", "team", etc.
    executor_name: str
    metrics: Optional[RunMetrics] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dictionary; metrics are expanded via their own to_dict."""
        return {
            "step_name": self.step_name,
            "executor_type": self.executor_type,
            "executor_name": self.executor_name,
            "metrics": self.metrics.to_dict() if self.metrics else None,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "StepMetrics":
        """Rebuild StepMetrics from a dictionary.

        The "metrics" entry may be a serialized dict or an already-constructed
        RunMetrics object; both are accepted.
        """
        raw_metrics = data.get("metrics")
        metrics = None
        if raw_metrics:
            metrics = RunMetrics.from_dict(raw_metrics) if isinstance(raw_metrics, dict) else raw_metrics
        return cls(
            step_name=data["step_name"],
            executor_type=data["executor_type"],
            executor_name=data["executor_name"],
            metrics=metrics,
        )
@dataclass
class WorkflowMetrics:
    """Complete metrics for a workflow execution"""

    # Per-step metrics keyed by step name
    steps: Dict[str, StepMetrics]
    # Timer utility for tracking execution time
    timer: Optional[Timer] = None
    # Total workflow execution time
    duration: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary"""
        # The live Timer object is intentionally not serialized
        result: Dict[str, Any] = {
            "steps": {name: step.to_dict() for name, step in self.steps.items()},
            "duration": self.duration,
        }
        return result

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkflowMetrics":
        """Create WorkflowMetrics from dictionary"""
        steps = {name: StepMetrics.from_dict(step_data) for name, step_data in data["steps"].items()}
        return cls(
            steps=steps,
            duration=data.get("duration"),
        )

    def start_timer(self):
        # Lazily create the timer on first use, then start it
        if self.timer is None:
            self.timer = Timer()
        self.timer.start()

    def stop_timer(self, set_duration: bool = True):
        # Stopping without a started timer is a no-op
        if self.timer is not None:
            self.timer.stop()
            if set_duration:
                self.duration = self.timer.elapsed
class StepType(str, Enum):
    """Kinds of workflow step components; string values match serialized step_type fields."""

    FUNCTION = "Function"
    STEP = "Step"
    STEPS = "Steps"
    LOOP = "Loop"
    PARALLEL = "Parallel"
    CONDITION = "Condition"
    ROUTER = "Router"
@dataclass
class UserInputField:
    """A single field to be filled in by the user during a HITL pause.

    Attributes:
        name: The field name (used as the key in user input).
        field_type: The expected type ("str", "int", "float", "bool", "list", "dict").
        description: Optional description shown to the user.
        value: The value provided by the user (set after input).
        required: Whether this field is required.
        allowed_values: Optional list of allowed values for validation.
    """

    name: str
    field_type: str  # "str", "int", "float", "bool", "list", "dict"
    description: Optional[str] = None
    value: Optional[Any] = None
    required: bool = True
    allowed_values: Optional[List[Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dictionary; allowed_values is included only when set."""
        payload: Dict[str, Any] = {
            "name": self.name,
            "field_type": self.field_type,
            "description": self.description,
            "value": self.value,
            "required": self.required,
        }
        if self.allowed_values is not None:
            payload["allowed_values"] = self.allowed_values
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "UserInputField":
        """Rebuild from a dictionary; only "name" is mandatory, the rest default."""
        return cls(
            name=data["name"],
            field_type=data.get("field_type", "str"),
            description=data.get("description"),
            value=data.get("value"),
            required=data.get("required", True),
            allowed_values=data.get("allowed_values"),
        )
@dataclass
class StepRequirement:
    """Unified requirement for all HITL (Human-in-the-Loop) workflow pauses.

    This class handles three types of HITL scenarios:

    1. **Confirmation**: User confirms or rejects execution (Step, Loop, Condition, Steps, Router)
    2. **User Input**: User provides custom input values (Step with user_input_schema)
    3. **Route Selection**: User selects which route(s) to take (Router with requires_user_input)

    The `step_type` field indicates what kind of component created this requirement.
    It accepts both StepType enum values and strings for flexibility.

    The `on_reject` field determines behavior when a step is rejected:

    - OnReject.skip / "skip": Skip the step and continue workflow
    - OnReject.cancel / "cancel": Cancel the entire workflow
    - OnReject.else_branch / "else": For Condition only, execute else_steps
    """

    step_id: str
    step_name: Optional[str] = None
    step_index: Optional[int] = None

    # Component type that created this requirement
    # Accepts StepType enum or string for flexibility
    step_type: Optional[Union[StepType, str]] = None

    # Confirmation fields (for Step, Loop, Condition, Steps, Router confirmation mode)
    requires_confirmation: bool = False
    confirmation_message: Optional[str] = None
    confirmed: Optional[bool] = None

    # What to do when step is rejected
    # Accepts OnReject enum or string for flexibility
    on_reject: Union[OnReject, str] = OnReject.cancel

    # User input fields (for Step with custom input)
    requires_user_input: bool = False
    user_input_message: Optional[str] = None
    user_input_schema: Optional[List[UserInputField]] = None
    user_input: Optional[Dict[str, Any]] = None  # The actual user input values

    # Route selection fields (for Router user selection mode)
    requires_route_selection: bool = False
    available_choices: Optional[List[str]] = None  # Available route names
    allow_multiple_selections: bool = False  # If True, user can select multiple routes
    selected_choices: Optional[List[str]] = None  # User's selected route(s)

    # The step input that was prepared before pausing
    step_input: Optional["StepInput"] = None

    def confirm(self) -> None:
        """Confirm the step execution"""
        self.confirmed = True

    def reject(self) -> None:
        """Reject the step execution"""
        self.confirmed = False

    def set_user_input(self, validate: bool = True, **kwargs) -> None:
        """Set user input values.

        Args:
            validate: Whether to validate the input against the schema. Defaults to True.
            **kwargs: The user input values as key-value pairs.

        Raises:
            ValueError: If validation is enabled and required fields are missing,
                or if field types don't match the schema.
        """
        if self.user_input is None:
            self.user_input = {}
        self.user_input.update(kwargs)
        # Also update the schema values if present
        if self.user_input_schema:
            for field in self.user_input_schema:
                if field.name in kwargs:
                    field.value = kwargs[field.name]
        # Validate if schema is present and validation is enabled
        if validate and self.user_input_schema:
            self._validate_user_input(kwargs)

    def _validate_user_input(self, user_input: Dict[str, Any]) -> None:
        """Validate user input against the schema.

        Args:
            user_input: The user input values to validate.

        Raises:
            ValueError: If required fields are missing or types don't match.
        """
        if not self.user_input_schema:
            return
        errors = []
        for field in self.user_input_schema:
            value = user_input.get(field.name)
            # Check required fields
            if field.required and (value is None or value == ""):
                errors.append(f"Required field '{field.name}' is missing or empty")
                continue
            # Skip type validation if value is not provided (and not required)
            if value is None:
                continue
            # Validate type
            expected_type = field.field_type
            if expected_type == "str" and not isinstance(value, str):
                errors.append(f"Field '{field.name}' expected str, got {type(value).__name__}")
            elif expected_type == "int":
                # bool is a subclass of int, so it must be excluded explicitly
                if not isinstance(value, int) or isinstance(value, bool):
                    errors.append(f"Field '{field.name}' expected int, got {type(value).__name__}")
            elif expected_type == "float":
                # ints are accepted for float fields; bools are not
                if not isinstance(value, (int, float)) or isinstance(value, bool):
                    errors.append(f"Field '{field.name}' expected float, got {type(value).__name__}")
            elif expected_type == "bool" and not isinstance(value, bool):
                errors.append(f"Field '{field.name}' expected bool, got {type(value).__name__}")
            # Validate allowed values if specified
            if field.allowed_values and value not in field.allowed_values:
                errors.append(f"Field '{field.name}' value '{value}' is not in allowed values: {field.allowed_values}")
        if errors:
            raise ValueError("User input validation failed:\n - " + "\n - ".join(errors))

    def get_user_input(self, field_name: str) -> Optional[Any]:
        """Get a specific user input value"""
        if self.user_input:
            return self.user_input.get(field_name)
        return None

    # Route selection methods (for Router)
    def select(self, *choices: str) -> None:
        """Select one or more route choices by name."""
        if not self.allow_multiple_selections and len(choices) > 1:
            raise ValueError("This router only allows single selection. Use select() with one choice.")
        self.selected_choices = list(choices)

    def select_single(self, choice: str) -> None:
        """Select a single route choice by name."""
        self.selected_choices = [choice]

    def select_multiple(self, choices: List[str]) -> None:
        """Select multiple route choices by name."""
        if not self.allow_multiple_selections:
            raise ValueError("This router does not allow multiple selections.")
        self.selected_choices = choices

    @property
    def needs_confirmation(self) -> bool:
        """Check if this requirement still needs confirmation"""
        # Already answered (either way) means no further confirmation is needed
        if self.confirmed is not None:
            return False
        return self.requires_confirmation

    @property
    def needs_user_input(self) -> bool:
        """Check if this requirement still needs user input"""
        if not self.requires_user_input:
            return False
        if self.user_input_schema:
            # Check if all required fields have values
            for field in self.user_input_schema:
                if field.required and field.value is None:
                    return True
            return False
        # If no schema, check if user_input dict has any values
        return self.user_input is None or len(self.user_input) == 0

    @property
    def needs_route_selection(self) -> bool:
        """Check if this requirement still needs route selection"""
        if not self.requires_route_selection:
            return False
        return self.selected_choices is None or len(self.selected_choices) == 0

    @property
    def is_resolved(self) -> bool:
        """Check if this requirement has been resolved"""
        if self.requires_confirmation and self.confirmed is None:
            return False
        if self.requires_user_input and self.needs_user_input:
            return False
        if self.requires_route_selection and self.needs_route_selection:
            return False
        return True

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary"""
        # Convert enum values to strings for serialization
        step_type_str = self.step_type.value if isinstance(self.step_type, StepType) else self.step_type
        on_reject_str = self.on_reject.value if isinstance(self.on_reject, OnReject) else self.on_reject
        result: Dict[str, Any] = {
            "step_id": self.step_id,
            "step_name": self.step_name,
            "step_index": self.step_index,
            "step_type": step_type_str,
            "requires_confirmation": self.requires_confirmation,
            "confirmation_message": self.confirmation_message,
            "confirmed": self.confirmed,
            "on_reject": on_reject_str,
            "requires_user_input": self.requires_user_input,
            "user_input_message": self.user_input_message,
            "user_input": self.user_input,
            "requires_route_selection": self.requires_route_selection,
            "available_choices": self.available_choices,
            "allow_multiple_selections": self.allow_multiple_selections,
            "selected_choices": self.selected_choices,
        }
        # Optional payloads are only included when present
        if self.user_input_schema is not None:
            result["user_input_schema"] = [f.to_dict() for f in self.user_input_schema]
        if self.step_input is not None:
            result["step_input"] = self.step_input.to_dict()
        return result

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "StepRequirement":
        """Create StepRequirement from dictionary"""
        step_input = None
        if data.get("step_input"):
            step_input = StepInput.from_dict(data["step_input"])
        user_input_schema = None
        if data.get("user_input_schema"):
            user_input_schema = [UserInputField.from_dict(f) for f in data["user_input_schema"]]
        return cls(
            step_id=data["step_id"],
            step_name=data.get("step_name"),
            step_index=data.get("step_index"),
            step_type=data.get("step_type"),
            requires_confirmation=data.get("requires_confirmation", False),
            confirmation_message=data.get("confirmation_message"),
            confirmed=data.get("confirmed"),
            on_reject=data.get("on_reject", "cancel"),
            requires_user_input=data.get("requires_user_input", False),
            user_input_message=data.get("user_input_message"),
            user_input_schema=user_input_schema,
            user_input=data.get("user_input"),
            requires_route_selection=data.get("requires_route_selection", False),
            available_choices=data.get("available_choices"),
            allow_multiple_selections=data.get("allow_multiple_selections", False),
            selected_choices=data.get("selected_choices"),
            step_input=step_input,
        )
@dataclass
class ErrorRequirement:
    """Requirement to handle a step error (used for error-based HITL flows).

    When a Step has `on_error="pause"` and encounters an exception,
    the workflow pauses and creates this requirement. The user can
    decide to retry the step or skip it and continue with the next step.
    """

    step_id: str
    step_name: Optional[str] = None
    step_index: Optional[int] = None

    # Error information
    error_message: str = ""
    error_type: Optional[str] = None  # e.g., "ValueError", "TimeoutError"
    retry_count: int = 0  # How many times this step has been retried

    # User's decision: "retry" or "skip"
    decision: Optional[str] = None

    # The step input that was used when the error occurred
    step_input: Optional["StepInput"] = None

    def retry(self) -> None:
        """Record a "retry" decision for the failed step."""
        self.decision = "retry"

    def skip(self) -> None:
        """Record a "skip" decision: continue with the next step."""
        self.decision = "skip"

    @property
    def needs_decision(self) -> bool:
        """True while no retry/skip decision has been recorded."""
        return self.decision is None

    @property
    def is_resolved(self) -> bool:
        """True once a retry/skip decision has been recorded."""
        return not self.needs_decision

    @property
    def should_retry(self) -> bool:
        """True when the recorded decision is "retry"."""
        return self.decision == "retry"

    @property
    def should_skip(self) -> bool:
        """True when the recorded decision is "skip"."""
        return self.decision == "skip"

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a dictionary.

        step_input is intentionally omitted to avoid circular reference
        issues; it is reconstructed when the workflow resumes.
        """
        keys = ("step_id", "step_name", "step_index", "error_message", "error_type", "retry_count", "decision")
        return {key: getattr(self, key) for key in keys}

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ErrorRequirement":
        """Rebuild an ErrorRequirement from a dictionary (step_input is not restored)."""
        return cls(
            step_id=data["step_id"],
            step_name=data.get("step_name"),
            step_index=data.get("step_index"),
            error_message=data.get("error_message", ""),
            error_type=data.get("error_type"),
            retry_count=data.get("retry_count", 0),
            decision=data.get("decision"),
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/types.py",
"license": "Apache License 2.0",
"lines": 745,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/agent/test_chat_history.py | import pytest
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
@pytest.fixture
def chat_agent(shared_db):
    """Agent wired to the shared test database (no memory features enabled)."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
    )
    return agent
@pytest.fixture
def memory_agent(shared_db):
    """Agent that extracts and stores user memories on every run."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        update_memory_on_run=True,
    )
    return agent
def test_agent_runs_in_memory(chat_agent):
    """A single run is persisted to the session with its request/response messages."""
    session_id = "test_session"
    run_response = chat_agent.run("Hello, how are you?", session_id=session_id)

    assert run_response is not None
    assert run_response.content is not None
    assert run_response.run_id is not None

    stored_session = chat_agent.get_session(session_id=session_id)
    assert stored_session is not None
    assert len(stored_session.runs) == 1

    persisted_run = stored_session.runs[0]
    assert persisted_run.run_id == run_response.run_id
    # One user message plus one assistant message
    assert len(persisted_run.messages) == 2
@pytest.mark.asyncio
async def test_multi_user_multi_session_chat(memory_agent, shared_db):
    """Test multi-user multi-session chat with storage and memory.

    Runs interleaved conversations for two users across three sessions, then
    verifies that sessions are partitioned per user in storage and that user
    memories were captured.
    """
    # Define user and session IDs
    user_1_id = "user_1@example.com"
    user_2_id = "user_2@example.com"
    user_1_session_1_id = "user_1_session_1"
    user_1_session_2_id = "user_1_session_2"
    user_2_session_1_id = "user_2_session_1"

    # Chat with user 1 - Session 1
    await memory_agent.arun(
        "Remember that my name is Mark Gonzales",
        user_id=user_1_id,
        session_id=user_1_session_1_id,
    )
    await memory_agent.arun(
        "Remember that I enjoy reading manga.",
        user_id=user_1_id,
        session_id=user_1_session_1_id,
    )

    # Chat with user 1 - Session 2
    await memory_agent.arun("I'm going to the movies tonight.", user_id=user_1_id, session_id=user_1_session_2_id)

    # Chat with user 2
    await memory_agent.arun("Hi my name is John Doe.", user_id=user_2_id, session_id=user_2_session_1_id)
    await memory_agent.arun(
        "I love hiking and go hiking every weekend.", user_id=user_2_id, session_id=user_2_session_1_id
    )

    # Continue the conversation with user 1
    await memory_agent.arun("What do you suggest I do this weekend?", user_id=user_1_id, session_id=user_1_session_1_id)

    # Verify storage DB has the right sessions
    all_session_ids = shared_db.get_sessions(session_type=SessionType.AGENT)
    assert len(all_session_ids) == 3  # 3 sessions total

    # Check that each user has the expected sessions
    user_1_sessions = shared_db.get_sessions(session_type=SessionType.AGENT, user_id=user_1_id)
    assert len(user_1_sessions) == 2
    user_1_session_ids = [session.session_id for session in user_1_sessions]
    assert user_1_session_1_id in user_1_session_ids
    assert user_1_session_2_id in user_1_session_ids

    user_2_sessions = shared_db.get_sessions(session_type=SessionType.AGENT, user_id=user_2_id)
    assert len(user_2_sessions) == 1
    assert user_2_session_1_id in [session.session_id for session in user_2_sessions]

    # Verify memory DB has the right memories
    user_1_memories = shared_db.get_user_memories(user_id=user_1_id)
    assert len(user_1_memories) >= 1  # At least 1 memory for user 1

    user_2_memories = shared_db.get_user_memories(user_id=user_2_id)
    assert len(user_2_memories) >= 1  # At least 1 memory for user 2

    # Verify memory content for user 1.
    # Fix: the original indexed memories[0]/memories[1], which assumed both a
    # minimum count of 2 and a deterministic creation order; LLM-driven memory
    # extraction guarantees neither (and [1] raises IndexError when only one
    # memory is created). Search across all memories instead.
    user_1_memory_texts = [m.memory for m in user_1_memories]
    assert any("Mark Gonzales" in text for text in user_1_memory_texts)
    assert any("manga" in text for text in user_1_memory_texts)

    # Verify memory content for user 2
    user_2_memory_texts = [m.memory for m in user_2_memories]
    assert any("John Doe" in text for text in user_2_memory_texts)
    assert any("hike" in text or "hiking" in text for text in user_2_memory_texts)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_chat_history.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_custom_retriever.py | from agno.agent import Agent
from agno.models.openai import OpenAIChat
def test_agent_with_custom_knowledge_retriever():
    """A custom retriever's results are surfaced via response.references."""

    def fetch_facts(**kwargs):
        return ["Paris is the capital of France"]

    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),
        knowledge_retriever=fetch_facts,  # type: ignore
        add_knowledge_to_context=True,
    )
    response = agent.run("What is the capital of France?")
    assert response is not None and response.references is not None
    assert response.references[0].references == ["Paris is the capital of France"]
def test_agent_with_custom_knowledge_retriever_error():
    """A retriever that raises must not inject references into the context."""

    def failing_retriever(**kwargs):
        raise Exception("Test error")

    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),
        knowledge_retriever=failing_retriever,
        add_knowledge_to_context=True,
    )
    response = agent.run("What is the capital of France?")
    assert response.metadata is None, "There should be no references"
    assert "<references>" not in response.messages[0].content  # type: ignore
def test_agent_with_custom_knowledge_retriever_search_knowledge_error():
    """A failing retriever behind search_knowledge still lets the run complete."""

    def failing_retriever(**kwargs):
        raise Exception("Test error")

    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),
        knowledge_retriever=failing_retriever,
        search_knowledge=True,
        debug_mode=True,
        instructions="Always search the knowledge base for information before answering.",
    )
    response = agent.run("Search my knowledge base for information about the capital of France")
    assert response.metadata is None, "There should be no references"
    assert response.tools and response.tools[0].tool_name == "search_knowledge_base"
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_custom_retriever.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_input.py | from pydantic import BaseModel
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.media import Image
from agno.models.message import Message
from agno.models.openai import OpenAIChat
from agno.session.summary import SessionSummaryManager
def test_message_as_input():
    """A Message object is accepted directly as run() input."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)
    result = agent.run(input=Message(role="user", content="Hello, how are you?"))
    assert result.content is not None
def test_list_as_input(image_path):
    """A list of OpenAI-style content parts is accepted as run() input."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)
    content_parts = [
        {"type": "text", "text": "What's in this image?"},
        {"type": "image_url", "image_url": {"url": image_path}},
    ]
    result = agent.run(input=content_parts)
    assert result.content is not None
def test_dict_as_input():
    """A message-shaped dict is accepted as run() input."""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)
    payload = {"role": "user", "content": "Hello, how are you?"}
    result = agent.run(input=payload)
    assert result.content is not None
def test_base_model_as_input():
    """A pydantic BaseModel instance is accepted as run() input."""

    class InputMessage(BaseModel):
        topic: str
        content: str

    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), markdown=True)
    result = agent.run(input=InputMessage(topic="Greetings", content="Hello, how are you?"))
    assert result.content is not None
def test_empty_string_with_image(image_path):
    """An empty-string prompt with image media still yields non-empty output."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Describe the image provided",
        markdown=True,
    )
    result = agent.run(input="", images=[Image(filepath=image_path)])
    assert result.content is not None
    assert len(result.content) > 0
def test_none_input_with_image(image_path):
    """A None prompt with image media still yields non-empty output."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Describe the image provided",
        markdown=True,
    )
    result = agent.run(
        input=None,  # type: ignore
        images=[Image(filepath=image_path)],
    )
    assert result.content is not None
    assert len(result.content) > 0
def test_empty_string_with_multiple_media(image_path):
    """An empty-string prompt alongside media still yields non-empty output.

    NOTE(review): despite the name, only an image is supplied here — confirm
    whether additional media types were intended.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Analyze the provided media",
        markdown=True,
    )
    result = agent.run(input="", images=[Image(filepath=image_path)])
    assert result.content is not None
    assert len(result.content) > 0
def test_empty_string_with_image_and_user_memories(image_path):
    """With memory updates enabled, an empty-string prompt plus image still works."""
    db = SqliteDb(db_file="tmp/test_empty_input_memories.db")
    summary_manager = SessionSummaryManager(model=OpenAIChat(id="gpt-4o-mini"))
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Describe the image provided",
        db=db,
        update_memory_on_run=True,
        session_summary_manager=summary_manager,
        markdown=True,
    )
    result = agent.run(input="", images=[Image(filepath=image_path)])
    assert result.content is not None
    assert len(result.content) > 0
def test_none_input_with_image_and_user_memories(image_path):
    """With memory updates enabled, a None prompt plus image still works."""
    db = SqliteDb(db_file="tmp/test_none_input_memories.db")
    summary_manager = SessionSummaryManager(model=OpenAIChat(id="gpt-4o-mini"))
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Describe the image provided",
        db=db,
        update_memory_on_run=True,
        session_summary_manager=summary_manager,
        markdown=True,
    )
    result = agent.run(
        input=None,  # type: ignore
        images=[Image(filepath=image_path)],
    )
    assert result.content is not None
    assert len(result.content) > 0
def test_empty_string_with_image_and_session_summaries(image_path):
    """Agent with session summaries enabled should handle empty-string input plus an image."""
    summaries_db = SqliteDb(db_file="tmp/test_empty_input_summaries.db")
    summary_manager = SessionSummaryManager(model=OpenAIChat(id="gpt-4o-mini"))
    summarizing_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Describe the image provided",
        db=summaries_db,
        enable_session_summaries=True,
        session_summary_manager=summary_manager,
        markdown=True,
    )
    result = summarizing_agent.run(input="", images=[Image(filepath=image_path)])
    assert result.content is not None
    assert len(result.content) > 0
def test_none_input_with_image_and_session_summaries(image_path):
    """Agent with session summaries enabled should handle None input plus an image."""
    summaries_db = SqliteDb(db_file="tmp/test_none_input_summaries.db")
    summary_manager = SessionSummaryManager(model=OpenAIChat(id="gpt-4o-mini"))
    summarizing_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="Describe the image provided",
        db=summaries_db,
        enable_session_summaries=True,
        session_summary_manager=summary_manager,
        markdown=True,
    )
    result = summarizing_agent.run(
        input=None,  # type: ignore
        images=[Image(filepath=image_path)],
    )
    assert result.content is not None
    assert len(result.content) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_input.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_multimodal.py | from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.media import Image
from agno.models.openai.chat import OpenAIChat
def test_agent_image_input(shared_db, image_path):
    """Run a multimodal prompt and verify the stored session captures the image message."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=True,
        db=shared_db,
    )
    response = agent.run(
        "Tell me about this image and give me the latest news about it.",
        images=[Image(filepath=image_path)],
    )
    assert response.content is not None
    stored = shared_db.get_session(response.session_id, session_type=SessionType.AGENT)
    assert stored is not None
    assert stored.runs is not None
    assert len(stored.runs) == 1
    run_messages = stored.runs[0].messages
    assert run_messages is not None
    assert len(run_messages) == 3
    # Expected ordering: system / user / assistant.
    assert run_messages[1].role == "user"
    assert run_messages[2].role == "assistant"
    assert run_messages[1].images is not None
    assert run_messages[1].images[0].filepath == str(image_path)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_print_response.py | from unittest.mock import MagicMock, Mock, patch
import pytest
from rich.console import Console
from rich.text import Text
from agno.agent import Agent
from agno.models.openai import OpenAIChat
def test_print_response_with_message_panel():
    """Test that print_response creates a message panel when show_message=True"""
    def get_the_weather():
        return "It is currently 70 degrees and cloudy in Tokyo"
    # Patch Live so nothing is rendered to a real terminal during the test.
    with patch("agno.utils.print_response.agent.Live") as mock_live_class:
        mock_live = MagicMock()
        mock_live_class.return_value.__enter__ = Mock(return_value=mock_live)
        mock_live_class.return_value.__exit__ = Mock(return_value=None)
        with patch("agno.utils.print_response.agent.create_panel") as mock_create_panel:
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                tools=[get_the_weather],
                markdown=True,
                telemetry=False,
            )
            mock_console = MagicMock(spec=Console)
            mock_console.is_jupyter = False
            # Mock a successful run response
            with patch.object(agent, "run") as mock_run:
                mock_response = Mock()
                mock_response.content = "It is currently 70 degrees and cloudy in Tokyo"
                mock_response.formatted_tool_calls = []
                mock_response.citations = None
                mock_response.is_paused = False
                mock_response.metadata = None
                mock_response.get_content_as_string = Mock(
                    return_value="It is currently 70 degrees and cloudy in Tokyo"
                )
                mock_run.return_value = mock_response
                # Run print_response with a message
                agent.print_response(
                    input="What is the weather in Tokyo?", show_message=True, console=mock_console, stream=False
                )
                # More specific verification - check exact call arguments
                # call_args_list entries behave like (args, kwargs); index [1] is the kwargs dict.
                message_panel_calls = [
                    call
                    for call in mock_create_panel.call_args_list
                    if len(call) > 1 and call[1].get("title") == "Message"
                ]
                assert len(message_panel_calls) > 0, "Message panel should be created when show_message=True"
                # Verify the message content and styling
                message_call = message_panel_calls[0]
                content_arg = message_call[1]["content"]
                # Check that the content is a Text object with the right text
                if isinstance(content_arg, Text):
                    assert "What is the weather in Tokyo?" in content_arg.plain
                else:
                    assert "What is the weather in Tokyo?" in str(content_arg)
                # Verify border style is correct
                assert message_call[1].get("border_style") == "cyan"
def test_panel_creation_and_structure():
    """Test that the right panels are created with the right structure"""
    # Patch Live so nothing is rendered to a real terminal during the test.
    with patch("agno.utils.print_response.agent.Live") as mock_live_class:
        mock_live = MagicMock()
        mock_live_class.return_value.__enter__ = Mock(return_value=mock_live)
        mock_live_class.return_value.__exit__ = Mock(return_value=None)
        with patch("agno.utils.print_response.agent.create_panel") as mock_create_panel:
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                markdown=False,
                telemetry=False,
            )
            mock_console = MagicMock(spec=Console)
            mock_console.is_jupyter = False
            with patch.object(agent, "run") as mock_run:
                mock_response = Mock()
                mock_response.content = "Test response content"
                mock_response.formatted_tool_calls = []
                mock_response.citations = None
                mock_response.is_paused = False
                mock_response.metadata = None
                mock_response.get_content_as_string.return_value = "Test response content"
                mock_run.return_value = mock_response
                agent.print_response(input="Test message", show_message=True, console=mock_console, stream=False)
                # Verify the structure of what was created
                calls = mock_create_panel.call_args_list
                # Should have at least 2 calls: message panel and response panel
                assert len(calls) >= 2, f"Expected at least 2 panel calls, got {len(calls)}"
                # First call should be message panel
                message_call = calls[0]
                assert len(message_call) > 1, "Call should have keyword arguments"
                assert message_call[1]["title"] == "Message", "First panel should be Message"
                assert message_call[1]["border_style"] == "cyan", "Message panel should have cyan border"
                # Last call should be response panel
                response_call = calls[-1]
                assert "Response" in response_call[1]["title"], "Last panel should be Response"
                assert response_call[1]["border_style"] == "blue", "Response panel should have blue border"
                # NOTE(review): "0.0s" relies on the mocked run returning instantly;
                # confirm the title format if this becomes flaky on slow machines.
                assert "0.0s" in response_call[1]["title"], "Response title should include timing"
def test_print_response_content_verification():
    """Test that the actual response content makes it into the panel"""
    # Patch Live so nothing is rendered to a real terminal during the test.
    with patch("agno.utils.print_response.agent.Live") as mock_live_class:
        mock_live = MagicMock()
        mock_live_class.return_value.__enter__ = Mock(return_value=mock_live)
        mock_live_class.return_value.__exit__ = Mock(return_value=None)
        with patch("agno.utils.print_response.agent.create_panel") as mock_create_panel:
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                markdown=False,  # Test without markdown first
                telemetry=False,
            )
            mock_console = MagicMock(spec=Console)
            mock_console.is_jupyter = False
            expected_response = "The weather is sunny and 75 degrees"
            with patch.object(agent, "run") as mock_run:
                mock_response = Mock()
                mock_response.content = expected_response
                mock_response.formatted_tool_calls = []
                mock_response.citations = None
                mock_response.is_paused = False
                mock_response.metadata = None
                # Based on the debug output, get_content_as_string is called, so let's make sure it works
                mock_response.get_content_as_string.return_value = expected_response
                mock_run.return_value = mock_response
                agent.print_response(input="What's the weather?", console=mock_console, stream=False)
                # Find the response panel call
                # call_args_list entries behave like (args, kwargs); index [1] is the kwargs dict.
                response_panel_calls = [
                    call
                    for call in mock_create_panel.call_args_list
                    if len(call) > 1 and "Response" in str(call[1].get("title", ""))
                ]
                assert len(response_panel_calls) > 0, "Should create a response panel"
                # Verify the response panel was created (content might be processed differently)
                response_call = response_panel_calls[0]
                assert response_call[1]["title"].startswith("Response"), "Should have Response title"
                assert response_call[1]["border_style"] == "blue", "Should have blue border"
                # The key test: verify that run() was called and returned our mock response
                assert mock_run.called, "run() should be called"
                assert mock_run.return_value.content == expected_response, "Response should have our content"
def test_markdown_content_type():
    """Test that markdown=True processes content differently than markdown=False"""
    # Patch Live so nothing is rendered to a real terminal during the test.
    with patch("agno.utils.print_response.agent.Live") as mock_live_class:
        mock_live = MagicMock()
        mock_live_class.return_value.__enter__ = Mock(return_value=mock_live)
        mock_live_class.return_value.__exit__ = Mock(return_value=None)
        with patch("agno.utils.print_response.agent.create_panel") as mock_create_panel:
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                markdown=True,
                telemetry=False,
            )
            mock_console = MagicMock(spec=Console)
            mock_console.is_jupyter = False
            markdown_content = "**Bold** and *italic* text"
            with patch.object(agent, "run") as mock_run:
                mock_response = Mock()
                mock_response.content = markdown_content
                mock_response.formatted_tool_calls = []
                mock_response.citations = None
                mock_response.is_paused = False
                mock_response.metadata = None
                mock_run.return_value = mock_response
                agent.print_response(input="Test markdown", console=mock_console, stream=False)
                # Just verify that agent.markdown is True and panels were created
                assert agent.markdown, "Agent should have markdown=True"
                # Verify panels were created
                assert mock_create_panel.called, "create_panel should have been called"
                # Check if any panel content looks like it was processed for markdown
                panel_calls = mock_create_panel.call_args_list
                response_panels = [
                    call for call in panel_calls if len(call) > 1 and "Response" in str(call[1].get("title", ""))
                ]
                assert len(response_panels) > 0, "Should create response panels even with markdown"
def test_tool_calls_panel_creation():
    """Test that tool calls are handled properly"""
    # Patch Live so nothing is rendered to a real terminal during the test.
    with patch("agno.utils.print_response.agent.Live") as mock_live_class:
        mock_live = MagicMock()
        mock_live_class.return_value.__enter__ = Mock(return_value=mock_live)
        mock_live_class.return_value.__exit__ = Mock(return_value=None)
        with patch("agno.utils.print_response.agent.create_panel") as mock_create_panel:
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                telemetry=False,
            )
            mock_console = MagicMock(spec=Console)
            mock_console.is_jupyter = False
            with patch.object(agent, "run") as mock_run:
                mock_response = Mock()
                mock_response.content = "Response with tool calls"
                mock_response.formatted_tool_calls = ["get_weather(location='Tokyo')", "get_temperature()"]
                mock_response.citations = None
                mock_response.is_paused = False
                mock_response.metadata = None
                mock_response.get_content_as_string = Mock(return_value="Response with tool calls")
                mock_run.return_value = mock_response
                agent.print_response(input="What's the weather?", console=mock_console, stream=False)
                # Debug: Print all create_panel calls
                print("All create_panel calls for tool test:")
                for i, call in enumerate(mock_create_panel.call_args_list):
                    print(f"Call {i}: {call}")
                # Check if any panel was created with tool-related content
                all_panel_calls = mock_create_panel.call_args_list
                # Look for tool calls panel specifically, or check if tools are mentioned anywhere
                # NOTE(review): this loop only breaks early; its outcome is never
                # recorded or asserted, so it has no effect on the test result.
                for call in all_panel_calls:
                    if len(call) > 1:
                        title = call[1].get("title", "")
                        content = str(call[1].get("content", ""))
                        if "Tool" in title or "get_weather" in content or "get_temperature" in content:
                            break
                assert mock_response.formatted_tool_calls, "Response should have formatted_tool_calls"
                # If no tool panel was created, maybe tool calls are shown differently
                # Let's just verify the basic functionality works
                assert len(all_panel_calls) > 0, "Some panels should be created"
def test_live_update_calls():
    """Live.update must fire at least once while panels are rendered."""
    with patch("agno.utils.print_response.agent.Live") as live_cls:
        with patch("agno.utils.print_response.agent.create_panel"):
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                telemetry=False,
            )
            fake_console = MagicMock(spec=Console)
            fake_console.is_jupyter = False
            with patch.object(agent, "run") as fake_run:
                canned = Mock()
                canned.content = "Simple response"
                canned.formatted_tool_calls = []
                canned.citations = None
                canned.is_paused = False
                canned.metadata = None
                canned.get_content_as_string = Mock(return_value="Simple response")
                fake_run.return_value = canned
                agent.print_response(input="Test", show_message=True, console=fake_console, stream=False)
                # Panels reach the terminal through Live.update, so it must run.
                live_context = live_cls.return_value.__enter__.return_value
                assert live_context.update.call_count >= 1, (
                    "Live.update should be called at least once"
                )
def test_simple_functionality():
    """Smoke test: print_response drives run(), Live, and create_panel."""
    with patch("agno.utils.print_response.agent.Live") as live_cls:
        with patch("agno.utils.print_response.agent.create_panel") as panel_factory:
            agent = Agent(
                model=OpenAIChat(id="gpt-4o-mini"),
                telemetry=False,
            )
            fake_console = MagicMock(spec=Console)
            fake_console.is_jupyter = False
            with patch.object(agent, "run") as fake_run:
                canned = Mock()
                canned.content = "Simple test response"
                canned.formatted_tool_calls = []
                canned.citations = None
                canned.is_paused = False
                canned.metadata = None
                canned.get_content_as_string = Mock(return_value="Simple test response")
                fake_run.return_value = canned
                # Call print_response
                agent.print_response(input="Test message", console=fake_console, stream=False)
                # Basic verifications that should always pass
                assert fake_run.called, "run() should be called"
                assert live_cls.called, "Live should be created"
                assert panel_factory.called, "create_panel should be called"
                # Print debug info
                print(f"Number of create_panel calls: {len(panel_factory.call_args_list)}")
                for i, call in enumerate(panel_factory.call_args_list):
                    if len(call) > 1:
                        print(f"Panel {i}: title='{call[1].get('title')}', content type={type(call[1].get('content'))}")
def test_error_handling():
    """Exceptions raised by run() must propagate out of print_response."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        telemetry=False,
    )
    fake_console = Mock(spec=Console)
    with patch("agno.utils.print_response.agent.Live") as live_cls:
        fake_live = Mock()
        live_cls.return_value = fake_live
        fake_live.__enter__ = Mock(return_value=fake_live)
        fake_live.__exit__ = Mock(return_value=None)
        with patch.object(agent, "run") as fake_run:
            # Simulate an exception in the run method
            fake_run.side_effect = Exception("Test error")
            # Check that the exception is propagated (which seems to be the current behavior)
            with pytest.raises(Exception) as exc_info:
                agent.print_response(input="Test error handling", console=fake_console, stream=False)
            # Verify it's our test exception
            assert "Test error" in str(exc_info.value)
    # The test shows that print_response doesn't handle run() exceptions,
    # which is actually useful behavior - errors should bubble up
def test_stream_vs_non_stream_behavior():
    """Test that streaming and non-streaming modes behave differently"""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        telemetry=False,
    )
    mock_console = Mock(spec=Console)
    # Patch Live so nothing is rendered to a real terminal during the test.
    with patch("agno.utils.print_response.agent.Live") as mock_live_class:
        with patch("agno.utils.print_response.agent.create_panel") as mock_create_panel:
            mock_live = Mock()
            mock_live_class.return_value = mock_live
            mock_live.__enter__ = Mock(return_value=mock_live)
            mock_live.__exit__ = Mock(return_value=None)
            # Test non-streaming first
            with patch.object(agent, "run") as mock_run:
                mock_response = Mock()
                mock_response.content = "Non-streaming response"
                mock_response.formatted_tool_calls = []
                mock_response.citations = None
                mock_response.is_paused = False
                mock_response.metadata = None
                mock_response.get_content_as_string = Mock(return_value="Non-streaming response")
                mock_run.return_value = mock_response
                agent.print_response(input="Test", console=mock_console, stream=False)
                # Reset mocks
                mock_run.reset_mock()
                mock_create_panel.reset_mock()
                # Test streaming
                mock_run.return_value = [mock_response]  # Return iterable for streaming
                agent.print_response(input="Test", console=mock_console, stream=True)
                # Verify run was called with stream=True
                assert any(call.kwargs.get("stream") for call in mock_run.call_args_list), (
                    "run() should be called with stream=True in streaming mode"
                )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_print_response.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_session.py | import uuid
from typing import Any, Dict, Optional
import pytest
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
from agno.run import RunContext
from agno.run.agent import RunEvent
def add_item(run_context: RunContext, item: str) -> str:
    """Add an item to the shopping list (sync version).

    Initializes the session state and its "shopping_list" entry when absent,
    so the tool is safe to call on a fresh session.
    """
    if run_context.session_state is None:
        run_context.session_state = {}
    # setdefault guards against a state dict that exists but has no list yet;
    # the previous direct indexing raised KeyError in that case (e.g. right
    # after the None-initialization above).
    run_context.session_state.setdefault("shopping_list", []).append(item)
    return f"The shopping list is now {run_context.session_state['shopping_list']}"
async def async_add_item(run_context: RunContext, item: str) -> str:
"""Add an item to the shopping list (async version)."""
if run_context.session_state is None:
run_context.session_state = {}
run_context.session_state["shopping_list"].append(item)
return f"The shopping list is now {run_context.session_state['shopping_list']}"
def chat_agent_factory(shared_db, session_id: Optional[str] = None, session_state: Optional[Dict[str, Any]] = None):
    """Build an Agent wired to the shared test database.

    A random session id is generated and an empty state dict is used when the
    caller does not supply them.
    """
    resolved_session_id = session_id or str(uuid.uuid4())
    resolved_state = {} if session_state is None else session_state
    return Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=resolved_session_id,
        session_state=resolved_state,
    )
def test_agent_default_state(shared_db):
    """A session-state dict passed at construction is persisted with the session."""
    sid = "session_1"
    initial_state = {"test_key": "test_value"}
    agent = chat_agent_factory(shared_db, sid, initial_state)
    run_output = agent.run("Hello, how are you?")
    assert run_output.run_id is not None
    assert agent.session_id == sid
    assert agent.session_state == initial_state
    stored = shared_db.get_session(session_id=sid, session_type=SessionType.AGENT)
    assert stored is not None
    assert stored.session_id == sid
    assert stored.session_data["session_state"] == {
        "test_key": "test_value",
    }
def test_agent_set_session_name(shared_db):
    """set_session_name persists the name into the stored session_data."""
    sid = "session_1"
    agent = chat_agent_factory(shared_db, sid)
    agent.run("Hello, how are you?")
    agent.set_session_name(session_id=sid, session_name="my_test_session")
    stored = shared_db.get_session(session_id=sid, session_type=SessionType.AGENT)
    assert stored is not None
    assert stored.session_id == sid
    assert stored.session_data["session_name"] == "my_test_session"
def test_agent_get_session_name(shared_db):
    """get_session_name returns the name previously set on the session."""
    sid = "session_1"
    agent = chat_agent_factory(shared_db, sid)
    agent.run("Hello, how are you?")
    agent.set_session_name(session_id=sid, session_name="my_test_session")
    assert agent.get_session_name() == "my_test_session"
def test_agent_get_session_state(shared_db):
    """get_session_state reflects the state the agent was created with."""
    agent = chat_agent_factory(shared_db, "session_1", session_state={"test_key": "test_value"})
    agent.run("Hello, how are you?")
    assert agent.get_session_state() == {"test_key": "test_value"}
def test_agent_get_session_metrics(shared_db):
    """Session metrics are populated and the token counts are self-consistent."""
    agent = chat_agent_factory(shared_db, "session_1")
    agent.run("Hello, how are you?")
    session_metrics = agent.get_session_metrics()
    assert session_metrics is not None
    assert session_metrics.total_tokens > 0
    assert session_metrics.input_tokens > 0
    assert session_metrics.output_tokens > 0
    # total must equal the sum of the directional counts
    assert session_metrics.total_tokens == session_metrics.input_tokens + session_metrics.output_tokens
def test_agent_session_state_switch_session_id(shared_db):
    """Each session keeps its own state when the agent switches between session ids."""
    first_session = "session_1"
    second_session = "session_2"
    agent = chat_agent_factory(shared_db, first_session, session_state={"test_key": "test_value"})

    def stored_state(session_id):
        # Fetch the persisted session and return its saved state.
        stored = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
        assert stored is not None
        assert stored.session_id == session_id
        return stored.session_data["session_state"]

    # First run with a session ID (reset should not happen)
    agent.run("What can you do?")
    assert stored_state(first_session) == {"test_key": "test_value"}
    # Second run with different session ID, and override session state
    agent.run("What can you do?", session_id=second_session, session_state={"test_key": "test_value_2"})
    assert stored_state(second_session) == {"test_key": "test_value_2"}
    # Third run with the original session ID: its state is untouched
    agent.run("What can you do?", session_id=first_session)
    assert stored_state(first_session) == {"test_key": "test_value"}
def test_agent_with_state_on_agent(shared_db):
    # Define a tool that increments our counter and returns the new value
    def add_item(run_context: RunContext, item: str) -> str:
        """Add an item to the shopping list."""
        if not run_context.session_state:
            run_context.session_state = {}
        # NOTE(review): assumes "shopping_list" already exists in the state
        # (it is seeded on the Agent below); a missing key would raise KeyError.
        run_context.session_state["shopping_list"].append(item)
        return f"The shopping list is now {run_context.session_state['shopping_list']}"
    # Create an Agent that maintains state
    agent = Agent(
        db=shared_db,
        session_state={"shopping_list": []},
        tools=[add_item],
        instructions="Current state (shopping list) is: {shopping_list}",
        markdown=True,
    )
    agent.run("Add oranges to my shopping list")
    # The {shopping_list} placeholder in the user message must be resolved from
    # session state, while the unrelated JSON braces are left untouched.
    response = agent.run(
        'Current shopping list: {shopping_list}. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
    assert (
        response.messages[1].content
        == 'Current shopping list: [\'oranges\']. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
def test_agent_with_state_on_agent_stream(shared_db):
    # Define a tool that increments our counter and returns the new value
    def add_item(run_context: RunContext, item: str) -> str:
        """Add an item to the shopping list."""
        if not run_context.session_state:
            run_context.session_state = {}
        # NOTE(review): assumes "shopping_list" already exists in the state
        # (it is seeded on the Agent below); a missing key would raise KeyError.
        run_context.session_state["shopping_list"].append(item)
        return f"The shopping list is now {run_context.session_state['shopping_list']}"
    # Create an Agent that maintains state
    agent = Agent(
        db=shared_db,
        session_state={"shopping_list": []},
        session_id=str(uuid.uuid4()),
        tools=[add_item],
        instructions="Current state (shopping list) is: {shopping_list}",
        markdown=True,
    )
    # Drain the stream; only the persisted state matters here.
    for _ in agent.run("Add oranges to my shopping list", stream=True):
        pass
    session_from_storage = shared_db.get_session(session_id=agent.session_id, session_type=SessionType.AGENT)
    assert session_from_storage.session_data["session_state"] == {"shopping_list": ["oranges"]}
    for _ in agent.run(
        'Current shopping list: {shopping_list}. Other random json ```json { "properties": { "title": { "title": "a" } } }```',
        stream=True,
    ):
        pass
    # The {shopping_list} placeholder must be resolved from session state while
    # the unrelated JSON braces are left untouched.
    run_response = agent.get_last_run_output()
    assert (
        run_response.messages[1].content
        == 'Current shopping list: [\'oranges\']. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
def test_agent_with_state_on_run(shared_db):
    # Define a tool that increments our counter and returns the new value
    def add_item(run_context: RunContext, item: str) -> str:
        """Add an item to the shopping list."""
        if not run_context.session_state:
            run_context.session_state = {}
        # NOTE(review): assumes "shopping_list" already exists in the state
        # (it is seeded on the first run below); a missing key would raise KeyError.
        run_context.session_state["shopping_list"].append(item)
        return f"The shopping list is now {run_context.session_state['shopping_list']}"
    # Create an Agent that maintains state
    agent = Agent(
        db=shared_db,
        tools=[add_item],
        instructions="Current state (shopping list) is: {shopping_list}",
        markdown=True,
    )
    # Session state is supplied per-run here rather than on the Agent.
    agent.run("Add oranges to my shopping list", session_id="session_1", session_state={"shopping_list": []})
    session_from_storage = shared_db.get_session(session_id="session_1", session_type=SessionType.AGENT)
    assert session_from_storage.session_data["session_state"] == {"shopping_list": ["oranges"]}
    response = agent.run(
        'Current shopping list: {shopping_list}. Other random json ```json { "properties": { "title": { "title": "a" } } }```',
        session_id="session_1",
    )
    assert (
        response.messages[1].content
        == 'Current shopping list: [\'oranges\']. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
def test_agent_with_state_on_run_stream(shared_db):
    # Define a tool that increments our counter and returns the new value
    def add_item(run_context: RunContext, item: str) -> str:
        """Add an item to the shopping list."""
        if not run_context.session_state:
            run_context.session_state = {}
        # NOTE(review): assumes "shopping_list" already exists in the state
        # (it is seeded on the first run below); a missing key would raise KeyError.
        run_context.session_state["shopping_list"].append(item)
        return f"The shopping list is now {run_context.session_state['shopping_list']}"
    # Create an Agent that maintains state
    agent = Agent(
        db=shared_db,
        tools=[add_item],
        instructions="Current state (shopping list) is: {shopping_list}",
        markdown=True,
    )
    # Drain the stream; only the persisted state matters here.
    for response in agent.run(
        "Add oranges to my shopping list", session_id="session_1", session_state={"shopping_list": []}, stream=True
    ):
        pass
    session_from_storage = shared_db.get_session(session_id="session_1", session_type=SessionType.AGENT)
    assert session_from_storage.session_data["session_state"] == {"shopping_list": ["oranges"]}
    for response in agent.run(
        'Current shopping list: {shopping_list}. Other random json ```json { "properties": { "title": { "title": "a" } } }```',
        session_id="session_1",
        stream=True,
    ):
        pass
    # The {shopping_list} placeholder must be resolved from session state while
    # the unrelated JSON braces are left untouched.
    run_response = agent.get_last_run_output(session_id="session_1")
    assert (
        run_response.messages[1].content
        == 'Current shopping list: [\'oranges\']. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
async def test_agent_with_state_on_run_async(shared_db):
    """Async run: per-run session state is persisted and templated into messages.

    The module-level async_add_item tool appends to the shopping list held in
    session state; the {shopping_list} placeholder in a later message must be
    resolved from that state while unrelated JSON braces pass through untouched.
    """
    # Create an Agent that maintains state. (A nested add_item helper previously
    # defined here was dead code: tools always used the module-level async_add_item.)
    agent = Agent(
        db=shared_db,
        tools=[async_add_item],
        instructions="Current state (shopping list) is: {shopping_list}",
        markdown=True,
    )
    await agent.arun("Add oranges to my shopping list", session_id="session_1", session_state={"shopping_list": []})
    session_from_storage = shared_db.get_session(session_id="session_1", session_type=SessionType.AGENT)
    assert session_from_storage.session_data["session_state"] == {"shopping_list": ["oranges"]}
    response = await agent.arun(
        'Current shopping list: {shopping_list}. Other random json ```json { "properties": { "title": { "title": "a" } } }```',
        session_id="session_1",
    )
    assert (
        response.messages[1].content
        == 'Current shopping list: [\'oranges\']. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
async def test_agent_with_state_on_run_stream_async(shared_db):
    """Async streaming run: per-run session state is persisted and templated.

    The module-level async_add_item tool appends to the shopping list held in
    session state; the {shopping_list} placeholder in a later message must be
    resolved from that state while unrelated JSON braces pass through untouched.
    """
    # Create an Agent that maintains state. (A nested add_item helper previously
    # defined here was dead code: tools always used the module-level async_add_item.)
    agent = Agent(
        db=shared_db,
        tools=[async_add_item],
        instructions="Current state (shopping list) is: {shopping_list}",
        markdown=True,
    )
    # Drain the stream; only the persisted state matters here.
    async for response in agent.arun(
        "Add oranges to my shopping list", session_id="session_1", session_state={"shopping_list": []}, stream=True
    ):
        pass
    session_from_storage = shared_db.get_session(session_id="session_1", session_type=SessionType.AGENT)
    assert session_from_storage.session_data["session_state"] == {"shopping_list": ["oranges"]}
    async for response in agent.arun(
        'Current shopping list: {shopping_list}. Other random json ```json { "properties": { "title": { "title": "a" } } }```',
        session_id="session_1",
        stream=True,
    ):
        pass
    run_response = agent.get_last_run_output(session_id="session_1")
    assert (
        run_response.messages[1].content
        == 'Current shopping list: [\'oranges\']. Other random json ```json { "properties": { "title": { "title": "a" } } }```'
    )
def test_add_session_state_to_context(shared_db):
    """With add_session_state_to_context=True the state appears in the system message."""
    agent = Agent(
        db=shared_db,
        session_state={"shopping_list": ["oranges"]},
        markdown=True,
        add_session_state_to_context=True,
    )
    run_output = agent.run("What is in my shopping list?")
    assert run_output is not None
    assert run_output.messages is not None
    # The first message is the system message and must carry the state dict.
    system_message = run_output.messages[0]
    assert "'shopping_list': ['oranges']" in system_message.content
    assert "oranges" in run_output.content.lower()
def test_session_state_in_run_output(shared_db):
    """Test that RunOutput contains the updated session_state in non-streaming mode."""
    session_id = str(uuid.uuid4())
    agent = Agent(
        db=shared_db,
        session_id=session_id,
        session_state={"shopping_list": []},
        tools=[add_item],
        instructions="You help manage shopping lists.",
        markdown=True,
    )
    response = agent.run("Add apples to my shopping list")
    # Verify RunOutput has session_state field
    assert response.session_state is not None, "RunOutput should have session_state"
    assert isinstance(response.session_state, dict), "session_state should be a dict"
    assert "shopping_list" in response.session_state, "shopping_list key should be present"
    assert isinstance(response.session_state["shopping_list"], list), "shopping_list should be a list"
    # Verify state was updated by the tool
    # (the module-level add_item tool appended the requested item)
    assert len(response.session_state.get("shopping_list", [])) == 1, "Shopping list should have 1 item"
    assert "apples" in response.session_state["shopping_list"], "Shopping list should contain apples"
def test_session_state_in_run_completed_event_stream(shared_db):
    """Test that RunCompletedEvent contains session_state in streaming mode."""
    session_id = str(uuid.uuid4())
    agent = Agent(
        db=shared_db,
        session_id=session_id,
        session_state={"shopping_list": ["bananas"]},
        tools=[add_item],
        instructions="You help manage shopping lists.",
        markdown=True,
    )
    run_completed_event = None
    # NOTE(review): matches the event by its string name here, while the async
    # variant compares against RunEvent.run_completed — presumably equivalent;
    # confirm the enum is string-valued before relying on this.
    for event in agent.run("Add oranges to my shopping list", stream=True, stream_events=True):
        if hasattr(event, "event") and event.event == "RunCompleted":
            run_completed_event = event
            break
    # Verify RunCompletedEvent structure
    assert run_completed_event is not None, "Should receive RunCompleted event"
    assert run_completed_event.session_state is not None, "RunCompletedEvent should have session_state"
    assert isinstance(run_completed_event.session_state, dict), "session_state should be a dict"
    assert "shopping_list" in run_completed_event.session_state, "shopping_list key should be present"
    assert "bananas" in run_completed_event.session_state.get("shopping_list", []), "Initial item should be preserved"
    # Verify state was updated by the tool
    assert len(run_completed_event.session_state.get("shopping_list", [])) == 2, "Shopping list should have 2 items"
    assert "oranges" in run_completed_event.session_state["shopping_list"], "Shopping list should contain oranges"
async def test_session_state_in_run_output_async(shared_db):
    """Async non-streaming run: RunOutput must expose the tool-updated session_state."""
    sid = str(uuid.uuid4())
    list_agent = Agent(
        db=shared_db,
        session_id=sid,
        session_state={"shopping_list": []},
        tools=[async_add_item],
        instructions="You help manage shopping lists.",
        markdown=True,
    )
    run_output = await list_agent.arun("Add apples to my shopping list")
    # The output must carry a dict-typed session_state holding the list.
    assert run_output.session_state is not None, "RunOutput should have session_state"
    assert isinstance(run_output.session_state, dict), "session_state should be a dict"
    assert "shopping_list" in run_output.session_state, "shopping_list key should be present"
    assert isinstance(run_output.session_state["shopping_list"], list), "shopping_list should be a list"
    # The tool call must have appended exactly one entry.
    shopping = run_output.session_state["shopping_list"]
    assert len(shopping) == 1, "Shopping list should have 1 item"
    assert "apples" in shopping, "Shopping list should contain apples"
async def test_session_state_in_run_completed_event_stream_async(shared_db):
    """Async streaming run: the RunCompleted event must carry the merged session_state."""
    sid = str(uuid.uuid4())
    list_agent = Agent(
        db=shared_db,
        session_id=sid,
        session_state={"shopping_list": ["bananas"]},
        tools=[async_add_item],
        instructions="You help manage shopping lists.",
        markdown=True,
    )
    # Bucket every emitted event by its event type.
    events_by_type = {}
    async for evt in list_agent.arun("Add oranges to my shopping list", stream=True, stream_events=True):
        events_by_type.setdefault(evt.event, []).append(evt)
    # Grab the RunCompleted event.
    assert RunEvent.run_completed in events_by_type, "Should receive RunCompleted event"
    completed = events_by_type[RunEvent.run_completed][0]
    # Structural checks on the event payload.
    assert completed.session_state is not None, "RunCompletedEvent should have session_state"
    assert isinstance(completed.session_state, dict), "session_state should be a dict"
    assert "shopping_list" in completed.session_state, "shopping_list key should be present"
    assert "bananas" in completed.session_state.get("shopping_list", []), "Initial item should be preserved"
    # The tool must have appended the new item to the seeded list.
    assert len(completed.session_state.get("shopping_list", [])) == 2, "Shopping list should have 2 items"
    assert "oranges" in completed.session_state["shopping_list"], "Shopping list should contain oranges"
@pytest.mark.asyncio
async def test_async_run_with_async_db(async_shared_db):
    """Smoke-test arun() against an async-backed database."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    result = await test_agent.arun("Hello", session_id=sid)
    # A non-streaming run must yield a response with content.
    assert result is not None
    assert result.content is not None
@pytest.mark.asyncio
async def test_async_run_stream_with_async_db(async_shared_db):
    """Smoke-test streaming arun() against an async-backed database."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    # Drain the stream, keeping only the last chunk.
    last_chunk = None
    async for chunk in test_agent.arun("Hello", session_id=sid, stream=True):
        last_chunk = chunk
    assert last_chunk is not None
    assert last_chunk.content is not None
@pytest.mark.asyncio
async def test_async_run_stream_events_with_async_db(async_shared_db):
    """Streaming with stream_events=True must emit exactly one RunCompleted event."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    # Bucket deltas by event type as they arrive.
    events_by_type = {}
    async for delta in test_agent.arun("Hello", session_id=sid, stream=True, stream_events=True):
        events_by_type.setdefault(delta.event, []).append(delta)
    assert RunEvent.run_completed in events_by_type
    completed_events = events_by_type[RunEvent.run_completed]
    assert len(completed_events) == 1
    assert completed_events[0].content is not None
@pytest.mark.asyncio
async def test_aget_session_with_async_db(async_shared_db):
    """aget_session must return the persisted session with its single run."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    stored = await test_agent.aget_session(session_id=sid)
    assert stored is not None
    assert stored.session_id == sid
    assert len(stored.runs) == 1
@pytest.mark.asyncio
async def test_asave_session_with_async_db(async_shared_db):
    """Mutations written via asave_session must survive a round-trip read."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    # Mutate the loaded session and persist it back.
    loaded = await test_agent.aget_session(session_id=sid)
    loaded.session_data["custom_key"] = "custom_value"
    await test_agent.asave_session(loaded)
    # Re-read and confirm the mutation stuck.
    reloaded = await test_agent.aget_session(session_id=sid)
    assert reloaded.session_data["custom_key"] == "custom_value"
@pytest.mark.asyncio
async def test_aget_last_run_output_with_async_db(async_shared_db):
    """aget_last_run_output must return the most recent run of the session."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("First message", session_id=sid)
    second = await test_agent.arun("Second message", session_id=sid)
    latest = await test_agent.aget_last_run_output(session_id=sid)
    # The latest run must match the second (most recent) call.
    assert latest is not None
    assert latest.run_id == second.run_id
@pytest.mark.asyncio
async def test_aget_run_output_with_async_db(async_shared_db):
    """aget_run_output must fetch a specific run back by its run_id."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    result = await test_agent.arun("Hello", session_id=sid)
    fetched = await test_agent.aget_run_output(run_id=result.run_id, session_id=sid)
    assert fetched is not None
    assert fetched.run_id == result.run_id
@pytest.mark.asyncio
async def test_aget_chat_history_with_async_db(async_shared_db):
    """Two runs should leave at least four messages (2 user + 2 assistant) in history."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    await test_agent.arun("How are you?", session_id=sid)
    history = await test_agent.aget_chat_history(session_id=sid)
    assert len(history) >= 4
@pytest.mark.asyncio
async def test_aget_session_messages_with_async_db(async_shared_db):
    """Two runs should leave at least four stored session messages."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    await test_agent.arun("How are you?", session_id=sid)
    stored_messages = await test_agent.aget_session_messages(session_id=sid)
    assert len(stored_messages) >= 4
@pytest.mark.asyncio
async def test_aget_session_state_with_async_db(async_shared_db):
    """Session state passed to arun must be readable back via aget_session_state."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid, session_state={"counter": 5, "name": "test"})
    stored_state = await test_agent.aget_session_state(session_id=sid)
    assert stored_state == {"counter": 5, "name": "test"}
@pytest.mark.asyncio
async def test_aupdate_session_state_with_async_db(async_shared_db):
    """aupdate_session_state must merge new keys and persist the result."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid, session_state={"counter": 0, "items": []})
    # The update call returns the merged state: counter replaced, items kept.
    merged = await test_agent.aupdate_session_state({"counter": 10}, session_id=sid)
    assert merged == {"counter": 10, "items": []}
    # A fresh read must also reflect the update.
    persisted = await test_agent.aget_session_state(session_id=sid)
    assert persisted["counter"] == 10
@pytest.mark.asyncio
async def test_aget_session_name_with_async_db(async_shared_db):
    """A name set with aset_session_name must be returned by aget_session_name."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    await test_agent.aset_session_name(session_id=sid, session_name="Async Session")
    stored_name = await test_agent.aget_session_name(session_id=sid)
    assert stored_name == "Async Session"
@pytest.mark.asyncio
async def test_aset_session_name_with_async_db(async_shared_db):
    """aset_session_name must return the session with the name written into session_data."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    renamed = await test_agent.aset_session_name(session_id=sid, session_name="Test Session")
    assert renamed.session_data["session_name"] == "Test Session"
@pytest.mark.asyncio
async def test_aget_session_metrics_with_async_db(async_shared_db):
    """After a run, aget_session_metrics must report a positive token count."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    session_metrics = await test_agent.aget_session_metrics(session_id=sid)
    assert session_metrics is not None
    assert session_metrics.total_tokens > 0
@pytest.mark.asyncio
async def test_adelete_session_with_async_db(async_shared_db):
    """adelete_session must remove the session so later reads return None."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    # The session must exist before deletion...
    assert await test_agent.aget_session(session_id=sid) is not None
    # ...and be gone afterwards.
    await test_agent.adelete_session(session_id=sid)
    assert await test_agent.aget_session(session_id=sid) is None
@pytest.mark.asyncio
async def test_session_persistence_across_async_runs(async_shared_db):
    """Non-streaming and streaming async runs must all accumulate in one session."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    # Run 1: plain async call.
    await test_agent.arun("First message", session_id=sid)
    # Run 2: streaming call, fully drained.
    async for _ in test_agent.arun("Second message", session_id=sid, stream=True):
        pass
    # Run 3: plain async call again.
    await test_agent.arun("Third message", session_id=sid)
    # All three runs must be recorded on the same session.
    stored = await test_agent.aget_session(session_id=sid)
    assert stored is not None
    assert len(stored.runs) == 3
@pytest.mark.asyncio
async def test_aget_session_summary_with_async_db(async_shared_db):
    """Without summaries enabled, aget_session_summary must return None."""
    sid = str(uuid.uuid4())
    test_agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    await test_agent.arun("Hello", session_id=sid)
    stored_summary = await test_agent.aget_session_summary(session_id=sid)
    # Summaries are opt-in; default config produces none.
    assert stored_summary is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_session.py",
"license": "Apache License 2.0",
"lines": 610,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_shared_model.py | from unittest.mock import patch
import pytest
from agno.agent import Agent
from agno.models.openai.chat import OpenAIChat
@pytest.fixture(scope="session")
def shared_model():
    """Session-scoped model instance shared by every agent fixture below."""
    return OpenAIChat(id="gpt-4o-mini")
@pytest.fixture
def web_agent(shared_model):
    """Agent wired with web-search tools, sharing the session-scoped model."""
    # Imported lazily so the dependency is only needed when this fixture runs.
    from agno.tools.websearch import WebSearchTools

    search_tools = WebSearchTools(cache_results=True)
    return Agent(
        name="Web Agent",
        model=shared_model,
        role="Search the web for information",
        tools=[search_tools],
    )
@pytest.fixture
def finance_agent(shared_model):
    """Agent wired with the full YFinance toolset, sharing the session-scoped model."""
    # Imported lazily so the dependency is only needed when this fixture runs.
    from agno.tools.yfinance import YFinanceTools

    finance_tools = YFinanceTools(all=True)
    return Agent(
        name="Finance Agent",
        model=shared_model,
        role="Get financial data",
        tools=[finance_tools],
    )
def test_tools_available_to_agents(web_agent, finance_agent):
    """Each agent must pass exactly its own toolset to model.invoke, even with a shared model."""
    # Finance agent: spy on invoke and inspect the tools it forwarded.
    with patch.object(finance_agent.model, "invoke", wraps=finance_agent.model.invoke) as mock_invoke:
        finance_agent.run("What is the current stock price of AAPL?")
        forwarded = mock_invoke.call_args[1].get("tools", [])
        names = [entry["function"]["name"] for entry in forwarded]
        assert names == [
            "get_current_stock_price",
            "get_company_info",
            "get_stock_fundamentals",
            "get_income_statements",
            "get_key_financial_ratios",
            "get_analyst_recommendations",
            "get_company_news",
            "get_technical_indicators",
            "get_historical_stock_prices",
        ]

    # Web agent: same check with its own (smaller) toolset.
    with patch.object(web_agent.model, "invoke", wraps=web_agent.model.invoke) as mock_invoke:
        web_agent.run("What is currently happening in the news?")
        forwarded = mock_invoke.call_args[1].get("tools", [])
        names = [entry["function"]["name"] for entry in forwarded]
        assert names == ["web_search", "search_news"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_shared_model.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_tool_call_limit.py | import pytest
from agno.agent.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.agent import RunEvent
from agno.tools.yfinance import YFinanceTools
def test_tool_use_tool_call_limit():
    """With tool_call_limit=1 only the first tool call (TSLA) should execute."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        tool_call_limit=1,
        markdown=True,
        telemetry=False,
    )
    # Fixed ticker typo: Apple's symbol is AAPL, not APPL.
    response = agent.run("Find me the current price of TSLA and AAPL.")

    # Verify tool usage, should only call the first tool
    assert len(response.tools) == 1
    assert response.tools[0].tool_name == "get_current_stock_price"
    assert response.tools[0].tool_args == {"symbol": "TSLA"}
    assert response.tools[0].result is not None

    assert response.content is not None
def test_tool_use_tool_call_limit_stream():
    """Streaming variant: the tool_call_limit must also hold across streamed events."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        tool_call_limit=1,
        markdown=True,
        telemetry=False,
    )
    # Fixed ticker typo: Apple's symbol is AAPL, not APPL.
    response_stream = agent.run("Find me the current price of TSLA and AAPL.", stream=True, stream_events=True)

    # Collect completed tool calls from the event stream.
    tools = []
    for chunk in response_stream:
        if chunk.event == RunEvent.tool_call_completed:
            tools.append(chunk.tool)

    assert len(tools) == 1
    assert tools[0].tool_name == "get_current_stock_price"
    assert tools[0].tool_args == {"symbol": "TSLA"}
    assert tools[0].result is not None
@pytest.mark.asyncio
async def test_tool_use_tool_call_limit_async():
    """Async variant: tool_call_limit=1 must stop after the first (TSLA) tool call."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        tool_call_limit=1,
        markdown=True,
        telemetry=False,
    )
    # Fixed ticker typo: Apple's symbol is AAPL, not APPL.
    response = await agent.arun("Find me the current price of TSLA and AAPL.")

    # Verify tool usage, should only call the first tool
    assert len(response.tools) == 1
    assert response.tools[0].tool_name == "get_current_stock_price"
    assert response.tools[0].tool_args == {"symbol": "TSLA"}
    assert response.tools[0].result is not None

    assert response.content is not None
@pytest.mark.asyncio
async def test_tool_use_tool_call_limit_stream_async():
    """Async streaming variant: exactly one completed tool call under tool_call_limit=1."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        tool_call_limit=1,
        markdown=True,
        telemetry=False,
    )
    # Fixed ticker typo: Apple's symbol is AAPL, not APPL.
    tools = []
    async for chunk in agent.arun("Find me the current price of TSLA and AAPL.", stream=True, stream_events=True):
        if chunk.event == RunEvent.tool_call_completed:
            tools.append(chunk.tool)

    assert len(tools) == 1
    assert tools[0].tool_name == "get_current_stock_price"
    assert tools[0].tool_args == {"symbol": "TSLA"}
    assert tools[0].result is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_tool_call_limit.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_db.py | """Integration tests for the setup and main methods of the PostgresDb class"""
from datetime import datetime, timezone
from unittest.mock import patch
from sqlalchemy import text
from agno.db.postgres.postgres import PostgresDb
def test_init_with_db_url():
    """Constructing PostgresDb from a URL must record config and yield a working connection."""
    url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
    database = PostgresDb(db_url=url, session_table="test_sessions")

    # Constructor must record the URL, table name, and default schema.
    assert database.db_url == url
    assert database.session_table_name == "test_sessions"
    assert database.db_schema == "ai"

    # Smoke-test the connection with a trivial query.
    with database.Session() as db_session:
        assert db_session.execute(text("SELECT 1")).scalar() == 1
def test_create_session_table_integration(postgres_db_real):
    """_create_table('sessions') must materialize the table with the expected columns."""
    postgres_db_real._create_table("test_sessions", "sessions")

    # The table must now be visible in information_schema.
    with postgres_db_real.Session() as db_session:
        row = db_session.execute(
            text(
                "SELECT table_name FROM information_schema.tables WHERE table_schema = :schema AND table_name = :table"
            ),
            {"schema": "test_schema", "table": "test_sessions"},
        ).fetchone()
        assert row is not None

    # Pull the full column catalog and check key columns and their types.
    with postgres_db_real.Session() as db_session:
        column_rows = db_session.execute(
            text(
                "SELECT column_name, data_type, is_nullable "
                "FROM information_schema.columns "
                "WHERE table_schema = :schema AND table_name = :table "
                "ORDER BY ordinal_position"
            ),
            {"schema": "test_schema", "table": "test_sessions"},
        )
        columns = {name: {"type": dtype, "nullable": nullable} for name, dtype, nullable in column_rows}

    assert "session_id" in columns
    assert columns["session_id"]["nullable"] == "NO"
    assert "created_at" in columns
    assert columns["created_at"]["type"] == "bigint"
    assert "session_data" in columns
    assert columns["session_data"]["type"] in ["json", "jsonb"]
def test_create_metrics_table_with_constraints(postgres_db_real):
    """The metrics table must be created with its date/period unique constraint."""
    postgres_db_real._create_table("test_metrics", "metrics")

    # Query pg's constraint catalog for UNIQUE constraints on the new table.
    with postgres_db_real.Session() as db_session:
        rows = db_session.execute(
            text(
                "SELECT constraint_name FROM information_schema.table_constraints "
                "WHERE table_schema = :schema AND table_name = :table "
                "AND constraint_type = 'UNIQUE'"
            ),
            {"schema": "test_schema", "table": "test_metrics"},
        )
        constraint_names = [name for (name,) in rows]
        assert any("uq_metrics_date_period" in name for name in constraint_names)
def test_create_table_with_indexes(postgres_db_real):
    """The memories table must be created with indexes on user_id and updated_at."""
    postgres_db_real._create_table("test_memories", "memories")

    with postgres_db_real.Session() as db_session:
        rows = db_session.execute(
            text("SELECT indexname FROM pg_indexes WHERE schemaname = :schema AND tablename = :table"),
            {"schema": "test_schema", "table": "test_memories"},
        )
        index_names = [name for (name,) in rows]

    # Both expected columns should appear in at least one index name.
    assert any("user_id" in name for name in index_names)
    assert any("updated_at" in name for name in index_names)
def test_get_table_with_create_table_if_not_found(postgres_db_real):
    """_get_table must return None for a missing table unless asked to create it."""
    # Without the create flag, a missing table yields None.
    assert postgres_db_real._get_table("sessions", create_table_if_not_found=False) is None
    # With the flag, the table is created and returned.
    assert postgres_db_real._get_table("sessions", create_table_if_not_found=True) is not None
def test_get_or_create_existing_table(postgres_db_real):
    """_get_or_create_table must not recreate a table that already exists."""
    # Materialize the table up front.
    postgres_db_real._create_table("test_sessions", "sessions")

    # Drop the cached attribute so the lookup goes back to the database.
    if hasattr(postgres_db_real, "session_table"):
        delattr(postgres_db_real, "session_table")

    # Fetching again must reuse the existing table without calling _create_table.
    with patch.object(postgres_db_real, "_create_table") as mock_create:
        fetched = postgres_db_real._get_or_create_table("test_sessions", "sessions")
        mock_create.assert_not_called()
        assert fetched.name == "test_sessions"
def test_full_workflow(postgres_db_real):
    """Test a complete workflow of creating and using tables.

    End-to-end smoke test: create tables via _get_table, confirm they are
    cached as attributes, insert one session row, and read it back.
    """
    # Get tables (will create them)
    session_table = postgres_db_real._get_table("sessions", create_table_if_not_found=True)
    postgres_db_real._get_table("memories", create_table_if_not_found=True)
    # Verify tables are cached
    # _get_table is expected to cache each table object as an attribute on the db instance.
    assert hasattr(postgres_db_real, "session_table")
    assert hasattr(postgres_db_real, "memory_table")
    # Verify we can insert data (basic smoke test)
    with postgres_db_real.Session() as sess:
        # Insert a test session
        sess.execute(
            session_table.insert().values(
                session_id="test-session-123",
                session_type="agent",
                # created_at is stored as an integer; *1000 makes this a millisecond
                # epoch timestamp, matching the bigint column checked elsewhere.
                created_at=int(datetime.now(timezone.utc).timestamp() * 1000),
                session_data={"test": "data"},
            )
        )
        sess.commit()
        # Query it back
        result = sess.execute(session_table.select().where(session_table.c.session_id == "test-session-123")).fetchone()
        assert result is not None
        assert result.session_type == "agent"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_db.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_evals.py | """Integration tests for the Eval related methods of the PostgresDb class"""
import time
import pytest
from agno.db.postgres.postgres import PostgresDb
from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
@pytest.fixture(autouse=True)
def cleanup_evals(postgres_db_real: PostgresDb):
    """Autouse fixture that wipes the evals table once each test finishes."""
    yield
    # Best-effort cleanup: roll back rather than fail if the delete errors.
    with postgres_db_real.Session() as db_session:
        try:
            table = postgres_db_real._get_table("evals", create_table_if_not_found=True)
            db_session.execute(table.delete())
            db_session.commit()
        except Exception:
            db_session.rollback()
@pytest.fixture
def sample_eval_run_agent() -> EvalRunRecord:
    """Sample accuracy EvalRunRecord tied to an agent."""
    # Build the payload separately so the record construction stays readable.
    accuracy_payload = {
        "score": 0.85,
        "total_questions": 100,
        "correct_answers": 85,
        "test_duration": 120.5,
        "categories": ["math", "logic", "reasoning"],
        "details": {"math_score": 0.90, "logic_score": 0.80, "reasoning_score": 0.85},
    }
    return EvalRunRecord(
        run_id="test_eval_run_agent_1",
        agent_id="test_agent_1",
        model_id="gpt-4",
        model_provider="openai",
        name="Agent Accuracy Test",
        evaluated_component_name="Test Agent",
        eval_type=EvalType.ACCURACY,
        eval_data=accuracy_payload,
    )
@pytest.fixture
def sample_eval_run_team() -> EvalRunRecord:
    """Sample performance EvalRunRecord tied to a team."""
    # Build the payload separately so the record construction stays readable.
    performance_payload = {
        "response_time": 45.2,
        "throughput": 25.7,
        "success_rate": 0.92,
        "collaboration_score": 0.88,
        "efficiency_metrics": {
            "task_completion_time": 30.5,
            "resource_utilization": 0.75,
            "coordination_overhead": 0.12,
        },
    }
    return EvalRunRecord(
        run_id="test_eval_run_team_1",
        team_id="test_team_1",
        model_id="gpt-4-turbo",
        model_provider="openai",
        name="Team Performance Test",
        evaluated_component_name="Test Team",
        eval_type=EvalType.PERFORMANCE,
        eval_data=performance_payload,
    )
@pytest.fixture
def sample_eval_run_workflow() -> EvalRunRecord:
    """Sample reliability EvalRunRecord tied to a workflow."""
    # Build the payload separately so the record construction stays readable.
    reliability_payload = {
        "uptime": 0.999,
        "error_rate": 0.001,
        "recovery_time": 2.5,
        "consistency_score": 0.95,
        "fault_tolerance": {
            "max_failures_handled": 5,
            "recovery_success_rate": 1.0,
            "mean_time_to_recovery": 1.8,
        },
    }
    return EvalRunRecord(
        run_id="test_eval_run_workflow_1",
        workflow_id="test_workflow_1",
        model_id="claude-3-opus",
        model_provider="anthropic",
        name="Workflow Reliability Test",
        evaluated_component_name="Test Workflow",
        eval_type=EvalType.RELIABILITY,
        eval_data=reliability_payload,
    )
def test_create_eval_run_agent(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """create_eval_run must echo back an EvalRunRecord with the agent fields intact."""
    created = postgres_db_real.create_eval_run(sample_eval_run_agent)
    assert created is not None
    assert isinstance(created, EvalRunRecord)

    # Every field on the returned record must match the input.
    assert created.run_id == sample_eval_run_agent.run_id
    assert created.agent_id == sample_eval_run_agent.agent_id
    assert created.eval_type == sample_eval_run_agent.eval_type
    assert created.eval_data == sample_eval_run_agent.eval_data
    assert created.name == sample_eval_run_agent.name
    assert created.model_id == sample_eval_run_agent.model_id
def test_create_eval_run_team(postgres_db_real: PostgresDb, sample_eval_run_team: EvalRunRecord):
    """create_eval_run must echo back an EvalRunRecord with the team fields intact."""
    created = postgres_db_real.create_eval_run(sample_eval_run_team)
    assert created is not None
    assert isinstance(created, EvalRunRecord)

    # Every field on the returned record must match the input.
    assert created.run_id == sample_eval_run_team.run_id
    assert created.team_id == sample_eval_run_team.team_id
    assert created.eval_type == sample_eval_run_team.eval_type
    assert created.eval_data == sample_eval_run_team.eval_data
def test_get_eval_run_agent(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """get_eval_run must round-trip a created agent eval run."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)

    fetched = postgres_db_real.get_eval_run(sample_eval_run_agent.run_id)
    assert fetched is not None
    assert isinstance(fetched, EvalRunRecord)

    # The fetched record must match what was stored.
    assert fetched.run_id == sample_eval_run_agent.run_id
    assert fetched.agent_id == sample_eval_run_agent.agent_id
    assert fetched.eval_type == sample_eval_run_agent.eval_type
    assert fetched.eval_data == sample_eval_run_agent.eval_data
def test_get_eval_run_agent_without_deserialization(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """With deserialize=False, get_eval_run must return a raw dict instead of a record."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)

    raw = postgres_db_real.get_eval_run(sample_eval_run_agent.run_id, deserialize=False)
    assert raw is not None
    assert isinstance(raw, dict)
    assert raw["run_id"] == sample_eval_run_agent.run_id
    assert raw["agent_id"] == sample_eval_run_agent.agent_id
def test_delete_eval_run_agent(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """delete_eval_run must remove the run so later reads return None."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)

    # The run must exist before deletion...
    assert postgres_db_real.get_eval_run(sample_eval_run_agent.run_id) is not None

    postgres_db_real.delete_eval_run(sample_eval_run_agent.run_id)

    # ...and be gone afterwards.
    assert postgres_db_real.get_eval_run(sample_eval_run_agent.run_id) is None
def test_delete_multiple_eval_runs_agent(postgres_db_real: PostgresDb):
    """delete_eval_runs must remove exactly the given run_ids and leave the rest.

    Note: the original kept an `eval_runs` accumulator list that was never
    read; it has been removed.
    """
    # Create three eval runs, tracking only their ids.
    run_ids = []
    for i in range(3):
        eval_run = EvalRunRecord(
            run_id=f"test_eval_run_{i}",
            agent_id=f"test_agent_{i}",
            eval_type=EvalType.ACCURACY,
            eval_data={"score": 0.8 + (i * 0.05)},
            name=f"Test Eval {i}",
        )
        run_ids.append(eval_run.run_id)
        postgres_db_real.create_eval_run(eval_run)

    # All three must exist before the bulk delete.
    for run_id in run_ids:
        assert postgres_db_real.get_eval_run(run_id) is not None

    # Delete the first two only.
    postgres_db_real.delete_eval_runs(run_ids[:2])

    # First two gone, third untouched.
    assert postgres_db_real.get_eval_run(run_ids[0]) is None
    assert postgres_db_real.get_eval_run(run_ids[1]) is None
    assert postgres_db_real.get_eval_run(run_ids[2]) is not None
def test_get_eval_runs_no_filters(postgres_db_real: PostgresDb):
    """get_eval_runs with no filters must return every stored run as a record.

    Note: the original kept an `eval_runs` accumulator list that was never
    read; it has been removed.
    """
    # Create three eval runs.
    for i in range(3):
        postgres_db_real.create_eval_run(
            EvalRunRecord(
                run_id=f"test_eval_run_{i}",
                agent_id=f"test_agent_{i}",
                eval_type=EvalType.ACCURACY,
                eval_data={"score": 0.8 + (i * 0.05)},
                name=f"Test Eval {i}",
            )
        )

    result = postgres_db_real.get_eval_runs()
    assert isinstance(result, list)
    assert len(result) == 3
    assert all(isinstance(run, EvalRunRecord) for run in result)
def test_get_eval_runs_with_agent_filter(
    postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord, sample_eval_run_team: EvalRunRecord
):
    """Filtering by agent_id must return only the matching agent run."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)
    postgres_db_real.create_eval_run(sample_eval_run_team)

    filtered = postgres_db_real.get_eval_runs(agent_id="test_agent_1")
    assert isinstance(filtered, list)
    assert len(filtered) == 1
    assert filtered[0].agent_id == "test_agent_1"
def test_get_eval_runs_with_team_filter(
    postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord, sample_eval_run_team: EvalRunRecord
):
    """Filtering by team_id must return only the matching team run."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)
    postgres_db_real.create_eval_run(sample_eval_run_team)

    filtered = postgres_db_real.get_eval_runs(team_id="test_team_1")
    assert isinstance(filtered, list)
    assert len(filtered) == 1
    assert filtered[0].team_id == "test_team_1"
def test_get_eval_runs_with_workflow_filter(postgres_db_real: PostgresDb, sample_eval_run_workflow: EvalRunRecord):
    """Filtering by workflow_id must return only the matching workflow run."""
    postgres_db_real.create_eval_run(sample_eval_run_workflow)

    filtered = postgres_db_real.get_eval_runs(workflow_id="test_workflow_1")
    assert isinstance(filtered, list)
    assert len(filtered) == 1
    assert filtered[0].workflow_id == "test_workflow_1"
def test_get_eval_runs_with_model_filter(
    postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord, sample_eval_run_team: EvalRunRecord
):
    """Filtering by model_id must return only the run using that model."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)
    postgres_db_real.create_eval_run(sample_eval_run_team)

    filtered = postgres_db_real.get_eval_runs(model_id="gpt-4")
    assert isinstance(filtered, list)
    assert len(filtered) == 1
    assert filtered[0].model_id == "gpt-4"
def test_get_eval_runs_with_eval_type_filter(
    postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord, sample_eval_run_team: EvalRunRecord
):
    """Filtering by eval_type must return only runs of that type."""
    postgres_db_real.create_eval_run(sample_eval_run_agent)
    postgres_db_real.create_eval_run(sample_eval_run_team)

    filtered = postgres_db_real.get_eval_runs(eval_type=[EvalType.ACCURACY])
    assert isinstance(filtered, list)
    assert len(filtered) == 1
    assert filtered[0].eval_type == EvalType.ACCURACY
def test_get_eval_runs_with_filter_type(
    postgres_db_real: PostgresDb,
    sample_eval_run_agent: EvalRunRecord,
    sample_eval_run_team: EvalRunRecord,
    sample_eval_run_workflow: EvalRunRecord,
):
    """filter_type must partition results by component kind (agent/team/workflow)."""
    # Store one run of each component kind.
    for record in (sample_eval_run_agent, sample_eval_run_team, sample_eval_run_workflow):
        postgres_db_real.create_eval_run(record)

    # Agent filter returns only the agent-linked run.
    agent_matches = postgres_db_real.get_eval_runs(filter_type=EvalFilterType.AGENT)
    assert len(agent_matches) == 1
    assert agent_matches[0].agent_id is not None

    # Team filter returns only the team-linked run.
    team_matches = postgres_db_real.get_eval_runs(filter_type=EvalFilterType.TEAM)
    assert len(team_matches) == 1
    assert team_matches[0].team_id is not None

    # Workflow filter returns only the workflow-linked run.
    workflow_matches = postgres_db_real.get_eval_runs(filter_type=EvalFilterType.WORKFLOW)
    assert len(workflow_matches) == 1
    assert workflow_matches[0].workflow_id is not None
def test_get_eval_runs_with_pagination(postgres_db_real: PostgresDb):
    """Paging through eval runs must give full, non-overlapping pages."""
    # Seed five runs.
    for i in range(5):
        postgres_db_real.create_eval_run(
            EvalRunRecord(
                run_id=f"test_eval_run_{i}",
                agent_id=f"test_agent_{i}",
                eval_type=EvalType.ACCURACY,
                eval_data={"score": 0.8 + (i * 0.05)},
                name=f"Test Eval {i}",
            )
        )

    # Fetch two pages of two runs each.
    first_page = postgres_db_real.get_eval_runs(limit=2, page=1)
    assert isinstance(first_page, list)
    assert len(first_page) == 2

    second_page = postgres_db_real.get_eval_runs(limit=2, page=2)
    assert isinstance(second_page, list)
    assert len(second_page) == 2

    # Pages must not share any run_id.
    assert {run.run_id for run in first_page}.isdisjoint({run.run_id for run in second_page})
def test_get_eval_runs_with_sorting(postgres_db_real: PostgresDb):
    """Test getting eval runs with sorting.

    Fix: the original accumulated the created records into an `eval_runs`
    list that was never read — the unused accumulator is removed.
    """
    # Create eval runs, spacing them out so created_at timestamps differ
    for i in range(3):
        eval_run = EvalRunRecord(
            run_id=f"test_eval_run_{i}",
            agent_id=f"test_agent_{i}",
            eval_type=EvalType.ACCURACY,
            eval_data={"score": 0.8 + (i * 0.05)},
            name=f"Test Eval {i}",
        )
        postgres_db_real.create_eval_run(eval_run)
        time.sleep(0.1)  # Small delay to ensure different timestamps

    # Default sorting (created_at desc) returns all rows
    results = postgres_db_real.get_eval_runs()
    assert isinstance(results, list)
    assert len(results) == 3

    # Explicit sorting by run_id ascending returns rows in id order
    results_asc = postgres_db_real.get_eval_runs(sort_by="run_id", sort_order="asc")
    assert isinstance(results_asc, list)
    assert [run.run_id for run in results_asc] == ["test_eval_run_0", "test_eval_run_1", "test_eval_run_2"]
def test_get_eval_runs_without_deserialization(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """With deserialize=False, get_eval_runs returns raw rows plus a total count"""
    postgres_db_real.create_eval_run(sample_eval_run_agent)

    rows, total = postgres_db_real.get_eval_runs(deserialize=False)

    assert isinstance(rows, list)
    assert len(rows) == 1
    # Raw rows are RowMapping objects: dict-like access, but not dict instances
    assert rows[0]["run_id"] == sample_eval_run_agent.run_id
    assert total == 1
def test_rename_eval_run(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """rename_eval_run should update the name and return the updated record"""
    postgres_db_real.create_eval_run(sample_eval_run_agent)

    renamed = postgres_db_real.rename_eval_run(sample_eval_run_agent.run_id, "Renamed Eval Run")

    assert renamed is not None
    assert isinstance(renamed, EvalRunRecord)
    assert renamed.name == "Renamed Eval Run"
    assert renamed.run_id == sample_eval_run_agent.run_id
def test_rename_eval_run_without_deserialization(postgres_db_real: PostgresDb, sample_eval_run_agent: EvalRunRecord):
    """With deserialize=False, rename_eval_run returns the updated row as a dict"""
    postgres_db_real.create_eval_run(sample_eval_run_agent)

    raw = postgres_db_real.rename_eval_run(sample_eval_run_agent.run_id, "Renamed Eval Run Dict", deserialize=False)

    assert raw is not None
    assert isinstance(raw, dict)
    assert raw["name"] == "Renamed Eval Run Dict"
    assert raw["run_id"] == sample_eval_run_agent.run_id
def test_eval_table_creation_and_structure(postgres_db_real: PostgresDb):
    """The evals table should be created with the expected name, schema and columns"""
    table = postgres_db_real._get_table("evals", create_table_if_not_found=True)
    assert table is not None
    assert table.name == "test_evals"
    assert table.schema == postgres_db_real.db_schema

    # Every essential column must be present on the created table
    present_columns = {column.name for column in table.columns}
    for expected in (
        "run_id",
        "agent_id",
        "team_id",
        "workflow_id",
        "model_id",
        "model_provider",
        "name",
        "evaluated_component_name",
        "eval_type",
        "eval_data",
        "created_at",
        "updated_at",
    ):
        assert expected in present_columns, f"Missing column: {expected}"
def test_comprehensive_eval_run_fields(postgres_db_real: PostgresDb):
    """Round-trip an EvalRunRecord with every field populated and verify nothing is lost"""
    # Deeply nested eval payload to exercise JSON serialization
    payload = {
        "primary_score": 0.95,
        "secondary_metrics": {"latency": 150.0, "throughput": 45.2, "error_rate": 0.02},
        "test_conditions": {"environment": "production", "duration_minutes": 60, "concurrent_users": 100},
        "detailed_results": [
            {"test_id": "test_1", "score": 0.98, "category": "accuracy"},
            {"test_id": "test_2", "score": 0.92, "category": "speed"},
            {"test_id": "test_3", "score": 0.95, "category": "reliability"},
        ],
    }
    comprehensive_eval = EvalRunRecord(
        run_id="comprehensive_eval_run",
        agent_id="comprehensive_agent",
        model_id="gpt-4-comprehensive",
        model_provider="openai",
        name="Comprehensive Eval Test",
        evaluated_component_name="Comprehensive Agent",
        eval_type=EvalType.RELIABILITY,
        eval_data=payload,
    )

    # Persist the record
    created = postgres_db_real.create_eval_run(comprehensive_eval)
    assert created is not None

    # Fetch it back and confirm every field survived the round-trip
    retrieved = postgres_db_real.get_eval_run(comprehensive_eval.run_id)
    assert retrieved is not None
    assert isinstance(retrieved, EvalRunRecord)
    for field in (
        "run_id",
        "agent_id",
        "model_id",
        "model_provider",
        "name",
        "evaluated_component_name",
        "eval_type",
        "eval_data",
    ):
        assert getattr(retrieved, field) == getattr(comprehensive_eval, field)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_evals.py",
"license": "Apache License 2.0",
"lines": 383,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_knowledge.py | """Integration tests for the Knowledge related methods of the PostgresDb class"""
import time
import pytest
from agno.db.postgres.postgres import PostgresDb
from agno.db.schemas.knowledge import KnowledgeRow
@pytest.fixture(autouse=True)
def cleanup_knowledge(postgres_db_real: PostgresDb):
    """Automatically wipe the knowledge table once each test finishes"""
    yield

    # Best-effort cleanup: roll back rather than fail if the delete errors out
    with postgres_db_real.Session() as session:
        try:
            table = postgres_db_real._get_table("knowledge")
            session.execute(table.delete())
            session.commit()
        except Exception:
            session.rollback()
@pytest.fixture
def sample_knowledge_document() -> KnowledgeRow:
    """Return a sample KnowledgeRow representing a document"""
    doc_metadata = {
        "format": "markdown",
        "language": "en",
        "version": "1.0.0",
        "tags": ["api", "documentation", "reference"],
        "author": "Engineering Team",
        "last_reviewed": "2024-01-15",
    }
    return KnowledgeRow(
        id="test_knowledge_doc_1",
        name="API Documentation",
        description="Comprehensive API documentation for the platform",
        metadata=doc_metadata,
        type="document",
        size=15420,
        linked_to=None,
        access_count=45,
        status="active",
        status_message="Document is up to date and ready for use",
        created_at=int(time.time()) - 3600,  # created one hour ago
        updated_at=int(time.time()) - 1800,  # updated thirty minutes ago
    )
@pytest.fixture
def sample_knowledge_dataset() -> KnowledgeRow:
    """Return a sample KnowledgeRow representing a dataset"""
    dataset_metadata = {
        "format": "json",
        "schema_version": "2.1",
        "total_conversations": 5000,
        "date_range": {"start": "2023-01-01", "end": "2023-12-31"},
        "categories": ["support", "billing", "technical", "general"],
        "data_quality": {"completeness": 0.98, "accuracy": 0.95, "consistency": 0.92},
    }
    return KnowledgeRow(
        id="test_knowledge_dataset_1",
        name="Customer Support Conversations",
        description="Training dataset containing customer support chat conversations",
        metadata=dataset_metadata,
        type="dataset",
        size=2048000,  # ~2MB
        linked_to="training_pipeline_v2",
        access_count=12,
        status="processed",
        status_message="Dataset has been processed and is ready for training",
        created_at=int(time.time()) - 7200,  # created two hours ago
        updated_at=int(time.time()) - 3600,  # updated one hour ago
    )
@pytest.fixture
def sample_knowledge_model() -> KnowledgeRow:
    """Return a sample KnowledgeRow representing a deployed ML model"""
    model_metadata = {
        "model_type": "bert-base-uncased",
        "framework": "transformers",
        "training_data": "customer_support_conversations",
        "performance_metrics": {"accuracy": 0.94, "precision": 0.92, "recall": 0.91, "f1_score": 0.915},
        "hyperparameters": {"learning_rate": 2e-5, "batch_size": 32, "epochs": 10},
        "deployment_info": {
            "environment": "production",
            "endpoint": "https://api.example.com/classify",
            "version": "3.2",
        },
    }
    return KnowledgeRow(
        id="test_knowledge_model_1",
        name="Text Classification Model v3.2",
        description="Fine-tuned BERT model for classifying customer support tickets",
        metadata=model_metadata,
        type="model",
        size=440000000,  # ~440MB
        linked_to="classification_service",
        access_count=234,
        status="deployed",
        status_message="Model is deployed and serving predictions",
        created_at=int(time.time()) - 86400,  # created one day ago
        updated_at=int(time.time()) - 7200,  # updated two hours ago
    )
def test_upsert_knowledge_content_document(postgres_db_real: PostgresDb, sample_knowledge_document: KnowledgeRow):
    """Upserting a document row returns a KnowledgeRow with all fields intact"""
    stored = postgres_db_real.upsert_knowledge_content(sample_knowledge_document)

    assert stored is not None
    assert isinstance(stored, KnowledgeRow)
    for attr in ("id", "name", "description", "type", "metadata", "size"):
        assert getattr(stored, attr) == getattr(sample_knowledge_document, attr)
def test_upsert_knowledge_content_dataset(postgres_db_real: PostgresDb, sample_knowledge_dataset: KnowledgeRow):
    """Upserting a dataset row returns a KnowledgeRow with matching fields"""
    stored = postgres_db_real.upsert_knowledge_content(sample_knowledge_dataset)

    assert stored is not None
    assert isinstance(stored, KnowledgeRow)
    for attr in ("id", "name", "type", "linked_to"):
        assert getattr(stored, attr) == getattr(sample_knowledge_dataset, attr)
def test_upsert_knowledge_content_model(postgres_db_real: PostgresDb, sample_knowledge_model: KnowledgeRow):
    """Upserting a model row returns a KnowledgeRow with matching fields"""
    stored = postgres_db_real.upsert_knowledge_content(sample_knowledge_model)

    assert stored is not None
    assert isinstance(stored, KnowledgeRow)
    for attr in ("id", "name", "type", "status"):
        assert getattr(stored, attr) == getattr(sample_knowledge_model, attr)
def test_upsert_knowledge_content_update(postgres_db_real: PostgresDb, sample_knowledge_document: KnowledgeRow):
    """A second upsert with modified fields should update the existing row"""
    # Seed the original row
    postgres_db_real.upsert_knowledge_content(sample_knowledge_document)

    # Mutate a few fields and upsert again
    sample_knowledge_document.description = "Updated API documentation with new endpoints"
    sample_knowledge_document.access_count = 50
    sample_knowledge_document.status = "updated"
    updated = postgres_db_real.upsert_knowledge_content(sample_knowledge_document)

    assert updated is not None
    assert updated.description == "Updated API documentation with new endpoints"
    assert updated.access_count == 50
    assert updated.status == "updated"
def test_get_knowledge_content_by_id(postgres_db_real: PostgresDb, sample_knowledge_document: KnowledgeRow):
    """get_knowledge_content should fetch a stored row by its id"""
    postgres_db_real.upsert_knowledge_content(sample_knowledge_document)

    fetched = postgres_db_real.get_knowledge_content(sample_knowledge_document.id)  # type: ignore

    assert fetched is not None
    assert isinstance(fetched, KnowledgeRow)
    for attr in ("id", "name", "description", "metadata"):
        assert getattr(fetched, attr) == getattr(sample_knowledge_document, attr)
def test_get_knowledge_contents_no_pagination(postgres_db_real: PostgresDb):
    """Test getting all knowledge contents without pagination.

    Fix: the original appended each created row to a `knowledge_rows` list
    that was never read — the unused accumulator is removed.
    """
    # Create multiple knowledge rows
    for i in range(3):
        postgres_db_real.upsert_knowledge_content(
            KnowledgeRow(
                id=f"test_knowledge_{i}",
                name=f"Test Knowledge {i}",
                description=f"Description for test knowledge {i}",
                type="document",
                size=1000 + (i * 100),
                access_count=i * 5,
                status="active",
            )
        )

    result, total_count = postgres_db_real.get_knowledge_contents()
    assert isinstance(result, list)
    assert len(result) == 3
    assert total_count == 3
    assert all(isinstance(row, KnowledgeRow) for row in result)
def test_get_knowledge_contents_with_pagination(postgres_db_real: PostgresDb):
    """get_knowledge_contents should paginate without overlapping pages"""
    # Seed five rows to paginate over
    for idx in range(5):
        postgres_db_real.upsert_knowledge_content(
            KnowledgeRow(
                id=f"test_knowledge_page_{idx}",
                name=f"Test Knowledge Page {idx}",
                description=f"Description for test knowledge page {idx}",
                type="document",
                size=1000 + (idx * 100),
                access_count=idx * 2,
                status="active",
            )
        )

    first_page, total_count = postgres_db_real.get_knowledge_contents(limit=2, page=1)
    assert len(first_page) == 2
    assert total_count == 5

    second_page, _ = postgres_db_real.get_knowledge_contents(limit=2, page=2)
    assert len(second_page) == 2

    # Pages must contain disjoint sets of row ids
    assert {row.id for row in first_page}.isdisjoint({row.id for row in second_page})
def test_get_knowledge_contents_with_sorting(postgres_db_real: PostgresDb):
    """Test getting knowledge contents with sorting.

    Fix: the original accumulated rows into a `knowledge_rows` list that was
    never read — the unused accumulator is removed.
    """
    # Create knowledge rows with different sizes for sorting
    for i, size in enumerate([5000, 1000, 3000]):
        postgres_db_real.upsert_knowledge_content(
            KnowledgeRow(
                id=f"test_knowledge_sort_{i}",
                name=f"Test Knowledge Sort {i}",
                description=f"Description for sorting test {i}",
                type="document",
                size=size,
                access_count=i * 3,
                status="active",
            )
        )
        time.sleep(0.1)  # Small delay for created_at timestamps

    # Sorting by size ascending should return rows in increasing size order
    results_asc, _ = postgres_db_real.get_knowledge_contents(sort_by="size", sort_order="asc")
    assert len(results_asc) == 3
    assert [row.size for row in results_asc] == [1000, 3000, 5000]
def test_delete_knowledge_content(postgres_db_real: PostgresDb, sample_knowledge_document: KnowledgeRow):
    """Deleting knowledge content removes it from the database"""
    postgres_db_real.upsert_knowledge_content(sample_knowledge_document)
    row_id = sample_knowledge_document.id

    # The row must exist before deletion
    assert postgres_db_real.get_knowledge_content(row_id) is not None  # type: ignore

    postgres_db_real.delete_knowledge_content(row_id)  # type: ignore

    # ...and must be gone afterwards
    assert postgres_db_real.get_knowledge_content(row_id) is None  # type: ignore
def test_knowledge_table_creation_and_structure(postgres_db_real: PostgresDb):
    """The knowledge table should be created with the expected name, schema and columns"""
    table = postgres_db_real._get_table("knowledge", create_table_if_not_found=True)
    assert table is not None
    assert table.name == "test_knowledge"
    assert table.schema == postgres_db_real.db_schema

    # Every essential column must be present on the created table
    present_columns = {column.name for column in table.columns}
    for expected in (
        "id",
        "name",
        "description",
        "metadata",
        "type",
        "size",
        "linked_to",
        "access_count",
        "status",
        "status_message",
        "created_at",
        "updated_at",
    ):
        assert expected in present_columns, f"Missing column: {expected}"
def test_comprehensive_knowledge_row_fields(postgres_db_real: PostgresDb):
    """Round-trip a KnowledgeRow with every field populated and verify nothing is lost"""
    # Deeply nested metadata to exercise JSON serialization
    rich_metadata = {
        "comprehensive": True,
        "nested_data": {
            "level1": {"level2": {"data": "deeply nested value", "numbers": [1, 2, 3, 4, 5], "boolean": True}}
        },
        "arrays": ["item1", "item2", "item3"],
        "performance_data": {
            "metrics": {"accuracy": 0.98, "precision": 0.97, "recall": 0.96, "f1": 0.965},
            "benchmarks": [
                {"name": "test1", "score": 95.5},
                {"name": "test2", "score": 98.2},
                {"name": "test3", "score": 92.8},
            ],
        },
    }
    comprehensive_knowledge = KnowledgeRow(
        id="comprehensive_knowledge_test",
        name="Comprehensive Knowledge Test",
        description="A comprehensive knowledge row to test all field handling",
        metadata=rich_metadata,
        type="comprehensive_test",
        size=1234567,
        linked_to="related_comprehensive_item",
        access_count=999,
        status="comprehensive_active",
        status_message="All fields are populated and being tested comprehensively",
        created_at=int(time.time()) - 86400,
        updated_at=int(time.time()) - 3600,
    )

    # Persist the row
    stored = postgres_db_real.upsert_knowledge_content(comprehensive_knowledge)
    assert stored is not None

    # Fetch it back and confirm every field survived the round-trip
    retrieved = postgres_db_real.get_knowledge_content(comprehensive_knowledge.id)  # type: ignore
    assert retrieved is not None
    assert isinstance(retrieved, KnowledgeRow)
    for field in (
        "id",
        "name",
        "description",
        "metadata",
        "type",
        "size",
        "linked_to",
        "access_count",
        "status",
        "status_message",
        "created_at",
        "updated_at",
    ):
        assert getattr(retrieved, field) == getattr(comprehensive_knowledge, field)
def test_knowledge_with_auto_generated_id(postgres_db_real: PostgresDb):
    """KnowledgeRow should auto-generate an id when none is provided"""
    row = KnowledgeRow(
        name="Auto ID Knowledge",
        description="Knowledge row that should get an auto-generated ID",
        type="auto_test",
        size=500,
        status="active",
    )

    # An id must have been generated at construction time
    assert row.id is not None
    assert len(row.id) > 0

    # Upserting preserves the generated id
    stored = postgres_db_real.upsert_knowledge_content(row)
    assert stored is not None
    assert stored.id == row.id
def test_knowledge_with_none_optional_fields(postgres_db_real: PostgresDb):
    """A row with every optional field set to None should round-trip cleanly"""
    minimal = KnowledgeRow(
        id="minimal_knowledge_test",
        name="Minimal Knowledge",
        description="Knowledge with minimal fields",
        metadata=None,
        type=None,
        size=None,
        linked_to=None,
        access_count=None,
        status=None,
        status_message=None,
        created_at=None,
        updated_at=None,
    )

    stored = postgres_db_real.upsert_knowledge_content(minimal)
    assert stored is not None
    assert stored.name == "Minimal Knowledge"
    assert stored.description == "Knowledge with minimal fields"

    # None-valued optional fields must not break retrieval
    fetched = postgres_db_real.get_knowledge_content(minimal.id)  # type: ignore
    assert fetched is not None
    assert fetched.name == "Minimal Knowledge"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_knowledge.py",
"license": "Apache License 2.0",
"lines": 337,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_memory.py | """Integration tests for the Memory related methods of the PostgresDb class"""
from datetime import datetime
import pytest
from agno.db.postgres.postgres import PostgresDb
from agno.db.schemas.memory import UserMemory
@pytest.fixture(autouse=True)
def cleanup_memories(postgres_db_real: PostgresDb):
    """Automatically wipe the memories table once each test finishes"""
    yield

    # Best-effort cleanup: roll back rather than fail if the delete errors out
    with postgres_db_real.Session() as session:
        try:
            table = postgres_db_real._get_table("memories")
            session.execute(table.delete())
            session.commit()
        except Exception:
            session.rollback()
@pytest.fixture
def sample_user_memory() -> UserMemory:
    """Fixture returning a sample UserMemory with all common fields populated.

    Used by the insert/update/get/delete tests below as a representative record.
    """
    return UserMemory(
        memory_id="test_memory_1",
        memory="User prefers coffee over tea and likes working in the morning",
        topics=["preferences", "work_habits"],
        user_id="test_user_1",
        input="I prefer coffee and work best in the morning",
        updated_at=datetime.now(),
        feedback="positive",
        agent_id="test_agent_1",
        team_id="test_team_1",
    )
def test_insert_memory(postgres_db_real: PostgresDb, sample_user_memory):
    """upsert_user_memory should insert a new memory and return it"""
    stored = postgres_db_real.upsert_user_memory(sample_user_memory)

    assert stored is not None
    assert isinstance(stored, UserMemory)
    for attr in ("memory_id", "memory", "topics", "user_id", "agent_id", "team_id"):
        assert getattr(stored, attr) == getattr(sample_user_memory, attr)
def test_update_memory(postgres_db_real: PostgresDb, sample_user_memory):
    """upsert_user_memory should update an already-persisted memory"""
    postgres_db_real.upsert_user_memory(sample_user_memory)

    # Mutate the record and upsert again
    sample_user_memory.memory = "Updated: User prefers tea now and works best at night"
    sample_user_memory.topics = ["preferences", "work_habits", "updated"]
    updated = postgres_db_real.upsert_user_memory(sample_user_memory)

    assert updated is not None
    assert isinstance(updated, UserMemory)
    assert updated.memory == sample_user_memory.memory
    assert updated.topics == sample_user_memory.topics
def test_upsert_memory_without_deserialization(postgres_db_real: PostgresDb, sample_user_memory):
    """With deserialize=False, upsert_user_memory returns a plain dict"""
    raw = postgres_db_real.upsert_user_memory(sample_user_memory, deserialize=False)

    assert raw is not None
    assert isinstance(raw, dict)
    assert raw["memory_id"] == sample_user_memory.memory_id
def test_get_memory_by_id(postgres_db_real: PostgresDb, sample_user_memory):
    """get_user_memory should fetch a stored memory as a UserMemory"""
    postgres_db_real.upsert_user_memory(sample_user_memory)

    fetched = postgres_db_real.get_user_memory(memory_id=sample_user_memory.memory_id)

    assert fetched is not None
    assert isinstance(fetched, UserMemory)
    assert fetched.memory_id == sample_user_memory.memory_id
    assert fetched.memory == sample_user_memory.memory
def test_get_user_memory_without_deserialize(postgres_db_real: PostgresDb, sample_user_memory):
    """With deserialize=False, get_user_memory returns the raw dict row"""
    postgres_db_real.upsert_user_memory(sample_user_memory)

    raw = postgres_db_real.get_user_memory(memory_id=sample_user_memory.memory_id, deserialize=False)

    assert raw is not None
    assert isinstance(raw, dict)
    assert raw["memory_id"] == sample_user_memory.memory_id
def test_delete_user_memory(postgres_db_real: PostgresDb, sample_user_memory):
    """delete_user_memory removes the memory from the database"""
    postgres_db_real.upsert_user_memory(sample_user_memory)
    mem_id = sample_user_memory.memory_id

    # Present before deletion
    assert postgres_db_real.get_user_memory(memory_id=mem_id) is not None

    postgres_db_real.delete_user_memory(mem_id)

    # Gone afterwards
    assert postgres_db_real.get_user_memory(memory_id=mem_id) is None
def test_delete_multiple_user_memories(postgres_db_real: PostgresDb):
    """delete_user_memories removes exactly the requested memories"""
    # Seed three memories
    memory_ids = []
    for i in range(3):
        mem = UserMemory(
            memory_id=f"memory_{i}", memory=f"Test memory {i}", user_id="test_user", updated_at=datetime.now()
        )
        postgres_db_real.upsert_user_memory(mem)
        memory_ids.append(mem.memory_id)

    # Delete the first two in one call
    postgres_db_real.delete_user_memories(memory_ids[:2])

    # The first two are gone...
    assert postgres_db_real.get_user_memory(memory_id="memory_0") is None
    assert postgres_db_real.get_user_memory(memory_id="memory_1") is None
    # ...but the third one survives
    assert postgres_db_real.get_user_memory(memory_id="memory_2") is not None
def test_get_all_memory_topics(postgres_db_real: PostgresDb):
    """get_all_memory_topics returns the union of topics across all memories"""
    # Seed memories with overlapping topic lists
    specs = [
        ("memory_1", "Memory 1", ["topic1", "topic2"], "user1"),
        ("memory_2", "Memory 2", ["topic2", "topic3"], "user2"),
        ("memory_3", "Memory 3", ["topic1", "topic4"], "user3"),
    ]
    for memory_id, text, topics, user_id in specs:
        postgres_db_real.upsert_user_memory(
            UserMemory(
                memory_id=memory_id,
                memory=text,
                topics=topics,
                user_id=user_id,
                updated_at=datetime.now(),
            )
        )

    # Duplicated topics collapse into a single unique set
    assert set(postgres_db_real.get_all_memory_topics()) == {"topic1", "topic2", "topic3", "topic4"}
def test_get_user_memory_stats(postgres_db_real: PostgresDb):
    """get_user_memory_stats aggregates memory counts per user"""
    # Two memories belonging to the same user, via different agents
    for memory_id, text, agent_id in (("memory_1", "Memory 1", "agent1"), ("memory_2", "Memory 2", "agent2")):
        postgres_db_real.upsert_user_memory(
            UserMemory(memory_id=memory_id, memory=text, user_id="user1", agent_id=agent_id, updated_at=datetime.now())
        )

    stats, count = postgres_db_real.get_user_memory_stats()

    # One distinct user, holding both memories
    assert count == 1
    assert len(stats) == 1
    assert stats[0]["user_id"] == "user1"
    assert stats[0]["total_memories"] == 2
def test_comprehensive_user_memory_fields(postgres_db_real: PostgresDb):
    """All UserMemory fields should survive an upsert/get round-trip"""
    comprehensive_memory = UserMemory(
        memory_id="comprehensive_memory",
        memory="This is a comprehensive test memory with detailed information about user preferences and behaviors",
        topics=["preferences", "behavior", "detailed", "comprehensive"],
        user_id="comprehensive_user",
        input="Original input that led to this memory being created",
        updated_at=datetime(2021, 1, 1, 12, 0, 0),
        feedback="Very positive feedback about this memory",
        agent_id="comprehensive_agent",
        team_id="comprehensive_team",
    )

    checked_fields = ("memory_id", "memory", "topics", "user_id", "input", "agent_id", "team_id")

    # The upsert result must echo every field back
    stored = postgres_db_real.upsert_user_memory(comprehensive_memory)
    assert stored is not None
    assert isinstance(stored, UserMemory)
    for field in checked_fields:
        assert getattr(stored, field) == getattr(comprehensive_memory, field)

    # A fresh fetch must return the same values
    retrieved = postgres_db_real.get_user_memory(memory_id=comprehensive_memory.memory_id)  # type: ignore
    assert retrieved is not None and isinstance(retrieved, UserMemory)
    for field in checked_fields:
        assert getattr(retrieved, field) == getattr(comprehensive_memory, field)
def test_upsert_memories(postgres_db_real: PostgresDb):
    """upsert_memories inserts a batch of new memories in one call"""
    batch = [
        UserMemory(
            memory_id=f"bulk_memory_{i}",
            memory=f"Bulk test memory {i} with user preferences and information",
            topics=[f"topic_{i}", "bulk_test"],
            user_id=f"user_{i}",
            input=f"Input that generated memory {i}",
            agent_id=f"agent_{i}",
            updated_at=datetime.now(),
        )
        for i in range(5)
    ]

    results = postgres_db_real.upsert_memories(batch)

    # Every memory in the batch comes back, fully populated
    assert len(results) == 5
    for i, stored in enumerate(results):
        assert isinstance(stored, UserMemory)
        assert stored.memory_id == f"bulk_memory_{i}"
        assert stored.user_id == f"user_{i}"
        assert stored.agent_id == f"agent_{i}"
        assert stored.topics is not None
        assert f"topic_{i}" in stored.topics
        assert "bulk_test" in stored.topics
def test_upsert_memories_update(postgres_db_real: PostgresDb):
    """upsert_memories updates rows whose memory_id already exists"""
    # Seed the initial batch
    initial_batch = [
        UserMemory(
            memory_id=f"update_memory_{i}",
            memory=f"Original memory {i}",
            topics=["original"],
            user_id=f"user_{i}",
            input=f"Original input {i}",
            updated_at=datetime.now(),
        )
        for i in range(3)
    ]
    postgres_db_real.upsert_memories(initial_batch)

    # Re-upsert with the same ids but changed content
    replacement_batch = [
        UserMemory(
            memory_id=f"update_memory_{i}",  # Same ID for update
            memory=f"Updated memory {i} with more information",
            topics=["updated", "enhanced"],
            user_id=f"user_{i}",
            input=f"Updated input {i}",
            feedback="positive",
            agent_id=f"new_agent_{i}",
            updated_at=datetime.now(),
        )
        for i in range(3)
    ]
    results = postgres_db_real.upsert_memories(replacement_batch)
    assert len(results) == 3

    # Every row must reflect the replacement values
    for i, stored in enumerate(results):
        assert isinstance(stored, UserMemory)
        assert stored.memory_id == f"update_memory_{i}"
        assert "Updated memory" in stored.memory
        assert stored.topics == ["updated", "enhanced"]
        assert stored.agent_id == f"new_agent_{i}"
def test_upsert_memories_performance(postgres_db_real: PostgresDb):
    """Ensure the bulk upsert method is considerably faster than individual upserts.

    Fix: elapsed time is now measured with `time.perf_counter()`, which is a
    monotonic high-resolution clock, instead of `time.time()`, which can jump
    (NTP adjustments, DST) and skew the comparison.
    """
    import time as time_module

    # Create memories
    memories = []
    for i in range(30):
        memory = UserMemory(
            memory_id=f"perf_memory_{i}",
            memory=f"Performance test memory {i} with detailed information",
            topics=["performance", "test"],
            user_id="perf_user",
            agent_id=f"perf_agent_{i}",
            updated_at=datetime.now(),
        )
        memories.append(memory)

    # Time one-at-a-time upserts
    start_time = time_module.perf_counter()
    for memory in memories:
        postgres_db_real.upsert_user_memory(memory)
    individual_time = time_module.perf_counter() - start_time

    # Clean up so the bulk upsert starts from an empty table
    memory_ids = [m.memory_id for m in memories if m.memory_id]
    postgres_db_real.delete_user_memories(memory_ids)

    # Time the bulk upsert of the same batch
    start_time = time_module.perf_counter()
    postgres_db_real.upsert_memories(memories)
    bulk_time = time_module.perf_counter() - start_time

    # Verify all memories were created
    all_memories = postgres_db_real.get_user_memories(user_id="perf_user")
    assert len(all_memories) == 30

    # Bulk should be at least 2x faster
    assert bulk_time < individual_time / 2, (
        f"Bulk upsert is not fast enough: {bulk_time:.3f}s vs {individual_time:.3f}s"
    )
def test_get_user_memory_with_user_id_filter(postgres_db_real: PostgresDb):
    """get_user_memory should respect the optional user_id filter"""
    # One memory per user
    postgres_db_real.upsert_user_memory(
        UserMemory(memory_id="memory_user1", memory="Memory for user 1", user_id="user1", updated_at=datetime.now())
    )
    postgres_db_real.upsert_user_memory(
        UserMemory(memory_id="memory_user2", memory="Memory for user 2", user_id="user2", updated_at=datetime.now())
    )

    # Matching user_id returns the memory
    found = postgres_db_real.get_user_memory(memory_id="memory_user1", user_id="user1")
    assert found is not None
    assert isinstance(found, UserMemory)
    assert found.memory_id == "memory_user1"
    assert found.user_id == "user1"

    # Mismatched user_id yields nothing
    assert postgres_db_real.get_user_memory(memory_id="memory_user1", user_id="user2") is None

    # No filter at all still finds the memory
    unfiltered = postgres_db_real.get_user_memory(memory_id="memory_user1")
    assert unfiltered is not None
    assert unfiltered.user_id == "user1"
def test_delete_user_memory_with_user_id_filter(postgres_db_real: PostgresDb):
    """delete_user_memory should only delete when the user_id filter matches"""
    # One memory per user
    postgres_db_real.upsert_user_memory(
        UserMemory(memory_id="del_memory_user1", memory="Memory for user 1", user_id="user1", updated_at=datetime.now())
    )
    postgres_db_real.upsert_user_memory(
        UserMemory(memory_id="del_memory_user2", memory="Memory for user 2", user_id="user2", updated_at=datetime.now())
    )

    # Deleting with the wrong user_id must be a no-op
    postgres_db_real.delete_user_memory(memory_id="del_memory_user1", user_id="user2")
    assert postgres_db_real.get_user_memory(memory_id="del_memory_user1") is not None

    # Deleting with the correct user_id removes the row
    postgres_db_real.delete_user_memory(memory_id="del_memory_user1", user_id="user1")
    assert postgres_db_real.get_user_memory(memory_id="del_memory_user1") is None

    # The other user's memory is untouched
    assert postgres_db_real.get_user_memory(memory_id="del_memory_user2") is not None
def test_delete_user_memories_with_user_id_filter(postgres_db_real: PostgresDb):
    """Verify bulk delete_user_memories honors the user_id filter."""
    # Two memories per user so bulk deletion can be checked per owner
    seeds = [
        ("bulk_del_m1", "Memory 1", "user1"),
        ("bulk_del_m2", "Memory 2", "user1"),
        ("bulk_del_m3", "Memory 3", "user2"),
        ("bulk_del_m4", "Memory 4", "user2"),
    ]
    for mem_id, mem_text, uid in seeds:
        postgres_db_real.upsert_user_memory(
            UserMemory(memory_id=mem_id, memory=mem_text, user_id=uid, updated_at=datetime.now())
        )
    # Deleting user1's ids while filtering on user2 must not remove anything
    postgres_db_real.delete_user_memories(memory_ids=["bulk_del_m1", "bulk_del_m2"], user_id="user2")
    for mem_id in ("bulk_del_m1", "bulk_del_m2"):
        assert postgres_db_real.get_user_memory(memory_id=mem_id) is not None
    # With the matching user_id the rows are removed
    postgres_db_real.delete_user_memories(memory_ids=["bulk_del_m1", "bulk_del_m2"], user_id="user1")
    for mem_id in ("bulk_del_m1", "bulk_del_m2"):
        assert postgres_db_real.get_user_memory(memory_id=mem_id) is None
    # user2's rows survive
    for mem_id in ("bulk_del_m3", "bulk_del_m4"):
        assert postgres_db_real.get_user_memory(memory_id=mem_id) is not None
def test_delete_user_memories_without_user_id_filter(postgres_db_real: PostgresDb):
    """Verify delete_user_memories without a user_id filter removes every requested id."""
    seeds = [
        ("no_filter_m1", "Memory 1", "user1"),
        ("no_filter_m2", "Memory 2", "user2"),
        ("no_filter_m3", "Memory 3", "user3"),
    ]
    for mem_id, mem_text, uid in seeds:
        postgres_db_real.upsert_user_memory(
            UserMemory(memory_id=mem_id, memory=mem_text, user_id=uid, updated_at=datetime.now())
        )
    # Without a user_id filter every requested id is removed regardless of owner
    postgres_db_real.delete_user_memories(memory_ids=["no_filter_m1", "no_filter_m2"])
    assert postgres_db_real.get_user_memory(memory_id="no_filter_m1") is None
    assert postgres_db_real.get_user_memory(memory_id="no_filter_m2") is None
    # The id that was not requested survives
    assert postgres_db_real.get_user_memory(memory_id="no_filter_m3") is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_memory.py",
"license": "Apache License 2.0",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_metrics.py | """Integration tests for the Metrics related methods of the PostgresDb class"""
import time
from datetime import date, datetime, timedelta, timezone
from typing import List
import pytest
from agno.db.base import SessionType
from agno.db.postgres.postgres import PostgresDb
from agno.run.agent import RunOutput
from agno.run.base import RunStatus
from agno.run.team import TeamRunOutput
from agno.session.agent import AgentSession
from agno.session.team import TeamSession
@pytest.fixture(autouse=True)
def cleanup_metrics_and_sessions(postgres_db_real: PostgresDb):
    """Fixture to clean-up metrics and session rows after each test"""
    yield
    # Best-effort truncation of both tables once the test has finished
    with postgres_db_real.Session() as session:
        try:
            for table_type in ("metrics", "sessions"):
                table = postgres_db_real._get_table(table_type, create_table_if_not_found=True)
                session.execute(table.delete())
            session.commit()
        except Exception:
            session.rollback()
@pytest.fixture
def sample_agent_sessions_for_metrics() -> List[AgentSession]:
    """Fixture returning sample AgentSessions for metrics testing"""
    start_ts = int(time.time()) - 86400  # 1 day ago
    result: List[AgentSession] = []
    for idx in range(3):
        run = RunOutput(
            run_id=f"test_run_{idx}",
            agent_id=f"test_agent_{idx}",
            user_id=f"test_user_{idx}",
            status=RunStatus.completed,
            messages=[],
        )
        result.append(
            AgentSession(
                session_id=f"test_session_{idx}",
                agent_id=f"test_agent_{idx}",
                user_id=f"test_user_{idx}",
                session_data={"session_name": f"Test Session {idx}"},
                agent_data={"name": f"Test Agent {idx}", "model": "gpt-4"},
                runs=[run],
                created_at=start_ts + (idx * 3600),  # sessions spaced 1 hour apart
                updated_at=start_ts + (idx * 3600),
            )
        )
    return result
def test_get_all_sessions_for_metrics_calculation(postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics):
    """Test the _get_all_sessions_for_metrics_calculation util method"""
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    rows = postgres_db_real._get_all_sessions_for_metrics_calculation()
    assert len(rows) == 3
    # Every row must expose the fields the metrics calculation relies on
    for key in ("user_id", "session_data", "runs", "created_at", "session_type"):
        assert all(key in row for row in rows)
def test_get_all_sessions_for_metrics_calculation_with_timestamp_filter(
    postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics
):
    """Test the _get_all_sessions_for_metrics_calculation util method with timestamp filters"""
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    # Use the middle session's timestamp as the pivot for both filters
    pivot = sample_agent_sessions_for_metrics[1].created_at
    # start filter: only the middle and last sessions qualify
    assert len(postgres_db_real._get_all_sessions_for_metrics_calculation(start_timestamp=pivot)) == 2
    # end filter: only the first and middle sessions qualify
    assert len(postgres_db_real._get_all_sessions_for_metrics_calculation(end_timestamp=pivot)) == 2
def test_get_metrics_calculation_starting_date_no_metrics_no_sessions(postgres_db_real: PostgresDb):
    """Test the _get_metrics_calculation_starting_date util method with no metrics and no sessions"""
    table = postgres_db_real._get_table("metrics", create_table_if_not_found=True)
    # Nothing to compute from: the starting date should be undefined
    assert postgres_db_real._get_metrics_calculation_starting_date(table) is None
def test_get_metrics_calculation_starting_date_no_metrics_with_sessions(
    postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics
):
    """Test the _get_metrics_calculation_starting_date util method with no metrics but with sessions"""
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    table = postgres_db_real._get_table("metrics", create_table_if_not_found=True)
    starting_date = postgres_db_real._get_metrics_calculation_starting_date(table)
    assert starting_date is not None
    # With sessions but no metric rows, calculation starts at the earliest session's date
    earliest = datetime.fromtimestamp(sample_agent_sessions_for_metrics[0].created_at, tz=timezone.utc).date()
    assert starting_date == earliest
def test_calculate_metrics_no_sessions(postgres_db_real: PostgresDb):
    """Ensure the calculate_metrics method returns None when there are no sessions"""
    # With an empty sessions table there is nothing to aggregate
    assert postgres_db_real.calculate_metrics() is None
def test_calculate_metrics(postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics):
    """Ensure the calculate_metrics method returns a list of metrics when there are sessions"""
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    # With sessions present, a list of metric records is produced
    calculated = postgres_db_real.calculate_metrics()
    assert calculated is not None
    assert isinstance(calculated, list)
def test_get_metrics_with_date_filter(postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics):
    """Test the get_metrics method with date filters"""
    # Seed sessions and populate the metrics table
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    postgres_db_real.calculate_metrics()
    # Unfiltered retrieval returns a list plus a latest-update timestamp
    metrics, latest_update = postgres_db_real.get_metrics()
    assert isinstance(metrics, list)
    assert latest_update is not None
    # A date-range filter also yields a list
    today = date.today()
    filtered, _ = postgres_db_real.get_metrics(starting_date=today - timedelta(days=1), ending_date=today)
    assert isinstance(filtered, list)
def test_metrics_table_creation(postgres_db_real: PostgresDb):
    """Ensure the metrics table is created properly"""
    table = postgres_db_real._get_table("metrics", create_table_if_not_found=True)
    assert table is not None
    assert table.name == "test_metrics"
    assert table.schema == postgres_db_real.db_schema
    # Spot-check the columns the metrics flow depends on
    present = {column.name for column in table.columns}
    for expected in ("date", "completed", "updated_at"):
        assert expected in present, f"Missing column: {expected}"
def test_calculate_metrics_idempotency(postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics):
    """Ensure the calculate_metrics method is idempotent"""
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    # First run produces metric records
    assert postgres_db_real.calculate_metrics() is not None
    # A second run must not re-process already completed dates
    rerun = postgres_db_real.calculate_metrics()
    assert rerun is None or isinstance(rerun, list)
def test_get_metrics_with_invalid_date_range(postgres_db_real: PostgresDb):
    """Test get_metrics with invalid date range (end before start)"""
    today = date.today()
    # An inverted range (end before start) yields no rows and no update timestamp
    metrics, latest_update = postgres_db_real.get_metrics(starting_date=today, ending_date=today - timedelta(days=1))
    assert metrics == []
    assert latest_update is None
def test_metrics_flow(postgres_db_real: PostgresDb, sample_agent_sessions_for_metrics):
    """Comprehensive test for the full metrics flow: insert sessions, calculate metrics, retrieve metrics"""
    # Insert and sanity-check the sessions
    for sample in sample_agent_sessions_for_metrics:
        postgres_db_real.upsert_session(sample)
    assert len(postgres_db_real.get_sessions(session_type=SessionType.AGENT)) == 3
    # Calculate, then retrieve, the metrics
    assert postgres_db_real.calculate_metrics() is not None
    metrics, latest_update = postgres_db_real.get_metrics()
    assert isinstance(metrics, list)
    assert latest_update is not None
    # Exactly one daily row should exist, with all relevant fields populated
    assert len(metrics) == 1 and metrics[0] is not None
    row = metrics[0]
    assert row["completed"] is True
    assert row["agent_runs_count"] == 3
    assert row["team_runs_count"] == 0
    assert row["workflow_runs_count"] == 0
    for field in ("updated_at", "created_at", "date"):
        assert row[field] is not None
    assert row["aggregation_period"] == "daily"
@pytest.fixture
def sample_multi_day_sessions() -> List[AgentSession]:
    """Fixture returning sessions spread across multiple days.

    Creates 2 sessions on day 1, 3 on day 2 and 1 on day 3, starting three
    days in the past. Sessions within a day are spaced one hour apart.

    Returns:
        List[AgentSession]: the six sessions in chronological order.
    """

    def _build(prefix: str, title: str, index: int, created_at: int) -> AgentSession:
        """Build one AgentSession (with a single completed run) for the given day/index."""
        run = RunOutput(
            run_id=f"{prefix}_run_{index}",
            agent_id=f"{prefix}_agent_{index}",
            user_id=f"{prefix}_user_{index}",
            status=RunStatus.completed,
            messages=[],
        )
        return AgentSession(
            session_id=f"{prefix}_session_{index}",
            agent_id=f"{prefix}_agent_{index}",
            user_id=f"{prefix}_user_{index}",
            session_data={"session_name": f"{title} Session {index}"},
            agent_data={"name": f"{title} Agent {index}", "model": "gpt-4"},
            runs=[run],
            created_at=created_at,
            updated_at=created_at,
        )

    base_time = int(time.time()) - (3 * 86400)  # 3 days ago
    # (id prefix, human-readable title, sessions that day, day offset from base)
    schedule = [("day1", "Day 1", 2, 0), ("day2", "Day 2", 3, 1), ("day3", "Day 3", 1, 2)]
    sessions: List[AgentSession] = []
    for prefix, title, count, day_offset in schedule:
        day_base = base_time + day_offset * 86400
        for i in range(count):
            # Sessions within one day are 1 hour apart
            sessions.append(_build(prefix, title, i, day_base + i * 3600))
    return sessions
def test_calculate_metrics_multiple_days(postgres_db_real: PostgresDb, sample_multi_day_sessions):
    """Test that metrics calculation creates separate rows for different days"""
    for sample in sample_multi_day_sessions:
        postgres_db_real.upsert_session(sample)
    # One metrics record is expected per distinct day
    calculated = postgres_db_real.calculate_metrics()
    assert calculated is not None
    assert isinstance(calculated, list)
    assert len(calculated) == 3
    metrics, latest_update = postgres_db_real.get_metrics()
    assert len(metrics) == 3
    assert latest_update is not None
    # Check per-day run counts in chronological order: 2, 3 and 1 agent runs
    ordered = sorted(metrics, key=lambda row: row["date"])
    for row, expected_agent_runs in zip(ordered, (2, 3, 1)):
        assert row["agent_runs_count"] == expected_agent_runs
        assert row["team_runs_count"] == 0
        assert row["workflow_runs_count"] == 0
        assert row["completed"] is True
def test_calculate_metrics_mixed_session_types_multiple_days(postgres_db_real: PostgresDb):
    """Test metrics calculation with different session types across multiple days.

    Day 1 gets one agent session and one team session; day 2 gets two agent
    sessions. The metrics table should end up with one row per day, counting
    agent and team runs separately.
    """
    base_time = int(time.time()) - (2 * 86400)  # 2 days ago
    sessions = []
    # Day 1: Agent and Team sessions
    day1_base = base_time
    # Agent session
    agent_run = RunOutput(
        run_id="mixed_agent_run",
        agent_id="mixed_agent",
        user_id="mixed_user",
        status=RunStatus.completed,
        messages=[],
    )
    agent_session = AgentSession(
        session_id="mixed_agent_session",
        agent_id="mixed_agent",
        user_id="mixed_user",
        session_data={"session_name": "Mixed Agent Session"},
        agent_data={"name": "Mixed Agent", "model": "gpt-4"},
        runs=[agent_run],
        created_at=day1_base,
        updated_at=day1_base,
    )
    sessions.append(agent_session)
    # Team session, created 1 hour after the agent session but on the same day
    team_run = TeamRunOutput(
        run_id="mixed_team_run",
        team_id="mixed_team",
        status=RunStatus.completed,
        messages=[],
        created_at=day1_base + 3600,
    )
    team_session = TeamSession(
        session_id="mixed_team_session",
        team_id="mixed_team",
        user_id="mixed_user",
        session_data={"session_name": "Mixed Team Session"},
        team_data={"name": "Mixed Team", "model": "gpt-4"},
        runs=[team_run],
        created_at=day1_base + 3600,
        updated_at=day1_base + 3600,
    )
    sessions.append(team_session)
    # Day 2: Only Agent sessions
    day2_base = base_time + 86400
    for i in range(2):
        agent_run = RunOutput(
            run_id=f"day2_mixed_run_{i}",
            agent_id=f"day2_mixed_agent_{i}",
            user_id="mixed_user",
            status=RunStatus.completed,
            messages=[],
        )
        agent_session = AgentSession(
            session_id=f"day2_mixed_session_{i}",
            agent_id=f"day2_mixed_agent_{i}",
            user_id="mixed_user",
            session_data={"session_name": f"Day 2 Mixed Session {i}"},
            agent_data={"name": f"Day 2 Mixed Agent {i}", "model": "gpt-4"},
            runs=[agent_run],
            created_at=day2_base + (i * 3600),
            updated_at=day2_base + (i * 3600),
        )
        sessions.append(agent_session)
    # Insert all sessions
    for session in sessions:
        postgres_db_real.upsert_session(session)
    # Calculate metrics
    result = postgres_db_real.calculate_metrics()
    assert result is not None
    assert len(result) == 2  # Should have 2 metrics records for 2 different days
    # Retrieve metrics
    metrics, _ = postgres_db_real.get_metrics()
    assert len(metrics) == 2
    # Sort by date so day 1 comes first regardless of retrieval order
    metrics_sorted = sorted(metrics, key=lambda x: x["date"])
    # Day 1: 1 agent run + 1 team run
    day1_metrics = metrics_sorted[0]
    assert day1_metrics["agent_runs_count"] == 1
    assert day1_metrics["team_runs_count"] == 1
    assert day1_metrics["workflow_runs_count"] == 0
    # Day 2: 2 agent runs
    day2_metrics = metrics_sorted[1]
    assert day2_metrics["agent_runs_count"] == 2
    assert day2_metrics["team_runs_count"] == 0
    assert day2_metrics["workflow_runs_count"] == 0
def test_get_metrics_date_range_multiple_days(postgres_db_real: PostgresDb, sample_multi_day_sessions):
    """Test retrieving metrics with date range filters across multiple days"""
    for sample in sample_multi_day_sessions:
        postgres_db_real.upsert_session(sample)
    postgres_db_real.calculate_metrics()
    # Derive the covered date range from the seeded sessions
    first_day = datetime.fromtimestamp(sample_multi_day_sessions[0].created_at, tz=timezone.utc).date()
    last_day = datetime.fromtimestamp(sample_multi_day_sessions[-1].created_at, tz=timezone.utc).date()
    # Full range: all three days
    full_range, _ = postgres_db_real.get_metrics(starting_date=first_day, ending_date=last_day)
    assert len(full_range) == 3
    # Partial range: first two days only
    partial, _ = postgres_db_real.get_metrics(starting_date=first_day, ending_date=first_day + timedelta(days=1))
    assert len(partial) == 2
    # Single day: only day one, which had two sessions
    single, _ = postgres_db_real.get_metrics(starting_date=first_day, ending_date=first_day)
    assert len(single) == 1
    assert single[0]["agent_runs_count"] == 2
def test_metrics_calculation_multiple_days(postgres_db_real: PostgresDb):
    """Ensure that metrics calculation can handle calculating metrics for multiple days at once.

    Runs two rounds: day 1 sessions are inserted and aggregated, then day 2
    sessions are added and aggregated incrementally. Each round should add
    exactly one new metrics row.
    """
    base_time = int(time.time()) - (2 * 86400)  # 2 days ago
    # Add sessions for Day 1
    day1_sessions = []
    for i in range(2):
        agent_run = RunOutput(
            run_id=f"incremental_day1_run_{i}",
            agent_id=f"incremental_day1_agent_{i}",
            user_id="incremental_user",
            status=RunStatus.completed,
            messages=[],
        )
        session = AgentSession(
            session_id=f"incremental_day1_session_{i}",
            agent_id=f"incremental_day1_agent_{i}",
            user_id="incremental_user",
            session_data={"session_name": f"Incremental Day 1 Session {i}"},
            agent_data={"name": f"Incremental Day 1 Agent {i}", "model": "gpt-4"},
            runs=[agent_run],
            created_at=base_time + (i * 3600),
            updated_at=base_time + (i * 3600),
        )
        day1_sessions.append(session)
    # Insert Day 1 sessions and calculate metrics
    for session in day1_sessions:
        postgres_db_real.upsert_session(session)
    # Calculate metrics for day 1
    result1 = postgres_db_real.calculate_metrics()
    assert result1 is not None
    assert len(result1) == 1
    # Verify day 1 metrics exist
    metrics1, _ = postgres_db_real.get_metrics()
    assert len(metrics1) == 1
    assert metrics1[0]["agent_runs_count"] == 2
    # Add sessions for day 2
    day2_base = base_time + 86400
    day2_sessions = []
    for i in range(3):
        agent_run = RunOutput(
            run_id=f"incremental_day2_run_{i}",
            agent_id=f"incremental_day2_agent_{i}",
            user_id="incremental_user",
            status=RunStatus.completed,
            messages=[],
        )
        session = AgentSession(
            session_id=f"incremental_day2_session_{i}",
            agent_id=f"incremental_day2_agent_{i}",
            user_id="incremental_user",
            session_data={"session_name": f"Incremental Day 2 Session {i}"},
            agent_data={"name": f"Incremental Day 2 Agent {i}", "model": "gpt-4"},
            runs=[agent_run],
            created_at=day2_base + (i * 3600),
            updated_at=day2_base + (i * 3600),
        )
        day2_sessions.append(session)
    # Insert day 2 sessions and calculate metrics again
    for session in day2_sessions:
        postgres_db_real.upsert_session(session)
    # Calculate metrics for day 2 (day 1 is already complete, so only 1 new row)
    result2 = postgres_db_real.calculate_metrics()
    assert result2 is not None
    assert len(result2) == 1
    # Verify both days' metrics exist
    metrics2, _ = postgres_db_real.get_metrics()
    assert len(metrics2) == 2
    metrics_sorted = sorted(metrics2, key=lambda x: x["date"])
    assert metrics_sorted[0]["agent_runs_count"] == 2
    assert metrics_sorted[1]["agent_runs_count"] == 3
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_metrics.py",
"license": "Apache License 2.0",
"lines": 459,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/postgres/test_session.py | """Integration tests for the Session related methods of the PostgresDb class"""
import time
from datetime import datetime
import pytest
from sqlalchemy import text
from agno.db.base import SessionType
from agno.db.postgres.postgres import PostgresDb
from agno.run.agent import RunOutput
from agno.run.base import RunStatus
from agno.run.team import TeamRunOutput
from agno.session.agent import AgentSession
from agno.session.summary import SessionSummary
from agno.session.team import TeamSession
@pytest.fixture(autouse=True)
def cleanup_sessions(postgres_db_real: PostgresDb):
    """Fixture to clean-up session rows after each test"""
    yield
    # Best-effort wipe of the sessions table once the test finishes
    with postgres_db_real.Session() as session:
        try:
            table = postgres_db_real._get_table("sessions", create_table_if_not_found=True)
            if table is not None:
                session.execute(table.delete())
            session.commit()
        except Exception:
            session.rollback()
@pytest.fixture
def sample_agent_session() -> AgentSession:
    """Fixture returning a sample AgentSession"""
    now = int(time.time())
    run = RunOutput(
        run_id="test_agent_run_1",
        agent_id="test_agent_1",
        user_id="test_user_1",
        status=RunStatus.completed,
        messages=[],
    )
    return AgentSession(
        session_id="test_agent_session_1",
        agent_id="test_agent_1",
        user_id="test_user_1",
        team_id="test_team_1",
        session_data={"session_name": "Test Agent Session", "key": "value"},
        agent_data={"name": "Test Agent", "model": "gpt-4"},
        metadata={"extra_key": "extra_value"},
        runs=[run],
        summary=None,
        created_at=now,
        updated_at=now,
    )
@pytest.fixture
def sample_team_session() -> TeamSession:
    """Fixture returning a sample TeamSession"""
    now = int(time.time())
    run = TeamRunOutput(
        run_id="test_team_run_1",
        team_id="test_team_1",
        status=RunStatus.completed,
        messages=[],
        created_at=now,
    )
    return TeamSession(
        session_id="test_team_session_1",
        team_id="test_team_1",
        user_id="test_user_1",
        session_data={"session_name": "Test Team Session", "key": "value"},
        team_data={"name": "Test Team", "model": "gpt-4"},
        metadata={"extra_key": "extra_value"},
        runs=[run],
        summary=None,
        created_at=now,
        updated_at=now,
    )
def test_session_table_constraint_exists(postgres_db_real: PostgresDb):
    """Ensure the session table has a primary key constraint on session_id"""
    with postgres_db_real.Session() as session:
        # Force table creation before querying the information schema
        table = postgres_db_real._get_table(table_type="sessions", create_table_if_not_found=True)
        assert table is not None, "Session table should be created"
        query = text(
            "SELECT constraint_name FROM information_schema.table_constraints "
            "WHERE table_schema = :schema AND table_name = :table AND constraint_type = 'PRIMARY KEY'"
        )
        params = {"schema": postgres_db_real.db_schema, "table": postgres_db_real.session_table_name}
        constraint_names = [row[0] for row in session.execute(query, params).fetchall()]
        assert constraint_names, (
            f"Session table missing PRIMARY KEY constraint. Found constraints: {constraint_names}"
        )
def test_insert_agent_session(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """Ensure the upsert method works as expected when inserting a new AgentSession"""
    stored = postgres_db_real.upsert_session(sample_agent_session)
    assert stored is not None
    assert isinstance(stored, AgentSession)
    # The returned session mirrors everything that was inserted
    for attr in ("session_id", "agent_id", "user_id", "session_data", "agent_data"):
        assert getattr(stored, attr) == getattr(sample_agent_session, attr)
def test_update_agent_session(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """Ensure the upsert method works as expected when updating an existing AgentSession"""
    postgres_db_real.upsert_session(sample_agent_session)  # initial insert
    # Mutate and upsert again
    sample_agent_session.session_data = {"session_name": "Updated Session", "updated": True}
    sample_agent_session.agent_data = {"foo": "bar"}
    updated = postgres_db_real.upsert_session(sample_agent_session)
    assert updated is not None
    assert isinstance(updated, AgentSession)
    assert updated.session_data is not None and updated.session_data["session_name"] == "Updated Session"
    assert updated.agent_data is not None and updated.agent_data["foo"] == "bar"
    # The agent runs are preserved across the update
    assert updated.runs is not None and updated.runs[0] is not None
    assert sample_agent_session.runs is not None and sample_agent_session.runs[0] is not None
    assert updated.runs[0].run_id == sample_agent_session.runs[0].run_id
def test_insert_team_session(postgres_db_real: PostgresDb, sample_team_session: TeamSession):
    """Ensure the upsert method works as expected when inserting a new TeamSession"""
    stored = postgres_db_real.upsert_session(sample_team_session)
    assert stored is not None
    assert isinstance(stored, TeamSession)
    # The returned session mirrors everything that was inserted
    for attr in ("session_id", "team_id", "user_id", "session_data", "team_data"):
        assert getattr(stored, attr) == getattr(sample_team_session, attr)
    # The team run is round-tripped as well
    assert stored.runs is not None and stored.runs[0] is not None
    assert sample_team_session.runs is not None and sample_team_session.runs[0] is not None
    assert stored.runs[0].run_id == sample_team_session.runs[0].run_id
def test_update_team_session(postgres_db_real: PostgresDb, sample_team_session: TeamSession):
    """Ensure the upsert method works as expected when updating an existing TeamSession"""
    postgres_db_real.upsert_session(sample_team_session)  # initial insert
    # Mutate and upsert again
    sample_team_session.session_data = {"session_name": "Updated Team Session", "updated": True}
    sample_team_session.team_data = {"foo": "bar"}
    updated = postgres_db_real.upsert_session(sample_team_session)
    assert updated is not None
    assert isinstance(updated, TeamSession)
    assert updated.session_data is not None and updated.session_data["session_name"] == "Updated Team Session"
    assert updated.team_data is not None and updated.team_data["foo"] == "bar"
def test_upserting_without_deserialization(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """Ensure the upsert method works as expected when upserting a session without deserialization"""
    # With deserialize=False a plain dict is returned instead of a session object
    raw = postgres_db_real.upsert_session(sample_agent_session, deserialize=False)
    assert raw is not None
    assert isinstance(raw, dict)
    assert raw["session_id"] == sample_agent_session.session_id
def test_get_agent_session_by_id(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """Ensure the get_session method works as expected when retrieving an AgentSession by session_id"""
    postgres_db_real.upsert_session(sample_agent_session)
    # Fetch it back by id and verify type and identity
    fetched = postgres_db_real.get_session(session_id=sample_agent_session.session_id, session_type=SessionType.AGENT)
    assert fetched is not None
    assert isinstance(fetched, AgentSession)
    assert fetched.session_id == sample_agent_session.session_id
    assert fetched.agent_id == sample_agent_session.agent_id
def test_get_team_session_by_id(postgres_db_real: PostgresDb, sample_team_session: TeamSession):
    """Ensure the get_session method works as expected when retrieving a TeamSession by session_id"""
    postgres_db_real.upsert_session(sample_team_session)
    # Fetch it back by id and verify type and identity
    fetched = postgres_db_real.get_session(session_id=sample_team_session.session_id, session_type=SessionType.TEAM)
    assert fetched is not None
    assert isinstance(fetched, TeamSession)
    assert fetched.session_id == sample_team_session.session_id
    assert fetched.team_id == sample_team_session.team_id
def test_get_session_with_user_id_filter(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """Ensure the get_session method works as expected when retrieving a session with user_id filter"""
    postgres_db_real.upsert_session(sample_agent_session)
    # A matching user_id returns the session
    match = postgres_db_real.get_session(
        session_id=sample_agent_session.session_id,
        user_id=sample_agent_session.user_id,
        session_type=SessionType.AGENT,
    )
    assert match is not None
    # A mismatched user_id returns nothing
    no_match = postgres_db_real.get_session(
        session_id=sample_agent_session.session_id,
        user_id="wrong_user",
        session_type=SessionType.AGENT,
    )
    assert no_match is None
def test_get_session_without_deserialization(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """Ensure the get_session method works as expected when retrieving a session without deserialization"""
    postgres_db_real.upsert_session(sample_agent_session)
    # deserialize=False yields the raw row as a dict
    raw = postgres_db_real.get_session(
        session_id=sample_agent_session.session_id, session_type=SessionType.AGENT, deserialize=False
    )
    assert raw is not None
    assert isinstance(raw, dict)
    assert raw["session_id"] == sample_agent_session.session_id
def test_get_all_sessions(
    postgres_db_real: PostgresDb,
    sample_agent_session: AgentSession,
    sample_team_session: TeamSession,
):
    """Ensure the get_sessions method works as expected when retrieving all sessions"""
    postgres_db_real.upsert_session(sample_agent_session)
    postgres_db_real.upsert_session(sample_team_session)
    # Each session type is returned only by its own query
    agent_sessions = postgres_db_real.get_sessions(session_type=SessionType.AGENT)
    assert len(agent_sessions) == 1
    assert isinstance(agent_sessions[0], AgentSession)
    team_sessions = postgres_db_real.get_sessions(session_type=SessionType.TEAM)
    assert len(team_sessions) == 1
    assert isinstance(team_sessions[0], TeamSession)
def test_filtering_by_user_id(postgres_db_real: PostgresDb):
    """Ensure the get_sessions method works as expected when filtering by user_id"""
    # Two sessions owned by two different users
    now = int(time.time())
    postgres_db_real.upsert_session(
        AgentSession(session_id="session1", agent_id="agent1", user_id="user1", created_at=now)
    )
    postgres_db_real.upsert_session(
        AgentSession(session_id="session2", agent_id="agent2", user_id="user2", created_at=now)
    )
    # Only user1's session should come back
    matches = postgres_db_real.get_sessions(session_type=SessionType.AGENT, user_id="user1")
    assert len(matches) == 1
    assert matches[0].user_id == "user1"
def test_filtering_by_component_id(postgres_db_real: PostgresDb):
    """get_sessions with component_id should filter on the owning agent_id."""
    now = int(time.time())
    for idx in (1, 2):
        postgres_db_real.upsert_session(
            AgentSession(session_id=f"session{idx}", agent_id=f"agent{idx}", user_id="user1", created_at=now)
        )

    # Only the session belonging to agent1 should come back
    matches = postgres_db_real.get_sessions(session_type=SessionType.AGENT, component_id="agent1")
    assert len(matches) == 1
    assert isinstance(matches[0], AgentSession)
    assert matches[0].agent_id == "agent1"
def test_get_sessions_with_pagination(postgres_db_real: PostgresDb):
    """Paginated get_sessions calls should return disjoint, correctly sized pages."""
    base_time = int(time.time())
    for idx in range(5):
        postgres_db_real.upsert_session(
            AgentSession(
                session_id=f"session_{idx}",
                agent_id=f"agent_{idx}",
                user_id="test_user",
                created_at=base_time + idx,
            )
        )

    first_page = postgres_db_real.get_sessions(session_type=SessionType.AGENT, limit=2, page=1)
    second_page = postgres_db_real.get_sessions(session_type=SessionType.AGENT, limit=2, page=2)
    assert isinstance(first_page, list) and len(first_page) == 2
    assert isinstance(second_page, list) and len(second_page) == 2

    # Consecutive pages must not share any session
    assert {s.session_id for s in first_page}.isdisjoint({s.session_id for s in second_page})
def test_get_sessions_with_sorting(postgres_db_real: PostgresDb):
    """get_sessions should honor sort_by / sort_order on created_at.

    Note: the redundant function-level imports of SessionType and AgentSession
    were removed — both names are already imported at module scope (sibling
    tests in this file use them bare).
    """
    # Distinct timestamps make the expected ordering deterministic
    base_time = int(time.time())
    postgres_db_real.upsert_session(AgentSession(session_id="session1", agent_id="agent1", created_at=base_time + 100))
    postgres_db_real.upsert_session(AgentSession(session_id="session2", agent_id="agent2", created_at=base_time + 200))

    # Ascending: oldest session first
    sessions_asc = postgres_db_real.get_sessions(session_type=SessionType.AGENT, sort_by="created_at", sort_order="asc")
    assert sessions_asc is not None and isinstance(sessions_asc, list)
    assert [s.session_id for s in sessions_asc] == ["session1", "session2"]

    # Descending: newest session first
    sessions_desc = postgres_db_real.get_sessions(
        session_type=SessionType.AGENT, sort_by="created_at", sort_order="desc"
    )
    assert sessions_desc is not None and isinstance(sessions_desc, list)
    assert [s.session_id for s in sessions_desc] == ["session2", "session1"]
def test_get_sessions_with_timestamp_filter(postgres_db_real: PostgresDb):
    """get_sessions should filter on start_timestamp / end_timestamp.

    Removed the redundant function-level imports of SessionType and
    AgentSession — both are already available at module scope.
    """
    base_time = int(time.time())

    # One session well in the past, one well in the future relative to base_time
    old_session = AgentSession(session_id="session1", agent_id="agent1", created_at=base_time - 1000)
    new_session = AgentSession(session_id="session2", agent_id="agent2", created_at=base_time + 1000)
    postgres_db_real.upsert_session(old_session)
    postgres_db_real.upsert_session(new_session)

    # start_timestamp keeps only sessions created at/after the cutoff
    recent_sessions = postgres_db_real.get_sessions(session_type=SessionType.AGENT, start_timestamp=base_time)
    assert len(recent_sessions) == 1
    assert recent_sessions[0].session_id == "session2"

    # end_timestamp keeps only sessions created at/before the cutoff
    old_sessions = postgres_db_real.get_sessions(session_type=SessionType.AGENT, end_timestamp=base_time)
    assert len(old_sessions) == 1
    assert old_sessions[0].session_id == "session1"
def test_get_sessions_with_session_name_filter(postgres_db_real: PostgresDb):
    """get_sessions should match sessions by partial session_name.

    Removed the redundant function-level imports of SessionType and
    AgentSession — both are already available at module scope.
    """
    now = int(time.time())
    postgres_db_real.upsert_session(
        AgentSession(
            session_id="session1",
            agent_id="agent1",
            session_data={"session_name": "Test Session Alpha"},
            created_at=now,
        )
    )
    postgres_db_real.upsert_session(
        AgentSession(
            session_id="session2",
            agent_id="agent2",
            session_data={"session_name": "Test Session Beta"},
            created_at=now,
        )
    )

    # A partial name should match only the session containing it
    matches = postgres_db_real.get_sessions(session_type=SessionType.AGENT, session_name="Alpha")
    assert len(matches) == 1
    assert matches[0].session_id == "session1"
def test_get_sessions_without_deserialize(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """get_sessions with deserialize=False should return raw dicts plus a total count.

    Removed the redundant function-level import of SessionType (module-scoped).
    """
    postgres_db_real.upsert_session(sample_agent_session)

    # Raw mode returns (rows, total_count) instead of deserialized sessions
    sessions, total_count = postgres_db_real.get_sessions(session_type=SessionType.AGENT, deserialize=False)
    assert isinstance(sessions, list)
    assert len(sessions) == 1
    assert isinstance(sessions[0], dict)
    assert sessions[0]["session_id"] == sample_agent_session.session_id
    assert total_count == 1
def test_rename_agent_session(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """rename_session should persist the new name on an AgentSession.

    Removed the redundant function-level import of SessionType (module-scoped).
    """
    postgres_db_real.upsert_session(sample_agent_session)

    new_name = "Renamed Agent Session"
    result = postgres_db_real.rename_session(
        session_id=sample_agent_session.session_id,
        session_type=SessionType.AGENT,
        session_name=new_name,
    )

    # The returned session reflects the rename
    assert result is not None
    assert isinstance(result, AgentSession)
    assert result.session_data is not None
    assert result.session_data["session_name"] == new_name
def test_rename_team_session(postgres_db_real: PostgresDb, sample_team_session: TeamSession):
    """rename_session should persist the new name on a TeamSession.

    Removed the redundant function-level import of SessionType (module-scoped).
    """
    postgres_db_real.upsert_session(sample_team_session)

    new_name = "Renamed Team Session"
    result = postgres_db_real.rename_session(
        session_id=sample_team_session.session_id,
        session_type=SessionType.TEAM,
        session_name=new_name,
    )

    # The returned session reflects the rename
    assert result is not None
    assert isinstance(result, TeamSession)
    assert result.session_data is not None
    assert result.session_data["session_name"] == new_name
def test_rename_session_without_deserialize(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """rename_session with deserialize=False should return the updated raw dict.

    Removed the redundant function-level import of SessionType (module-scoped).
    """
    postgres_db_real.upsert_session(sample_agent_session)

    new_name = "Renamed Session Dict"
    result = postgres_db_real.rename_session(
        session_id=sample_agent_session.session_id,
        session_type=SessionType.AGENT,
        session_name=new_name,
        deserialize=False,
    )

    # Raw mode returns the row as a dict with the new name applied
    assert result is not None
    assert isinstance(result, dict)
    assert result["session_data"]["session_name"] == new_name
def test_delete_single_session(postgres_db_real: PostgresDb, sample_agent_session: AgentSession):
    """delete_session should remove an existing session.

    Removed the import statement that was buried mid-function — SessionType
    is already imported at module scope.
    """
    postgres_db_real.upsert_session(sample_agent_session)

    # Session is retrievable before deletion
    session = postgres_db_real.get_session(session_id=sample_agent_session.session_id, session_type=SessionType.AGENT)
    assert session is not None

    # Delete and confirm success
    success = postgres_db_real.delete_session(sample_agent_session.session_id)
    assert success is True

    # And it is gone afterwards
    session = postgres_db_real.get_session(session_id=sample_agent_session.session_id, session_type=SessionType.AGENT)
    assert session is None
def test_delete_multiple_sessions(postgres_db_real: PostgresDb):
    """delete_sessions should remove exactly the requested sessions.

    Removed the redundant function-level imports of SessionType and
    AgentSession — both are already available at module scope.
    """
    now = int(time.time())
    session_ids = [f"session_{i}" for i in range(3)]
    for i, sid in enumerate(session_ids):
        postgres_db_real.upsert_session(AgentSession(session_id=sid, agent_id=f"agent_{i}", created_at=now))

    # All three are present before the bulk delete
    assert len(postgres_db_real.get_sessions(session_type=SessionType.AGENT)) == 3

    # Delete the first two; the third must survive
    postgres_db_real.delete_sessions(session_ids[:2])
    remaining_sessions = postgres_db_real.get_sessions(session_type=SessionType.AGENT)
    assert len(remaining_sessions) == 1
    assert remaining_sessions[0].session_id == "session_2"
def test_delete_session_scoped_by_user_id(postgres_db_real: PostgresDb):
    """Verify delete_session with user_id only deletes sessions owned by that user (IDOR protection)."""
    now = int(time.time())
    postgres_db_real.upsert_session(
        AgentSession(session_id="shared_sess_1", agent_id="agent_1", user_id="alice", created_at=now)
    )
    postgres_db_real.upsert_session(
        AgentSession(session_id="shared_sess_2", agent_id="agent_1", user_id="bob", created_at=now)
    )

    # Bob must not be able to delete Alice's session
    assert postgres_db_real.delete_session(session_id="shared_sess_1", user_id="bob") is False
    assert postgres_db_real.get_session(session_id="shared_sess_1", session_type=SessionType.AGENT) is not None

    # Alice can delete her own session
    assert postgres_db_real.delete_session(session_id="shared_sess_1", user_id="alice") is True
    assert postgres_db_real.get_session(session_id="shared_sess_1", session_type=SessionType.AGENT) is None
def test_delete_sessions_scoped_by_user_id(postgres_db_real: PostgresDb):
    """Verify bulk delete_sessions with user_id only deletes sessions owned by that user."""
    now = int(time.time())
    owners = {"alice_s1": "alice", "alice_s2": "alice", "bob_s1": "bob"}
    for sid, owner in owners.items():
        postgres_db_real.upsert_session(
            AgentSession(session_id=sid, agent_id="agent_1", user_id=owner, created_at=now)
        )

    # Bob attempts to delete all three IDs, but the call is scoped to his user_id
    postgres_db_real.delete_sessions(session_ids=list(owners), user_id="bob")

    # Alice's sessions survive; only Bob's own session is gone
    assert postgres_db_real.get_session(session_id="alice_s1", session_type=SessionType.AGENT) is not None
    assert postgres_db_real.get_session(session_id="alice_s2", session_type=SessionType.AGENT) is not None
    assert postgres_db_real.get_session(session_id="bob_s1", session_type=SessionType.AGENT) is None
def test_rename_session_scoped_by_user_id(postgres_db_real: PostgresDb):
    """Verify rename_session with user_id only renames sessions owned by that user."""
    postgres_db_real.upsert_session(
        AgentSession(
            session_id="rename_sess_1",
            agent_id="agent_1",
            user_id="alice",
            session_data={"session_name": "Original Name"},
            created_at=int(time.time()),
        )
    )

    # Bob's rename attempt must be rejected and leave the name untouched
    denied = postgres_db_real.rename_session(
        session_id="rename_sess_1", session_type=SessionType.AGENT, session_name="Hacked", user_id="bob"
    )
    assert denied is None
    current = postgres_db_real.get_session(session_id="rename_sess_1", session_type=SessionType.AGENT)
    assert current is not None
    assert current.session_data["session_name"] == "Original Name"

    # Alice renames her own session successfully
    renamed = postgres_db_real.rename_session(
        session_id="rename_sess_1", session_type=SessionType.AGENT, session_name="New Name", user_id="alice"
    )
    assert renamed is not None
    assert renamed.session_data["session_name"] == "New Name"
def test_session_type_polymorphism(
    postgres_db_real: PostgresDb, sample_agent_session: AgentSession, sample_team_session: TeamSession
):
    """Sessions must deserialize into the concrete class matching the requested SessionType."""
    postgres_db_real.upsert_session(sample_agent_session)
    postgres_db_real.upsert_session(sample_team_session)

    # Each session comes back as its own concrete type
    agent_result = postgres_db_real.get_session(
        session_id=sample_agent_session.session_id, session_type=SessionType.AGENT
    )
    assert isinstance(agent_result, AgentSession)

    team_result = postgres_db_real.get_session(session_id=sample_team_session.session_id, session_type=SessionType.TEAM)
    assert isinstance(team_result, TeamSession)

    # Requesting an agent session under the TEAM type yields nothing
    mismatched = postgres_db_real.get_session(
        session_id=sample_agent_session.session_id,
        session_type=SessionType.TEAM,
    )
    assert mismatched is None
def test_upsert_session_handles_all_agent_session_fields(postgres_db_real: PostgresDb):
    """Ensure upsert_session correctly handles all AgentSession fields"""
    # Build an AgentSession with every supported field populated
    agent_run = RunOutput(
        run_id="test_run_comprehensive",
        agent_id="comprehensive_agent",
        user_id="comprehensive_user",
        status=RunStatus.completed,
        messages=[],
    )
    original = AgentSession(
        session_id="comprehensive_agent_session",
        agent_id="comprehensive_agent_id",
        user_id="comprehensive_user_id",
        session_data={
            "session_name": "Comprehensive Agent Session",
            "session_state": {"key": "value"},
            "images": ["image1.jpg", "image2.png"],
            "videos": ["video1.mp4"],
            "audio": ["audio1.wav"],
            "custom_field": "custom_value",
        },
        metadata={"extra_key1": "extra_value1", "extra_key2": {"nested": "data"}, "extra_list": [1, 2, 3]},
        agent_data={
            "name": "Comprehensive Agent",
            "model": "gpt-4",
            "description": "A comprehensive test agent",
            "capabilities": ["chat", "search", "analysis"],
        },
        runs=[agent_run],
        summary=None,
        created_at=int(time.time()),
        updated_at=int(time.time()),
    )

    stored = postgres_db_real.upsert_session(original)
    assert stored is not None
    assert isinstance(stored, AgentSession)

    # Every field must round-trip through the database unchanged
    round_trip_fields = (
        "session_id",
        "agent_id",
        "team_id",
        "user_id",
        "session_data",
        "metadata",
        "agent_data",
        "created_at",
        "updated_at",
    )
    for field in round_trip_fields:
        assert getattr(stored, field) == getattr(original, field)
    assert stored.runs is not None
    assert len(stored.runs) == 1
    assert stored.runs[0].run_id == agent_run.run_id
def test_upsert_session_handles_all_team_session_fields(postgres_db_real: PostgresDb):
    """Ensure upsert_session correctly handles all TeamSession fields"""
    # Build a TeamSession with every supported field populated
    team_run = TeamRunOutput(
        run_id="test_team_run_comprehensive",
        team_id="comprehensive_team",
        status=RunStatus.completed,
        messages=[],
        created_at=int(time.time()),
    )
    team_summary = SessionSummary(
        summary="Comprehensive team session summary",
        topics=["tests", "fake"],
        updated_at=datetime.now(),
    )
    original = TeamSession(
        session_id="comprehensive_team_session",
        team_id="comprehensive_team_id",
        user_id="comprehensive_user_id",
        team_data={
            "name": "Comprehensive Team",
            "model": "gpt-4",
            "description": "A comprehensive test team",
            "members": ["agent1", "agent2", "agent3"],
            "strategy": "collaborative",
        },
        session_data={
            "session_name": "Comprehensive Team Session",
            "session_state": {"phase": "active"},
            "images": ["team_image1.jpg"],
            "videos": ["team_video1.mp4"],
            "audio": ["team_audio1.wav"],
            "team_custom_field": "team_custom_value",
        },
        metadata={
            "team_extra_key1": "team_extra_value1",
            "team_extra_key2": {"nested": "team_data"},
            "team_metrics": {"efficiency": 0.95},
        },
        runs=[team_run],
        summary=team_summary,
        created_at=int(time.time()),
        updated_at=int(time.time()),
    )

    stored = postgres_db_real.upsert_session(original)
    assert stored is not None
    assert isinstance(stored, TeamSession)

    # Every field must round-trip through the database unchanged
    round_trip_fields = (
        "session_id",
        "team_id",
        "user_id",
        "team_data",
        "session_data",
        "metadata",
        "created_at",
        "updated_at",
    )
    for field in round_trip_fields:
        assert getattr(stored, field) == getattr(original, field)
    assert isinstance(stored.summary, SessionSummary)
    assert stored.summary == original.summary
    assert stored.runs is not None
    assert len(stored.runs) == 1
    assert stored.runs[0].run_id == team_run.run_id
def test_upsert_sessions(postgres_db_real: PostgresDb):
    """Test upsert_sessions with mixed session types (Agent, Team, Workflow)"""
    from agno.run.workflow import WorkflowRunOutput
    from agno.session.workflow import WorkflowSession

    now = int(time.time())

    # One session per supported session kind, each carrying a single run
    agent_session = AgentSession(
        session_id="bulk_agent_session_1",
        agent_id="bulk_agent_1",
        user_id="bulk_user_1",
        agent_data={"name": "Bulk Agent 1"},
        session_data={"type": "bulk_test"},
        runs=[
            RunOutput(
                run_id="bulk_agent_run_1",
                agent_id="bulk_agent_1",
                user_id="bulk_user_1",
                status=RunStatus.completed,
                messages=[],
            )
        ],
        created_at=now,
    )
    team_session = TeamSession(
        session_id="bulk_team_session_1",
        team_id="bulk_team_1",
        user_id="bulk_user_1",
        team_data={"name": "Bulk Team 1"},
        session_data={"type": "bulk_test"},
        runs=[
            TeamRunOutput(
                run_id="bulk_team_run_1",
                team_id="bulk_team_1",
                status=RunStatus.completed,
                messages=[],
                created_at=now,
            )
        ],
        created_at=now,
    )
    workflow_session = WorkflowSession(
        session_id="bulk_workflow_session_1",
        workflow_id="bulk_workflow_1",
        user_id="bulk_user_1",
        workflow_data={"name": "Bulk Workflow 1"},
        session_data={"type": "bulk_test"},
        runs=[
            WorkflowRunOutput(
                run_id="bulk_workflow_run_1",
                workflow_id="bulk_workflow_1",
                status=RunStatus.completed,
                created_at=now,
            )
        ],
        created_at=now,
    )

    # Upsert all three kinds in a single bulk call
    results = postgres_db_real.upsert_sessions([agent_session, team_session, workflow_session])
    assert len(results) == 3

    # Each input must come back as its own concrete type with fields intact
    agent_result = next(r for r in results if isinstance(r, AgentSession))
    team_result = next(r for r in results if isinstance(r, TeamSession))
    workflow_result = next(r for r in results if isinstance(r, WorkflowSession))

    assert agent_result.session_id == agent_session.session_id
    assert agent_result.agent_id == agent_session.agent_id
    assert agent_result.agent_data == agent_session.agent_data

    assert team_result.session_id == team_session.session_id
    assert team_result.team_id == team_session.team_id
    assert team_result.team_data == team_session.team_data

    assert workflow_result.session_id == workflow_session.session_id
    assert workflow_result.workflow_id == workflow_session.workflow_id
    assert workflow_result.workflow_data == workflow_session.workflow_data
def test_upsert_sessions_update(postgres_db_real: PostgresDb):
    """Test upsert_sessions correctly updates existing sessions"""

    def _make(sid: str, agent: str, name: str, version: int, created_at: int, updated: bool = False) -> AgentSession:
        # Local factory keeping the insert and update rounds symmetric
        agent_data = {"name": name}
        session_data = {"version": version}
        if updated:
            agent_data["updated"] = True
            session_data["updated"] = True
        return AgentSession(
            session_id=sid,
            agent_id=agent,
            user_id="user_1",
            agent_data=agent_data,
            session_data=session_data,
            created_at=created_at,
        )

    now = int(time.time())
    session1 = _make("bulk_update_1", "agent_1", "Original Agent 1", 1, now)
    session2 = _make("bulk_update_2", "agent_2", "Original Agent 2", 1, now)
    postgres_db_real.upsert_sessions([session1, session2])

    # Re-upsert with new payloads but the original created_at values
    results = postgres_db_real.upsert_sessions(
        [
            _make("bulk_update_1", "agent_1", "Updated Agent 1", 2, session1.created_at, updated=True),
            _make("bulk_update_2", "agent_2", "Updated Agent 2", 2, session2.created_at, updated=True),
        ]
    )
    assert len(results) == 2

    expected_created = {"bulk_update_1": session1.created_at, "bulk_update_2": session2.created_at}
    for result in results:
        assert isinstance(result, AgentSession)
        assert result.agent_data is not None and result.agent_data["updated"] is True
        assert result.session_data is not None and result.session_data["version"] == 2
        assert result.session_data is not None and result.session_data["updated"] is True
        # created_at must be preserved across the update
        assert result.created_at == expected_created[result.session_id]
def test_upsert_sessions_performance(postgres_db_real: PostgresDb):
    """Ensure the bulk upsert method is considerably faster than individual upserts.

    Uses time.perf_counter() for the measurements: it is monotonic and
    high-resolution, whereas time.time() tracks the wall clock and can jump
    (NTP adjustments, DST), which would make the comparison unreliable. The
    `import time as time_module` alias was redundant — `time` is already
    imported at module scope.
    """
    # Create 50 sessions to upsert
    sessions = [
        AgentSession(
            session_id=f"perf_test_{i}",
            agent_id=f"agent_{i}",
            user_id="perf_user",
            agent_data={"name": f"Performance Agent {i}"},
            session_data={"index": i},
            created_at=int(time.time()),
        )
        for i in range(50)
    ]

    # Time one-by-one upserts
    start = time.perf_counter()
    for session in sessions:
        postgres_db_real.upsert_session(session)
    individual_time = time.perf_counter() - start

    # Clean up so the bulk path inserts fresh rows rather than updating
    postgres_db_real.delete_sessions([s.session_id for s in sessions])

    # Time the bulk upsert
    start = time.perf_counter()
    postgres_db_real.upsert_sessions(sessions)
    bulk_time = time.perf_counter() - start

    # Verify all sessions were created
    all_sessions = postgres_db_real.get_sessions(session_type=SessionType.AGENT, user_id="perf_user")
    assert len(all_sessions) == 50

    # Asserting bulk upsert is at least 2x faster
    assert bulk_time < individual_time / 2, (
        f"Bulk upsert is not fast enough: {bulk_time:.3f}s vs {individual_time:.3f}s"
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/postgres/test_session.py",
"license": "Apache License 2.0",
"lines": 778,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_arxiv_knowledge.py | import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.arxiv_reader import ArxivReader
from agno.vectordb.chroma import ChromaDb
@pytest.fixture
def setup_vector_db():
    """Yield a temporary Chroma collection; drop it once the test finishes."""
    db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    yield db
    # Teardown: remove the collection created for this test
    db.drop()
def test_arxiv_knowledge_base_integration(setup_vector_db):
    """Integration test using real arXiv papers."""
    knowledge = Knowledge(vector_db=setup_vector_db)

    # "Attention Is All You Need" and "BERT" papers; one result per query
    knowledge.insert(
        metadata={"user_tag": "Arxiv content"},
        topics=["1706.03762", "1810.04805"],
        reader=ArxivReader(max_results=1),
    )

    assert setup_vector_db.exists()
    # At least the two requested papers should be indexed
    assert setup_vector_db.get_count() >= 2

    response = Agent(knowledge=knowledge).run(
        "Explain the key concepts of transformer architecture", markdown=True
    )

    # The agent must have searched the knowledge base via a function tool call
    tool_calls = [call for msg in response.messages if msg.tool_calls for call in msg.tool_calls]
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
def test_arxiv_knowledge_base_search_integration(setup_vector_db):
    """Integration test using real arXiv search query."""
    knowledge = Knowledge(vector_db=setup_vector_db)

    # Free-text arXiv search, capped at three results to keep the test quick
    knowledge.insert(
        metadata={"user_tag": "Arxiv content"},
        topics=["transformer architecture language models"],
        reader=ArxivReader(max_results=3),
    )

    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() > 0

    response = Agent(knowledge=knowledge).run(
        "What are the recent developments in transformer models?", markdown=True
    )

    # The agent must have issued a search_knowledge_base function call
    tool_calls = [call for msg in response.messages if msg.tool_calls for call in msg.tool_calls]
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
@pytest.mark.asyncio
async def test_arxiv_knowledge_base_async_integration(setup_vector_db):
    """Integration test using real arXiv papers with async loading.

    Fix: max_results was being passed to Knowledge() instead of ArxivReader().
    The sync tests in this file configure the per-query result limit on the
    reader, and the "one result per query" intent only applies there — so the
    limit is moved onto the reader for consistency.
    """
    reader = ArxivReader(max_results=1)  # Limit to exactly one result per query
    knowledge = Knowledge(
        vector_db=setup_vector_db,
    )

    await knowledge.ainsert(
        # "GPT-3" and "AlphaFold" papers
        topics=["2005.14165", "2003.02645"],
        reader=reader,
    )

    assert await setup_vector_db.async_exists()
    # Check that we have at least the papers we requested
    assert setup_vector_db.get_count() >= 2

    agent = Agent(
        knowledge=knowledge,
        search_knowledge=True,
        instructions=[
            "You are a helpful assistant that can answer questions.",
            "You can use the search_knowledge_base tool to search the knowledge base of journal articles for information.",
        ],
    )
    response = await agent.arun("What are the key capabilities of GPT-3?", markdown=True)

    # The agent must have searched the knowledge base via a function tool call
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_arxiv_knowledge.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_csv_knowledge.py | import asyncio
import io
import tempfile
import uuid
from pathlib import Path
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.csv_reader import CSVReader
from agno.vectordb.chroma import ChromaDb
from agno.vectordb.lancedb import LanceDb
# Sample CSV data to use in tests

# Employee roster with columns: id, name, department, salary, years_experience.
# Used to exercise directory-based loading and department/salary queries.
EMPLOYEE_CSV_DATA = """id,name,department,salary,years_experience
1,John Smith,Engineering,75000,5
2,Sarah Johnson,Marketing,65000,3
3,Michael Brown,Finance,85000,8
4,Jessica Lee,Engineering,80000,6
5,David Wilson,HR,55000,2
6,Emily Chen,Product,70000,4
7,Robert Miller,Engineering,90000,10
8,Amanda White,Marketing,60000,3
9,Thomas Garcia,Finance,82000,7
10,Lisa Thompson,Engineering,78000,5
"""

# Quarterly laptop sales with columns: quarter, region, product, revenue, units_sold.
# Used for single-file loading and region/quarter comparison queries.
SALES_CSV_DATA = """quarter,region,product,revenue,units_sold
Q1,North,Laptop,128500,85
Q1,South,Laptop,95000,65
Q1,East,Laptop,110200,75
Q1,West,Laptop,142300,95
Q2,North,Laptop,138600,90
Q2,South,Laptop,105800,70
Q2,East,Laptop,115000,78
Q2,West,Laptop,155000,100
"""
@pytest.fixture
def setup_csv_files():
    """Write the sample CSVs into a throwaway directory and yield its path."""
    with tempfile.TemporaryDirectory() as temp_dir:
        csv_dir = Path(temp_dir) / "csvs"
        csv_dir.mkdir(parents=True, exist_ok=True)

        # Materialize each sample dataset as its own file
        for filename, payload in (("employees.csv", EMPLOYEE_CSV_DATA), ("sales.csv", SALES_CSV_DATA)):
            (csv_dir / filename).write_text(payload)

        yield temp_dir
def test_csv_knowledge(setup_csv_files):
    """Load a directory of CSVs (no chunking) and query it through an agent."""
    vector_db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)

    csv_dir = Path(setup_csv_files) / "csvs"
    print(f"Testing with CSV directory: {csv_dir}")

    # Index both CSVs as whole documents
    knowledge = Knowledge(vector_db=vector_db)
    asyncio.run(knowledge.ainsert(path=str(csv_dir), reader=CSVReader(chunk=False)))

    assert vector_db.exists()
    assert vector_db.get_count() == 2

    response = Agent(knowledge=knowledge).run(
        "Tell me about the employees in the Engineering department", markdown=True
    )
    assert any(term in response.content.lower() for term in ["engineering", "employee", "department"])

    vector_db.drop()
def test_csv_knowledge_single_file():
    """Test with a single temporary CSV file.

    Fixes: removed dead code — a `StringIO` object was created (and given a
    fake `.name`) but never used; the test actually reads from the
    NamedTemporaryFile. The docstring claiming "in-memory" was also wrong.
    """
    vector_db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)

    with tempfile.NamedTemporaryFile(suffix=".csv", mode="w+") as temp_file:
        temp_file.write(SALES_CSV_DATA)
        temp_file.flush()

        knowledge = Knowledge(
            vector_db=vector_db,
        )
        reader = CSVReader(
            chunk=False,
        )
        asyncio.run(
            knowledge.ainsert(
                path=temp_file.name,
                reader=reader,
            )
        )

        assert vector_db.exists()
        assert vector_db.get_count() == 1

        # Create and use the agent
        agent = Agent(knowledge=knowledge)
        response = agent.run("What was the revenue for the West region?", markdown=True)
        assert any(term in response.content.lower() for term in ["west", "revenue", "region"])

    vector_db.drop()
@pytest.mark.asyncio
async def test_csv_knowledge_async(setup_csv_files):
    """Asynchronously load a directory of CSVs and query it through an agent."""
    vector_db = LanceDb(table_name=f"csv_test_{uuid.uuid4().hex}", uri="tmp/lancedb")
    csv_dir = Path(setup_csv_files) / "csvs"

    knowledge = Knowledge(vector_db=vector_db)
    await knowledge.ainsert(path=str(csv_dir), reader=CSVReader(chunk=False))

    assert await vector_db.async_exists()
    assert await vector_db.async_get_count() >= 2

    # Query the indexed data via an agent
    response = await Agent(knowledge=knowledge).arun("Which employees have salaries above 80000?", markdown=True)
    assert any(term in response.content.lower() for term in ["salary", "80000", "employee"])

    # Clean up
    await vector_db.async_drop()
@pytest.mark.asyncio
async def test_csv_knowledge_async_single_file():
    """Test with a single temporary CSV file asynchronously.

    Fix: docstring previously claimed "in-memory CSV file" — the test writes
    the data to a NamedTemporaryFile on disk and loads it by path.
    """
    table_name = f"csv_test_{uuid.uuid4().hex}"
    vector_db = LanceDb(table_name=table_name, uri="tmp/lancedb")

    with tempfile.NamedTemporaryFile(suffix=".csv", mode="w+") as temp_file:
        temp_file.write(SALES_CSV_DATA)
        temp_file.flush()

        knowledge = Knowledge(
            vector_db=vector_db,
        )
        reader = CSVReader(
            chunk=False,
        )
        await knowledge.ainsert(
            path=temp_file.name,
            reader=reader,
        )

        assert await vector_db.async_exists()
        count = await vector_db.async_get_count()
        assert count >= 1

        agent = Agent(knowledge=knowledge)
        response = await agent.arun("Compare Q1 and Q2 laptop sales", markdown=True)
        assert any(term in response.content.lower() for term in ["q1", "q2", "laptop", "sales"])

    await vector_db.async_drop()
def test_csv_via_url():
    """Insert remote CSVs by URL and confirm the agent can answer from them."""
    vector_db = LanceDb(table_name=f"csv_test_{uuid.uuid4().hex}", uri="tmp/lancedb")
    knowledge = Knowledge(vector_db=vector_db)

    # Two independent remote CSV sources, inserted one at a time
    for url in (
        "https://agno-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv",
        "https://agno-public.s3.amazonaws.com/csvs/employees.csv",
    ):
        knowledge.insert(url=url)

    assert vector_db.exists()
    doc_count = vector_db.get_count()
    assert doc_count > 2, f"Expected multiple documents but got {doc_count}"

    # Query the agent
    agent = Agent(
        knowledge=knowledge,
        search_knowledge=True,
        instructions=[
            "You are a helpful assistant that can answer questions.",
            "You can use the search_knowledge tool to search the knowledge base of CSVs for information.",
        ],
    )
    response = agent.run("Give me top rated movies", markdown=True)

    # Check that we got relevant content
    assert response.content is not None
    assert any(term in response.content.lower() for term in ["movie", "rating", "imdb", "title"])

    # Clean up
    vector_db.drop()
@pytest.mark.asyncio
async def test_csv_via_url_async():
    """Asynchronously load two remote CSVs and query them via an agent."""
    vector_db = LanceDb(table_name=f"csv_test_{uuid.uuid4().hex}", uri="tmp/lancedb")
    knowledge = Knowledge(vector_db=vector_db)
    # Set chunk explicitly to False
    await knowledge.ainsert_many(
        urls=[
            "https://agno-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv",
            "https://agno-public.s3.amazonaws.com/csvs/employees.csv",
        ],
    )
    assert await vector_db.async_exists()
    doc_count = await vector_db.async_get_count()
    assert doc_count > 2, f"Expected multiple documents but got {doc_count}"
    # Query the agent with knowledge search enabled.
    agent = Agent(
        knowledge=knowledge,
        search_knowledge=True,
        instructions=[
            "You are a helpful assistant that can answer questions.",
            "You can use the search_knowledge tool to search the knowledge base of CSVs for information.",
        ],
    )
    response = await agent.arun("Which employees have salaries above 50000?", markdown=True)
    assert response.content is not None
    assert "employees" in response.content.lower()
    await vector_db.async_drop()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_csv_knowledge.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_docx_knowledge.py | from pathlib import Path
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.chroma import ChromaDb
@pytest.fixture
def setup_vector_db():
    """Yield a temporary Chroma vector DB; drop it once the test finishes."""
    db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    yield db
    db.drop()
def get_test_data_dir():
    """Return the directory holding this test suite's data files."""
    here = Path(__file__).parent
    return here / "data"
def get_filtered_data_dir():
    """Return the directory holding the filter-metadata test fixtures."""
    here = Path(__file__).parent
    return here / "data" / "filters"
def prepare_knowledge_base(setup_vector_db):
    """Build a knowledge base pre-loaded with two CVs tagged by user metadata."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.docx", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.docx", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=get_filtered_data_dir() / filename, metadata=metadata)
    return kb
async def aprepare_knowledge_base(setup_vector_db):
    """Asynchronously build a knowledge base pre-loaded with two tagged CVs."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.docx", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.docx", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        await kb.ainsert(path=get_filtered_data_dir() / filename, metadata=metadata)
    return kb
def test_docx_knowledge_base_directory(setup_vector_db):
    """Load a directory of DOCX files and verify the agent searches them."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert(path=get_test_data_dir())
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() > 0
    # Search must be enabled so the agent can invoke the knowledge tool.
    agent = Agent(knowledge=kb, search_knowledge=True)
    response = agent.run("What is the story of little prince about?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
@pytest.mark.asyncio
async def test_docx_knowledge_base_async_directory(setup_vector_db):
    """Asynchronously load a directory of DOCX files and verify agent search."""
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert(path=get_test_data_dir())
    assert await setup_vector_db.async_exists()
    assert setup_vector_db.get_count() > 0
    # Search must be enabled so the agent can invoke the knowledge tool.
    agent = Agent(knowledge=kb, search_knowledge=True)
    response = await agent.arun("What is the story of little prince about?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    # For async operations, we use search_knowledge_base
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
# for the one with new knowledge filter DX- filters at initialization
def test_text_knowledge_base_with_metadata_path(setup_vector_db):
    """Load DOCX files with per-file metadata and filter queries by user_id."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.docx", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.docx", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=str(get_filtered_data_dir() / filename), metadata=metadata)
    # A run-level filter should restrict retrieval to Jordan Mitchell's CV.
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell's experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    assert "jordan" in response.content.lower()
def test_docx_knowledge_base_with_metadata_path_invalid_filter(setup_vector_db):
    """An unknown filter key must be dropped rather than passed to search."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.docx", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.docx", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=str(get_filtered_data_dir() / filename), metadata=metadata)
    # Initialize agent with a filter key that no document carries.
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about the candidate's experience?", markdown=True)
    assert len(response.content.lower()) > 50
    # Inspect the search tool calls: the bogus filter must not appear in any arguments.
    search_calls = [
        tc
        for msg in response.messages
        if msg.tool_calls
        for tc in msg.tool_calls
        if tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
    ]
    assert not any(
        "nonexistent_filter" in tc["function"].get("arguments", "{}") for tc in search_calls
    )
# for the one with new knowledge filter DX- filters at load
def test_knowledge_base_with_valid_filter(setup_vector_db):
    """An agent-level user_id filter should scope answers to one CV."""
    kb = prepare_knowledge_base(setup_vector_db)
    agent = Agent(knowledge=kb, knowledge_filters={"user_id": "jordan_mitchell"})
    response = agent.run("Tell me about the Jordan Mitchell's experience?", markdown=True)
    answer = response.content.lower()
    # The answer should reference Jordan Mitchell's entry-level background...
    assert "entry-level" in answer or "junior" in answer or "jordan mitchell" in answer
    # ...and must not leak Taylor Brooks' senior-developer experience.
    assert "senior developer" not in answer
def test_knowledge_base_with_run_level_filter(setup_vector_db):
    """A filter supplied at run time should scope retrieval the same way."""
    kb = prepare_knowledge_base(setup_vector_db)
    # No filters on the agent itself; pass them with the run call instead.
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    answer = response.content.lower()
    # A substantive answer came back...
    assert len(answer) > 50
    # ...and it references Jordan Mitchell's (entry-level) experience.
    assert any(term in answer for term in ["jordan mitchell", "entry-level", "junior"])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_docx_knowledge.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_firecrawl_knowledge.py | import os
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.firecrawl_reader import FirecrawlReader
from agno.vectordb.lancedb import LanceDb
@pytest.fixture
def setup_vector_db():
    """Yield a uniquely-named temporary LanceDb table; drop it after the test."""
    db = LanceDb(table_name=f"firecrawl_test_{os.urandom(4).hex()}", uri="tmp/lancedb")
    yield db
    db.drop()
@pytest.mark.skip(reason="Skipping firecrawl knowledge base tests")
def test_firecrawl_knowledge_base_directory(setup_vector_db):
    """Load multiple URLs via Firecrawl and verify the agent searches them."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(
        urls=["https://docs.agno.com/knowledge/introduction", "https://docs.agno.com/knowledge/pdf"],
        reader=FirecrawlReader(),
    )
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() > 0
    agent = Agent(knowledge=kb)
    response = agent.run("What is knowledge in Agno and what types are available?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
@pytest.mark.skip(reason="Skipping firecrawl knowledge base tests")
def test_firecrawl_knowledge_base_single_url(setup_vector_db):
    """Load a single URL via Firecrawl and verify the agent searches it."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(
        urls=["https://docs.agno.com/knowledge/pdf"],
        reader=FirecrawlReader(),
    )
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() > 0
    agent = Agent(knowledge=kb)
    response = agent.run("How do I use Knowledge in Agno?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
@pytest.mark.skip(reason="Skipping firecrawl knowledge base tests")
@pytest.mark.asyncio
async def test_firecrawl_knowledge_base_async_directory(setup_vector_db):
    """Async-load multiple URLs via Firecrawl and verify the agent searches them."""
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert_many(
        urls=["https://docs.agno.com/knowledge/introduction", "https://docs.agno.com/knowledge/pdf"],
        reader=FirecrawlReader(),
    )
    assert await setup_vector_db.async_exists()
    assert await setup_vector_db.async_get_count() > 0
    agent = Agent(knowledge=kb, search_knowledge=True)
    response = await agent.arun(
        "What are the different types of knowledge bases available in Agno and how do I use PDF knowledge base?",
        markdown=True,
    )
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
@pytest.mark.skip(reason="Skipping firecrawl knowledge base tests")
@pytest.mark.asyncio
async def test_firecrawl_knowledge_base_async_single_url(setup_vector_db):
    """Async-load a single URL via Firecrawl and verify the agent searches it."""
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert_many(
        urls=["https://docs.agno.com/knowledge/introduction"],
        reader=FirecrawlReader(),
    )
    assert await setup_vector_db.async_exists()
    assert await setup_vector_db.async_get_count() > 0
    agent = Agent(knowledge=kb)
    response = await agent.arun("What is a knowledge base in Agno?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
@pytest.mark.skip(reason="Skipping firecrawl knowledge base tests")
def test_firecrawl_knowledge_base_empty_urls(setup_vector_db):
    """Inserting an empty URL list creates the table but adds no documents."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(urls=[], reader=FirecrawlReader())
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() == 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_firecrawl_knowledge.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_json_knowledge.py | from pathlib import Path
import pytest
from agno.agent import Agent
from agno.db.json.json_db import JsonDb
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.chroma import ChromaDb
@pytest.fixture
def setup_vector_db():
    """Yield a temporary Chroma vector DB; drop it once the test finishes."""
    db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    yield db
    db.drop()
def get_filtered_data_dir():
    """Return the directory holding the filter-metadata test fixtures."""
    here = Path(__file__).parent
    return here / "data" / "filters"
def prepare_knowledge_base(setup_vector_db):
    """Build a knowledge base pre-loaded with two JSON CVs tagged by user metadata."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.json", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.json", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=get_filtered_data_dir() / filename, metadata=metadata)
    return kb
def test_json_knowledge_base():
    """Load a directory of JSON files and verify counts plus agent search."""
    contents_db = JsonDb(db_path="tmp/json_db")
    vector_db = ChromaDb(collection="vectors_1", path="tmp/chromadb", persistent_client=True)
    knowledge_base = Knowledge(vector_db=vector_db, contents_db=contents_db)
    knowledge_base.insert(path=str(Path(__file__).parent / "data/json"))
    assert vector_db.exists()
    # Two JSON files contribute 3 and 2 documents respectively.
    assert vector_db.get_count() == 5
    agent = Agent(knowledge=knowledge_base)
    response = agent.run("Tell me about Thai curry recipes", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    # Every function-type tool call must be the knowledge search tool.
    assert all(
        tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
        if tc.get("type", "") == "function"
    )
    vector_db.drop()
def test_json_knowledge_base_single_file():
    """Load a single JSON file and verify the expected document count."""
    vector_db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    knowledge_base = Knowledge(vector_db=vector_db)
    knowledge_base.insert(path=str(Path(__file__).parent / "data/json/recipes.json"))
    assert vector_db.exists()
    # recipes.json contains exactly 3 documents.
    assert vector_db.get_count() == 3
    vector_db.drop()
@pytest.mark.asyncio
async def test_json_knowledge_base_async():
    """Asynchronously load JSON files and verify agent search plus answer content."""
    vector_db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    knowledge_base = Knowledge(vector_db=vector_db)
    await knowledge_base.ainsert(path=str(Path(__file__).parent / "data/json"))
    assert await vector_db.async_exists()
    # Two JSON files contribute 3 and 2 documents respectively.
    assert vector_db.get_count() == 5
    agent = Agent(knowledge=knowledge_base)
    response = await agent.arun("What ingredients do I need for Tom Kha Gai?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    # Every function-type tool call must be the knowledge search tool.
    assert all(
        tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
        if tc.get("type", "") == "function"
    )
    assert any(ingredient in response.content.lower() for ingredient in ["coconut", "chicken", "galangal"])
    await vector_db.async_drop()
# for the one with new knowledge filter DX- filters at initialization
def test_text_knowledge_base_with_metadata_path(setup_vector_db):
    """Load JSON CVs with per-file metadata and filter queries by user_id."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.json", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.json", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=str(get_filtered_data_dir() / filename), metadata=metadata)
    # A run-level filter should restrict retrieval to Jordan Mitchell's CV.
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell's experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    answer = response.content.lower()
    assert "entry" in answer or "junior" in answer or "jordan" in answer
    # Taylor Brooks' senior-developer experience must not leak through.
    assert "senior developer" not in answer
def test_knowledge_base_with_metadata_path_invalid_filter(setup_vector_db):
    """An unknown filter key must be dropped rather than passed to search."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.json", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.json", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=str(get_filtered_data_dir() / filename), metadata=metadata)
    # Initialize agent with a filter key that no document carries.
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about the candidate's experience?", markdown=True)
    assert len(response.content.lower()) > 50
    # Inspect the search tool calls: the bogus filter must not appear in any arguments.
    search_calls = [
        tc
        for msg in response.messages
        if msg.tool_calls
        for tc in msg.tool_calls
        if tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
    ]
    assert not any(
        "nonexistent_filter" in tc["function"].get("arguments", "{}") for tc in search_calls
    )
# for the one with new knowledge filter DX- filters at load
def test_knowledge_base_with_valid_filter(setup_vector_db):
    """An agent-level user_id filter should scope answers to one CV."""
    kb = prepare_knowledge_base(setup_vector_db)
    agent = Agent(knowledge=kb, knowledge_filters={"user_id": "jordan_mitchell"})
    response = agent.run("Tell me about the Jordan Mitchell's experience?", markdown=True)
    answer = response.content.lower()
    # The answer should reference Jordan Mitchell's entry-level background...
    assert "entry-level" in answer or "junior" in answer or "jordan mitchell" in answer
    # ...and must not leak Taylor Brooks' senior-developer experience.
    assert "senior developer" not in answer
def test_knowledge_base_with_run_level_filter(setup_vector_db):
    """A filter supplied at run time should scope retrieval the same way."""
    kb = prepare_knowledge_base(setup_vector_db)
    # No filters on the agent itself; pass them with the run call instead.
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    answer = response.content.lower()
    # A substantive answer came back...
    assert len(answer) > 50
    # ...and it references Jordan Mitchell's (entry-level) experience.
    assert any(term in answer for term in ["jordan mitchell", "entry-level", "junior"])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_json_knowledge.py",
"license": "Apache License 2.0",
"lines": 190,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_md_knowledge.py | import os
from pathlib import Path
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.markdown_reader import MarkdownReader
from agno.vectordb.lancedb import LanceDb
@pytest.fixture
def setup_vector_db():
    """Yield a uniquely-named temporary LanceDb table; drop it after the test."""
    db = LanceDb(table_name=f"md_test_{os.urandom(4).hex()}", uri="tmp/lancedb")
    yield db
    db.drop()
def get_test_data_dir():
    """Return the directory holding this test suite's data files."""
    here = Path(__file__).parent
    return here / "data"
def get_filtered_data_dir():
    """Return the directory holding the filter-metadata test fixtures."""
    here = Path(__file__).parent
    return here / "data" / "filters"
def prepare_knowledge_base(setup_vector_db):
    """Build a knowledge base pre-loaded with two markdown CVs tagged by user metadata."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.md", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.md", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=get_filtered_data_dir() / filename, metadata=metadata)
    return kb
def test_text_knowledge_base_directory(setup_vector_db):
    """Load only *.md files from the data directory and verify agent search."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert(
        path=get_test_data_dir(),
        include=["*.md"],
        reader=MarkdownReader(),
        skip_if_exists=True,
    )
    # pg_essay.md is expected to split into exactly 4 documents.
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() == 4
    agent = Agent(knowledge=kb)
    response = agent.run("What are the key factors in doing great work?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]  # type: ignore
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
@pytest.mark.asyncio
async def test_text_knowledge_base_async_directory(setup_vector_db):
    """Asynchronously load only *.md files and verify agent search."""
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert(
        path=get_test_data_dir(),
        include=["*.md"],
    )
    # pg_essay.md is expected to split into exactly 4 documents.
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() == 4
    agent = Agent(knowledge=kb)
    response = await agent.arun("What does Paul Graham say about great work?", markdown=True)
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
    )
def test_text_knowledge_base_with_metadata_path(setup_vector_db):
    """Load markdown CVs with per-file metadata and filter queries by user_id."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.md", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.md", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=str(get_filtered_data_dir() / filename), metadata=metadata)
    # A run-level filter should restrict retrieval to Jordan Mitchell's CV.
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell's experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    answer = response.content.lower()  # type: ignore
    assert "entry" in answer or "junior" in answer or "jordan" in answer
    # Taylor Brooks' senior-developer experience must not leak through.
    assert "senior developer" not in answer
def test_knowledge_base_with_metadata_path_invalid_filter(setup_vector_db):
    """An unknown filter key must be dropped rather than passed to search."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = [
        ("cv_1.md", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.md", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=str(get_filtered_data_dir() / filename), metadata=metadata)
    # Initialize agent with a filter key that no document carries.
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about the candidate's experience?", markdown=True)
    assert len(response.content.lower()) > 50  # type: ignore
    # Inspect the search tool calls: the bogus filter must not appear in any arguments.
    search_calls = [
        tc
        for msg in response.messages  # type: ignore
        if msg.tool_calls
        for tc in msg.tool_calls
        if tc.get("type") == "function" and tc["function"]["name"] == "search_knowledge_base"
    ]
    assert not any(
        "nonexistent_filter" in tc["function"].get("arguments", "{}") for tc in search_calls
    )
def test_knowledge_base_with_valid_filter(setup_vector_db):
    """An agent-level user_id filter should scope answers to one CV."""
    kb = prepare_knowledge_base(setup_vector_db)
    agent = Agent(knowledge=kb, knowledge_filters={"user_id": "jordan_mitchell"})
    response = agent.run("Tell me about the Jordan Mitchell's experience?", markdown=True)
    answer = response.content.lower()  # type: ignore
    # The answer should reference Jordan Mitchell's entry-level background...
    assert "entry-level" in answer or "junior" in answer or "jordan mitchell" in answer
    # ...and must not leak Taylor Brooks' senior-developer experience.
    assert "senior developer" not in answer
def test_knowledge_base_with_run_level_filter(setup_vector_db):
    """A filter supplied at run time should scope retrieval the same way."""
    kb = prepare_knowledge_base(setup_vector_db)
    # No filters on the agent itself; pass them with the run call instead.
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    answer = response.content.lower()  # type: ignore
    # A substantive answer came back...
    assert len(answer) > 50
    # ...and it references Jordan Mitchell's (entry-level) experience.
    assert any(term in answer for term in ["jordan mitchell", "entry-level", "junior"])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_md_knowledge.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_pdf_knowledge.py | import os
from pathlib import Path
import pytest
from agno.agent import Agent
from agno.db.sqlite.sqlite import SqliteDb
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.chroma import ChromaDb
from agno.vectordb.lancedb.lance_db import LanceDb
@pytest.fixture
def setup_vector_db():
    """Yield a Chroma vector DB at a unique temp path; drop it after the test."""
    db = ChromaDb(collection="vectors", path=f"tmp/chromadb_{os.urandom(4).hex()}", persistent_client=True)
    yield db
    db.drop()
@pytest.fixture
def setup_contents_db():
    """Yield a temporary SQLite contents DB; remove its file after the test."""
    db = SqliteDb("tmp/contentsdb")
    yield db
    os.remove("tmp/contentsdb")
def get_filtered_data_dir():
    """Return the directory holding the filter-metadata test fixtures."""
    here = Path(__file__).parent
    return here / "data" / "filters"
def prepare_knowledge(setup_vector_db, setup_contents_db):
    """Build a knowledge instance pre-loaded with two PDF CVs tagged by user metadata."""
    kb = Knowledge(vector_db=setup_vector_db, contents_db=setup_contents_db)
    cv_specs = [
        ("cv_1.pdf", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.pdf", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        kb.insert(path=get_filtered_data_dir() / filename, metadata=metadata)
    return kb
async def aprepare_knowledge(setup_vector_db, setup_contents_db):
    """Asynchronously build a knowledge instance pre-loaded with two tagged PDF CVs."""
    kb = Knowledge(vector_db=setup_vector_db, contents_db=setup_contents_db)
    cv_specs = [
        ("cv_1.pdf", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.pdf", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    ]
    for filename, metadata in cv_specs:
        await kb.ainsert(path=get_filtered_data_dir() / filename, metadata=metadata)
    return kb
def test_pdf_knowledge():
    """Load a PDF of recipes and verify the agent uses the search tool."""
    vector_db = LanceDb(table_name="recipes", uri="tmp/lancedb")
    knowledge = Knowledge(vector_db=vector_db)
    knowledge.insert(path=str(Path(__file__).parent / "data/thai_recipes_short.pdf"))
    assert vector_db.exists()
    agent = Agent(knowledge=knowledge)
    response = agent.run("Show me how to make Tom Kha Gai", markdown=True)
    assert response.messages is not None
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    # Every function-type tool call must be the knowledge search tool.
    assert all(
        tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
        if tc.get("type", "") == "function"
    )
    vector_db.drop()
@pytest.mark.asyncio
async def test_pdf_knowledge_async():
    """Asynchronously load a recipe PDF and verify search plus answer content."""
    vector_db = LanceDb(table_name="recipes_async", uri="tmp/lancedb")
    knowledge = Knowledge(vector_db=vector_db)
    await knowledge.ainsert(path=str(Path(__file__).parent / "data/thai_recipes_short.pdf"))
    assert await vector_db.async_exists()
    agent = Agent(knowledge=knowledge)
    response = await agent.arun("What ingredients do I need for Tom Kha Gai?", markdown=True)
    assert response.messages is not None
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    # Every function-type tool call must be the knowledge search tool.
    assert all(
        tc["function"]["name"] == "search_knowledge_base"
        for tc in collected
        if tc.get("type", "") == "function"
    )
    assert response.content is not None
    assert any(ingredient in response.content.lower() for ingredient in ["coconut", "chicken", "galangal"])
    await vector_db.async_drop()
# for the one with new knowledge filter DX- filters at initialization
def test_text_knowledge_with_metadata_path(setup_vector_db):
    """Test loading text files with metadata using the new path structure."""
    kb = Knowledge(
        vector_db=setup_vector_db,
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_1.pdf"),
        metadata={"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"},
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_2.pdf"),
        metadata={"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"},
    )
    # Verify documents were loaded with metadata
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell's experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    assert response.content is not None
    assert (
        "entry" in response.content.lower()
        or "junior" in response.content.lower()
        # Bug fix: the needle must be lowercase — "Jordan" can never appear in a lowercased string.
        or "jordan" in response.content.lower()
    )
    assert "senior developer" not in response.content.lower()
def test_knowledge_with_metadata_path_invalid_filter(setup_vector_db):
    """Test filtering knowledge with invalid filters using the new path structure.

    An unknown filter key must be dropped rather than forwarded to the search tool.
    """
    kb = Knowledge(
        vector_db=setup_vector_db,
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_1.pdf"),
        metadata={"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"},
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_2.pdf"),
        metadata={"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"},
    )
    # Initialize agent with invalid filters
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about the candidate's experience?", markdown=True)
    # Guard Optional fields before use (consistent with the other tests in this module).
    assert response.content is not None
    response_content = response.content.lower()
    assert len(response_content) > 50
    # Check the tool calls to verify the invalid filter was not used
    tool_calls = []
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    # Bug fix: the tool is named "search_knowledge_base" (the name every other test in this
    # module checks); matching "search_knowledge" selected no calls, so the invalid-filter
    # assertion below passed vacuously.
    function_calls = [
        call
        for call in tool_calls
        if call.get("type") == "function" and call["function"]["name"] == "search_knowledge_base"
    ]
    found_invalid_filters = False
    for call in function_calls:
        call_args = call["function"].get("arguments", "{}")
        if "nonexistent_filter" in call_args:
            found_invalid_filters = True
    assert not found_invalid_filters
# Knowledge-filter DX tests below: filters are applied when loading the knowledge base.
def test_knowledge_with_valid_filter(setup_vector_db, setup_contents_db):
    """Agent-level knowledge filters should restrict retrieval to Jordan Mitchell's CV."""
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    # The filter is fixed on the agent itself, scoping every search to one user.
    agent = Agent(knowledge=kb, knowledge_filters={"user_id": "jordan_mitchell"})
    response = agent.run("Tell me about the Jordan Mitchell's experience?", markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    # The answer should reference Jordan Mitchell's entry-level background...
    assert any(term in lowered for term in ("entry-level", "junior", "jordan mitchell"))
    # ...and must not leak Taylor Brooks' senior-developer CV.
    assert "senior developer" not in lowered
def test_knowledge_with_run_level_filter(setup_vector_db, setup_contents_db):
    """Test filtering knowledge with filters passed at run time."""
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    # Initialize agent without filters
    agent = Agent(knowledge=kb)
    # Run a query with filters provided at run time
    response = agent.run(
        "Tell me about Jordan Mitchell experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    # Guard before .lower(): response.content is Optional.
    assert response.content is not None
    response_content = response.content.lower()
    # Check that we have a response with actual content
    assert len(response_content) > 50
    # Fixed comment: the assertion checks the response DOES mention Jordan Mitchell's
    # experience (the previous comment said the opposite of the code).
    assert any(term in response_content for term in ["jordan mitchell", "entry-level", "junior"])
def test_knowledge_filter_override(setup_vector_db, setup_contents_db):
    """Test that run-level filters override agent-level filters."""
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    # Fixed comment: the agent-level filter is taylor_brooks (not jordan_mitchell);
    # the run-level jordan_mitchell filter below must override it.
    agent = Agent(knowledge=kb, knowledge_filters={"user_id": "taylor_brooks"})
    response = agent.run(
        "Tell me about Jordan Mitchell's experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    # Guard before .lower(): response.content is Optional.
    assert response.content is not None
    response_content = response.content.lower()
    # Check that we have a response with actual content
    assert len(response_content) > 50
    # Should mention Jordan Mitchell's experience
    assert any(term in response_content for term in ["jordan mitchell", "entry-level", "intern", "junior"])
    # Taylor Brooks' CV should not be used instead of Jordan Mitchell's
    assert not any(term in response_content for term in ["taylor", "brooks", "senior", "developer", "mid level"])
# -- Via URL
@pytest.mark.asyncio
async def test_pdf_url_knowledge_base_async():
    """Async-load two recipe PDFs from URLs and verify the agent answers via knowledge search."""
    db = LanceDb(
        table_name="recipes_async",
        uri="tmp/lancedb",
    )
    kb = Knowledge(vector_db=db)
    await kb.ainsert_many(
        urls=[
            "https://agno-public.s3.amazonaws.com/recipes/cape_recipes_short_2.pdf",
            "https://agno-public.s3.amazonaws.com/recipes/thai_recipes_short.pdf",
        ]
    )
    assert await db.async_exists()
    # Both documents should have produced vectors.
    assert await db.async_get_count() > 1

    agent = Agent(knowledge=kb)
    response = await agent.arun("What ingredients do I need for Tom Kha Gai?", markdown=True)

    assert response.messages is not None
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    for tc in collected:
        if tc.get("type", "") == "function":
            assert tc["function"]["name"] == "search_knowledge_base"

    assert response.content is not None
    lowered = response.content.lower()
    assert any(ing in lowered for ing in ("coconut", "chicken", "galangal"))

    # Remove the temporary table.
    await db.async_drop()
# Knowledge-filter DX tests below: filters are supplied at agent initialization.
@pytest.mark.asyncio
async def test_pdf_url_knowledge_base_with_metadata_path(setup_vector_db):
    """Test loading PDF URLs with metadata using the new path structure."""
    kb = Knowledge(
        vector_db=setup_vector_db,
    )
    await kb.ainsert(
        url="https://agno-public.s3.amazonaws.com/recipes/thai_recipes_short.pdf",
        metadata={"cuisine": "Thai", "source": "Thai Cookbook", "region": "Southeast Asia"},
    )
    await kb.ainsert(
        url="https://agno-public.s3.amazonaws.com/recipes/cape_recipes_short_2.pdf",
        metadata={"cuisine": "Cape", "source": "Cape Cookbook", "region": "South Africa"},
    )
    # Verify documents were loaded with metadata
    agent = Agent(knowledge=kb)
    # Consistency fix: this coroutine test previously called the sync agent.run;
    # use the async API like every other async test in this module.
    response = await agent.arun("Tell me about Thai recipes", knowledge_filters={"cuisine": "Thai"}, markdown=True)
    assert response.content is not None
    response_content = response.content.lower()
    # Thai cuisine recipe should mention Thai ingredients or dishes
    assert any(term in response_content for term in ["tom kha", "pad thai", "thai cuisine", "coconut milk"])
    # Should not mention Cape cuisine terms
    assert not any(term in response_content for term in ["cape malay", "bobotie", "south african"])
def test_pdf_url_knowledge_base_with_metadata_path_invalid_filter(setup_vector_db, setup_contents_db):
    """Test loading PDF URLs with metadata using the new path structure and invalid filters.

    An unknown filter key on the agent should be dropped rather than forwarded
    to the knowledge-base search tool.
    """
    kb = Knowledge(
        vector_db=setup_vector_db,
        contents_db=setup_contents_db,
    )
    kb.insert(
        url="https://agno-public.s3.amazonaws.com/recipes/thai_recipes_short.pdf",
        metadata={"cuisine": "Thai", "source": "Thai Cookbook", "region": "Southeast Asia"},
    )
    kb.insert(
        url="https://agno-public.s3.amazonaws.com/recipes/cape_recipes_short_2.pdf",
        metadata={"cuisine": "Cape", "source": "Cape Cookbook", "region": "South Africa"},
    )
    # Initialize agent with invalid filters
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about the recipes available", markdown=True)
    assert response.content is not None
    response_content = response.content.lower()
    # Check that we have a substantive response
    assert len(response_content) > 50
    # The response should either ask for clarification or mention recipes.
    # LLM output is nondeterministic, so the content check is deliberately loose.
    clarification_phrases = [
        "specify",
        "which cuisine",
        "please clarify",
        "need more information",
        "be more specific",
        "specific",
    ]
    recipes_mentioned = any(cuisine in response_content for cuisine in ["thai", "cape", "tom kha", "cape malay"])
    valid_response = any(phrase in response_content for phrase in clarification_phrases) or recipes_mentioned
    # Print debug information
    print(f"Response content: {response_content}")
    print(f"Contains clarification phrase: {any(phrase in response_content for phrase in clarification_phrases)}")
    print(f"Recipes mentioned: {recipes_mentioned}")
    assert valid_response
    # Verify that invalid filter was not used in tool calls
    tool_calls = []
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [
        call
        for call in tool_calls
        if call.get("type") == "function" and call["function"]["name"] == "search_knowledge_base"
    ]
    # Check if any of the search_knowledge_base calls had the invalid filter
    found_invalid_filters = False
    for call in function_calls:
        # NOTE(review): "arguments" appears to be a serialized (JSON) string — the "{}"
        # default suggests so — hence the substring scan; confirm against the tool-call schema.
        call_args = call["function"].get("arguments", "{}")
        if "nonexistent_filter" in call_args:
            found_invalid_filters = True
    # Assert that the invalid filter was not used in the actual calls
    assert not found_invalid_filters
@pytest.mark.asyncio
async def test_async_pdf_url_knowledge_base_with_metadata_path(setup_vector_db):
    """Async-load two recipe PDFs with cuisine metadata, then query with a Thai filter."""
    kb = Knowledge(vector_db=setup_vector_db)
    sources = (
        (
            "https://agno-public.s3.amazonaws.com/recipes/thai_recipes_short.pdf",
            {"cuisine": "Thai", "source": "Thai Cookbook", "region": "Southeast Asia"},
        ),
        (
            "https://agno-public.s3.amazonaws.com/recipes/cape_recipes_short_2.pdf",
            {"cuisine": "Cape", "source": "Cape Cookbook", "region": "South Africa"},
        ),
    )
    for pdf_url, meta in sources:
        await kb.ainsert(url=pdf_url, metadata=meta)
    agent = Agent(knowledge=kb)
    response = await agent.arun("Tell me about Thai recipes", knowledge_filters={"cuisine": "Thai"}, markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    # Filtered retrieval should surface Thai dishes/ingredients...
    assert any(term in lowered for term in ("tom kha", "pad thai", "thai cuisine", "coconut milk"))
    # ...and no Cape cuisine content.
    assert not any(term in lowered for term in ("cape malay", "bobotie", "south african"))
@pytest.mark.asyncio
async def test_async_pdf_url_knowledge_base_with_metadata_path_invalid_filter(setup_vector_db, setup_contents_db):
    """Test async loading of PDF URLs with metadata using the new path structure and invalid filters.

    Async twin of the sync invalid-filter test: an unknown filter key should be
    dropped rather than forwarded to the knowledge-base search tool.
    """
    kb = Knowledge(
        vector_db=setup_vector_db,
        contents_db=setup_contents_db,
    )
    await kb.ainsert(
        url="https://agno-public.s3.amazonaws.com/recipes/thai_recipes_short.pdf",
        metadata={"cuisine": "Thai", "source": "Thai Cookbook", "region": "Southeast Asia"},
    )
    await kb.ainsert(
        url="https://agno-public.s3.amazonaws.com/recipes/cape_recipes_short_2.pdf",
        metadata={"cuisine": "Cape", "source": "Cape Cookbook", "region": "South Africa"},
    )
    # Initialize agent with invalid filters
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = await agent.arun("Tell me about the recipes available", markdown=True)
    assert response.content is not None
    response_content = response.content.lower()
    # Check that we have a substantive response
    assert len(response_content) > 50
    # The response should either ask for clarification or mention recipes.
    # LLM output is nondeterministic, so the content check is deliberately loose.
    clarification_phrases = [
        "specify",
        "which cuisine",
        "please clarify",
        "need more information",
        "be more specific",
        "specific",
    ]
    recipes_mentioned = any(cuisine in response_content for cuisine in ["thai", "cape", "tom kha", "cape malay"])
    valid_response = any(phrase in response_content for phrase in clarification_phrases) or recipes_mentioned
    # Print debug information
    print(f"Response content: {response_content}")
    print(f"Contains clarification phrase: {any(phrase in response_content for phrase in clarification_phrases)}")
    print(f"Recipes mentioned: {recipes_mentioned}")
    assert valid_response
    # Verify that invalid filter was not used in tool calls
    tool_calls = []
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [
        call
        for call in tool_calls
        if call.get("type") == "function" and call["function"]["name"] == "search_knowledge_base"
    ]
    # Check if any of the search_knowledge_base calls had the invalid filter
    found_invalid_filters = False
    for call in function_calls:
        # NOTE(review): "arguments" appears to be a serialized (JSON) string — the "{}"
        # default suggests so — hence the substring scan; confirm against the tool-call schema.
        call_args = call["function"].get("arguments", "{}")
        if "nonexistent_filter" in call_args:
            found_invalid_filters = True
    # Assert that the invalid filter was not used in the actual calls
    assert not found_invalid_filters
# Knowledge-filter DX tests below: filters are applied when loading the knowledge base.
def test_pdf_url_knowledge_base_with_valid_filter(setup_vector_db, setup_contents_db):
    """Agent-level cuisine filter should restrict retrieval to the Thai recipe PDF."""
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    # The filter is fixed on the agent, scoping every search to Thai cuisine.
    agent = Agent(knowledge=kb, knowledge_filters={"cuisine": "Thai"})
    response = agent.run("Tell me about Tom Kha Gai recipe", markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    assert len(lowered) > 50
    # Thai ingredients should appear...
    assert any(term in lowered for term in ("coconut milk", "galangal", "lemongrass", "tom kha"))
    # ...and Cape-cuisine dishes must not.
    assert all(term not in lowered for term in ("cape malay curry", "bobotie", "apricot jam"))
def test_pdf_url_knowledge_base_with_run_level_filter(setup_vector_db, setup_contents_db):
    """Run-time cuisine filter should scope retrieval to the Cape recipe PDF."""
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    agent = Agent(knowledge=kb)
    # The filter is supplied per-call rather than on the agent.
    response = agent.run("Tell me about Cape Malay curry recipe", knowledge_filters={"cuisine": "Cape"}, markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    assert len(lowered) > 50
    # Cape-cuisine terms should appear...
    assert any(term in lowered for term in ("cape malay", "curry", "turmeric", "cinnamon"))
    # ...while Thai dishes must be filtered out.
    assert all(term not in lowered for term in ("pad thai", "tom kha gai", "galangal"))
def test_pdf_url_knowledge_base_with_invalid_filter(setup_vector_db, setup_contents_db):
    """Test filtering PDF URL knowledge base with invalid filters.

    An unknown filter key should be dropped rather than forwarded to the
    knowledge-base search tool.
    """
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    # Initialize agent with invalid filters
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about recipes in the document", markdown=True)
    assert response.content is not None
    response_content = response.content.lower()
    # Loose length check only: LLM output is nondeterministic.
    assert len(response_content) > 50
    # Check the tool calls to verify the invalid filter was not used
    tool_calls = []
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [
        call
        for call in tool_calls
        if call.get("type") == "function" and call["function"]["name"] == "search_knowledge_base"
    ]
    # Check if any of the search_knowledge_base calls had the invalid filter
    found_invalid_filters = False
    for call in function_calls:
        # NOTE(review): "arguments" appears to be a serialized (JSON) string — the "{}"
        # default suggests so — hence the substring scan; confirm against the tool-call schema.
        call_args = call["function"].get("arguments", "{}")
        if "nonexistent_filter" in call_args:
            found_invalid_filters = True
    # Assert that the invalid filter was not used in the actual calls
    assert not found_invalid_filters
def test_pdf_url_knowledge_base_filter_override(setup_vector_db, setup_contents_db):
    """A run-level filter must take precedence over the agent-level filter."""
    kb = prepare_knowledge(setup_vector_db, setup_contents_db)
    # Agent default scope: Cape cuisine.
    agent = Agent(knowledge=kb, knowledge_filters={"cuisine": "Cape"})
    # The per-call Thai filter should win over the agent default.
    response = agent.run("Tell me about how to make Pad Thai", knowledge_filters={"cuisine": "Thai"}, markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    assert len(lowered) > 50
    # Thai cuisine should be mentioned instead of Cape cuisine.
    assert any(term in lowered for term in ("thai", "tom kha", "pad thai", "lemongrass"))
    assert all(term not in lowered for term in ("cape malay", "bobotie", "apricot"))
@pytest.mark.asyncio
async def test_async_pdf_url_knowledge_base_with_valid_filter(setup_vector_db, setup_contents_db):
    """Async variant: an agent-level Thai filter restricts retrieval to the Thai PDF."""
    kb = await aprepare_knowledge(setup_vector_db, setup_contents_db)
    agent = Agent(knowledge=kb, knowledge_filters={"cuisine": "Thai"})
    response = await agent.arun("Tell me about Tom Kha Gai recipe", markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    assert len(lowered) > 50
    # Thai ingredients should appear...
    assert any(term in lowered for term in ("coconut milk", "galangal", "lemongrass", "tom kha"))
    # ...and Cape-cuisine dishes must not.
    assert all(term not in lowered for term in ("cape malay curry", "bobotie", "apricot jam"))
@pytest.mark.asyncio
async def test_async_pdf_url_knowledge_base_with_run_level_filter(setup_vector_db, setup_contents_db):
    """Async variant: a per-call Cape filter scopes retrieval to the Cape PDF."""
    kb = await aprepare_knowledge(setup_vector_db, setup_contents_db)
    agent = Agent(knowledge=kb)
    # The filter is supplied per-call rather than on the agent.
    response = await agent.arun(
        "Tell me about Cape Malay curry recipe", knowledge_filters={"cuisine": "Cape"}, markdown=True
    )
    assert response.content is not None
    lowered = response.content.lower()
    assert len(lowered) > 50
    # Cape-cuisine terms should appear...
    assert any(term in lowered for term in ("cape malay", "curry", "turmeric", "cinnamon"))
    # ...while Thai dishes must be filtered out.
    assert all(term not in lowered for term in ("pad thai", "tom kha gai", "galangal"))
@pytest.mark.asyncio
async def test_async_pdf_url_knowledge_base_with_invalid_filter(setup_vector_db, setup_contents_db):
    """Test asynchronously filtering PDF URL knowledge base with invalid filters.

    An unknown filter key should be dropped rather than forwarded to the
    knowledge-base search tool.
    """
    kb = await aprepare_knowledge(setup_vector_db, setup_contents_db)
    # Initialize agent with invalid filters
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = await agent.arun("Tell me about recipes in the document", markdown=True)
    assert response.content is not None
    response_content = response.content.lower()
    # Loose length check only: LLM output is nondeterministic.
    assert len(response_content) > 50
    # Check the tool calls to verify the invalid filter was not used
    tool_calls = []
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [
        call
        for call in tool_calls
        if call.get("type") == "function" and call["function"]["name"] == "search_knowledge_base"
    ]
    # Check if any of the search_knowledge_base calls had the invalid filter
    found_invalid_filters = False
    for call in function_calls:
        # NOTE(review): "arguments" appears to be a serialized (JSON) string — the "{}"
        # default suggests so — hence the substring scan; confirm against the tool-call schema.
        call_args = call["function"].get("arguments", "{}")
        if "nonexistent_filter" in call_args:
            found_invalid_filters = True
    # Assert that the invalid filter was not used in the actual calls
    assert not found_invalid_filters
@pytest.mark.asyncio
async def test_async_pdf_url_knowledge_base_filter_override(setup_vector_db, setup_contents_db):
    """Async variant: a run-level filter must take precedence over the agent-level filter."""
    kb = await aprepare_knowledge(setup_vector_db, setup_contents_db)
    # Agent default scope: Cape cuisine.
    agent = Agent(knowledge=kb, knowledge_filters={"cuisine": "Cape"})
    # The per-call Thai filter should win over the agent default.
    response = await agent.arun("Tell me how to make Pad thai", knowledge_filters={"cuisine": "Thai"}, markdown=True)
    assert response.content is not None
    lowered = response.content.lower()
    assert len(lowered) > 50
    # Thai cuisine should be mentioned instead of Cape cuisine.
    assert any(term in lowered for term in ("thai", "tom kha", "pad thai", "lemongrass"))
    assert all(term not in lowered for term in ("cape malay", "bobotie", "apricot"))
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_pdf_knowledge.py",
"license": "Apache License 2.0",
"lines": 565,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_text_knowledge.py | import os
from pathlib import Path
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.lancedb import LanceDb
@pytest.fixture
def setup_vector_db():
    """Yield a uniquely-named temporary LanceDb table; dropped on teardown."""
    db = LanceDb(table_name=f"text_test_{os.urandom(4).hex()}", uri="tmp/lancedb")
    yield db
    # Teardown: remove the temporary table after the test finishes.
    db.drop()
def get_test_data_dir():
    """Return the path to the pg_essay.txt fixture.

    NOTE(review): despite the name, this returns a single text *file*, not a
    directory — callers pass it straight to Knowledge.insert(path=...).
    """
    return Path(__file__).parent / "data/pg_essay.txt"
def get_filtered_data_dir():
    """Return the directory holding the filter-test fixture documents."""
    return Path(__file__).parent.joinpath("data", "filters")
def prepare_knowledge_base(setup_vector_db):
    """Build a knowledge base preloaded with two CVs carrying per-user metadata."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = (
        ("cv_1.txt", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.txt", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    )
    for filename, meta in cv_specs:
        kb.insert(path=get_filtered_data_dir() / filename, metadata=meta)
    return kb
async def aprepare_knowledge_base(setup_vector_db):
    """Asynchronously build a knowledge base preloaded with two CVs and per-user metadata."""
    kb = Knowledge(vector_db=setup_vector_db)
    cv_specs = (
        ("cv_1.txt", {"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"}),
        ("cv_2.txt", {"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"}),
    )
    for filename, meta in cv_specs:
        await kb.ainsert(path=get_filtered_data_dir() / filename, metadata=meta)
    return kb
def test_text_knowledge_base_directory(setup_vector_db):
    """Load the pg_essay fixture and check the agent answers via knowledge search."""
    data_path = get_test_data_dir()
    kb = Knowledge(
        vector_db=setup_vector_db,
    )
    kb.insert(
        path=data_path,
    )
    assert setup_vector_db.exists()

    agent = Agent(knowledge=kb)
    response = agent.run("What are the key factors in doing great work?", markdown=True)

    assert response.messages is not None
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    fn_calls = [tc for tc in collected if tc.get("type") == "function"]
    assert any(tc["function"]["name"] == "search_knowledge_base" for tc in fn_calls)
@pytest.mark.asyncio
async def test_text_knowledge_base_async_directory(setup_vector_db):
    """Test asynchronously loading a directory of text files into the knowledge base."""
    text_dir = get_test_data_dir()
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert(
        path=text_dir,
    )
    assert await setup_vector_db.async_exists()
    agent = Agent(knowledge=kb)
    response = await agent.arun("What does Paul Graham say about great work?", markdown=True)
    tool_calls = []
    # Guard before iterating: response.messages is Optional (the sync sibling asserts this too).
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
# Knowledge-filter DX tests below: filters are supplied at agent initialization.
def test_text_knowledge_base_with_metadata_path(setup_vector_db):
    """Test loading text files with metadata using the new path structure."""
    kb = Knowledge(
        vector_db=setup_vector_db,
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_1.txt"),
        metadata={"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"},
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_2.txt"),
        metadata={"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"},
    )
    # Verify documents were loaded with metadata
    agent = Agent(knowledge=kb)
    response = agent.run(
        "Tell me about Jordan Mitchell's experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    # Guard before .lower(): response.content is Optional.
    assert response.content is not None
    assert (
        "entry" in response.content.lower()
        or "junior" in response.content.lower()
        # Bug fix: the needle must be lowercase — "Jordan" can never appear in a lowercased string.
        or "jordan" in response.content.lower()
    )
    assert "senior developer" not in response.content.lower()
def test_knowledge_base_with_metadata_path_invalid_filter(setup_vector_db):
    """Test filtering the knowledge base with invalid filters using the new path structure.

    An unknown filter key must be dropped rather than forwarded to the search tool.
    """
    kb = Knowledge(
        vector_db=setup_vector_db,
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_1.txt"),
        metadata={"user_id": "jordan_mitchell", "document_type": "cv", "experience_level": "entry"},
    )
    kb.insert(
        path=str(get_filtered_data_dir() / "cv_2.txt"),
        metadata={"user_id": "taylor_brooks", "document_type": "cv", "experience_level": "mid"},
    )
    # Initialize agent with invalid filters
    agent = Agent(knowledge=kb, knowledge_filters={"nonexistent_filter": "value"})
    response = agent.run("Tell me about the candidate's experience?", markdown=True)
    # Guard Optional fields before use (consistent with the pdf counterpart of this test).
    assert response.content is not None
    response_content = response.content.lower()
    assert len(response_content) > 50
    # Check the tool calls to verify the invalid filter was not used
    tool_calls = []
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [
        call
        for call in tool_calls
        if call.get("type") == "function" and call["function"]["name"] == "search_knowledge_base"
    ]
    found_invalid_filters = False
    for call in function_calls:
        call_args = call["function"].get("arguments", "{}")
        if "nonexistent_filter" in call_args:
            found_invalid_filters = True
    assert not found_invalid_filters
# Knowledge-filter DX tests below: filters are applied when loading the knowledge base.
def test_knowledge_base_with_valid_filter(setup_vector_db):
    """Test filtering knowledge base with valid filters."""
    kb = prepare_knowledge_base(setup_vector_db)
    # Initialize agent with filters for Jordan Mitchell
    agent = Agent(knowledge=kb, knowledge_filters={"user_id": "jordan_mitchell"})
    # Run a query that should only return results from Jordan Mitchell's CV
    response = agent.run("Tell me about the Jordan Mitchell's experience?", markdown=True)
    # Guard before .lower(): response.content is Optional (the pdf counterpart asserts this too).
    assert response.content is not None
    response_content = response.content
    # Jordan Mitchell's CV should mention entry-level experience
    assert (
        "entry-level" in response_content.lower()
        or "junior" in response_content.lower()
        or "jordan mitchell" in response_content.lower()
    )
    # Should not mention Taylor Brooks' experience as "senior developer"
    assert "senior developer" not in response_content.lower()
def test_knowledge_base_with_run_level_filter(setup_vector_db):
    """Test filtering knowledge base with filters passed at run time."""
    kb = prepare_knowledge_base(setup_vector_db)
    # Initialize agent without filters
    agent = Agent(knowledge=kb)
    # Run a query with filters provided at run time
    response = agent.run(
        "Tell me about Jordan Mitchell experience?", knowledge_filters={"user_id": "jordan_mitchell"}, markdown=True
    )
    # Guard before .lower(): response.content is Optional.
    assert response.content is not None
    response_content = response.content.lower()
    # Check that we have a response with actual content
    assert len(response_content) > 50
    # Fixed comment: the assertion checks the response DOES mention Jordan Mitchell's
    # experience (the previous comment said the opposite of the code).
    assert any(term in response_content for term in ["jordan mitchell", "entry-level", "junior"])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_text_knowledge.py",
"license": "Apache License 2.0",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_website_knowledge.py | import os
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.website_reader import WebsiteReader
from agno.vectordb.lancedb import LanceDb
@pytest.fixture
def setup_vector_db():
    """Yield a uniquely-named temporary LanceDb table; dropped on teardown."""
    db = LanceDb(table_name=f"website_test_{os.urandom(4).hex()}", uri="tmp/lancedb")
    yield db
    # Teardown: remove the temporary table after the test finishes.
    db.drop()
@pytest.mark.skip(reason="Skipping test")
def test_website_knowledge_base_directory(setup_vector_db):
    """Test loading multiple websites into the knowledge base."""
    urls = ["https://docs.agno.com/basics/agents/overview", "https://fastapi.tiangolo.com/"]
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(
        urls=urls,
        reader=WebsiteReader(max_links=1),
    )
    assert setup_vector_db.exists()
    agent = Agent(knowledge=kb)
    response = agent.run("What are agents in Agno and what levels are there?", markdown=True)
    tool_calls = []
    # Guard before iterating: response.messages is Optional.
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
def test_website_knowledge_base_single_url(setup_vector_db):
    """Test loading a single website into the knowledge base."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(
        urls=["https://docs.agno.com/basics/agents/overview"],
        reader=WebsiteReader(max_links=1),
    )
    assert setup_vector_db.exists()
    agent = Agent(knowledge=kb)
    response = agent.run("How do I create a basic agent in Agno?", markdown=True)
    tool_calls = []
    # Guard before iterating: response.messages is Optional.
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
@pytest.mark.skip(reason="Skipping test")
@pytest.mark.asyncio
async def test_website_knowledge_base_async_directory(setup_vector_db):
    """Test asynchronously loading multiple websites into the knowledge base."""
    urls = ["https://docs.agno.com/basics/agents/overview", "https://fastapi.tiangolo.com/"]
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert_many(
        urls=urls,
        reader=WebsiteReader(max_links=1),
    )
    assert await setup_vector_db.async_exists()
    agent = Agent(
        knowledge=kb,
        search_knowledge=True,
    )
    response = await agent.arun("What are agents in Agno and what levels are there?", markdown=True)
    tool_calls = []
    # Guard before iterating: response.messages is Optional.
    assert response.messages is not None
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    assert "search_knowledge_base" in [
        call["function"]["name"] for call in tool_calls if call.get("type") == "function"
    ]
@pytest.mark.asyncio
async def test_website_knowledge_base_async_single_url(setup_vector_db):
    """Asynchronously load a single website and verify knowledge search is used."""
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert_many(
        urls=["https://docs.agno.com/basics/agents/overview"],
        reader=WebsiteReader(max_links=1),
    )
    assert await setup_vector_db.async_exists()

    agent = Agent(knowledge=kb, search_knowledge=True)
    response = await agent.arun("How do I create a basic agent in Agno?", markdown=True)
    flattened = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert "search_knowledge_base" in [
        tc["function"]["name"] for tc in flattened if tc.get("type") == "function"
    ]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_website_knowledge.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_youtube_knowledge.py | import os
import pytest
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.lancedb import LanceDb
@pytest.fixture
def setup_vector_db():
    """Provide a randomly named LanceDb table and drop it after the test."""
    db = LanceDb(table_name=f"youtube_test_{os.urandom(4).hex()}", uri="tmp/lancedb")
    yield db
    # Drop the table so each test run starts from an empty store.
    db.drop()
@pytest.mark.skip(reason="They block requests from CI")
def test_youtube_knowledge_base_directory(setup_vector_db):
    """Test loading multiple YouTube videos into the knowledge base.

    Inserts two video URLs, then verifies that answering a question routes
    through the ``search_knowledge_base`` tool.
    """
    urls = ["https://www.youtube.com/watch?v=NwZ26lxl8wU", "https://www.youtube.com/watch?v=lrg8ZWI7MCg"]
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(
        urls=urls,
    )
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() > 0
    agent = Agent(knowledge=kb, search_knowledge=True)
    response = agent.run(
        "What is the major focus of the knowledge provided in both the videos, explain briefly.", markdown=True
    )
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    # Fix: removed a leftover debug print of the collected function calls.
    function_calls = [call for call in tool_calls if call.get("type") == "function"]
    assert any(call["function"]["name"] == "search_knowledge_base" for call in function_calls)
@pytest.mark.skip(reason="They block requests from CI")
def test_youtube_knowledge_base_single_url(setup_vector_db):
    """Load one YouTube video and verify the agent searches the knowledge base."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(
        urls=["https://www.youtube.com/watch?v=NwZ26lxl8wU"],
    )
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() > 0

    agent = Agent(
        knowledge=kb,
        search_knowledge=True,
        instructions=[
            "You are a helpful assistant that can answer questions about the video.",
            "You can use the search_knowledge_base tool to search the knowledge base of videos for information.",
        ],
    )
    response = agent.run("What is the major focus of the knowledge provided in the video?", markdown=True)
    # Flatten tool calls from all messages and look for the knowledge search.
    collected = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert any(
        tc["function"]["name"] == "search_knowledge_base" for tc in collected if tc.get("type") == "function"
    )
@pytest.mark.skip(reason="They block requests from CI")
@pytest.mark.asyncio
async def test_youtube_knowledge_base_async_directory(setup_vector_db):
    """Test asynchronously loading multiple YouTube videos.

    Fix: use the async insert API (``await kb.ainsert_many``) — this async test
    previously called the synchronous ``insert_many``, unlike every other async
    test in this module.
    """
    urls = ["https://www.youtube.com/watch?v=NwZ26lxl8wU", "https://www.youtube.com/watch?v=lrg8ZWI7MCg"]
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert_many(
        urls=urls,
    )
    assert await setup_vector_db.async_exists()
    assert await setup_vector_db.async_get_count() > 0
    agent = Agent(
        knowledge=kb,
        search_knowledge=True,
        instructions=[
            "You are a helpful assistant that can answer questions about the video.",
            "You can use the search_knowledge_base tool to search the knowledge base of videos for information.",
        ],
    )
    response = await agent.arun(
        "What is the major focus of the knowledge provided in both the videos, explain briefly.", markdown=True
    )
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    assert "search_knowledge_base" in [
        call["function"]["name"] for call in tool_calls if call.get("type") == "function"
    ]
@pytest.mark.skip(reason="They block requests from CI")
@pytest.mark.asyncio
async def test_youtube_knowledge_base_async_single_url(setup_vector_db):
    """Asynchronously load a single YouTube video and verify knowledge search."""
    kb = Knowledge(vector_db=setup_vector_db)
    await kb.ainsert(
        url="https://www.youtube.com/watch?v=lrg8ZWI7MCg",
    )
    assert await setup_vector_db.async_exists()
    assert await setup_vector_db.async_get_count() > 0

    agent = Agent(
        knowledge=kb,
        search_knowledge=True,  # Keep for async
        instructions=[
            "You are a helpful assistant that can answer questions about the video.",
            "You can use the search_knowledge_base tool to search the knowledge base of videos for information.",
        ],
    )
    response = await agent.arun("What is the major focus of the knowledge provided in the video?", markdown=True)
    flattened = [tc for msg in response.messages if msg.tool_calls for tc in msg.tool_calls]
    assert "search_knowledge_base" in [
        tc["function"]["name"] for tc in flattened if tc.get("type") == "function"
    ]
def test_youtube_knowledge_base_empty_urls(setup_vector_db):
    """Inserting an empty URL list should leave the table present but empty."""
    kb = Knowledge(vector_db=setup_vector_db)
    kb.insert_many(urls=[])
    assert setup_vector_db.exists()
    assert setup_vector_db.get_count() == 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_youtube_knowledge.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/managers/test_memory_manager.py | import os
import tempfile
from datetime import datetime
import pytest
from agno.db.sqlite import SqliteDb
from agno.memory import MemoryManager, UserMemory
from agno.memory.strategies.types import MemoryOptimizationStrategyType
from agno.models.message import Message
from agno.models.openai import OpenAIChat
@pytest.fixture
def temp_db_file():
    """Yield a path to a temporary SQLite file, removing it after the test."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as handle:
        path = handle.name
        yield path
    # delete=False means we own cleanup of the file.
    if os.path.exists(path):
        os.unlink(path)
@pytest.fixture
def memory_db(temp_db_file):
    """Provide a SqliteDb bound to the temporary database file."""
    return SqliteDb(db_file=temp_db_file)
@pytest.fixture
def model():
    """Create an OpenAI chat model (gpt-4o-mini) for testing."""
    return OpenAIChat(id="gpt-4o-mini")
@pytest.fixture
def memory_with_db(model, memory_db):
    """Wire a MemoryManager to the test model and SQLite database."""
    manager = MemoryManager(model=model, db=memory_db)
    return manager
def test_add_user_memory_with_db(memory_with_db: MemoryManager):
    """Adding a user memory should persist it and survive a fresh manager instance."""
    new_memory = UserMemory(
        user_id="test_user",
        memory="The user's name is John Doe",
        topics=["name", "user"],
        updated_at=datetime.now(),
    )
    memory_id = memory_with_db.add_user_memory(memory=new_memory, user_id="test_user")
    assert memory_id is not None

    stored = memory_with_db.get_user_memory(user_id="test_user", memory_id=memory_id)
    assert stored is not None
    assert stored.memory == "The user's name is John Doe"

    # A second manager backed by the same DB must see the persisted memory.
    reloaded_manager = MemoryManager(model=memory_with_db.model, db=memory_with_db.db)
    reloaded = reloaded_manager.get_user_memory(user_id="test_user", memory_id=memory_id)
    assert reloaded is not None
    assert reloaded.memory == "The user's name is John Doe"
def test_create_user_memory_with_db(memory_with_db):
    """Creating memories from a plain string should persist them for the user."""
    prompt = "My name is John Doe and I like to play basketball"
    created = memory_with_db.create_user_memories(prompt, user_id="test_user")
    assert len(created) > 0

    stored = memory_with_db.get_user_memories("test_user")
    assert len(stored) > 0
    # The generated memory records the source input and extracts the name.
    assert stored[0].input == prompt
    assert "john doe" in stored[0].memory.lower()
def test_create_user_memories_with_db(memory_with_db):
    """Creating memories from Message objects should persist across instances."""
    conversation = [
        Message(role="user", content="My name is John Doe"),
        Message(role="user", content="I like to play basketball"),
    ]
    created = memory_with_db.create_user_memories(messages=conversation, user_id="test_user")
    assert len(created) > 0
    assert len(memory_with_db.get_user_memories(user_id="test_user")) > 0

    # Reload from the same DB and confirm the memories are still there.
    fresh = MemoryManager(model=memory_with_db.model, db=memory_with_db.db)
    assert len(fresh.get_user_memories(user_id="test_user")) > 0
@pytest.mark.asyncio
async def test_acreate_user_memory_with_db(memory_with_db):
    """Async memory creation from a string should persist across manager instances."""
    prompt = "My name is John Doe and I like to play basketball"
    created = await memory_with_db.acreate_user_memories(prompt, user_id="test_user")
    assert len(created) > 0
    assert len(memory_with_db.get_user_memories(user_id="test_user")) > 0

    # A fresh manager sharing the DB must see the persisted memory.
    fresh = MemoryManager(model=memory_with_db.model, db=memory_with_db.db)
    assert len(fresh.get_user_memories(user_id="test_user")) > 0
@pytest.mark.asyncio
async def test_acreate_user_memories_with_db(memory_with_db):
    """Async memory creation from Message objects should persist across instances."""
    conversation = [
        Message(role="user", content="My name is John Doe"),
        Message(role="user", content="I like to play basketball"),
        Message(role="user", content="My favorite color is blue"),
    ]
    created = await memory_with_db.acreate_user_memories(messages=conversation, user_id="test_user")
    assert len(created) > 0
    assert len(memory_with_db.get_user_memories("test_user")) > 0

    # A fresh manager sharing the DB must see the persisted memories.
    fresh = MemoryManager(model=memory_with_db.model, db=memory_with_db.db)
    assert len(fresh.get_user_memories(user_id="test_user")) > 0
def test_search_user_memories_semantic(memory_with_db):
    """Semantic search should surface memories relevant to the query."""
    seed_data = [
        ("The user's name is John Doe", ["name", "user"]),
        ("The user likes to play basketball", ["sports", "hobbies"]),
        ("The user's favorite color is blue", ["preferences", "colors"]),
    ]
    for text, topics in seed_data:
        memory_with_db.add_user_memory(
            memory=UserMemory(memory=text, topics=topics, updated_at=datetime.now()),
            user_id="test_user",
        )

    hits = memory_with_db.search_user_memories(
        query="sports and hobbies", retrieval_method="semantic", user_id="test_user"
    )
    # A sports-themed query should rank the basketball memory among the hits.
    assert len(hits) > 0
    assert any("basketball" in hit.memory for hit in hits)
def test_memory_persistence_across_instances(model, memory_db):
    """Memories written via one manager must be readable from another sharing the DB."""
    writer = MemoryManager(model=model, db=memory_db)
    memory_id = writer.add_user_memory(
        memory=UserMemory(memory="The user's name is John Doe", topics=["name", "user"], updated_at=datetime.now()),
        user_id="test_user",
    )

    reader = MemoryManager(model=model, db=memory_db)
    fetched = reader.get_user_memory(user_id="test_user", memory_id=memory_id)
    assert fetched is not None
    assert fetched.memory == "The user's name is John Doe"
def test_memory_operations_with_db(memory_with_db):
    """Replace and delete operations should be reflected in the database."""
    memory_id = memory_with_db.add_user_memory(
        memory=UserMemory(memory="The user's name is John Doe", topics=["name", "user"], updated_at=datetime.now()),
        user_id="test_user",
    )

    # Replace the stored memory with an updated one.
    memory_with_db.replace_user_memory(
        memory_id=memory_id,
        memory=UserMemory(memory="The user's name is Jane Doe", topics=["name", "user"], updated_at=datetime.now()),
        user_id="test_user",
    )
    replaced = memory_with_db.get_user_memory(user_id="test_user", memory_id=memory_id)
    assert replaced.memory == "The user's name is Jane Doe"

    # Delete it and confirm it is gone, including from a fresh manager.
    memory_with_db.delete_user_memory(user_id="test_user", memory_id=memory_id)
    assert memory_with_db.get_user_memory(user_id="test_user", memory_id=memory_id) is None
    fresh = MemoryManager(model=memory_with_db.model, db=memory_with_db.db)
    assert fresh.get_user_memory(user_id="test_user", memory_id=memory_id) is None
def test_search_user_memories_last_n(memory_with_db):
    """The last_n retrieval method should return the most recent memories."""
    for day, text in [(1, "First memory"), (2, "Second memory"), (3, "Third memory")]:
        memory_with_db.add_user_memory(
            memory=UserMemory(memory=text, topics=["test"], updated_at=datetime(2023, 1, day)),
            user_id="test_user",
        )

    recent = memory_with_db.search_user_memories(retrieval_method="last_n", limit=2, user_id="test_user")
    # The two most recent memories come back in chronological order.
    assert [m.memory for m in recent] == ["Second memory", "Third memory"]
def test_search_user_memories_first_n(memory_with_db):
    """The first_n retrieval method should return the oldest memories."""
    for day, text in [(1, "First memory"), (2, "Second memory"), (3, "Third memory")]:
        memory_with_db.add_user_memory(
            memory=UserMemory(memory=text, topics=["test"], updated_at=datetime(2023, 1, day)),
            user_id="test_user",
        )

    oldest = memory_with_db.search_user_memories(retrieval_method="first_n", limit=2, user_id="test_user")
    # The two oldest memories come back in chronological order.
    assert [m.memory for m in oldest] == ["First memory", "Second memory"]
def test_update_memory_task_with_db(memory_with_db):
    """Test updating memory with a task using database persistence.

    Seeds three memories, then applies an "add" task and a "delete" task and
    verifies the stored memories reflect each change.
    """
    # Add multiple memories with different content
    memory1 = UserMemory(memory="The user's name is John Doe", topics=["name", "user"], updated_at=datetime.now())
    memory2 = UserMemory(
        memory="The user likes to play basketball", topics=["sports", "hobbies"], updated_at=datetime.now()
    )
    memory3 = UserMemory(
        memory="The user's favorite color is blue", topics=["preferences", "colors"], updated_at=datetime.now()
    )
    memory_with_db.add_user_memory(memory=memory1, user_id="test_user")
    memory_with_db.add_user_memory(memory=memory2, user_id="test_user")
    memory_with_db.add_user_memory(memory=memory3, user_id="test_user")

    # Task that should add new information.
    task = "The user's age is 30"
    response = memory_with_db.update_memory_task(task=task, user_id="test_user")
    assert response is not None
    memories = memory_with_db.get_user_memories("test_user")
    assert len(memories) > 0
    assert any("30" in memory.memory for memory in memories)

    # Task that should remove the user's name from memory.
    response = memory_with_db.update_memory_task(task="Delete any memories of the user's name", user_id="test_user")
    assert response is not None
    memories = memory_with_db.get_user_memories("test_user")
    assert len(memories) > 0
    # Fix: use all() — the previous `any("John Doe" not in ...)` passed as soon
    # as one memory lacked the name and did not actually verify the deletion.
    assert all("John Doe" not in memory.memory for memory in memories)
@pytest.mark.flaky(max_runs=3)
@pytest.mark.asyncio
async def test_aupdate_memory_task_with_db(memory_with_db):
    """Test async updating memory with a task using database persistence.

    Seeds three memories, then applies an "add" task and a "delete" task via
    the async API and verifies the stored memories reflect each change.
    """
    # Add multiple memories with different content
    memory1 = UserMemory(memory="The user's name is John Doe", topics=["name", "user"], updated_at=datetime.now())
    memory2 = UserMemory(
        memory="The user likes to play basketball", topics=["sports", "hobbies"], updated_at=datetime.now()
    )
    memory3 = UserMemory(
        memory="The user's favorite color is blue", topics=["preferences", "colors"], updated_at=datetime.now()
    )
    memory_with_db.add_user_memory(memory=memory1, user_id="test_user")
    memory_with_db.add_user_memory(memory=memory2, user_id="test_user")
    memory_with_db.add_user_memory(memory=memory3, user_id="test_user")

    # Task that should add new information.
    task = "The user's occupation is software engineer"
    response = await memory_with_db.aupdate_memory_task(task=task, user_id="test_user")
    assert response is not None
    memories = memory_with_db.get_user_memories("test_user")
    assert len(memories) > 0
    assert any(
        "occupation" in memory.memory.lower() and "software engineer" in memory.memory.lower() for memory in memories
    )

    # Task that should remove the user's name from memory.
    response = await memory_with_db.aupdate_memory_task(
        task="Delete any memories of the user's name", user_id="test_user"
    )
    assert response is not None
    memories = memory_with_db.get_user_memories("test_user")
    assert len(memories) > 0
    # Fix: use all() — the previous `any("John Doe" not in ...)` passed as soon
    # as one memory lacked the name and did not actually verify the deletion.
    assert all("John Doe" not in memory.memory for memory in memories)
def test_optimize_memories_with_db(memory_with_db):
    """SUMMARIZE optimization with apply=True should shrink and replace stored memories."""
    seed_data = [
        ("The user's name is John Doe", ["name", "user"]),
        ("The user likes to play basketball", ["sports", "hobbies"]),
        ("The user's favorite color is blue", ["preferences", "colors"]),
    ]
    for text, topics in seed_data:
        memory_with_db.add_user_memory(
            memory=UserMemory(memory=text, topics=topics, user_id="test_user", updated_at=datetime.now()),
            user_id="test_user",
        )
    original_count = len(memory_with_db.get_user_memories(user_id="test_user"))

    optimized = memory_with_db.optimize_memories(
        user_id="test_user",
        strategy=MemoryOptimizationStrategyType.SUMMARIZE,
        apply=True,
    )
    assert len(optimized) > 0
    assert all(isinstance(mem, UserMemory) for mem in optimized)

    # The optimized set replaces the originals and should be smaller.
    remaining = memory_with_db.get_user_memories(user_id="test_user")
    assert len(remaining) == len(optimized)
    assert len(remaining) < original_count

    # Some of the original facts should survive summarization.
    combined = " ".join(mem.memory.lower() for mem in optimized)
    assert "john doe" in combined or "basketball" in combined or "blue" in combined
def test_optimize_memories_with_db_apply_false(memory_with_db):
    """With apply=False, optimization must not modify the stored memories."""
    seed_data = [
        ("The user's name is John Doe", ["name", "user"]),
        ("The user likes to play basketball", ["sports", "hobbies"]),
    ]
    for text, topics in seed_data:
        memory_with_db.add_user_memory(
            memory=UserMemory(memory=text, topics=topics, user_id="test_user", updated_at=datetime.now()),
            user_id="test_user",
        )
    before = memory_with_db.get_user_memories(user_id="test_user")

    optimized = memory_with_db.optimize_memories(
        user_id="test_user",
        strategy=MemoryOptimizationStrategyType.SUMMARIZE,
        apply=False,
    )
    assert len(optimized) > 0

    # The database contents must be untouched.
    after = memory_with_db.get_user_memories(user_id="test_user")
    assert len(after) == len(before)
    assert after == before
def test_optimize_memories_with_db_empty(memory_with_db):
    """Optimizing a user with no stored memories should return an empty list."""
    assert memory_with_db.optimize_memories(user_id="test_user", apply=False) == []
def test_optimize_memories_with_db_default_user_id(memory_with_db):
    """When user_id is omitted, optimization should target the "default" user."""
    seed = UserMemory(memory="Default user memory", topics=["test"], user_id="default", updated_at=datetime.now())
    memory_with_db.add_user_memory(memory=seed, user_id="default")

    results = memory_with_db.optimize_memories(strategy=MemoryOptimizationStrategyType.SUMMARIZE, apply=False)
    assert len(results) > 0
def test_optimize_memories_persistence_across_instances(model, memory_db):
    """Optimized memories should be visible from a second manager on the same DB."""
    manager = MemoryManager(model=model, db=memory_db)
    for text, topics in [("The user's name is John Doe", ["name"]), ("The user likes basketball", ["sports"])]:
        manager.add_user_memory(
            memory=UserMemory(memory=text, topics=topics, user_id="test_user", updated_at=datetime.now()),
            user_id="test_user",
        )
    optimized = manager.optimize_memories(user_id="test_user", apply=True)

    # A fresh manager sharing the DB must see the optimized set.
    other = MemoryManager(model=model, db=memory_db)
    persisted = other.get_user_memories(user_id="test_user")
    assert len(persisted) == len(optimized)
    assert persisted[0].memory_id == optimized[0].memory_id
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/managers/test_memory_manager.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/managers/test_session_summary_manager.py | import os
import tempfile
from datetime import datetime
from unittest.mock import Mock, patch
import pytest
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.run.agent import Message, RunOutput
from agno.session.agent import AgentSession
from agno.session.summary import SessionSummary, SessionSummaryManager, SessionSummaryResponse
@pytest.fixture
def temp_db_file():
    """Yield the path of a temporary SQLite database file, deleting it afterwards."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp:
        db_path = tmp.name
        yield db_path
    # delete=False means the fixture owns cleanup.
    if os.path.exists(db_path):
        os.unlink(db_path)
@pytest.fixture
def session_db(temp_db_file):
    """Provide a SqliteDb session store bound to the temporary file."""
    return SqliteDb(db_file=temp_db_file)
@pytest.fixture
def model():
    """Provide the OpenAI chat model (gpt-4o-mini) used by these tests."""
    chat_model = OpenAIChat(id="gpt-4o-mini")
    return chat_model
@pytest.fixture
def session_summary_manager(model):
    """Build a SessionSummaryManager around the test model."""
    manager = SessionSummaryManager(model=model)
    return manager
@pytest.fixture
def mock_agent_session():
    """Create a mock agent session with sample messages.

    The mock only stubs get_messages() — a six-message Python tutoring
    conversation — and starts with no existing summary.
    """
    session = Mock(spec=AgentSession)
    session.get_messages.return_value = [
        Message(role="user", content="Hello, I need help with Python programming."),
        Message(
            role="assistant",
            content="I'd be happy to help you with Python! What specific topic would you like to learn about?",
        ),
        Message(role="user", content="I want to learn about list comprehensions."),
        Message(
            role="assistant",
            content="List comprehensions are a concise way to create lists in Python. Here's the basic syntax: [expression for item in iterable if condition].",
        ),
        Message(role="user", content="Can you give me an example?"),
        Message(
            role="assistant",
            content="Sure! Here's an example: squares = [x**2 for x in range(10)] creates a list of squares from 0 to 81.",
        ),
    ]
    # No pre-existing summary — tests exercise summary generation from scratch.
    session.summary = None
    return session
@pytest.fixture
def agent_session_with_db():
    """Create an agent session with sample runs and messages.

    Builds a real (non-mock) AgentSession containing two completed runs, each
    holding one user/assistant message pair.
    """
    from agno.run.base import RunStatus

    # Create sample messages
    messages1 = [
        Message(role="user", content="Hello, I need help with Python programming."),
        Message(
            role="assistant",
            content="I'd be happy to help you with Python! What specific topic would you like to learn about?",
        ),
    ]
    messages2 = [
        Message(role="user", content="I want to learn about list comprehensions."),
        Message(
            role="assistant",
            content="List comprehensions are a concise way to create lists in Python. Here's the basic syntax: [expression for item in iterable if condition].",
        ),
    ]
    # Create sample runs — both completed so they count as finished turns
    run1 = RunOutput(run_id="run_1", messages=messages1, status=RunStatus.completed)
    run2 = RunOutput(run_id="run_2", messages=messages2, status=RunStatus.completed)
    # Create agent session
    session = AgentSession(session_id="test_session", agent_id="test_agent", user_id="test_user", runs=[run1, run2])
    return session
def test_get_response_format_native_structured_outputs(session_summary_manager):
    """Models with native structured outputs should receive the response model class."""
    fake_model = Mock()
    fake_model.supports_native_structured_outputs = True
    fake_model.supports_json_schema_outputs = False

    assert session_summary_manager.get_response_format(fake_model) == SessionSummaryResponse
def test_get_response_format_json_schema_outputs(session_summary_manager):
    """Models with JSON-schema output support should receive a json_schema format dict."""
    fake_model = Mock()
    fake_model.supports_native_structured_outputs = False
    fake_model.supports_json_schema_outputs = True

    fmt = session_summary_manager.get_response_format(fake_model)
    assert fmt["type"] == "json_schema"
    assert fmt["json_schema"]["name"] == SessionSummaryResponse.__name__
def test_get_response_format_json_object_fallback(session_summary_manager):
    """Models without any structured output support fall back to json_object."""
    fake_model = Mock()
    fake_model.supports_native_structured_outputs = False
    fake_model.supports_json_schema_outputs = False

    assert session_summary_manager.get_response_format(fake_model) == {"type": "json_object"}
def test_get_system_message_with_custom_prompt(session_summary_manager, mock_agent_session):
    """A custom summary prompt must be embedded in the generated system message."""
    custom_prompt = "Summarize this conversation in a specific way."
    session_summary_manager.session_summary_prompt = custom_prompt

    msg = session_summary_manager.get_system_message(mock_agent_session.get_messages(), {"type": "json_object"})
    assert msg.role == "system"
    assert custom_prompt in msg.content
    assert "<conversation>" in msg.content
def test_get_system_message_default_prompt(session_summary_manager, mock_agent_session):
    """Without a custom prompt, the default analysis prompt and conversation are used."""
    msg = session_summary_manager.get_system_message(mock_agent_session.get_messages(), SessionSummaryResponse)
    assert msg.role == "system"
    # The default prompt and the transcribed conversation must both appear.
    for expected in (
        "Analyze the following conversation",
        "<conversation>",
        "User: Hello, I need help with Python programming.",
        "Assistant: I'd be happy to help you with Python!",
    ):
        assert expected in msg.content
def test_get_system_message_with_json_object_format(session_summary_manager, mock_agent_session):
    """With a json_object format, the JSON output prompt is appended to the system message."""
    conversation = mock_agent_session.get_messages()
    with patch("agno.utils.prompts.get_json_output_prompt") as mock_json_prompt:
        mock_json_prompt.return_value = "\nPlease respond with valid JSON."
        msg = session_summary_manager.get_system_message(conversation, {"type": "json_object"})
        assert "Please respond with valid JSON." in msg.content
        mock_json_prompt.assert_called_once()
def test_prepare_summary_messages(session_summary_manager, mock_agent_session):
    """_prepare_summary_messages should yield a system prompt followed by one user turn."""
    prepared = session_summary_manager._prepare_summary_messages(mock_agent_session)
    assert len(prepared) == 2
    system_msg, user_msg = prepared
    assert system_msg.role == "system"
    assert user_msg.role == "user"
    assert user_msg.content == "Provide the summary of the conversation."
def test_process_summary_response_native_structured(session_summary_manager):
    """A natively-parsed response should be converted straight into a SessionSummary."""
    parsed_payload = SessionSummaryResponse(
        summary="Discussion about Python list comprehensions", topics=["Python", "programming", "list comprehensions"]
    )
    fake_response = Mock()
    fake_response.parsed = parsed_payload
    # The model claims native structured-output support, so `parsed` is used directly.
    structured_model = Mock()
    structured_model.supports_native_structured_outputs = True
    summary = session_summary_manager._process_summary_response(fake_response, structured_model)
    assert isinstance(summary, SessionSummary)
    assert summary.summary == "Discussion about Python list comprehensions"
    assert summary.topics == ["Python", "programming", "list comprehensions"]
    assert summary.updated_at is not None
def test_process_summary_response_string_content(session_summary_manager):
    """Test _process_summary_response with string content."""
    # Mock response with string content only (no pre-parsed model)
    mock_response = Mock()
    mock_response.content = '{"summary": "Python programming help", "topics": ["Python", "programming"]}'
    mock_response.parsed = None
    # Mock model without native structured outputs, forcing the string-parsing path
    model = Mock()
    model.supports_native_structured_outputs = False
    # Patch the string parser so the test controls what the JSON content parses into
    with patch("agno.utils.string.parse_response_model_str") as mock_parse:
        mock_parse.return_value = SessionSummaryResponse(
            summary="Python programming help", topics=["Python", "programming"]
        )
        result = session_summary_manager._process_summary_response(mock_response, model)
        assert isinstance(result, SessionSummary)
        assert result.summary == "Python programming help"
        assert result.topics == ["Python", "programming"]
def test_process_summary_response_parse_failure(session_summary_manager):
    """Test _process_summary_response with parsing failure."""
    # Mock response with unparseable content
    mock_response = Mock()
    mock_response.content = "invalid json content"
    mock_response.parsed = None
    # Mock model without native structured outputs, so the string-parsing path runs
    model = Mock()
    model.supports_native_structured_outputs = False
    # Force the parser to fail; the manager should surface that as None
    with patch("agno.utils.string.parse_response_model_str") as mock_parse:
        mock_parse.return_value = None
        result = session_summary_manager._process_summary_response(mock_response, model)
        assert result is None
def test_process_summary_response_none_input(session_summary_manager):
    """Passing None as the model response should simply produce None."""
    assert session_summary_manager._process_summary_response(None, Mock()) is None
def test_create_session_summary_success(session_summary_manager, mock_agent_session):
    """Test successful session summary creation."""
    # Mock model response carrying a pre-parsed SessionSummaryResponse
    mock_response = Mock()
    mock_parsed = SessionSummaryResponse(
        summary="Discussion about Python list comprehensions and programming concepts",
        topics=["Python", "programming", "list comprehensions", "examples"],
    )
    mock_response.parsed = mock_parsed
    # Mock model advertises native structured outputs so `parsed` is consumed directly
    session_summary_manager.model.supports_native_structured_outputs = True
    with patch.object(session_summary_manager.model, "response", return_value=mock_response):
        result = session_summary_manager.create_session_summary(mock_agent_session)
        assert isinstance(result, SessionSummary)
        assert "Python" in result.summary
        assert "programming" in result.summary
        assert len(result.topics) > 0
        # The summary must be attached to the session and the dirty flag raised
        assert mock_agent_session.summary == result
        assert session_summary_manager.summaries_updated is True
def test_create_session_summary_no_model(mock_agent_session):
    """Without a model configured, summary creation is a no-op."""
    manager = SessionSummaryManager(model=None)
    assert manager.create_session_summary(mock_agent_session) is None
    assert manager.summaries_updated is False
@pytest.mark.asyncio
async def test_acreate_session_summary_success(session_summary_manager, mock_agent_session):
    """Test successful async session summary creation."""
    # Mock model response carrying a pre-parsed SessionSummaryResponse
    mock_response = Mock()
    mock_parsed = SessionSummaryResponse(
        summary="Async discussion about Python programming",
        topics=["Python", "async programming", "list comprehensions"],
    )
    mock_response.parsed = mock_parsed
    # Mock model advertises native structured outputs so `parsed` is consumed directly
    session_summary_manager.model.supports_native_structured_outputs = True
    with patch.object(session_summary_manager.model, "aresponse", return_value=mock_response):
        result = await session_summary_manager.acreate_session_summary(mock_agent_session)
        assert isinstance(result, SessionSummary)
        assert "Python" in result.summary
        assert "programming" in result.summary
        assert len(result.topics) > 0
        # The summary must be attached to the session and the dirty flag raised
        assert mock_agent_session.summary == result
        assert session_summary_manager.summaries_updated is True
@pytest.mark.asyncio
async def test_acreate_session_summary_no_model(mock_agent_session):
    """Async summary creation is also a no-op when no model is configured."""
    manager = SessionSummaryManager(model=None)
    assert await manager.acreate_session_summary(mock_agent_session) is None
    assert manager.summaries_updated is False
def test_create_session_summary_with_real_session(session_summary_manager, agent_session_with_db):
    """Test session summary creation with a real agent session."""
    # Mock model response for real session
    mock_response = Mock()
    mock_parsed = SessionSummaryResponse(
        summary="User asked for help with Python programming, specifically list comprehensions",
        topics=["Python", "programming", "list comprehensions", "help"],
    )
    mock_response.parsed = mock_parsed
    # Mock model with native structured outputs so `parsed` is consumed directly
    session_summary_manager.model.supports_native_structured_outputs = True
    with patch.object(session_summary_manager.model, "response", return_value=mock_response):
        result = session_summary_manager.create_session_summary(agent_session_with_db)
        assert isinstance(result, SessionSummary)
        assert "Python" in result.summary
        assert "programming" in result.summary
        assert len(result.topics) > 0
        # The generated summary is stored on the session object itself
        assert agent_session_with_db.summary == result
def test_session_summary_to_dict():
    """to_dict should serialize summary, topics, and an ISO-formatted timestamp."""
    snapshot = SessionSummary(
        summary="Test summary", topics=["topic1", "topic2"], updated_at=datetime(2023, 1, 1, 12, 0, 0)
    )
    as_dict = snapshot.to_dict()
    assert as_dict["summary"] == "Test summary"
    assert as_dict["topics"] == ["topic1", "topic2"]
    assert as_dict["updated_at"] == "2023-01-01T12:00:00"
def test_session_summary_from_dict():
    """from_dict should rebuild all fields, parsing the ISO timestamp back to datetime."""
    payload = {"summary": "Test summary", "topics": ["topic1", "topic2"], "updated_at": "2023-01-01T12:00:00"}
    restored = SessionSummary.from_dict(payload)
    assert restored.summary == "Test summary"
    assert restored.topics == ["topic1", "topic2"]
    assert restored.updated_at == datetime(2023, 1, 1, 12, 0, 0)
def test_session_summary_from_dict_no_timestamp():
    """from_dict should tolerate a missing timestamp and leave updated_at unset."""
    payload = {"summary": "Test summary", "topics": ["topic1", "topic2"]}
    restored = SessionSummary.from_dict(payload)
    assert restored.summary == "Test summary"
    assert restored.topics == ["topic1", "topic2"]
    assert restored.updated_at is None
def test_session_summary_response_to_dict():
    """to_dict on the response model should round-trip both fields."""
    model = SessionSummaryResponse(summary="Test summary", topics=["topic1", "topic2"])
    serialized = model.to_dict()
    assert serialized["summary"] == "Test summary"
    assert serialized["topics"] == ["topic1", "topic2"]
def test_session_summary_response_to_json():
    """Test SessionSummaryResponse to_json method."""
    response = SessionSummaryResponse(summary="Test summary", topics=["topic1", "topic2"])
    result = response.to_json()
    assert '"summary": "Test summary"' in result
    # JSON list formatting (spacing/indentation) is not guaranteed, so check
    # for the individual topic items rather than the serialized array as a whole.
    assert '"topic1"' in result
    assert '"topic2"' in result
    # Also make sure the topics key itself is present.
    assert '"topics":' in result
def test_summaries_updated_flag(session_summary_manager, mock_agent_session):
    """Test that summaries_updated flag is properly set."""
    # Initially should be False
    assert session_summary_manager.summaries_updated is False
    # Mock successful response with a pre-parsed SessionSummaryResponse
    mock_response = Mock()
    mock_parsed = SessionSummaryResponse(summary="Test", topics=["test"])
    mock_response.parsed = mock_parsed
    # Native structured outputs, so `parsed` is consumed directly
    session_summary_manager.model.supports_native_structured_outputs = True
    with patch.object(session_summary_manager.model, "response", return_value=mock_response):
        # After creating summary, should be True
        session_summary_manager.create_session_summary(mock_agent_session)
        assert session_summary_manager.summaries_updated is True
@pytest.mark.asyncio
async def test_async_summaries_updated_flag(session_summary_manager, mock_agent_session):
    """Test that summaries_updated flag is properly set in async method."""
    # Initially should be False
    assert session_summary_manager.summaries_updated is False
    # Mock successful response with a pre-parsed SessionSummaryResponse
    mock_response = Mock()
    mock_parsed = SessionSummaryResponse(summary="Test", topics=["test"])
    mock_response.parsed = mock_parsed
    # Native structured outputs, so `parsed` is consumed directly
    session_summary_manager.model.supports_native_structured_outputs = True
    with patch.object(session_summary_manager.model, "aresponse", return_value=mock_response):
        # After creating summary, should be True
        await session_summary_manager.acreate_session_summary(mock_agent_session)
        assert session_summary_manager.summaries_updated is True
def test_summaries_updated_flag_failure_case(session_summary_manager, mock_agent_session):
    """Test that summaries_updated flag is NOT set when summary creation fails."""
    # Initially should be False
    assert session_summary_manager.summaries_updated is False
    # Mock failed response that returns None from _process_summary_response
    mock_response = Mock()
    mock_response.parsed = None
    mock_response.content = "invalid json content"
    # Without native structured outputs the string-parsing path runs
    session_summary_manager.model.supports_native_structured_outputs = False
    # Mock parse_response_model_str to return None (parsing failure)
    with (
        patch("agno.utils.string.parse_response_model_str") as mock_parse,
        patch.object(session_summary_manager.model, "response", return_value=mock_response),
    ):
        mock_parse.return_value = None
        result = session_summary_manager.create_session_summary(mock_agent_session)
        # Should return None; the flag stays False and the session is untouched
        assert result is None
        assert session_summary_manager.summaries_updated is False
        assert mock_agent_session.summary is None
@pytest.mark.asyncio
async def test_async_summaries_updated_flag_failure_case(session_summary_manager, mock_agent_session):
    """Test that summaries_updated flag is NOT set when async summary creation fails."""
    # Initially should be False
    assert session_summary_manager.summaries_updated is False
    # Mock failed response that returns None from _process_summary_response
    mock_response = Mock()
    mock_response.parsed = None
    mock_response.content = "invalid json content"
    # Without native structured outputs the string-parsing path runs
    session_summary_manager.model.supports_native_structured_outputs = False
    # Mock parse_response_model_str to return None (parsing failure)
    with (
        patch("agno.utils.string.parse_response_model_str") as mock_parse,
        patch.object(session_summary_manager.model, "aresponse", return_value=mock_response),
    ):
        mock_parse.return_value = None
        result = await session_summary_manager.acreate_session_summary(mock_agent_session)
        # Should return None; the flag stays False and the session is untouched
        assert result is None
        assert session_summary_manager.summaries_updated is False
        assert mock_agent_session.summary is None
def test_summaries_updated_flag_none_response(session_summary_manager, mock_agent_session):
    """A None model response must leave the summaries_updated flag unset."""
    assert session_summary_manager.summaries_updated is False
    # Simulate the model returning nothing at all.
    with patch.object(session_summary_manager.model, "response", return_value=None):
        outcome = session_summary_manager.create_session_summary(mock_agent_session)
    assert outcome is None
    assert session_summary_manager.summaries_updated is False
    assert mock_agent_session.summary is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/managers/test_session_summary_manager.py",
"license": "Apache License 2.0",
"lines": 379,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/cerebras/cerebras_openai/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.db.sqlite import SqliteDb
from agno.models.cerebras import CerebrasOpenAI
def _assert_metrics(response: RunOutput):
    """Sanity-check token accounting on a run: all counts positive and self-consistent."""
    metrics = response.metrics
    assert metrics is not None
    prompt_tokens = metrics.input_tokens
    completion_tokens = metrics.output_tokens
    combined = metrics.total_tokens
    assert prompt_tokens > 0
    assert completion_tokens > 0
    assert combined > 0
    # The total must equal the sum of its parts.
    assert combined == prompt_tokens + completion_tokens
def test_basic():
    """A plain run should yield system/user/assistant messages and valid metrics."""
    agent = Agent(model=CerebrasOpenAI(id="gpt-oss-120b"), markdown=True, telemetry=False)
    result: RunOutput = agent.run("Share a 2 sentence horror story")
    assert result.content is not None and result.messages is not None
    assert len(result.messages) == 3
    assert [message.role for message in result.messages] == ["system", "user", "assistant"]
    _assert_metrics(result)
def test_basic_stream():
    """Streaming should return an iterator whose chunks each carry some content."""
    agent = Agent(model=CerebrasOpenAI(id="gpt-oss-120b"), markdown=True, telemetry=False)
    stream = agent.run("Share a 2 sentence horror story", stream=True)
    # The return value must be iterable before consumption.
    assert hasattr(stream, "__iter__")
    chunks = list(stream)
    assert len(chunks) > 0
    for chunk in chunks:
        assert chunk.content is not None or chunk.reasoning_content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """The async run path should mirror the sync path: three messages plus metrics."""
    agent = Agent(model=CerebrasOpenAI(id="gpt-oss-120b"), markdown=True, telemetry=False)
    result = await agent.arun("Share a 2 sentence horror story")
    assert result.content is not None and result.messages is not None
    assert len(result.messages) == 3
    assert [message.role for message in result.messages] == ["system", "user", "assistant"]
    _assert_metrics(result)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Every async streamed chunk should carry content or reasoning content."""
    agent = Agent(model=CerebrasOpenAI(id="gpt-oss-120b"), markdown=True, telemetry=False)
    async for chunk in agent.arun("Share a 2 sentence horror story", stream=True):
        assert chunk.content is not None or chunk.reasoning_content is not None
def test_with_memory():
    """History added to context should let the agent recall facts across runs."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        add_history_to_context=True,
        num_history_runs=5,
        markdown=True,
        telemetry=False,
    )
    # First interaction introduces a fact
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None
    # Second interaction should remember the name from history
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John" in response2.content
    # Verify both turns were persisted: system + two user/assistant pairs
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]
    # Test metrics structure and types
    _assert_metrics(response2)
def test_structured_output():
    """output_schema should make the agent return a fully-populated MovieScript."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        output_schema=MovieScript,
        telemetry=False,
    )
    result = agent.run("Create a movie about time travel")
    # The content must be the schema instance, with every field filled in.
    assert isinstance(result.content, MovieScript)
    for field_name in ("title", "genre", "plot"):
        assert getattr(result.content, field_name) is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cerebras/cerebras_openai/test_basic.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/cerebras/cerebras_openai/test_tool_use.py | import pytest
from agno.agent import Agent
from agno.models.cerebras import CerebrasOpenAI
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """The agent should invoke the finance tool and still produce an answer."""
    agent = Agent(
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    result = agent.run("What is the current price of TSLA?")
    assert result.messages is not None
    # At least one message in the run must carry a tool call.
    assert any(message.tool_calls for message in result.messages)
    assert result.content is not None
def test_tool_use_stream():
    """Tool-call events must show up somewhere in the streamed response."""
    agent = Agent(
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    stream = agent.run("What is the current price of TSLA?", stream=True, stream_events=True)
    collected = []
    saw_tool_call = False
    for event in stream:
        collected.append(event)
        # Look for ToolCallStarted/ToolCallCompleted events that name a tool.
        if event.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(event, "tool") and event.tool:  # type: ignore
            if event.tool.tool_name:  # type: ignore
                saw_tool_call = True
    assert len(collected) > 0
    assert saw_tool_call, "No tool calls observed in stream"
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async runs should also record tool calls on the assistant messages."""
    agent = Agent(
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )
    result = await agent.arun("What's happening in France?")
    assert result.messages is not None
    # Some assistant message must have issued a tool call.
    assert any(message.tool_calls for message in result.messages if message.role == "assistant")
    assert result.content is not None
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming should surface both tool-call events and tool-derived content."""
    agent = Agent(
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
    )
    tool_call_seen = False
    keyword_seen_in_response = False
    async for response in agent.arun(
        "What is the current price of TSLA?",
        stream=True,
        stream_events=True,
    ):
        # Track ToolCallStarted/ToolCallCompleted events that name a tool
        if response.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(response, "tool") and response.tool:  # type: ignore
            if response.tool.tool_name:  # type: ignore
                tool_call_seen = True
        # Track whether the ticker symbol makes it into the streamed content
        if response.content is not None and "TSLA" in response.content:
            keyword_seen_in_response = True
    # Asserting we found tool responses in the response stream
    assert tool_call_seen, "No tool calls observed in stream"
    # Asserting we found the expected keyword in the response stream -> proving the correct tool was called
    assert keyword_seen_in_response, "Keyword not found in response"
def test_tool_use_with_content():
    """A tool-using run that asks for a summary should still produce final content."""
    agent = Agent(
        model=CerebrasOpenAI(id="gpt-oss-120b"),
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )
    result = agent.run("What's happening in France? Summarize the key events.")
    assert result.messages is not None
    # Some message must carry a non-None tool call list.
    assert any(message.tool_calls for message in result.messages if message.tool_calls is not None)
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cerebras/cerebras_openai/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_metrics.py | from typing import Iterator
import pytest
from agno.agent import Agent
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.metrics import ModelMetrics, RunMetrics, SessionMetrics, ToolCallMetrics
from agno.models.openai import OpenAIChat
from agno.run.team import TeamRunOutput
from agno.team.team import Team
from agno.tools.yfinance import YFinanceTools
def add(a: int, b: int) -> str:
    """Add two numbers and return the sum rendered as a string."""
    total = a + b
    return f"{total}"
def multiply(a: int, b: int) -> str:
    """Multiply two numbers and return the product rendered as a string."""
    product = a * b
    return f"{product}"
def test_team_metrics_basic(shared_db):
    """Test basic team metrics functionality."""
    stock_agent = Agent(
        name="Stock Agent",
        model=OpenAIChat("gpt-4o"),
        role="Get stock information",
        tools=[YFinanceTools()],
    )
    team = Team(
        name="Stock Research Team",
        model=OpenAIChat("gpt-4o"),
        members=[stock_agent],
        db=shared_db,
        store_member_responses=True,
    )
    response = team.run("What is the current stock price of AAPL?")
    # Verify response metrics exist
    assert response.metrics is not None
    # Check basic token counts on the team-level response
    assert response.metrics.input_tokens is not None
    assert response.metrics.output_tokens is not None
    assert response.metrics.total_tokens is not None
    # Check member response metrics (stored because store_member_responses=True)
    assert len(response.member_responses) == 1
    member_response = response.member_responses[0]
    assert member_response.metrics is not None
    assert member_response.metrics.input_tokens is not None
    assert member_response.metrics.output_tokens is not None
    assert member_response.metrics.total_tokens is not None
    # Check session metrics persisted in the shared database
    session_from_db = team.get_session(session_id=team.session_id)
    assert session_from_db is not None and session_from_db.session_data is not None
    assert session_from_db.session_data["session_metrics"]["input_tokens"] is not None
    assert session_from_db.session_data["session_metrics"]["output_tokens"] is not None
    assert session_from_db.session_data["session_metrics"]["total_tokens"] is not None
def test_team_metrics_streaming(shared_db):
    """Test team metrics with streaming."""
    stock_agent = Agent(
        name="Stock Agent",
        model=OpenAIChat("gpt-4o"),
        role="Get stock information",
        tools=[YFinanceTools()],
    )
    team = Team(
        name="Stock Research Team",
        model=OpenAIChat("gpt-4o"),
        members=[stock_agent],
        db=shared_db,
        store_member_responses=True,
    )
    # Run with streaming
    run_stream = team.run("What is the stock price of NVDA?", stream=True)
    assert isinstance(run_stream, Iterator)
    # Consume the stream fully so the run completes and metrics are finalized
    responses = list(run_stream)
    assert len(responses) > 0
    run_response = team.get_last_run_output()
    # Verify metrics exist after stream completion
    assert run_response is not None
    assert run_response.metrics is not None
    # Basic token-count checks on the finished run
    assert run_response.metrics.input_tokens is not None
    assert run_response.metrics.output_tokens is not None
    assert run_response.metrics.total_tokens is not None
def test_team_metrics_multiple_runs(shared_db):
    """Test team metrics across multiple runs."""
    stock_agent = Agent(
        name="Stock Agent",
        model=OpenAIChat("gpt-4o"),
        role="Get stock information",
        tools=[YFinanceTools()],
    )
    team = Team(
        name="Stock Research Team",
        model=OpenAIChat("gpt-4o"),
        members=[stock_agent],
        db=shared_db,
    )
    # First run
    response = team.run("What is the current stock price of AAPL?")
    # Capture metrics after first run
    assert response is not None
    assert response.metrics is not None
    assert response.metrics.total_tokens > 0
    # Second run
    team.run("What is the current stock price of MSFT?")
    # Session metrics are cumulative, so the stored total must now exceed the
    # first run's total on its own
    session_from_db = team.get_session(session_id=team.session_id)
    assert session_from_db is not None and session_from_db.session_data is not None
    assert session_from_db.session_data["session_metrics"]["total_tokens"] > response.metrics.total_tokens
def test_team_metrics_with_history(shared_db):
    """Test session metrics are correctly aggregated when history is enabled"""
    agent = Agent()
    team = Team(
        members=[agent],
        add_history_to_context=True,
        db=shared_db,
    )
    team.run("Hi")
    run_response = team.get_last_run_output()
    assert run_response is not None
    assert run_response.metrics is not None
    assert run_response.metrics.input_tokens is not None
    session_from_db = team.get_session(session_id=team.session_id)
    # After a single run the session metrics must equal that run's metrics exactly
    assert session_from_db is not None and session_from_db.session_data is not None
    assert run_response.metrics.input_tokens == session_from_db.session_data["session_metrics"]["input_tokens"]
    assert run_response.metrics.output_tokens == session_from_db.session_data["session_metrics"]["output_tokens"]
    assert run_response.metrics.total_tokens == session_from_db.session_data["session_metrics"]["total_tokens"]
    # Checking metrics aggregation works with multiple runs
    team.run("Hi")
    run_response = team.get_last_run_output()
    assert run_response is not None
    assert run_response.metrics is not None
    assert run_response.metrics.input_tokens is not None
    session_from_db = team.get_session(session_id=team.session_id)
    # The second run's metrics are strictly less than the cumulative session
    # metrics because history is added to the context
    assert session_from_db is not None and session_from_db.session_data is not None
    assert run_response.metrics.input_tokens < session_from_db.session_data["session_metrics"]["input_tokens"]
    assert run_response.metrics.output_tokens < session_from_db.session_data["session_metrics"]["output_tokens"]
    assert run_response.metrics.total_tokens < session_from_db.session_data["session_metrics"]["total_tokens"]
def test_team_metrics_details_structure():
    """Run metrics should be a RunMetrics carrying per-model ModelMetrics entries."""
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[helper])
    run = team.run("Say hello.")
    metrics = run.metrics
    assert metrics is not None
    assert isinstance(metrics, RunMetrics)
    assert metrics.total_tokens > 0
    assert metrics.details is not None
    assert "model" in metrics.details
    per_model = metrics.details["model"]
    assert len(per_model) >= 1
    assert isinstance(per_model[0], ModelMetrics)
@pytest.mark.asyncio
async def test_team_metrics_details_structure_async():
    """Async runs should expose the same per-model metrics detail structure."""
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[helper])
    run = await team.arun("Say hello.")
    metrics = run.metrics
    assert metrics is not None
    assert metrics.total_tokens > 0
    assert "model" in metrics.details
    assert isinstance(metrics.details["model"][0], ModelMetrics)
def test_team_metrics_details_sum_matches_total():
    """Per-detail token counts must add up exactly to the run's total_tokens."""
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[helper])
    run = team.run("What is 2+2?")
    summed = 0
    for entries in run.metrics.details.values():
        for entry in entries:
            summed += entry.total_tokens
    assert summed == run.metrics.total_tokens
def test_team_member_metrics():
    """Stored member responses should carry their own non-zero metrics."""
    researcher = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Researcher")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[researcher],
        store_member_responses=True,
        delegate_to_all_members=True,
    )
    run = team.run("Research the impact of AI on healthcare.")
    assert len(run.member_responses) > 0
    first_member = run.member_responses[0]
    assert first_member.metrics is not None
    assert first_member.metrics.total_tokens > 0
@pytest.mark.asyncio
async def test_team_member_metrics_async():
    """Async delegation should also attach metrics to stored member responses."""
    researcher = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Researcher", role="Answer questions")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[researcher],
        store_member_responses=True,
        delegate_to_all_members=True,
    )
    run = await team.arun("Ask Researcher to explain photosynthesis briefly.")
    assert len(run.member_responses) > 0
    first_member = run.member_responses[0]
    assert first_member.metrics is not None
    assert first_member.metrics.total_tokens > 0
def test_team_member_metrics_fields():
    """Member model metrics, when present, should carry id/provider/token fields."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        store_member_responses=True,
        delegate_to_all_members=True,
    )
    response = team.run("Say hi.")
    # Guard chain: member responses / metrics / details are only checked if present,
    # since delegation may not always populate them for a trivial prompt
    if response.member_responses:
        member_response = response.member_responses[0]
        if member_response.metrics and member_response.metrics.details and "model" in member_response.metrics.details:
            model_metric = member_response.metrics.details["model"][0]
            assert isinstance(model_metric, ModelMetrics)
            assert model_metric.id is not None
            assert model_metric.provider is not None
            assert model_metric.total_tokens > 0
def test_team_eval_metrics_sync():
    """Eval-hook model usage must be tracked under 'eval_model' and included in totals."""
    judge = AgentAsJudgeEval(
        name="Team Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be helpful",
        scoring_strategy="binary",
    )
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[helper],
        post_hooks=[judge],
    )
    run = team.run("What is the capital of France?")
    details = run.metrics.details
    assert "model" in details
    assert "eval_model" in details
    assert sum(entry.total_tokens for entry in details["eval_model"]) > 0
    overall = sum(entry.total_tokens for entries in details.values() for entry in entries)
    assert overall == run.metrics.total_tokens
@pytest.mark.asyncio
async def test_team_eval_metrics_async():
    """Async runs must also account for eval-model tokens in the run totals."""
    judge = AgentAsJudgeEval(
        name="Async Team Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be accurate",
        scoring_strategy="binary",
    )
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[helper],
        post_hooks=[judge],
    )
    run = await team.arun("What is 5+3?")
    details = run.metrics.details
    assert "eval_model" in details
    assert sum(entry.total_tokens for entry in details["eval_model"]) > 0
    overall = sum(entry.total_tokens for entries in details.values() for entry in entries)
    assert overall == run.metrics.total_tokens
def test_team_eval_metrics_streaming():
    """Streaming with yield_run_output should deliver a final output carrying eval metrics."""
    eval_hook = AgentAsJudgeEval(
        name="Stream Team Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be concise",
        scoring_strategy="binary",
    )
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        post_hooks=[eval_hook],
    )
    final = None
    # With yield_run_output=True the stream ends with the full TeamRunOutput
    for event in team.run("Say hi.", stream=True, yield_run_output=True):
        if isinstance(event, TeamRunOutput):
            final = event
    assert final is not None
    assert "eval_model" in final.metrics.details
    # Detail totals must still reconcile with the run total after streaming
    detail_total = sum(entry.total_tokens for entries in final.metrics.details.values() for entry in entries)
    assert detail_total == final.metrics.total_tokens
def test_team_eval_metrics_numeric_scoring():
    """A numeric-scored eval hook should still record eval-model token usage."""
    judge = AgentAsJudgeEval(
        name="Numeric Team Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Rate the response quality",
        scoring_strategy="numeric",
        threshold=5,
    )
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[helper],
        post_hooks=[judge],
    )
    run = team.run("Explain gravity.")
    assert "eval_model" in run.metrics.details
    assert sum(entry.total_tokens for entry in run.metrics.details["eval_model"]) > 0
def test_team_tool_call_metrics_sync():
    """Member tool calls, when recorded, should carry ToolCallMetrics with a duration."""
    calculator = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Calculator", tools=[add])
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[calculator],
        store_member_responses=True,
    )
    run = team.run("Use the Calculator to add 15 and 27.")
    if run.member_responses and run.member_responses[0].tools:
        first_tool = run.member_responses[0].tools[0]
        assert isinstance(first_tool.metrics, ToolCallMetrics)
        assert first_tool.metrics.duration > 0
@pytest.mark.asyncio
async def test_team_tool_call_metrics_async():
    """Async member tool calls should also record ToolCallMetrics with a duration."""
    calculator = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Calculator", tools=[add])
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[calculator],
        store_member_responses=True,
    )
    run = await team.arun("Use the Calculator to add 10 and 20.")
    if run.member_responses and run.member_responses[0].tools:
        first_tool = run.member_responses[0].tools[0]
        assert isinstance(first_tool.metrics, ToolCallMetrics)
        assert first_tool.metrics.duration > 0
def test_team_tool_call_metrics_multiple_tools():
    """Every recorded tool call (add and multiply) should have its own ToolCallMetrics."""
    member = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        name="Calculator",
        tools=[add, multiply],
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        store_member_responses=True,
    )
    response = team.run("Add 2 and 3, then multiply 4 and 5. Use the Calculator.")
    # NOTE(review): conditional assertions — vacuous pass if no tool calls were made.
    if response.member_responses and response.member_responses[0].tools:
        for tool in response.member_responses[0].tools:
            assert isinstance(tool.metrics, ToolCallMetrics)
            assert tool.metrics.duration > 0
def test_team_provider_metrics_openai():
    """Model metrics should carry OpenAI provider metadata and non-zero token counts."""
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    crew = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[helper])
    result = crew.run("Hello")
    first_metric = result.metrics.details["model"][0]
    assert first_metric.provider == "OpenAI"
    assert first_metric.id == "gpt-4o-mini"
    assert first_metric.input_tokens > 0
    assert first_metric.total_tokens > 0
def test_team_provider_metrics_gemini():
    """Model metrics should carry Google provider metadata and non-zero token counts."""
    # Imported locally so the suite doesn't require the Google extra at collection time.
    from agno.models.google import Gemini

    member = Agent(model=Gemini(id="gemini-2.5-flash"), name="Helper")
    team = Team(model=Gemini(id="gemini-2.5-flash"), members=[member])
    response = team.run("Hello")
    model_metric = response.metrics.details["model"][0]
    assert model_metric.provider == "Google"
    assert model_metric.id == "gemini-2.5-flash"
    assert model_metric.input_tokens > 0
    assert model_metric.total_tokens > 0
def test_team_session_metrics_type(shared_db):
    """Aggregated session metrics should be a SessionMetrics with ModelMetrics detail entries."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[member], db=shared_db)
    # Two runs so the session aggregates more than a single run's metrics.
    team.run("First task.")
    team.run("Second task.")
    session_metrics = team.get_session_metrics()
    assert isinstance(session_metrics, SessionMetrics)
    assert session_metrics.total_tokens > 0
    assert isinstance(session_metrics.details, dict)
    assert len(session_metrics.details) > 0
    for model_type, metrics_list in session_metrics.details.items():
        assert isinstance(metrics_list, list)
        for metric in metrics_list:
            assert isinstance(metric, ModelMetrics)
@pytest.mark.asyncio
async def test_team_session_metrics_async(shared_db):
    """Session metrics should aggregate correctly across async runs."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[member], db=shared_db)
    await team.arun("First async task.")
    await team.arun("Second async task.")
    session_metrics = team.get_session_metrics()
    assert isinstance(session_metrics, SessionMetrics)
    assert session_metrics.total_tokens > 0
def test_team_session_metrics_with_eval(shared_db):
    """Eval-hook metrics should appear per run and be included in session aggregation."""
    eval_hook = AgentAsJudgeEval(
        name="Session Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be helpful",
        scoring_strategy="binary",
    )
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        post_hooks=[eval_hook],
        db=shared_db,
    )
    response1 = team.run("What is 2+2?")
    response2 = team.run("What is 3+3?")
    # Each run should independently carry eval-model metrics.
    assert "eval_model" in response1.metrics.details
    assert "eval_model" in response2.metrics.details
    session_metrics = team.get_session_metrics()
    assert session_metrics.total_tokens > 0
def test_team_session_metrics_run_independence(shared_db):
    """Session totals should be at least the sum of the individual runs' token totals."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[member], db=shared_db)
    response1 = team.run("Say hello.")
    response2 = team.run("Say goodbye.")
    assert response1.metrics.total_tokens > 0
    assert response2.metrics.total_tokens > 0
    session_metrics = team.get_session_metrics()
    # >= rather than == because the session may include member-run usage too.
    assert session_metrics.total_tokens >= response1.metrics.total_tokens + response2.metrics.total_tokens
def test_team_streaming_metrics():
    """Streaming with yield_run_output=True should yield a final TeamRunOutput with metrics."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[member])
    final = None
    for event in team.run("Tell me a joke.", stream=True, yield_run_output=True):
        # Only the terminal TeamRunOutput carries the aggregated metrics.
        if isinstance(event, TeamRunOutput):
            final = event
    assert final is not None
    assert final.metrics.total_tokens > 0
    assert final.metrics.details is not None
def test_team_streaming_metrics_with_tools():
    """Streaming runs that involve member tool calls should still aggregate token metrics."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Calculator", tools=[add])
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        store_member_responses=True,
    )
    final = None
    for event in team.run("Add 3 and 7 using Calculator.", stream=True, yield_run_output=True):
        if isinstance(event, TeamRunOutput):
            final = event
    assert final is not None
    assert final.metrics.total_tokens > 0
def test_team_eval_plus_tools():
    """With both tools and an eval hook, per-key detail totals should sum to the run total."""
    eval_hook = AgentAsJudgeEval(
        name="Tool Team Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should include the computed result",
        scoring_strategy="binary",
    )
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Calculator", tools=[add])
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        post_hooks=[eval_hook],
        store_member_responses=True,
    )
    response = team.run("Add 7 and 8 using Calculator.")
    assert "model" in response.metrics.details
    assert "eval_model" in response.metrics.details
    # Detail buckets must partition the total exactly — no double counting or gaps.
    detail_total = sum(entry.total_tokens for entries in response.metrics.details.values() for entry in entries)
    assert detail_total == response.metrics.total_tokens
def test_team_eval_duration_tracked():
    """The eval hook's wall-clock duration should be exposed via additional_metrics."""
    eval_hook = AgentAsJudgeEval(
        name="Duration Eval",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be factually correct",
        scoring_strategy="binary",
    )
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        post_hooks=[eval_hook],
    )
    response = team.run("What is the capital of France?")
    assert response.metrics.additional_metrics is not None
    assert "eval_duration" in response.metrics.additional_metrics
    assert response.metrics.additional_metrics["eval_duration"] > 0
def test_team_no_eval_key_without_eval():
    """Without a post-hook eval, metrics must not contain an 'eval_model' entry."""
    helper = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    crew = Team(model=OpenAIChat(id="gpt-4o-mini"), members=[helper])
    result = crew.run("Hello")
    details = result.metrics.details
    assert "model" in details
    assert "eval_model" not in details
def test_team_detail_keys_reset_between_runs():
    """Eval metrics on a later run should be fresh, not carried over from the previous run."""
    eval_hook = AgentAsJudgeEval(
        name="Reset Test",
        model=OpenAIChat(id="gpt-4o-mini"),
        criteria="Response should be correct",
        scoring_strategy="binary",
    )
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"), name="Helper")
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        post_hooks=[eval_hook],
    )
    # First run primes any per-team state; only the second run is inspected.
    team.run("What is 1+1?")
    response2 = team.run("What is 2+2?")
    assert "eval_model" in response2.metrics.details
    assert sum(metric.total_tokens for metric in response2.metrics.details["eval_model"]) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_metrics.py",
"license": "Apache License 2.0",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_multimodal.py | from agno.agent.agent import Agent
from agno.media import Image
from agno.models.openai.chat import OpenAIChat
from agno.team.team import Team
def test_team_image_input(shared_db, image_path):
    """An image passed to a team run should be persisted on the stored user message."""
    image_analyst = Agent(
        name="Image Analyst",
        role="Analyze images and provide insights.",
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=True,
        db=shared_db,
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[image_analyst],
        name="Team",
        db=shared_db,
    )
    response = team.run(
        "Tell me about this image and give me the latest news about it.",
        images=[Image(filepath=image_path)],
    )
    assert response.content is not None
    # Verify persistence: the last run's second message is the user message carrying the image.
    session_in_db = team.get_session(session_id=team.session_id)
    assert session_in_db is not None
    assert session_in_db.runs is not None
    assert session_in_db.runs[-1].messages is not None
    assert session_in_db.runs[-1].messages[1].role == "user"
    assert session_in_db.runs[-1].messages[1].images is not None  # type: ignore
def test_team_image_input_no_prompt(shared_db, image_path):
    """An image passed via the `input=` keyword form should also be persisted on the user message."""
    # NOTE(review): despite the "_no_prompt" name, this test does pass text via `input=`;
    # the distinction from the test above is keyword vs positional message — confirm intent.
    image_analyst = Agent(
        name="Image Analyst",
        role="Analyze images and provide insights.",
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=True,
        db=shared_db,
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[image_analyst],
        name="Team",
        db=shared_db,
    )
    response = team.run(
        images=[Image(filepath=image_path)],
        input="Analyze this image and provide insights.",
    )
    assert response.content is not None
    session_in_db = team.get_session(session_id=team.session_id)
    assert session_in_db is not None
    assert session_in_db.runs is not None
    assert session_in_db.runs[-1].messages is not None
    assert session_in_db.runs[-1].messages[1].role == "user"
    assert session_in_db.runs[-1].messages[1].images is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_parser_model.py | """
Test parser model functionality with teams
"""
from typing import List
from pydantic import BaseModel, Field
from agno.models.openai import OpenAIChat
from agno.team import Team
class ParkGuide(BaseModel):
    """Structured output schema used to validate the team's parsed response."""

    park_name: str = Field(..., description="The official name of the national park.")
    activities: List[str] = Field(
        ..., description="A list of popular activities to do in the park. Provide at least three."
    )
    best_season_to_visit: str = Field(
        ..., description="The best season to visit the park (e.g., Spring, Summer, Autumn, Winter)."
    )
def test_team_with_parser_model():
    """A member-less team with a parser model should return a validated ParkGuide instance.

    Fix: removed a leftover debug `print(response.content)` that polluted test output.
    """
    team = Team(
        name="National Park Expert",
        members=[],
        output_schema=ParkGuide,
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You have no members, answer directly",
        description="You are an expert on national parks and provide concise guides.",
        telemetry=False,
    )
    response = team.run("Tell me about Yosemite National Park.")
    assert response.content is not None
    assert isinstance(response.content, ParkGuide)
    assert isinstance(response.content.park_name, str)
    assert len(response.content.park_name) > 0
def test_team_with_parser_model_stream(shared_db):
    """Streaming with a parser model should still yield the final parsed ParkGuide.

    Fix: removed a leftover debug `print(event.event)` that polluted test output.
    """
    team = Team(
        name="National Park Expert",
        members=[],
        output_schema=ParkGuide,
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You have no members, answer directly",
        description="You are an expert on national parks and provide concise guides.",
        telemetry=False,
        db=shared_db,
    )
    response = team.run("Tell me about Yosemite National Park.", stream=True)
    final_content = None
    for event in response:
        # Capture the final parsed content from events
        if hasattr(event, "content") and isinstance(event.content, ParkGuide):
            final_content = event.content
    # Fallback: try to get from database if events didn't capture it
    if final_content is None:
        run_response = team.get_last_run_output()
        if run_response:
            final_content = run_response.content
    assert final_content is not None
    assert isinstance(final_content, ParkGuide)
    assert isinstance(final_content.park_name, str)
    assert len(final_content.park_name) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_parser_model.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_session.py | import uuid
from typing import Any, Dict, Optional
import pytest
from agno.agent.agent import Agent
from agno.models.openai.chat import OpenAIChat
from agno.run.team import TeamRunEvent
from agno.team.team import Team
def team_factory(shared_db, session_id: Optional[str] = None, session_state: Optional[Dict[str, Any]] = None):
    """Build a member-less Team wired to the shared test db, with memory updates enabled.

    Args:
        shared_db: The shared database fixture used for session persistence.
        session_id: Optional fixed session id; generated by the Team if omitted.
        session_state: Optional initial session state dict.
    """
    return Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        session_id=session_id,
        session_state=session_state,
        members=[],
        db=shared_db,
        update_memory_on_run=True,
        markdown=True,
        telemetry=False,
    )
def test_team_set_session_name(shared_db):
    """set_session_name should persist the name into the stored session's session_data."""
    session_id = "session_1"
    session_state = {"test_key": "test_value"}
    team = team_factory(shared_db, session_id, session_state)
    # Run once so the session actually exists in storage before naming it.
    team.run("Hello, how are you?")
    team.set_session_name(session_id=session_id, session_name="my_test_session")
    session_from_storage = team.get_session(session_id=session_id)
    assert session_from_storage is not None
    assert session_from_storage.session_id == session_id
    assert session_from_storage.session_data is not None
    assert session_from_storage.session_data["session_name"] == "my_test_session"
def test_team_get_session_name(shared_db):
    """get_session_name should return the name previously set via set_session_name."""
    sid = "session_1"
    team = team_factory(shared_db, sid)
    team.run("Hello, how are you?")
    team.set_session_name(session_id=sid, session_name="my_test_session")
    assert team.get_session_name() == "my_test_session"
def test_team_get_session_state(shared_db):
    """get_session_state should round-trip the state supplied at construction time."""
    initial_state = {"test_key": "test_value"}
    team = team_factory(shared_db, "session_1", session_state=initial_state)
    team.run("Hello, how are you?")
    assert team.get_session_state() == {"test_key": "test_value"}
def test_team_get_session_metrics(shared_db):
    """Session metrics should have positive token counts and input + output == total."""
    session_id = "session_1"
    team = team_factory(shared_db, session_id)
    team.run("Hello, how are you?")
    metrics = team.get_session_metrics()
    assert metrics is not None
    assert metrics.total_tokens > 0
    assert metrics.input_tokens > 0
    assert metrics.output_tokens > 0
    # The total must be exactly partitioned into input and output tokens.
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens
# Async database tests
@pytest.mark.asyncio
async def test_async_run_with_async_db(async_shared_db):
    """Test Team async arun() with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    # Fresh UUID keeps this test's session isolated from other tests sharing the db.
    session_id = str(uuid.uuid4())
    response = await team.arun("Hello team", session_id=session_id)
    assert response is not None
    assert response.content is not None
@pytest.mark.asyncio
async def test_async_run_stream_with_async_db(async_shared_db):
    """Test Team async arun() streaming with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    # Drain the stream; only the last yielded response is inspected.
    final_response = None
    async for response in team.arun("Hello team", session_id=session_id, stream=True):
        final_response = response
    assert final_response is not None
    assert final_response.content is not None
@pytest.mark.asyncio
async def test_async_run_stream_events_with_async_db(async_shared_db):
    """Test Team async arun() with stream_events=True and async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    # Bucket every streamed event by its event type for inspection below.
    events = {}
    async for run_response_delta in team.arun("Hello team", session_id=session_id, stream=True, stream_events=True):
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    # Exactly one run_completed event is expected, carrying the final content.
    assert TeamRunEvent.run_completed in events
    assert len(events[TeamRunEvent.run_completed]) == 1
    assert events[TeamRunEvent.run_completed][0].content is not None
@pytest.mark.asyncio
async def test_aget_session_with_async_db(async_shared_db):
    """Test aget_session with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    session = await team.aget_session(session_id=session_id)
    assert session is not None
    assert session.session_id == session_id
    # A single arun should have persisted exactly one run on the session.
    assert len(session.runs) == 1
@pytest.mark.asyncio
async def test_asave_session_with_async_db(async_shared_db):
    """Test asave_session with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    # Fetch, mutate, save, re-fetch: the mutation must survive the round trip.
    session = await team.aget_session(session_id=session_id)
    session.session_data["custom_key"] = "custom_value"
    await team.asave_session(session)
    retrieved_session = await team.aget_session(session_id=session_id)
    assert retrieved_session.session_data["custom_key"] == "custom_value"
@pytest.mark.asyncio
async def test_aget_last_run_output_with_async_db(async_shared_db):
    """Test aget_last_run_output with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("First message", session_id=session_id)
    response2 = await team.arun("Second message", session_id=session_id)
    # "Last" must mean the most recent run, i.e. the second one.
    last_output = await team.aget_last_run_output(session_id=session_id)
    assert last_output is not None
    assert last_output.run_id == response2.run_id
@pytest.mark.asyncio
async def test_aget_run_output_with_async_db(async_shared_db):
    """Test aget_run_output with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    response = await team.arun("Hello", session_id=session_id)
    run_id = response.run_id
    # Lookup by (run_id, session_id) must return the same run.
    retrieved_output = await team.aget_run_output(run_id=run_id, session_id=session_id)
    assert retrieved_output is not None
    assert retrieved_output.run_id == run_id
@pytest.mark.asyncio
async def test_aget_chat_history_with_async_db(async_shared_db):
    """Test aget_chat_history with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    await team.arun("How are you?", session_id=session_id)
    chat_history = await team.aget_chat_history(session_id=session_id)
    # Two turns -> at least two user messages and two assistant replies.
    assert len(chat_history) >= 4
@pytest.mark.asyncio
async def test_aget_session_messages_with_async_db(async_shared_db):
    """Test aget_session_messages with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    await team.arun("How are you?", session_id=session_id)
    messages = await team.aget_session_messages(session_id=session_id)
    # Two turns -> at least four stored messages in total.
    assert len(messages) >= 4
@pytest.mark.asyncio
async def test_aget_session_state_with_async_db(async_shared_db):
    """Test aget_session_state with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    # State supplied at run time must be returned unchanged by aget_session_state.
    await team.arun("Hello", session_id=session_id, session_state={"counter": 5, "name": "test"})
    state = await team.aget_session_state(session_id=session_id)
    assert state == {"counter": 5, "name": "test"}
@pytest.mark.asyncio
async def test_aupdate_session_state_with_async_db(async_shared_db):
    """Test aupdate_session_state with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id, session_state={"counter": 0, "items": []})
    # Partial update: "counter" is overwritten, "items" must be preserved (merge semantics).
    result = await team.aupdate_session_state({"counter": 10}, session_id=session_id)
    assert result == {"counter": 10, "items": []}
    updated_state = await team.aget_session_state(session_id=session_id)
    assert updated_state["counter"] == 10
@pytest.mark.asyncio
async def test_aget_session_name_with_async_db(async_shared_db):
    """Test aget_session_name with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    # Set then get: the async name accessors must round-trip the value.
    await team.aset_session_name(session_id=session_id, session_name="Async Session")
    name = await team.aget_session_name(session_id=session_id)
    assert name == "Async Session"
@pytest.mark.asyncio
async def test_aset_session_name_with_async_db(async_shared_db):
    """Test aset_session_name with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    # aset_session_name returns the updated session object directly.
    updated_session = await team.aset_session_name(session_id=session_id, session_name="Test Session")
    assert updated_session.session_data["session_name"] == "Test Session"
@pytest.mark.asyncio
async def test_aget_session_metrics_with_async_db(async_shared_db):
    """Test aget_session_metrics with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    metrics = await team.aget_session_metrics(session_id=session_id)
    assert metrics is not None
    assert metrics.total_tokens > 0
@pytest.mark.asyncio
async def test_adelete_session_with_async_db(async_shared_db):
    """Test adelete_session with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    # Verify session exists
    session = await team.aget_session(session_id=session_id)
    assert session is not None
    # Delete session
    await team.adelete_session(session_id=session_id)
    # Verify session is deleted
    session = await team.aget_session(session_id=session_id)
    assert session is None
@pytest.mark.asyncio
async def test_aget_session_summary_with_async_db(async_shared_db):
    """Test aget_session_summary with async database."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    await team.arun("Hello", session_id=session_id)
    summary = await team.aget_session_summary(session_id=session_id)
    assert summary is None  # Summaries not enabled by default
@pytest.mark.asyncio
async def test_session_persistence_across_async_runs(async_shared_db):
    """Test that session persists correctly across different async run types."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    # Async run
    await team.arun("First message", session_id=session_id)
    # Async streaming run
    async for response in team.arun("Second message", session_id=session_id, stream=True):
        pass
    # Async run again
    await team.arun("Third message", session_id=session_id)
    # Verify all runs are in session
    session = await team.aget_session(session_id=session_id)
    assert session is not None
    assert len(session.runs) == 3
@pytest.mark.asyncio
async def test_team_with_multiple_members_async_db(async_shared_db):
    """Test team with multiple members using async database."""
    agent1 = Agent(
        name="Agent 1",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You are helpful agent 1.",
    )
    agent2 = Agent(
        name="Agent 2",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You are helpful agent 2.",
    )
    team = Team(
        members=[agent1, agent2],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
    )
    session_id = str(uuid.uuid4())
    response = await team.arun("Hello team", session_id=session_id)
    assert response is not None
    # Test async convenience functions work with multi-member team
    session = await team.aget_session(session_id=session_id)
    assert session is not None
    assert len(session.runs) == 1
    metrics = await team.aget_session_metrics(session_id=session_id)
    assert metrics is not None
    assert metrics.total_tokens > 0
@pytest.mark.asyncio
async def test_async_session_state_persistence(async_shared_db):
    """Test async session state persists across multiple runs."""
    agent1 = Agent(
        name="Agent1",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=async_shared_db,
        markdown=True,
    )
    session_id = str(uuid.uuid4())
    # First run
    await team.arun("Hello", session_id=session_id, session_state={"counter": 0})
    await team.aupdate_session_state({"counter": 1}, session_id=session_id)
    # Second run - state should persist
    await team.arun("Hi again", session_id=session_id)
    state = await team.aget_session_state(session_id=session_id)
    # The explicit update, not the run, determines the final value.
    assert state["counter"] == 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_session.py",
"license": "Apache License 2.0",
"lines": 443,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_storage_and_memory.py | import pytest
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
from agno.run.base import RunStatus
from agno.team.team import Team
@pytest.fixture
def team(shared_db):
    """Create a route team with db and memory for testing."""
    # NOTE(review): docstring says "route team" but members=[] — the team answers
    # directly with no routing targets; confirm the description is intentional.
    return Team(
        model=OpenAIChat(id="gpt-4o"),
        members=[],
        db=shared_db,
        update_memory_on_run=True,
    )
@pytest.fixture
def team_with_members(shared_db):
    """Create a route team with db and memory for testing."""

    # Stub tool: returns a canned weather string for any city.
    # (Comments, not docstrings, so the tool descriptions sent to the model are unchanged.)
    def get_weather(city: str) -> str:
        return f"The weather in {city} is sunny."

    # Stub tool: returns a canned restaurant list for any city.
    def get_open_restaurants(city: str) -> str:
        return f"The open restaurants in {city} are: {', '.join(['Restaurant 1', 'Restaurant 2', 'Restaurant 3'])}"

    travel_agent = Agent(
        name="Travel Agent",
        model=OpenAIChat(id="gpt-4o"),
        db=shared_db,
        add_history_to_context=True,
        role="Search the web for travel information. Don't call multiple tools at once. First get weather, then restaurants.",
        tools=[get_weather, get_open_restaurants],
    )
    return Team(
        model=OpenAIChat(id="gpt-4o"),
        members=[travel_agent],
        db=shared_db,
        instructions="Route a single question to the travel agent. Don't make multiple requests.",
        update_memory_on_run=True,
    )
@pytest.mark.asyncio
async def test_run_history_persistence(team, shared_db):
    """Test that all runs within a session are persisted in db."""
    user_id = "john@example.com"
    session_id = "session_123"
    num_turns = 3
    shared_db.clear_memories()
    # Perform multiple turns
    conversation_messages = [
        "What's the weather like today?",
        "What about tomorrow?",
        "Any recommendations for indoor activities?",
    ]
    assert len(conversation_messages) == num_turns
    for msg in conversation_messages:
        response = await team.arun(msg, user_id=user_id, session_id=session_id)
        assert response.status == RunStatus.completed
    # Verify the stored session data after all turns
    team_session = team.get_session(session_id=session_id)
    assert team_session is not None
    assert len(team_session.runs) == num_turns
    for run in team_session.runs:
        assert run.status == RunStatus.completed
        assert run.messages is not None
    # messages[1] is the first user message of the first run (messages[0] is the system message).
    first_user_message_content = team_session.runs[0].messages[1].content
    assert first_user_message_content == conversation_messages[0]
@pytest.mark.asyncio
async def test_store_member_responses_true(team_with_members, shared_db):
    """Test that member responses are stored on the run when store_member_responses=True."""
    team_with_members.store_member_responses = True
    user_id = "john@example.com"
    session_id = "session_123"
    shared_db.clear_memories()
    await team_with_members.arun("What's the weather like today in Tokyo?", user_id=user_id, session_id=session_id)
    # Verify the stored session data after all turns
    team_session = team_with_members.get_session(session_id=session_id)
    assert team_session.runs[-1].member_responses is not None
    assert len(team_session.runs[-1].member_responses) == 1
    assert team_session.runs[-1].member_responses[0].content is not None
@pytest.mark.asyncio
async def test_store_member_responses_false(team_with_members, shared_db):
    """Test that member responses are NOT stored when store_member_responses=False."""
    team_with_members.store_member_responses = False
    user_id = "john@example.com"
    session_id = "session_123"
    shared_db.clear_memories()
    await team_with_members.arun("What's the weather like today in Tokyo?", user_id=user_id, session_id=session_id)
    # Verify the stored session data after all turns
    team_session = team_with_members.get_session(session_id=session_id)
    assert team_session.runs[-1].member_responses == []
@pytest.mark.asyncio
async def test_store_member_responses_stream_true(team_with_members, shared_db):
    """Test that member responses are stored on streamed runs when store_member_responses=True."""
    team_with_members.store_member_responses = True
    user_id = "john@example.com"
    session_id = "session_123"
    shared_db.clear_memories()
    response_iterator = team_with_members.arun(
        "What's the weather like today in Tokyo?", stream=True, user_id=user_id, session_id=session_id
    )
    # Drain the stream so the run completes and gets persisted.
    async for _ in response_iterator:
        pass
    # Verify the stored session data after all turns
    team_session = team_with_members.get_session(session_id=session_id)
    assert team_session.runs[-1].member_responses is not None
    assert len(team_session.runs[-1].member_responses) == 1
    assert team_session.runs[-1].member_responses[0].content is not None
@pytest.mark.asyncio
async def test_store_member_responses_stream_false(team_with_members, shared_db):
    """Test that member responses are NOT stored on streamed runs when store_member_responses=False."""
    team_with_members.store_member_responses = False
    user_id = "john@example.com"
    session_id = "session_123"
    shared_db.clear_memories()
    response_iterator = team_with_members.arun(
        "What's the weather like today in Tokyo?", stream=True, user_id=user_id, session_id=session_id
    )
    # Drain the stream so the run completes and gets persisted.
    async for _ in response_iterator:
        pass
    # Verify the stored session data after all turns
    team_session = team_with_members.get_session(session_id=session_id)
    assert team_session.runs[-1].member_responses == []
@pytest.mark.asyncio
async def test_run_session_summary(team, shared_db):
    """Test that the session summary is persisted in db."""
    session_id = "session_123"
    user_id = "john@example.com"
    # Enable session summaries
    team.update_memory_on_run = False
    team.enable_session_summaries = True
    # Clear memory for this specific test case
    shared_db.clear_memories()
    await team.arun("Where is New York?", user_id=user_id, session_id=session_id)
    assert team.get_session_summary(session_id=session_id).summary is not None
    team_session = team.get_session(session_id=session_id)
    assert team_session.summary is not None
    # A second run must refresh/keep the summary rather than drop it.
    await team.arun("Where is Tokyo?", user_id=user_id, session_id=session_id)
    assert team.get_session_summary(session_id=session_id).summary is not None
    team_session = team.get_session(session_id=session_id)
    assert team_session.summary is not None
@pytest.mark.asyncio
async def test_member_run_history_persistence(team_with_members, shared_db):
    """Test that all runs within a member's session are persisted in db."""
    user_id = "john@example.com"
    session_id = "session_123"
    # Clear memory for this specific test case
    shared_db.clear_memories()
    # First request
    await team_with_members.arun(
        "I'm traveling to Tokyo, what is the weather and open restaurants?", user_id=user_id, session_id=session_id
    )
    session = team_with_members.get_session(session_id=session_id)
    assert len(session.runs) >= 2, "Team leader run and atleast 1 member run"
    assert len(session.runs[-1].messages) >= 4
    first_user_message_content = session.runs[-1].messages[1].content
    assert "I'm traveling to Tokyo, what is the weather and open restaurants?" in first_user_message_content
    # Second request
    await team_with_members.arun(
        "I'm traveling to Munich, what is the weather and open restaurants?", user_id=user_id, session_id=session_id
    )
    session = team_with_members.get_session(session_id=session_id)
    assert len(session.runs) >= 4, "2 team leader runs and atleast 2 member runs"
    # Third request (to the member directly)
    await team_with_members.members[0].arun(
        "Write me a report about all the places I have requested information about",
        user_id=user_id,
        session_id=session_id,
    )
    session = team_with_members.get_session(session_id=session_id)
    assert len(session.runs) >= 4, "3 team leader runs and atleast a member run"
@pytest.mark.asyncio
async def test_multi_user_multi_session_team(team, shared_db):
    """Test multi-user multi-session route team with db and memory."""
    # Define user and session IDs
    user_1_id = "user_1@example.com"
    user_2_id = "user_2@example.com"
    user_3_id = "user_3@example.com"
    user_1_session_1_id = "user_1_session_1"
    user_1_session_2_id = "user_1_session_2"
    user_2_session_1_id = "user_2_session_1"
    user_3_session_1_id = "user_3_session_1"
    # Clear memory for this test
    shared_db.clear_memories()
    # Team interaction with user 1 - Session 1
    await team.arun("What is the current stock price of AAPL?", user_id=user_1_id, session_id=user_1_session_1_id)
    await team.arun("What are the latest news about Apple?", user_id=user_1_id, session_id=user_1_session_1_id)
    # Team interaction with user 1 - Session 2
    await team.arun(
        "Compare the stock performance of AAPL with recent tech industry news",
        user_id=user_1_id,
        session_id=user_1_session_2_id,
    )
    # Team interaction with user 2
    await team.arun("What is the current stock price of MSFT?", user_id=user_2_id, session_id=user_2_session_1_id)
    await team.arun("What are the latest news about Microsoft?", user_id=user_2_id, session_id=user_2_session_1_id)
    # Team interaction with user 3
    await team.arun("What is the current stock price of GOOGL?", user_id=user_3_id, session_id=user_3_session_1_id)
    await team.arun("What are the latest news about Google?", user_id=user_3_id, session_id=user_3_session_1_id)
    # Continue the conversation with user 1 — reuses session 1, so no new session is created
    await team.arun(
        "Based on the information you have, what stock would you recommend investing in?",
        user_id=user_1_id,
        session_id=user_1_session_1_id,
    )
    # Verify the DB has the right sessions
    all_sessions = shared_db.get_sessions(session_type=SessionType.TEAM)
    assert len(all_sessions) == 4  # 4 sessions total
    # Check that each user has the expected sessions (filtered by user_id)
    user_1_sessions = shared_db.get_sessions(user_id=user_1_id, session_type=SessionType.TEAM)
    assert len(user_1_sessions) == 2
    assert user_1_session_1_id in [session.session_id for session in user_1_sessions]
    assert user_1_session_2_id in [session.session_id for session in user_1_sessions]
    user_2_sessions = shared_db.get_sessions(user_id=user_2_id, session_type=SessionType.TEAM)
    assert len(user_2_sessions) == 1
    assert user_2_session_1_id in [session.session_id for session in user_2_sessions]
    user_3_sessions = shared_db.get_sessions(user_id=user_3_id, session_type=SessionType.TEAM)
    assert len(user_3_sessions) == 1
    assert user_3_session_1_id in [session.session_id for session in user_3_sessions]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_storage_and_memory.py",
"license": "Apache License 2.0",
"lines": 215,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_team_delegation.py | import asyncio
from typing import List
import pytest
from pydantic import BaseModel
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team.team import Team
ASYNC_TEST_TIMEOUT = 300
def test_team_delegation():
    """A coordinator team should delegate research, then writing, and return text content."""

    def get_climate_change_info() -> str:
        # Deterministic stand-in tool for the researcher.
        return "Climate change is a global issue that requires urgent action."

    research_agent = Agent(
        name="Researcher",
        model=OpenAIChat("gpt-4o"),
        role="Research information",
        tools=[get_climate_change_info],
    )
    writing_agent = Agent(name="Writer", model=OpenAIChat("gpt-4o"), role="Write content based on research")
    content_team = Team(
        name="Content Team",
        model=OpenAIChat("gpt-4o"),
        members=[research_agent, writing_agent],
        instructions=[
            "First, have the Researcher gather information on the topic.",
            "Then, have the Writer create content based on the research.",
        ],
    )
    result = content_team.run("Write a short article about climate change solutions")
    # The team must come back with non-empty string content.
    assert result.content is not None
    assert isinstance(result.content, str)
    assert len(result.content) > 0
def test_respond_directly():
    """With respond_directly, the delegated member's answer is returned verbatim as the team answer."""
    question = "¿Cuéntame algo interesante sobre Madrid?"
    english_agent = Agent(name="English Agent", model=OpenAIChat("gpt-5-mini"), role="Answer in English")
    spanish_agent = Agent(name="Spanish Agent", model=OpenAIChat("gpt-5-mini"), role="Answer in Spanish")
    translation_team = Team(
        name="Translation Team",
        model=OpenAIChat("gpt-5-mini"),
        determine_input_for_members=False,
        respond_directly=True,
        members=[english_agent, spanish_agent],
        instructions=[
            "If the user asks in English, respond in English. If the user asks in Spanish, respond in Spanish.",
            "Never answer directly, you must delegate the task to the appropriate agent.",
        ],
    )
    run_output = translation_team.run(question)
    assert run_output.content is not None
    assert isinstance(run_output.content, str)
    assert len(run_output.content) > 0
    # The member's content should pass through unmodified.
    member = run_output.member_responses[0]
    assert member.content == run_output.content
    # With determine_input_for_members=False, the member receives the raw user input.
    assert member.messages[1].role == "user"
    assert member.messages[1].content == question
def test_use_input_directly_structured_input():
    """Test basic functionality of a coordinator team."""

    # Structured input model; the team is expected to forward it to the member
    # as serialized JSON (field order matters for the equality check below).
    class ResearchRequest(BaseModel):
        topic: str
        focus_areas: List[str]
        target_audience: str
        sources_required: int

    researcher = Agent(name="Researcher", model=OpenAIChat("gpt-4o"), role="Research information")
    team = Team(
        name="Content Team",
        model=OpenAIChat("gpt-4o"),
        # Pass the input through to members untouched rather than re-deriving it.
        determine_input_for_members=False,
        members=[researcher],
        instructions=[
            "Have the Researcher gather information on the topic.",
        ],
    )
    research_request = ResearchRequest(
        topic="AI Agent Frameworks",
        focus_areas=["AI Agents", "Framework Design", "Developer Tools", "Open Source"],
        target_audience="Software Developers and AI Engineers",
        sources_required=7,
    )
    response = team.run(
        input=research_request,
    )
    assert response.content is not None
    assert isinstance(response.content, str)
    assert len(response.content) > 0
    # Check the user message is the same as the input
    # (the pydantic model is expected to be serialized with indent=2 / exclude_none=True)
    assert response.member_responses[0].messages[1].role == "user"
    assert response.member_responses[0].messages[1].content == research_request.model_dump_json(
        indent=2, exclude_none=True
    )
def test_delegate_to_all_members():
    """delegate_to_all_members fans the task out to every member via one delegation call."""
    first_perspective = Agent(
        name="Agent 1",
        model=OpenAIChat("gpt-4o"),
        role="First perspective provider",
        instructions="Provide a perspective on the given topic.",
    )
    second_perspective = Agent(
        name="Agent 2",
        model=OpenAIChat("gpt-4o"),
        role="Second perspective provider",
        instructions="Provide a different perspective on the given topic.",
    )
    collaborative_team = Team(
        name="Collaborative Team",
        delegate_to_all_members=True,
        model=OpenAIChat("gpt-4o"),
        members=[first_perspective, second_perspective],
        instructions=[
            "Synthesize the perspectives from both team members.",
            "Provide a balanced view that incorporates insights from both perspectives.",
            "Only ask the members once for their perspectives.",
        ],
    )
    result = collaborative_team.run("What are the pros and cons of remote work?")
    assert result.content is not None
    assert isinstance(result.content, str)
    assert len(result.content) > 0
    # Exactly one tool invocation: the single broadcast delegation.
    tool_calls = result.tools
    assert tool_calls is not None
    assert len(tool_calls) == 1
@pytest.mark.asyncio
async def test_async_delegate_to_all_members_agent_identity():
    """
    Regression test for closure bug in adelegate_task_to_members (PR #6067).
    Verifies that when delegate_to_all_members=True and async mode is used,
    each agent correctly receives its own identity (not the last agent's).
    Bug: Python closures in loops capture variables by reference, so all
    concurrent tasks would see the last loop iteration's values.
    Fix: Capture loop variables via default arguments.
    """
    # Create 3 agents with distinct names that include their identity in responses
    agents = [
        Agent(
            name=f"Worker{i}",
            id=f"worker-{i}",
            model=OpenAIChat("gpt-4o-mini"),
            instructions=[
                f"You are Worker{i}.",
                f"Always start your response with 'I am Worker{i}.'",
                "Keep your response brief - just one sentence.",
            ],
        )
        for i in range(1, 4)
    ]
    team = Team(
        name="Identity Test Team",
        model=OpenAIChat("gpt-4o-mini"),
        members=agents,
        delegate_to_all_members=True,
        # Force the model to use the delegation tool
        tool_choice={"type": "function", "function": {"name": "delegate_task_to_members"}},
        instructions=[
            "Delegate to all members and collect their responses.",
            "Do not modify their responses.",
        ],
    )
    # Run async without streaming with timeout; a slow API is a skip, not a failure
    try:
        response = await asyncio.wait_for(
            team.arun("Identify yourself.", stream=False),
            timeout=ASYNC_TEST_TIMEOUT,
        )
    except asyncio.TimeoutError:
        pytest.skip(f"Test timed out after {ASYNC_TEST_TIMEOUT}s - skipping due to slow API response")
    assert response is not None
    assert response.content is not None
    # Skip if the run was cancelled
    content = str(response.content)
    if "cancelled" in content.lower():
        pytest.skip("Run was cancelled, likely due to timeout")
    # Delegation should have happened since we forced tool_choice;
    # search both the final content and the raw tool results for the identities
    tool_results = " ".join(str(t.result) for t in response.tools if t.result) if response.tools else ""
    combined = content + " " + tool_results
    # Verify all three agent identities appear in the response
    # Before the fix, all would show "Worker3" - now each should have correct identity
    assert "Worker1" in combined, f"Worker1 not found in response: {combined}"
    assert "Worker2" in combined, f"Worker2 not found in response: {combined}"
    assert "Worker3" in combined, f"Worker3 not found in response: {combined}"
@pytest.mark.asyncio
async def test_async_delegate_to_all_members_streaming_agent_identity():
    """
    Regression test for closure bug in streaming mode (PR #6067).
    Tests that the streaming branch correctly uses the function parameter
    instead of the outer loop variable.
    """
    # Create 3 agents with distinct names
    agents = [
        Agent(
            name=f"StreamWorker{i}",
            id=f"stream-worker-{i}",
            model=OpenAIChat("gpt-4o-mini"),
            instructions=[
                f"You are StreamWorker{i}.",
                f"Always start your response with 'I am StreamWorker{i}.'",
                "Keep your response brief - just one sentence.",
            ],
        )
        for i in range(1, 4)
    ]
    team = Team(
        name="Streaming Identity Test Team",
        model=OpenAIChat("gpt-4o-mini"),
        members=agents,
        delegate_to_all_members=True,
        # Force the model to use the delegation tool
        tool_choice={"type": "function", "function": {"name": "delegate_task_to_members"}},
        instructions=[
            "Delegate to all members and collect their responses.",
            "Do not modify their responses.",
        ],
    )

    # Run async with streaming, with timeout protection;
    # gather every event's textual content for the identity check below
    async def collect_stream():
        collected = []
        async for event in team.arun("Identify yourself.", stream=True, stream_events=True):
            if hasattr(event, "content") and event.content:
                collected.append(str(event.content))
        return collected

    try:
        collected_content = await asyncio.wait_for(collect_stream(), timeout=ASYNC_TEST_TIMEOUT)
    except asyncio.TimeoutError:
        pytest.skip(f"Test timed out after {ASYNC_TEST_TIMEOUT}s - skipping due to slow API response")
    # Combine all content
    full_content = " ".join(collected_content)
    # Skip assertion if the run was cancelled (e.g., due to external timeout)
    if "cancelled" in full_content.lower():
        pytest.skip("Run was cancelled, likely due to timeout")
    # Verify all three agent identities appear
    # Before the fix in streaming mode, all would show "StreamWorker3"
    assert "StreamWorker1" in full_content, f"StreamWorker1 not found in response: {full_content}"
    assert "StreamWorker2" in full_content, f"StreamWorker2 not found in response: {full_content}"
    assert "StreamWorker3" in full_content, f"StreamWorker3 not found in response: {full_content}"
@pytest.mark.asyncio
async def test_async_delegate_to_all_members_with_tools():
    """
    Test that async delegation with tools correctly identifies each agent.
    This tests a more complex scenario where agents have tools and the
    closure bug could affect tool execution attribution.
    """
    # Create agents with a tool that uses their identity - using only 2 agents to speed up
    agents = []
    for i in range(1, 3):
        # Factory binds agent_num per call, deliberately avoiding the
        # late-binding-closure pitfall this whole test family guards against.
        def create_identity_tool(agent_num: int):
            def identify() -> str:
                """Return this agent's identity."""
                return f"ToolAgent{agent_num} reporting"

            return identify

        agent = Agent(
            name=f"ToolAgent{i}",
            id=f"tool-agent-{i}",
            model=OpenAIChat("gpt-4o-mini"),
            tools=[create_identity_tool(i)],
            instructions=[
                f"You are ToolAgent{i}.",
                "When asked to identify, call the identify tool.",
                "Keep responses brief - one sentence max.",
            ],
        )
        agents.append(agent)
    team = Team(
        name="Tool Identity Test Team",
        model=OpenAIChat("gpt-4o-mini"),
        members=agents,
        delegate_to_all_members=True,
        # Force the model to use the delegation tool
        tool_choice={"type": "function", "function": {"name": "delegate_task_to_members"}},
        instructions=["Delegate to all members. Keep your final response brief."],
    )
    # Bounded wait: a slow API response is a skip, not a failure
    try:
        response = await asyncio.wait_for(
            team.arun("Use your identify tool.", stream=False),
            timeout=ASYNC_TEST_TIMEOUT,
        )
    except asyncio.TimeoutError:
        pytest.skip(f"Test timed out after {ASYNC_TEST_TIMEOUT}s - skipping due to slow API response")
    assert response is not None
    assert response.content is not None
    # Check that delegation happened and tools were called
    content = str(response.content)
    # Skip if the run was cancelled (e.g., due to external timeout or rate limiting)
    if "cancelled" in content.lower():
        pytest.skip("Run was cancelled, likely due to timeout or rate limiting")
    tool_results = " ".join(str(t.result) for t in response.tools if t.result) if response.tools else ""
    combined = content + " " + tool_results
    # Verify agent identities appear (tools should have been called)
    assert "ToolAgent1" in combined or "ToolAgent2" in combined, f"No ToolAgent identity found in response: {combined}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_team_delegation.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_access_multiple_previous_steps_outputs.py | """Integration tests for accessing multiple previous step outputs in workflows."""
import pytest
from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutput
from agno.workflow import Condition, Parallel, Step, Workflow
from agno.workflow.steps import Steps
from agno.workflow.types import StepInput, StepOutput
# Helper functions
def step_a(step_input: StepInput) -> StepOutput:
    """Step A in parallel execution."""
    message = f"Step A processed: {step_input.input}"
    return StepOutput(step_name="step_a", content=message, success=True)
def step_b(step_input: StepInput) -> StepOutput:
    """Step B in parallel execution."""
    message = f"Step B analyzed: {step_input.input}"
    return StepOutput(step_name="step_b", content=message, success=True)
def step_c(step_input: StepInput) -> StepOutput:
    """Step C in parallel execution."""
    message = f"Step C reviewed: {step_input.input}"
    return StepOutput(step_name="step_c", content=message, success=True)
def parallel_aggregator_step(step_input: StepInput) -> StepOutput:
    """Aggregator step that accesses parallel step outputs."""
    # Get the parallel step content - should return a dict keyed by nested step name
    parallel_data = step_input.get_step_content("Parallel Processing")
    # Verify we can access individual step content
    # (guard against non-dict in case the parallel group shape changes)
    step_a_data = parallel_data.get("step_a", "") if isinstance(parallel_data, dict) else ""
    step_b_data = parallel_data.get("step_b", "") if isinstance(parallel_data, dict) else ""
    step_c_data = parallel_data.get("step_c", "") if isinstance(parallel_data, dict) else ""
    # Test direct access to individual steps using get_step_output (should now work)
    direct_step_a_output = step_input.get_step_output("step_a")
    direct_step_a_content = step_input.get_step_content("step_a")
    direct_step_a_str = str(direct_step_a_content) if direct_step_a_content else "None"
    # This exact report text is asserted substring-by-substring in the tests below;
    # keep the wording and the column-0 lines of the f-string intact.
    aggregated_report = f"""Parallel Aggregation Report:
Parallel Data Type: {type(parallel_data).__name__}
Step A: {step_a_data}
Step B: {step_b_data}
Step C: {step_c_data}
Direct Step A Output: {direct_step_a_output is not None}
Direct Step A Access: {direct_step_a_str}
Available Steps: {list(step_input.previous_step_outputs.keys())}
Previous step outputs: {step_input.previous_step_outputs}
"""
    return StepOutput(step_name="parallel_aggregator_step", content=aggregated_report, success=True)
def research_step(step_input: StepInput) -> StepOutput:
    """Research step."""
    body = f"Research: {step_input.input}"
    return StepOutput(step_name="research_step", content=body, success=True)
def analysis_step(step_input: StepInput) -> StepOutput:
    """Analysis step."""
    # Fixed content; the input is intentionally ignored.
    body = "Analysis of research data"
    return StepOutput(step_name="analysis_step", content=body, success=True)
def report_step(step_input: StepInput) -> StepOutput:
    """Report step that accesses multiple previous outputs."""
    # Get specific step outputs by name; fall back to "" when absent
    research_data = step_input.get_step_content("research_step") or ""
    analysis_data = step_input.get_step_content("analysis_step") or ""
    # Get all previous content
    all_content = step_input.get_all_previous_content()
    # The tests assert these exact substrings — keep the wording intact.
    report = f"""Report:
Research: {research_data}
Analysis: {analysis_data}
Total Content Length: {len(all_content)}
Available Steps: {list(step_input.previous_step_outputs.keys())}"""
    return StepOutput(step_name="report_step", content=report, success=True)
def test_basic_access(shared_db):
    """The final step can read content from every earlier step by name."""
    wf = Workflow(name="Basic Access", db=shared_db, steps=[research_step, analysis_step, report_step])
    result = wf.run(input="test topic")
    assert isinstance(result, WorkflowRunOutput)
    assert len(result.step_results) == 3
    # The report produced by the last step must embed both predecessors' data.
    report_output = result.step_results[2]
    for fragment in ("Research:", "Analysis:", "research_step", "analysis_step"):
        assert fragment in report_output.content
def test_streaming_access(shared_db):
    """Streaming mode still exposes multi-step access in the completion event."""
    wf = Workflow(name="Streaming Access", db=shared_db, steps=[research_step, analysis_step, report_step])
    emitted = list(wf.run(input="test topic", stream=True))
    # Exactly one completion event, carrying the final report.
    finished = [e for e in emitted if isinstance(e, WorkflowCompletedEvent)]
    assert len(finished) == 1
    assert "Report:" in finished[0].content
@pytest.mark.asyncio
async def test_async_access(shared_db):
    """Async execution preserves multi-step access."""
    wf = Workflow(name="Async Access", db=shared_db, steps=[research_step, analysis_step, report_step])
    result = await wf.arun(input="test topic")
    assert isinstance(result, WorkflowRunOutput)
    assert len(result.step_results) == 3
    assert "Report:" in result.content
@pytest.mark.asyncio
async def test_async_streaming_access(shared_db):
    """Async streaming also surfaces the aggregated report on completion."""
    wf = Workflow(name="Async Streaming", db=shared_db, steps=[research_step, analysis_step, report_step])
    seen = [event async for event in wf.arun(input="test topic", stream=True)]
    done = [e for e in seen if isinstance(e, WorkflowCompletedEvent)]
    assert len(done) == 1
    assert "Report:" in done[0].content
def test_parallel_step_access(shared_db):
    """Content produced inside a Parallel group is reachable from a later step."""
    wf = Workflow(
        name="Parallel Step Access",
        db=shared_db,
        steps=[Parallel(step_a, step_b, step_c, name="Parallel Processing"), parallel_aggregator_step],
    )
    result = wf.run(input="test data")
    assert isinstance(result, WorkflowRunOutput)
    assert len(result.step_results) == 2
    # The aggregator's report must show the dict shape and each parallel member's content.
    report = result.step_results[1].content
    expected_fragments = (
        "Parallel Aggregation Report:",
        "Parallel Data Type: dict",
        "Step A: Step A processed: test data",
        "Step B: Step B analyzed: test data",
        "Step C: Step C reviewed: test data",
        "Direct Step A Output: True",
        "Direct Step A Access: Step A processed: test data",
        "Parallel Processing",
    )
    for fragment in expected_fragments:
        assert fragment in report
@pytest.mark.asyncio
async def test_async_parallel_step_access(shared_db):
    """Async run: content produced inside a Parallel group is reachable downstream."""
    wf = Workflow(
        name="Async Parallel Step Access",
        db=shared_db,
        steps=[Parallel(step_a, step_b, step_c, name="Parallel Processing"), parallel_aggregator_step],
    )
    result = await wf.arun(input="async test data")
    assert isinstance(result, WorkflowRunOutput)
    assert len(result.step_results) == 2
    # The aggregator's report must show the dict shape and each parallel member's content.
    report = result.step_results[1].content
    expected_fragments = (
        "Parallel Aggregation Report:",
        "Parallel Data Type: dict",
        "Step A: Step A processed: async test data",
        "Step B: Step B analyzed: async test data",
        "Step C: Step C reviewed: async test data",
        "Direct Step A Output: True",
        "Direct Step A Access: Step A processed: async test data",
    )
    for fragment in expected_fragments:
        assert fragment in report
def test_single_parallel_step_access(shared_db):
    """A Parallel group with one member still exposes its output as a dict (edge case)."""
    wf = Workflow(
        name="Single Parallel Step Access",
        db=shared_db,
        steps=[Parallel(step_a, name="Parallel Processing"), parallel_aggregator_step],
    )
    result = wf.run(input="single test")
    assert isinstance(result, WorkflowRunOutput)
    assert len(result.step_results) == 2
    # Even a one-member parallel group must keep the dict structure.
    report = result.step_results[1].content
    assert "Parallel Data Type: dict" in report
    assert "Step A: Step A processed: single test" in report
def nested_step_inside_condition(step_input: StepInput) -> StepOutput:
    """Step nested inside a Condition."""
    body = f"Nested: {step_input.input}"
    return StepOutput(step_name="nested_step", content=body, success=True)
def condition_evaluator(step_input: StepInput) -> bool:
    """Always return True for testing."""
    # Unconditionally satisfied so Condition branches are always taken in these tests.
    return True
def test_nested_step_in_parallel_and_condition(shared_db):
    """Test accessing a step nested inside Parallel -> Condition."""
    workflow = Workflow(
        name="Nested Step Access",
        db=shared_db,
        steps=[
            # Two levels of nesting: the step lives inside a Condition inside a Parallel.
            Parallel(
                Condition(
                    name="nested_condition",
                    evaluator=condition_evaluator,  # always True, so the nested step runs
                    steps=[nested_step_inside_condition],
                ),
                name="Parallel Processing",
            ),
            parallel_aggregator_step,
        ],
    )
    response = workflow.run(input="nested test")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2
    # Verify we can access the deeply nested step
    aggregator_response = response.step_results[1]
    assert "Parallel Aggregation Report:" in aggregator_response.content
def test_direct_lookup_priority(shared_db):
    """Test that direct lookup takes priority over nested search."""

    # Create a step with the same name as a nested step
    # (deliberate name collision: "step_a" exists at top level AND inside the Parallel)
    def top_level_step(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="step_a", content="Top level step_a", success=True)

    workflow = Workflow(
        name="Direct Lookup Priority",
        db=shared_db,
        steps=[
            top_level_step,  # Top-level step_a
            Parallel(step_a, name="Parallel Processing"),  # Nested step_a
            parallel_aggregator_step,
        ],
    )
    response = workflow.run(input="priority test")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 3
    # The aggregator should access the top-level step_a, not the nested one
    aggregator_response = response.step_results[2]
    # Since we have both top-level and nested step_a, get_step_output should return top-level first
    # But get_step_content might return the nested one from parallel
    # NOTE(review): only the report header is asserted here, so the priority
    # itself is not pinned — consider asserting "Top level step_a" explicitly.
    assert "Parallel Aggregation Report:" in aggregator_response.content
def test_multiple_depth_nested_access(shared_db):
    """Test accessing steps at multiple depth levels."""

    def deep_nested_step(step_input: StepInput) -> StepOutput:
        """Step nested deep inside Parallel -> Condition -> Steps."""
        return StepOutput(step_name="deep_nested_step", content="Deep nested content", success=True)

    def verify_nested_access(step_input: StepInput) -> StepOutput:
        """Verify we can access deeply nested step."""
        # Both lookup APIs should resolve the step three levels down.
        nested_output = step_input.get_step_output("deep_nested_step")
        nested_content = step_input.get_step_content("deep_nested_step")
        result = f"Nested output found: {nested_output is not None}, Content: {nested_content}"
        return StepOutput(step_name="verifier", content=result, success=True)

    workflow = Workflow(
        name="Multiple Depth Access",
        db=shared_db,
        steps=[
            # Three levels of nesting: Parallel -> Condition -> Steps -> step
            Parallel(
                Condition(
                    name="condition_layer",
                    evaluator=condition_evaluator,  # always True
                    steps=[
                        Steps(
                            name="steps_layer",
                            steps=[deep_nested_step],
                        )
                    ],
                ),
                name="Parallel Processing",
            ),
            Step(name="verifier", executor=verify_nested_access),
        ],
    )
    response = workflow.run(input="deep test")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2
    # Verify we can access the deeply nested step
    verifier_response = response.step_results[1]
    assert "Nested output found: True" in verifier_response.content
    assert "Content: Deep nested content" in verifier_response.content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_access_multiple_previous_steps_outputs.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_condition_steps.py | """Integration tests for Condition functionality in workflows."""
import pytest
from agno.run.base import RunStatus
from agno.run.workflow import (
ConditionExecutionCompletedEvent,
ConditionExecutionStartedEvent,
StepCompletedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowRunOutput,
)
from agno.workflow import Condition, Parallel, Workflow
from agno.workflow.cel import CEL_AVAILABLE
from agno.workflow.types import StepInput, StepOutput
# Helper functions
def research_step(step_input: StepInput) -> StepOutput:
    """Research step that generates content."""
    findings = f"Research findings: {step_input.input}. Found data showing 40% growth."
    return StepOutput(content=findings, success=True)
def analysis_step(step_input: StepInput) -> StepOutput:
    """Analysis step."""
    summary = f"Analysis of research: {step_input.previous_step_content}"
    return StepOutput(content=summary, success=True)
def fact_check_step(step_input: StepInput) -> StepOutput:
    """Fact checking step."""
    # Fixed content; the input is intentionally ignored.
    verdict = "Fact check complete: All statistics verified."
    return StepOutput(content=verdict, success=True)
# Condition evaluators
def has_statistics(step_input: StepInput) -> bool:
    """Return True when the workflow input message mentions statistics.

    Only the original input message is inspected — not previous step content —
    so conditions keyed on this evaluator react to what the user asked for.
    """
    # NOTE: an earlier revision first assigned
    # `content = step_input.previous_step_content or step_input.input or ""`
    # and immediately overwrote it (dead store); the overwrite was the intended
    # behavior, so the dead assignment has been removed.
    content = step_input.input or ""
    return any(x in content.lower() for x in ["percent", "%", "growth", "increase", "decrease"])
def is_tech_topic(step_input: StepInput) -> bool:
    """Check if topic is tech-related."""
    # Prefer the raw input; fall back to the previous step's content.
    text = (step_input.input or step_input.previous_step_content or "").lower()
    keywords = ("ai", "tech", "software", "data")
    return any(keyword in text for keyword in keywords)
async def async_evaluator(step_input: StepInput) -> bool:
    """Async evaluator."""
    # Thin async wrapper used to exercise Condition's await path;
    # delegates the actual decision to the synchronous is_tech_topic.
    return is_tech_topic(step_input)
# ============================================================================
# TESTS (Fast - No Workflow Overhead)
# ============================================================================
def test_condition_direct_execute_true():
    """Condition.execute() runs its steps when the evaluator is satisfied."""
    condition = Condition(name="Direct True Condition", evaluator=has_statistics, steps=[fact_check_step])
    # "40% growth" trips the statistics evaluator.
    outcome = condition.execute(StepInput(input="Market shows 40% growth"))
    assert isinstance(outcome, StepOutput)
    assert len(outcome.steps) == 1
    assert "Fact check complete" in outcome.steps[0].content
def test_condition_direct_execute_false():
    """Condition.execute() skips its steps when the evaluator is not satisfied."""
    condition = Condition(name="Direct False Condition", evaluator=has_statistics, steps=[fact_check_step])
    # No statistics keywords, so nothing should execute.
    outcome = condition.execute(StepInput(input="General market overview"))
    assert isinstance(outcome, StepOutput)
    assert not outcome.steps  # None or empty: no steps executed
def test_condition_direct_boolean_evaluator():
    """A plain boolean is accepted as the evaluator."""
    condition = Condition(name="Boolean Condition", evaluator=True, steps=[research_step])
    outcome = condition.execute(StepInput(input="test"))
    assert isinstance(outcome, StepOutput)
    assert len(outcome.steps) == 1
    assert "Research findings" in outcome.steps[0].content
@pytest.mark.asyncio
async def test_condition_direct_aexecute():
    """Condition.aexecute() awaits an async evaluator and runs the steps."""
    condition = Condition(name="Direct Async Condition", evaluator=async_evaluator, steps=[research_step])
    outcome = await condition.aexecute(StepInput(input="AI technology"))
    assert isinstance(outcome, StepOutput)
    assert len(outcome.steps) == 1
    assert "Research findings" in outcome.steps[0].content
def test_condition_direct_execute_stream():
    """Test Condition.execute_stream() directly."""
    from agno.run.workflow import WorkflowRunOutput

    condition = Condition(name="Direct Stream Condition", evaluator=is_tech_topic, steps=[research_step])
    step_input = StepInput(input="AI trends")
    # Mock workflow response for streaming (execute_stream requires a run container)
    mock_response = WorkflowRunOutput(
        run_id="test-run",
        workflow_name="test-workflow",
        workflow_id="test-id",
        session_id="test-session",
        content="",
    )
    events = list(condition.execute_stream(step_input, workflow_run_response=mock_response, stream_events=True))
    # Should have started, completed events and step outputs
    started_events = [e for e in events if isinstance(e, ConditionExecutionStartedEvent)]
    completed_events = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    step_outputs = [e for e in events if isinstance(e, StepOutput)]
    # Exactly one lifecycle pair plus one output from the single nested step
    assert len(started_events) == 1
    assert len(completed_events) == 1
    assert len(step_outputs) == 1
    # "AI trends" satisfies is_tech_topic, so the start event reports True
    assert started_events[0].condition_result is True
def test_condition_direct_multiple_steps():
"""Test Condition with multiple steps."""
condition = Condition(name="Multi Step Condition", evaluator=is_tech_topic, steps=[research_step, analysis_step])
step_input = StepInput(input="AI technology")
result = condition.execute(step_input)
assert isinstance(result, StepOutput)
assert len(result.steps) == 2
assert "Research findings" in result.steps[0].content
assert "Analysis of research" in result.steps[1].content
# ============================================================================
# EXISTING INTEGRATION TESTS (With Workflow)
# ============================================================================
def test_basic_condition_true(shared_db):
    """Test basic condition that evaluates to True."""
    workflow = Workflow(
        name="Basic Condition",
        db=shared_db,
        steps=[research_step, Condition(name="stats_check", evaluator=has_statistics, steps=[fact_check_step])],
    )
    # input contains "40%" so has_statistics should evaluate True
    response = workflow.run(input="Market shows 40% growth")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2
    # Condition output is a list
    assert isinstance(response.step_results[1], StepOutput)
    # One step executed in condition
    assert len(response.step_results[1].steps) == 1
    assert "Fact check complete" in response.step_results[1].steps[0].content
def test_basic_condition_false(shared_db):
    """Test basic condition that evaluates to False."""
    workflow = Workflow(
        name="Basic Condition False",
        db=shared_db,
        steps=[research_step, Condition(name="stats_check", evaluator=has_statistics, steps=[fact_check_step])],
    )
    # Using a message without statistics
    response = workflow.run(input="General market overview")
    assert isinstance(response, WorkflowRunOutput)
    # Should have 2 step responses: research_step + condition result
    assert len(response.step_results) == 2
    assert isinstance(response.step_results[1], StepOutput)
    assert (
        response.step_results[1].steps is None or len(response.step_results[1].steps) == 0
    )  # No steps executed when condition is false
    assert "not met" in response.step_results[1].content
def test_parallel_with_conditions(shared_db):
    """Test parallel containing multiple conditions."""
    workflow = Workflow(
        name="Parallel with Conditions",
        db=shared_db,
        steps=[
            research_step,  # Add a step before parallel to ensure proper chaining
            Parallel(
                Condition(name="tech_check", evaluator=is_tech_topic, steps=[analysis_step]),
                Condition(name="stats_check", evaluator=has_statistics, steps=[fact_check_step]),
                name="parallel_conditions",
            ),
        ],
    )
    # input satisfies both evaluators ("AI" -> tech, "40%" -> statistics)
    response = workflow.run(input="AI market shows 40% growth")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2  # research_step + parallel
    # Check the parallel output structure
    parallel_output = response.step_results[1]
    # Check that the parallel step has nested condition results
    assert parallel_output.step_type == "Parallel"
    assert len(parallel_output.steps) == 2  # Two conditions executed
    # Check that we can access the nested step content
    condition_results = parallel_output.steps
    tech_condition = next((step for step in condition_results if step.step_name == "tech_check"), None)
    stats_condition = next((step for step in condition_results if step.step_name == "stats_check"), None)
    assert tech_condition is not None
    assert stats_condition is not None
    assert len(tech_condition.steps) == 1  # analysis_step executed
    assert len(stats_condition.steps) == 1  # fact_check_step executed
    assert "Analysis of research" in tech_condition.steps[0].content
    assert "Fact check complete" in stats_condition.steps[0].content
def test_condition_streaming(shared_db):
    """Test condition with streaming."""
    workflow = Workflow(
        name="Streaming Condition",
        db=shared_db,
        steps=[Condition(name="tech_check", evaluator=is_tech_topic, steps=[research_step, analysis_step])],
    )
    # stream=True yields an event iterator instead of a final response
    events = list(workflow.run(input="AI trends", stream=True, stream_events=True))
    # Verify event types
    condition_started = [e for e in events if isinstance(e, ConditionExecutionStartedEvent)]
    condition_completed = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    workflow_completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(condition_started) == 1
    assert len(condition_completed) == 1
    assert len(workflow_completed) == 1
    assert condition_started[0].condition_result is True
def test_condition_error_handling(shared_db):
    """Test condition error handling."""
    def failing_evaluator(_: StepInput) -> bool:
        raise ValueError("Evaluator failed")
    workflow = Workflow(
        name="Error Condition",
        db=shared_db,
        steps=[Condition(name="failing_check", evaluator=failing_evaluator, steps=[research_step])],
    )
    # evaluator exceptions propagate out of workflow.run()
    with pytest.raises(ValueError):
        response = workflow.run(input="test")
    # the failed run is still persisted with error status and message
    response = workflow.get_last_run_output()
    assert isinstance(response, WorkflowRunOutput)
    assert response.status == RunStatus.error
    assert "Evaluator failed" in response.content
def test_nested_conditions(shared_db):
    """Test nested conditions."""
    workflow = Workflow(
        name="Nested Conditions",
        db=shared_db,
        steps=[
            Condition(
                name="outer",
                evaluator=is_tech_topic,
                steps=[research_step, Condition(name="inner", evaluator=has_statistics, steps=[fact_check_step])],
            )
        ],
    )
    response = workflow.run(input="AI market shows 40% growth")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    outer_condition = response.step_results[0]
    assert isinstance(outer_condition, StepOutput)
    # research_step + inner condition result
    assert len(outer_condition.steps) == 2
    # Check that the inner condition is properly nested
    inner_condition = outer_condition.steps[1]  # Second step should be the inner condition
    assert inner_condition.step_type == "Condition"
    assert inner_condition.step_name == "inner"
    assert len(inner_condition.steps) == 1  # fact_check_step executed
    assert "Fact check complete" in inner_condition.steps[0].content
@pytest.mark.asyncio
async def test_async_condition(shared_db):
    """Test async condition."""
    workflow = Workflow(
        name="Async Condition",
        db=shared_db,
        steps=[Condition(name="async_check", evaluator=async_evaluator, steps=[research_step])],
    )
    # arun() is the async counterpart of run()
    response = await workflow.arun(input="AI technology")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    assert isinstance(response.step_results[0], StepOutput)
    assert len(response.step_results[0].steps) == 1
    assert "Research findings" in response.step_results[0].steps[0].content
@pytest.mark.asyncio
async def test_async_condition_streaming(shared_db):
    """Test async condition with streaming."""
    workflow = Workflow(
        name="Async Streaming Condition",
        db=shared_db,
        steps=[Condition(name="async_check", evaluator=async_evaluator, steps=[research_step])],
    )
    events = []
    # async streaming yields the same event types as the sync path
    async for event in workflow.arun(input="AI technology", stream=True, stream_events=True):
        events.append(event)
    condition_started = [e for e in events if isinstance(e, ConditionExecutionStartedEvent)]
    condition_completed = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    workflow_completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(condition_started) == 1
    assert len(condition_completed) == 1
    assert len(workflow_completed) == 1
    assert condition_started[0].condition_result is True
# ============================================================================
# EARLY TERMINATION / STOP PROPAGATION TESTS
# ============================================================================
def early_stop_step(step_input: StepInput) -> StepOutput:
    """Step that requests early termination."""
    # stop=True signals the enclosing workflow to halt after this step.
    return StepOutput(
        stop=True,
        success=True,
        content="Early stop requested",
    )
def should_not_run_step(step_input: StepInput) -> StepOutput:
    """Step that should not run after early stop."""
    # Reaching this step in a stop-propagation test indicates a bug.
    return StepOutput(
        success=True,
        content="This step should not have run",
    )
def test_condition_propagates_stop_flag():
    """Test that Condition propagates stop flag from inner steps to workflow."""
    condition = Condition(
        name="Stop Condition",
        evaluator=True,
        steps=[early_stop_step],
    )
    step_input = StepInput(input="test")
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.stop is True, "Condition should propagate stop=True from inner step"
def test_condition_stop_propagation_in_workflow(shared_db):
    """Test that workflow stops when Condition's inner step returns stop=True."""
    workflow = Workflow(
        name="Stop Propagation Test",
        db=shared_db,
        steps=[
            Condition(
                name="stop_condition",
                evaluator=True,
                steps=[early_stop_step],
            ),
            should_not_run_step,  # This should NOT execute
        ],
    )
    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # Should only have 1 step result (the Condition), not 2
    assert len(response.step_results) == 1, "Workflow should stop after Condition with stop=True"
    assert response.step_results[0].stop is True
def test_condition_streaming_propagates_stop(shared_db):
    """Test that streaming Condition propagates stop flag and stops workflow."""
    workflow = Workflow(
        name="Streaming Stop Test",
        db=shared_db,
        steps=[
            Condition(
                name="stop_condition",
                evaluator=True,
                steps=[early_stop_step],
            ),
            should_not_run_step,
        ],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    # Verify that the Condition completed with stop propagation
    condition_completed = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    assert len(condition_completed) == 1
    # Check that inner step has stop=True in results
    step_results = condition_completed[0].step_results or []
    assert len(step_results) == 1
    assert step_results[0].stop is True
    # Most importantly: verify should_not_run_step was NOT executed
    # by checking there's no StepStartedEvent/StepCompletedEvent for it
    step_events = [e for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    step_names = [e.step_name for e in step_events]
    assert "should_not_run_step" not in step_names, "Workflow should have stopped before should_not_run_step"
@pytest.mark.asyncio
async def test_async_condition_propagates_stop():
    """Test that async Condition propagates stop flag."""
    condition = Condition(
        name="Async Stop Condition",
        evaluator=True,
        steps=[early_stop_step],
    )
    step_input = StepInput(input="test")
    result = await condition.aexecute(step_input)
    assert isinstance(result, StepOutput)
    assert result.stop is True, "Async Condition should propagate stop=True from inner step"
@pytest.mark.asyncio
async def test_async_condition_streaming_propagates_stop(shared_db):
    """Test that async streaming Condition propagates stop flag and stops workflow."""
    workflow = Workflow(
        name="Async Streaming Stop Test",
        db=shared_db,
        steps=[
            Condition(
                name="stop_condition",
                evaluator=True,
                steps=[early_stop_step],
            ),
            should_not_run_step,
        ],
    )
    events = []
    async for event in workflow.arun(input="test", stream=True, stream_events=True):
        events.append(event)
    # Verify that the Condition completed with stop propagation
    condition_completed = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    assert len(condition_completed) == 1
    # Check that inner step has stop=True in results
    step_results = condition_completed[0].step_results or []
    assert len(step_results) == 1
    assert step_results[0].stop is True
    # Most importantly: verify should_not_run_step was NOT executed
    step_events = [e for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    step_names = [e.step_name for e in step_events]
    assert "should_not_run_step" not in step_names, "Workflow should have stopped before should_not_run_step"
# ============================================================================
# ELSE_STEPS TESTS
# ============================================================================
def general_step(step_input: StepInput) -> StepOutput:
    """General research step (else branch)."""
    message = f"General research: {step_input.input}"
    return StepOutput(content=message, success=True)
def fallback_step(step_input: StepInput) -> StepOutput:
    """Fallback step for else branch."""
    return StepOutput(success=True, content="Fallback step executed")
def test_condition_else_steps_execute_when_false():
    """Test that else_steps execute when condition is False."""
    condition = Condition(
        name="Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        else_steps=[general_step],
    )
    step_input = StepInput(input="General market overview")  # Not tech topic
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.success is True
    assert len(result.steps) == 1
    assert "General research" in result.steps[0].content
    # result.content records which branch ran
    assert "else branch" in result.content
def test_condition_else_steps_not_executed_when_true():
    """Test that else_steps are NOT executed when condition is True."""
    condition = Condition(
        name="Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        else_steps=[general_step],
    )
    step_input = StepInput(input="AI technology trends")  # Tech topic
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.success is True
    assert len(result.steps) == 1
    assert "Research findings" in result.steps[0].content
    assert "if branch" in result.content
def test_condition_no_else_steps_returns_not_met():
    """Test that without else_steps, condition returns 'not met' when False."""
    condition = Condition(
        name="Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        # No else_steps
    )
    step_input = StepInput(input="General market overview")  # Not tech topic
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.success is True
    assert result.steps is None or len(result.steps) == 0
    assert "not met" in result.content
def test_condition_empty_else_steps_treated_as_none():
    """Test that empty else_steps list is treated as None."""
    condition = Condition(
        name="Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        else_steps=[],  # Empty list should be treated as None
    )
    step_input = StepInput(input="General market overview")  # Not tech topic
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.steps is None or len(result.steps) == 0
    assert "not met" in result.content
def test_condition_else_steps_multiple_steps():
    """Test else_steps with multiple steps and chaining."""
    condition = Condition(
        name="Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        else_steps=[general_step, analysis_step],  # Multiple else steps
    )
    step_input = StepInput(input="General market overview")  # Not tech topic
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.success is True
    # both else steps execute in order
    assert len(result.steps) == 2
    assert "General research" in result.steps[0].content
    assert "Analysis" in result.steps[1].content
    assert "else branch" in result.content
@pytest.mark.asyncio
async def test_condition_else_steps_aexecute():
    """Test else_steps with async execution."""
    condition = Condition(
        name="Async Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        else_steps=[general_step],
    )
    step_input = StepInput(input="General market overview")  # Not tech topic
    result = await condition.aexecute(step_input)
    assert isinstance(result, StepOutput)
    assert result.success is True
    assert len(result.steps) == 1
    assert "General research" in result.steps[0].content
    assert "else branch" in result.content
def test_condition_else_steps_streaming():
    """Test else_steps with streaming."""
    from agno.run.workflow import WorkflowRunOutput
    condition = Condition(
        name="Stream Tech Check",
        evaluator=is_tech_topic,
        steps=[research_step],
        else_steps=[general_step],
    )
    step_input = StepInput(input="General market overview")  # Not tech topic
    # Mock workflow response so execute_stream can be driven without a Workflow
    mock_response = WorkflowRunOutput(
        run_id="test-run",
        workflow_name="test-workflow",
        workflow_id="test-id",
        session_id="test-session",
        content="",
    )
    events = list(condition.execute_stream(step_input, workflow_run_response=mock_response, stream_events=True))
    started_events = [e for e in events if isinstance(e, ConditionExecutionStartedEvent)]
    completed_events = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    step_outputs = [e for e in events if isinstance(e, StepOutput)]
    assert len(started_events) == 1
    assert len(completed_events) == 1
    assert len(step_outputs) == 1
    assert started_events[0].condition_result is False
    # completed event reports which branch executed
    assert completed_events[0].branch == "else"
def test_condition_else_steps_stop_propagation():
    """Test that stop flag propagates correctly from else_steps."""
    condition = Condition(
        name="Stop in Else",
        evaluator=False,  # Boolean False to trigger else branch
        steps=[research_step],
        else_steps=[early_stop_step],
    )
    step_input = StepInput(input="test")
    result = condition.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.stop is True, "Condition should propagate stop=True from else_steps"
def test_condition_else_steps_in_workflow(shared_db):
    """Test else_steps in a real workflow."""
    workflow = Workflow(
        name="Workflow with else_steps",
        db=shared_db,
        steps=[
            Condition(
                name="topic_router",
                evaluator=is_tech_topic,
                steps=[research_step],
                else_steps=[general_step, fallback_step],
            )
        ],
    )
    # Test with non-tech input (should trigger else branch)
    response = workflow.run(input="General market overview")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    condition_result = response.step_results[0]
    assert len(condition_result.steps) == 2  # general_step + fallback_step
    assert "General research" in condition_result.steps[0].content
    assert "Fallback step" in condition_result.steps[1].content
def test_condition_else_steps_in_workflow_if_branch(shared_db):
    """Test that if branch works correctly when else_steps is provided."""
    workflow = Workflow(
        name="Workflow with else_steps if branch",
        db=shared_db,
        steps=[
            Condition(
                name="topic_router",
                evaluator=is_tech_topic,
                steps=[research_step],
                else_steps=[general_step],
            )
        ],
    )
    # Test with tech input (should trigger if branch)
    response = workflow.run(input="AI technology trends")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    condition_result = response.step_results[0]
    assert len(condition_result.steps) == 1
    assert "Research findings" in condition_result.steps[0].content
    assert "if branch" in condition_result.content
# ============================================================================
# CEL EXPRESSION TESTS
# ============================================================================
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_basic_input_contains():
    """Test CEL condition with input.contains() expression."""
    # string evaluators are parsed as CEL expressions
    condition = Condition(
        name="CEL Input Contains",
        evaluator='input.contains("urgent")',
        steps=[research_step],
    )
    # Should trigger - contains "urgent"
    result_true = condition.execute(StepInput(input="This is an urgent request"))
    assert len(result_true.steps) == 1
    assert "Research findings" in result_true.steps[0].content
    # Should not trigger - no "urgent"
    result_false = condition.execute(StepInput(input="This is a normal request"))
    assert result_false.steps is None or len(result_false.steps) == 0
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_previous_step_content():
    """Test CEL condition checking previous_step_content."""
    condition = Condition(
        name="CEL Previous Content",
        evaluator="previous_step_content.size() > 10",
        steps=[analysis_step],
    )
    # Should trigger - has previous content > 10 chars
    result_true = condition.execute(
        StepInput(input="test", previous_step_content="This is some longer content from previous step")
    )
    assert len(result_true.steps) == 1
    assert "Analysis" in result_true.steps[0].content
    # Should not trigger - short previous content
    result_false = condition.execute(StepInput(input="test", previous_step_content="Short"))
    assert result_false.steps is None or len(result_false.steps) == 0
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_additional_data():
    """Test CEL condition with additional_data access."""
    condition = Condition(
        name="CEL Additional Data",
        evaluator='additional_data.priority == "high"',
        steps=[fact_check_step],
    )
    # Should trigger - high priority
    result_true = condition.execute(StepInput(input="test", additional_data={"priority": "high"}))
    assert len(result_true.steps) == 1
    # Should not trigger - low priority
    result_false = condition.execute(StepInput(input="test", additional_data={"priority": "low"}))
    assert result_false.steps is None or len(result_false.steps) == 0
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_session_state():
    """Test CEL condition with session_state access."""
    condition = Condition(
        name="CEL Session State",
        evaluator="session_state.request_count > 5",
        steps=[research_step],
    )
    # session_state is passed to execute(), not carried on StepInput
    # Should trigger - count > 5
    result_true = condition.execute(StepInput(input="test"), session_state={"request_count": 10})
    assert len(result_true.steps) == 1
    # Should not trigger - count <= 5
    result_false = condition.execute(StepInput(input="test"), session_state={"request_count": 3})
    assert result_false.steps is None or len(result_false.steps) == 0
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_compound_expression():
    """Test CEL condition with compound logical expression."""
    condition = Condition(
        name="CEL Compound",
        evaluator='input.contains("urgent") && additional_data.priority == "high"',
        steps=[fact_check_step],
    )
    # Should trigger - both conditions met
    result_true = condition.execute(StepInput(input="This is urgent", additional_data={"priority": "high"}))
    assert len(result_true.steps) == 1
    # Should not trigger - only one condition met
    result_partial = condition.execute(StepInput(input="This is urgent", additional_data={"priority": "low"}))
    assert result_partial.steps is None or len(result_partial.steps) == 0
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_with_else_steps():
    """Test CEL condition with else_steps."""
    # Define else step locally to avoid any scope issues
    def cel_else_step(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"Else branch: {step_input.input}", success=True)
    condition = Condition(
        name="CEL With Else",
        evaluator='input.contains("premium")',
        steps=[research_step],
        else_steps=[cel_else_step],
    )
    # Should trigger if branch
    result_if = condition.execute(StepInput(input="premium user request"))
    assert len(result_if.steps) == 1
    assert "Research findings" in result_if.steps[0].content
    assert "if branch" in result_if.content
    # Should trigger else branch
    result_else = condition.execute(StepInput(input="free user request"))
    assert len(result_else.steps) == 1
    assert "Else branch" in result_else.steps[0].content
    assert "else branch" in result_else.content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_in_workflow(shared_db):
    """Test CEL condition within a workflow."""
    workflow = Workflow(
        name="CEL Condition Workflow",
        db=shared_db,
        steps=[
            research_step,
            Condition(
                name="cel_check",
                evaluator='input.contains("AI")',
                steps=[analysis_step],
            ),
        ],
    )
    # Should trigger condition
    response = workflow.run(input="AI technology trends")
    assert len(response.step_results) == 2
    condition_result = response.step_results[1]
    assert len(condition_result.steps) == 1
    assert "Analysis" in condition_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_streaming(shared_db):
    """Test CEL condition with streaming."""
    workflow = Workflow(
        name="CEL Streaming Condition",
        db=shared_db,
        steps=[
            Condition(
                name="cel_stream",
                evaluator='input.contains("stream")',
                steps=[research_step],
            )
        ],
    )
    events = list(workflow.run(input="stream test", stream=True, stream_events=True))
    condition_started = [e for e in events if isinstance(e, ConditionExecutionStartedEvent)]
    condition_completed = [e for e in events if isinstance(e, ConditionExecutionCompletedEvent)]
    assert len(condition_started) == 1
    assert len(condition_completed) == 1
    assert condition_started[0].condition_result is True
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
@pytest.mark.asyncio
async def test_cel_condition_async():
    """Test CEL condition with async execution."""
    condition = Condition(
        name="CEL Async",
        evaluator="input.size() > 5",
        steps=[research_step],
    )
    # Should trigger - input length > 5
    result = await condition.aexecute(StepInput(input="longer input text"))
    assert len(result.steps) == 1
    assert "Research findings" in result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_condition_previous_step_outputs():
    """Test CEL condition with previous_step_outputs map variable."""
    # CEL can index the previous_step_outputs map by step name
    condition = Condition(
        name="CEL Previous Step Outputs",
        evaluator='previous_step_outputs.step1.contains("important")',
        steps=[fact_check_step],
    )
    from agno.workflow.types import StepOutput as SO
    # Should trigger - step1 contains "important"
    step_input = StepInput(
        input="test",
        previous_step_outputs={
            "step1": SO(content="This is important content", success=True),
            "step2": SO(content="Second step output", success=True),
        },
    )
    result = condition.execute(step_input)
    assert len(result.steps) == 1
    assert "Fact check" in result.steps[0].content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_condition_steps.py",
"license": "Apache License 2.0",
"lines": 727,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_loop_steps.py | """Integration tests for Loop functionality in workflows."""
import pytest
from agno.run.workflow import (
LoopExecutionCompletedEvent,
LoopExecutionStartedEvent,
StepCompletedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowRunOutput,
)
from agno.workflow import Loop, Parallel, Workflow
from agno.workflow.cel import CEL_AVAILABLE
from agno.workflow.types import StepInput, StepOutput
# Helper functions
def research_step(step_input: StepInput) -> StepOutput:
    """Research step that generates content."""
    return StepOutput(
        step_name="research",
        success=True,
        content="Found research data about AI trends",
    )
def analysis_step(step_input: StepInput) -> StepOutput:
    """Analysis step."""
    return StepOutput(
        step_name="analysis",
        success=True,
        content="Analyzed AI trends data",
    )
def summary_step(step_input: StepInput) -> StepOutput:
    """Summary step."""
    return StepOutput(
        step_name="summary",
        success=True,
        content="Summary of findings",
    )
# Helper function to recursively search for content in nested steps
def find_content_in_steps(step_output: StepOutput, search_text: str) -> bool:
    """Recursively search for content in step output and its nested steps.

    Args:
        step_output: Step output to inspect; may carry nested outputs in ``steps``.
        search_text: Substring to look for in each step's ``content``.

    Returns:
        True if ``search_text`` occurs in this step's content or any nested
        step's content, False otherwise.
    """
    # Guard against container steps whose content is None, where the bare
    # `in` check would raise TypeError instead of simply not matching.
    if step_output.content and search_text in step_output.content:
        return True
    if step_output.steps:
        return any(find_content_in_steps(nested_step, search_text) for nested_step in step_output.steps)
    return False
# ============================================================================
# TESTS (Fast - No Workflow Overhead)
# ============================================================================
def test_loop_direct_execute():
    """Test Loop.execute() directly without workflow."""
    def simple_end_condition(outputs):
        # end_condition receives all outputs accumulated so far
        return len(outputs) >= 2
    loop = Loop(name="Direct Loop", steps=[research_step], end_condition=simple_end_condition, max_iterations=3)
    step_input = StepInput(input="direct test")
    result = loop.execute(step_input)
    assert isinstance(result, StepOutput)
    assert len(result.steps) >= 2  # Should stop when condition is met
    assert all("AI trends" in output.content for output in result.steps)
@pytest.mark.asyncio
async def test_loop_direct_aexecute():
    """Test Loop.aexecute() directly without workflow."""
    def simple_end_condition(outputs):
        return len(outputs) >= 2
    loop = Loop(name="Direct Async Loop", steps=[research_step], end_condition=simple_end_condition, max_iterations=3)
    step_input = StepInput(input="direct async test")
    result = await loop.aexecute(step_input)
    assert isinstance(result, StepOutput)
    assert len(result.steps) >= 2
    assert all("AI trends" in output.content for output in result.steps)
def test_loop_direct_execute_stream():
    """Test Loop.execute_stream() directly without workflow."""
    from agno.run.workflow import LoopIterationCompletedEvent, LoopIterationStartedEvent, WorkflowRunOutput
    def simple_end_condition(outputs):
        return len(outputs) >= 1
    loop = Loop(name="Direct Stream Loop", steps=[research_step], end_condition=simple_end_condition, max_iterations=2)
    step_input = StepInput(input="direct stream test")
    # Mock workflow response for streaming
    mock_response = WorkflowRunOutput(
        run_id="test-run",
        workflow_name="test-workflow",
        workflow_id="test-id",
        session_id="test-session",
        content="",
    )
    events = list(loop.execute_stream(step_input, workflow_run_response=mock_response, stream_events=True))
    # Should have started, completed, iteration events and step outputs
    started_events = [e for e in events if isinstance(e, LoopExecutionStartedEvent)]
    completed_events = [e for e in events if isinstance(e, LoopExecutionCompletedEvent)]
    iteration_started = [e for e in events if isinstance(e, LoopIterationStartedEvent)]
    iteration_completed = [e for e in events if isinstance(e, LoopIterationCompletedEvent)]
    step_outputs = [e for e in events if isinstance(e, StepOutput)]
    assert len(started_events) == 1
    assert len(completed_events) == 1
    assert len(iteration_started) >= 1
    assert len(iteration_completed) >= 1
    assert len(step_outputs) >= 1
    assert started_events[0].max_iterations == 2
def test_loop_direct_max_iterations():
    """Test Loop respects max_iterations."""
    def never_end_condition(outputs):
        return False  # Never end
    loop = Loop(name="Max Iterations Loop", steps=[research_step], end_condition=never_end_condition, max_iterations=2)
    step_input = StepInput(input="max iterations test")
    result = loop.execute(step_input)
    assert isinstance(result, StepOutput)
    assert len(result.steps) == 2  # Should stop at max_iterations
def test_loop_direct_no_end_condition():
    """Test Loop without end condition (uses max_iterations only)."""
    loop = Loop(name="No End Condition Loop", steps=[research_step], max_iterations=3)
    step_input = StepInput(input="no condition test")
    result = loop.execute(step_input)
    assert isinstance(result, StepOutput)
    assert len(result.steps) == 3  # Should run all iterations
def test_loop_direct_multiple_steps():
    """Test Loop with multiple steps per iteration."""
    def simple_end_condition(outputs):
        return len(outputs) >= 2  # 2 outputs = 1 iteration (2 steps)
    loop = Loop(
        name="Multi Step Loop",
        steps=[research_step, analysis_step],
        end_condition=simple_end_condition,
        max_iterations=3,
    )
    step_input = StepInput(input="multi step test")
    result = loop.execute(step_input)
    assert isinstance(result, StepOutput)
    assert len(result.steps) >= 2
    # Should have both research and analysis outputs
    research_outputs = [r for r in result.steps if "research data" in r.content]
    analysis_outputs = [r for r in result.steps if "Analyzed" in r.content]
    assert len(research_outputs) >= 1
    assert len(analysis_outputs) >= 1
# ============================================================================
# INTEGRATION TESTS (With Workflow)
# ============================================================================
def test_basic_loop(shared_db):
"""Test basic loop with multiple steps."""
def check_content(outputs):
"""Stop when we have enough content."""
return any("AI trends" in o.content for o in outputs)
workflow = Workflow(
name="Basic Loop",
db=shared_db,
steps=[
Loop(
name="test_loop",
steps=[research_step, analysis_step],
end_condition=check_content,
max_iterations=3,
)
],
)
response = workflow.run(input="test")
assert isinstance(response, WorkflowRunOutput)
assert len(response.step_results) == 1
assert find_content_in_steps(response.step_results[0], "AI trends")
def test_loop_with_parallel(shared_db):
    """Test loop with parallel steps."""

    def check_content(outputs):
        """Stop when both research and analysis are done."""
        # Each predicate scans all outputs gathered so far, not just the last.
        has_research = any("research data" in o.content for o in outputs)
        has_analysis = any("Analyzed" in o.content for o in outputs)
        return has_research and has_analysis

    # Each loop iteration runs a Parallel pair followed by a summary step.
    workflow = Workflow(
        name="Parallel Loop",
        db=shared_db,
        steps=[
            Loop(
                name="test_loop",
                steps=[Parallel(research_step, analysis_step, name="Parallel Research & Analysis"), summary_step],
                end_condition=check_content,
                max_iterations=3,
            )
        ],
    )
    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # Check the loop step output in step_results
    loop_step_output = response.step_results[0]  # First step (Loop)
    assert isinstance(loop_step_output, StepOutput)
    assert loop_step_output.step_type == "Loop"
    # Check nested parallel and summary step outputs
    parallel_output = loop_step_output.steps[0] if loop_step_output.steps else None
    assert parallel_output is not None
    assert parallel_output.step_type == "Parallel"
def test_loop_streaming(shared_db):
    """Streaming a loop emits exactly one started/completed event pair."""
    wf = Workflow(
        name="Streaming Loop",
        db=shared_db,
        steps=[
            Loop(
                name="test_loop",
                steps=[research_step],
                end_condition=lambda outputs: "AI trends" in outputs[-1].content,
                max_iterations=3,
            )
        ],
    )
    stream = list(wf.run(input="test", stream=True, stream_events=True))
    started = [ev for ev in stream if isinstance(ev, LoopExecutionStartedEvent)]
    completed = [ev for ev in stream if isinstance(ev, LoopExecutionCompletedEvent)]
    finished = [ev for ev in stream if isinstance(ev, WorkflowCompletedEvent)]
    assert len(started) == 1
    assert len(completed) == 1
    assert len(finished) == 1
def test_parallel_loop_streaming(shared_db):
    """Parallel steps nested inside a loop still stream to completion."""
    wf = Workflow(
        name="Parallel Streaming Loop",
        db=shared_db,
        steps=[
            Loop(
                name="test_loop",
                steps=[Parallel(research_step, analysis_step, name="Parallel Steps")],
                end_condition=lambda outputs: "AI trends" in outputs[-1].content,
                max_iterations=3,
            )
        ],
    )
    stream = list(wf.run(input="test", stream=True, stream_events=True))
    done_events = [ev for ev in stream if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done_events) == 1
@pytest.mark.asyncio
async def test_async_loop(shared_db):
    """Test async loop execution."""

    # Async executor: the loop must await it rather than call it synchronously.
    async def async_step(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="async_step", content="Async research: AI trends", success=True)

    workflow = Workflow(
        name="Async Loop",
        db=shared_db,
        steps=[
            Loop(
                name="test_loop",
                steps=[async_step],
                end_condition=lambda outputs: "AI trends" in outputs[-1].content,
                max_iterations=3,
            )
        ],
    )
    response = await workflow.arun(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # The first iteration already satisfies the end condition.
    assert find_content_in_steps(response.step_results[0], "AI trends")
@pytest.mark.asyncio
async def test_async_parallel_loop(shared_db):
    """Test async loop with parallel steps."""

    # Two async executors run side by side inside the Parallel group.
    async def async_research(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="async_research", content="Async research: AI trends", success=True)

    async def async_analysis(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="async_analysis", content="Async analysis complete", success=True)

    workflow = Workflow(
        name="Async Parallel Loop",
        db=shared_db,
        steps=[
            Loop(
                name="test_loop",
                steps=[Parallel(async_research, async_analysis, name="Async Parallel Steps")],
                end_condition=lambda outputs: "AI trends" in outputs[-1].content,
                max_iterations=3,
            )
        ],
    )
    response = await workflow.arun(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # The research output must surface somewhere in the nested loop results.
    assert find_content_in_steps(response.step_results[0], "AI trends")
# ============================================================================
# EARLY TERMINATION / STOP PROPAGATION TESTS
# ============================================================================
def early_stop_step(step_input: StepInput) -> StepOutput:
    """Step that requests early termination of the enclosing workflow."""
    return StepOutput(step_name="early_stop", content="Early stop requested", success=True, stop=True)
def should_not_run_step(step_input: StepInput) -> StepOutput:
    """Sentinel step; it must never execute once a stop was requested."""
    return StepOutput(step_name="should_not_run", content="This step should not have run", success=True)
def normal_loop_step(step_input: StepInput) -> StepOutput:
    """Plain step used as loop filler in the stop-propagation tests."""
    return StepOutput(step_name="normal_loop_step", content="Normal loop iteration", success=True)
def test_loop_propagates_stop_flag():
    """A stop=True from an inner step must bubble out of Loop.execute()."""

    def never_end(outputs):
        # The loop would otherwise run until max_iterations.
        return False

    lp = Loop(
        name="Stop Loop",
        steps=[early_stop_step],
        end_condition=never_end,
        max_iterations=5,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    assert output.stop is True, "Loop should propagate stop=True from inner step"
    # Should only have 1 iteration since stop was requested
    assert len(output.steps) == 1, "Loop should stop after first iteration with stop=True"
def test_loop_stop_propagation_in_workflow(shared_db):
    """Test that workflow stops when Loop's inner step returns stop=True."""

    def never_end(outputs):
        # Force reliance on stop propagation rather than the end condition.
        return False

    workflow = Workflow(
        name="Loop Stop Propagation Test",
        db=shared_db,
        steps=[
            Loop(
                name="stop_loop",
                steps=[early_stop_step],
                end_condition=never_end,
                max_iterations=5,
            ),
            should_not_run_step,  # This should NOT execute
        ],
    )
    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # Should only have 1 step result (the Loop), not 2
    assert len(response.step_results) == 1, "Workflow should stop after Loop with stop=True"
    assert response.step_results[0].stop is True
def test_loop_stops_iterations_on_stop_flag():
    """Test that Loop stops iterating when a step returns stop=True."""
    # Mutable cell so the closure can count calls across iterations.
    iteration_count = [0]

    def counting_step(step_input: StepInput) -> StepOutput:
        iteration_count[0] += 1
        # Request a stop on the second invocation.
        if iteration_count[0] >= 2:
            return StepOutput(
                step_name="counting_step",
                content=f"Iteration {iteration_count[0]} - stopping",
                success=True,
                stop=True,
            )
        return StepOutput(
            step_name="counting_step",
            content=f"Iteration {iteration_count[0]}",
            success=True,
        )

    def never_end(outputs):
        return False

    loop = Loop(
        name="Counting Loop",
        steps=[counting_step],
        end_condition=never_end,
        max_iterations=10,
    )
    step_input = StepInput(input="test")
    result = loop.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.stop is True
    # Should have stopped at iteration 2
    assert len(result.steps) == 2, "Loop should stop after iteration that returned stop=True"
    assert iteration_count[0] == 2
def test_loop_streaming_propagates_stop(shared_db):
    """Test that streaming Loop propagates stop flag and stops workflow."""

    def never_end(outputs):
        # Keep looping until the stop flag (not the condition) ends it.
        return False

    workflow = Workflow(
        name="Streaming Loop Stop Test",
        db=shared_db,
        steps=[
            Loop(
                name="stop_loop",
                steps=[early_stop_step],
                end_condition=never_end,
                max_iterations=5,
            ),
            should_not_run_step,
        ],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    # Verify workflow completed
    workflow_completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(workflow_completed) == 1
    # Should only have 1 step result (the Loop), not 2
    assert len(workflow_completed[0].step_results) == 1, "Workflow should stop after Loop with stop=True"
    # Check that the loop output has stop=True
    loop_output = workflow_completed[0].step_results[0]
    assert loop_output.stop is True
    # Check that inner step has stop=True in results
    assert len(loop_output.steps) == 1
    assert loop_output.steps[0].stop is True
    # Most importantly: verify should_not_run_step was NOT executed
    step_events = [e for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    step_names = [e.step_name for e in step_events]
    assert "should_not_run_step" not in step_names, "Workflow should have stopped before should_not_run_step"
@pytest.mark.asyncio
async def test_async_loop_propagates_stop():
    """The async execution path must honour stop=True exactly like sync."""

    def never_end(outputs):
        return False

    lp = Loop(
        name="Async Stop Loop",
        steps=[early_stop_step],
        end_condition=never_end,
        max_iterations=5,
    )
    output = await lp.aexecute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    assert output.stop is True, "Async Loop should propagate stop=True from inner step"
    assert len(output.steps) == 1, "Loop should stop after first iteration with stop=True"
@pytest.mark.asyncio
async def test_async_loop_streaming_propagates_stop(shared_db):
    """Test that async streaming Loop propagates stop flag and stops workflow."""

    def never_end(outputs):
        # Force reliance on stop propagation rather than the end condition.
        return False

    workflow = Workflow(
        name="Async Streaming Loop Stop Test",
        db=shared_db,
        steps=[
            Loop(
                name="stop_loop",
                steps=[early_stop_step],
                end_condition=never_end,
                max_iterations=5,
            ),
            should_not_run_step,
        ],
    )
    # Collect the async event stream eagerly before asserting.
    events = []
    async for event in workflow.arun(input="test", stream=True, stream_events=True):
        events.append(event)
    # Verify workflow completed
    workflow_completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(workflow_completed) == 1
    # Should only have 1 step result (the Loop), not 2
    assert len(workflow_completed[0].step_results) == 1, "Workflow should stop after Loop with stop=True"
    # Check that the loop output has stop=True
    loop_output = workflow_completed[0].step_results[0]
    assert loop_output.stop is True
    # Check that inner step has stop=True in results
    assert len(loop_output.steps) == 1
    assert loop_output.steps[0].stop is True
    # Most importantly: verify should_not_run_step was NOT executed
    step_events = [e for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    step_names = [e.step_name for e in step_events]
    assert "should_not_run_step" not in step_names, "Workflow should have stopped before should_not_run_step"
def test_loop_with_multiple_steps_propagates_stop():
    """A stop raised mid-iteration skips the remaining steps of that iteration."""

    def first_step(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="first", content="First step done", success=True)

    def second_step_stops(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="second", content="Second step stops", success=True, stop=True)

    def third_step(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="third", content="Third step - should not run", success=True)

    def never_end(outputs):
        return False

    lp = Loop(
        name="Multi Step Stop Loop",
        steps=[first_step, second_step_stops, third_step],
        end_condition=never_end,
        max_iterations=5,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    assert output.stop is True
    # Should have first and second step from first iteration only
    assert len(output.steps) == 2, "Loop should stop after step that returned stop=True"
    assert output.steps[0].content == "First step done"
    assert output.steps[1].content == "Second step stops"
# ============================================================================
# CEL EXPRESSION TESTS
# ============================================================================
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_iteration_count():
    """A CEL string end condition can reference current_iteration."""
    lp = Loop(
        name="CEL Iteration Loop",
        steps=[research_step],
        end_condition="current_iteration >= 2",
        max_iterations=5,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    # The condition is evaluated after the counter increments, so two
    # iterations complete before "current_iteration >= 2" becomes true.
    assert len(output.steps) == 2
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_max_iterations_expression():
    """CEL conditions can reference max_iterations as a variable."""
    lp = Loop(
        name="CEL Max Iterations Loop",
        steps=[research_step],
        end_condition="current_iteration >= max_iterations - 1",
        max_iterations=3,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    # With max_iterations=3 the expression reduces to "current_iteration >= 2".
    assert len(output.steps) == 2
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_last_step_content():
    """A CEL condition can inspect the most recent step's content."""
    calls = [0]

    def counting_step(step_input: StepInput) -> StepOutput:
        calls[0] += 1
        # Emit the terminator token on the third call.
        text = "DONE" if calls[0] >= 3 else f"Iteration {calls[0]}"
        return StepOutput(step_name="counting", content=text, success=True)

    lp = Loop(
        name="CEL Last Content Loop",
        steps=[counting_step],
        end_condition='last_step_content.contains("DONE")',
        max_iterations=10,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    assert len(output.steps) == 3
    assert "DONE" in output.steps[-1].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_all_success():
    """CEL conditions can combine all_success with an iteration bound."""

    def always_success_step(step_input: StepInput) -> StepOutput:
        return StepOutput(step_name="success", content="Success", success=True)

    lp = Loop(
        name="CEL All Success Loop",
        steps=[always_success_step],
        end_condition="all_success && current_iteration >= 2",
        max_iterations=10,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    assert all(step.success for step in output.steps)
    assert len(output.steps) == 2
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_step_outputs():
    """CEL conditions can query the step_outputs map."""
    lp = Loop(
        name="CEL Step Outputs Loop",
        steps=[research_step, analysis_step],  # 2 steps per iteration
        end_condition="step_outputs.size() >= 2 && all_success",
        max_iterations=10,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    # Two entries land in step_outputs per iteration, so one iteration suffices.
    assert len(output.steps) == 2
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_in_workflow(shared_db):
    """String (CEL) end conditions work when the loop runs inside a workflow."""
    wf = Workflow(
        name="CEL Loop Workflow",
        db=shared_db,
        steps=[
            Loop(
                name="cel_loop",
                steps=[research_step],
                end_condition="current_iteration >= 2",
                max_iterations=5,
            )
        ],
    )
    run_output = wf.run(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert len(run_output.step_results) == 1
    # Two iterations completed before the CEL condition became true.
    assert len(run_output.step_results[0].steps) == 2
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_streaming(shared_db):
    """CEL-terminated loops still emit start/complete events when streamed."""
    wf = Workflow(
        name="CEL Streaming Loop",
        db=shared_db,
        steps=[
            Loop(
                name="cel_stream_loop",
                steps=[research_step],
                end_condition="current_iteration >= 1",
                max_iterations=5,
            )
        ],
    )
    stream = list(wf.run(input="test", stream=True, stream_events=True))
    started = [ev for ev in stream if isinstance(ev, LoopExecutionStartedEvent)]
    finished = [ev for ev in stream if isinstance(ev, LoopExecutionCompletedEvent)]
    assert len(started) == 1
    assert len(finished) == 1
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
@pytest.mark.asyncio
async def test_cel_loop_async():
    """CEL end conditions are honoured on the async execution path too."""
    lp = Loop(
        name="CEL Async Loop",
        steps=[research_step],
        end_condition="current_iteration >= 1",
        max_iterations=5,
    )
    output = await lp.aexecute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    # The condition is satisfied after a single iteration.
    assert len(output.steps) == 1
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_loop_compound_condition():
    """Compound CEL expressions (&&) are evaluated as a whole."""
    lp = Loop(
        name="CEL Compound Loop",
        steps=[research_step],
        end_condition="current_iteration >= 2 && all_success",
        max_iterations=10,
    )
    output = lp.execute(StepInput(input="test"))
    assert isinstance(output, StepOutput)
    assert len(output.steps) == 2
    assert all(step.success for step in output.steps)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_loop_steps.py",
"license": "Apache License 2.0",
"lines": 597,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_mixed_steps.py | """Integration tests for complex combinations of workflow steps."""
import pytest
from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutput
from agno.workflow import Condition, Loop, Parallel, Workflow
from agno.workflow.router import Router
from agno.workflow.types import StepInput, StepOutput
def find_content_in_steps(step_output, search_text):
    """Recursively search for content in step output and its nested steps."""
    if search_text in step_output.content:
        return True
    nested = step_output.steps
    if not nested:
        return False
    return any(find_content_in_steps(child, search_text) for child in nested)
# Helper functions
def research_step(step_input: StepInput) -> StepOutput:
    """Research step: echoes the input with a data marker."""
    text = f"Research: {step_input.input}. Found data showing trends."
    return StepOutput(content=text, success=True)
def analysis_step(step_input: StepInput) -> StepOutput:
    """Analysis step: wraps the previous step's content."""
    text = f"Analysis of: {step_input.previous_step_content}"
    return StepOutput(content=text, success=True)
def summary_step(step_input: StepInput) -> StepOutput:
    """Summary step: wraps the previous step's content."""
    text = f"Summary of findings: {step_input.previous_step_content}"
    return StepOutput(content=text, success=True)
# Evaluators for conditions
def has_data(step_input: StepInput) -> bool:
    """Condition evaluator: True when the word "data" appears in the input
    or, failing that, in the previous step's content."""
    text = step_input.input or step_input.previous_step_content or ""
    return "data" in text.lower()
def needs_more_research(step_input: StepInput) -> bool:
    """Condition evaluator: short previous output (< 200 chars) means more
    research is needed."""
    prior = step_input.previous_step_content or ""
    return len(prior) < 200
def router_step(step_input: StepInput) -> StepOutput:
    """Router decision step: names the route picked from the input text."""
    chosen = "Route A" if "data" in step_input.input.lower() else "Route B"
    return StepOutput(content=chosen, success=True)
def route_a_step(step_input: StepInput) -> StepOutput:
    """Route A processing: fixed marker output."""
    out = StepOutput(content="Processed via Route A", success=True)
    return out
def route_b_step(step_input: StepInput) -> StepOutput:
    """Route B processing: fixed marker output."""
    out = StepOutput(content="Processed via Route B", success=True)
    return out
def test_loop_with_parallel(shared_db):
    """Test Loop containing Parallel steps."""
    # Each iteration runs the Parallel pair and then summary_step.
    workflow = Workflow(
        name="Loop with Parallel",
        db=shared_db,
        steps=[
            Loop(
                name="research_loop",
                steps=[Parallel(research_step, analysis_step, name="parallel_research"), summary_step],
                end_condition=lambda outputs: len(outputs) >= 2,
                max_iterations=3,
            )
        ],
    )
    response = workflow.run(input="test topic")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1  # One loop output
    loop_output = response.step_results[0]
    assert isinstance(loop_output, StepOutput)
    assert loop_output.step_type == "Loop"
    assert loop_output.steps is not None
    assert len(loop_output.steps) >= 2  # At least two steps per iteration
def test_loop_with_condition(shared_db):
    """A Condition nested inside a Loop is evaluated every iteration."""
    wf = Workflow(
        name="Loop with Condition",
        db=shared_db,
        steps=[
            Loop(
                name="research_loop",
                steps=[
                    research_step,
                    Condition(name="analysis_condition", evaluator=has_data, steps=[analysis_step]),
                ],
                end_condition=lambda outputs: len(outputs) >= 2,
                max_iterations=3,
            )
        ],
    )
    run_output = wf.run(input="test data")
    assert isinstance(run_output, WorkflowRunOutput)
    assert len(run_output.step_results) == 1
    # The condition fired, so analysis output is nested somewhere below.
    assert find_content_in_steps(run_output.step_results[0], "Analysis")
def test_condition_with_loop(shared_db):
    """Test Condition containing Loop steps."""
    workflow = Workflow(
        name="Condition with Loop",
        db=shared_db,
        steps=[
            research_step,
            Condition(
                name="research_condition",
                evaluator=needs_more_research,
                steps=[
                    # The loop only runs when the condition evaluator passes.
                    Loop(
                        name="deep_research",
                        steps=[research_step, analysis_step],
                        end_condition=lambda outputs: len(outputs) >= 2,
                        max_iterations=3,
                    )
                ],
            ),
        ],
    )
    response = workflow.run(input="test topic")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2  # Research + Condition
def test_parallel_with_loops(shared_db):
    """Test Parallel containing multiple Loops."""
    workflow = Workflow(
        name="Parallel with Loops",
        db=shared_db,
        steps=[
            # Two independent loops execute side by side inside one Parallel.
            Parallel(
                Loop(
                    name="research_loop",
                    steps=[research_step],
                    end_condition=lambda outputs: len(outputs) >= 2,
                    max_iterations=3,
                ),
                Loop(
                    name="analysis_loop",
                    steps=[analysis_step],
                    end_condition=lambda outputs: len(outputs) >= 2,
                    max_iterations=3,
                ),
                name="parallel_loops",
            )
        ],
    )
    response = workflow.run(input="test topic")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1  # One parallel output
    parallel_output = response.step_results[0]
    assert isinstance(parallel_output, StepOutput)
    assert parallel_output.step_type == "Parallel"
def test_nested_conditions_and_loops(shared_db):
    """Test nested Conditions and Loops."""
    workflow = Workflow(
        name="Nested Conditions and Loops",
        db=shared_db,
        steps=[
            # Condition -> Loop -> (step, inner Condition): three levels deep.
            Condition(
                name="outer_condition",
                evaluator=needs_more_research,
                steps=[
                    Loop(
                        name="research_loop",
                        steps=[
                            research_step,
                            Condition(name="inner_condition", evaluator=has_data, steps=[analysis_step]),
                        ],
                        end_condition=lambda outputs: len(outputs) >= 2,
                        max_iterations=3,
                    )
                ],
            )
        ],
    )
    response = workflow.run(input="test data")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1  # One condition output
    condition_output = response.step_results[0]
    assert isinstance(condition_output, StepOutput)
    assert condition_output.step_type == "Condition"
def test_parallel_with_conditions_and_loops(shared_db):
    """A Parallel group may mix Loop and Condition members."""
    wf = Workflow(
        name="Mixed Parallel",
        db=shared_db,
        steps=[
            Parallel(
                Loop(
                    name="research_loop",
                    steps=[research_step],
                    end_condition=lambda outputs: len(outputs) >= 2,
                    max_iterations=3,
                ),
                Condition(name="analysis_condition", evaluator=has_data, steps=[analysis_step]),
                name="mixed_parallel",
            ),
            summary_step,
        ],
    )
    run_output = wf.run(input="test data")
    assert isinstance(run_output, WorkflowRunOutput)
    # Parallel + Summary
    assert len(run_output.step_results) == 2
@pytest.mark.asyncio
async def test_async_complex_combination(shared_db):
    """Test async execution of complex step combinations."""
    workflow = Workflow(
        name="Async Complex",
        db=shared_db,
        steps=[
            # Loop -> Parallel -> (Condition, plain step), then a summary.
            Loop(
                name="outer_loop",
                steps=[
                    Parallel(
                        Condition(name="research_condition", evaluator=needs_more_research, steps=[research_step]),
                        analysis_step,
                        name="parallel_steps",
                    )
                ],
                end_condition=lambda outputs: len(outputs) >= 2,
                max_iterations=3,
            ),
            summary_step,
        ],
    )
    response = await workflow.arun(input="test topic")
    assert isinstance(response, WorkflowRunOutput)
    # The final step result must carry the summary text somewhere.
    assert find_content_in_steps(response.step_results[-1], "Summary")
def test_complex_streaming(shared_db):
    """Test streaming with complex step combinations."""
    workflow = Workflow(
        name="Complex Streaming",
        db=shared_db,
        steps=[
            # Outer loop runs a Parallel of (Condition, inner Loop) each pass.
            Loop(
                name="main_loop",
                steps=[
                    Parallel(
                        Condition(name="research_condition", evaluator=has_data, steps=[research_step]),
                        Loop(
                            name="analysis_loop",
                            steps=[analysis_step],
                            end_condition=lambda outputs: len(outputs) >= 2,
                            max_iterations=2,
                        ),
                        name="parallel_steps",
                    )
                ],
                end_condition=lambda outputs: len(outputs) >= 2,
                max_iterations=2,
            )
        ],
    )
    events = list(workflow.run(input="test data", stream=True))
    # A single completion event signals the whole nested structure finished.
    completed_events = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completed_events) == 1
def test_router_with_loop(shared_db):
    """Test Router with Loop in routes."""
    from agno.workflow.step import Step

    research_loop = Loop(
        name="research_loop",
        steps=[research_step, analysis_step],
        end_condition=lambda outputs: len(outputs) >= 2,
        max_iterations=3,
    )

    def route_selector(step_input: StepInput):
        """Select between research loop and summary."""
        # "data" in the workflow input routes to the deep-research loop.
        if "data" in step_input.input.lower():
            return [research_loop]
        return [Step(name="summary", executor=summary_step)]

    workflow = Workflow(
        name="Router with Loop",
        db=shared_db,
        steps=[
            Router(
                name="research_router",
                selector=route_selector,
                choices=[research_loop, Step(name="summary", executor=summary_step)],
                description="Routes between deep research and summary",
            )
        ],
    )
    response = workflow.run(input="test data")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    # Search for "Research" in the nested structure
    assert find_content_in_steps(response.step_results[0], "Research")
def test_loop_with_router(shared_db):
    """Test Loop containing Router."""
    from agno.workflow.step import Step

    def route_selector(step_input: StepInput):
        """Select between analysis and summary."""
        # Decision keys off the previous step's output, not the workflow input.
        if "data" in step_input.previous_step_content.lower():
            return [Step(name="analysis", executor=analysis_step)]
        return [Step(name="summary", executor=summary_step)]

    router = Router(
        name="process_router",
        selector=route_selector,
        choices=[Step(name="analysis", executor=analysis_step), Step(name="summary", executor=summary_step)],
        description="Routes between analysis and summary",
    )
    workflow = Workflow(
        name="Loop with Router",
        db=shared_db,
        steps=[
            Loop(
                name="main_loop",
                steps=[
                    research_step,
                    router,
                ],
                end_condition=lambda outputs: len(outputs) >= 2,
                max_iterations=3,
            )
        ],
    )
    response = workflow.run(input="test data")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    loop_output = response.step_results[0]
    assert isinstance(loop_output, StepOutput)
    assert loop_output.step_type == "Loop"
def test_parallel_with_routers(shared_db):
    """Test Parallel execution of multiple Routers."""
    from agno.workflow.step import Step

    def research_selector(step_input: StepInput):
        """Select research path."""
        return (
            [Step(name="research", executor=research_step)]
            if "data" in step_input.input.lower()
            else [Step(name="analysis", executor=analysis_step)]
        )

    def summary_selector(step_input: StepInput):
        """Select summary path."""
        return (
            [Step(name="summary", executor=summary_step)]
            if "complete" in step_input.input.lower()
            else [Step(name="analysis", executor=analysis_step)]
        )

    # Both routers resolve and run their chosen step concurrently.
    workflow = Workflow(
        name="Parallel Routers",
        db=shared_db,
        steps=[
            Parallel(
                Router(
                    name="research_router",
                    selector=research_selector,
                    choices=[
                        Step(name="research", executor=research_step),
                        Step(name="analysis", executor=analysis_step),
                    ],
                    description="Routes research process",
                ),
                Router(
                    name="summary_router",
                    selector=summary_selector,
                    choices=[
                        Step(name="summary", executor=summary_step),
                        Step(name="analysis", executor=analysis_step),
                    ],
                    description="Routes summary process",
                ),
                name="parallel_routers",
            )
        ],
    )
    # The input satisfies both selectors ("data" and "complete").
    response = workflow.run(input="test data complete")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    parallel_output = response.step_results[0]
    assert isinstance(parallel_output, StepOutput)
    assert parallel_output.step_type == "Parallel"
def test_router_with_condition_and_loop(shared_db):
    """A Router can dispatch to either a Loop or a Condition branch."""
    research_loop = Loop(
        name="research_loop",
        steps=[research_step],
        end_condition=lambda outputs: len(outputs) >= 2,
        max_iterations=3,
    )
    analysis_condition = Condition(name="analysis_condition", evaluator=has_data, steps=[analysis_step])

    def route_selector(step_input: StepInput):
        """Select between research loop and conditional analysis."""
        return [research_loop] if "research" in step_input.input.lower() else [analysis_condition]

    wf = Workflow(
        name="Complex Router",
        db=shared_db,
        steps=[
            Router(
                name="complex_router",
                selector=route_selector,
                choices=[research_loop, analysis_condition],
                description="Routes between research loop and conditional analysis",
            ),
            summary_step,
        ],
    )
    run_output = wf.run(input="test research data")
    assert isinstance(run_output, WorkflowRunOutput)
    # Router output plus the trailing summary step.
    assert len(run_output.step_results) == 2
def test_nested_routers(shared_db):
    """Test nested Routers."""
    from agno.workflow.step import Step

    def inner_selector(step_input: StepInput):
        """Select inner route."""
        # Inner routing keys off the previous step's content.
        if "data" in step_input.previous_step_content.lower():
            return [Step(name="analysis", executor=analysis_step)]
        return [Step(name="summary", executor=summary_step)]

    inner_router = Router(
        name="inner_router",
        selector=inner_selector,
        choices=[Step(name="analysis", executor=analysis_step), Step(name="summary", executor=summary_step)],
        description="Routes between analysis and summary",
    )

    def outer_selector(step_input: StepInput):
        """Select outer route."""
        # The outer route may itself return the inner router as a step.
        if "research" in step_input.input.lower():
            return [Step(name="research", executor=research_step), inner_router]
        return [Step(name="summary", executor=summary_step)]

    workflow = Workflow(
        name="Nested Routers",
        db=shared_db,
        steps=[
            Router(
                name="outer_router",
                selector=outer_selector,
                choices=[
                    Step(name="research", executor=research_step),
                    inner_router,
                    Step(name="summary", executor=summary_step),
                ],
                description="Routes research process with nested routing",
            )
        ],
    )
    response = workflow.run(input="test research data")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 1
    router_output = response.step_results[0]
    assert isinstance(router_output, StepOutput)
    assert router_output.step_type == "Router"
def test_router_streaming(shared_db):
    """Test streaming with Router combinations."""
    # Route A: a loop whose body is itself a Parallel pair.
    parallel_research = Parallel(research_step, analysis_step, name="parallel_research")
    research_loop = Loop(
        name="research_loop",
        steps=[parallel_research],
        end_condition=lambda outputs: len(outputs) >= 2,
        max_iterations=2,
    )
    analysis_condition = Condition(name="analysis_condition", evaluator=has_data, steps=[analysis_step])

    def route_selector(step_input: StepInput):
        """Select between research loop and conditional analysis."""
        if "research" in step_input.input.lower():
            return [research_loop]
        return [analysis_condition]

    workflow = Workflow(
        name="Streaming Router",
        db=shared_db,
        steps=[
            Router(
                name="stream_router",
                selector=route_selector,
                choices=[research_loop, analysis_condition],
                description="Routes between research loop and analysis",
            )
        ],
    )
    events = list(workflow.run(input="test research data", stream=True))
    completed_events = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completed_events) == 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_mixed_steps.py",
"license": "Apache License 2.0",
"lines": 459,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_parallel_steps.py | """Integration tests for Parallel steps functionality."""
from contextvars import ContextVar
from secrets import token_hex
from typing import List
import pytest
from pydantic import BaseModel
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.workflow import (
StepCompletedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowRunOutput,
)
from agno.workflow import Workflow
from agno.workflow.parallel import Parallel
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
def find_content_in_steps(step_output, search_text):
    """Depth-first search for `search_text` across an output and its children."""
    if search_text in step_output.content:
        return True
    children = step_output.steps or []
    return any(find_content_in_steps(child, search_text) for child in children)
# Simple step functions for testing
def step_a(step_input: StepInput) -> StepOutput:
    """Produce the fixed marker output for branch A."""
    out = StepOutput(content="Output A")
    return out
def step_b(step_input: StepInput) -> StepOutput:
    """Produce the fixed marker output for branch B."""
    out = StepOutput(content="Output B")
    return out
def final_step(step_input: StepInput) -> StepOutput:
"""Combine previous outputs."""
return StepOutput(content=f"Final: {step_input.get_all_previous_content()}")
# ============================================================================
# TESTS (Fast - No Workflow Overhead)
# ============================================================================
def test_parallel_direct_execute():
    """Test Parallel.execute() directly without workflow."""
    parallel = Parallel(step_a, step_b, name="Direct Parallel")
    step_input = StepInput(input="direct test")
    result = parallel.execute(step_input)
    # Parallel returns a single container StepOutput describing the whole group.
    assert isinstance(result, StepOutput)
    assert result.step_name == "Direct Parallel"
    assert result.step_type == "Parallel"
    # Content should contain aggregated results from all inner steps
    assert "## Parallel Execution Results" in result.content
    assert "Output A" in result.content
    assert "Output B" in result.content
    # The actual step outputs should be in the steps field
    assert len(result.steps) == 2
    assert find_content_in_steps(result, "Output A")
    assert find_content_in_steps(result, "Output B")
@pytest.mark.asyncio
async def test_parallel_direct_aexecute():
    """Test Parallel.aexecute() directly without workflow."""
    # Async mirror of test_parallel_direct_execute: same contract, async entry point.
    parallel = Parallel(step_a, step_b, name="Direct Async Parallel")
    step_input = StepInput(input="direct async test")
    result = await parallel.aexecute(step_input)
    assert isinstance(result, StepOutput)
    assert result.step_name == "Direct Async Parallel"
    assert result.step_type == "Parallel"
    # Content should contain aggregated results from all inner steps
    assert "## Parallel Execution Results" in result.content
    assert "Output A" in result.content
    assert "Output B" in result.content
    # The actual step outputs should be in the steps field
    assert len(result.steps) == 2
    assert find_content_in_steps(result, "Output A")
    assert find_content_in_steps(result, "Output B")
def test_parallel_direct_execute_stream():
    """Test Parallel.execute_stream() directly without workflow."""
    # Local import: the ParallelExecution* event types are only needed by this test.
    from agno.run.workflow import ParallelExecutionCompletedEvent, ParallelExecutionStartedEvent, WorkflowRunOutput
    parallel = Parallel(step_a, step_b, name="Direct Stream Parallel")
    step_input = StepInput(input="direct stream test")
    # Mock workflow response for streaming
    mock_response = WorkflowRunOutput(
        run_id="test-run",
        workflow_name="test-workflow",
        workflow_id="test-id",
        session_id="test-session",
        content="",
    )
    events = list(parallel.execute_stream(step_input, workflow_run_response=mock_response, stream_events=True))
    # Should have started, completed events and final result
    started_events = [e for e in events if isinstance(e, ParallelExecutionStartedEvent)]
    completed_events = [e for e in events if isinstance(e, ParallelExecutionCompletedEvent)]
    step_outputs = [e for e in events if isinstance(e, StepOutput)]
    assert len(started_events) == 1
    assert len(completed_events) == 1
    assert len(step_outputs) == 1
    assert started_events[0].parallel_step_count == 2
    # Check the parallel container output
    parallel_output = step_outputs[0]
    # Content should contain aggregated results from all inner steps
    assert "## Parallel Execution Results" in parallel_output.content
    assert "Output A" in parallel_output.content
    assert "Output B" in parallel_output.content
    assert len(parallel_output.steps) == 2
    assert find_content_in_steps(parallel_output, "Output A")
    assert find_content_in_steps(parallel_output, "Output B")
def test_parallel_direct_single_step():
    """Test Parallel with single step."""
    parallel = Parallel(step_a, name="Single Step Parallel")
    step_input = StepInput(input="single test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert result.step_name == "Single Step Parallel"
    assert result.step_type == "Parallel"
    # Content should contain aggregated results from all inner steps
    assert "## Parallel Execution Results" in result.content
    assert "Output A" in result.content
    # Single step should still be in the steps field
    assert len(result.steps) == 1
    assert result.steps[0].content == "Output A"
# ============================================================================
# CONTEXT PROPAGATION TESTS
# ============================================================================
# ContextVar for testing context propagation to child threads
_test_context_var: ContextVar[str] = ContextVar("test_context_var", default="not_set")
def _step_read_context(step_input: StepInput) -> StepOutput:
    """Step that reads a context variable to verify propagation."""
    value = _test_context_var.get()
    return StepOutput(content=f"context_value={value}")
def test_parallel_context_propagation():
    """Test that context variables are propagated to parallel step threads.
    This verifies that copy_context().run() is used when submitting tasks
    to the ThreadPoolExecutor, ensuring contextvars are available in child threads.
    """
    # Set context variable in main thread
    value = token_hex(16)  # random per-run value so the "not_set" default can't pass by accident
    token = _test_context_var.set(value)
    try:
        parallel = Parallel(
            _step_read_context,
            _step_read_context,
            name="Context Propagation Test",
        )
        step_input = StepInput(input="context test")
        result = parallel.execute(step_input)
        # Both parallel steps should have received the context variable
        assert len(result.steps) == 2
        for step_result in result.steps:
            assert f"context_value={value}" in step_result.content, (
                f"Context variable was not propagated to child thread. Got: {step_result.content}"
            )
    finally:
        # Always restore the ContextVar so other tests see the default again.
        _test_context_var.reset(token)
def test_parallel_context_propagation_streaming():
    """Test context propagation in streaming parallel execution."""
    from agno.run.workflow import WorkflowRunOutput  # NOTE(review): redundant — already imported at module level
    value = token_hex(16)
    token = _test_context_var.set(value)
    try:
        parallel = Parallel(
            _step_read_context,
            _step_read_context,
            name="Context Stream Test",
        )
        step_input = StepInput(input="context stream test")
        # Minimal workflow response object required by the streaming API
        mock_response = WorkflowRunOutput(
            run_id="test-run",
            workflow_name="test-workflow",
            workflow_id="test-id",
            session_id="test-session",
            content="",
        )
        events = list(parallel.execute_stream(step_input, workflow_run_response=mock_response, stream_events=True))
        step_outputs = [e for e in events if isinstance(e, StepOutput)]
        assert len(step_outputs) == 1
        parallel_output = step_outputs[0]
        assert len(parallel_output.steps) == 2
        for step_result in parallel_output.steps:
            assert f"context_value={value}" in step_result.content, (
                f"Context variable was not propagated in streaming mode. Got: {step_result.content}"
            )
    finally:
        _test_context_var.reset(token)
# ============================================================================
# INTEGRATION TESTS (With Workflow)
# ============================================================================
# These run full Workflow objects; shared_db / test_agent are pytest fixtures
# defined outside this file.
def test_basic_parallel(shared_db):
    """Test basic parallel execution."""
    workflow = Workflow(
        name="Basic Parallel",
        db=shared_db,
        steps=[Parallel(step_a, step_b, name="Parallel Phase"), final_step],
    )
    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2
    # Check parallel output
    parallel_output = response.step_results[0]
    assert isinstance(parallel_output, StepOutput)
    assert parallel_output.step_type == "Parallel"
    # Content should contain aggregated results from all inner steps
    assert "## Parallel Execution Results" in parallel_output.content
    assert "Output A" in parallel_output.content
    assert "Output B" in parallel_output.content
    # The actual step outputs should be in the nested steps
    assert len(parallel_output.steps) == 2
    assert find_content_in_steps(parallel_output, "Output A")
    assert find_content_in_steps(parallel_output, "Output B")
def test_parallel_streaming(shared_db):
    """Test parallel execution with streaming."""
    workflow = Workflow(
        name="Streaming Parallel",
        db=shared_db,
        steps=[Parallel(step_a, step_b, name="Parallel Phase"), final_step],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    completed_events = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completed_events) == 1
    assert completed_events[0].content is not None
    # Check that the parallel output has nested steps
    final_response = completed_events[0]
    parallel_output = final_response.step_results[0]
    assert parallel_output.step_type == "Parallel"
    assert len(parallel_output.steps) == 2
def test_parallel_with_agent(shared_db, test_agent):
    """Test parallel execution with agent step."""
    agent_step = Step(name="agent_step", agent=test_agent)
    workflow = Workflow(
        name="Agent Parallel",
        db=shared_db,
        steps=[Parallel(step_a, agent_step, name="Mixed Parallel"), final_step],
    )
    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    parallel_output = response.step_results[0]
    assert isinstance(parallel_output, StepOutput)
    assert parallel_output.step_type == "Parallel"
    # Content should contain aggregated results from all inner steps
    assert "## Parallel Execution Results" in parallel_output.content
    assert "Output A" in parallel_output.content
    # Check nested steps contain both function and agent outputs
    assert len(parallel_output.steps) == 2
    assert find_content_in_steps(parallel_output, "Output A")
    # Agent output will vary, but should be present in nested steps
@pytest.mark.asyncio
async def test_async_parallel(shared_db):
    """Test async parallel execution."""
    workflow = Workflow(
        name="Async Parallel",
        db=shared_db,
        steps=[Parallel(step_a, step_b, name="Parallel Phase"), final_step],
    )
    response = await workflow.arun(input="test")
    assert isinstance(response, WorkflowRunOutput)
    assert len(response.step_results) == 2
    # Check parallel output structure
    parallel_output = response.step_results[0]
    assert parallel_output.step_type == "Parallel"
    assert len(parallel_output.steps) == 2
@pytest.mark.asyncio
async def test_async_parallel_streaming(shared_db):
    """Test async parallel execution with streaming."""
    workflow = Workflow(
        name="Async Streaming Parallel",
        db=shared_db,
        steps=[Parallel(step_a, step_b, name="Parallel Phase"), final_step],
    )
    events = []
    async for event in workflow.arun(input="test", stream=True, stream_events=True):
        events.append(event)
    completed_events = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completed_events) == 1
    assert completed_events[0].content is not None
    # Check parallel structure in final result
    final_response = completed_events[0]
    parallel_output = final_response.step_results[0]
    assert parallel_output.step_type == "Parallel"
    assert len(parallel_output.steps) == 2
# ============================================================================
# EARLY TERMINATION / STOP PROPAGATION TESTS
# ============================================================================
def early_stop_step(step_input: StepInput) -> StepOutput:
    """Step that requests early termination."""
    return StepOutput(
        content="Early stop requested",
        success=True,
        stop=True,  # signals the workflow to halt after this step completes
    )
def should_not_run_step(step_input: StepInput) -> StepOutput:
    """Step that should not run after early stop."""
    # If this content appears in any result, stop propagation is broken.
    return StepOutput(
        content="This step should not have run",
        success=True,
    )
def normal_parallel_step(step_input: StepInput) -> StepOutput:
    """Normal step for parallel testing."""
    return StepOutput(
        content="Normal parallel step output",
        success=True,
    )
def test_parallel_propagates_stop_flag():
    """A Parallel whose inner step requests stop must surface stop=True itself."""
    # early_stop_step returns stop=True; the container must propagate it.
    stop_parallel = Parallel(normal_parallel_step, early_stop_step, name="Stop Parallel")
    outcome = stop_parallel.execute(StepInput(input="test"))
    assert isinstance(outcome, StepOutput)
    assert outcome.stop is True, "Parallel should propagate stop=True from any inner step"
def test_parallel_stop_propagation_in_workflow(shared_db):
    """Test that workflow stops when Parallel's inner step returns stop=True."""
    workflow = Workflow(
        name="Parallel Stop Propagation Test",
        db=shared_db,
        steps=[
            Parallel(
                normal_parallel_step,
                early_stop_step,
                name="stop_parallel",
            ),
            should_not_run_step,  # This should NOT execute
        ],
    )
    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # Should only have 1 step result (the Parallel), not 2
    assert len(response.step_results) == 1, "Workflow should stop after Parallel with stop=True"
    assert response.step_results[0].stop is True
def test_parallel_streaming_propagates_stop(shared_db):
    """Test that streaming Parallel propagates stop flag and stops workflow."""
    workflow = Workflow(
        name="Streaming Parallel Stop Test",
        db=shared_db,
        steps=[
            Parallel(
                normal_parallel_step,
                early_stop_step,
                name="stop_parallel",
            ),
            should_not_run_step,
        ],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    # Verify workflow completed
    workflow_completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(workflow_completed) == 1
    # Should only have 1 step result (the Parallel), not 2
    assert len(workflow_completed[0].step_results) == 1, "Workflow should stop after Parallel with stop=True"
    # Check that the parallel output has stop=True
    parallel_output = workflow_completed[0].step_results[0]
    assert parallel_output.stop is True
    # Check that at least one inner step has stop=True in results
    assert len(parallel_output.steps) == 2
    assert any(r.stop for r in parallel_output.steps), "At least one step should have stop=True"
    # Most importantly: verify should_not_run_step was NOT executed
    step_events = [e for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    step_names = [e.step_name for e in step_events]
    assert "should_not_run_step" not in step_names, "Workflow should have stopped before should_not_run_step"
@pytest.mark.asyncio
async def test_async_parallel_propagates_stop():
    """Test that async Parallel propagates stop flag."""
    # Async counterpart of test_parallel_propagates_stop_flag.
    parallel = Parallel(
        normal_parallel_step,
        early_stop_step,
        name="Async Stop Parallel",
    )
    step_input = StepInput(input="test")
    result = await parallel.aexecute(step_input)
    assert isinstance(result, StepOutput)
    assert result.stop is True, "Async Parallel should propagate stop=True from any inner step"
@pytest.mark.asyncio
async def test_async_parallel_streaming_propagates_stop(shared_db):
    """Test that async streaming Parallel propagates stop flag and stops workflow."""
    workflow = Workflow(
        name="Async Streaming Parallel Stop Test",
        db=shared_db,
        steps=[
            Parallel(
                normal_parallel_step,
                early_stop_step,
                name="stop_parallel",
            ),
            should_not_run_step,
        ],
    )
    events = []
    async for event in workflow.arun(input="test", stream=True, stream_events=True):
        events.append(event)
    # Verify workflow completed
    workflow_completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(workflow_completed) == 1
    # Should only have 1 step result (the Parallel), not 2
    assert len(workflow_completed[0].step_results) == 1, "Workflow should stop after Parallel with stop=True"
    # Check that the parallel output has stop=True
    parallel_output = workflow_completed[0].step_results[0]
    assert parallel_output.stop is True
    # Check that at least one inner step has stop=True in results
    assert len(parallel_output.steps) == 2
    assert any(r.stop for r in parallel_output.steps), "At least one step should have stop=True"
    # Most importantly: verify should_not_run_step was NOT executed
    step_events = [e for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    step_names = [e.step_name for e in step_events]
    assert "should_not_run_step" not in step_names, "Workflow should have stopped before should_not_run_step"
def test_parallel_all_steps_stop():
    """Test Parallel when all inner steps request stop."""

    def stop_step_1(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Stop 1", success=True, stop=True)

    def stop_step_2(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Stop 2", success=True, stop=True)

    # With every branch stopping, the aggregate must report stop as well.
    all_stop = Parallel(stop_step_1, stop_step_2, name="All Stop Parallel")
    outcome = all_stop.execute(StepInput(input="test"))
    assert isinstance(outcome, StepOutput)
    assert outcome.stop is True
    assert len(outcome.steps) == 2
    assert all(inner.stop for inner in outcome.steps)
def test_parallel_no_stop():
    """Test Parallel when no inner steps request stop."""
    # Neither branch sets stop, so the container must leave it unset (False).
    quiet_parallel = Parallel(
        normal_parallel_step,
        step_b,  # Using existing step_b from the file
        name="No Stop Parallel",
    )
    outcome = quiet_parallel.execute(StepInput(input="test"))
    assert isinstance(outcome, StepOutput)
    assert outcome.stop is False, "Parallel should not set stop when no inner step requests it"
def test_parallel_name_as_first_positional_arg():
    """Test Parallel with name as first positional argument."""
    parallel = Parallel("My Named Parallel", step_a, step_b)
    step_input = StepInput(input="test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name == "My Named Parallel"
    assert result.step_name == "My Named Parallel"
    assert len(result.steps) == 2
    assert find_content_in_steps(result, "Output A")
    assert find_content_in_steps(result, "Output B")
def test_parallel_name_as_keyword_arg():
    """Test Parallel with name as keyword argument (original behavior)."""
    parallel = Parallel(step_a, step_b, name="Keyword Named Parallel")
    step_input = StepInput(input="test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name == "Keyword Named Parallel"
    assert result.step_name == "Keyword Named Parallel"
    assert len(result.steps) == 2
    assert find_content_in_steps(result, "Output A")
    assert find_content_in_steps(result, "Output B")
def test_parallel_no_name():
    """Test Parallel without any name."""
    parallel = Parallel(step_a, step_b)
    step_input = StepInput(input="test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name is None
    assert result.step_name == "Parallel"  # Default name
    assert len(result.steps) == 2
def test_parallel_keyword_name_overrides_positional():
    """Test that keyword name takes precedence over positional name."""
    # Both naming styles supplied at once: the keyword must win.
    parallel = Parallel("Positional Name", step_a, step_b, name="Keyword Name")
    step_input = StepInput(input="test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name == "Keyword Name"
    assert result.step_name == "Keyword Name"
def test_parallel_name_first_single_step():
    """Test Parallel with name first and single step."""
    parallel = Parallel("Single Step Named", step_a)
    step_input = StepInput(input="test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name == "Single Step Named"
    assert len(result.steps) == 1
    assert find_content_in_steps(result, "Output A")
def test_parallel_name_first_with_description():
    """Test Parallel with name first and description as keyword."""
    parallel = Parallel("Described Parallel", step_a, step_b, description="A parallel with description")
    step_input = StepInput(input="test")
    result = parallel.execute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name == "Described Parallel"
    assert parallel.description == "A parallel with description"
    assert len(result.steps) == 2
@pytest.mark.asyncio
async def test_parallel_name_first_async():
    """Test async Parallel with name as first positional argument."""
    parallel = Parallel("Async Named Parallel", step_a, step_b)
    step_input = StepInput(input="test")
    result = await parallel.aexecute(step_input)
    assert isinstance(result, StepOutput)
    assert parallel.name == "Async Named Parallel"
    assert result.step_name == "Async Named Parallel"
    assert len(result.steps) == 2
def test_parallel_name_first_streaming():
    """Test streaming Parallel with name as first positional argument."""
    from agno.run.workflow import WorkflowRunOutput  # NOTE(review): redundant — already imported at module level
    parallel = Parallel("Streaming Named Parallel", step_a, step_b)
    step_input = StepInput(input="test")
    # Minimal workflow response object required by the streaming API
    mock_response = WorkflowRunOutput(
        run_id="test-run",
        workflow_name="test-workflow",
        workflow_id="test-id",
        session_id="test-session",
        content="",
    )
    events = list(parallel.execute_stream(step_input, workflow_run_response=mock_response, stream_events=True))
    step_outputs = [e for e in events if isinstance(e, StepOutput)]
    assert len(step_outputs) == 1
    assert parallel.name == "Streaming Named Parallel"
    assert step_outputs[0].step_name == "Streaming Named Parallel"
# ==================================
# OUTPUT SCHEMA ISOLATION TESTS
# When parallel steps contain agents with different output_schema types, each
# step must receive its own run_context copy so that apply_to_context() writes
# do not clobber a sibling step's schema.
# ==================================
class ImageClassification(BaseModel):
    """Output schema for image classifier agents."""
    image_id: str  # identifier of the classified image
    category: str  # predicted category label
    confidence: float  # model confidence for the prediction
    tags: List[str]  # free-form descriptive tags
class QualityAssessment(BaseModel):
    """Output schema for quality assessor agents."""
    image_id: str  # identifier of the assessed image
    quality_score: int  # numeric quality rating
    issues: List[str]  # detected problems; empty when clean
    approved: bool  # whether the image passes QA
def test_parallel_agents_with_different_output_schemas(shared_db):
    """Regression test for #6590: agents with different output_schema types must each
    produce output of their own schema type, not a sibling's.
    Before the fix, all parallel steps shared the same run_context. Each agent's
    apply_to_context() overwrites run_context.output_schema, so concurrent agents
    would corrupt each other's schema, causing ValidationError or wrong output types.
    """
    # NOTE(review): these tests call a live OpenAI model — presumably gated by CI credentials; confirm.
    classifier_agent = Agent(
        name="classifier",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ImageClassification,
        instructions="Classify image img_001. Return an ImageClassification.",
    )
    qa_agent = Agent(
        name="qa_assessor",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=QualityAssessment,
        instructions="Assess quality of image img_001. Return a QualityAssessment.",
    )
    workflow = Workflow(
        name="image_pipeline",
        db=shared_db,
        steps=[
            Parallel(
                Step(name="classify", agent=classifier_agent),
                Step(name="assess", agent=qa_agent),
                name="parallel_processing",
            )
        ],
    )
    result = workflow.run(input="Process img_001")
    assert result is not None
    parallel_output = result.step_results[0]
    assert parallel_output.step_type == "Parallel"
    assert len(parallel_output.steps) == 2
    # Each agent's structured output must match its own declared schema.
    by_name = {s.step_name: s.content for s in parallel_output.steps}
    assert isinstance(by_name["classify"], ImageClassification), (
        f"classify step should produce ImageClassification, got {type(by_name['classify'])}"
    )
    assert isinstance(by_name["assess"], QualityAssessment), (
        f"assess step should produce QualityAssessment, got {type(by_name['assess'])}"
    )
def test_parallel_loops_with_heterogeneous_agent_schemas(shared_db):
    """Regression test for #6590 with the exact bug-report structure: Parallel of Loops,
    each loop containing a classifier agent and a QA agent with different output schemas.
    Three images processed in parallel, each through classifier → QA. Before the fix,
    agents inside sibling loops would corrupt each other's output_schema.
    """
    from agno.workflow import Loop
    image_ids = ["img_001", "img_002", "img_003"]
    def make_loop(image_id: str) -> Loop:
        # One classify->assess loop per image, each agent with its own schema.
        classifier = Agent(
            name=f"classifier_{image_id}",
            model=OpenAIChat(id="gpt-4o-mini"),
            output_schema=ImageClassification,
            instructions=f"Classify image {image_id}. Return an ImageClassification.",
        )
        qa = Agent(
            name=f"qa_{image_id}",
            model=OpenAIChat(id="gpt-4o-mini"),
            output_schema=QualityAssessment,
            instructions=f"Assess quality of image {image_id}. Return a QualityAssessment.",
        )
        return Loop(
            name=f"process_{image_id}",
            steps=[
                Step(name=f"classify_{image_id}", agent=classifier),
                Step(name=f"assess_{image_id}", agent=qa),
            ],
            max_iterations=1,
        )
    workflow = Workflow(
        name="image_pipeline",
        db=shared_db,
        steps=[
            Parallel(
                *[make_loop(img_id) for img_id in image_ids],
                name="parallel_processing",
            )
        ],
    )
    result = workflow.run(input="Process all images")
    assert result is not None
    parallel_output = result.step_results[0]
    assert parallel_output.step_type == "Parallel"
    for loop_output in parallel_output.steps:
        assert loop_output.steps, f"Loop {loop_output.step_name} has no nested steps"
        for agent_step in loop_output.steps:
            # Steps whose name matches neither prefix are silently skipped here.
            step_name = agent_step.step_name or ""
            if step_name.startswith("classify_"):
                assert isinstance(agent_step.content, ImageClassification), (
                    f"{step_name} should produce ImageClassification, got {type(agent_step.content)}"
                )
            elif step_name.startswith("assess_"):
                assert isinstance(agent_step.content, QualityAssessment), (
                    f"{step_name} should produce QualityAssessment, got {type(agent_step.content)}"
                )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_parallel_steps.py",
"license": "Apache License 2.0",
"lines": 628,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_router_steps.py | """Test Router functionality in workflows."""
import pytest
from agno.run.workflow import (
RouterExecutionCompletedEvent,
RouterExecutionStartedEvent,
StepCompletedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowRunOutput,
)
from agno.workflow.cel import CEL_AVAILABLE
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.steps import Steps
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
def find_content_in_steps(step_output, search_text):
    """Recursively search for *search_text* in a step output and its nested steps.

    Args:
        step_output: StepOutput-like object exposing ``content`` and ``steps``.
        search_text: Substring to look for.

    Returns:
        True if the text occurs in this output's content or any nested step's.
    """
    # Guard: content may be None or a structured object rather than a string;
    # the raw ``in`` check would raise TypeError in that case.
    content = step_output.content
    if isinstance(content, str) and search_text in content:
        return True
    if step_output.steps:
        return any(find_content_in_steps(nested_step, search_text) for nested_step in step_output.steps)
    return False
# ============================================================================
# TESTS (Fast - No Workflow Overhead)
# ============================================================================
def test_router_direct_execute():
    """Test Router.execute directly without workflow."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Output B"))
    def simple_selector(step_input: StepInput):
        # Route on the presence of "A" in the raw input string.
        if "A" in step_input.input:
            return [step_a]
        return [step_b]
    router = Router(
        name="test_router", selector=simple_selector, choices=[step_a, step_b], description="Direct router test"
    )
    # Test routing to step A
    input_a = StepInput(input="Choose A")
    results_a = router.execute(input_a)
    assert isinstance(results_a, StepOutput)
    assert len(results_a.steps) == 1
    assert results_a.steps[0].content == "Output A"
    assert results_a.steps[0].success
    # Test routing to step B
    input_b = StepInput(input="Choose B")
    results_b = router.execute(input_b)
    assert isinstance(results_b, StepOutput)
    assert len(results_b.steps) == 1
    assert results_b.steps[0].content == "Output B"
    assert results_b.steps[0].success
def test_router_direct_multiple_steps():
    """Test Router.execute with multiple steps selection."""
    step_1 = Step(name="step_1", executor=lambda x: StepOutput(content="Step 1"))
    step_2 = Step(name="step_2", executor=lambda x: StepOutput(content="Step 2"))
    step_3 = Step(name="step_3", executor=lambda x: StepOutput(content="Step 3"))
    def multi_selector(step_input: StepInput):
        # A selector may return more than one step; they run in order.
        if "multi" in step_input.input:
            return [step_1, step_2]
        return [step_3]
    router = Router(
        name="multi_router", selector=multi_selector, choices=[step_1, step_2, step_3], description="Multi-step router"
    )
    # Test multiple steps selection
    input_multi = StepInput(input="Choose multi")
    results_multi = router.execute(input_multi)
    assert isinstance(results_multi, StepOutput)
    assert len(results_multi.steps) == 2
    assert results_multi.steps[0].content == "Step 1"
    assert results_multi.steps[1].content == "Step 2"
    assert all(r.success for r in results_multi.steps)
    # Test single step selection
    input_single = StepInput(input="Choose single")
    results_single = router.execute(input_single)
    assert isinstance(results_single, StepOutput)
    assert len(results_single.steps) == 1
    assert results_single.steps[0].content == "Step 3"
    assert results_single.steps[0].success
def test_router_direct_with_steps_component():
    """Test Router.execute with Steps component."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="B"))
    steps_sequence = Steps(name="sequence", steps=[step_a, step_b])
    single_step = Step(name="single", executor=lambda x: StepOutput(content="Single"))
    def sequence_selector(step_input: StepInput):
        if "sequence" in step_input.input:
            return [steps_sequence]
        return [single_step]
    router = Router(
        name="sequence_router",
        selector=sequence_selector,
        choices=[steps_sequence, single_step],
        description="Sequence router",
    )
    # Test routing to Steps sequence
    input_seq = StepInput(input="Choose sequence")
    results_seq = router.execute(input_seq)
    # Steps component returns multiple outputs
    assert isinstance(results_seq, StepOutput)
    assert len(results_seq.steps) >= 1
    # Check that we have content from both steps using recursive search
    assert find_content_in_steps(results_seq, "A")
    assert find_content_in_steps(results_seq, "B")
def test_router_direct_error_handling():
    """Test Router.execute error handling."""
    def failing_executor(step_input: StepInput) -> StepOutput:
        raise ValueError("Test error")
    failing_step = Step(name="failing", executor=failing_executor)
    success_step = Step(name="success", executor=lambda x: StepOutput(content="Success"))
    def error_selector(step_input: StepInput):
        if "fail" in step_input.input:
            return [failing_step]
        return [success_step]
    router = Router(
        name="error_router",
        selector=error_selector,
        choices=[failing_step, success_step],
        description="Error handling router",
    )
    # Test error case: the raised exception is captured as a failed StepOutput,
    # with the error message surfaced in the step content.
    input_fail = StepInput(input="Make it fail")
    results_fail = router.execute(input_fail)
    assert isinstance(results_fail, StepOutput)
    assert len(results_fail.steps) == 1
    assert not results_fail.steps[0].success
    assert "Test error" in results_fail.steps[0].content
    # Test success case
    input_success = StepInput(input="Make it success")
    results_success = router.execute(input_success)
    assert isinstance(results_success, StepOutput)
    assert len(results_success.steps) == 1
    assert results_success.steps[0].success
    assert results_success.steps[0].content == "Success"
def test_router_direct_chaining():
    """Router-selected steps run sequentially, each seeing its predecessor's output."""

    def first_executor(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"Step 1: {step_input.input}")

    def second_executor(step_input: StepInput) -> StepOutput:
        # Receives the first step's content via previous_step_content.
        return StepOutput(content=f"Step 2: {step_input.previous_step_content}")

    first = Step(name="step_1", executor=first_executor)
    second = Step(name="step_2", executor=second_executor)

    router = Router(
        name="chain_router",
        selector=lambda step_input: [first, second],
        choices=[first, second],
        description="Chaining router",
    )
    results = router.execute(StepInput(input="Hello"))
    assert isinstance(results, StepOutput)
    assert len(results.steps) == 2
    assert results.steps[0].content == "Step 1: Hello"
    assert results.steps[1].content == "Step 2: Step 1: Hello"
    assert all(each.success for each in results.steps)
# ============================================================================
# EXISTING INTEGRATION TESTS (With Workflow)
# ============================================================================
def test_basic_routing(shared_db):
"""Test basic routing based on input."""
tech_step = Step(name="tech", executor=lambda x: StepOutput(content="Tech content"))
general_step = Step(name="general", executor=lambda x: StepOutput(content="General content"))
def route_selector(step_input: StepInput):
"""Select between tech and general steps."""
if "tech" in step_input.input.lower():
return [tech_step]
return [general_step]
workflow = Workflow(
name="Basic Router",
db=shared_db,
steps=[
Router(
name="router",
selector=route_selector,
choices=[tech_step, general_step],
description="Basic routing",
)
],
)
tech_response = workflow.run(input="tech topic")
assert find_content_in_steps(tech_response.step_results[0], "Tech content")
general_response = workflow.run(input="general topic")
assert find_content_in_steps(general_response.step_results[0], "General content")
def test_streaming(shared_db):
    """Test router with streaming."""
    stream_step = Step(name="stream", executor=lambda x: StepOutput(content="Stream content"))
    alt_step = Step(name="alt", executor=lambda x: StepOutput(content="Alt content"))

    def route_selector(step_input: StepInput):
        # Always route to the streaming step; alt_step is never selected.
        return [stream_step]

    workflow = Workflow(
        name="Stream Router",
        db=shared_db,
        steps=[
            Router(
                name="router",
                selector=route_selector,
                choices=[stream_step, alt_step],
                description="Stream routing",
            )
        ],
    )

    # Drain the event stream and verify exactly one completion event with the routed content.
    collected = [event for event in workflow.run(input="test", stream=True)]
    completed = [event for event in collected if isinstance(event, WorkflowCompletedEvent)]
    assert len(completed) == 1
    assert find_content_in_steps(completed[0].step_results[0], "Stream content")
def test_agent_routing(shared_db, test_agent):
    """Test routing to agent steps."""
    agent_step = Step(name="agent_step", agent=test_agent)
    function_step = Step(name="function_step", executor=lambda x: StepOutput(content="Function output"))

    def route_selector(step_input: StepInput):
        # Always pick the agent-backed step.
        return [agent_step]

    workflow = Workflow(
        name="Agent Router",
        db=shared_db,
        steps=[
            Router(
                name="router",
                selector=route_selector,
                choices=[agent_step, function_step],
                description="Agent routing",
            )
        ],
    )

    response = workflow.run(input="test")
    # Check if the router executed successfully (either has success in nested steps or router itself succeeded)
    router_result = response.step_results[0]
    assert router_result.success or any(step.success for step in router_result.steps if router_result.steps)
def test_mixed_routing(shared_db, test_agent, test_team):
    """Test routing to mix of function, agent, and team."""
    function_step = Step(name="function", executor=lambda x: StepOutput(content="Function output"))
    agent_step = Step(name="agent", agent=test_agent)
    team_step = Step(name="team", team=test_team)

    def route_selector(step_input: StepInput):
        # Keyword dispatch: "function" and "agent" take priority; team is the fallback.
        text = step_input.input
        if "function" in text:
            return [function_step]
        if "agent" in text:
            return [agent_step]
        return [team_step]

    workflow = Workflow(
        name="Mixed Router",
        db=shared_db,
        steps=[
            Router(
                name="router",
                selector=route_selector,
                choices=[function_step, agent_step, team_step],
                description="Mixed routing",
            )
        ],
    )

    # Function route: content is directly assertable.
    function_response = workflow.run(input="test function")
    assert find_content_in_steps(function_response.step_results[0], "Function output")

    # Agent route: accept either router-level success or nested step success.
    agent_result = workflow.run(input="test agent").step_results[0]
    assert agent_result.success or any(step.success for step in agent_result.steps if agent_result.steps)

    # Team route: same success criterion as the agent route.
    team_result = workflow.run(input="test team").step_results[0]
    assert team_result.success or any(step.success for step in team_result.steps if team_result.steps)
def test_multiple_step_routing(shared_db):
    """Test routing to multiple steps."""
    research_step = Step(name="research", executor=lambda x: StepOutput(content="Research output"))
    analysis_step = Step(name="analysis", executor=lambda x: StepOutput(content="Analysis output"))
    summary_step = Step(name="summary", executor=lambda x: StepOutput(content="Summary output"))

    def route_selector(step_input: StepInput):
        # "research" requests a two-step chain; anything else gets the summary only.
        wants_research = "research" in step_input.input
        return [research_step, analysis_step] if wants_research else [summary_step]

    workflow = Workflow(
        name="Multiple Steps Router",
        db=shared_db,
        steps=[
            Router(
                name="router",
                selector=route_selector,
                choices=[research_step, analysis_step, summary_step],
                description="Multiple step routing",
            )
        ],
    )

    router_output = workflow.run(input="test research").step_results[0]
    # Both chained steps must have executed and contributed their content.
    assert len(router_output.steps) == 2
    assert find_content_in_steps(router_output, "Research output")
    assert find_content_in_steps(router_output, "Analysis output")
def test_route_steps(shared_db):
    """Test routing to multiple steps."""
    research_step = Step(name="research", executor=lambda x: StepOutput(content="Research output"))
    analysis_step = Step(name="analysis", executor=lambda x: StepOutput(content="Analysis output"))
    # Wrap the two research steps in a Steps container so they run as one sequence.
    research_sequence = Steps(name="research_sequence", steps=[research_step, analysis_step])
    summary_step = Step(name="summary", executor=lambda x: StepOutput(content="Summary output"))

    def route_selector(step_input: StepInput):
        # Route to the whole sequence for research requests, otherwise just the summary.
        return [research_sequence] if "research" in step_input.input else [summary_step]

    workflow = Workflow(
        name="Multiple Steps Router",
        db=shared_db,
        steps=[
            Router(
                name="router",
                selector=route_selector,
                choices=[research_sequence, summary_step],
                description="Multiple step routing",
            )
        ],
    )

    router_results = workflow.run(input="test research").step_results[0]
    # Check that we got results from both steps in the sequence
    assert isinstance(router_results, StepOutput)
    assert len(router_results.steps) >= 1  # Steps component should have nested results
    assert find_content_in_steps(router_results, "Research output")
    assert find_content_in_steps(router_results, "Analysis output")
# ============================================================================
# EARLY TERMINATION / STOP PROPAGATION TESTS
# ============================================================================
def early_stop_step(step_input: StepInput) -> StepOutput:
    """Step that requests early termination."""
    # stop=True signals the enclosing workflow/router to halt after this step.
    output = StepOutput(content="Early stop requested", success=True, stop=True)
    return output
def should_not_run_step(step_input: StepInput) -> StepOutput:
    """Step that should not run after early stop."""
    # If this content ever shows up in results, stop propagation failed upstream.
    return StepOutput(success=True, content="This step should not have run")
def normal_step(step_input: StepInput) -> StepOutput:
    """Normal step that doesn't request stop."""
    # No stop flag: execution continues normally after this step.
    return StepOutput(success=True, content="Normal step output")
def test_router_propagates_stop_flag():
    """Test that Router propagates stop flag from inner steps."""
    step_stop = Step(name="stop_step", executor=early_stop_step)
    step_normal = Step(name="normal_step", executor=normal_step)

    def selector(step_input: StepInput):
        # Only the stopping step is ever selected.
        return [step_stop]

    router = Router(name="Stop Router", selector=selector, choices=[step_stop, step_normal])

    result = router.execute(StepInput(input="test"))
    assert isinstance(result, StepOutput)
    assert result.stop is True, "Router should propagate stop=True from inner step"
def test_router_stop_propagation_in_workflow(shared_db):
    """Test that workflow stops when Router's inner step returns stop=True."""
    step_stop = Step(name="stop_step", executor=early_stop_step)
    step_normal = Step(name="normal_step", executor=normal_step)

    def selector(step_input: StepInput):
        return [step_stop]

    workflow = Workflow(
        name="Router Stop Propagation Test",
        db=shared_db,
        steps=[
            Router(name="stop_router", selector=selector, choices=[step_stop, step_normal]),
            should_not_run_step,  # This should NOT execute
        ],
    )

    response = workflow.run(input="test")
    assert isinstance(response, WorkflowRunOutput)
    # Only the Router should have produced a result; the trailing step must be skipped.
    assert len(response.step_results) == 1, "Workflow should stop after Router with stop=True"
    assert response.step_results[0].stop is True
def test_router_stops_inner_steps_on_stop_flag():
    """Test that Router stops executing remaining inner steps when one returns stop=True."""
    step_stop = Step(name="stop_step", executor=early_stop_step)
    step_after = Step(name="after_stop", executor=should_not_run_step)

    def selector(step_input: StepInput):
        # stop_step should stop before after_stop runs
        return [step_stop, step_after]

    router = Router(name="Inner Stop Router", selector=selector, choices=[step_stop, step_after])

    result = router.execute(StepInput(input="test"))
    assert isinstance(result, StepOutput)
    assert result.stop is True
    # Exactly one inner result — the stopping step; after_stop must never have executed.
    assert len(result.steps) == 1, "Router should stop after inner step with stop=True"
    assert "Early stop requested" in result.steps[0].content
def test_router_streaming_propagates_stop(shared_db):
    """Test that streaming Router propagates stop flag and stops workflow."""
    step_stop = Step(name="stop_step", executor=early_stop_step)
    step_normal = Step(name="normal_step", executor=normal_step)

    def selector(step_input: StepInput):
        return [step_stop]

    workflow = Workflow(
        name="Streaming Router Stop Test",
        db=shared_db,
        steps=[
            Router(name="stop_router", selector=selector, choices=[step_stop, step_normal]),
            should_not_run_step,
        ],
    )

    events = list(workflow.run(input="test", stream=True, stream_events=True))

    # Exactly one router-completion event must carry the stop flag in its results.
    router_completed = [e for e in events if isinstance(e, RouterExecutionCompletedEvent)]
    assert len(router_completed) == 1
    step_results = router_completed[0].step_results or []
    assert len(step_results) == 1
    assert step_results[0].stop is True

    # Most importantly: verify should_not_run_step was NOT executed
    executed_names = [e.step_name for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    assert "should_not_run_step" not in executed_names, "Workflow should have stopped before should_not_run_step"
@pytest.mark.asyncio
async def test_async_router_propagates_stop():
    """Test that async Router propagates stop flag."""
    step_stop = Step(name="stop_step", executor=early_stop_step)
    step_normal = Step(name="normal_step", executor=normal_step)

    def selector(step_input: StepInput):
        return [step_stop]

    router = Router(name="Async Stop Router", selector=selector, choices=[step_stop, step_normal])

    result = await router.aexecute(StepInput(input="test"))
    assert isinstance(result, StepOutput)
    assert result.stop is True, "Async Router should propagate stop=True from inner step"
@pytest.mark.asyncio
async def test_async_router_streaming_propagates_stop(shared_db):
    """Test that async streaming Router propagates stop flag and stops workflow."""
    step_stop = Step(name="stop_step", executor=early_stop_step)
    step_normal = Step(name="normal_step", executor=normal_step)

    def selector(step_input: StepInput):
        return [step_stop]

    workflow = Workflow(
        name="Async Streaming Router Stop Test",
        db=shared_db,
        steps=[
            Router(name="stop_router", selector=selector, choices=[step_stop, step_normal]),
            should_not_run_step,
        ],
    )

    events = [event async for event in workflow.arun(input="test", stream=True, stream_events=True)]

    # Exactly one router-completion event must carry the stop flag in its results.
    router_completed = [e for e in events if isinstance(e, RouterExecutionCompletedEvent)]
    assert len(router_completed) == 1
    step_results = router_completed[0].step_results or []
    assert len(step_results) == 1
    assert step_results[0].stop is True

    # Most importantly: verify should_not_run_step was NOT executed
    executed_names = [e.step_name for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))]
    assert "should_not_run_step" not in executed_names, "Workflow should have stopped before should_not_run_step"
# ============================================================================
# STEP_CHOICES PARAMETER TESTS
# ============================================================================
def test_selector_receives_step_choices():
    """Test that selector function receives step_choices parameter with prepared steps."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Output B"))
    received_choices = []

    def selector_with_choices(step_input: StepInput, step_choices: list):
        """Selector that captures step_choices for verification."""
        received_choices.extend(step_choices)
        # Select step based on available choices
        by_name = {s.name: s for s in step_choices}
        chosen = by_name["step_a"] if "A" in step_input.input else by_name["step_b"]
        return [chosen]

    router = Router(name="choices_router", selector=selector_with_choices, choices=[step_a, step_b])

    result = router.execute(StepInput(input="Choose A"))

    # Verify step_choices was passed to selector
    assert len(received_choices) == 2
    assert all(isinstance(s, Step) for s in received_choices)
    assert {s.name for s in received_choices} == {"step_a", "step_b"}
    # Verify correct step was selected
    assert result.steps[0].content == "Output A"
def test_selector_with_step_choices_and_session_state():
    """Test selector that uses both step_choices and session_state."""
    step_1 = Step(name="step_1", executor=lambda x: StepOutput(content="Step 1"))
    step_2 = Step(name="step_2", executor=lambda x: StepOutput(content="Step 2"))

    def selector_with_both(step_input: StepInput, session_state: dict, step_choices: list):
        """Selector using both session_state and step_choices."""
        by_name = {s.name: s for s in step_choices}
        # Fall back to step_1 when the session does not name a target.
        return [by_name[session_state.get("target_step", "step_1")]]

    router = Router(name="combined_router", selector=selector_with_both, choices=[step_1, step_2])

    # Test with session_state selecting step_2
    result = router.execute(StepInput(input="test"), session_state={"target_step": "step_2"})
    assert result.steps[0].content == "Step 2"
def test_selector_without_step_choices_still_works():
    """Test that selectors without step_choices parameter still work (backward compatibility)."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Output B"))

    def simple_selector(step_input: StepInput):
        """Old-style selector without step_choices."""
        return [step_a]

    router = Router(name="simple_router", selector=simple_selector, choices=[step_a, step_b])

    outcome = router.execute(StepInput(input="test"))
    assert outcome.steps[0].content == "Output A"
def test_selector_dynamic_step_selection_from_choices():
    """Test selector that dynamically selects steps based on step_choices names."""
    step_research = Step(name="research", executor=lambda x: StepOutput(content="Research done"))
    step_write = Step(name="write", executor=lambda x: StepOutput(content="Writing done"))
    step_review = Step(name="review", executor=lambda x: StepOutput(content="Review done"))

    def dynamic_selector(step_input: StepInput, step_choices: list):
        """Select steps dynamically based on input keywords and available choices."""
        by_name = {s.name: s for s in step_choices}
        text = step_input.input.lower()
        if "research" in text:
            return [by_name["research"]]
        if "write" in text:
            return [by_name["write"]]
        if "full" in text:
            # Chain all available steps
            return [by_name["research"], by_name["write"], by_name["review"]]
        return [step_choices[0]]

    router = Router(
        name="dynamic_router",
        selector=dynamic_selector,
        choices=[step_research, step_write, step_review],
    )

    # Test single step selection
    single = router.execute(StepInput(input="do research"))
    assert len(single.steps) == 1
    assert single.steps[0].content == "Research done"

    # Test chaining multiple steps
    chained = router.execute(StepInput(input="full workflow"))
    assert len(chained.steps) == 3
    assert chained.steps[0].content == "Research done"
    assert chained.steps[1].content == "Writing done"
    assert chained.steps[2].content == "Review done"
@pytest.mark.asyncio
async def test_async_selector_receives_step_choices():
    """Test that async selector receives step_choices parameter."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Async A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Async B"))

    async def async_selector(step_input: StepInput, step_choices: list):
        """Async selector with step_choices."""
        by_name = {s.name: s for s in step_choices}
        return [by_name["step_b"]]

    router = Router(name="async_choices_router", selector=async_selector, choices=[step_a, step_b])

    result = await router.aexecute(StepInput(input="test"))
    assert result.steps[0].content == "Async B"
def test_selector_step_choices_in_workflow(shared_db):
    """Test step_choices parameter works correctly within a workflow."""
    step_fast = Step(name="fast", executor=lambda x: StepOutput(content="Fast path"))
    step_slow = Step(name="slow", executor=lambda x: StepOutput(content="Slow path"))

    def workflow_selector(step_input: StepInput, step_choices: list):
        """Selector that uses step_choices in workflow context."""
        by_name = {s.name: s for s in step_choices}
        key = "fast" if "fast" in step_input.input.lower() else "slow"
        return [by_name[key]]

    workflow = Workflow(
        name="Step Choices Workflow",
        db=shared_db,
        steps=[Router(name="choices_router", selector=workflow_selector, choices=[step_fast, step_slow])],
    )

    # Test fast path
    response = workflow.run(input="take fast route")
    assert find_content_in_steps(response.step_results[0], "Fast path")

    # Test slow path
    response = workflow.run(input="take slow route")
    assert find_content_in_steps(response.step_results[0], "Slow path")
# ============================================================================
# STRING RETURN TYPE TESTS
# ============================================================================
def test_selector_returns_string_step_name():
    """Test that selector can return step name as string."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Output B"))

    def string_selector(step_input: StepInput):
        """Selector that returns step name as string."""
        # Return the step's name instead of the Step object itself.
        return "step_a" if "A" in step_input.input else "step_b"

    router = Router(name="string_router", selector=string_selector, choices=[step_a, step_b])

    # Test selecting step_a by name
    assert router.execute(StepInput(input="Choose A")).steps[0].content == "Output A"
    # Test selecting step_b by name
    assert router.execute(StepInput(input="Choose B")).steps[0].content == "Output B"
def test_selector_returns_list_of_strings():
    """Test that selector can return list of step names as strings."""
    step_1 = Step(name="step_1", executor=lambda x: StepOutput(content="Step 1"))
    step_2 = Step(name="step_2", executor=lambda x: StepOutput(content="Step 2"))
    step_3 = Step(name="step_3", executor=lambda x: StepOutput(content="Step 3"))

    def multi_string_selector(step_input: StepInput):
        """Selector that returns multiple step names as strings."""
        run_all = "all" in step_input.input
        return ["step_1", "step_2", "step_3"] if run_all else ["step_1"]

    router = Router(
        name="multi_string_router",
        selector=multi_string_selector,
        choices=[step_1, step_2, step_3],
    )

    # Test selecting all steps by name
    result = router.execute(StepInput(input="run all"))
    assert len(result.steps) == 3
    # Steps must run in the order their names were returned.
    assert [s.content for s in result.steps] == ["Step 1", "Step 2", "Step 3"]
def test_selector_returns_mixed_string_and_step():
    """Test that selector can return mixed strings and Step objects."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Output B"))

    def mixed_selector(step_input: StepInput, step_choices: list):
        """Selector that returns mix of string and Step."""
        by_name = {s.name: s for s in step_choices}
        # Return string for first, Step object for second
        return ["step_a", by_name["step_b"]]

    router = Router(name="mixed_router", selector=mixed_selector, choices=[step_a, step_b])

    result = router.execute(StepInput(input="test"))
    assert len(result.steps) == 2
    assert result.steps[0].content == "Output A"
    assert result.steps[1].content == "Output B"
def test_selector_unknown_string_name_warns():
    """Test that unknown step name logs warning and returns empty."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))

    def bad_selector(step_input: StepInput):
        # Name does not match any choice; router should warn and skip execution.
        return "nonexistent_step"

    router = Router(name="bad_router", selector=bad_selector, choices=[step_a])

    result = router.execute(StepInput(input="test"))
    # Should complete but with no steps executed (steps is None or empty)
    assert not result.steps
# ============================================================================
# NESTED CHOICES TESTS
# ============================================================================
def test_nested_list_in_choices_becomes_steps_container():
    """Test that nested list in choices becomes a Steps container."""
    from agno.workflow.steps import Steps

    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Output A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Output B"))
    step_c = Step(name="step_c", executor=lambda x: StepOutput(content="Output C"))

    def nested_selector(step_input: StepInput, step_choices: list):
        """Select from nested choices."""
        # step_choices[0] = step_a
        # step_choices[1] = Steps container with [step_b, step_c]
        return step_choices[0] if "single" in step_input.input else step_choices[1]

    router = Router(
        name="nested_router",
        selector=nested_selector,
        choices=[step_a, [step_b, step_c]],  # Nested list
    )

    # Verify step_choices structure after preparation
    router._prepare_steps()
    assert len(router.steps) == 2
    assert isinstance(router.steps[0], Step)
    assert isinstance(router.steps[1], Steps)

    # Test selecting single step
    single_result = router.execute(StepInput(input="single"))
    assert len(single_result.steps) == 1
    assert single_result.steps[0].content == "Output A"

    # Test selecting Steps container (runs step_b then step_c)
    sequence_result = router.execute(StepInput(input="sequence"))
    # Steps container should execute both nested steps
    assert find_content_in_steps(sequence_result, "Output B")
    assert find_content_in_steps(sequence_result, "Output C")
def test_multiple_nested_lists_in_choices():
    """Test multiple nested lists in choices."""
    step_1 = Step(name="step_1", executor=lambda x: StepOutput(content="Step 1"))
    step_2 = Step(name="step_2", executor=lambda x: StepOutput(content="Step 2"))
    step_3 = Step(name="step_3", executor=lambda x: StepOutput(content="Step 3"))
    step_4 = Step(name="step_4", executor=lambda x: StepOutput(content="Step 4"))

    def selector(step_input: StepInput, step_choices: list):
        # After preparation: choices[0] holds steps 1-2, choices[1] holds steps 3-4.
        index = 0 if "first" in step_input.input else 1
        return step_choices[index]

    router = Router(
        name="multi_nested_router",
        selector=selector,
        choices=[[step_1, step_2], [step_3, step_4]],
    )

    # Test first group
    first_result = router.execute(StepInput(input="first group"))
    assert find_content_in_steps(first_result, "Step 1")
    assert find_content_in_steps(first_result, "Step 2")

    # Test second group
    second_result = router.execute(StepInput(input="second group"))
    assert find_content_in_steps(second_result, "Step 3")
    assert find_content_in_steps(second_result, "Step 4")
@pytest.mark.asyncio
async def test_async_selector_returns_string():
    """Test async selector returning string step name."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Async A"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Async B"))

    async def async_string_selector(step_input: StepInput):
        # Async selectors may also return a bare step name.
        return "step_b"

    router = Router(name="async_string_router", selector=async_string_selector, choices=[step_a, step_b])

    outcome = await router.aexecute(StepInput(input="test"))
    assert outcome.steps[0].content == "Async B"
def test_string_selector_in_workflow(shared_db):
    """Test string-returning selector in workflow context."""
    tech_step = Step(name="Tech Research", executor=lambda x: StepOutput(content="Tech content"))
    biz_step = Step(name="Business Research", executor=lambda x: StepOutput(content="Business content"))
    general_step = Step(name="General Research", executor=lambda x: StepOutput(content="General content"))

    def route_by_topic(step_input: StepInput):
        # Map topic keywords to the matching step name; default to general research.
        topic = step_input.input.lower()
        if "tech" in topic:
            return "Tech Research"
        if "business" in topic:
            return "Business Research"
        return "General Research"

    workflow = Workflow(
        name="String Selector Workflow",
        db=shared_db,
        steps=[
            Router(
                name="Topic Router",
                selector=route_by_topic,
                choices=[tech_step, biz_step, general_step],
            )
        ],
    )

    # Each expected route: (input text, content the routed step must produce).
    expectations = [
        ("tech trends", "Tech content"),
        ("business analysis", "Business content"),
        ("random topic", "General content"),
    ]
    for topic_input, expected_content in expectations:
        response = workflow.run(input=topic_input)
        assert find_content_in_steps(response.step_results[0], expected_content)
# ============================================================================
# CEL EXPRESSION TESTS
# ============================================================================
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_basic_ternary():
    """Test CEL router with basic ternary expression."""
    video_step = Step(name="video_step", executor=lambda x: StepOutput(content="Video processing"))
    image_step = Step(name="image_step", executor=lambda x: StepOutput(content="Image processing"))

    router = Router(
        name="CEL Ternary Router",
        selector='input.contains("video") ? "video_step" : "image_step"',
        choices=[video_step, image_step],
    )

    # Should route to video_step
    video_result = router.execute(StepInput(input="Process this video file"))
    assert len(video_result.steps) == 1
    assert "Video processing" in video_result.steps[0].content

    # Should route to image_step
    image_result = router.execute(StepInput(input="Process this image file"))
    assert len(image_result.steps) == 1
    assert "Image processing" in image_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_additional_data():
    """Test CEL router using additional_data for routing."""
    fast_step = Step(name="fast_step", executor=lambda x: StepOutput(content="Fast processing"))
    normal_step = Step(name="normal_step", executor=lambda x: StepOutput(content="Normal processing"))

    # The CEL expression reads the target step name straight out of additional_data.
    router = Router(
        name="CEL Additional Data Router",
        selector="additional_data.route",
        choices=[fast_step, normal_step],
    )

    # Route to fast_step
    fast_result = router.execute(StepInput(input="test", additional_data={"route": "fast_step"}))
    assert len(fast_result.steps) == 1
    assert "Fast processing" in fast_result.steps[0].content

    # Route to normal_step
    normal_result = router.execute(StepInput(input="test", additional_data={"route": "normal_step"}))
    assert len(normal_result.steps) == 1
    assert "Normal processing" in normal_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_session_state():
    """Test CEL router using session_state for routing."""
    premium_step = Step(name="premium_step", executor=lambda x: StepOutput(content="Premium service"))
    basic_step = Step(name="basic_step", executor=lambda x: StepOutput(content="Basic service"))

    router = Router(
        name="CEL Session State Router",
        selector='session_state.user_tier == "premium" ? "premium_step" : "basic_step"',
        choices=[premium_step, basic_step],
    )

    # Route to premium_step
    premium_result = router.execute(StepInput(input="test"), session_state={"user_tier": "premium"})
    assert len(premium_result.steps) == 1
    assert "Premium service" in premium_result.steps[0].content

    # Route to basic_step
    basic_result = router.execute(StepInput(input="test"), session_state={"user_tier": "free"})
    assert len(basic_result.steps) == 1
    assert "Basic service" in basic_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_compound_condition():
    """Test CEL router with compound condition."""
    urgent_step = Step(name="urgent_step", executor=lambda x: StepOutput(content="Urgent handling"))
    normal_step = Step(name="normal_step", executor=lambda x: StepOutput(content="Normal handling"))

    # Both the priority flag AND the keyword must match for the urgent route.
    router = Router(
        name="CEL Compound Router",
        selector='additional_data.priority == "high" && input.contains("urgent") ? "urgent_step" : "normal_step"',
        choices=[urgent_step, normal_step],
    )

    # Both conditions met - route to urgent
    urgent_result = router.execute(StepInput(input="This is urgent!", additional_data={"priority": "high"}))
    assert len(urgent_result.steps) == 1
    assert "Urgent handling" in urgent_result.steps[0].content

    # Only one condition met - route to normal
    normal_result = router.execute(StepInput(input="This is urgent!", additional_data={"priority": "low"}))
    assert len(normal_result.steps) == 1
    assert "Normal handling" in normal_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_previous_step_content():
    """Test CEL router using previous_step_content."""
    error_handler = Step(name="error_handler", executor=lambda x: StepOutput(content="Error handled"))
    success_handler = Step(name="success_handler", executor=lambda x: StepOutput(content="Success processed"))

    router = Router(
        name="CEL Previous Content Router",
        selector='previous_step_content.contains("error") ? "error_handler" : "success_handler"',
        choices=[error_handler, success_handler],
    )

    # Route based on error in previous content
    error_result = router.execute(StepInput(input="test", previous_step_content="An error occurred in processing"))
    assert len(error_result.steps) == 1
    assert "Error handled" in error_result.steps[0].content

    # Route to success when no error
    ok_result = router.execute(StepInput(input="test", previous_step_content="Processing completed successfully"))
    assert len(ok_result.steps) == 1
    assert "Success processed" in ok_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_input_size():
    """Test CEL router using input size for routing."""
    detailed_step = Step(name="detailed_step", executor=lambda x: StepOutput(content="Detailed analysis"))
    quick_step = Step(name="quick_step", executor=lambda x: StepOutput(content="Quick analysis"))

    # Inputs longer than 50 characters get the detailed route.
    router = Router(
        name="CEL Input Size Router",
        selector='input.size() > 50 ? "detailed_step" : "quick_step"',
        choices=[detailed_step, quick_step],
    )

    # Long input - detailed analysis
    long_input = "This is a very long input that contains more than fifty characters for sure"
    detailed_result = router.execute(StepInput(input=long_input))
    assert len(detailed_result.steps) == 1
    assert "Detailed analysis" in detailed_result.steps[0].content

    # Short input - quick analysis
    quick_result = router.execute(StepInput(input="Short input"))
    assert len(quick_result.steps) == 1
    assert "Quick analysis" in quick_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_unknown_step_name():
    """Test CEL router with unknown step name returns empty."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Step A"))

    router = Router(
        name="CEL Unknown Step Router",
        selector='"nonexistent_step"',  # Returns a step name that doesn't exist
        choices=[step_a],
    )

    result = router.execute(StepInput(input="test"))
    # Should complete but with no steps executed (unknown step name)
    assert not result.steps
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_in_workflow(shared_db):
    """Test CEL router within a workflow."""
    tech_step = Step(name="tech_step", executor=lambda x: StepOutput(content="Tech content"))
    general_step = Step(name="general_step", executor=lambda x: StepOutput(content="General content"))

    workflow = Workflow(
        name="CEL Router Workflow",
        db=shared_db,
        steps=[
            Router(
                name="cel_router",
                selector='input.contains("tech") ? "tech_step" : "general_step"',
                choices=[tech_step, general_step],
            )
        ],
    )

    # Route to tech_step
    tech_response = workflow.run(input="tech topic discussion")
    assert find_content_in_steps(tech_response.step_results[0], "Tech content")

    # Route to general_step
    general_response = workflow.run(input="general topic discussion")
    assert find_content_in_steps(general_response.step_results[0], "General content")
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_streaming(shared_db):
    """Test CEL router with streaming."""
    step_a = Step(name="step_a", executor=lambda x: StepOutput(content="Step A output"))
    step_b = Step(name="step_b", executor=lambda x: StepOutput(content="Step B output"))

    workflow = Workflow(
        name="CEL Streaming Router",
        db=shared_db,
        steps=[
            Router(
                name="cel_stream_router",
                selector='"step_a"',
                choices=[step_a, step_b],
            )
        ],
    )

    events = list(workflow.run(input="test", stream=True, stream_events=True))

    started = [e for e in events if isinstance(e, RouterExecutionStartedEvent)]
    completed = [e for e in events if isinstance(e, RouterExecutionCompletedEvent)]
    # Exactly one start/completion pair, with the CEL-selected step recorded on the start event.
    assert len(started) == 1
    assert len(completed) == 1
    assert "step_a" in started[0].selected_steps
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
@pytest.mark.asyncio
async def test_cel_router_async():
    """CEL routing also works through the async aexecute path."""
    option_a = Step(name="step_a", executor=lambda x: StepOutput(content="Async Step A"))
    option_b = Step(name="step_b", executor=lambda x: StepOutput(content="Async Step B"))
    router = Router(
        name="CEL Async Router",
        selector='input.contains("option_a") ? "step_a" : "step_b"',
        choices=[option_a, option_b],
    )
    outcome = await router.aexecute(StepInput(input="Select option_a please"))
    # The input mentions "option_a", so only step_a ran.
    assert len(outcome.steps) == 1
    assert "Async Step A" in outcome.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_with_steps_component():
    """A CEL router can select a multi-step Steps sequence or a single Step."""
    inner_one = Step(name="step_1", executor=lambda x: StepOutput(content="Step 1"))
    inner_two = Step(name="step_2", executor=lambda x: StepOutput(content="Step 2"))
    multi = Steps(name="sequence", steps=[inner_one, inner_two])
    solo = Step(name="single", executor=lambda x: StepOutput(content="Single"))
    router = Router(
        name="CEL Steps Router",
        selector='input.contains("multi") ? "sequence" : "single"',
        choices=[multi, solo],
    )
    # "multi" in the input selects the whole sequence; both inner steps run.
    seq_result = router.execute(StepInput(input="multi step request"))
    assert find_content_in_steps(seq_result, "Step 1")
    assert find_content_in_steps(seq_result, "Step 2")
    # Otherwise only the lone step executes.
    solo_result = router.execute(StepInput(input="simple request"))
    assert len(solo_result.steps) == 1
    assert "Single" in solo_result.steps[0].content
@pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
def test_cel_router_nested_additional_data():
    """CEL selectors can dereference nested keys inside additional_data."""
    advanced = Step(name="step_a", executor=lambda x: StepOutput(content="Step A"))
    basic = Step(name="step_b", executor=lambda x: StepOutput(content="Step B"))
    router = Router(
        name="CEL Nested Data Router",
        # Reads additional_data["config"]["mode"] directly from the expression.
        selector='additional_data.config.mode == "advanced" ? "step_a" : "step_b"',
        choices=[advanced, basic],
    )
    # mode == "advanced" selects step_a.
    outcome = router.execute(StepInput(input="test", additional_data={"config": {"mode": "advanced"}}))
    assert len(outcome.steps) == 1
    assert "Step A" in outcome.steps[0].content
    # Any other mode selects step_b.
    outcome = router.execute(StepInput(input="test", additional_data={"config": {"mode": "basic"}}))
    assert len(outcome.steps) == 1
    assert "Step B" in outcome.steps[0].content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_router_steps.py",
"license": "Apache License 2.0",
"lines": 1018,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_sequence_of_function_and_agent.py | """Integration tests for Workflow v2 sequence of steps functionality"""
import asyncio
from typing import AsyncIterator, Iterator
import pytest
from agno.run.workflow import StepOutputEvent, WorkflowCompletedEvent, WorkflowRunOutput
from agno.workflow import StepInput, StepOutput, Workflow
def test_basic_sequence(shared_db):
    """Two plain functions chain: step2 sees step1's output as previous_step_content."""
    def step1(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"First: {step_input.input}")

    def step2(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"Second: {step_input.previous_step_content}")

    result = Workflow(name="Basic Sequence", db=shared_db, steps=[step1, step2]).run(input="test")
    assert isinstance(result, WorkflowRunOutput)
    assert len(result.step_results) == 2
    assert "Second: First: test" in result.content
def test_function_and_agent_sequence(shared_db, test_agent):
    """A function step followed by an agent step both complete successfully."""
    def step(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"Function: {step_input.input}")

    run_output = Workflow(name="Agent Sequence", db=shared_db, steps=[step, test_agent]).run(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert len(run_output.step_results) == 2
    # Only the agent step's success flag is meaningful here.
    assert run_output.step_results[1].success
def test_function_and_team_sequence(shared_db, test_team):
    """A function step followed by a team step both complete successfully."""
    def step(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"Function: {step_input.input}")

    run_output = Workflow(name="Team Sequence", db=shared_db, steps=[step, test_team]).run(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert len(run_output.step_results) == 2
    # Only the team step's success flag is meaningful here.
    assert run_output.step_results[1].success
def test_function_streaming_sequence(shared_db):
    """A generator step streams StepOutput chunks and the run still completes."""
    def streaming_step(step_input: StepInput) -> Iterator[StepOutput]:
        yield StepOutput(content="Start")

    workflow = Workflow(name="Streaming", db=shared_db, steps=[streaming_step])
    collected = list(workflow.run(input="test", stream=True))
    finished = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    chunks = [ev for ev in collected if isinstance(ev, StepOutputEvent)]
    assert len(finished) == 1
    assert any("Start" in str(ev.content) for ev in chunks)
@pytest.mark.asyncio
async def test_async_function_sequence(shared_db):
    """An async function executor runs through Workflow.arun."""
    async def async_step(step_input: StepInput) -> StepOutput:
        await asyncio.sleep(0.001)  # keep the test fast
        return StepOutput(content=f"Async: {step_input.input}")

    workflow = Workflow(name="Async", db=shared_db, steps=[async_step])
    run_output = await workflow.arun(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert "Async: test" in run_output.content
@pytest.mark.asyncio
async def test_async_function_streaming(shared_db):
    """An async generator step streams through Workflow.arun(stream=True)."""
    async def async_streaming_step(step_input: StepInput) -> AsyncIterator[StepOutput]:
        yield StepOutput(content="Start")

    workflow = Workflow(name="Async Streaming", db=shared_db, steps=[async_streaming_step])
    collected = [event async for event in workflow.arun(input="test", stream=True)]
    chunk_events = [ev for ev in collected if isinstance(ev, StepOutputEvent)]
    done_events = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done_events) == 1
    assert any("Start" in str(ev.content) for ev in chunk_events)
def test_mixed_sequence(shared_db, test_agent, test_team):
    """Function, agent, and team steps can be mixed in one sequence."""
    def step(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"Function: {step_input.input}")

    run_output = Workflow(name="Mixed", db=shared_db, steps=[step, test_agent, test_team]).run(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert len(run_output.step_results) == 3
    assert "Function: test" in run_output.step_results[0].content
    # Both model-backed steps must have succeeded.
    assert all(r.success for r in run_output.step_results[1:])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_sequence_of_function_and_agent.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_sequence_of_steps.py | """Integration tests for Workflow v2 sequence of steps functionality"""
import asyncio
from typing import AsyncIterator
import pytest
from agno.models.metrics import SessionMetrics
from agno.run.workflow import WorkflowCompletedEvent, WorkflowRunOutput
from agno.workflow import Step, StepInput, StepOutput, Workflow
def research_step_function(step_input: StepInput) -> StepOutput:
    """Minimal research step: wraps the workflow input in a 'Research:' prefix."""
    return StepOutput(content=f"Research: {step_input.input}")
def content_step_function(step_input: StepInput) -> StepOutput:
    """Minimal content step: echoes fixed text plus the previous step's output."""
    return StepOutput(content=f"Content: Hello World | Referencing: {step_input.previous_step_content}")
def test_function_sequence_non_streaming(shared_db):
    """Two function steps chain; the final content references the first's output."""
    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    run_output = workflow.run(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert len(run_output.step_results) == 2
    assert "Content: Hello World | Referencing: Research: test" in run_output.content
def test_function_sequence_streaming(shared_db):
    """Streaming yields events plus exactly one completion with the final content."""
    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    emitted = list(workflow.run(input="test", stream=True))
    assert emitted
    completions = [ev for ev in emitted if isinstance(ev, WorkflowCompletedEvent)]
    assert len(completions) == 1
    assert completions[0].content == "Content: Hello World | Referencing: Research: test"
def test_agent_sequence_non_streaming(shared_db, test_agent):
    """An agent step followed by a function step produces non-empty content."""
    test_agent.instructions = "Do research on the topic and return the results."
    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="research", agent=test_agent),
            Step(name="content", executor=content_step_function),
        ],
    )
    run_output = workflow.run(input="AI Agents")
    assert isinstance(run_output, WorkflowRunOutput)
    assert run_output.content is not None
    assert len(run_output.step_results) == 2
def test_team_sequence_non_streaming(shared_db, test_team):
    """A team step followed by a function step produces non-empty content."""
    test_team.members[0].role = "Do research on the topic and return the results."
    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="research", team=test_team),
            Step(name="content", executor=content_step_function),
        ],
    )
    run_output = workflow.run(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert run_output.content is not None
    assert len(run_output.step_results) == 2
@pytest.mark.asyncio
async def test_async_function_sequence(shared_db):
    """An async executor feeds its output into the following sync function step."""
    async def async_research(step_input: StepInput) -> StepOutput:
        await asyncio.sleep(0.001)  # keep the test fast
        return StepOutput(content=f"Async: {step_input.input}")

    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="research", executor=async_research),
            Step(name="content", executor=content_step_function),
        ],
    )
    run_output = await workflow.arun(input="test")
    assert isinstance(run_output, WorkflowRunOutput)
    assert "Async: test" in run_output.content
    assert "Content: Hello World | Referencing: Async: test" in run_output.content
@pytest.mark.asyncio
async def test_async_streaming(shared_db):
    """An async generator executor streams and the run completes exactly once."""
    async def async_streaming_step(step_input: StepInput) -> AsyncIterator[str]:
        yield f"Stream: {step_input.input}"
        await asyncio.sleep(0.001)

    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="research", executor=async_streaming_step),
            Step(name="content", executor=content_step_function),
        ],
    )
    emitted = [event async for event in workflow.arun(input="test", stream=True)]
    assert emitted
    completions = [ev for ev in emitted if isinstance(ev, WorkflowCompletedEvent)]
    assert len(completions) == 1
def test_step_chaining(shared_db):
    """previous_step_content carries step1's output into step2."""
    def step1(step_input: StepInput) -> StepOutput:
        return StepOutput(content="step1_output")

    def step2(step_input: StepInput) -> StepOutput:
        return StepOutput(content=f"step2_received_{step_input.previous_step_content}")

    workflow = Workflow(
        name="Test Workflow",
        db=shared_db,
        steps=[
            Step(name="step1", executor=step1),
            Step(name="step2", executor=step2),
        ],
    )
    assert "step2_received_step1_output" in workflow.run(input="test").content
# Session Metrics Tests
def test_workflow_session_metrics_basic(shared_db):
    """Session metrics exist and have the right type after a couple of runs."""
    workflow = Workflow(
        name="Session Metrics Test",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    # Two runs so the session has something to accumulate.
    for prompt in ("test1", "test2"):
        assert isinstance(workflow.run(input=prompt), WorkflowRunOutput)
    metrics = workflow.get_session_metrics()
    assert metrics is not None
    assert isinstance(metrics, SessionMetrics)
def test_workflow_session_metrics_aggregation(shared_db, test_agent):
    """Token counts accumulate to non-zero values across several agent runs."""
    test_agent.instructions = "Respond with a short message about the topic."
    workflow = Workflow(
        name="Aggregation Test",
        db=shared_db,
        steps=[
            Step(name="agent_step", agent=test_agent),
        ],
    )
    for i in range(3):
        run_output = workflow.run(input=f"test topic {i}")
        assert isinstance(run_output, WorkflowRunOutput)
    metrics = workflow.get_session_metrics()
    assert metrics is not None
    # Real model calls should have produced token usage on both sides.
    assert metrics.total_tokens > 0
    assert metrics.input_tokens > 0
    assert metrics.output_tokens > 0
@pytest.mark.asyncio
async def test_async_workflow_session_metrics(shared_db, test_agent):
    """Metrics are recorded for runs executed through the async path."""
    test_agent.instructions = "Respond with a short message about the topic."
    workflow = Workflow(
        name="Async Metrics Test",
        db=shared_db,
        steps=[
            Step(name="agent_step", agent=test_agent),
        ],
    )
    run_output = await workflow.arun(input="async test")
    assert isinstance(run_output, WorkflowRunOutput)
    metrics = workflow.get_session_metrics()
    assert metrics is not None
    assert metrics.total_tokens > 0
@pytest.mark.asyncio
async def test_async_workflow_session_metrics_multiple_runs(shared_db):
    """Metrics stay retrievable after several async runs of a function-only workflow."""
    async def async_step(step_input: StepInput) -> StepOutput:
        await asyncio.sleep(0.001)  # keep the test fast
        return StepOutput(content=f"Async response: {step_input.input}")

    workflow = Workflow(
        name="Multi Async Test",
        db=shared_db,
        steps=[
            Step(name="async_step", executor=async_step),
        ],
    )
    for i in range(2):
        assert isinstance(await workflow.arun(input=f"test {i}"), WorkflowRunOutput)
    assert workflow.get_session_metrics() is not None
def test_workflow_session_metrics_persistence(shared_db):
    """Two workflow instances sharing a session_id both expose session metrics."""
    session_id = "test_persistence_session"
    first = Workflow(
        name="Persistence Test",
        db=shared_db,
        session_id=session_id,
        steps=[
            Step(name="step1", executor=research_step_function),
        ],
    )
    assert isinstance(first.run(input="test1"), WorkflowRunOutput)
    # A fresh instance bound to the same session id shares the stored session.
    second = Workflow(
        name="Persistence Test",
        db=shared_db,
        session_id=session_id,
        steps=[
            Step(name="step2", executor=content_step_function),
        ],
    )
    assert isinstance(second.run(input="test2"), WorkflowRunOutput)
    assert first.get_session_metrics() is not None
    assert second.get_session_metrics() is not None
def test_workflow_session_metrics_different_sessions(shared_db):
    """Workflows with distinct session_ids keep independent metrics records."""
    workflow_one = Workflow(
        name="Session 1",
        db=shared_db,
        session_id="session_1",
        steps=[Step(name="step1", executor=research_step_function)],
    )
    workflow_two = Workflow(
        name="Session 2",
        db=shared_db,
        session_id="session_2",
        steps=[Step(name="step2", executor=content_step_function)],
    )
    assert isinstance(workflow_one.run(input="test1"), WorkflowRunOutput)
    assert isinstance(workflow_two.run(input="test2"), WorkflowRunOutput)
    # Each session tracked its own single run.
    assert workflow_one.get_session_metrics() is not None
    assert workflow_two.get_session_metrics() is not None
def test_workflow_session_metrics_error_handling(shared_db):
    """A failed run must not break metrics for later runs in the same session.

    The first workflow contains a step whose executor raises; we tolerate (but
    do not require) the exception surfacing from run(), then execute a
    succeeding workflow against the same session and check metrics are still
    retrievable.
    """
    from contextlib import suppress

    def failing_step(step_input: StepInput) -> StepOutput:
        raise Exception("Intentional test failure")

    workflow = Workflow(
        name="Error Test",
        db=shared_db,
        steps=[
            Step(name="success", executor=research_step_function),
            Step(name="failure", executor=failing_step),
        ],
    )
    # The run is expected to fail; suppress() makes the intent explicit
    # instead of a bare try/except-pass.
    with suppress(Exception):
        workflow.run(input="test")
    # Run a successful workflow against the same session.
    workflow_success = Workflow(
        name="Error Test",
        db=shared_db,
        session_id=workflow.session_id,  # reuse the session that saw the failure
        steps=[
            Step(name="success_only", executor=research_step_function),
        ],
    )
    assert isinstance(workflow_success.run(input="test success"), WorkflowRunOutput)
    # Metrics cover the session as a whole, including the failed attempt.
    assert workflow_success.get_session_metrics() is not None
def test_workflow_session_metrics_aggregation_across_runs(shared_db, test_agent):
    """total_tokens strictly increases between consecutive agent runs."""
    test_agent.instructions = "Respond with exactly 10 words about the topic."
    workflow = Workflow(
        name="Aggregation Test",
        db=shared_db,
        steps=[Step(name="agent_step", agent=test_agent)],
    )
    # Baseline after the first run.
    workflow.run(input="first test")
    baseline = workflow.get_session_metrics()
    baseline_tokens = baseline.total_tokens if baseline else 0
    # A second run must add to the count.
    workflow.run(input="second test")
    updated = workflow.get_session_metrics()
    assert updated is not None
    assert updated.total_tokens > baseline_tokens
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_sequence_of_steps.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_session.py | import uuid
from typing import Any, Dict, Optional
import pytest
from agno.agent.agent import Agent
from agno.run import RunContext
from agno.workflow import Step, StepInput, StepOutput, Workflow
from agno.workflow.condition import Condition
from agno.workflow.router import Router
# Simple helper functions
def research_step_function(step_input: StepInput) -> StepOutput:
    """Minimal research step: wraps the workflow input in a 'Research:' prefix."""
    return StepOutput(content=f"Research: {step_input.input}")
def content_step_function(step_input: StepInput) -> StepOutput:
    """Minimal content step: echoes fixed text plus the previous step's output."""
    return StepOutput(content=f"Content: Hello World | Referencing: {step_input.previous_step_content}")
def workflow_factory(shared_db, session_id: Optional[str] = None, session_state: Optional[Dict[str, Any]] = None):
    """Build the standard two-step test workflow backed by the shared db."""
    pipeline = [
        Step(name="research", executor=research_step_function),
        Step(name="content", executor=content_step_function),
    ]
    return Workflow(
        name="Test Workflow",
        db=shared_db,
        session_id=session_id,
        session_state=session_state,
        steps=pipeline,
    )
def test_workflow_default_state(shared_db):
    """The session id and state given at construction are persisted to storage."""
    session_id = "session_1"
    session_state = {"test_key": "test_value"}
    workflow = workflow_factory(shared_db, session_id, session_state)
    run_output = workflow.run("Test")
    assert run_output.run_id is not None
    # In-memory attributes reflect the constructor arguments.
    assert workflow.session_id == session_id
    assert workflow.session_state == session_state
    # And the persisted session mirrors them.
    stored = workflow.get_session(session_id=session_id)
    assert stored is not None
    assert stored.session_id == session_id
    assert stored.session_data["session_state"] == session_state
def test_workflow_set_session_name(shared_db):
    """set_session_name writes the name into the stored session_data."""
    session_id = "session_1"
    workflow = workflow_factory(shared_db, session_id, {"test_key": "test_value"})
    workflow.run("Test")
    workflow.set_session_name(session_id=session_id, session_name="my_test_session")
    stored = workflow.get_session(session_id=session_id)
    assert stored is not None
    assert stored.session_id == session_id
    assert stored.session_data["session_name"] == "my_test_session"
def test_workflow_get_session_name(shared_db):
    """get_session_name returns the name previously set for the active session."""
    session_id = "session_1"
    workflow = workflow_factory(shared_db, session_id)
    workflow.run("Test")
    workflow.set_session_name(session_id=session_id, session_name="my_test_session")
    # Reads back through the workflow's own bound session.
    assert workflow.get_session_name() == "my_test_session"
def test_workflow_get_session_state(shared_db):
    """get_session_state returns the state the workflow was constructed with."""
    workflow = workflow_factory(shared_db, "session_1", session_state={"test_key": "test_value"})
    workflow.run("Test")
    # Compare against a fresh literal so in-place mutation would be caught.
    assert workflow.get_session_state() == {"test_key": "test_value"}
def test_workflow_session_state_switch_session_id(shared_db):
    """Session state follows the workflow when runs alternate between session ids."""
    session_id_1 = "session_1"
    session_id_2 = "session_2"
    session_state = {"test_key": "test_value"}
    workflow = workflow_factory(shared_db, session_id_1, session_state)
    # Run against the original session id.
    workflow.run("Test 1", session_id=session_id_1)
    stored = workflow.get_session(session_id=session_id_1)
    assert stored.session_id == session_id_1
    assert stored.session_data["session_state"] == session_state
    # Run against a second session id: the state is carried over there too.
    workflow.run("Test", session_id=session_id_2)
    stored = workflow.get_session(session_id=session_id_2)
    assert stored.session_id == session_id_2
    assert stored.session_data["session_state"] == session_state
    # Switching back to the first session id keeps its state intact.
    workflow.run("Test", session_id=session_id_1)
    stored = workflow.get_session(session_id=session_id_1)
    assert stored.session_id == session_id_1
    assert stored.session_data["session_state"] == {"test_key": "test_value"}
def test_workflow_with_state_shared_downstream(shared_db):
    """Tool calls made by agent steps mutate the shared workflow session_state."""
    # Tool docstrings below are the descriptions the agent receives — keep them stable.
    def add_item(run_context: RunContext, item: str) -> str:
        """Add an item to the shopping list."""
        run_context.session_state["shopping_list"].append(item)
        return f"The shopping list is now {run_context.session_state['shopping_list']}"

    def get_all_items(run_context: RunContext) -> str:
        """Get all items from the shopping list."""
        return f"The shopping list is now {run_context.session_state['shopping_list']}"

    workflow = workflow_factory(shared_db, session_id="session_1", session_state={"shopping_list": []})
    # Swap the factory's function steps for agent steps that use the tools above.
    workflow.steps[0] = Step(name="add_item", agent=Agent(tools=[add_item]))
    workflow.steps[1] = Step(
        name="list_items", agent=Agent(tools=[get_all_items], instructions="Get all items from the shopping list")
    )
    workflow.run("Add oranges to my shopping list", session_id="session_1", session_state={"shopping_list": []})
    stored = workflow.get_session(session_id="session_1")
    assert stored.session_data["session_state"] == {"shopping_list": ["oranges"]}
def test_condition_with_session_state(shared_db):
    """Test that Condition evaluators can access and modify session_state."""
    session_id = "session_condition"
    # Track if condition was called with session_state
    condition_calls = []
    def condition_evaluator(step_input: StepInput, session_state: Dict[str, Any]) -> bool:
        """Condition evaluator that uses session_state."""
        # Snapshot what the framework exposed; the asserts below show that
        # "current_session_id" is injected into session_state by the runtime.
        condition_calls.append(
            {
                "user_id": session_state.get("current_user_id"),
                "session_id": session_state.get("current_session_id"),
                "counter": session_state.get("counter", 0),
            }
        )
        # Increment counter — mutations here must persist into the stored session.
        session_state["counter"] = session_state.get("counter", 0) + 1
        session_state["condition_executed"] = True
        # Return True if counter is less than 2 (i.e. True on the first run only)
        return session_state["counter"] < 2
    def dummy_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Dummy step executed")
    workflow = Workflow(
        name="Condition Test Workflow",
        db=shared_db,
        session_id=session_id,
        session_state={"counter": 0},
        steps=[
            Condition(
                name="Test Condition",
                description="Test condition with session_state",
                evaluator=condition_evaluator,
                steps=[
                    Step(name="dummy", executor=dummy_function),
                ],
            ),
        ],
    )
    # First run - condition should be True
    response1 = workflow.run("Test 1", session_id=session_id)
    assert response1.run_id is not None
    # Verify condition was called with session_state
    assert len(condition_calls) == 1
    assert condition_calls[0]["session_id"] == session_id
    assert condition_calls[0]["counter"] == 0
    # Verify session state was modified and persisted
    session_state = workflow.get_session_state(session_id=session_id)
    assert session_state["counter"] == 1
    assert session_state["condition_executed"] is True
    # Second run - condition should be False (counter reaches 2)
    response2 = workflow.run("Test 2", session_id=session_id)
    assert response2.run_id is not None
    # Verify condition was called again
    assert len(condition_calls) == 2
    assert condition_calls[1]["counter"] == 1
    # Verify counter was incremented again
    session_state = workflow.get_session_state(session_id=session_id)
    assert session_state["counter"] == 2
def test_router_with_session_state(shared_db):
    """Test that Router selectors can access and modify session_state."""
    session_id = "session_router"
    # Track router calls
    router_calls = []
    def router_selector(step_input: StepInput, session_state: Dict[str, Any]) -> Step:
        """Router selector that uses session_state."""
        # Snapshot what the framework exposed; the asserts below show that
        # "current_session_id" is injected into session_state by the runtime.
        router_calls.append(
            {
                "user_id": session_state.get("current_user_id"),
                "session_id": session_state.get("current_session_id"),
                "route_count": session_state.get("route_count", 0),
            }
        )
        # Increment route count — mutations must persist into the stored session.
        session_state["route_count"] = session_state.get("route_count", 0) + 1
        session_state["router_executed"] = True
        # Route based on count: odd counts go to step_a, even to step_b.
        # NOTE: step_a / step_b are defined below this function; the closure
        # resolves them lazily at call time, so this is safe.
        if session_state["route_count"] % 2 == 1:
            return step_a
        else:
            return step_b
    def step_a_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Step A executed")
    def step_b_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Step B executed")
    step_a = Step(name="step_a", executor=step_a_function)
    step_b = Step(name="step_b", executor=step_b_function)
    workflow = Workflow(
        name="Router Test Workflow",
        db=shared_db,
        session_id=session_id,
        session_state={"route_count": 0},
        steps=[
            Router(
                name="Test Router",
                description="Test router with session_state",
                selector=router_selector,
                choices=[step_a, step_b],
            ),
        ],
    )
    # First run - should route to step_a
    response1 = workflow.run("Test 1", session_id=session_id)
    assert response1.run_id is not None
    assert "Step A executed" in response1.content
    # Verify router was called with session_state
    assert len(router_calls) == 1
    assert router_calls[0]["session_id"] == session_id
    assert router_calls[0]["route_count"] == 0
    # Verify session state was modified
    session_state = workflow.get_session_state(session_id=session_id)
    assert session_state["route_count"] == 1
    assert session_state["router_executed"] is True
    # Second run - should route to step_b
    response2 = workflow.run("Test 2", session_id=session_id)
    assert response2.run_id is not None
    assert "Step B executed" in response2.content
    # Verify router was called again
    assert len(router_calls) == 2
    assert router_calls[1]["route_count"] == 1
    # Verify route count was incremented
    session_state = workflow.get_session_state(session_id=session_id)
    assert session_state["route_count"] == 2
def test_condition_without_session_state_param(shared_db):
    """An evaluator that omits the session_state parameter is still supported."""
    session_id = "session_condition_no_param"
    def condition_evaluator_no_param(step_input: StepInput) -> bool:
        # Legacy single-argument signature; must keep working.
        return True
    def dummy_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Dummy step executed")
    condition = Condition(
        name="Test Condition",
        evaluator=condition_evaluator_no_param,
        steps=[Step(name="dummy", executor=dummy_function)],
    )
    workflow = Workflow(
        name="Condition No Param Test",
        db=shared_db,
        session_id=session_id,
        steps=[condition],
    )
    # Must run without raising over the missing parameter.
    run_output = workflow.run("Test", session_id=session_id)
    assert run_output.run_id is not None
    assert "Dummy step executed" in run_output.content
def test_router_without_session_state_param(shared_db):
    """A selector that omits the session_state parameter is still supported."""
    session_id = "session_router_no_param"
    def step_a_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Step A executed")
    step_a = Step(name="step_a", executor=step_a_function)
    def router_selector_no_param(step_input: StepInput) -> Step:
        # Legacy single-argument signature; must keep working.
        return step_a
    workflow = Workflow(
        name="Router No Param Test",
        db=shared_db,
        session_id=session_id,
        steps=[
            Router(
                name="Test Router",
                selector=router_selector_no_param,
                choices=[step_a],
            ),
        ],
    )
    # Must run without raising over the missing parameter.
    run_output = workflow.run("Test", session_id=session_id)
    assert run_output.run_id is not None
    assert "Step A executed" in run_output.content
# NOTE(review): declared without @pytest.mark.asyncio, unlike the async tests in
# the sibling files — presumably pytest-asyncio runs in auto mode here; confirm
# against the project's pytest configuration.
async def test_async_condition_with_session_state(async_shared_db):
    """Test that async Condition evaluators can access and modify session_state."""
    session_id = "session_async_condition"
    condition_calls = []
    async def async_condition_evaluator(step_input: StepInput, session_state: Dict[str, Any]) -> bool:
        """Async condition evaluator that uses session_state."""
        # Snapshot what the runtime exposed to the evaluator.
        condition_calls.append(
            {
                "session_id": session_state.get("current_session_id"),
                "async_counter": session_state.get("async_counter", 0),
            }
        )
        # Mutations here must persist into the stored session.
        session_state["async_counter"] = session_state.get("async_counter", 0) + 1
        session_state["async_condition_executed"] = True
        # True on the first run only (counter reaches 1).
        return session_state["async_counter"] < 2
    def dummy_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Async dummy executed")
    workflow = Workflow(
        name="Async Condition Test",
        db=async_shared_db,
        session_id=session_id,
        session_state={"async_counter": 0},
        steps=[
            Condition(
                name="Async Condition",
                evaluator=async_condition_evaluator,
                steps=[
                    Step(name="dummy", executor=dummy_function),
                ],
            ),
        ],
    )
    # First run
    response1 = await workflow.arun("Test 1", session_id=session_id)
    assert response1.run_id is not None
    # Verify async condition was called with the initial counter value
    assert len(condition_calls) == 1
    assert condition_calls[0]["async_counter"] == 0
    # Verify state was modified (use async method with AsyncSqliteDb)
    session_state = await workflow.aget_session_state(session_id=session_id)
    assert session_state["async_counter"] == 1
    assert session_state["async_condition_executed"] is True
# NOTE(review): declared without @pytest.mark.asyncio, unlike the async tests in
# the sibling files — presumably pytest-asyncio runs in auto mode here; confirm
# against the project's pytest configuration.
async def test_async_router_with_session_state(async_shared_db):
    """Test that async Router selectors can access and modify session_state."""
    session_id = "session_async_router"
    router_calls = []
    async def async_router_selector(step_input: StepInput, session_state: Dict[str, Any]) -> Step:
        """Async router selector that uses session_state."""
        # Snapshot what the runtime exposed to the selector.
        router_calls.append(
            {
                "session_id": session_state.get("current_session_id"),
                "async_route_count": session_state.get("async_route_count", 0),
            }
        )
        # Mutations here must persist into the stored session.
        session_state["async_route_count"] = session_state.get("async_route_count", 0) + 1
        session_state["async_router_executed"] = True
        # NOTE: step_a is defined below; the closure resolves it lazily at call time.
        return step_a
    def step_a_function(step_input: StepInput) -> StepOutput:
        return StepOutput(content="Async Step A executed")
    step_a = Step(name="step_a", executor=step_a_function)
    workflow = Workflow(
        name="Async Router Test",
        db=async_shared_db,
        session_id=session_id,
        session_state={"async_route_count": 0},
        steps=[
            Router(
                name="Async Router",
                selector=async_router_selector,
                choices=[step_a],
            ),
        ],
    )
    # First run
    response1 = await workflow.arun("Test 1", session_id=session_id)
    assert response1.run_id is not None
    assert "Async Step A executed" in response1.content
    # Verify async router was called with the initial count
    assert len(router_calls) == 1
    assert router_calls[0]["async_route_count"] == 0
    # Verify state was modified (use async method with AsyncSqliteDb)
    session_state = await workflow.aget_session_state(session_id=session_id)
    assert session_state["async_route_count"] == 1
    assert session_state["async_router_executed"] is True
async def test_workflow_with_base_model_content(shared_db):
    """Test that a workflow with a BaseModel content can be run."""
    from datetime import datetime

    from pydantic import BaseModel

    from agno.db.base import SessionType

    session_id = "session_base_model_content"

    class Content(BaseModel):
        content: str
        date: datetime

    def produce_content(step_input: StepInput) -> StepOutput:
        return StepOutput(content=Content(content="Hello World", date=datetime.now()))

    workflow = Workflow(
        name="Base Model Content Test",
        db=shared_db,
        steps=[Step(name="content", executor=produce_content)],
    )

    result = workflow.run("Test", session_id=session_id)

    assert result.run_id is not None
    assert result.content.content == "Hello World"
    assert result.content.date is not None
    # Session persisted even though the content is a BaseModel carrying a datetime.
    stored = shared_db.get_session(session_id=session_id, session_type=SessionType.WORKFLOW)
    assert stored is not None
@pytest.mark.asyncio
async def test_async_workflow_run_with_async_db(async_shared_db):
    """Test Workflow async arun() with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    result = await wf.arun("Test", session_id=str(uuid.uuid4()))
    assert result is not None
    assert result.run_id is not None
@pytest.mark.asyncio
async def test_async_workflow_run_stream_with_async_db(async_shared_db):
    """Test Workflow async arun() streaming with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    last_event = None
    async for event in wf.arun("Test", session_id=str(uuid.uuid4()), stream=True):
        last_event = event
    # The stream must yield at least one event, and the final one carries a run id.
    assert last_event is not None
    assert last_event.run_id is not None
@pytest.mark.asyncio
async def test_aget_session_with_async_db(async_shared_db):
    """Test aget_session with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid)

    fetched = await wf.aget_session(session_id=sid)
    assert fetched is not None
    assert fetched.session_id == sid
    # Exactly one run was recorded for this fresh session.
    assert len(fetched.runs) == 1
@pytest.mark.asyncio
async def test_asave_session_with_async_db(async_shared_db):
    """Test asave_session with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid)

    current = await wf.aget_session(session_id=sid)
    current.session_data["custom_key"] = "custom_value"
    await wf.asave_session(current)

    # Re-read to prove the mutation was actually persisted, not just cached.
    persisted = await wf.aget_session(session_id=sid)
    assert persisted.session_data["custom_key"] == "custom_value"
@pytest.mark.asyncio
async def test_aset_session_name_with_async_db(async_shared_db):
    """Test aset_session_name with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid)

    await wf.aset_session_name(session_id=sid, session_name="my_test_session")
    # The name lands in session_data on the stored session.
    renamed = await wf.aget_session(session_id=sid)
    assert renamed.session_data["session_name"] == "my_test_session"
@pytest.mark.asyncio
async def test_aget_session_name_with_async_db(async_shared_db):
    """Test aget_session_name with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid)
    await wf.aset_session_name(session_id=sid, session_name="my_test_session")

    # Round-trip: the name set above is what the getter returns.
    assert await wf.aget_session_name(session_id=sid) == "my_test_session"
@pytest.mark.asyncio
async def test_aget_session_state_with_async_db(async_shared_db):
    """Test aget_session_state with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid, session_state={"key": "value"})

    state = await wf.aget_session_state(session_id=sid)
    assert state is not None
@pytest.mark.asyncio
async def test_aupdate_session_state_with_async_db(async_shared_db):
    """Test aupdate_session_state with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid, session_state={"counter": 0, "items": []})

    # The update merges into existing state and returns the merged dict.
    merged = await wf.aupdate_session_state({"counter": 10}, session_id=sid)
    assert merged == {"counter": 10, "items": []}

    # And the merge was persisted, not just returned.
    fresh = await wf.aget_session_state(session_id=sid)
    assert fresh["counter"] == 10
@pytest.mark.asyncio
async def test_adelete_session_with_async_db(async_shared_db):
    """Test adelete_session with async database."""
    wf = Workflow(
        name="Test Workflow",
        db=async_shared_db,
        steps=[
            Step(name="research", executor=research_step_function),
            Step(name="content", executor=content_step_function),
        ],
    )
    sid = str(uuid.uuid4())
    await wf.arun("Test", session_id=sid)

    # Session exists before deletion...
    assert await wf.aget_session(session_id=sid) is not None
    await wf.adelete_session(session_id=sid)
    # ...and is gone afterwards.
    assert await wf.aget_session(session_id=sid) is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_session.py",
"license": "Apache License 2.0",
"lines": 532,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_steps.py | """Integration tests for Steps functionality in workflows."""
import asyncio
from typing import AsyncIterator
import pytest
from agno.run.workflow import (
StepCompletedEvent,
StepsExecutionCompletedEvent,
StepsExecutionStartedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowRunOutput,
)
from agno.workflow import Step, StepInput, StepOutput, Steps, Workflow
def find_content_in_steps(step_output, search_text):
    """Recursively search for `search_text` in a step output and its nested steps.

    Args:
        step_output: Object with a `.content` string (or None) and an optional
            `.steps` list of nested step outputs.
        search_text: Substring to look for.

    Returns:
        True if the text appears in this output's content or in any nested
        step's content, False otherwise.
    """
    # Guard: StepOutput.content may be None (e.g. empty steps); `in None`
    # would raise TypeError in the original implementation.
    content = step_output.content
    if content is not None and search_text in content:
        return True
    if step_output.steps:
        return any(find_content_in_steps(nested, search_text) for nested in step_output.steps)
    return False
# Simple helper functions
def step1_function(step_input: StepInput) -> StepOutput:
    """First step function: wraps the raw workflow input."""
    message = f"Step1: {step_input.input}"
    return StepOutput(content=message)
def step2_function(step_input: StepInput) -> StepOutput:
    """Second step function that uses the previous step's output."""
    upstream = step_input.previous_step_content
    if not upstream:
        upstream = ""
    return StepOutput(content=f"Step2: {upstream}")
def step3_function(step_input: StepInput) -> StepOutput:
    """Third step function; chains from the previous step's content."""
    upstream = step_input.previous_step_content
    if not upstream:
        upstream = ""
    return StepOutput(content=f"Step3: {upstream}")
class StepClassWithCallable:
    """Callable object usable as a sync step executor."""

    def __call__(self, step_input: StepInput) -> StepOutput:
        upstream = step_input.previous_step_content
        if not upstream:
            upstream = ""
        return StepOutput(content=f"StepClassWithCallable: {upstream}")
async def async_step_function(step_input: StepInput) -> StepOutput:
    """Async step function; yields control briefly before producing output."""
    await asyncio.sleep(0.001)
    message = f"AsyncStep: {step_input.input}"
    return StepOutput(content=message)
class AsyncStepClassWithCallable:
    """Callable object usable as an async step executor."""

    async def __call__(self, step_input: StepInput) -> StepOutput:
        await asyncio.sleep(0.001)
        message = f"AsyncStepClassWithCallable: {step_input.input}"
        return StepOutput(content=message)
async def async_streaming_function(step_input: StepInput) -> AsyncIterator[str]:
    """Async streaming step function: yields one chunk, then briefly sleeps."""
    yield f"Streaming: {step_input.input}"
    await asyncio.sleep(0.001)
# ============================================================================
# TESTS (Fast - No Workflow Overhead)
# ============================================================================
def test_steps_direct_execute():
    """Test Steps.execute() directly without workflow."""
    sequence = Steps(
        name="Direct Steps",
        steps=[
            Step(name="step1", executor=step1_function),
            Step(name="step2", executor=step2_function),
        ],
    )
    result = sequence.execute(StepInput(input="direct test"))

    assert isinstance(result, StepOutput)
    assert len(result.steps) == 2
    # Step1 saw the raw input; step2 saw step1's output chained in.
    assert find_content_in_steps(result, "Step1: direct test")
    assert find_content_in_steps(result, "Step2: Step1: direct test")
def test_steps_direct_execute_with_callable_class():
    """Test Steps.execute() with a callable-class executor."""
    sequence = Steps(
        name="Direct Steps",
        steps=[
            Step(name="step1", executor=step1_function),
            Step(name="step2", executor=StepClassWithCallable()),
        ],
    )
    result = sequence.execute(StepInput(input="direct test"))

    assert isinstance(result, StepOutput)
    assert len(result.steps) == 2
    # The callable instance chains from step1's output just like a function.
    assert find_content_in_steps(result, "Step1: direct test")
    assert find_content_in_steps(result, "StepClassWithCallable: Step1: direct test")
@pytest.mark.asyncio
async def test_steps_direct_aexecute():
    """Test Steps.aexecute() directly without workflow."""
    sequence = Steps(
        name="Direct Async Steps",
        steps=[
            Step(name="step1", executor=step1_function),
            Step(name="step2", executor=step2_function),
        ],
    )
    result = await sequence.aexecute(StepInput(input="direct async test"))

    assert isinstance(result, StepOutput)
    assert len(result.steps) == 2
    assert find_content_in_steps(result, "Step1: direct async test")
    assert find_content_in_steps(result, "Step2: Step1: direct async test")
@pytest.mark.asyncio
async def test_steps_direct_aexecute_with_callable_class():
    """Test Steps.aexecute() with an async callable-class executor."""
    sequence = Steps(
        name="Direct Async Steps",
        steps=[
            Step(name="step1", executor=step1_function),
            Step(name="step2", executor=AsyncStepClassWithCallable()),
        ],
    )
    result = await sequence.aexecute(StepInput(input="direct async test"))

    assert isinstance(result, StepOutput)
    assert len(result.steps) == 2
    assert find_content_in_steps(result, "Step1: direct async test")
    assert find_content_in_steps(result, "AsyncStepClassWithCallable: direct async test")
def test_steps_direct_execute_stream():
    """Test Steps.execute_stream() directly without workflow.

    Verifies the streaming API emits exactly one started event, one
    completed event, and a single container StepOutput for the sequence.
    """
    step1 = Step(name="step1", executor=step1_function)
    step2 = Step(name="step2", executor=step2_function)
    steps = Steps(name="Direct Stream Steps", steps=[step1, step2])
    step_input = StepInput(input="direct stream test")

    # Minimal workflow response object required by the streaming API.
    # WorkflowRunOutput is already imported at module level; the previous
    # redundant function-local re-import has been removed.
    mock_response = WorkflowRunOutput(
        run_id="test-run",
        workflow_name="test-workflow",
        workflow_id="test-id",
        session_id="test-session",
        content="",
    )

    events = list(steps.execute_stream(step_input, mock_response, stream_events=True))

    # Should have started, completed events and final step output
    started_events = [e for e in events if isinstance(e, StepsExecutionStartedEvent)]
    completed_events = [e for e in events if isinstance(e, StepsExecutionCompletedEvent)]
    step_outputs = [e for e in events if isinstance(e, StepOutput)]

    assert len(started_events) == 1
    assert len(completed_events) == 1
    assert len(step_outputs) == 1  # Now returns single container StepOutput
    assert started_events[0].steps_count == 2
def test_steps_direct_empty():
    """Test Steps with no internal steps."""
    result = Steps(name="Empty Steps", steps=[]).execute(StepInput(input="test"))

    # An empty sequence still returns a StepOutput explaining there was nothing to do.
    assert isinstance(result, StepOutput)
    assert "No steps to execute" in result.content
def test_steps_direct_single_step():
    """Test Steps with a single inner step."""
    only_step = Step(name="step1", executor=step1_function)
    result = Steps(name="Single Step", steps=[only_step]).execute(StepInput(input="single test"))

    assert isinstance(result, StepOutput)
    assert len(result.steps) == 1
    assert find_content_in_steps(result, "Step1: single test")
def test_steps_direct_chaining():
    """Test Steps properly chains outputs step-to-step."""
    chain = Steps(
        name="Chaining Steps",
        steps=[
            Step(name="first", executor=lambda x: StepOutput(content="first_output")),
            Step(name="second", executor=lambda x: StepOutput(content=f"second_{x.previous_step_content}")),
            Step(name="third", executor=lambda x: StepOutput(content=f"third_{x.previous_step_content}")),
        ],
    )
    result = chain.execute(StepInput(input="test"))

    assert isinstance(result, StepOutput)
    # Each step's content accumulates the one before it.
    assert [s.content for s in result.steps] == [
        "first_output",
        "second_first_output",
        "third_second_first_output",
    ]
# ============================================================================
# INTEGRATION TESTS (With Workflow)
# ============================================================================
def test_basic_steps_execution(shared_db):
    """Test basic Steps execution - sync non-streaming."""
    sequence = Steps(
        name="test_steps",
        steps=[
            Step(name="step1", executor=step1_function),
            Step(name="step2", executor=step2_function),
        ],
    )
    wf = Workflow(name="Basic Steps Test", db=shared_db, steps=[sequence])

    result = wf.run(input="test message")

    # One top-level result: the Steps container holding the chained output.
    assert len(result.step_results) == 1
    assert find_content_in_steps(result.step_results[0], "Step2: Step1: test message")
def test_steps_streaming(shared_db):
    """Test Steps execution - sync streaming."""
    sequence = Steps(
        name="streaming_steps",
        steps=[
            Step(name="step1", executor=step1_function),
            Step(name="step2", executor=step2_function),
        ],
    )
    wf = Workflow(name="Streaming Steps Test", db=shared_db, steps=[sequence])

    events = list(wf.run(input="stream test", stream=True, stream_events=True))

    # The stream must contain exactly one of each lifecycle event.
    started = [e for e in events if isinstance(e, StepsExecutionStartedEvent)]
    completed = [e for e in events if isinstance(e, StepsExecutionCompletedEvent)]
    finished = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(started) == 1
    assert len(completed) == 1
    assert len(finished) == 1

    # The completion event carries the fully chained content.
    assert find_content_in_steps(finished[0].step_results[0], "Step2: Step1: stream test")
@pytest.mark.asyncio
async def test_async_steps_execution(shared_db):
    """Test Steps execution - async non-streaming."""
    sequence = Steps(
        name="async_steps",
        steps=[
            Step(name="async_step", executor=async_step_function),
            Step(name="regular_step", executor=step2_function),
        ],
    )
    wf = Workflow(name="Async Steps Test", db=shared_db, steps=[sequence])

    result = await wf.arun(input="async test")

    assert len(result.step_results) == 1
    # The sync step chains from the async step's output.
    assert find_content_in_steps(result.step_results[0], "Step2: AsyncStep: async test")
@pytest.mark.asyncio
async def test_async_steps_streaming(shared_db):
    """Test Steps execution - async streaming."""
    sequence = Steps(
        name="async_streaming_steps",
        steps=[
            Step(name="async_streaming", executor=async_streaming_function),
            Step(name="regular_step", executor=step2_function),
        ],
    )
    wf = Workflow(name="Async Streaming Steps Test", db=shared_db, steps=[sequence])

    events = []
    async for event in wf.arun(input="async stream test", stream=True, stream_events=True):
        events.append(event)

    # The stream produced events, and the workflow completed exactly once.
    assert len(events) > 0
    completions = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completions) == 1
def test_steps_chaining(shared_db):
    """Test that steps properly chain outputs through a workflow."""
    sequence = Steps(
        name="chaining_steps",
        steps=[
            Step(name="first", executor=lambda x: StepOutput(content="first_output")),
            Step(name="second", executor=lambda x: StepOutput(content=f"second_{x.previous_step_content}")),
            Step(name="third", executor=lambda x: StepOutput(content=f"third_{x.previous_step_content}")),
        ],
    )
    wf = Workflow(name="Chaining Test", db=shared_db, steps=[sequence])

    result = wf.run(input="test")

    # The final step's content carries every previous step's contribution.
    assert find_content_in_steps(result.step_results[0], "third_second_first_output")
def test_empty_steps(shared_db):
    """Test a workflow whose only step is an empty Steps container."""
    wf = Workflow(
        name="Empty Steps Test",
        db=shared_db,
        steps=[Steps(name="empty_steps", steps=[])],
    )
    result = wf.run(input="test")

    # The workflow surfaces the "nothing to do" message from the empty container.
    assert "No steps to execute" in result.content
def test_steps_media_aggregation(shared_db):
    """Test Steps media aggregation."""
    sequence = Steps(
        name="media_steps",
        steps=[
            Step(name="step1", executor=lambda x: StepOutput(content="content1", images=["image1.jpg"])),
            Step(name="step2", executor=lambda x: StepOutput(content="content2", videos=["video1.mp4"])),
            Step(name="step3", executor=lambda x: StepOutput(content="content3", audio=["audio1.mp3"])),
        ],
    )
    wf = Workflow(name="Media Test", db=shared_db, steps=[sequence])

    result = wf.run(input="test")

    # Each inner step keeps its own media artifacts inside the container.
    container = result.step_results[0]
    assert container.steps[0].images == ["image1.jpg"]
    assert container.steps[1].videos == ["video1.mp4"]
    assert container.steps[2].audio == ["audio1.mp3"]

    # Content summary on the container reflects the last inner step.
    assert find_content_in_steps(result.step_results[0], "content3")
def test_nested_steps(shared_db):
    """Test nested Steps (a Steps container inside another Steps container)."""
    inner = Steps(
        name="inner_steps",
        steps=[
            Step(name="inner1", executor=lambda x: StepOutput(content="inner1")),
            Step(name="inner2", executor=lambda x: StepOutput(content=f"inner2_{x.previous_step_content}")),
        ],
    )
    outer = Steps(
        name="outer_steps",
        steps=[
            inner,
            Step(name="outer", executor=lambda x: StepOutput(content=f"outer_{x.previous_step_content}")),
        ],
    )
    wf = Workflow(name="Nested Test", db=shared_db, steps=[outer])

    result = wf.run(input="test")
    container = result.step_results[0]

    # New behavior: outer step receives deepest content from inner_steps ("inner2_inner1")
    assert container.steps[1].content == "outer_inner2_inner1"
    # Inner steps still contain their nested outputs
    assert find_content_in_steps(container.steps[0], "inner2_inner1")
def test_steps_with_other_workflow_steps(shared_db):
    """Test Steps in a workflow mixed with free-standing steps."""
    grouped = Steps(
        name="grouped_steps",
        steps=[
            Step(name="grouped1", executor=lambda x: StepOutput(content=f"grouped1_{x.previous_step_content}")),
            Step(name="grouped2", executor=lambda x: StepOutput(content=f"grouped2_{x.previous_step_content}")),
        ],
    )
    wf = Workflow(
        name="Mixed Workflow",
        db=shared_db,
        steps=[
            Step(name="individual", executor=lambda x: StepOutput(content="individual_output")),
            grouped,
            Step(name="final", executor=lambda x: StepOutput(content=f"final_{x.previous_step_content}")),
        ],
    )

    result = wf.run(input="test")
    assert len(result.step_results) == 3

    # New behavior: final step receives deepest content from grouped_steps
    assert result.step_results[2].content == "final_grouped2_grouped1_individual_output"
    # Grouped container still carries nested results
    assert find_content_in_steps(result.step_results[1], "grouped2_grouped1_individual_output")
# ============================================================================
# EARLY TERMINATION / STOP PROPAGATION TESTS
# ============================================================================
def early_stop_step(step_input: StepInput) -> StepOutput:
    """Step that requests early termination of the workflow."""
    return StepOutput(content="Early stop requested", success=True, stop=True)
def should_not_run_step(step_input: StepInput) -> StepOutput:
    """Sentinel step: must never execute after an early stop."""
    return StepOutput(content="This step should not have run", success=True)
def normal_step(step_input: StepInput) -> StepOutput:
    """Plain step that completes successfully without requesting a stop."""
    return StepOutput(content="Normal step output", success=True)
def test_steps_propagates_stop_flag():
    """Test that Steps propagates stop flag from inner steps."""
    sequence = Steps(
        name="Stop Steps",
        steps=[
            Step(name="normal", executor=normal_step),
            Step(name="stop_step", executor=early_stop_step),
        ],
    )
    result = sequence.execute(StepInput(input="test"))

    assert isinstance(result, StepOutput)
    assert result.stop is True, "Steps should propagate stop=True from inner step"
def test_steps_stop_propagation_in_workflow(shared_db):
    """Test that workflow stops when Steps' inner step returns stop=True."""
    wf = Workflow(
        name="Steps Stop Propagation Test",
        db=shared_db,
        steps=[
            Steps(
                name="stop_steps",
                steps=[
                    Step(name="normal", executor=normal_step),
                    Step(name="stop_step", executor=early_stop_step),
                ],
            ),
            should_not_run_step,  # This should NOT execute
        ],
    )
    result = wf.run(input="test")

    assert isinstance(result, WorkflowRunOutput)
    # Should only have 1 step result (the Steps container), not 2
    assert len(result.step_results) == 1, "Workflow should stop after Steps with stop=True"
    assert result.step_results[0].stop is True
def test_steps_stops_inner_steps_on_stop_flag():
    """Test that Steps stops executing remaining inner steps when one returns stop=True."""
    sequence = Steps(
        name="Inner Stop Steps",
        steps=[
            Step(name="first", executor=normal_step),
            Step(name="stop_step", executor=early_stop_step),
            Step(name="should_not_run", executor=should_not_run_step),
        ],
    )
    result = sequence.execute(StepInput(input="test"))

    assert isinstance(result, StepOutput)
    assert result.stop is True
    # Should only have 2 step results (first and stop_step), not 3
    assert len(result.steps) == 2, "Steps should stop after inner step with stop=True"
    assert [s.content for s in result.steps] == ["Normal step output", "Early stop requested"]
def test_steps_streaming_propagates_stop(shared_db):
    """Test that streaming Steps propagates stop flag and stops workflow."""
    wf = Workflow(
        name="Streaming Steps Stop Test",
        db=shared_db,
        steps=[
            Steps(
                name="stop_steps",
                steps=[
                    Step(name="normal", executor=normal_step),
                    Step(name="stop_step", executor=early_stop_step),
                ],
            ),
            should_not_run_step,
        ],
    )
    events = list(wf.run(input="test", stream=True, stream_events=True))

    # The Steps container completed exactly once, with stop set on its
    # second inner result.
    completions = [e for e in events if isinstance(e, StepsExecutionCompletedEvent)]
    assert len(completions) == 1
    inner_results = completions[0].step_results or []
    assert len(inner_results) == 2
    assert inner_results[1].stop is True

    # Most importantly: verify should_not_run_step was NOT executed
    executed_names = [
        e.step_name for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))
    ]
    assert "should_not_run_step" not in executed_names, "Workflow should have stopped before should_not_run_step"
@pytest.mark.asyncio
async def test_async_steps_propagates_stop():
    """Test that async Steps propagates stop flag."""
    sequence = Steps(
        name="Async Stop Steps",
        steps=[
            Step(name="normal", executor=normal_step),
            Step(name="stop_step", executor=early_stop_step),
        ],
    )
    result = await sequence.aexecute(StepInput(input="test"))

    assert isinstance(result, StepOutput)
    assert result.stop is True, "Async Steps should propagate stop=True from inner step"
@pytest.mark.asyncio
async def test_async_steps_streaming_propagates_stop(shared_db):
    """Test that async streaming Steps propagates stop flag and stops workflow."""
    wf = Workflow(
        name="Async Streaming Steps Stop Test",
        db=shared_db,
        steps=[
            Steps(
                name="stop_steps",
                steps=[
                    Step(name="normal", executor=normal_step),
                    Step(name="stop_step", executor=early_stop_step),
                ],
            ),
            should_not_run_step,
        ],
    )
    events = []
    async for event in wf.arun(input="test", stream=True, stream_events=True):
        events.append(event)

    # The Steps container completed once, with stop set on the second inner result.
    completions = [e for e in events if isinstance(e, StepsExecutionCompletedEvent)]
    assert len(completions) == 1
    inner_results = completions[0].step_results or []
    assert len(inner_results) == 2
    assert inner_results[1].stop is True

    # Most importantly: verify should_not_run_step was NOT executed
    executed_names = [
        e.step_name for e in events if isinstance(e, (StepStartedEvent, StepCompletedEvent))
    ]
    assert "should_not_run_step" not in executed_names, "Workflow should have stopped before should_not_run_step"
def test_steps_first_step_stops():
    """Test Steps when the very first step requests stop."""
    sequence = Steps(
        name="First Stop Steps",
        steps=[
            Step(name="stop_first", executor=early_stop_step),
            Step(name="should_not_run", executor=should_not_run_step),
        ],
    )
    result = sequence.execute(StepInput(input="test"))

    assert isinstance(result, StepOutput)
    assert result.stop is True
    # Only the stopping step ran; the second was skipped entirely.
    assert len(result.steps) == 1, "Steps should stop after first step with stop=True"
    assert result.steps[0].content == "Early stop requested"
def test_steps_no_stop():
    """Test Steps when no inner steps request stop."""
    sequence = Steps(
        name="No Stop Steps",
        steps=[
            Step(name="first", executor=normal_step),
            Step(name="second", executor=step2_function),
        ],
    )
    result = sequence.execute(StepInput(input="test"))

    assert isinstance(result, StepOutput)
    assert result.stop is False, "Steps should not set stop when no inner step requests it"
    assert len(result.steps) == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_steps.py",
"license": "Apache License 2.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_structured_output_flow.py | """Integration tests for structured output flow between workflow steps."""
from typing import List
import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team import Team
from agno.workflow import Step, Workflow
from agno.workflow.types import StepInput, StepOutput
# Define structured models for testing
class ResearchData(BaseModel):
    """Structured research data."""

    topic: str = Field(description="The research topic")
    # `min_length` is the pydantic-v2 constraint for list length;
    # `min_items` is the deprecated v1 alias.
    insights: List[str] = Field(description="Key insights", min_length=2)
    confidence: float = Field(description="Confidence score", ge=0.0, le=1.0)
class AnalysisResult(BaseModel):
    """Structured analysis result."""

    summary: str = Field(description="Analysis summary")
    # `min_length` replaces the deprecated pydantic-v1 `min_items` alias.
    recommendations: List[str] = Field(description="Recommendations", min_length=2)
    priority: str = Field(description="Priority level")
class FinalReport(BaseModel):
    """Final structured report."""

    title: str = Field(description="Report title")
    content: str = Field(description="Report content")
    # `min_length` replaces the deprecated pydantic-v1 `min_items` alias.
    metrics: List[str] = Field(description="Success metrics", min_length=1)
# Test functions for structured output
def research_function(step_input: StepInput) -> StepOutput:
    """Function that returns structured data."""
    data = ResearchData(
        topic="AI Testing",
        insights=["AI is evolving rapidly", "Testing is crucial for AI systems"],
        confidence=0.85,
    )
    return StepOutput(content=data)
def analysis_function(step_input: StepInput) -> StepOutput:
    """Function that processes structured input and returns structured output."""
    incoming = step_input.previous_step_content

    # The previous step must have handed over a typed ResearchData instance.
    assert isinstance(incoming, ResearchData)
    assert incoming.topic == "AI Testing"
    assert len(incoming.insights) == 2

    # Derive a structured result from the structured input.
    return StepOutput(
        content=AnalysisResult(
            summary=f"Analysis of {incoming.topic}",
            recommendations=["Implement testing framework", "Monitor AI performance"],
            priority="High",
        )
    )
def final_function(step_input: StepInput) -> StepOutput:
    """Function that creates the final report from structured analysis data."""
    incoming = step_input.previous_step_content

    # The previous step must have handed over a typed AnalysisResult instance.
    assert isinstance(incoming, AnalysisResult)
    assert incoming.priority == "High"

    return StepOutput(
        content=FinalReport(
            title="AI Testing Report",
            content=f"Report based on: {incoming.summary}",
            metrics=["Test coverage", "Performance metrics"],
        )
    )
def test_structured_output_function_flow_sync(shared_db):
    """Test structured output flow between functions - sync."""
    wf = Workflow(
        name="Structured Function Flow",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_function),
            Step(name="analysis", executor=analysis_function),
            Step(name="final", executor=final_function),
        ],
    )

    result = wf.run(input="test structured flow")

    # Every step should have produced a result.
    assert len(result.step_results) == 3
    first, second, third = result.step_results

    # Each step must emit its own structured model type...
    assert isinstance(first.content, ResearchData)
    assert isinstance(second.content, AnalysisResult)
    assert isinstance(third.content, FinalReport)

    # ...and the data must have flowed through unchanged.
    assert first.content.topic == "AI Testing"
    assert second.content.summary == "Analysis of AI Testing"
    assert third.content.title == "AI Testing Report"
def test_structured_output_function_flow_streaming(shared_db):
    """Test structured output flow between functions - streaming."""
    wf = Workflow(
        name="Structured Function Flow Streaming",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_function),
            Step(name="analysis", executor=analysis_function),
            Step(name="final", executor=final_function),
        ],
    )

    collected = [e for e in wf.run(input="test structured flow", stream=True)]

    from agno.run.workflow import WorkflowCompletedEvent

    # Exactly one completion event closes the stream.
    done = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done) == 1

    # The completion event carries the final structured model.
    assert isinstance(done[0].content, FinalReport)
    assert done[0].content.title == "AI Testing Report"
@pytest.mark.asyncio
async def test_structured_output_function_flow_async(shared_db):
    """Test structured output flow between functions - async."""
    wf = Workflow(
        name="Async Structured Function Flow",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_function),
            Step(name="analysis", executor=analysis_function),
            Step(name="final", executor=final_function),
        ],
    )

    result = await wf.arun(input="test structured flow")

    # All three steps should have run.
    assert len(result.step_results) == 3

    # The last step's output is the structured final report.
    last = result.step_results[2]
    assert isinstance(last.content, FinalReport)
    assert last.content.title == "AI Testing Report"
@pytest.mark.asyncio
async def test_structured_output_function_flow_async_streaming(shared_db):
    """Test structured output flow between functions - async streaming."""
    wf = Workflow(
        name="Async Structured Function Flow Streaming",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_function),
            Step(name="analysis", executor=analysis_function),
            Step(name="final", executor=final_function),
        ],
    )

    collected = [ev async for ev in wf.arun(input="test structured flow", stream=True)]

    from agno.run.workflow import WorkflowCompletedEvent

    # Exactly one completion event closes the stream.
    done = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done) == 1

    # The completion event carries the final structured model.
    assert isinstance(done[0].content, FinalReport)
    assert done[0].content.title == "AI Testing Report"
def test_structured_output_agent_flow_sync(shared_db):
    """Test structured output flow between agents - sync."""
    # Each agent pins a different response model so the chain exercises
    # structured hand-off at every hop.
    researcher = Agent(
        name="Research Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide research data in structured format"],
    )
    analyst = Agent(
        name="Analysis Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=AnalysisResult,
        instructions=["Analyze the research data and provide structured results"],
    )
    reporter = Agent(
        name="Final Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=FinalReport,
        instructions=["Create a final report based on the analysis"],
    )

    wf = Workflow(
        name="Structured Agent Flow",
        db=shared_db,
        steps=[
            Step(name="research", agent=researcher),
            Step(name="analysis", agent=analyst),
            Step(name="final", agent=reporter),
        ],
    )

    result = wf.run(input="Research AI testing methodologies")

    # Every step should have produced a result.
    assert len(result.step_results) == 3

    # Each step's content must be the structured model its agent declared.
    first, second, third = result.step_results
    assert isinstance(first.content, ResearchData)
    assert isinstance(second.content, AnalysisResult)
    assert isinstance(third.content, FinalReport)
def test_structured_output_agent_flow_streaming(shared_db):
    """Test structured output flow between agents - streaming."""
    # Two-agent chain, each with its own structured response model.
    researcher = Agent(
        name="Research Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide research data in structured format"],
    )
    analyst = Agent(
        name="Analysis Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=AnalysisResult,
        instructions=["Analyze the research data and provide structured results"],
    )

    wf = Workflow(
        name="Structured Agent Flow Streaming",
        db=shared_db,
        steps=[
            Step(name="research", agent=researcher),
            Step(name="analysis", agent=analyst),
        ],
    )

    collected = [e for e in wf.run(input="Research AI testing methodologies", stream=True)]

    from agno.run.workflow import WorkflowCompletedEvent

    # Exactly one completion event closes the stream.
    done = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done) == 1

    # The completion event carries the last agent's structured model.
    assert isinstance(done[0].content, AnalysisResult)
@pytest.mark.asyncio
async def test_structured_output_agent_flow_async(shared_db):
    """Test structured output flow between agents - async."""
    # Two-agent chain, each with its own structured response model.
    researcher = Agent(
        name="Research Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide research data in structured format"],
    )
    analyst = Agent(
        name="Analysis Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=AnalysisResult,
        instructions=["Analyze the research data and provide structured results"],
    )

    wf = Workflow(
        name="Async Structured Agent Flow",
        db=shared_db,
        steps=[
            Step(name="research", agent=researcher),
            Step(name="analysis", agent=analyst),
        ],
    )

    result = await wf.arun(input="Research AI testing methodologies")

    # Both steps should have produced structured results.
    assert len(result.step_results) == 2
    first, second = result.step_results
    assert isinstance(first.content, ResearchData)
    assert isinstance(second.content, AnalysisResult)
@pytest.mark.asyncio
async def test_structured_output_agent_flow_async_streaming(shared_db):
    """Test structured output flow between agents - async streaming."""
    # Two-agent chain, each with its own structured response model.
    researcher = Agent(
        name="Research Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide research data in structured format"],
    )
    analyst = Agent(
        name="Analysis Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=AnalysisResult,
        instructions=["Analyze the research data and provide structured results"],
    )

    wf = Workflow(
        name="Async Structured Agent Flow Streaming",
        db=shared_db,
        steps=[
            Step(name="research", agent=researcher),
            Step(name="analysis", agent=analyst),
        ],
    )

    collected = []
    async for ev in wf.arun(input="Research AI testing methodologies", stream=True):
        collected.append(ev)

    from agno.run.workflow import WorkflowCompletedEvent

    # Exactly one completion event closes the stream.
    done = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done) == 1

    # The completion event carries the last agent's structured model.
    assert isinstance(done[0].content, AnalysisResult)
def test_structured_output_team_flow_sync(shared_db):
    """Test structured output flow with team - sync (simplified)."""
    # Single-member team whose team-level output schema is structured.
    member = Agent(
        name="Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Provide brief research data"],
    )
    team = Team(
        name="Research Team",
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide structured research data"],
    )

    wf = Workflow(
        name="Simple Team Flow",
        db=shared_db,
        steps=[
            Step(name="research", team=team),
        ],
    )

    result = wf.run(input="Brief AI research")

    # The single team step must yield the team's structured model.
    assert len(result.step_results) == 1
    assert isinstance(result.step_results[0].content, ResearchData)
def test_structured_output_team_flow_streaming(shared_db):
    """Test structured output flow with team - streaming (simplified)."""
    # Single-member team whose team-level output schema is structured.
    member = Agent(
        name="Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Provide brief research data"],
    )
    team = Team(
        name="Research Team",
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide structured research data"],
    )

    wf = Workflow(
        name="Simple Team Flow Streaming",
        db=shared_db,
        steps=[
            Step(name="research", team=team),
        ],
    )

    collected = [e for e in wf.run(input="Brief AI research", stream=True)]

    from agno.run.workflow import WorkflowCompletedEvent

    # Exactly one completion event closes the stream.
    done = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done) == 1

    # The completion event carries the team's structured model.
    assert isinstance(done[0].content, ResearchData)
@pytest.mark.asyncio
async def test_structured_output_team_flow_async(shared_db):
    """Test structured output flow with team - async (simplified)."""
    # Single-member team whose team-level output schema is structured.
    member = Agent(
        name="Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Provide brief research data"],
    )
    team = Team(
        name="Research Team",
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide structured research data"],
    )

    wf = Workflow(
        name="Simple Async Team Flow",
        db=shared_db,
        steps=[
            Step(name="research", team=team),
        ],
    )

    result = await wf.arun(input="Brief AI research")

    # The single team step must yield the team's structured model.
    assert len(result.step_results) == 1
    assert isinstance(result.step_results[0].content, ResearchData)
@pytest.mark.asyncio
async def test_structured_output_team_flow_async_streaming(shared_db):
    """Test structured output flow with team - async streaming (simplified)."""
    # Single-member team whose team-level output schema is structured.
    member = Agent(
        name="Researcher",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Provide brief research data"],
    )
    team = Team(
        name="Research Team",
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=ResearchData,
        instructions=["Provide structured research data"],
    )

    wf = Workflow(
        name="Simple Async Team Flow Streaming",
        db=shared_db,
        steps=[
            Step(name="research", team=team),
        ],
    )

    collected = []
    async for ev in wf.arun(input="Brief AI research", stream=True):
        collected.append(ev)

    from agno.run.workflow import WorkflowCompletedEvent

    # Exactly one completion event closes the stream.
    done = [ev for ev in collected if isinstance(ev, WorkflowCompletedEvent)]
    assert len(done) == 1

    # The completion event carries the team's structured model.
    assert isinstance(done[0].content, ResearchData)
def test_mixed_structured_output_flow(shared_db):
    """Test mixed structured output flow (function -> agent -> team) - simplified."""
    # Middle hop: an agent with a structured response model.
    analyst = Agent(
        name="Analysis Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=AnalysisResult,
        instructions=["Analyze briefly"],
    )

    # Last hop: a single-member team with a structured response model.
    writer = Agent(
        name="Report Writer",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Create brief reports"],
    )
    report_team = Team(
        name="Final Team",
        members=[writer],
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=FinalReport,
        instructions=["Create brief structured report"],
    )

    wf = Workflow(
        name="Mixed Structured Flow",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_function),  # Function (fast)
            Step(name="analysis", agent=analyst),  # Agent
            Step(name="final", team=report_team),  # Team
        ],
    )

    result = wf.run(input="test mixed flow")

    # Every step should have produced a result.
    assert len(result.step_results) == 3

    # Each executor kind must still hand over its structured model.
    first, second, third = result.step_results
    assert isinstance(first.content, ResearchData)
    assert isinstance(second.content, AnalysisResult)
    assert isinstance(third.content, FinalReport)
def test_structured_output_with_workflow_components(shared_db):
    """Test structured output flow with workflow components (Steps, Loop, Condition)."""
    from agno.workflow import Condition, Loop, Steps

    def always_run(step_input: StepInput) -> bool:
        """Simple condition - always true for testing."""
        return True

    def stop_after_first(outputs):
        """End loop after 1 iteration."""
        return len(outputs) >= 1

    # Route the same structured functions through the three container
    # components so the hand-off is exercised inside each wrapper.
    wf = Workflow(
        name="Simple Component Flow",
        db=shared_db,
        steps=[
            Steps(
                name="research_steps",
                steps=[
                    Step(name="research", executor=research_function),
                ],
            ),
            Condition(
                name="analysis_condition",
                evaluator=always_run,
                steps=[Step(name="analysis", executor=analysis_function)],
            ),
            Loop(
                name="final_loop",
                steps=[Step(name="final", executor=final_function)],
                end_condition=stop_after_first,
                max_iterations=1,
            ),
        ],
    )

    result = wf.run(input="test simple component flow")

    # One top-level result per component.
    assert len(result.step_results) == 3
    steps_result, condition_result, loop_result = result.step_results

    # Steps wrapper: the inner research step carries the structured model.
    assert hasattr(steps_result, "steps") and steps_result.steps
    assert isinstance(steps_result.steps[0].content, ResearchData)

    # Condition wrapper: the inner analysis step carries the structured model.
    assert hasattr(condition_result, "steps") and condition_result.steps
    assert isinstance(condition_result.steps[0].content, AnalysisResult)

    # Loop wrapper: the inner final step carries the structured model.
    assert hasattr(loop_result, "steps") and loop_result.steps
    inner_final = loop_result.steps[0]
    assert isinstance(inner_final.content, FinalReport)
    assert inner_final.content.title == "AI Testing Report"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_structured_output_flow.py",
"license": "Apache License 2.0",
"lines": 497,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.