sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/agno/os/interfaces/slack/helpers.py | from typing import Any, Dict, List, Optional, Tuple
import httpx
from agno.media import Audio, File, Image, Video
from agno.utils.log import log_error
def task_id(agent_name: Optional[str], base_id: str) -> str:
    """Build a task-card ID, namespaced by agent when one is given.

    Prefixing per agent keeps concurrent tool calls from different team
    members from colliding in the Slack stream.
    """
    if not agent_name:
        return base_id
    prefix = agent_name.lower().replace(" ", "_")[:20]
    return f"{prefix}_{base_id}"
def member_name(chunk: Any, entity_name: str) -> Optional[str]:
    """Return the originating team member's name, or None for the leader.

    Used to prefix task-card labels like "Researcher: web_search" so events
    from different team members stay distinguishable.
    """
    candidate = getattr(chunk, "agent_name", None)
    if not isinstance(candidate, str) or not candidate:
        return None
    return candidate if candidate != entity_name else None
def should_respond(event: dict, reply_to_mentions_only: bool) -> bool:
    """Decide whether the bot should handle a Slack event.

    DMs always get a response. In channels, mentions-only mode handles
    app_mention events; otherwise plain message events are handled and
    app_mention is dropped, because Slack fires both app_mention and message
    for the same @mention and the message event already covers it.
    """
    kind = event.get("type")
    if kind not in ("app_mention", "message"):
        return False
    is_dm = event.get("channel_type", "") == "im"
    if is_dm:
        return True
    if reply_to_mentions_only:
        # Channel traffic only counts when explicitly @mentioned.
        return kind != "message"
    # Responding to everything: drop the duplicate app_mention event.
    return kind != "app_mention"
def extract_event_context(event: dict) -> Dict[str, Any]:
    """Pull the fields the responder needs out of a raw Slack event."""
    # An existing thread wins; otherwise the message ts starts a new thread.
    thread = event.get("thread_ts") or event.get("ts", "")
    context: Dict[str, Any] = {
        "message_text": event.get("text", ""),
        "channel_id": event.get("channel", ""),
        "user": event.get("user", ""),
        "thread_id": thread,
    }
    return context
async def download_event_files_async(
    token: str, event: dict, max_file_size: int
) -> Tuple[List[File], List[Image], List[Video], List[Audio], List[str]]:
    """Download files attached to a Slack event and sort them by media type.

    Args:
        token: Slack bot token used for the Authorization header.
        event: Slack event payload; attachments are read from event["files"].
        max_file_size: Per-file size cap in bytes; larger files are skipped.

    Returns:
        Tuple of (files, images, videos, audio, skipped), where ``skipped``
        holds human-readable descriptions of files rejected for size.
    """
    files: List[File] = []
    images: List[Image] = []
    videos: List[Video] = []
    audio: List[Audio] = []
    skipped: List[str] = []
    if not event.get("files"):
        return files, images, videos, audio, skipped
    headers = {"Authorization": f"Bearer {token}"}
    async with httpx.AsyncClient() as client:
        for file_info in event["files"]:
            file_id = file_info.get("id")
            filename = file_info.get("name", "file")
            mimetype = file_info.get("mimetype", "application/octet-stream")
            file_size = file_info.get("size", 0)
            if file_size > max_file_size:
                limit_mb = max_file_size / (1024 * 1024)
                actual_mb = file_size / (1024 * 1024)
                # Bug fix: report the actual filename instead of the literal
                # "(unknown)" so users can tell which attachment was rejected.
                skipped.append(f"{filename} ({actual_mb:.1f}MB — exceeds {limit_mb:.0f}MB limit)")
                continue
            url_private = file_info.get("url_private")
            if not url_private:
                continue
            try:
                resp = await client.get(url_private, headers=headers, timeout=30)
                resp.raise_for_status()
                file_content = resp.content
                if mimetype.startswith("image/"):
                    fmt = mimetype.split("/")[-1]
                    images.append(Image(content=file_content, id=file_id, mime_type=mimetype, format=fmt))
                elif mimetype.startswith("video/"):
                    videos.append(Video(content=file_content, mime_type=mimetype))
                elif mimetype.startswith("audio/"):
                    audio.append(Audio(content=file_content, mime_type=mimetype))
                else:
                    # Pass None for unsupported types to avoid File validation errors
                    safe_mime = mimetype if mimetype in File.valid_mime_types() else None
                    files.append(File(content=file_content, filename=filename, mime_type=safe_mime))
            except Exception as e:
                # Best-effort: a failed download is logged and skipped, never fatal.
                log_error(f"Failed to download file {file_id}: {e}")
    return files, images, videos, audio, skipped
async def upload_response_media_async(async_client: Any, response: Any, channel_id: str, thread_ts: str) -> None:
    """Upload any media attached to a run response into the Slack thread.

    Failures are logged and skipped so one bad upload never blocks the rest.
    """
    # (attribute on response, filename used when the item carries none)
    for attr_name, fallback_name in (
        ("images", "image.png"),
        ("files", "file"),
        ("videos", "video.mp4"),
        ("audio", "audio.mp3"),
    ):
        for media in getattr(response, attr_name, None) or []:
            payload = media.get_content_bytes()
            if not payload:
                continue
            try:
                await async_client.files_upload_v2(
                    channel=channel_id,
                    content=payload,
                    filename=getattr(media, "filename", None) or fallback_name,
                    thread_ts=thread_ts,
                )
            except Exception as e:
                log_error(f"Failed to upload {attr_name.rstrip('s')}: {e}")
async def send_slack_message_async(
    async_client: Any, channel: str, thread_ts: str, message: str, italics: bool = False
) -> None:
    """Post a message into a Slack thread, splitting into numbered batches
    when it exceeds Slack's per-message length limit."""
    if not message or not message.strip():
        return

    def _format(text: str) -> str:
        # Slack italic markup is per-line, so wrap each line individually.
        if not italics:
            return text
        return "\n".join(f"_{ln}_" for ln in text.split("\n"))

    # Under Slack's 40K char limit with margin for batch prefix overhead
    max_len = 39900
    if len(message) <= max_len:
        await async_client.chat_postMessage(channel=channel, text=_format(message), thread_ts=thread_ts)
        return
    total = (len(message) + max_len - 1) // max_len
    for index in range(total):
        part = message[index * max_len : (index + 1) * max_len]
        numbered = f"[{index + 1}/{total}] {part}"
        await async_client.chat_postMessage(channel=channel, text=_format(numbered), thread_ts=thread_ts)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/slack/helpers.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/interfaces/slack/state.py | from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Dict, List, Literal, Optional
from typing_extensions import TypedDict
if TYPE_CHECKING:
from agno.media import Audio, File, Image, Video
from agno.run.base import BaseRunOutputEvent
# Literal not Enum — values flow directly into Slack API dicts as plain strings
TaskStatus = Literal["in_progress", "complete", "error"]


class TaskUpdateDict(TypedDict):
    """Shape of a task-card update chunk emitted to the Slack stream."""

    type: str  # chunk discriminator, e.g. "task_update" (see resolve_all_pending)
    id: str  # task-card key
    title: str  # human-readable card title
    status: TaskStatus
@dataclass
class TaskCard:
    """A single Slack task card: its display title and current status."""

    title: str
    status: TaskStatus = "in_progress"  # cards start in progress until resolved
@dataclass
class StreamState:
    """Mutable accumulator for one Slack streaming session.

    Buffers text, tracks task cards, and collects media until the stream
    ends and everything is flushed and uploaded.
    """

    # Slack thread title — set once on first content to avoid repeated API calls
    title_set: bool = False
    # Incremented per error; used to generate unique fallback task card IDs
    error_count: int = 0
    text_buffer: str = ""
    # Counter for unique reasoning task card keys (reasoning_0, reasoning_1, ...)
    reasoning_round: int = 0
    task_cards: Dict[str, TaskCard] = field(default_factory=dict)
    images: List["Image"] = field(default_factory=list)
    videos: List["Video"] = field(default_factory=list)
    audio: List["Audio"] = field(default_factory=list)
    files: List["File"] = field(default_factory=list)
    # Used by process_event to suppress nested agent events in workflow mode
    entity_type: Literal["agent", "team", "workflow"] = "agent"
    # Leader/workflow name; member_name() compares against it to detect team members
    entity_name: str = ""
    # Last StepOutput content; WorkflowCompleted uses as fallback when content is None
    workflow_final_content: str = ""
    # Set by handlers on terminal events; router reads this for the final flush
    terminal_status: Optional[TaskStatus] = None

    def track_task(self, key: str, title: str) -> None:
        """Register a new card under *key*, starting in_progress."""
        self.task_cards[key] = TaskCard(title=title)

    def _mark(self, key: str, new_status: TaskStatus) -> None:
        # Shared status setter; unknown keys are a silent no-op.
        card = self.task_cards.get(key)
        if card is not None:
            card.status = new_status

    def complete_task(self, key: str) -> None:
        """Mark the card for *key* complete (no-op if the key is unknown)."""
        self._mark(key, "complete")

    def error_task(self, key: str) -> None:
        """Mark the card for *key* errored (no-op if the key is unknown)."""
        self._mark(key, "error")

    def resolve_all_pending(self, status: TaskStatus = "complete") -> List[TaskUpdateDict]:
        """Close every card still in_progress and return their update chunks.

        Called at stream end so cards left open (e.g. the model finished
        without a ToolCallCompleted for every start) don't hang in the UI.
        """
        updates: List[TaskUpdateDict] = []
        for card_key, card in self.task_cards.items():
            if card.status != "in_progress":
                continue
            card.status = status  # type: ignore[assignment]
            updates.append(TaskUpdateDict(type="task_update", id=card_key, title=card.title, status=status))
        return updates

    def append_content(self, text: str) -> None:
        """Append *text* (coerced to str) to the outgoing buffer."""
        self.text_buffer = self.text_buffer + str(text)

    def append_error(self, error_msg: str) -> None:
        """Append an italicized error note to the outgoing buffer."""
        self.text_buffer += f"\n_Error: {error_msg}_"

    def has_content(self) -> bool:
        """True when buffered text is waiting to be flushed."""
        return len(self.text_buffer) > 0

    def flush(self) -> str:
        """Return the buffered text and reset the buffer."""
        buffered, self.text_buffer = self.text_buffer, ""
        return buffered

    def collect_media(self, chunk: BaseRunOutputEvent) -> None:
        """Collect media off *chunk*, deduplicating, for post-stream upload.

        Media can't be streamed inline — Slack requires a separate upload
        after the stream ends; upload_response_media() sends what we gather.
        """
        for source, sink in (
            (getattr(chunk, "images", None), self.images),
            (getattr(chunk, "videos", None), self.videos),
            (getattr(chunk, "audio", None), self.audio),
            (getattr(chunk, "files", None), self.files),
        ):
            for item in source or []:
                if item not in sink:
                    sink.append(item)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/interfaces/slack/state.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_slack_helpers.py | from unittest.mock import AsyncMock, Mock, patch
import pytest
from agno.os.interfaces.slack.helpers import (
download_event_files_async,
extract_event_context,
member_name,
send_slack_message_async,
should_respond,
task_id,
upload_response_media_async,
)
from agno.os.interfaces.slack.state import StreamState
class TestTaskId:
    """Unit tests for task_id() card-ID namespacing."""

    def test_truncates_long_name(self):
        # Agent prefix is lowercased, underscored, and capped at 20 chars.
        result = task_id("A Very Long Agent Name Here", "id1")
        assert result == "a_very_long_agent_na_id1"
        assert len("a_very_long_agent_na") == 20

    def test_none_returns_base(self):
        # Without an agent name the base ID passes through untouched.
        assert task_id(None, "base_id") == "base_id"
class TestMemberName:
    """Unit tests for member_name() team-member detection."""

    def test_different_name_returned(self):
        chunk = Mock(agent_name="Research Agent")
        assert member_name(chunk, "Main Agent") == "Research Agent"

    def test_missing_attr_returns_none(self):
        # spec=[] makes the mock raise AttributeError for agent_name.
        chunk = Mock(spec=[])
        assert member_name(chunk, "Main Agent") is None
class TestShouldRespond:
    """Routing rules for which Slack events the bot answers."""

    def test_app_mention_always_responds(self):
        assert should_respond({"type": "app_mention", "text": "hello"}, reply_to_mentions_only=True) is True

    def test_dm_always_responds(self):
        assert should_respond({"type": "message", "channel_type": "im"}, reply_to_mentions_only=True) is True

    def test_channel_blocked_with_mentions_only(self):
        assert should_respond({"type": "message", "channel_type": "channel"}, reply_to_mentions_only=True) is False

    def test_channel_allowed_without_mentions_only(self):
        assert should_respond({"type": "message", "channel_type": "channel"}, reply_to_mentions_only=False) is True

    def test_unknown_event_type(self):
        assert should_respond({"type": "reaction_added"}, reply_to_mentions_only=False) is False

    def test_app_mention_skipped_when_not_mentions_only(self):
        # Slack fires message AND app_mention for one @mention; only one should win.
        assert should_respond({"type": "app_mention", "channel_type": "channel"}, reply_to_mentions_only=False) is False

    def test_app_mention_dm_still_works(self):
        assert should_respond({"type": "app_mention", "channel_type": "im"}, reply_to_mentions_only=False) is True
class TestExtractEventContext:
    """thread_id resolution from Slack event payloads."""

    def test_prefers_thread_ts(self):
        ctx = extract_event_context({"text": "hi", "channel": "C1", "user": "U1", "ts": "111", "thread_ts": "222"})
        assert ctx["thread_id"] == "222"

    def test_falls_back_to_ts(self):
        # No thread_ts means this message starts a brand-new thread.
        ctx = extract_event_context({"text": "hi", "channel": "C1", "user": "U1", "ts": "111"})
        assert ctx["thread_id"] == "111"
class TestDownloadEventFilesAsync:
    """Download routing and size-limit behavior for Slack file attachments."""

    @pytest.mark.asyncio
    async def test_video_routing(self):
        # video/* mimetypes must land in the videos list, not files/images.
        mock_response = Mock(content=b"video-data", status_code=200)
        mock_response.raise_for_status = Mock()
        event = {
            "files": [
                {"id": "F1", "name": "clip.mp4", "mimetype": "video/mp4", "url_private": "https://files.slack.com/F1"}
            ]
        }
        with patch("agno.os.interfaces.slack.helpers.httpx.AsyncClient") as mock_httpx:
            mock_client = AsyncMock()
            mock_client.get = AsyncMock(return_value=mock_response)
            mock_httpx.return_value.__aenter__ = AsyncMock(return_value=mock_client)
            mock_httpx.return_value.__aexit__ = AsyncMock(return_value=False)
            files, images, videos, audio, skipped = await download_event_files_async("xoxb-token", event, 1_073_741_824)
        assert len(videos) == 1
        assert len(files) == 0 and len(images) == 0
        assert len(skipped) == 0

    @pytest.mark.asyncio
    async def test_file_over_max_size_skipped(self):
        # 50MB file vs 25MB cap: never downloaded, reported by name in skipped.
        event = {
            "files": [
                {
                    "id": "F1",
                    "name": "huge.zip",
                    "mimetype": "application/zip",
                    "size": 50_000_000,
                    "url_private": "https://files.slack.com/F1",
                },
            ]
        }
        files, images, videos, audio, skipped = await download_event_files_async("xoxb-token", event, 25 * 1024 * 1024)
        assert len(skipped) == 1
        assert "huge.zip" in skipped[0]
class TestSendSlackMessageAsync:
    """Message posting: empty skip, single send, and long-message batching."""

    @pytest.mark.asyncio
    async def test_empty_skipped(self):
        client = AsyncMock()
        await send_slack_message_async(client, "C1", "ts1", "")
        client.chat_postMessage.assert_not_called()

    @pytest.mark.asyncio
    async def test_normal_send(self):
        client = AsyncMock()
        await send_slack_message_async(client, "C1", "ts1", "hello world")
        client.chat_postMessage.assert_called_once_with(channel="C1", text="hello world", thread_ts="ts1")

    @pytest.mark.asyncio
    async def test_long_message_batching(self):
        # 50_000 chars exceeds the ~39.9K per-message cap, so two batches go out.
        client = AsyncMock()
        await send_slack_message_async(client, "C1", "ts1", "x" * 50000)
        assert client.chat_postMessage.call_count == 2
class TestUploadResponseMediaAsync:
    """Uploading response media to Slack after the stream completes."""

    @pytest.mark.asyncio
    async def test_all_types_uploaded(self):
        client = AsyncMock()
        response = Mock(
            images=[Mock(get_content_bytes=Mock(return_value=b"img"), filename="photo.png")],
            files=[Mock(get_content_bytes=Mock(return_value=b"file"), filename="doc.pdf")],
            videos=[Mock(get_content_bytes=Mock(return_value=b"vid"), filename=None)],
            audio=[Mock(get_content_bytes=Mock(return_value=b"aud"), filename=None)],
        )
        await upload_response_media_async(client, response, "C1", "ts1")
        assert client.files_upload_v2.call_count == 4

    @pytest.mark.asyncio
    async def test_exception_continues(self):
        # A failing upload is logged and must not abort the remaining media.
        client = AsyncMock()
        client.files_upload_v2 = AsyncMock(side_effect=RuntimeError("upload failed"))
        response = Mock(
            images=[Mock(get_content_bytes=Mock(return_value=b"img"), filename="photo.png")],
            files=[Mock(get_content_bytes=Mock(return_value=b"file"), filename="doc.pdf")],
            videos=None,
            audio=None,
        )
        with patch("agno.os.interfaces.slack.helpers.log_error"):
            await upload_response_media_async(client, response, "C1", "ts1")
# -- StreamState --
class TestStreamState:
    """Task-card lifecycle, pending resolution, and media dedup on StreamState."""

    def test_track_complete_lifecycle(self):
        state = StreamState()
        state.track_task("tool_1", "Running search")
        assert state.task_cards["tool_1"].status == "in_progress"
        state.complete_task("tool_1")
        assert state.task_cards["tool_1"].status == "complete"
        # Completing an unknown key is a silent no-op.
        state.complete_task("nonexistent")
        assert len(state.task_cards) == 1

    def test_resolve_all_pending_skips_finished(self):
        state = StreamState()
        state.track_task("t1", "Task 1")
        state.complete_task("t1")
        state.track_task("t2", "Task 2")
        state.track_task("t3", "Task 3")
        state.error_task("t3")
        chunks = state.resolve_all_pending()
        # Only the card still in_progress produces an update chunk.
        assert len(chunks) == 1
        assert chunks[0]["id"] == "t2"
        assert state.task_cards["t1"].status == "complete"
        assert state.task_cards["t2"].status == "complete"
        assert state.task_cards["t3"].status == "error"

    def test_collect_media_deduplicates(self):
        state = StreamState()
        chunk = Mock(images=["img1", "img1"], videos=["vid1"], audio=[], files=[])
        state.collect_media(chunk)
        state.collect_media(chunk)
        assert state.images == ["img1"]
        assert state.videos == ["vid1"]

    def test_collect_media_tolerates_none(self):
        state = StreamState()
        chunk = Mock(images=None, videos=None, audio=None, files=None)
        state.collect_media(chunk)
        assert state.images == []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_slack_helpers.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_slack_process_event.py | from unittest.mock import AsyncMock, Mock
import pytest
from agno.agent import RunEvent
from agno.os.interfaces.slack.events import process_event
from agno.os.interfaces.slack.state import StreamState
from agno.run.team import TeamRunEvent
from agno.run.workflow import WorkflowRunEvent
def _stream():
s = AsyncMock()
s.append = AsyncMock()
return s
def _chunk(event, **kwargs):
m = Mock(event=event)
for k, v in kwargs.items():
setattr(m, k, v)
return m
def _tool_mock(tool_name="search", tool_call_id="call_1", tool_call_error=None):
t = Mock()
t.tool_name = tool_name
t.tool_call_id = tool_call_id
t.tool_call_error = tool_call_error
return t
class TestReasoning:
    """Reasoning events create uniquely-keyed task cards per round."""

    @pytest.mark.asyncio
    async def test_started_creates_card(self):
        state = StreamState()
        stream = _stream()
        await process_event(RunEvent.reasoning_started.value, _chunk(RunEvent.reasoning_started.value), state, stream)
        assert "reasoning_0" in state.task_cards
        assert state.task_cards["reasoning_0"].status == "in_progress"

    @pytest.mark.asyncio
    async def test_multiple_rounds_unique_keys(self):
        # A completed round bumps the counter so the next start gets a new key.
        state = StreamState()
        stream = _stream()
        await process_event(RunEvent.reasoning_started.value, _chunk(RunEvent.reasoning_started.value), state, stream)
        await process_event(
            RunEvent.reasoning_completed.value, _chunk(RunEvent.reasoning_completed.value), state, stream
        )
        await process_event(RunEvent.reasoning_started.value, _chunk(RunEvent.reasoning_started.value), state, stream)
        assert "reasoning_0" in state.task_cards
        assert "reasoning_1" in state.task_cards
class TestToolLifecycle:
    """Tool-call events drive task-card creation, completion, and errors."""

    @pytest.mark.asyncio
    async def test_started_creates_card(self):
        state = StreamState()
        stream = _stream()
        chunk = _chunk(RunEvent.tool_call_started.value, tool=_tool_mock(), agent_name=None)
        await process_event(RunEvent.tool_call_started.value, chunk, state, stream)
        assert "call_1" in state.task_cards
        assert state.task_cards["call_1"].status == "in_progress"

    @pytest.mark.asyncio
    async def test_completed_success(self):
        state = StreamState()
        stream = _stream()
        state.track_task("call_1", "search")
        chunk = _chunk(RunEvent.tool_call_completed.value, tool=_tool_mock(), agent_name=None)
        await process_event(RunEvent.tool_call_completed.value, chunk, state, stream)
        assert state.task_cards["call_1"].status == "complete"

    @pytest.mark.asyncio
    async def test_completed_with_error(self):
        # A tool_call_error on the completed event flips the card to error.
        state = StreamState()
        stream = _stream()
        state.track_task("call_1", "search")
        tool = _tool_mock(tool_call_error="timeout")
        chunk = _chunk(RunEvent.tool_call_completed.value, tool=tool, agent_name=None)
        await process_event(RunEvent.tool_call_completed.value, chunk, state, stream)
        assert state.task_cards["call_1"].status == "error"

    @pytest.mark.asyncio
    async def test_tool_error_event(self):
        state = StreamState()
        stream = _stream()
        tool = _tool_mock(tool_call_id="call_err")
        chunk = _chunk(RunEvent.tool_call_error.value, tool=tool, agent_name=None, error="boom")
        await process_event(RunEvent.tool_call_error.value, chunk, state, stream)
        assert state.task_cards["call_err"].status == "error"
        assert state.error_count == 1

    @pytest.mark.asyncio
    async def test_tool_with_member_name(self):
        # Member tool calls get the agent-name prefix from task_id().
        state = StreamState(entity_name="Main Agent")
        stream = _stream()
        chunk = _chunk(RunEvent.tool_call_started.value, tool=_tool_mock(), agent_name="Research Agent")
        await process_event(RunEvent.tool_call_started.value, chunk, state, stream)
        assert "research_agent_call_1" in state.task_cards
class TestContent:
    """Content events buffer text; intermediate team content is suppressed."""

    @pytest.mark.asyncio
    async def test_run_content_buffers(self):
        state = StreamState()
        stream = _stream()
        chunk = _chunk(RunEvent.run_content.value, content="hello")
        await process_event(RunEvent.run_content.value, chunk, state, stream)
        assert state.text_buffer == "hello"

    @pytest.mark.asyncio
    async def test_intermediate_content_suppressed_for_team(self):
        state = StreamState(entity_type="team")
        stream = _stream()
        chunk = _chunk(RunEvent.run_intermediate_content.value, content="partial")
        await process_event(RunEvent.run_intermediate_content.value, chunk, state, stream)
        assert state.text_buffer == ""
class TestMemory:
    """Memory-update events share one fixed task-card key."""

    @pytest.mark.asyncio
    async def test_started_completed_lifecycle(self):
        state = StreamState()
        stream = _stream()
        await process_event(
            RunEvent.memory_update_started.value, _chunk(RunEvent.memory_update_started.value), state, stream
        )
        assert "memory_update" in state.task_cards
        assert state.task_cards["memory_update"].status == "in_progress"
        await process_event(
            RunEvent.memory_update_completed.value, _chunk(RunEvent.memory_update_completed.value), state, stream
        )
        assert state.task_cards["memory_update"].status == "complete"
class TestTerminalEvents:
    """Terminal events end the stream (return True) and set error status."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize("ev", [RunEvent.run_error.value, RunEvent.run_cancelled.value])
    async def test_run_terminal_returns_true(self, ev):
        state = StreamState()
        stream = _stream()
        chunk = _chunk(ev, content="something went wrong")
        result = await process_event(ev, chunk, state, stream)
        assert result is True
        assert state.terminal_status == "error"

    @pytest.mark.asyncio
    @pytest.mark.parametrize("ev", ["WorkflowError", "WorkflowCancelled"])
    async def test_workflow_terminal_returns_true(self, ev):
        state = StreamState()
        stream = _stream()
        chunk = _chunk(ev, error="wf failed", content=None)
        result = await process_event(ev, chunk, state, stream)
        assert result is True
        assert state.terminal_status == "error"
class TestWorkflowSuppression:
    """In workflow mode, nested agent/team events must be fully suppressed."""

    # Agent-level events that should produce no output in workflow mode.
    _SUPPRESSED = [
        RunEvent.run_content.value,
        RunEvent.reasoning_started.value,
        RunEvent.tool_call_started.value,
        RunEvent.tool_call_completed.value,
        RunEvent.tool_call_error.value,
        RunEvent.memory_update_started.value,
        RunEvent.memory_update_completed.value,
        RunEvent.run_intermediate_content.value,
        RunEvent.run_completed.value,
        RunEvent.run_error.value,
        RunEvent.run_cancelled.value,
    ]

    @pytest.mark.asyncio
    @pytest.mark.parametrize("ev", _SUPPRESSED)
    async def test_suppressed_in_workflow_mode(self, ev):
        state = StreamState(entity_type="workflow")
        stream = _stream()
        chunk = _chunk(ev, content="suppressed", tool=None)
        result = await process_event(ev, chunk, state, stream)
        assert result is False
        assert state.text_buffer == ""
        stream.append.assert_not_called()

    @pytest.mark.asyncio
    @pytest.mark.parametrize("ev", _SUPPRESSED)
    async def test_team_prefix_also_suppressed(self, ev):
        # Team-prefixed variants (e.g. TeamRunContent) are suppressed too.
        state = StreamState(entity_type="workflow")
        stream = _stream()
        team_ev = f"Team{ev}"
        chunk = _chunk(team_ev, content="suppressed", tool=None)
        result = await process_event(team_ev, chunk, state, stream)
        assert result is False
        assert state.text_buffer == ""
class TestWorkflowLifecycle:
    """Workflow start/complete events and final-content fallback handling."""

    @pytest.mark.asyncio
    async def test_workflow_started(self):
        state = StreamState(entity_name="News Reporter")
        stream = _stream()
        chunk = _chunk(WorkflowRunEvent.workflow_started.value, workflow_name="News Reporter", run_id="run1")
        await process_event(WorkflowRunEvent.workflow_started.value, chunk, state, stream)
        assert "wf_run_run1" in state.task_cards

    @pytest.mark.asyncio
    async def test_workflow_completed_with_content(self):
        state = StreamState(entity_name="News Reporter")
        stream = _stream()
        chunk = _chunk(
            WorkflowRunEvent.workflow_completed.value,
            content="Final article",
            run_id="run1",
            workflow_name="News Reporter",
        )
        state.track_task("wf_run_run1", "Workflow: News Reporter")
        await process_event(WorkflowRunEvent.workflow_completed.value, chunk, state, stream)
        assert "Final article" in state.text_buffer
        assert state.task_cards["wf_run_run1"].status == "complete"

    @pytest.mark.asyncio
    async def test_workflow_completed_fallback_to_captured(self):
        # content=None falls back to the last captured StepOutput content.
        state = StreamState()
        state.workflow_final_content = "captured output"
        stream = _stream()
        chunk = _chunk(WorkflowRunEvent.workflow_completed.value, content=None, run_id="run1", workflow_name="Test")
        await process_event(WorkflowRunEvent.workflow_completed.value, chunk, state, stream)
        assert "captured output" in state.text_buffer

    @pytest.mark.asyncio
    async def test_step_output_captures_in_workflow(self):
        # StepOutput content is captured for fallback, not buffered directly.
        state = StreamState(entity_type="workflow")
        stream = _stream()
        chunk = _chunk(WorkflowRunEvent.step_output.value, content="step result")
        await process_event(WorkflowRunEvent.step_output.value, chunk, state, stream)
        assert state.workflow_final_content == "step result"
        assert state.text_buffer == ""
class TestStructuralEvents:
    """Structural workflow events: steps, loops, and paired start/complete blocks."""

    @pytest.mark.asyncio
    async def test_step_start_complete(self):
        state = StreamState()
        stream = _stream()
        await process_event(
            WorkflowRunEvent.step_started.value, Mock(step_name="research", step_id="s1"), state, stream
        )
        assert state.task_cards["wf_step_s1"].status == "in_progress"
        await process_event(
            WorkflowRunEvent.step_completed.value, Mock(step_name="research", step_id="s1"), state, stream
        )
        assert state.task_cards["wf_step_s1"].status == "complete"

    @pytest.mark.asyncio
    async def test_loop_full_lifecycle(self):
        # Loop card plus one per-iteration card, each resolving independently.
        state = StreamState()
        stream = _stream()
        await process_event(
            WorkflowRunEvent.loop_execution_started.value,
            Mock(step_name="retry", step_id="l1", max_iterations=3),
            state,
            stream,
        )
        assert "wf_loop_l1" in state.task_cards
        await process_event(
            WorkflowRunEvent.loop_iteration_started.value,
            Mock(step_name="retry", step_id="l1", iteration=1, max_iterations=3),
            state,
            stream,
        )
        assert "wf_loop_l1_iter_1" in state.task_cards
        await process_event(
            WorkflowRunEvent.loop_iteration_completed.value,
            Mock(step_name="retry", step_id="l1", iteration=1),
            state,
            stream,
        )
        assert state.task_cards["wf_loop_l1_iter_1"].status == "complete"
        await process_event(
            WorkflowRunEvent.loop_execution_completed.value, Mock(step_name="retry", step_id="l1"), state, stream
        )
        assert state.task_cards["wf_loop_l1"].status == "complete"

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "started,completed,prefix",
        [
            (
                WorkflowRunEvent.parallel_execution_started,
                WorkflowRunEvent.parallel_execution_completed,
                "wf_parallel_",
            ),
            (WorkflowRunEvent.condition_execution_started, WorkflowRunEvent.condition_execution_completed, "wf_cond_"),
            (WorkflowRunEvent.router_execution_started, WorkflowRunEvent.router_execution_completed, "wf_router_"),
            (WorkflowRunEvent.steps_execution_started, WorkflowRunEvent.steps_execution_completed, "wf_steps_"),
        ],
    )
    async def test_structural_pairs(self, started, completed, prefix):
        # Every structural pair follows the same in_progress -> complete arc.
        state = StreamState()
        stream = _stream()
        await process_event(started.value, Mock(step_name="test", step_id="x1"), state, stream)
        assert state.task_cards[f"{prefix}x1"].status == "in_progress"
        await process_event(completed.value, Mock(step_name="test", step_id="x1"), state, stream)
        assert state.task_cards[f"{prefix}x1"].status == "complete"
class TestNormalization:
    """Team-prefixed events normalize to agent handlers; unknown events no-op."""

    @pytest.mark.asyncio
    async def test_team_events_normalized(self):
        state = StreamState()
        stream = _stream()
        chunk = _chunk(TeamRunEvent.run_content.value, content="team hello")
        await process_event(TeamRunEvent.run_content.value, chunk, state, stream)
        assert state.text_buffer == "team hello"

    @pytest.mark.asyncio
    async def test_unknown_event_returns_false(self):
        state = StreamState()
        stream = _stream()
        result = await process_event("CompletelyUnknownEvent", _chunk("CompletelyUnknownEvent"), state, stream)
        assert result is False
        stream.append.assert_not_called()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_slack_process_event.py",
"license": "Apache License 2.0",
"lines": 281,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/os/routers/test_slack_security.py | import hashlib
import hmac
import time
import pytest
from fastapi import HTTPException
from agno.os.interfaces.slack import security as sec_mod
from agno.os.interfaces.slack.security import verify_slack_signature
def _sign(body: bytes, timestamp: str, secret: str) -> str:
sig_base = f"v0:{timestamp}:{body.decode()}"
return "v0=" + hmac.new(secret.encode(), sig_base.encode(), hashlib.sha256).hexdigest()
class TestVerifySlackSignature:
    """Signature verification: secret sourcing, staleness, and rejection paths."""

    def test_explicit_secret(self):
        body = b'{"test": true}'
        ts = str(int(time.time()))
        secret = "explicit-secret"
        sig = _sign(body, ts, secret)
        assert verify_slack_signature(body, ts, sig, signing_secret=secret) is True

    def test_env_fallback(self):
        # With no explicit secret, the module-level env secret is used.
        body = b'{"test": true}'
        ts = str(int(time.time()))
        env_secret = "env-secret-value"
        sig = _sign(body, ts, env_secret)
        original = sec_mod.SLACK_SIGNING_SECRET
        try:
            sec_mod.SLACK_SIGNING_SECRET = env_secret
            assert verify_slack_signature(body, ts, sig) is True
        finally:
            sec_mod.SLACK_SIGNING_SECRET = original

    def test_empty_string_not_fallback(self):
        # An explicitly-empty secret must NOT silently fall back to the env one.
        body = b'{"test": true}'
        ts = str(int(time.time()))
        original = sec_mod.SLACK_SIGNING_SECRET
        try:
            sec_mod.SLACK_SIGNING_SECRET = "env-secret"
            with pytest.raises(HTTPException) as exc_info:
                verify_slack_signature(body, ts, "v0=fake", signing_secret="")
            assert exc_info.value.status_code == 500
        finally:
            sec_mod.SLACK_SIGNING_SECRET = original

    def test_missing_secret_raises_500(self):
        body = b'{"test": true}'
        ts = str(int(time.time()))
        original = sec_mod.SLACK_SIGNING_SECRET
        try:
            sec_mod.SLACK_SIGNING_SECRET = None
            with pytest.raises(HTTPException) as exc_info:
                verify_slack_signature(body, ts, "v0=fake")
            assert exc_info.value.status_code == 500
        finally:
            sec_mod.SLACK_SIGNING_SECRET = original

    def test_stale_timestamp_rejected(self):
        # 400s-old timestamp is outside the replay window (presumably 300s — TODO confirm against security.py).
        body = b'{"test": true}'
        secret = "test-secret"
        stale_ts = str(int(time.time()) - 400)
        sig = _sign(body, stale_ts, secret)
        assert verify_slack_signature(body, stale_ts, sig, signing_secret=secret) is False

    def test_wrong_signature_rejected(self):
        body = b'{"test": true}'
        ts = str(int(time.time()))
        assert verify_slack_signature(body, ts, "v0=deadbeef", signing_secret="secret") is False

    def test_non_utf8_body_rejected(self):
        # body.decode() would fail; verification must return False, not raise.
        body = b"\x80\x81\x82\xff"
        ts = str(int(time.time()))
        assert verify_slack_signature(body, ts, "v0=deadbeef", signing_secret="secret") is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/routers/test_slack_security.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/02_agents/08_guardrails/mixed_hooks.py | """
Mixed Hooks and Guardrails
=============================
Example demonstrating how to combine plain hooks with guardrails in pre_hooks.
Both run in order: the logging hook fires, then the PII guardrail checks
for sensitive data. If PII is detected the run is rejected with RunStatus.error.
"""
from agno.agent import Agent
from agno.guardrails import PIIDetectionGuardrail
from agno.models.openai import OpenAIResponses
from agno.run import RunStatus
from agno.run.agent import RunInput
# ---------------------------------------------------------------------------
# Plain hook (non-guardrail)
# ---------------------------------------------------------------------------
def log_request(run_input: RunInput) -> None:
    """Pre-hook that prints the first 60 characters of every incoming request."""
    preview = run_input.input_content[:60]
    print(f" [log_request] Input: {preview}")
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
def main():
    """Run three demo requests through an agent with a hook + guardrail chain."""
    print("Mixed Hooks and Guardrails Demo")
    print("=" * 50)
    # pre_hooks run in order: log_request first, then the PII guardrail.
    agent = Agent(
        name="Privacy-Protected Agent",
        model=OpenAIResponses(id="gpt-4o-mini"),
        pre_hooks=[log_request, PIIDetectionGuardrail()],
        instructions="You are a helpful assistant that protects user privacy.",
    )

    # Test 1: clean input — the guardrail passes and the agent answers.
    print("\n[TEST 1] Clean input (no PII)")
    print("-" * 40)
    result = agent.run(input="What is the weather today?")
    if result.status == RunStatus.error:
        print(f" [ERROR] Unexpected block: {result.content}")
    else:
        print(f" [OK] Agent responded: {result.content[:80]}")

    # Tests 2 and 3: PII-bearing inputs the guardrail must reject before the
    # agent ever sees them. Same flow for both, so drive them from a table.
    pii_cases = [
        ("\n[TEST 2] Input with SSN", "My SSN is 123-45-6789, can you help?"),
        ("\n[TEST 3] Input with credit card", "My card is 4532 1234 5678 9012, charge it."),
    ]
    for header, prompt in pii_cases:
        print(header)
        print("-" * 40)
        result = agent.run(input=prompt)
        if result.status == RunStatus.error:
            print(f" [BLOCKED] Guardrail rejected: {result.content}")
        else:
            print(" [WARNING] Should have been blocked!")

    print("\n" + "=" * 50)
    print("Mixed Hooks and Guardrails Demo Complete")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
# Standard script entry point guard.
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/08_guardrails/mixed_hooks.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/agent/test_hooks_guardrails.py | """Tests for guardrail behavior in hook execution under background mode."""
from typing import Any, List, Union
from unittest.mock import MagicMock
import pytest
from agno.agent._hooks import (
aexecute_post_hooks,
aexecute_pre_hooks,
execute_post_hooks,
execute_pre_hooks,
)
from agno.exceptions import InputCheckError, OutputCheckError
from agno.guardrails.base import BaseGuardrail
from agno.run import RunContext
from agno.run.agent import RunInput
from agno.run.team import TeamRunInput
from agno.utils.hooks import is_guardrail_hook, normalize_pre_hooks
class BlockingGuardrail(BaseGuardrail):
    """Guardrail that raises InputCheckError."""
    # Rejects unconditionally on both sync and async paths; tests rely on
    # these exact messages via pytest.raises(..., match=...).
    def check(self, run_input: Union[RunInput, TeamRunInput]) -> None:
        raise InputCheckError("blocked by guardrail")
    async def async_check(self, run_input: Union[RunInput, TeamRunInput]) -> None:
        raise InputCheckError("blocked by guardrail (async)")
class OutputBlockingGuardrail(BaseGuardrail):
    """Guardrail that raises OutputCheckError."""
    # Output-side counterpart of BlockingGuardrail, used by post-hook tests.
    def check(self, **kwargs: Any) -> None:
        raise OutputCheckError("blocked output")
    async def async_check(self, **kwargs: Any) -> None:
        raise OutputCheckError("blocked output (async)")
class PassthroughGuardrail(BaseGuardrail):
    """Guardrail that passes (no error)."""
    def __init__(self):
        # Incremented by every check so tests can assert invocation counts.
        self.call_count = 0
    def check(self, **kwargs: Any) -> None:
        self.call_count += 1
    async def async_check(self, **kwargs: Any) -> None:
        self.call_count += 1
def _make_agent(run_hooks_in_background: bool = True) -> MagicMock:
agent = MagicMock()
agent._run_hooks_in_background = run_hooks_in_background
agent.debug_mode = False
agent.events_to_skip = None
agent.store_events = False
return agent
def _make_background_tasks() -> MagicMock:
bt = MagicMock()
bt.tasks: List = []
def add_task(fn, **kwargs):
bt.tasks.append((fn, kwargs))
bt.add_task = add_task
return bt
def _make_session() -> MagicMock:
return MagicMock()
def _make_run_context() -> RunContext:
    # Fixed ids keep assertions deterministic; metadata is echoed to hooks
    # (asserted on in TestMetadataInjection).
    return RunContext(run_id="r1", session_id="s1", session_state={}, metadata={"key": "val"})
def _make_run_input() -> RunInput:
    # Plain text input; guardrail fixtures may mutate input_content in place.
    return RunInput(input_content="test input")
class TestIsGuardrailHook:
    """is_guardrail_hook must detect bound guardrail methods but not plain callables."""
    def test_bound_guardrail_check_detected(self):
        g = PassthroughGuardrail()
        assert is_guardrail_hook(g.check) is True
        assert is_guardrail_hook(g.async_check) is True
    def test_plain_function_not_detected(self):
        def plain_hook(**kwargs):
            pass
        assert is_guardrail_hook(plain_hook) is False
    def test_normalize_pre_hooks_produces_guardrail_hooks(self):
        # Normalization wraps guardrail instances in callables that still
        # register as guardrail hooks.
        g = BlockingGuardrail()
        hooks = normalize_pre_hooks([g], async_mode=False)
        assert hooks is not None
        assert is_guardrail_hook(hooks[0]) is True
class TestPreHookGuardrailInBackground:
    """With hooks configured to run in background, guardrail pre-hooks must
    still execute inline (so rejections propagate), while plain hooks are
    queued onto background_tasks."""
    def test_guardrail_runs_sync_in_global_background_mode(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        hooks = normalize_pre_hooks([guardrail], async_mode=False)
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            list(
                execute_pre_hooks(
                    agent=agent,
                    hooks=hooks,
                    run_response=MagicMock(),
                    run_input=_make_run_input(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        # Nothing queued: the guardrail ran (and rejected) inline.
        assert len(bt.tasks) == 0
    @pytest.mark.asyncio
    async def test_async_guardrail_runs_sync_in_global_background_mode(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        hooks = normalize_pre_hooks([guardrail], async_mode=True)
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            async for _ in aexecute_pre_hooks(
                agent=agent,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            ):
                pass
        assert len(bt.tasks) == 0
    def test_non_guardrail_hook_goes_to_background(self):
        # Plain hooks, by contrast, are deferred to background_tasks.
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        def plain_hook(**kwargs):
            pass
        hooks = [plain_hook]
        list(
            execute_pre_hooks(
                agent=agent,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
class TestPostHookGuardrailInBackground:
    """Same inline-vs-background split as the pre-hook tests, for post-hooks."""
    def test_output_guardrail_runs_sync_in_global_background_mode(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = OutputBlockingGuardrail()
        hooks = [guardrail.check]
        with pytest.raises(OutputCheckError, match="blocked output"):
            list(
                execute_post_hooks(
                    agent=agent,
                    hooks=hooks,
                    run_output=MagicMock(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        # Guardrail ran inline, so nothing was queued.
        assert len(bt.tasks) == 0
    @pytest.mark.asyncio
    async def test_async_output_guardrail_runs_sync_in_global_background_mode(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = OutputBlockingGuardrail()
        hooks = [guardrail.async_check]
        with pytest.raises(OutputCheckError, match="blocked output"):
            async for _ in aexecute_post_hooks(
                agent=agent,
                hooks=hooks,
                run_output=MagicMock(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            ):
                pass
        assert len(bt.tasks) == 0
    def test_non_guardrail_post_hook_goes_to_background(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        def plain_post_hook(**kwargs):
            pass
        hooks = [plain_post_hook]
        list(
            execute_post_hooks(
                agent=agent,
                hooks=hooks,
                run_output=MagicMock(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
class TestMixedHookOrdering:
    """Tests for hook ordering: non-guardrail hooks should NOT be queued if a later guardrail rejects."""
    def test_plain_hook_before_guardrail_not_queued_on_rejection(self):
        """When a plain hook appears before a guardrail, the buffer-and-flush
        pattern ensures the plain hook is NOT queued if the guardrail rejects.
        """
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        def plain_hook(**kwargs):
            pass
        # plain_hook BEFORE guardrail
        hooks = [plain_hook] + guardrail_hooks
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            list(
                execute_pre_hooks(
                    agent=agent,
                    hooks=hooks,
                    run_response=MagicMock(),
                    run_input=_make_run_input(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        # The earlier plain hook must not have been flushed to background.
        assert len(bt.tasks) == 0
    def test_plain_hook_after_guardrail_not_queued_on_rejection(self):
        """When a guardrail appears first, the exception prevents later hooks from queueing."""
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        def plain_hook(**kwargs):
            pass
        # guardrail BEFORE plain_hook — this ordering is safe
        hooks = guardrail_hooks + [plain_hook]
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            list(
                execute_pre_hooks(
                    agent=agent,
                    hooks=hooks,
                    run_response=MagicMock(),
                    run_input=_make_run_input(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        assert len(bt.tasks) == 0
    @pytest.mark.asyncio
    async def test_async_plain_hook_before_guardrail_not_queued_on_rejection(self):
        # Async variant of the buffer-and-flush ordering guarantee.
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=True)
        def plain_hook(**kwargs):
            pass
        hooks = [plain_hook] + guardrail_hooks
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            async for _ in aexecute_pre_hooks(
                agent=agent,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            ):
                pass
        assert len(bt.tasks) == 0
    def test_post_hook_plain_before_guardrail_not_queued_on_rejection(self):
        # Same ordering guarantee on the post-hook (output) side.
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = OutputBlockingGuardrail()
        def plain_hook(**kwargs):
            pass
        # plain_hook BEFORE guardrail
        hooks = [plain_hook, guardrail.check]
        with pytest.raises(OutputCheckError, match="blocked output"):
            list(
                execute_post_hooks(
                    agent=agent,
                    hooks=hooks,
                    run_output=MagicMock(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        assert len(bt.tasks) == 0
class TestDebugModeFalse:
    """An explicit debug_mode=False passed to the executor must reach hooks
    unchanged, even when the agent's own debug flag is True."""
    def test_debug_mode_false_not_overridden_by_agent(self):
        agent = _make_agent()
        agent.debug_mode = True  # agent-level flag that must NOT leak into hooks
        guardrail = PassthroughGuardrail()
        captured_args = {}
        original_check = guardrail.check
        def spy_check(**kwargs):
            # Record exactly what the executor passes to the hook, then
            # delegate so the guardrail still runs.
            captured_args.update(kwargs)
            return original_check(**kwargs)
        # NOTE: previously a normalize_pre_hooks(...) result was assigned to
        # `hooks` and immediately overwritten — dead code, removed.
        hooks = [spy_check]
        list(
            execute_pre_hooks(
                agent=agent,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                debug_mode=False,
            )
        )
        assert captured_args.get("debug_mode") is False
class TestMetadataInjection:
    """run_context.metadata must be forwarded to hooks as the `metadata` kwarg."""
    def test_metadata_from_run_context_passed_to_hooks(self):
        agent = _make_agent(run_hooks_in_background=False)
        captured_args = {}
        def spy_hook(**kwargs):
            captured_args.update(kwargs)
        run_context = _make_run_context()
        run_context.metadata = {"env": "test", "version": "2.5"}
        list(
            execute_pre_hooks(
                agent=agent,
                hooks=[spy_hook],
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=run_context,
            )
        )
        assert captured_args.get("metadata") == {"env": "test", "version": "2.5"}
class MutatingGuardrail(BaseGuardrail):
    """Guardrail that rewrites the input in place instead of rejecting it."""
    def check(self, run_input: Union[RunInput, TeamRunInput], **kwargs: Any) -> None:
        run_input.input_content = "[REDACTED]"
    async def async_check(self, run_input: Union[RunInput, TeamRunInput], **kwargs: Any) -> None:
        run_input.input_content = "[REDACTED]"
class CrashingGuardrail(BaseGuardrail):
    """Guardrail that raises a non-check exception (an internal failure)."""
    def check(self, **kwargs: Any) -> None:
        raise RuntimeError("unexpected internal error")
    async def async_check(self, **kwargs: Any) -> None:
        raise RuntimeError("unexpected internal error")
class TestMutatingGuardrailBackground:
    """Background-queued hooks must observe the input AFTER guardrail mutation."""
    def test_bg_hooks_receive_post_mutation_data(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = MutatingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        captured_input = {}
        def spy_hook(run_input, **kwargs):
            captured_input["content"] = run_input.input_content
        hooks = guardrail_hooks + [spy_hook]
        list(
            execute_pre_hooks(
                agent=agent,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=RunInput(input_content="sensitive data"),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
        # The queued task's kwargs carry the mutated (redacted) input.
        _, task_kwargs = bt.tasks[0]
        assert task_kwargs["run_input"].input_content == "[REDACTED]"
    @pytest.mark.asyncio
    async def test_async_bg_hooks_receive_post_mutation_data(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = MutatingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=True)
        def spy_hook(run_input, **kwargs):
            pass
        hooks = guardrail_hooks + [spy_hook]
        async for _ in aexecute_pre_hooks(
            agent=agent,
            hooks=hooks,
            run_response=MagicMock(),
            run_input=RunInput(input_content="sensitive data"),
            session=_make_session(),
            run_context=_make_run_context(),
            background_tasks=bt,
        ):
            pass
        assert len(bt.tasks) == 1
        _, task_kwargs = bt.tasks[0]
        assert task_kwargs["run_input"].input_content == "[REDACTED]"
class TestCrashingGuardrailBackground:
    """A guardrail's internal (non-check) exception must not propagate, and
    later plain hooks must still be queued."""
    def test_unexpected_exception_logged_not_propagated(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = CrashingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        def plain_hook(**kwargs):
            pass
        hooks = guardrail_hooks + [plain_hook]
        # No pytest.raises: the RuntimeError is swallowed by the executor.
        list(
            execute_pre_hooks(
                agent=agent,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
    @pytest.mark.asyncio
    async def test_async_unexpected_exception_logged_not_propagated(self):
        agent = _make_agent(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = CrashingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=True)
        def plain_hook(**kwargs):
            pass
        hooks = guardrail_hooks + [plain_hook]
        async for _ in aexecute_pre_hooks(
            agent=agent,
            hooks=hooks,
            run_response=MagicMock(),
            run_input=_make_run_input(),
            session=_make_session(),
            run_context=_make_run_context(),
            background_tasks=bt,
        ):
            pass
        assert len(bt.tasks) == 1
class TestPIIGuardrailTypeSafety:
    """PIIDetectionGuardrail: masking rewrites input_content as a str;
    without masking, detection raises InputCheckError."""
    def test_mask_pii_masks_and_assigns_string(self):
        from agno.guardrails.pii import PIIDetectionGuardrail
        guardrail = PIIDetectionGuardrail(mask_pii=True, enable_email_check=True)
        run_input = RunInput(input_content="Contact me at user@example.com")
        guardrail.check(run_input)
        # The email is gone and the masked value is still a plain str.
        assert "@" not in run_input.input_content
        assert isinstance(run_input.input_content, str)
    @pytest.mark.asyncio
    async def test_async_mask_pii_masks_and_assigns_string(self):
        from agno.guardrails.pii import PIIDetectionGuardrail
        guardrail = PIIDetectionGuardrail(mask_pii=True, enable_email_check=True)
        run_input = RunInput(input_content="My SSN is 123-45-6789")
        await guardrail.async_check(run_input)
        assert "123-45-6789" not in run_input.input_content
    def test_detect_pii_raises_without_masking(self):
        from agno.guardrails.pii import PIIDetectionGuardrail
        guardrail = PIIDetectionGuardrail(mask_pii=False, enable_email_check=True)
        run_input = RunInput(input_content="Contact user@example.com")
        with pytest.raises(InputCheckError, match="Potential PII detected"):
            guardrail.check(run_input)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_hooks_guardrails.py",
"license": "Apache License 2.0",
"lines": 429,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_hooks_guardrails.py | """Tests for guardrail behavior in team hook execution under background mode.
Mirrors the agent hook tests to verify parity between agent/_hooks.py and team/_hooks.py.
"""
from typing import Any, List, Union
from unittest.mock import MagicMock
import pytest
from agno.exceptions import InputCheckError, OutputCheckError
from agno.guardrails.base import BaseGuardrail
from agno.run import RunContext
from agno.run.team import TeamRunInput
from agno.team._hooks import (
_aexecute_post_hooks,
_aexecute_pre_hooks,
_execute_post_hooks,
_execute_pre_hooks,
)
from agno.utils.hooks import normalize_pre_hooks
class BlockingGuardrail(BaseGuardrail):
    """Guardrail that always rejects; mirrors the agent-side fixture."""
    def check(self, run_input: Union[TeamRunInput, Any]) -> None:
        raise InputCheckError("blocked by guardrail")
    async def async_check(self, run_input: Union[TeamRunInput, Any]) -> None:
        raise InputCheckError("blocked by guardrail (async)")
class OutputBlockingGuardrail(BaseGuardrail):
    """Output-side guardrail that always rejects."""
    def check(self, **kwargs: Any) -> None:
        raise OutputCheckError("blocked output")
    async def async_check(self, **kwargs: Any) -> None:
        raise OutputCheckError("blocked output (async)")
class PassthroughGuardrail(BaseGuardrail):
    """Guardrail that passes and counts its invocations.
    NOTE(review): not referenced by the tests visible in this module —
    kept for parity with the agent-side suite; confirm before removing."""
    def __init__(self):
        self.call_count = 0
    def check(self, **kwargs: Any) -> None:
        self.call_count += 1
    async def async_check(self, **kwargs: Any) -> None:
        self.call_count += 1
def _make_team(run_hooks_in_background: bool = True) -> MagicMock:
team = MagicMock()
team._run_hooks_in_background = run_hooks_in_background
team.debug_mode = False
team.events_to_skip = None
team.store_events = False
return team
def _make_background_tasks() -> MagicMock:
bt = MagicMock()
bt.tasks: List = []
def add_task(fn, **kwargs):
bt.tasks.append((fn, kwargs))
bt.add_task = add_task
return bt
def _make_session() -> MagicMock:
return MagicMock()
def _make_run_context() -> RunContext:
    # Fixed ids keep assertions deterministic; metadata is echoed to hooks
    # (asserted on in TestTeamMetadataInjection).
    return RunContext(run_id="r1", session_id="s1", session_state={}, metadata={"key": "val"})
def _make_run_input() -> TeamRunInput:
    # Plain text input; guardrail fixtures may mutate input_content in place.
    return TeamRunInput(input_content="test input")
class TestTeamPreHookGuardrailInBackground:
    """Team parity with agent tests: guardrail pre-hooks run inline even in
    background mode; plain hooks are queued."""
    def test_guardrail_runs_sync_in_global_background_mode(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        hooks = normalize_pre_hooks([guardrail], async_mode=False)
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            list(
                _execute_pre_hooks(
                    team=team,
                    hooks=hooks,
                    run_response=MagicMock(),
                    run_input=_make_run_input(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        # Nothing queued: the guardrail ran (and rejected) inline.
        assert len(bt.tasks) == 0
    @pytest.mark.asyncio
    async def test_async_guardrail_runs_sync_in_global_background_mode(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        hooks = normalize_pre_hooks([guardrail], async_mode=True)
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            async for _ in _aexecute_pre_hooks(
                team=team,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            ):
                pass
        assert len(bt.tasks) == 0
    def test_non_guardrail_hook_goes_to_background(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        def plain_hook(**kwargs):
            pass
        list(
            _execute_pre_hooks(
                team=team,
                hooks=[plain_hook],
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
class TestTeamPostHookGuardrailInBackground:
    """Output guardrails run inline (and raise) even in background mode."""
    def test_output_guardrail_runs_sync_in_global_background_mode(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = OutputBlockingGuardrail()
        with pytest.raises(OutputCheckError, match="blocked output"):
            list(
                _execute_post_hooks(
                    team=team,
                    hooks=[guardrail.check],
                    run_output=MagicMock(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        assert len(bt.tasks) == 0
    @pytest.mark.asyncio
    async def test_async_output_guardrail_runs_sync_in_global_background_mode(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = OutputBlockingGuardrail()
        with pytest.raises(OutputCheckError, match="blocked output"):
            async for _ in _aexecute_post_hooks(
                team=team,
                hooks=[guardrail.async_check],
                run_output=MagicMock(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            ):
                pass
        assert len(bt.tasks) == 0
class TestTeamMixedHookOrdering:
    """Plain hooks must NOT be queued when a later guardrail rejects the run."""
    def test_plain_hook_before_guardrail_not_queued_on_rejection(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        def plain_hook(**kwargs):
            pass
        # plain_hook BEFORE guardrail — buffer-and-flush must discard it.
        hooks = [plain_hook] + guardrail_hooks
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            list(
                _execute_pre_hooks(
                    team=team,
                    hooks=hooks,
                    run_response=MagicMock(),
                    run_input=_make_run_input(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        assert len(bt.tasks) == 0
    @pytest.mark.asyncio
    async def test_async_plain_hook_before_guardrail_not_queued_on_rejection(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = BlockingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=True)
        def plain_hook(**kwargs):
            pass
        hooks = [plain_hook] + guardrail_hooks
        with pytest.raises(InputCheckError, match="blocked by guardrail"):
            async for _ in _aexecute_pre_hooks(
                team=team,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            ):
                pass
        assert len(bt.tasks) == 0
    def test_post_hook_plain_before_guardrail_not_queued_on_rejection(self):
        # Same guarantee on the post-hook (output) side.
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = OutputBlockingGuardrail()
        def plain_hook(**kwargs):
            pass
        hooks = [plain_hook, guardrail.check]
        with pytest.raises(OutputCheckError, match="blocked output"):
            list(
                _execute_post_hooks(
                    team=team,
                    hooks=hooks,
                    run_output=MagicMock(),
                    session=_make_session(),
                    run_context=_make_run_context(),
                    background_tasks=bt,
                )
            )
        assert len(bt.tasks) == 0
class TestTeamPostHookBackgroundEnqueue:
    """Plain (non-guardrail) post-hooks are deferred onto background_tasks."""
    def test_non_guardrail_post_hook_goes_to_background(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        def plain_post_hook(**kwargs):
            pass
        list(
            _execute_post_hooks(
                team=team,
                hooks=[plain_post_hook],
                run_output=MagicMock(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
    @pytest.mark.asyncio
    async def test_async_non_guardrail_post_hook_goes_to_background(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        def plain_post_hook(**kwargs):
            pass
        async for _ in _aexecute_post_hooks(
            team=team,
            hooks=[plain_post_hook],
            run_output=MagicMock(),
            session=_make_session(),
            run_context=_make_run_context(),
            background_tasks=bt,
        ):
            pass
        assert len(bt.tasks) == 1
class MutatingGuardrail(BaseGuardrail):
    """Guardrail that rewrites the input in place instead of rejecting it."""
    def check(self, run_input: Union[TeamRunInput, Any], **kwargs: Any) -> None:
        run_input.input_content = "[REDACTED]"
    async def async_check(self, run_input: Union[TeamRunInput, Any], **kwargs: Any) -> None:
        run_input.input_content = "[REDACTED]"
class CrashingGuardrail(BaseGuardrail):
    """Guardrail that raises a non-check exception (an internal failure)."""
    def check(self, **kwargs: Any) -> None:
        raise RuntimeError("unexpected internal error")
    async def async_check(self, **kwargs: Any) -> None:
        raise RuntimeError("unexpected internal error")
class TestTeamMutatingGuardrailBackground:
    """Background-queued hooks must observe the input AFTER guardrail mutation."""
    def test_bg_hooks_receive_post_mutation_data(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = MutatingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        def spy_hook(run_input, **kwargs):
            pass
        hooks = guardrail_hooks + [spy_hook]
        list(
            _execute_pre_hooks(
                team=team,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=TeamRunInput(input_content="sensitive data"),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
        # The queued task's kwargs carry the mutated (redacted) input.
        _, task_kwargs = bt.tasks[0]
        assert task_kwargs["run_input"].input_content == "[REDACTED]"
    @pytest.mark.asyncio
    async def test_async_bg_hooks_receive_post_mutation_data(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = MutatingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=True)
        def spy_hook(run_input, **kwargs):
            pass
        hooks = guardrail_hooks + [spy_hook]
        async for _ in _aexecute_pre_hooks(
            team=team,
            hooks=hooks,
            run_response=MagicMock(),
            run_input=TeamRunInput(input_content="sensitive data"),
            session=_make_session(),
            run_context=_make_run_context(),
            background_tasks=bt,
        ):
            pass
        assert len(bt.tasks) == 1
        _, task_kwargs = bt.tasks[0]
        assert task_kwargs["run_input"].input_content == "[REDACTED]"
class TestTeamCrashingGuardrailBackground:
    """Internal guardrail exceptions must not propagate; later plain hooks
    are still queued."""
    def test_unexpected_exception_logged_not_propagated(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = CrashingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=False)
        def plain_hook(**kwargs):
            pass
        hooks = guardrail_hooks + [plain_hook]
        # No pytest.raises: the RuntimeError is swallowed by the executor.
        list(
            _execute_pre_hooks(
                team=team,
                hooks=hooks,
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=_make_run_context(),
                background_tasks=bt,
            )
        )
        assert len(bt.tasks) == 1
    @pytest.mark.asyncio
    async def test_async_unexpected_exception_logged_not_propagated(self):
        team = _make_team(run_hooks_in_background=True)
        bt = _make_background_tasks()
        guardrail = CrashingGuardrail()
        guardrail_hooks = normalize_pre_hooks([guardrail], async_mode=True)
        def plain_hook(**kwargs):
            pass
        hooks = guardrail_hooks + [plain_hook]
        async for _ in _aexecute_pre_hooks(
            team=team,
            hooks=hooks,
            run_response=MagicMock(),
            run_input=_make_run_input(),
            session=_make_session(),
            run_context=_make_run_context(),
            background_tasks=bt,
        ):
            pass
        assert len(bt.tasks) == 1
class TestTeamMetadataInjection:
    """run_context.metadata must be forwarded to hooks as the `metadata` kwarg."""
    def test_metadata_from_run_context_passed_to_hooks(self):
        team = _make_team(run_hooks_in_background=False)
        captured_args = {}
        def spy_hook(**kwargs):
            captured_args.update(kwargs)
        run_context = _make_run_context()
        run_context.metadata = {"env": "test", "version": "2.5"}
        list(
            _execute_pre_hooks(
                team=team,
                hooks=[spy_hook],
                run_response=MagicMock(),
                run_input=_make_run_input(),
                session=_make_session(),
                run_context=run_context,
            )
        )
        assert captured_args.get("metadata") == {"env": "test", "version": "2.5"}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_hooks_guardrails.py",
"license": "Apache License 2.0",
"lines": 351,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/02_agents/14_advanced/background_execution_metrics.py | """
Background Execution Metrics
=============================
Demonstrates that metrics are fully tracked for background runs.
When an agent runs in the background, the run completes asynchronously
and is stored in the database. Once complete, the run output includes
the same metrics as a synchronous run: token counts, model details,
duration, and time-to-first-token.
"""
import asyncio
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.run.base import RunStatus
from agno.tools.yfinance import YFinanceTools
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
# Requires a Postgres instance at localhost:5532 (user/password "ai").
# A db is mandatory for background runs: the run output is persisted here
# and later fetched by run_id.
db = PostgresDb(
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    session_table="bg_metrics_sessions",
)
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# The agent is given the db so its background run can be stored and polled.
agent = Agent(
    name="BackgroundMetricsAgent",
    model=OpenAIChat(id="gpt-4o-mini"),
    tools=[YFinanceTools(enable_stock_price=True)],
    db=db,
)
# ---------------------------------------------------------------------------
# Run in background and inspect metrics
# ---------------------------------------------------------------------------
async def main():
    """Start a background run, poll until it finishes, then dump its metrics."""

    def banner(title: str) -> None:
        # Section header printed before each metrics dump.
        print("\n" + "=" * 50)
        print(title)
        print("=" * 50)

    # Kick off the run; background=True returns immediately with a run_id.
    run_output = await agent.arun(
        "What is the stock price of AAPL?",
        background=True,
    )
    print(f"Run ID: {run_output.run_id}")
    print(f"Status: {run_output.status}")

    # Poll once per second until the stored run reaches a terminal state.
    result = None
    for attempt in range(30):
        await asyncio.sleep(1)
        result = await agent.aget_run_output(
            run_id=run_output.run_id,
            session_id=run_output.session_id,
        )
        if result and result.status in (RunStatus.completed, RunStatus.error):
            print(f"Completed after {attempt + 1}s")
            break
    if result is None or result.status != RunStatus.completed:
        print("Run did not complete in time")
        return

    banner("RUN METRICS")
    pprint(result.metrics)

    banner("MODEL DETAILS")
    if result.metrics and result.metrics.details:
        for model_type, model_metrics_list in result.metrics.details.items():
            print(f"\n{model_type}:")
            for model_metric in model_metrics_list:
                pprint(model_metric)

    banner("SESSION METRICS")
    session_metrics = agent.get_session_metrics()
    if session_metrics:
        pprint(session_metrics)
# Entry point: drive the async demo via asyncio.run.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/background_execution_metrics.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/14_run_control/background_execution_metrics.py | """
Team Background Execution Metrics
==================================
Demonstrates that metrics are fully tracked for team background runs.
When a team runs in the background, the run completes asynchronously
and is stored in the database. Once complete, the run output includes
the same metrics as a synchronous run: token counts, model details,
duration, and member-level breakdown.
"""
import asyncio
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.run.base import RunStatus
from agno.team import Team
from agno.tools.yfinance import YFinanceTools
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
# Requires a Postgres instance at localhost:5532 (user/password "ai").
# A db is mandatory for background runs: the run output is persisted here
# and later fetched by run_id.
db = PostgresDb(
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    session_table="team_bg_metrics_sessions",
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
stock_searcher = Agent(
    name="Stock Searcher",
    model=OpenAIChat(id="gpt-4o-mini"),
    role="Searches for stock information.",
    tools=[YFinanceTools(enable_stock_price=True)],
)
# store_member_responses=True keeps member outputs on the stored run so the
# member-level metrics section in main() has data to print.
team = Team(
    name="Stock Research Team",
    model=OpenAIChat(id="gpt-4o-mini"),
    members=[stock_searcher],
    db=db,
    show_members_responses=True,
    store_member_responses=True,
)
# ---------------------------------------------------------------------------
# Run in background and inspect metrics
# ---------------------------------------------------------------------------
async def main() -> None:
    """Kick off a background team run, poll until it finishes, then print metrics.

    Demonstrates that a background run, once completed and loaded back from the
    database, carries the same metrics as a synchronous run.
    """
    # background=True returns immediately with a pending run handle
    run_output = await team.arun(
        "What is the stock price of NVDA?",
        background=True,
    )
    print(f"Run ID: {run_output.run_id}")
    print(f"Status: {run_output.status}")
    # Poll for completion (1s interval, up to 60s)
    result = None
    for i in range(60):
        await asyncio.sleep(1)
        result = await team.aget_run_output(
            run_id=run_output.run_id,
            session_id=run_output.session_id,
        )
        # Stop polling on either terminal state: completed or error
        if result and result.status in (RunStatus.completed, RunStatus.error):
            print(f"Completed after {i + 1}s")
            break
    if result is None or result.status != RunStatus.completed:
        print("Run did not complete in time")
        return
    # ----- Team metrics -----
    print("\n" + "=" * 50)
    print("TEAM METRICS")
    print("=" * 50)
    pprint(result.metrics)
    # ----- Model details breakdown -----
    print("\n" + "=" * 50)
    print("MODEL DETAILS")
    print("=" * 50)
    if result.metrics and result.metrics.details:
        for model_type, model_metrics_list in result.metrics.details.items():
            print(f"\n{model_type}:")
            for model_metric in model_metrics_list:
                pprint(model_metric)
    # ----- Member metrics -----
    print("\n" + "=" * 50)
    print("MEMBER METRICS")
    print("=" * 50)
    if result.member_responses:
        for member_response in result.member_responses:
            print(f"\nMember: {member_response.agent_name}")
            print("-" * 40)
            pprint(member_response.metrics)
    # ----- Session metrics -----
    print("\n" + "=" * 50)
    print("SESSION METRICS")
    print("=" * 50)
    session_metrics = team.get_session_metrics()
    if session_metrics:
        pprint(session_metrics)
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/14_run_control/background_execution_metrics.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/03_teams/22_metrics/05_team_eval_metrics.py | """
Team Eval Metrics
=============================
Demonstrates that eval model metrics are accumulated back into the
team's run_output when AgentAsJudgeEval is used as a post_hook.
After the team runs, the evaluator agent makes its own model call.
Those eval tokens show up under "eval_model" in run_output.metrics.details,
separate from the team's own model tokens.
"""
from agno.agent import Agent
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.models.openai import OpenAIChat
from agno.team import Team
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create eval as a post-hook
# ---------------------------------------------------------------------------
eval_hook = AgentAsJudgeEval(
name="Quality Check",
model=OpenAIChat(id="gpt-4o-mini"),
criteria="Response should be accurate, well-structured, and concise",
scoring_strategy="binary",
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
researcher = Agent(
name="Researcher",
model=OpenAIChat(id="gpt-4o-mini"),
role="Research topics and provide factual information.",
)
team = Team(
name="Research Team",
model=OpenAIChat(id="gpt-4o-mini"),
members=[researcher],
post_hooks=[eval_hook],
show_members_responses=True,
store_member_responses=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Run the team; the AgentAsJudgeEval post-hook adds an extra model call
    # whose tokens land under "eval_model" in result.metrics.details.
    result = team.run("What are the three laws of thermodynamics?")
    if result.metrics:
        print("Total tokens (team + eval):", result.metrics.total_tokens)
        if result.metrics.details:
            # Team's own model calls
            if "model" in result.metrics.details:
                team_tokens = sum(
                    metric.total_tokens for metric in result.metrics.details["model"]
                )
                print("Team model tokens:", team_tokens)
            # Eval model call
            if "eval_model" in result.metrics.details:
                eval_tokens = sum(
                    metric.total_tokens
                    for metric in result.metrics.details["eval_model"]
                )
                print("Eval model tokens:", eval_tokens)
                for metric in result.metrics.details["eval_model"]:
                    print(f"  Evaluator: {metric.id} ({metric.provider})")
        print("\n" + "=" * 50)
        print("FULL METRICS")
        print("=" * 50)
        pprint(result.metrics)
        # Fix: guard the breakdown — the original iterated
        # result.metrics.details.items() unconditionally, which raises
        # AttributeError when details is None (the earlier sections already
        # treat details as optional).
        if result.metrics.details:
            print("\n" + "=" * 50)
            print("MODEL DETAILS")
            print("=" * 50)
            for model_type, model_metrics_list in result.metrics.details.items():
                print(f"\n{model_type}:")
                for model_metric in model_metrics_list:
                    pprint(model_metric)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/22_metrics/05_team_eval_metrics.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/condition/01_condition_user_decision.py | """
Condition with User Decision HITL Example
This example demonstrates how to use HITL with a Condition component,
allowing the user to decide which branch to execute at runtime.
When `requires_confirmation=True` on a Condition, the `on_reject` setting
controls what happens when the user rejects:
- on_reject="else" (default): Execute `else_steps` if provided, otherwise skip
- on_reject="skip": Skip the entire condition (both branches)
- on_reject="cancel": Cancel the workflow
This is useful for:
- User-driven decision points
- Interactive branching workflows
- A/B testing with human judgment
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.condition import Condition
from agno.workflow.step import Step
from agno.workflow.types import OnReject, StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step functions
# ============================================================
def analyze_data(step_input: StepInput) -> StepOutput:
    """Run the (simulated) initial analysis and ask whether to go deeper."""
    topic = step_input.input or "data"
    message = (
        f"Analysis complete for '{topic}':\n"
        "- Found potential issues that may require detailed review\n"
        "- Quick summary is available\n\n"
        "Would you like to proceed with detailed analysis?"
    )
    return StepOutput(content=message)
def detailed_analysis(step_input: StepInput) -> StepOutput:
    """Simulate the in-depth analysis branch ('if' side of the condition)."""
    report_lines = [
        "Detailed Analysis Results:",
        "- Comprehensive review completed",
        "- All edge cases examined",
        "- Full report generated",
        "- Processing time: 10 minutes",
    ]
    return StepOutput(content="\n".join(report_lines))
def quick_summary(step_input: StepInput) -> StepOutput:
    """Simulate the lightweight summary branch ('else' side of the condition)."""
    summary_lines = [
        "Quick Summary:",
        "- Basic metrics computed",
        "- Key highlights identified",
        "- Processing time: 1 minute",
    ]
    return StepOutput(content="\n".join(summary_lines))
def generate_report(step_input: StepInput) -> StepOutput:
    """Wrap whichever branch ran before into the final report."""
    body = step_input.previous_step_content or "No analysis"
    report = f"=== FINAL REPORT ===\n\n{body}\n\nReport generated successfully."
    return StepOutput(content=report)
def run_demo(on_reject_mode: OnReject, demo_name: str) -> None:
    """Build and run the HITL condition workflow once with the given on_reject mode.

    Prompts the user at the condition's confirmation point, then prints the
    final run status and content.
    """
    print("\n" + "=" * 60)
    print(f"Demo: {demo_name}")
    print(f"on_reject = {on_reject_mode.value}")
    print("=" * 60)
    # Define the steps
    analyze_step = Step(name="analyze_data", executor=analyze_data)
    # Condition with HITL - user decides which branch to take
    # The evaluator is ignored when requires_confirmation=True
    # User confirms -> detailed_analysis (if branch)
    # User rejects -> behavior depends on on_reject setting
    analysis_condition = Condition(
        name="analysis_depth_decision",
        steps=[Step(name="detailed_analysis", executor=detailed_analysis)],
        else_steps=[Step(name="quick_summary", executor=quick_summary)],
        requires_confirmation=True,
        confirmation_message="Would you like to perform detailed analysis?",
        on_reject=on_reject_mode,
    )
    report_step = Step(name="generate_report", executor=generate_report)
    # Create workflow with database for HITL persistence
    workflow = Workflow(
        name="condition_hitl_demo",
        steps=[analyze_step, analysis_condition, report_step],
        db=SqliteDb(db_file="tmp/condition_hitl.db"),
    )
    run_output = workflow.run("Q4 sales data")
    # Handle HITL pauses; loop in case continue_run pauses again
    while run_output.is_paused:
        # Handle Step requirements (confirmation)
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[DECISION POINT] {requirement.step_name}")
            print(f"[HITL] {requirement.confirmation_message}")
            print(f"[INFO] on_reject mode: {requirement.on_reject}")
            user_choice = input("\nYour choice (yes/no): ").strip().lower()
            if user_choice in ("yes", "y"):
                requirement.confirm()
                print("[HITL] Confirmed - executing 'if' branch (detailed analysis)")
            else:
                requirement.reject()
                # Explain what the chosen on_reject mode will do next
                if on_reject_mode == OnReject.else_branch:
                    print("[HITL] Rejected - executing 'else' branch (quick summary)")
                elif on_reject_mode == OnReject.skip:
                    print("[HITL] Rejected - skipping entire condition")
                else:
                    print("[HITL] Rejected - cancelling workflow")
        run_output = workflow.continue_run(run_output)
    print("\n" + "-" * 40)
    print(f"Status: {run_output.status}")
    print("-" * 40)
    print(run_output.content)
if __name__ == "__main__":
    print("=" * 60)
    print("Condition with User Decision HITL Example")
    print("=" * 60)
    print("\nThis demo shows 3 different on_reject behaviors:")
    print(" 1. on_reject='else' (default) - Execute else branch on reject")
    print(" 2. on_reject='skip' - Skip entire condition on reject")
    print(" 3. on_reject='cancel' - Cancel workflow on reject")
    print()
    # Let user choose which demo to run
    print("Which demo would you like to run?")
    print(" 1. on_reject='else' (execute else branch)")
    print(" 2. on_reject='skip' (skip condition)")
    print(" 3. on_reject='cancel' (cancel workflow)")
    print(" 4. Run all demos")
    choice = input("\nEnter choice (1-4): ").strip()
    if choice == "1":
        run_demo(OnReject.else_branch, "Execute Else Branch on Reject")
    elif choice == "2":
        run_demo(OnReject.skip, "Skip Condition on Reject")
    elif choice == "3":
        run_demo(OnReject.cancel, "Cancel Workflow on Reject")
    elif choice == "4":
        # Run all demos - use a non-interactive mode for demonstration
        print(
            "\nRunning all demos with automatic 'no' response to show rejection behavior..."
        )
        for mode, name in [
            (OnReject.else_branch, "Execute Else Branch on Reject"),
            (OnReject.skip, "Skip Condition on Reject"),
            (OnReject.cancel, "Cancel Workflow on Reject"),
        ]:
            print("\n" + "=" * 60)
            print(f"Demo: {name}")
            print(f"on_reject = {mode.value}")
            print("=" * 60)
            analyze_step = Step(name="analyze_data", executor=analyze_data)
            analysis_condition = Condition(
                name="analysis_depth_decision",
                # NOTE(review): evaluator=True is a bare bool while run_demo()
                # above passes no evaluator at all — presumably the evaluator
                # is ignored when requires_confirmation=True; confirm against
                # the Condition API.
                evaluator=True,
                steps=[Step(name="detailed_analysis", executor=detailed_analysis)],
                else_steps=[Step(name="quick_summary", executor=quick_summary)],
                requires_confirmation=True,
                confirmation_message="Would you like to perform detailed analysis?",
                on_reject=mode,
            )
            report_step = Step(name="generate_report", executor=generate_report)
            workflow = Workflow(
                name="condition_hitl_demo",
                steps=[analyze_step, analysis_condition, report_step],
                db=SqliteDb(db_file="tmp/condition_hitl.db"),
            )
            run_output = workflow.run("Q4 sales data")
            # Auto-reject for demonstration
            while run_output.is_paused:
                for requirement in run_output.steps_requiring_confirmation:
                    print(f"\n[DECISION POINT] {requirement.step_name}")
                    print(f"[HITL] {requirement.confirmation_message}")
                    print("[AUTO] Rejecting to demonstrate on_reject behavior...")
                    requirement.reject()
                run_output = workflow.continue_run(run_output)
            print(f"\nStatus: {run_output.status}")
            print(f"Content: {run_output.content}")
    else:
        print("Invalid choice. Running default demo (on_reject='else')...")
        run_demo(OnReject.else_branch, "Execute Else Branch on Reject")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/condition/01_condition_user_decision.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/confirmation/01_basic_step_confirmation.py | """
Basic Step Confirmation Example
This example demonstrates how to pause a workflow for user confirmation
before executing a step. The user can either:
- Confirm: Step executes and workflow continues
- Reject with on_reject=OnReject.cancel (default): Workflow is cancelled
- Reject with on_reject=OnReject.skip: Step is skipped and workflow continues with next step
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.workflow import OnReject
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# Create agents for each step
fetch_agent = Agent(
name="Fetcher",
model=OpenAIChat(id="gpt-4o-mini"),
instructions="You fetch and summarize data. Return a brief summary of what data you would fetch.",
)
process_agent = Agent(
name="Processor",
model=OpenAIChat(id="gpt-4o-mini"),
instructions="You process data. Describe what processing you would do on the input.",
)
save_agent = Agent(
name="Saver",
model=OpenAIChat(id="gpt-4o-mini"),
instructions="You save results. Confirm that you would save the processed data.",
)
# Create a workflow with a step that requires confirmation
# on_reject="skip" means if user rejects, skip this step and continue with next
workflow = Workflow(
name="data_processing",
db=SqliteDb(
db_file="tmp/workflow_hitl.db"
), # Required for HITL to persist session state
steps=[
Step(
name="fetch_data",
agent=fetch_agent,
),
Step(
name="process_data",
agent=process_agent,
requires_confirmation=True,
confirmation_message="About to process sensitive data. Confirm?",
on_reject=OnReject.skip, # If rejected, skip this step and continue with save_results
),
Step(
name="save_results",
agent=save_agent,
),
],
)
# Run the workflow
run_output = workflow.run("Process user data")
# Handle pauses in a loop, matching the sibling HITL examples: the original
# single `if run_output.is_paused:` would miss any pause raised after the
# first continue_run().
while run_output.is_paused:
    for requirement in run_output.steps_requiring_confirmation:
        print(f"\nStep '{requirement.step_name}' requires confirmation")
        print(f"Message: {requirement.confirmation_message}")
        # Wait for actual user input
        user_input = input("\nDo you want to continue? (yes/no): ").strip().lower()
        if user_input in ("yes", "y"):
            requirement.confirm()
            print("Step confirmed.")
        else:
            requirement.reject()
            print("Step rejected.")
    # Continue the workflow
    run_output = workflow.continue_run(run_output)
print(f"\nFinal output: {run_output.content}")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/confirmation/01_basic_step_confirmation.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/confirmation/02_custom_function_step_confirmation.py | """
Test script demonstrating Step-level Human-In-The-Loop (HITL) functionality.
This example shows a blog post workflow where:
1. Research agent gathers information (no confirmation)
2. Custom function processes the research (HITL via @pause decorator)
3. Writer agent creates the final post (no confirmation)
Two approaches for HITL:
1. Flag-based: Using requires_confirmation=True on Step
2. Decorator-based: Using @pause decorator on custom functions
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.workflow.decorators import pause
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# ============================================================
# Step 1: Research Agent (no confirmation needed)
# ============================================================
research_agent = Agent(
name="Researcher",
model=OpenAIChat(id="gpt-4o-mini"),
instructions=[
"You are a research assistant.",
"Given a topic, provide 3 key points about it in a concise bullet list.",
"Keep each point to one sentence.",
],
)
# ============================================================
# Step 2: Process research (requires confirmation via @pause decorator)
# ============================================================
@pause(
    name="Process Research",
    requires_confirmation=True,
    confirmation_message="Research complete. Ready to generate blog post. Proceed?",
)
def process_research(step_input: StepInput) -> StepOutput:
    """Turn the researcher's output into input for the writer step."""
    notes = step_input.previous_step_content or "No research available"
    processed = f"PROCESSED RESEARCH:\n{notes}\n\nReady for blog post generation."
    return StepOutput(content=processed)
# ============================================================
# Step 3: Writer Agent (no confirmation needed)
# ============================================================
writer_agent = Agent(
name="Writer",
model=OpenAIChat(id="gpt-4o-mini"),
instructions=[
"You are a blog writer.",
"Given processed research, write a short 2-paragraph blog post.",
"Keep it concise and engaging.",
],
)
# Define steps
research_step = Step(name="research", agent=research_agent)
process_step = Step(
name="process_research", executor=process_research
) # @pause auto-detected
write_step = Step(name="write_post", agent=writer_agent)
# Create workflow
workflow = Workflow(
name="blog_post_workflow",
db=PostgresDb(db_url=db_url),
steps=[research_step, process_step, write_step],
)
if __name__ == "__main__":
    print("Starting blog post workflow...")
    print("=" * 50)
    run_output = workflow.run("Benefits of morning exercise")
    # Handle HITL pause: the @pause-decorated process_research step suspends
    # the run until the user confirms or rejects
    while run_output.is_paused:
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[HITL] Step '{requirement.step_name}' requires confirmation")
            print(f"[HITL] {requirement.confirmation_message}")
            user_input = input("\nContinue? (yes/no): ").strip().lower()
            if user_input in ("yes", "y"):
                requirement.confirm()
                print("[HITL] Confirmed - continuing workflow...")
            else:
                requirement.reject()
                print("[HITL] Rejected - cancelling workflow...")
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 50)
    print(f"Status: {run_output.status}")
    print(f"Output:\n{run_output.content}")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/confirmation/02_custom_function_step_confirmation.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/confirmation/03_step_confirmation_streaming.py | """
Step Confirmation with Streaming
This example demonstrates how to pause a workflow for user confirmation
before executing a step, with streaming execution for real-time event updates.
Key differences from non-streaming:
1. workflow.run(..., stream=True) returns an Iterator of events
2. stream_events=True is required to receive StepStartedEvent/StepCompletedEvent
3. StepPausedEvent is emitted when a step requires confirmation
4. Get WorkflowRunOutput from session after streaming
5. Use workflow.continue_run(..., stream=True, stream_events=True) for consistent streaming
The user can either:
- Confirm: Step executes and workflow continues
- Reject with on_reject=OnReject.cancel (default): Workflow is cancelled
- Reject with on_reject=OnReject.skip: Step is skipped and workflow continues with next step
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.run.workflow import (
StepCompletedEvent,
StepPausedEvent,
StepStartedEvent,
WorkflowCancelledEvent,
WorkflowCompletedEvent,
WorkflowStartedEvent,
)
from agno.workflow import OnReject
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# Create agents for each step
fetch_agent = Agent(
name="Fetcher",
model=OpenAIChat(id="gpt-4o-mini"),
instructions="You fetch and summarize data. Return a brief summary of what data you would fetch.",
)
process_agent = Agent(
name="Processor",
model=OpenAIChat(id="gpt-4o-mini"),
instructions="You process data. Describe what processing you would do on the input.",
)
save_agent = Agent(
name="Saver",
model=OpenAIChat(id="gpt-4o-mini"),
instructions="You save results. Confirm that you would save the processed data.",
)
# Create a workflow with a step that requires confirmation
# on_reject=OnReject.skip means if user rejects, skip this step and continue with next
workflow = Workflow(
name="data_processing_streaming",
db=SqliteDb(db_file="tmp/workflow_hitl_streaming.db"),
steps=[
Step(
name="fetch_data",
agent=fetch_agent,
),
Step(
name="process_data",
agent=process_agent,
requires_confirmation=True,
confirmation_message="About to process sensitive data. Confirm?",
on_reject=OnReject.skip, # If rejected, skip this step and continue with save_results
),
Step(
name="save_results",
agent=save_agent,
),
],
)
def handle_confirmation_hitl(run_output):
    """Prompt the user to confirm or reject each step awaiting confirmation."""
    pending = run_output.steps_requiring_confirmation
    if not pending:
        return
    divider = "-" * 50
    for req in pending:
        print("\n" + divider)
        print(f"Step '{req.step_name}' requires confirmation")
        print(f"Message: {req.confirmation_message}")
        print(divider)
        answer = input("\nDo you want to continue? (yes/no): ").strip().lower()
        if answer in ("yes", "y"):
            req.confirm()
            print("Step confirmed.")
        else:
            req.reject()
            print("Step rejected.")
def process_event_stream(event_stream):
    """Consume a workflow event stream, printing one line per event."""
    for ev in event_stream:
        if isinstance(ev, WorkflowStartedEvent):
            print(f"[EVENT] Workflow started: {ev.workflow_name}")
        elif isinstance(ev, StepStartedEvent):
            print(f"[EVENT] Step started: {ev.step_name}")
        elif isinstance(ev, StepPausedEvent):
            print(f"[EVENT] Step paused: {ev.step_name}")
            if ev.requires_confirmation:
                print("   Reason: Requires confirmation")
            if ev.confirmation_message:
                print(f"   Message: {ev.confirmation_message}")
        elif isinstance(ev, StepCompletedEvent):
            print(f"[EVENT] Step completed: {ev.step_name}")
            if ev.content:
                text = str(ev.content)
                # Truncate long content to keep the event log readable
                preview = text[:60] + "..." if len(text) > 60 else text
                print(f"   Content: {preview}")
        elif isinstance(ev, WorkflowCompletedEvent):
            print("\n[EVENT] Workflow completed!")
        elif isinstance(ev, WorkflowCancelledEvent):
            print("\n[EVENT] Workflow cancelled!")
            if ev.reason:
                print(f"   Reason: {ev.reason}")
def main() -> None:
    """Run the streaming workflow, handling confirmation pauses interactively."""
    print("=" * 60)
    print("Step Confirmation with Streaming")
    print("=" * 60)
    print("The 'process_data' step requires confirmation before execution.")
    print("You can confirm to proceed or reject to skip the step.")
    print("\nStarting workflow with streaming...\n")
    # Run with streaming - returns an iterator of events
    # stream=True enables streaming output, stream_events=True enables step events
    event_stream = workflow.run("Process user data", stream=True, stream_events=True)
    # Process initial events
    process_event_stream(event_stream)
    # Get run output from session (streaming returns events, not a run output)
    session = workflow.get_session()
    run_output = session.runs[-1] if session and session.runs else None
    # Handle HITL pauses
    while run_output and run_output.is_paused:
        handle_confirmation_hitl(run_output)
        print("\n[INFO] Continuing workflow with streaming...\n")
        # Continue with streaming
        continue_stream = workflow.continue_run(
            run_output, stream=True, stream_events=True
        )
        # Process continuation events
        process_event_stream(continue_stream)
        # Get updated run output
        session = workflow.get_session()
        run_output = session.runs[-1] if session and session.runs else None
    print("\n" + "=" * 60)
    print("Workflow finished!")
    print("=" * 60)
    if run_output:
        print(f"Status: {run_output.status}")
        print(f"Content: {run_output.content}")
        # Show step results
        if run_output.step_results:
            print("\nStep Results:")
            for result in run_output.step_results:
                status = "SUCCESS" if result.success else "SKIPPED"
                content = str(result.content)[:60] if result.content else "No content"
                print(f"  [{result.step_name}] {status}: {content}...")
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/confirmation/03_step_confirmation_streaming.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/confirmation/04_async_step_confirmation.py | """
Demonstrates that the @pause decorator works correctly with async functions.
The @pause decorator attaches metadata directly to the function without
creating a wrapper, so async functions retain their async nature.
This example shows:
1. An async step function decorated with @pause
2. Using acontinue_run for async workflow continuation
3. Simulating async I/O operations within the step
"""
import asyncio
from agno.agent import Agent
from agno.db.postgres import AsyncPostgresDb
from agno.models.openai import OpenAIChat
from agno.workflow.decorators import pause
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
async_db_url = "postgresql+psycopg_async://ai:ai@localhost:5532/ai"
# ============================================================
# Step 1: Research Agent
# ============================================================
research_agent = Agent(
name="Researcher",
model=OpenAIChat(id="gpt-4o-mini"),
instructions=[
"You are a research assistant.",
"Given a topic, provide 3 key points about it.",
],
)
# ============================================================
# Step 2: Async processing step with @pause decorator
# ============================================================
@pause(
    name="Async Data Processor",
    requires_confirmation=True,
    confirmation_message="Research gathered. Ready to process asynchronously. Continue?",
)
async def async_process_data(step_input: StepInput) -> StepOutput:
    """
    Async step that simulates awaitable I/O before handing data onward.

    @pause only attaches metadata to the function (no wrapper is created),
    so the coroutine function stays a coroutine function.
    """
    source = step_input.previous_step_content or "No research"
    # Stand-in for a real awaitable operation (API call, database query, ...)
    await asyncio.sleep(0.5)
    return StepOutput(
        content=f"ASYNC PROCESSED:\n{source}\n\n[Processed with async I/O simulation]"
    )
# ============================================================
# Step 3: Writer Agent
# ============================================================
writer_agent = Agent(
name="Writer",
model=OpenAIChat(id="gpt-4o-mini"),
instructions=[
"You are a content writer.",
"Write a brief summary based on the processed research.",
],
)
# Define steps
research_step = Step(name="research", agent=research_agent)
process_step = Step(name="async_process", executor=async_process_data)
write_step = Step(name="write", agent=writer_agent)
# Create workflow with async database for proper async support
workflow = Workflow(
name="async_hitl_workflow",
db=AsyncPostgresDb(db_url=async_db_url),
steps=[research_step, process_step, write_step],
)
async def main() -> None:
    """Run the async HITL workflow end-to-end, auto-confirming pauses."""
    print("Starting async HITL workflow...")
    print("=" * 50)
    # Run workflow asynchronously
    run_output = await workflow.arun("Benefits of meditation")
    # Handle HITL pause
    while run_output.is_paused:
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[HITL] Step '{requirement.step_name}' requires confirmation")
            print(f"[HITL] {requirement.confirmation_message}")
            # In a real app, this would be async user input
            # For demo, we auto-confirm
            print("[HITL] Auto-confirming for demo...")
            requirement.confirm()
        # Continue workflow asynchronously
        run_output = await workflow.acontinue_run(run_output)
    print("\n" + "=" * 50)
    print(f"Status: {run_output.status}")
    print(f"Output:\n{run_output.content}")
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/confirmation/04_async_step_confirmation.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/error/01_error_retry_skip.py | """
Error HITL: Retry or Skip Failed Steps
This example demonstrates how to use HITL when a step encounters an error.
When a step with `on_error="pause"` fails, the workflow pauses and lets the user
decide to either retry the step or skip it and continue with the next step.
Use Case:
- API calls that may fail due to rate limits or network issues
- Operations that may timeout but could succeed on retry
- Steps where intermittent failures are expected
"""
import random
from agno.db.sqlite import SqliteDb
from agno.workflow import OnError
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# A function that randomly fails to simulate an unreliable operation
def unreliable_api_call(step_input: StepInput) -> StepOutput:
    """Simulates an API call that may fail randomly."""
    # Fail ~99% of the time so the HITL retry/skip flow is exercised
    failed = random.random() < 0.99
    if failed:
        raise Exception("API call failed: Connection timeout")
    return StepOutput(
        content="API call succeeded! Data fetched successfully.",
        success=True,
    )
def process_data(step_input: StepInput) -> StepOutput:
    """Process the data produced by the previous step."""
    upstream = step_input.previous_step_content or "No data"
    return StepOutput(content=f"Processed: {upstream}", success=True)
def save_results(step_input: StepInput) -> StepOutput:
    """Persist (pretend to) the processed results from the previous step."""
    upstream = step_input.previous_step_content or "No data"
    return StepOutput(content=f"Saved: {upstream}", success=True)
# Create the workflow
workflow = Workflow(
name="error_hitl_workflow",
db=SqliteDb(db_file="tmp/error_hitl.db"),
steps=[
Step(
name="fetch_data",
executor=unreliable_api_call,
on_error=OnError.pause, # Pause on error and let user decide
),
Step(
name="process_data",
executor=process_data,
),
Step(
name="save_results",
executor=save_results,
),
],
)
def main() -> None:
    """Run the error-HITL workflow and let the user retry/skip the failing step."""
    print("=" * 60)
    print("Error HITL: Retry or Skip Failed Steps")
    print("=" * 60)
    # Fix: the banner claimed a 70% failure chance, but unreliable_api_call
    # raises when random.random() < 0.99, i.e. ~99% of the time.
    print("The 'fetch_data' step has a 99% chance of failing.")
    print("When it fails, you can choose to retry or skip.")
    print()
    run_output = workflow.run("Fetch and process data")
    while run_output.is_paused:
        # Check for error requirements
        if run_output.steps_with_errors:
            for error_req in run_output.steps_with_errors:
                print("\n" + "-" * 40)
                print(f"Step '{error_req.step_name}' FAILED")
                print(f"Error Type: {error_req.error_type}")
                print(f"Error Message: {error_req.error_message}")
                print(f"Retry Count: {error_req.retry_count}")
                print("-" * 40)
                user_choice = (
                    input("\nWhat would you like to do? (retry/skip): ").strip().lower()
                )
                if user_choice == "retry":
                    error_req.retry()
                    print("Retrying the step...")
                else:
                    error_req.skip()
                    print("Skipping the step and continuing...")
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print("Workflow completed!")
    print("=" * 60)
    print(f"Status: {run_output.status}")
    print(f"Content: {run_output.content}")
    # Show step results
    if run_output.step_results:
        print("\nStep Results:")
        for result in run_output.step_results:
            status = "SUCCESS" if result.success else "FAILED/SKIPPED"
            print(
                f"  [{result.step_name}] {status}: {result.content[:80] if result.content else 'No content'}..."
            )
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/error/01_error_retry_skip.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/error/02_error_retry_skip_streaming.py | """
Error HITL: Retry or Skip Failed Steps (Streaming)
This example demonstrates how to use HITL when a step encounters an error,
with streaming execution for real-time event updates.
Key differences from non-streaming:
1. workflow.run(..., stream=True) returns an Iterator of events
2. stream_events=True is required to receive StepStartedEvent/StepCompletedEvent
3. Events include StepErrorEvent when errors occur
4. Get WorkflowRunOutput from session after streaming
5. Use workflow.continue_run(..., stream=True, stream_events=True) for consistent streaming
Use Case:
- API calls that may fail due to rate limits or network issues
- Operations that may timeout but could succeed on retry
- Steps where intermittent failures are expected
- Real-time progress updates during workflow execution
"""
import random
from agno.db.sqlite import SqliteDb
from agno.run.workflow import (
StepCompletedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowStartedEvent,
)
from agno.workflow import OnError
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# A function that randomly fails to simulate an unreliable operation
def unreliable_api_call(step_input: StepInput) -> StepOutput:
    """Simulate an unreliable API call that fails ~99% of the time."""
    failure_probability = 0.99
    # Almost always raise so the HITL error path is exercised
    if random.random() < failure_probability:
        raise Exception("API call failed: Connection timeout")
    return StepOutput(
        content="API call succeeded! Data fetched successfully.",
        success=True,
    )
def process_data(step_input: StepInput) -> StepOutput:
    """Process whatever the previous step produced."""
    upstream = step_input.previous_step_content or "No data"
    return StepOutput(content=f"Processed: {upstream}", success=True)
def save_results(step_input: StepInput) -> StepOutput:
    """Persist (mock) the processed results from the previous step."""
    upstream = step_input.previous_step_content or "No data"
    return StepOutput(content=f"Saved: {upstream}", success=True)
# Create the workflow
# Three-stage pipeline: fetch (may fail) -> process -> save.
# Only the flaky fetch step pauses on error; downstream steps run normally.
workflow = Workflow(
    name="error_hitl_streaming_workflow",
    # SQLite persistence is required so a paused run can be resumed later
    db=SqliteDb(db_file="tmp/error_hitl_streaming.db"),
    steps=[
        Step(
            name="fetch_data",
            executor=unreliable_api_call,
            on_error=OnError.pause,  # Pause on error and let user decide
        ),
        Step(
            name="process_data",
            executor=process_data,
        ),
        Step(
            name="save_results",
            executor=save_results,
        ),
    ],
)
def handle_error_hitl(run_output):
    """Prompt the user to retry or skip every step that errored out."""
    failed = run_output.steps_with_errors
    if not failed:
        return
    divider = "-" * 40
    for req in failed:
        print("\n" + divider)
        print(f"Step '{req.step_name}' FAILED")
        print(f"Error Type: {req.error_type}")
        print(f"Error Message: {req.error_message}")
        print(f"Retry Count: {req.retry_count}")
        print(divider)
        answer = (
            input("\nWhat would you like to do? (retry/skip): ").strip().lower()
        )
        # Anything other than an explicit "retry" skips the step
        if answer == "retry":
            req.retry()
            print("Retrying the step...")
        else:
            req.skip()
            print("Skipping the step and continuing...")
def main():
    """Run the streaming HITL demo: execute, surface errors, retry/skip, report.

    Flow: run the workflow with streaming, print each event, then — while the
    run is paused on an error — ask the user to retry/skip and continue the
    run with streaming until it finishes.
    """
    print("=" * 60)
    print("Error HITL: Retry or Skip Failed Steps (Streaming)")
    print("=" * 60)
    # Fixed: unreliable_api_call fails when random.random() < 0.99, so the
    # real failure rate is 99% (the banner previously claimed 70%).
    print("The 'fetch_data' step has a 99% chance of failing.")
    print("When it fails, you can choose to retry or skip.")
    print("\nStarting workflow with streaming...\n")
    # Run with streaming - returns an iterator of events
    # stream=True enables streaming output, stream_events=True enables step events
    event_stream = workflow.run(
        "Fetch and process data", stream=True, stream_events=True
    )
    for event in event_stream:
        if isinstance(event, WorkflowStartedEvent):
            print(f"[EVENT] Workflow started: {event.workflow_name}")
        elif isinstance(event, StepStartedEvent):
            print(f"[EVENT] Step started: {event.step_name}")
        elif isinstance(event, StepCompletedEvent):
            print(f"[EVENT] Step completed: {event.step_name}")
            if event.content:
                preview = (
                    str(event.content)[:60] + "..."
                    if len(str(event.content)) > 60
                    else str(event.content)
                )
                print(f"  Content: {preview}")
        elif isinstance(event, WorkflowCompletedEvent):
            print("\n[EVENT] Workflow completed!")
    # Streaming consumes events only; the run output lives on the session
    session = workflow.get_session()
    run_output = session.runs[-1] if session and session.runs else None
    # Handle HITL pauses until the run either completes or errors out
    while run_output and run_output.is_paused:
        handle_error_hitl(run_output)
        print("\n[INFO] Continuing workflow with streaming...\n")
        # Continue with streaming for consistent event output
        continue_stream = workflow.continue_run(
            run_output, stream=True, stream_events=True
        )
        for event in continue_stream:
            if isinstance(event, StepStartedEvent):
                print(f"[EVENT] Step started: {event.step_name}")
            elif isinstance(event, StepCompletedEvent):
                print(f"[EVENT] Step completed: {event.step_name}")
                if event.content:
                    preview = (
                        str(event.content)[:60] + "..."
                        if len(str(event.content)) > 60
                        else str(event.content)
                    )
                    print(f"  Content: {preview}")
            elif isinstance(event, WorkflowCompletedEvent):
                print("\n[EVENT] Workflow completed!")
        # Re-read the run output; the step may have failed again and re-paused
        session = workflow.get_session()
        run_output = session.runs[-1] if session and session.runs else None
    print("\n" + "=" * 60)
    print("Workflow finished!")
    print("=" * 60)
    if run_output:
        print(f"Status: {run_output.status}")
        print(f"Content: {run_output.content}")
        # Show step results
        if run_output.step_results:
            print("\nStep Results:")
            for result in run_output.step_results:
                status = "SUCCESS" if result.success else "FAILED/SKIPPED"
                content = result.content[:60] if result.content else "No content"
                print(f"  [{result.step_name}] {status}: {content}...")
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/error/02_error_retry_skip_streaming.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/loop/01_loop_confirmation.py | """
Loop with User Confirmation HITL Example
This example demonstrates start confirmation for Loop components.
When `requires_confirmation=True`:
- Pauses before the first iteration
- User confirms -> execute loop
- User rejects -> skip loop entirely
This is useful for:
- Optional iterative processing
- User-controlled loop execution
- Confirming expensive/time-consuming loops
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.loop import Loop
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step functions
# ============================================================
def prepare_data(step_input: StepInput) -> StepOutput:
    """Stage the data ahead of the refinement loop."""
    message = (
        "Data prepared for iterative processing.\n"
        "Ready to begin refinement loop."
    )
    return StepOutput(content=message)
def refine_analysis(step_input: StepInput) -> StepOutput:
    """Run one refinement pass and report simulated quality metrics."""
    # Iteration counter is expected to be attached by the loop runtime;
    # defaults to 1 when absent — TODO confirm attribute name
    n = getattr(step_input, "_iteration_count", 1)
    quality = 70 + n * 10
    improvements = n * 3
    report = (
        f"Iteration {n} complete:\n"
        f"- Quality score: {quality}%\n"
        f"- Improvements made: {improvements}\n"
        "- Further refinement possible"
    )
    return StepOutput(content=report)
def finalize_results(step_input: StepInput) -> StepOutput:
    """Wrap the loop's output in a final-results banner."""
    upstream = step_input.previous_step_content or "No iterations"
    banner = f"=== FINAL RESULTS ===\n\n{upstream}\n\nProcessing complete."
    return StepOutput(content=banner)
# Define the steps
prepare_step = Step(name="prepare_data", executor=prepare_data)
# Loop with start confirmation - user must confirm to start the loop
# If rejected, the whole loop is skipped and the workflow moves to finalize.
refinement_loop = Loop(
    name="refinement_loop",
    steps=[Step(name="refine_analysis", executor=refine_analysis)],
    max_iterations=5,
    requires_confirmation=True,
    confirmation_message="Start the refinement loop? This may take several iterations.",
)
finalize_step = Step(name="finalize_results", executor=finalize_results)
# Create workflow with database for HITL persistence
# (a db is required so paused runs survive between run() and continue_run())
workflow = Workflow(
    name="loop_start_confirmation_demo",
    steps=[prepare_step, refinement_loop, finalize_step],
    db=SqliteDb(db_file="tmp/loop_hitl.db"),
)
if __name__ == "__main__":
    print("=" * 60)
    print("Loop with Start Confirmation HITL Example")
    print("=" * 60)
    # Non-streaming run: returns the WorkflowRunOutput directly
    run_output = workflow.run("Process quarterly data")
    # Handle HITL pauses
    # Loop until the run is no longer paused (confirmed/rejected + continued)
    while run_output.is_paused:
        # Handle Step requirements (confirmation for loop start)
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[DECISION POINT] {requirement.step_name}")
            print(f"[HITL] {requirement.confirmation_message}")
            user_choice = input("\nStart the loop? (yes/no): ").strip().lower()
            if user_choice in ("yes", "y"):
                requirement.confirm()
                print("[HITL] Confirmed - starting loop")
            else:
                # Rejection skips the loop entirely; later steps still run
                requirement.reject()
                print("[HITL] Rejected - skipping loop")
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/loop/01_loop_confirmation.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/loop/02_loop_confirmation_streaming.py | """
Loop with User Confirmation HITL Example (Streaming)
This example demonstrates start confirmation for Loop components with streaming.
When `requires_confirmation=True`:
- Pauses before the first iteration
- User confirms -> execute loop
- User rejects -> skip loop entirely
Streaming mode emits events like:
- WorkflowStartedEvent
- StepPausedEvent (when loop requires confirmation)
- StepStartedEvent / StepCompletedEvent
- WorkflowCompletedEvent
Key difference from non-streaming:
- Get WorkflowRunOutput from session after streaming completes
"""
from agno.db.sqlite import SqliteDb
from agno.run.workflow import (
StepCompletedEvent,
StepPausedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowStartedEvent,
)
from agno.workflow.loop import Loop
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step functions
# ============================================================
def prepare_data(step_input: StepInput) -> StepOutput:
    """Stage the data ahead of the refinement loop."""
    text = (
        "Data prepared for iterative processing.\n"
        "Ready to begin refinement loop."
    )
    return StepOutput(content=text)
def refine_analysis(step_input: StepInput) -> StepOutput:
    """Run one refinement pass and report simulated quality metrics."""
    # Iteration counter presumably set by the loop runtime; defaults to 1
    count = getattr(step_input, "_iteration_count", 1)
    return StepOutput(
        content=(
            f"Iteration {count} complete:\n"
            f"- Quality score: {70 + count * 10}%\n"
            f"- Improvements made: {count * 3}\n"
            "- Further refinement possible"
        )
    )
def finalize_results(step_input: StepInput) -> StepOutput:
    """Wrap the loop's output in a final-results banner."""
    body = step_input.previous_step_content or "No iterations"
    return StepOutput(
        content=f"=== FINAL RESULTS ===\n\n{body}\n\nProcessing complete."
    )
# Define the steps
prepare_step = Step(name="prepare_data", executor=prepare_data)
# Loop with start confirmation - user must confirm to start the loop
# Rejecting skips the loop entirely; the finalize step still runs.
refinement_loop = Loop(
    name="refinement_loop",
    steps=[Step(name="refine_analysis", executor=refine_analysis)],
    max_iterations=5,
    requires_confirmation=True,
    confirmation_message="Start the refinement loop? This may take several iterations.",
)
finalize_step = Step(name="finalize_results", executor=finalize_results)
# Create workflow with database for HITL persistence
# (needed so the paused run can be resumed by continue_run)
workflow = Workflow(
    name="loop_start_confirmation_streaming_demo",
    steps=[prepare_step, refinement_loop, finalize_step],
    db=SqliteDb(db_file="tmp/loop_hitl_streaming.db"),
)
def process_event_stream(event_stream):
    """Consume a workflow event stream, printing one line per known event."""
    for ev in event_stream:
        if isinstance(ev, WorkflowStartedEvent):
            print("\n[EVENT] Workflow started")
        elif isinstance(ev, StepStartedEvent):
            print(f"[EVENT] Step started: {ev.step_name}")
        elif isinstance(ev, StepPausedEvent):
            # Paused events carry the HITL confirmation details
            print(f"\n[EVENT] Step paused: {ev.step_name}")
            print(f"  Requires confirmation: {ev.requires_confirmation}")
            if ev.confirmation_message:
                print(f"  Message: {ev.confirmation_message}")
        elif isinstance(ev, StepCompletedEvent):
            print(f"[EVENT] Step completed: {ev.step_name}")
        elif isinstance(ev, WorkflowCompletedEvent):
            print("\n[EVENT] Workflow completed")
def handle_confirmation_hitl(run_output):
    """Ask the user to confirm or reject each step awaiting confirmation."""
    for req in run_output.steps_requiring_confirmation:
        print(f"\n[DECISION POINT] {req.step_name}")
        print(f"[HITL] {req.confirmation_message}")
        answer = input("\nStart the loop? (yes/no): ").strip().lower()
        # Anything other than an explicit yes counts as a rejection
        if answer not in {"yes", "y"}:
            req.reject()
            print("[HITL] Rejected - skipping loop")
        else:
            req.confirm()
            print("[HITL] Confirmed - starting loop")
def main():
    """Run the streaming loop-confirmation demo end to end.

    Streams the initial run, then — while the run is paused — collects the
    user's confirm/reject decision and continues the run with streaming.
    """
    banner = "=" * 60
    print(banner)
    print("Loop with Start Confirmation HITL Example (Streaming)")
    print(banner)
    # Kick off the run in streaming mode and drain its events
    initial_events = workflow.run(
        "Process quarterly data", stream=True, stream_events=True
    )
    process_event_stream(initial_events)
    # Streaming yields events only; fetch the run output from the session
    session = workflow.get_session()
    run_output = session.runs[-1] if session and session.runs else None
    while run_output and run_output.is_paused:
        handle_confirmation_hitl(run_output)
        print("\n[INFO] Continuing workflow with streaming...\n")
        resumed_events = workflow.continue_run(
            run_output, stream=True, stream_events=True
        )
        process_event_stream(resumed_events)
        # Refresh the run output after the continuation finishes
        session = workflow.get_session()
        run_output = session.runs[-1] if session and session.runs else None
    print("\n" + banner)
    if run_output is None:
        print("No output received")
        print(banner)
    else:
        print(f"Status: {run_output.status}")
        print(banner)
        print(run_output.content)
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/loop/02_loop_confirmation_streaming.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/router/01_router_user_selection.py | """
Router with User Selection HITL Example
This example demonstrates how to create a user-driven decision tree using
a Router where the user selects which path to take at runtime.
The Router with HITL pattern is powerful for:
- Interactive wizards
- User-driven workflows
- Decision trees with human judgment
- Dynamic routing based on user preferences
Flow:
1. Analyze data (automatic step)
2. User chooses analysis type via Router HITL
3. Execute the chosen analysis path
4. Generate report (automatic step)
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step 1: Analyze data (automatic)
# ============================================================
def analyze_data(step_input: StepInput) -> StepOutput:
    """Report a mock data analysis and prompt for the next step."""
    topic = step_input.input or "data"
    summary = (
        f"Analysis complete for '{topic}':\n"
        "- Found 1000 records\n"
        "- Data quality: Good\n"
        "- Ready for processing\n\n"
        "Please choose how you'd like to proceed."
    )
    return StepOutput(content=summary)
# ============================================================
# Router Choice Steps
# ============================================================
def quick_analysis(step_input: StepInput) -> StepOutput:
    """Produce the (mock) fast-path analysis summary."""
    summary = (
        "Quick Analysis Results:\n"
        "- Summary statistics computed\n"
        "- Basic trends identified\n"
        "- Processing time: 2 minutes\n"
        "- Confidence: 85%"
    )
    return StepOutput(content=summary)
def deep_analysis(step_input: StepInput) -> StepOutput:
    """Produce the (mock) thorough analysis summary."""
    summary = (
        "Deep Analysis Results:\n"
        "- Comprehensive statistical analysis\n"
        "- Pattern recognition applied\n"
        "- Anomaly detection completed\n"
        "- Correlation matrix generated\n"
        "- Processing time: 10 minutes\n"
        "- Confidence: 97%"
    )
    return StepOutput(content=summary)
def custom_analysis(step_input: StepInput) -> StepOutput:
    """Produce an analysis summary driven by user-supplied parameters."""
    # additional_data may be absent; HITL answers arrive under "user_input"
    extra = step_input.additional_data or {}
    answers = extra.get("user_input", {})
    params = answers.get("custom_params", "default parameters")
    summary = (
        f"Custom Analysis Results:\n"
        f"- Custom parameters applied: {params}\n"
        "- Tailored analysis completed\n"
        "- Processing time: varies\n"
        "- Confidence: based on parameters"
    )
    return StepOutput(content=summary)
# ============================================================
# Step 4: Generate report (automatic)
# ============================================================
def generate_report(step_input: StepInput) -> StepOutput:
    """Wrap the chosen analysis output in a final report."""
    body = step_input.previous_step_content or "No results"
    report = (
        f"=== FINAL REPORT ===\n\n{body}\n\n"
        "Report generated successfully.\n"
        "Thank you for using the analysis workflow!"
    )
    return StepOutput(content=report)
# Define the analysis step
analyze_step = Step(name="analyze_data", executor=analyze_data)
# Define the Router with HITL - user selects which analysis to perform
# Descriptions are shown to the user as the selectable options.
analysis_router = Router(
    name="analysis_type_router",
    choices=[
        Step(
            name="quick_analysis",
            description="Fast analysis with basic insights (2 min)",
            executor=quick_analysis,
        ),
        Step(
            name="deep_analysis",
            description="Comprehensive analysis with full details (10 min)",
            executor=deep_analysis,
        ),
        Step(
            name="custom_analysis",
            description="Custom analysis with your parameters",
            executor=custom_analysis,
        ),
    ],
    requires_user_input=True,
    user_input_message="Select the type of analysis to perform:",
    allow_multiple_selections=False,  # Only one analysis type at a time
)
# Define the report step
report_step = Step(name="generate_report", executor=generate_report)
# Create workflow
# The db enables pausing at the router and resuming via continue_run.
workflow = Workflow(
    name="user_driven_analysis",
    db=SqliteDb(db_file="tmp/workflow_router_hitl.db"),
    steps=[analyze_step, analysis_router, report_step],
)
if __name__ == "__main__":
    print("=" * 60)
    print("User-Driven Analysis Workflow with Router HITL")
    print("=" * 60)
    run_output = workflow.run("Q4 sales data")
    # Handle HITL pauses
    # A run may pause multiple times (e.g. router choice, then step input),
    # so keep answering requirements until it is no longer paused.
    while run_output.is_paused:
        # Handle Router requirements (user selection)
        # with requires_route_selection=True
        for requirement in run_output.steps_requiring_route:
            print(f"\n[DECISION POINT] Router: {requirement.step_name}")
            print(f"[HITL] {requirement.user_input_message}")
            # Show available choices
            print("\nAvailable options:")
            for choice in requirement.available_choices or []:
                print(f"  - {choice}")
            # Get user selection
            selection = input("\nEnter your choice: ").strip()
            if selection:
                requirement.select(selection)
                print(f"\n[HITL] Selected: {selection}")
        # Handle Step requirements (confirmation or user input)
        for requirement in run_output.steps_requiring_user_input:
            print(f"\n[HITL] Step: {requirement.step_name}")
            print(f"[HITL] {requirement.user_input_message}")
            if requirement.user_input_schema:
                user_values = {}
                # Collect one value per declared schema field; blanks skipped
                for field in requirement.user_input_schema:
                    required_marker = "*" if field.required else ""
                    if field.description:
                        print(f"  ({field.description})")
                    value = input(f"{field.name}{required_marker}: ").strip()
                    if value:
                        user_values[field.name] = value
                requirement.set_user_input(**user_values)
        for requirement in run_output.steps_requiring_confirmation:
            print(
                f"\n[HITL] {requirement.step_name}: {requirement.confirmation_message}"
            )
            if input("Continue? (yes/no): ").strip().lower() in ("yes", "y"):
                requirement.confirm()
            else:
                requirement.reject()
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/router/01_router_user_selection.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/router/02_router_multi_selection.py | """
Router with Multiple Selection HITL Example
This example demonstrates how to let users select MULTIPLE paths to execute
in sequence using a Router with allow_multiple_selections=True.
Use cases:
- Build-your-own pipeline (user picks which analyses to run)
- Multi-step processing where user controls the steps
- Customizable workflows with optional components
Flow:
1. Collect data (automatic)
2. User selects one or more processing steps via Router HITL
3. Execute ALL selected steps in sequence (chained)
4. Summarize results (automatic)
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step 1: Collect data (automatic)
# ============================================================
def collect_data(step_input: StepInput) -> StepOutput:
    """Load (mock) data and prompt the user to pick processing steps."""
    subject = step_input.input or "dataset"
    report = (
        f"Data collected for '{subject}':\n"
        "- 5000 records loaded\n"
        "- Schema validated\n"
        "- Ready for processing\n\n"
        "Select which processing steps to apply (you can choose multiple)."
    )
    return StepOutput(content=report)
# ============================================================
# Router Choice Steps - User can select multiple
# ============================================================
def _append_section(previous: str, title: str, bullets) -> str:
    """Append a bracketed [TITLE] section of bullet lines to prior output."""
    return f"{previous}\n\n" + "\n".join([f"[{title}]"] + list(bullets))
def clean_data(step_input: StepInput) -> StepOutput:
    """Clean and normalize the data."""
    return StepOutput(
        content=_append_section(
            step_input.previous_step_content or "",
            "CLEANING",
            [
                "- Removed 150 duplicate records",
                "- Fixed 23 null values",
                "- Standardized date formats",
                "- Data cleaning complete",
            ],
        )
    )
def validate_data(step_input: StepInput) -> StepOutput:
    """Validate data integrity."""
    return StepOutput(
        content=_append_section(
            step_input.previous_step_content or "",
            "VALIDATION",
            [
                "- Schema validation: PASSED",
                "- Referential integrity: PASSED",
                "- Business rules check: PASSED",
                "- Data validation complete",
            ],
        )
    )
def enrich_data(step_input: StepInput) -> StepOutput:
    """Enrich data with additional information."""
    return StepOutput(
        content=_append_section(
            step_input.previous_step_content or "",
            "ENRICHMENT",
            [
                "- Added geographic coordinates",
                "- Appended demographic data",
                "- Calculated derived metrics",
                "- Data enrichment complete",
            ],
        )
    )
def transform_data(step_input: StepInput) -> StepOutput:
    """Transform data for analysis."""
    return StepOutput(
        content=_append_section(
            step_input.previous_step_content or "",
            "TRANSFORMATION",
            [
                "- Normalized numeric columns",
                "- One-hot encoded categories",
                "- Created feature vectors",
                "- Data transformation complete",
            ],
        )
    )
def aggregate_data(step_input: StepInput) -> StepOutput:
    """Aggregate data for reporting."""
    return StepOutput(
        content=_append_section(
            step_input.previous_step_content or "",
            "AGGREGATION",
            [
                "- Grouped by region and time",
                "- Calculated summary statistics",
                "- Built pivot tables",
                "- Data aggregation complete",
            ],
        )
    )
# ============================================================
# Step 4: Summarize results (automatic)
# ============================================================
def summarize_results(step_input: StepInput) -> StepOutput:
    """Wrap the accumulated pipeline output in a summary banner."""
    pipeline_log = step_input.previous_step_content or "No processing performed"
    summary = (
        f"=== PROCESSING SUMMARY ===\n\n{pipeline_log}\n\n"
        "=== END OF PIPELINE ===\n"
        "All selected processing steps completed successfully."
    )
    return StepOutput(content=summary)
# Define steps
collect_step = Step(name="collect_data", executor=collect_data)
# Define the Router with HITL - user can select MULTIPLE steps
# Selected steps run in sequence, each appending to the previous output.
processing_router = Router(
    name="processing_pipeline",
    choices=[
        Step(
            name="clean",
            description="Clean and normalize data (remove duplicates, fix nulls)",
            executor=clean_data,
        ),
        Step(
            name="validate",
            description="Validate data integrity and business rules",
            executor=validate_data,
        ),
        Step(
            name="enrich",
            description="Enrich with external data sources",
            executor=enrich_data,
        ),
        Step(
            name="transform",
            description="Transform for ML/analysis (normalize, encode)",
            executor=transform_data,
        ),
        Step(
            name="aggregate",
            description="Aggregate for reporting (group, summarize)",
            executor=aggregate_data,
        ),
    ],
    requires_user_input=True,
    user_input_message="Select processing steps to apply (comma-separated for multiple):",
    allow_multiple_selections=True,  # KEY: Allow selecting multiple steps
)
summary_step = Step(name="summarize", executor=summarize_results)
# Create workflow
# The db enables pausing at the router and resuming via continue_run.
workflow = Workflow(
    name="multi_step_processing",
    db=SqliteDb(db_file="tmp/workflow_router_multi.db"),
    steps=[collect_step, processing_router, summary_step],
)
if __name__ == "__main__":
    print("=" * 60)
    print("Multi-Selection Router HITL Example")
    print("=" * 60)
    run_output = workflow.run("customer transactions")
    # Handle HITL pauses
    # Keep answering requirements until the run is no longer paused.
    while run_output.is_paused:
        # Handle Router requirements (user selection)
        # Note: Router selection requirements are now unified into step_requirements
        for requirement in run_output.steps_requiring_route:
            print(f"\n[DECISION POINT] Router: {requirement.step_name}")
            print(f"[HITL] {requirement.user_input_message}")
            # Show available choices with descriptions
            print("\nAvailable processing steps:")
            for i, choice in enumerate(requirement.available_choices or [], 1):
                print(f"  {i}. {choice}")
            if requirement.allow_multiple_selections:
                print("\nTip: Enter multiple choices separated by commas")
                print("Example: clean, validate, transform")
            # Get user selection(s)
            selection = input("\nEnter your choice(s): ").strip()
            if selection:
                # Handle comma-separated selections
                selections = [s.strip() for s in selection.split(",")]
                if len(selections) > 1:
                    requirement.select_multiple(
                        selections
                    )  # Use select_multiple for list
                    print(f"\n[HITL] Selected {len(selections)} steps: {selections}")
                else:
                    requirement.select(selections[0])  # Single selection
                    print(f"\n[HITL] Selected: {selections[0]}")
        # Handle Step requirements if any
        for requirement in run_output.steps_requiring_user_input:
            print(f"\n[HITL] Step: {requirement.step_name}")
            print(f"[HITL] {requirement.user_input_message}")
            if requirement.user_input_schema:
                user_values = {}
                # Collect one value per declared schema field; blanks skipped
                for field in requirement.user_input_schema:
                    value = input(f"{field.name}: ").strip()
                    if value:
                        user_values[field.name] = value
                requirement.set_user_input(**user_values)
        for requirement in run_output.steps_requiring_confirmation:
            print(
                f"\n[HITL] {requirement.step_name}: {requirement.confirmation_message}"
            )
            if input("Continue? (yes/no): ").strip().lower() in ("yes", "y"):
                requirement.confirm()
            else:
                requirement.reject()
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/router/02_router_multi_selection.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/router/03_router_nested_choices.py | """
Router with Nested Choices HITL Example
This example demonstrates how to use HITL with nested step lists in Router choices.
When choices contain nested lists like [step_a, [step_b, step_c]], the nested list
becomes a Steps container that executes ALL steps in sequence when selected.
Use cases:
- Pre-defined pipelines that user can choose from
- "Packages" of processing steps (e.g., "Basic", "Standard", "Premium")
- Workflow templates where user picks a complete flow
Flow:
1. Receive input (automatic)
2. User selects a processing package (single step OR a sequence of steps)
3. Execute the selected package (if nested, all steps run in sequence)
4. Generate output (automatic)
Key concept:
- choices=[step_a, [step_b, step_c], step_d]
- "step_a" -> executes just step_a
- "steps_group_1" -> executes step_b THEN step_c (chained)
- "step_d" -> executes just step_d
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.steps import Steps
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step 1: Receive input (automatic)
# ============================================================
def receive_input(step_input: StepInput) -> StepOutput:
    """Acknowledge the input and prompt for a package selection."""
    subject = step_input.input or "document"
    prompt = (
        f"Input received: '{subject}'\n"
        "Ready for processing.\n\n"
        "Please select a processing package."
    )
    return StepOutput(content=prompt)
# ============================================================
# Individual processing steps
# ============================================================
def _stage_block(previous: str, label: str, notes) -> str:
    """Append a bracketed [LABEL] block of note lines to prior output."""
    return f"{previous}\n\n" + "\n".join([f"[{label}]"] + list(notes))
def quick_scan(step_input: StepInput) -> StepOutput:
    """Quick scan - fast but basic."""
    return StepOutput(
        content=_stage_block(
            step_input.previous_step_content or "",
            "QUICK SCAN",
            [
                "- Surface-level analysis",
                "- Key points extracted",
                "- Processing time: 30 seconds",
            ],
        )
    )
def deep_analysis(step_input: StepInput) -> StepOutput:
    """Deep analysis - thorough examination."""
    return StepOutput(
        content=_stage_block(
            step_input.previous_step_content or "",
            "DEEP ANALYSIS",
            [
                "- Comprehensive examination",
                "- Pattern detection applied",
                "- Processing time: 5 minutes",
            ],
        )
    )
def quality_check(step_input: StepInput) -> StepOutput:
    """Quality check - verify results."""
    return StepOutput(
        content=_stage_block(
            step_input.previous_step_content or "",
            "QUALITY CHECK",
            [
                "- Results validated",
                "- Accuracy verified: 98%",
                "- Processing time: 1 minute",
            ],
        )
    )
def format_output(step_input: StepInput) -> StepOutput:
    """Format output - prepare final results."""
    return StepOutput(
        content=_stage_block(
            step_input.previous_step_content or "",
            "FORMAT OUTPUT",
            [
                "- Results formatted",
                "- Report generated",
                "- Processing time: 30 seconds",
            ],
        )
    )
def archive_results(step_input: StepInput) -> StepOutput:
    """Archive results - store for future reference."""
    return StepOutput(
        content=_stage_block(
            step_input.previous_step_content or "",
            "ARCHIVE",
            [
                "- Results archived",
                "- Backup created",
                "- Processing time: 15 seconds",
            ],
        )
    )
# ============================================================
# Final step (automatic)
# ============================================================
def finalize(step_input: StepInput) -> StepOutput:
    """Finalize and return results."""
    summary = step_input.previous_step_content or "No processing performed"
    final_text = (
        "=== FINAL RESULTS ===\n\n" + summary + "\n\n=== PROCESSING COMPLETE ==="
    )
    return StepOutput(content=final_text)
# Define individual steps
quick_scan_step = Step(
    name="quick_scan", description="Fast surface-level scan (30s)", executor=quick_scan
)
# Define step sequences as Steps containers with descriptive names
standard_package = Steps(
    name="standard_package",
    description="Standard processing: Deep Analysis + Quality Check (6 min)",
    steps=[
        Step(name="deep_analysis", executor=deep_analysis),
        Step(name="quality_check", executor=quality_check),
    ],
)
# Premium extends the standard pipeline with formatting and archiving.
premium_package = Steps(
    name="premium_package",
    description="Premium processing: Deep Analysis + Quality Check + Format + Archive (8 min)",
    steps=[
        Step(name="deep_analysis", executor=deep_analysis),
        Step(name="quality_check", executor=quality_check),
        Step(name="format_output", executor=format_output),
        Step(name="archive_results", executor=archive_results),
    ],
)
# Create workflow with Router HITL
# User can select:
# - "quick_scan" -> runs just quick_scan
# - "standard_package" -> runs deep_analysis THEN quality_check
# - "premium_package" -> runs deep_analysis THEN quality_check THEN format_output THEN archive_results
workflow = Workflow(
    name="package_selection_workflow",
    # Database persistence is what allows the paused run to be resumed later.
    db=SqliteDb(db_file="tmp/workflow_router_nested.db"),
    steps=[
        Step(name="receive_input", executor=receive_input),
        Router(
            name="package_selector",
            choices=[
                quick_scan_step,  # Single step
                standard_package,  # Steps container (2 steps)
                premium_package,  # Steps container (4 steps)
            ],
            requires_user_input=True,
            user_input_message="Select a processing package:",
            allow_multiple_selections=False,  # Pick ONE package
        ),
        Step(name="finalize", executor=finalize),
    ],
)
# Alternative: Using nested lists directly (auto-converted to Steps containers)
# Note: Auto-generated names like "steps_group_0" are less descriptive
workflow_with_nested_lists = Workflow(
    name="nested_list_workflow",
    db=SqliteDb(db_file="tmp/workflow_router_nested_alt.db"),
    steps=[
        Step(name="receive_input", executor=receive_input),
        Router(
            name="package_selector",
            choices=[
                Step(
                    name="quick_scan",
                    description="Fast scan (30s)",
                    executor=quick_scan,
                ),
                # Nested list -> becomes "steps_group_1" Steps container
                [
                    Step(name="deep_analysis", executor=deep_analysis),
                    Step(name="quality_check", executor=quality_check),
                ],
                # Nested list -> becomes "steps_group_2" Steps container
                [
                    Step(name="deep_analysis", executor=deep_analysis),
                    Step(name="quality_check", executor=quality_check),
                    Step(name="format_output", executor=format_output),
                    Step(name="archive_results", executor=archive_results),
                ],
            ],
            requires_user_input=True,
            user_input_message="Select a processing option:",
        ),
        # finalize always runs after whichever choice executes.
        Step(name="finalize", executor=finalize),
    ],
)
if __name__ == "__main__":
    print("=" * 60)
    print("Router with Nested Choices (Pre-defined Packages)")
    print("=" * 60)
    print("\nThis example shows how to offer 'packages' of steps.")
    print("Each package can be a single step or a sequence of steps.\n")
    # The run pauses at the Router (requires_user_input=True) until a
    # package is selected.
    run_output = workflow.run("quarterly report")
    # Handle HITL pauses
    while run_output.is_paused:
        # Handle Router requirements (user selection)
        for requirement in run_output.steps_requiring_route:
            print(f"\n[DECISION POINT] {requirement.step_name}")
            print(f"[HITL] {requirement.user_input_message}")
            # Show available packages
            print("\nAvailable packages:")
            for i, choice in enumerate(requirement.available_choices or [], 1):
                # Get description if available from the router's choices
                print(f" {i}. {choice}")
            print("\nPackage details:")
            print(" - quick_scan: Fast surface-level scan (30s)")
            print(" - standard_package: Deep Analysis + Quality Check (6 min)")
            print(" - premium_package: Full pipeline with archiving (8 min)")
            selection = input("\nEnter your choice: ").strip()
            # NOTE(review): an empty selection leaves the requirement
            # unanswered, so the loop will prompt again next pass.
            if selection:
                requirement.select(selection)
                print(f"\n[HITL] Selected package: {selection}")
        for requirement in run_output.steps_requiring_confirmation:
            print(
                f"\n[HITL] {requirement.step_name}: {requirement.confirmation_message}"
            )
            if input("Continue? (yes/no): ").strip().lower() in ("yes", "y"):
                requirement.confirm()
            else:
                requirement.reject()
        # Resume from the pause point; loop again if the run pauses anew.
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/router/03_router_nested_choices.py",
"license": "Apache License 2.0",
"lines": 214,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/router/04_router_confirmation.py | """
Router with Confirmation HITL Example
This example demonstrates the confirmation mode for Router components,
which is different from the user selection mode.
When `requires_confirmation=True` on a Router (with a selector):
- The selector determines which steps to run
- User is asked to confirm before executing those steps
- User confirms -> Execute the selected steps
- User rejects -> Skip the router entirely
This is useful for:
- Confirming automated routing decisions
- Human oversight of programmatic selections
- Safety checks before executing routed steps
Note: This is different from `requires_user_input=True` which lets the user
choose which steps to execute. Here, the selector chooses, but user confirms.
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step functions
# ============================================================
def analyze_request(step_input: StepInput) -> StepOutput:
    """Analyze the request to determine routing."""
    query = step_input.input or "general request"
    lowered = query.lower()
    # First matching keyword wins; "urgent" takes precedence over "billing".
    if "urgent" in lowered:
        detected = "urgent"
    elif "billing" in lowered:
        detected = "billing"
    else:
        detected = "general"
    summary = (
        "Request analyzed:\n"
        f"- Query: {query}\n"
        f"- Detected category: {detected}\n"
        "- Ready for routing"
    )
    return StepOutput(content=summary)
def handle_urgent(step_input: StepInput) -> StepOutput:
    """Handle urgent requests."""
    lines = [
        "Urgent Request Handling:",
        "- Priority escalation initiated",
        "- Immediate response generated",
        "- Notification sent to on-call team",
    ]
    return StepOutput(content="\n".join(lines))
def handle_billing(step_input: StepInput) -> StepOutput:
    """Handle billing requests."""
    lines = [
        "Billing Request Handling:",
        "- Account details retrieved",
        "- Billing history analyzed",
        "- Response prepared",
    ]
    return StepOutput(content="\n".join(lines))
def handle_general(step_input: StepInput) -> StepOutput:
    """Handle general requests."""
    lines = [
        "General Request Handling:",
        "- Standard processing applied",
        "- Response generated",
    ]
    return StepOutput(content="\n".join(lines))
def finalize_response(step_input: StepInput) -> StepOutput:
    """Finalize the response."""
    handled = step_input.previous_step_content or "No handling"
    final_text = (
        "=== FINAL RESPONSE ===\n\n" + handled + "\n\nRequest processed successfully."
    )
    return StepOutput(content=final_text)
# Selector function that determines routing based on previous step content
def route_by_category(step_input: StepInput) -> str:
    """Route based on detected category in previous step."""
    upstream = (step_input.previous_step_content or "").lower()
    # Priority mirrors analyze_request: urgent beats billing beats general.
    if "urgent" in upstream:
        return "handle_urgent"
    if "billing" in upstream:
        return "handle_billing"
    return "handle_general"
# Define the steps
analyze_step = Step(name="analyze_request", executor=analyze_request)
# Router with confirmation - selector chooses, user confirms
request_router = Router(
    name="request_router",
    choices=[
        Step(
            name="handle_urgent",
            description="Handle urgent requests",
            executor=handle_urgent,
        ),
        Step(
            name="handle_billing",
            description="Handle billing requests",
            executor=handle_billing,
        ),
        Step(
            name="handle_general",
            description="Handle general requests",
            executor=handle_general,
        ),
    ],
    # route_by_category picks the handler; the user only confirms or rejects.
    selector=route_by_category,
    requires_confirmation=True,
    confirmation_message="The system has selected a handler. Proceed with the routed action?",
)
finalize_step = Step(name="finalize_response", executor=finalize_response)
# Create workflow with database for HITL persistence
workflow = Workflow(
    name="router_confirmation_demo",
    steps=[analyze_step, request_router, finalize_step],
    db=SqliteDb(db_file="tmp/router_hitl.db"),
)
if __name__ == "__main__":
    print("=" * 60)
    print("Router with Confirmation HITL Example")
    print("=" * 60)
    print("The selector will choose the route, but you must confirm.")
    print()
    # Test with an urgent request
    # "URGENT" matches the selector's case-insensitive check -> handle_urgent.
    run_output = workflow.run("URGENT: System is down!")
    # Handle HITL pauses
    while run_output.is_paused:
        # Handle Step requirements (confirmation for router)
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[ROUTING DECISION] {requirement.step_name}")
            print(f"[HITL] {requirement.confirmation_message}")
            user_choice = input("\nProceed with routing? (yes/no): ").strip().lower()
            if user_choice in ("yes", "y"):
                requirement.confirm()
                print("[HITL] Confirmed - executing routed steps")
            else:
                requirement.reject()
                print("[HITL] Rejected - skipping router")
        # Resume from the pause point; loop again if the run pauses anew.
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/router/04_router_confirmation.py",
"license": "Apache License 2.0",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/steps/01_steps_pipeline_confirmation.py | """
Steps Pipeline with User Confirmation HITL Example
This example demonstrates how to use HITL with a Steps component,
allowing the user to confirm before executing an entire pipeline of steps.
When `requires_confirmation=True` on a Steps component:
- User confirms -> Execute all steps in the pipeline
- User rejects -> Skip the entire pipeline
This is useful for:
- Optional processing pipelines
- Expensive/time-consuming step groups
- User-controlled workflow sections
"""
from agno.db.sqlite import SqliteDb
from agno.workflow.step import Step
from agno.workflow.steps import Steps
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# ============================================================
# Step functions
# ============================================================
def collect_data(step_input: StepInput) -> StepOutput:
    """Collect initial data."""
    summary = "\n".join(
        [
            "Data collection complete:",
            "- 1000 records gathered",
            "- Ready for optional advanced processing",
        ]
    )
    return StepOutput(content=summary)
# Advanced processing pipeline steps
def validate_data(step_input: StepInput) -> StepOutput:
    """Validate the data."""
    lines = [
        "Validation complete:",
        "- Schema validation passed",
        "- Data integrity verified",
    ]
    return StepOutput(content="\n".join(lines))
def transform_data(step_input: StepInput) -> StepOutput:
    """Transform the data."""
    lines = ["Transformation complete:", "- Data normalized", "- Outliers handled"]
    return StepOutput(content="\n".join(lines))
def enrich_data(step_input: StepInput) -> StepOutput:
    """Enrich the data with additional information."""
    lines = [
        "Enrichment complete:",
        "- External data merged",
        "- Derived fields computed",
    ]
    return StepOutput(content="\n".join(lines))
def generate_report(step_input: StepInput) -> StepOutput:
    """Generate final report."""
    upstream = step_input.previous_step_content or "Basic data"
    report = (
        "=== FINAL REPORT ===\n\n" + upstream + "\n\nReport generated successfully."
    )
    return StepOutput(content=report)
# Define the steps
collect_step = Step(name="collect_data", executor=collect_data)
# Steps pipeline with HITL confirmation
# User must confirm to run this entire pipeline
advanced_processing = Steps(
    name="advanced_processing_pipeline",
    steps=[
        Step(name="validate_data", executor=validate_data),
        Step(name="transform_data", executor=transform_data),
        Step(name="enrich_data", executor=enrich_data),
    ],
    # Rejecting skips all three inner steps as a unit (see module docstring).
    requires_confirmation=True,
    confirmation_message="Run advanced processing pipeline? (This includes validation, transformation, and enrichment)",
)
report_step = Step(name="generate_report", executor=generate_report)
# Create workflow with database for HITL persistence
workflow = Workflow(
    name="steps_pipeline_confirmation_demo",
    steps=[collect_step, advanced_processing, report_step],
    db=SqliteDb(db_file="tmp/steps_hitl.db"),
)
if __name__ == "__main__":
    print("=" * 60)
    print("Steps Pipeline with User Confirmation HITL Example")
    print("=" * 60)
    run_output = workflow.run("Process quarterly data")
    # Handle HITL pauses
    while run_output.is_paused:
        # Handle Step requirements (confirmation for pipeline)
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[DECISION POINT] {requirement.step_name}")
            print(f"[HITL] {requirement.confirmation_message}")
            user_choice = input("\nRun this pipeline? (yes/no): ").strip().lower()
            if user_choice in ("yes", "y"):
                requirement.confirm()
                print("[HITL] Confirmed - executing pipeline")
            else:
                requirement.reject()
                print("[HITL] Rejected - skipping pipeline")
        # Resume from the pause point; loop again if the run pauses anew.
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/steps/01_steps_pipeline_confirmation.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/user_input/01_basic_user_input.py | """
Basic User Input HITL Example
This example demonstrates how to pause a workflow to collect user input
before executing a step. The user input is then available to the step
via step_input.additional_data["user_input"].
Use case: Collecting parameters from the user before processing data.
Two ways to define user_input_schema:
1. List of UserInputField objects (recommended) - explicit and type-safe
2. List of dicts - simple but less explicit
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.workflow.decorators import pause
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput, UserInputField
from agno.workflow.workflow import Workflow
# Step 1: Analyze data (no HITL)
def analyze_data(step_input: StepInput) -> StepOutput:
    """Analyze the data and provide summary."""
    query = step_input.input or "data"
    summary = (
        f"Analysis complete: Found 1000 records matching '{query}'. "
        "Ready for processing with user-specified parameters."
    )
    return StepOutput(content=summary)
# Step 2: Process with user-provided parameters (HITL - user input)
# Using UserInputField for schema - explicit and type-safe
@pause(
    name="Process Data",
    requires_user_input=True,
    user_input_message="Please provide processing parameters:",
    user_input_schema=[
        UserInputField(
            name="threshold",
            field_type="float",
            description="Processing threshold (0.0 to 1.0)",
            required=True,
        ),
        UserInputField(
            name="mode",
            field_type="str",
            description="Processing mode: 'fast' or 'accurate'",
            required=True,
        ),
        UserInputField(
            name="batch_size",
            field_type="int",
            description="Number of records per batch",
            required=False,
        ),
    ],
)
def process_with_params(step_input: StepInput) -> StepOutput:
    """Process data with user-provided parameters."""
    # User-supplied values arrive through additional_data["user_input"].
    params = {}
    if step_input.additional_data:
        params = step_input.additional_data.get("user_input", {})
    earlier = step_input.previous_step_content or ""
    report_lines = [
        "Processing complete!",
        f"- Input: {earlier}",
        f"- Threshold: {params.get('threshold', 0.5)}",
        f"- Mode: {params.get('mode', 'fast')}",
        f"- Batch size: {params.get('batch_size', 100)}",
        "- Records processed: 1000",
    ]
    return StepOutput(content="\n".join(report_lines))
# Step 3: Generate report (no HITL)
writer_agent = Agent(
    name="Report Writer",
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions=[
        "You are a report writer.",
        "Given processing results, write a brief summary report.",
        "Keep it concise - 2-3 sentences.",
    ],
)
# Define steps
analyze_step = Step(name="analyze_data", executor=analyze_data)
process_step = Step(
    name="process_data", executor=process_with_params
)  # @pause auto-detected
report_step = Step(name="generate_report", agent=writer_agent)
# Create workflow
workflow = Workflow(
    name="data_processing_with_params",
    # Database persistence is what allows the paused run to be resumed.
    db=PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai"),
    steps=[analyze_step, process_step, report_step],
)
if __name__ == "__main__":
    print("Starting data processing workflow...")
    print("=" * 50)
    run_output = workflow.run("customer transactions from Q4")
    # Handle HITL pauses
    while run_output.is_paused:
        # Show paused step info
        print(
            f"\n[PAUSED] Workflow paused at step {run_output.paused_step_index}: '{run_output.paused_step_name}'"
        )
        # Check for user input requirements
        for requirement in run_output.steps_requiring_user_input:
            print(f"\n[HITL] Step '{requirement.step_name}' requires user input")
            print(f"[HITL] {requirement.user_input_message}")
            # Display schema and collect input
            if requirement.user_input_schema:
                print("\nRequired fields:")
                user_values = {}
                for field in requirement.user_input_schema:
                    required_marker = "*" if field.required else ""
                    field_desc = f" - {field.description}" if field.description else ""
                    prompt = f" {field.name}{required_marker} ({field.field_type}){field_desc}: "
                    value = input(prompt).strip()
                    # Convert to appropriate type
                    # NOTE(review): int()/float() raise ValueError on malformed
                    # input; acceptable for an interactive demo script.
                    if value:
                        if field.field_type == "int":
                            user_values[field.name] = int(value)
                        elif field.field_type == "float":
                            user_values[field.name] = float(value)
                        elif field.field_type == "bool":
                            user_values[field.name] = value.lower() in (
                                "true",
                                "yes",
                                "1",
                            )
                        else:
                            user_values[field.name] = value
                # Set the user input
                requirement.set_user_input(**user_values)
                print("\n[HITL] User input received - continuing workflow...")
        # Check for confirmation requirements
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[HITL] Step '{requirement.step_name}' requires confirmation")
            print(f"[HITL] {requirement.confirmation_message}")
            user_input = input("\nContinue? (yes/no): ").strip().lower()
            if user_input in ("yes", "y"):
                requirement.confirm()
            else:
                requirement.reject()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 50)
    print(f"Status: {run_output.status}")
    print(f"Output:\n{run_output.content}")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/user_input/01_basic_user_input.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/user_input/02_step_user_input.py | """
Step-Level User Input HITL Example
This example demonstrates how to pause a workflow to collect user input
using Step parameters directly (without the @pause decorator).
This approach is useful when:
- Using agent-based steps that need user parameters
- You want to configure HITL at the Step level rather than on a function
- You need to override or add HITL to existing functions/agents
Use case: Collecting user preferences before an agent generates content.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput, UserInputField
from agno.workflow.workflow import Workflow
# Step 1: Gather context (no HITL)
def gather_context(step_input: StepInput) -> StepOutput:
    """Gather initial context from the input."""
    subject = step_input.input or "general topic"
    intro = (
        f"Context gathered for: '{subject}'\n"
        "Ready to generate content based on user preferences."
    )
    return StepOutput(content=intro)
# Step 2: Content generator agent (HITL configured on Step, not function)
# Note: User input from HITL is automatically appended to the message as "User preferences:"
# This is a plain agent definition; the HITL pause itself lives on the Step
# that wraps it, not here.
content_agent = Agent(
    name="Content Generator",
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions=[
        "You are a content generator.",
        "Generate content based on the topic and user preferences provided.",
        "The user preferences will be provided in the message - use them to guide your output.",
        "Respect the tone, length, and format specified by the user.",
        "Keep the output focused and professional.",
    ],
)
# Step 3: Format output (no HITL)
def format_output(step_input: StepInput) -> StepOutput:
    """Format the final output."""
    body = step_input.previous_step_content or "No content generated"
    wrapped = "=== GENERATED CONTENT ===\n\n" + body + "\n\n=== END ==="
    return StepOutput(content=wrapped)
# Define workflow with Step-level HITL configuration
workflow = Workflow(
    name="content_generation_workflow",
    # Database persistence is what allows the paused run to be resumed.
    db=SqliteDb(db_file="tmp/workflow_step_user_input.db"),
    steps=[
        Step(name="gather_context", executor=gather_context),
        # HITL configured directly on the Step using agent
        Step(
            name="generate_content",
            agent=content_agent,
            requires_user_input=True,
            user_input_message="Please provide your content preferences:",
            user_input_schema=[
                UserInputField(
                    name="tone",
                    field_type="str",
                    description="Tone of the content: 'formal', 'casual', or 'technical'",
                    required=True,
                ),
                UserInputField(
                    name="length",
                    field_type="str",
                    description="Content length: 'short' (1 para), 'medium' (3 para), or 'long' (5+ para)",
                    required=True,
                ),
                UserInputField(
                    name="include_examples",
                    field_type="bool",
                    description="Include practical examples?",
                    required=False,
                ),
            ],
        ),
        Step(name="format_output", executor=format_output),
    ],
)
# Alternative: Using executor function with Step-level HITL
def process_data(step_input: StepInput) -> StepOutput:
    """Process data with user-specified options."""
    # HITL-provided values arrive via additional_data["user_input"].
    options = {}
    if step_input.additional_data:
        options = step_input.additional_data.get("user_input", {})
    fmt = options.get("format", "json")
    with_meta = options.get("include_metadata", False)
    return StepOutput(
        content=f"Data processed with format: {fmt}, metadata: {with_meta}"
    )
workflow_with_executor = Workflow(
    name="data_processing_workflow",
    # Same Step-level HITL pattern as above, with a plain function executor.
    db=SqliteDb(db_file="tmp/workflow_step_executor_input.db"),
    steps=[
        Step(name="gather_context", executor=gather_context),
        # HITL on Step with a plain executor function
        Step(
            name="process_data",
            executor=process_data,
            requires_user_input=True,
            user_input_message="Configure data processing:",
            user_input_schema=[
                UserInputField(
                    name="format",
                    field_type="str",
                    description="Output format: 'json', 'csv', or 'xml'",
                    required=True,
                ),
                UserInputField(
                    name="include_metadata",
                    field_type="bool",
                    description="Include metadata in output?",
                    required=False,
                ),
            ],
        ),
        Step(name="format_output", executor=format_output),
    ],
)
if __name__ == "__main__":
    print("=" * 60)
    print("Step-Level User Input HITL Example")
    print("=" * 60)
    print("\nThis example uses Step parameters for HITL configuration.")
    print("No @pause decorator needed - configure directly on Step.\n")
    # Run the agent-based workflow
    run_output = workflow.run("Python async programming")
    # Handle HITL pauses
    while run_output.is_paused:
        # A run may pause more than once; handle all pending requirements
        # each pass, then continue.
        for requirement in run_output.steps_requiring_user_input:
            print(f"\n[HITL] Step '{requirement.step_name}' requires user input")
            print(f"[HITL] {requirement.user_input_message}")
            # Display schema and collect input
            if requirement.user_input_schema:
                print("\nFields (* = required):")
                user_values = {}
                for field in requirement.user_input_schema:
                    required_marker = "*" if field.required else ""
                    field_desc = f" - {field.description}" if field.description else ""
                    prompt = f" {field.name}{required_marker} ({field.field_type}){field_desc}: "
                    value = input(prompt).strip()
                    # Convert to appropriate type
                    if value:
                        if field.field_type == "int":
                            user_values[field.name] = int(value)
                        elif field.field_type == "float":
                            user_values[field.name] = float(value)
                        elif field.field_type == "bool":
                            user_values[field.name] = value.lower() in (
                                "true",
                                "yes",
                                "1",
                                "y",
                            )
                        else:
                            user_values[field.name] = value
                # Set the user input
                requirement.set_user_input(**user_values)
                print("\n[HITL] Preferences received - continuing workflow...")
        # Check for confirmation requirements (if any)
        for requirement in run_output.steps_requiring_confirmation:
            print(f"\n[HITL] Step '{requirement.step_name}' requires confirmation")
            print(f"[HITL] {requirement.confirmation_message}")
            confirm = input("\nContinue? (yes/no): ").strip().lower()
            if confirm in ("yes", "y"):
                requirement.confirm()
            else:
                requirement.reject()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
    print("\n" + "=" * 60)
    print(f"Status: {run_output.status}")
    print("=" * 60)
    print(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/user_input/02_step_user_input.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/04_workflows/_07_human_in_the_loop/user_input/03_step_user_input_streaming.py | """
Step-Level User Input HITL Example (Streaming)
This example demonstrates how to handle HITL with streaming workflows.
Key differences from non-streaming:
1. workflow.run(..., stream=True) returns an Iterator of events
2. stream_events=True is required to receive StepStartedEvent/StepCompletedEvent
3. Look for StepPausedEvent to detect HITL pauses
4. Events are processed as they stream in
5. Use workflow.continue_run(..., stream=True, stream_events=True) to continue with streaming
This is useful for:
- Real-time progress updates
- Large workflows where you want incremental feedback
- UI integrations that show step-by-step progress
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.run.workflow import (
StepCompletedEvent,
StepPausedEvent,
StepStartedEvent,
WorkflowCompletedEvent,
WorkflowRunOutput,
WorkflowStartedEvent,
)
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput, UserInputField
from agno.workflow.workflow import Workflow
# Step 1: Gather context (no HITL)
def gather_context(step_input: StepInput) -> StepOutput:
    """Gather initial context from the input."""
    subject = step_input.input or "general topic"
    intro = (
        f"Context gathered for: '{subject}'\n"
        "Ready to generate content based on user preferences."
    )
    return StepOutput(content=intro)
# Step 2: Content generator agent (HITL configured on Step)
# Plain agent definition; the HITL pause itself lives on the wrapping Step.
content_agent = Agent(
    name="Content Generator",
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions=[
        "You are a content generator.",
        "Generate content based on the topic and user preferences provided.",
        "The user preferences will be provided in the message - use them to guide your output.",
        "Respect the tone, length, and format specified by the user.",
        "Keep the output focused and professional.",
    ],
)
# Step 3: Format output (no HITL)
def format_output(step_input: StepInput) -> StepOutput:
    """Format the final output."""
    body = step_input.previous_step_content or "No content generated"
    wrapped = "=== GENERATED CONTENT ===\n\n" + body + "\n\n=== END ==="
    return StepOutput(content=wrapped)
# Define workflow with Step-level HITL configuration
workflow = Workflow(
    name="content_generation_workflow_stream",
    # Database persistence is what allows the paused run to be resumed.
    db=SqliteDb(db_file="tmp/workflow_step_user_input_stream.db"),
    steps=[
        Step(name="gather_context", executor=gather_context),
        Step(
            name="generate_content",
            agent=content_agent,
            requires_user_input=True,
            user_input_message="Please provide your content preferences:",
            user_input_schema=[
                UserInputField(
                    name="tone",
                    field_type="str",
                    description="Tone of the content",
                    required=True,
                    # Validation: only these values are allowed
                    allowed_values=["formal", "casual", "technical"],
                ),
                UserInputField(
                    name="length",
                    field_type="str",
                    description="Content length",
                    required=True,
                    allowed_values=["short", "medium", "long"],
                ),
                UserInputField(
                    name="include_examples",
                    field_type="bool",
                    description="Include practical examples?",
                    required=False,
                ),
            ],
        ),
        Step(name="format_output", executor=format_output),
    ],
)
def handle_hitl_pause(run_output: WorkflowRunOutput) -> None:
    """Handle HITL requirements from the paused workflow.

    Prompts on stdin for every pending user-input and confirmation
    requirement, converts typed values per the declared field types, and
    records the responses on the requirement objects. Re-raises the
    ValueError from set_user_input if validation fails.
    """
    # Handle user input requirements
    for requirement in run_output.steps_requiring_user_input:
        print(f"\n[HITL] Step '{requirement.step_name}' requires user input")
        print(f"[HITL] {requirement.user_input_message}")
        if requirement.user_input_schema:
            print("\nFields (* = required):")
            user_values = {}
            for field in requirement.user_input_schema:
                required_marker = "*" if field.required else ""
                field_desc = f" - {field.description}" if field.description else ""
                # Show allowed values if specified
                allowed_hint = (
                    f" [{', '.join(str(v) for v in field.allowed_values)}]"
                    if field.allowed_values
                    else ""
                )
                prompt = f" {field.name}{required_marker} ({field.field_type}){allowed_hint}{field_desc}: "
                value = input(prompt).strip()
                if value:
                    # Convert the raw string to the declared field type.
                    if field.field_type == "int":
                        user_values[field.name] = int(value)
                    elif field.field_type == "float":
                        user_values[field.name] = float(value)
                    elif field.field_type == "bool":
                        user_values[field.name] = value.lower() in (
                            "true",
                            "yes",
                            "1",
                            "y",
                        )
                    else:
                        user_values[field.name] = value
            # set_user_input validates by default; catch validation errors
            try:
                requirement.set_user_input(**user_values)
                print("\n[HITL] Preferences received - continuing workflow...")
            except ValueError as e:
                print(f"\n[HITL] Validation error: {e}")
                print("[HITL] Please provide valid input.")
                # In a real app, you'd loop and re-prompt
                raise
    # Handle confirmation requirements
    for requirement in run_output.steps_requiring_confirmation:
        print(f"\n[HITL] Step '{requirement.step_name}' requires confirmation")
        print(f"[HITL] {requirement.confirmation_message}")
        confirm = input("\nContinue? (yes/no): ").strip().lower()
        if confirm in ("yes", "y"):
            requirement.confirm()
        else:
            requirement.reject()
def run_workflow_streaming(input_text: str) -> WorkflowRunOutput:
    """Run workflow with streaming and handle HITL pauses.

    Streams events, prints a console trace for each event type, and loops on
    `continue_run` (also streaming) until the run is no longer paused.
    Returns the final run output (may be None if no run was recorded).
    """
    print("=" * 60)
    print("Step-Level User Input HITL Example (Streaming)")
    print("=" * 60)
    print("\nStarting workflow with streaming...\n")
    # Track the final run output
    run_output: WorkflowRunOutput | None = None
    # Run with streaming - returns an iterator of events
    # stream=True enables streaming output, stream_events=True enables step events
    event_stream = workflow.run(input_text, stream=True, stream_events=True)
    for event in event_stream:
        # Check event type and handle accordingly
        if isinstance(event, WorkflowStartedEvent):
            print(f"[EVENT] Workflow started: {event.workflow_name}")
        elif isinstance(event, StepStartedEvent):
            print(f"[EVENT] Step started: {event.step_name}")
        elif isinstance(event, StepCompletedEvent):
            print(f"[EVENT] Step completed: {event.step_name}")
            if event.content:
                # Show preview of content (truncated)
                preview = (
                    str(event.content)[:100] + "..."
                    if len(str(event.content)) > 100
                    else str(event.content)
                )
                print(f" Content: {preview}")
        elif isinstance(event, StepPausedEvent):
            # HITL pause detected!
            print(f"\n[EVENT] Step PAUSED: {event.step_name}")
            if event.requires_user_input:
                print(" Reason: User input required")
                print(f" Message: {event.user_input_message}")
            elif event.requires_confirmation:
                print(" Reason: Confirmation required")
                print(f" Message: {event.confirmation_message}")
        elif isinstance(event, WorkflowCompletedEvent):
            print("\n[EVENT] Workflow completed!")
            print(
                f" Final content length: {len(str(event.content)) if event.content else 0} chars"
            )
        # Check if the event contains the workflow run output
        # (some events have a workflow_run_output attribute)
        if hasattr(event, "workflow_run_output") and event.workflow_run_output:
            run_output = event.workflow_run_output
    # After streaming, we need to get the current run state
    # The last event in a paused workflow should give us the state
    # If run_output is still None, get it from session
    if run_output is None:
        # Get the latest run from the session
        session = workflow.get_session()
        if session and session.runs:
            run_output = session.runs[-1]
    # If workflow is paused, handle HITL and continue
    # (may loop multiple times if the workflow has several HITL steps)
    while run_output and run_output.is_paused:
        handle_hitl_pause(run_output)
        print("\n[INFO] Continuing workflow with streaming...\n")
        # Continue with streaming
        continue_stream = workflow.continue_run(
            run_output, stream=True, stream_events=True
        )
        for event in continue_stream:
            if isinstance(event, StepStartedEvent):
                print(f"[EVENT] Step started: {event.step_name}")
            elif isinstance(event, StepCompletedEvent):
                print(f"[EVENT] Step completed: {event.step_name}")
                if event.content:
                    preview = (
                        str(event.content)[:100] + "..."
                        if len(str(event.content)) > 100
                        else str(event.content)
                    )
                    print(f" Content: {preview}")
            elif isinstance(event, StepPausedEvent):
                print(f"\n[EVENT] Step PAUSED: {event.step_name}")
            elif isinstance(event, WorkflowCompletedEvent):
                print("\n[EVENT] Workflow completed!")
            if hasattr(event, "workflow_run_output") and event.workflow_run_output:
                run_output = event.workflow_run_output
        # Get updated run output from session
        # NOTE(review): this unconditionally overwrites any run_output taken
        # from the completed event above — assumes the session always holds
        # the freshest state; confirm.
        session = workflow.get_session()
        if session and session.runs:
            run_output = session.runs[-1]
    return run_output  # type: ignore
if __name__ == "__main__":
    # Demo entry point: stream the workflow on a fixed topic, handle any HITL
    # pauses interactively, then print the final status and content.
    final_output = run_workflow_streaming("Python async programming")
    print("\n" + "=" * 60)
    print(f"Final Status: {final_output.status}")
    print("=" * 60)
    print(final_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/_07_human_in_the_loop/user_input/03_step_user_input_streaming.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/workflow/decorators.py | """Decorators for workflow step configuration."""
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, Union
F = TypeVar("F", bound=Callable)
if TYPE_CHECKING:
from agno.workflow.types import UserInputField
def pause(
    name: Optional[str] = None,
    requires_confirmation: bool = False,
    confirmation_message: Optional[str] = None,
    requires_user_input: bool = False,
    user_input_message: Optional[str] = None,
    user_input_schema: Optional[List[Union[Dict[str, Any], "UserInputField"]]] = None,
) -> Callable[[F], F]:
    """Decorator that marks a step function with Human-In-The-Loop (HITL) settings.

    The decorated function gains ``_hitl_*`` attributes which are later picked
    up automatically when the function is passed to a Step or a Workflow (see
    ``get_pause_metadata``).

    Args:
        name: Optional step name; the function name is used when omitted.
        requires_confirmation: Pause for a yes/no confirmation before the step
            runs. Defaults to False.
        confirmation_message: Prompt shown when asking for confirmation.
        requires_user_input: Pause to collect structured user input before the
            step runs. Defaults to False.
        user_input_message: Prompt shown when asking for input.
        user_input_schema: Input field specs, as dicts or UserInputField
            objects. Each field may carry:
            - name: Field name (required)
            - field_type: "str", "int", "float", "bool" (default: "str")
            - description: Field description (optional)
            - required: Whether field is required (default: True)

    Returns:
        A decorator that attaches the HITL metadata and returns the function
        unchanged.
    """
    # One place that defines the full attribute set stored on the function.
    hitl_attrs = {
        "_hitl_name": name,
        "_hitl_requires_confirmation": requires_confirmation,
        "_hitl_confirmation_message": confirmation_message,
        "_hitl_requires_user_input": requires_user_input,
        "_hitl_user_input_message": user_input_message,
        "_hitl_user_input_schema": user_input_schema,
    }

    def decorator(func: F) -> F:
        # Store HITL metadata directly on the function object.
        for attr, value in hitl_attrs.items():
            setattr(func, attr, value)
        return func

    return decorator
def get_pause_metadata(func: Callable) -> dict:
    """Extract HITL metadata from a function decorated with @pause.

    Args:
        func: The function to inspect.

    Returns:
        A dictionary with whatever HITL settings are present on the function;
        empty when the function is not callable or was never decorated.
    """
    if not callable(func):
        return {}
    # Attribute-on-function -> metadata key, mirroring what @pause stores.
    # Only attributes actually present end up in the result.
    attr_to_key = {
        "_hitl_name": "name",
        "_hitl_requires_confirmation": "requires_confirmation",
        "_hitl_confirmation_message": "confirmation_message",
        "_hitl_requires_user_input": "requires_user_input",
        "_hitl_user_input_message": "user_input_message",
        "_hitl_user_input_schema": "user_input_schema",
    }
    return {key: getattr(func, attr) for attr, key in attr_to_key.items() if hasattr(func, attr)}
def has_pause_metadata(func: Callable) -> bool:
    """Return True if *func* carries HITL metadata from the @pause decorator.

    A function counts as decorated when either "requires" flag attribute is
    present on it.

    Args:
        func: The function to check.

    Returns:
        True if the function has HITL metadata, False otherwise.
    """
    return any(
        hasattr(func, attr)
        for attr in ("_hitl_requires_confirmation", "_hitl_requires_user_input")
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/decorators.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/workflow/utils/hitl.py | """Helper classes and functions for workflow HITL (Human-in-the-Loop) execution.
This module contains shared utilities used by the execute and continue_run methods
(sync/async, streaming/non-streaming) to reduce code duplication.
"""
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union, cast
from agno.run.base import RunStatus
from agno.utils.log import log_debug
from agno.workflow.types import StepOutput
if TYPE_CHECKING:
from agno.media import Audio, File, Image, Video
from agno.run.workflow import WorkflowRunOutput
from agno.session.workflow import WorkflowSession
from agno.workflow.types import StepInput, StepRequirement, WorkflowExecutionInput
@dataclass
class StepPauseResult:
    """Result of a step pause status check.

    Attributes:
        should_pause: Whether the workflow should pause for user interaction.
        step_requirement: The step requirement for any pause type (confirmation, user input, or route selection).
    """

    # Defaults describe the common "no pause needed" case so callers can
    # simply return StepPauseResult() when nothing blocks execution.
    should_pause: bool = False
    step_requirement: Optional["StepRequirement"] = None
def step_pause_status(
    step: Any,
    step_index: int,
    step_input: "StepInput",
    step_type: str,
    for_route_selection: bool = False,
) -> StepPauseResult:
    """Check if a workflow component requires pausing for user interaction.

    Unified pause check for all component types:
    - Step: confirmation or user input
    - Loop, Condition, Steps, Router: confirmation
    - Router: route selection (when for_route_selection=True)

    Args:
        step: The workflow component to check (Step, Loop, Condition, Steps, or Router).
        step_index: Index of the step in the workflow.
        step_input: The prepared input for the step.
        step_type: Type of the component ("Step", "Loop", "Condition", "Steps", "Router").
        for_route_selection: If True, check for Router route selection instead of confirmation.

    Returns:
        StepPauseResult indicating whether to pause and the requirement.
    """
    needs_confirmation = getattr(step, "requires_confirmation", False)
    needs_user_input = getattr(step, "requires_user_input", False)

    # Decide whether this component must pause, and label why (for the log).
    if for_route_selection:
        requires_pause = needs_user_input
        pause_type = "user selection"
    elif step_type == "Step":
        requires_pause = needs_confirmation or needs_user_input
        pause_type = "confirmation" if needs_confirmation else "user input"
    else:
        # Non-Step components only support confirmation pauses.
        requires_pause = needs_confirmation
        pause_type = "confirmation"

    if not requires_pause:
        return StepPauseResult(should_pause=False)

    # Fall back to a positional name like "loop_2" when the step is unnamed.
    step_name = getattr(step, "name", None) or f"{step_type.lower()}_{step_index + 1}"
    log_debug(f"{step_type} '{step_name}' requires {pause_type} - pausing workflow")

    # Build the requirement; route selection needs the extra flag.
    if for_route_selection:
        requirement = step.create_step_requirement(
            step_index=step_index,
            step_input=step_input,
            for_route_selection=True,
        )
    else:
        requirement = step.create_step_requirement(step_index, step_input)
    return StepPauseResult(should_pause=True, step_requirement=requirement)
def create_step_paused_event(
    workflow_run_response: "WorkflowRunOutput",
    step: Any,
    step_name: str,
    step_index: int,
    pause_result: StepPauseResult,
) -> Any:
    """Create a StepPausedEvent for streaming.

    Args:
        workflow_run_response: The workflow run output.
        step: The step that triggered the pause.
        step_name: Name of the step.
        step_index: Index of the step.
        pause_result: The step pause result.

    Returns:
        StepPausedEvent instance.
    """
    from agno.run.workflow import StepPausedEvent

    # Without a requirement the event is emitted with all HITL flags unset.
    event_kwargs: Dict[str, Any] = {
        "run_id": workflow_run_response.run_id or "",
        "workflow_name": workflow_run_response.workflow_name,
        "workflow_id": workflow_run_response.workflow_id,
        "session_id": workflow_run_response.session_id,
        "step_name": step_name,
        "step_index": step_index,
        "step_id": getattr(step, "step_id", None),
        "requires_confirmation": False,
        "confirmation_message": None,
        "requires_user_input": False,
        "user_input_message": None,
    }
    requirement = pause_result.step_requirement
    if requirement:
        event_kwargs["requires_confirmation"] = requirement.requires_confirmation
        event_kwargs["confirmation_message"] = requirement.confirmation_message
        event_kwargs["requires_user_input"] = requirement.requires_user_input
        event_kwargs["user_input_message"] = requirement.user_input_message
    return StepPausedEvent(**event_kwargs)
def create_router_paused_event(
    workflow_run_response: "WorkflowRunOutput",
    step_name: str,
    step_index: int,
    pause_result: StepPauseResult,
) -> Any:
    """Create a RouterPausedEvent for streaming.

    Args:
        workflow_run_response: The workflow run output.
        step_name: Name of the router.
        step_index: Index of the router.
        pause_result: The step pause result.

    Returns:
        RouterPausedEvent instance.
    """
    from agno.run.workflow import RouterPausedEvent

    # Defaults cover the (unexpected) case of a pause without a requirement.
    choices = []
    message = None
    multiple = False
    requirement = pause_result.step_requirement
    if requirement:
        choices = requirement.available_choices or []
        message = requirement.user_input_message
        multiple = requirement.allow_multiple_selections
    return RouterPausedEvent(
        run_id=workflow_run_response.run_id or "",
        workflow_name=workflow_run_response.workflow_name,
        workflow_id=workflow_run_response.workflow_id,
        session_id=workflow_run_response.session_id,
        step_name=step_name,
        step_index=step_index,
        available_choices=choices,
        user_input_message=message,
        allow_multiple_selections=multiple,
    )
def apply_pause_state(
    workflow_run_response: "WorkflowRunOutput",
    step_index: int,
    step_name: Optional[str],
    collected_step_outputs: List[Union["StepOutput", List["StepOutput"]]],
    pause_result: StepPauseResult,
) -> None:
    """Record the paused state on the workflow run response.

    Args:
        workflow_run_response: The workflow run output to update.
        step_index: Index of the step that triggered the pause.
        step_name: Name of the step that triggered the pause.
        collected_step_outputs: The step outputs collected so far.
        pause_result: The step pause result containing the requirement.
    """
    requirement = pause_result.step_requirement
    # Mark the run as paused and remember where to resume from.
    workflow_run_response.status = RunStatus.paused
    workflow_run_response.paused_step_index = step_index
    workflow_run_response.paused_step_name = step_name
    workflow_run_response.step_results = collected_step_outputs
    if requirement:
        workflow_run_response.step_requirements = [requirement]
def save_paused_session(
    workflow: Any,
    session: "WorkflowSession",
    workflow_run_response: "WorkflowRunOutput",
) -> None:
    """Save the session with paused state.

    Order matters: session metrics are updated and the paused run is upserted
    into the session before the session is persisted.

    Args:
        workflow: The workflow instance.
        session: The workflow session.
        workflow_run_response: The workflow run output.
    """
    workflow._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
    session.upsert_run(run=workflow_run_response)
    workflow.save_session(session=session)
async def asave_paused_session(
    workflow: Any,
    session: "WorkflowSession",
    workflow_run_response: "WorkflowRunOutput",
) -> None:
    """Save the session with paused state (async version).

    Mirrors save_paused_session but routes the final persistence call through
    the async DB path when the workflow has one.

    Args:
        workflow: The workflow instance.
        session: The workflow session.
        workflow_run_response: The workflow run output.
    """
    workflow._update_session_metrics(session=session, workflow_run_response=workflow_run_response)
    session.upsert_run(run=workflow_run_response)
    # Only await the async save when an async DB is configured; otherwise the
    # synchronous save is used even from this async context.
    if workflow._has_async_db():
        await workflow.asave_session(session=session)
    else:
        workflow.save_session(session=session)
class ContinueExecutionState:
    """State container for continue execution methods.

    This class encapsulates the shared state used across all continue_execute
    variants (sync/async, streaming/non-streaming) to reduce code duplication.

    It restores previously collected step outputs from the run response and
    seeds the shared/output media lists from the execution input plus the
    media of already-completed steps.
    """

    def __init__(
        self,
        workflow_run_response: "WorkflowRunOutput",
        execution_input: "WorkflowExecutionInput",
    ):
        # Restore previous step outputs from step_results
        self.collected_step_outputs: List[Union["StepOutput", List["StepOutput"]]] = list(
            workflow_run_response.step_results or []
        )
        self.previous_step_outputs: Dict[str, "StepOutput"] = {}
        for step_output in self.collected_step_outputs:
            if isinstance(step_output, StepOutput) and step_output.step_name:
                self.previous_step_outputs[step_output.step_name] = step_output
        # Initialize media lists. Copy the input lists so that extending the
        # shared state below does not mutate the caller's
        # WorkflowExecutionInput media in place (previously shared_* aliased
        # execution_input.* and extend() leaked restored media back into it).
        self.shared_images: List["Image"] = list(execution_input.images or [])
        self.output_images: List["Image"] = list(execution_input.images or [])
        self.shared_videos: List["Video"] = list(execution_input.videos or [])
        self.output_videos: List["Video"] = list(execution_input.videos or [])
        self.shared_audio: List["Audio"] = list(execution_input.audio or [])
        self.output_audio: List["Audio"] = list(execution_input.audio or [])
        self.shared_files: List["File"] = list(execution_input.files or [])
        self.output_files: List["File"] = list(execution_input.files or [])
        # Restore shared media from previous steps. This is exactly the same
        # bookkeeping done when a new step completes, so reuse the helper.
        # NOTE(review): nested List[StepOutput] entries (parallel steps) are
        # skipped here, as in the original — confirm that is intentional.
        for step_output in self.collected_step_outputs:
            if isinstance(step_output, StepOutput):
                self.extend_media_from_step(step_output)

    def extend_media_from_step(self, step_output: "StepOutput") -> None:
        """Extend shared and output media lists from a step output."""
        self.shared_images.extend(step_output.images or [])
        self.shared_videos.extend(step_output.videos or [])
        self.shared_audio.extend(step_output.audio or [])
        self.shared_files.extend(step_output.files or [])
        self.output_images.extend(step_output.images or [])
        self.output_videos.extend(step_output.videos or [])
        self.output_audio.extend(step_output.audio or [])
        self.output_files.extend(step_output.files or [])

    def add_step_output(self, step_name: str, step_output: "StepOutput") -> None:
        """Add a step output to tracking collections and extend media."""
        self.previous_step_outputs[step_name] = step_output
        self.collected_step_outputs.append(step_output)
        self.extend_media_from_step(step_output)
def finalize_workflow_completion(
    workflow_run_response: "WorkflowRunOutput",
    state: ContinueExecutionState,
) -> None:
    """Finalize workflow completion by updating metrics and status.

    This helper consolidates the common completion logic used across all
    continue_execute variants.

    Args:
        workflow_run_response: The workflow run output to finalize.
        state: The execution state containing collected outputs and media.
    """
    if state.collected_step_outputs:
        if workflow_run_response.metrics:
            workflow_run_response.metrics.stop_timer()
        # Extract final content from last step output
        last_output = cast(StepOutput, state.collected_step_outputs[-1])
        if getattr(last_output, "steps", None):
            # Composite output (has nested .steps): descend to the last,
            # innermost leaf output and surface its content as the result.
            _cur = last_output
            while getattr(_cur, "steps", None):
                _steps = _cur.steps or []
                if not _steps:
                    break
                _cur = _steps[-1]
            workflow_run_response.content = _cur.content
        else:
            workflow_run_response.content = last_output.content
    else:
        workflow_run_response.content = "No steps executed"
    workflow_run_response.step_results = state.collected_step_outputs
    workflow_run_response.images = state.output_images
    workflow_run_response.videos = state.output_videos
    workflow_run_response.audio = state.output_audio
    # NOTE(review): state.output_files is tracked but never copied onto the
    # response here — confirm whether WorkflowRunOutput carries files and
    # whether this is an intentional omission.
    workflow_run_response.status = RunStatus.completed
    workflow_run_response.paused_step_index = None
    workflow_run_response.paused_step_name = None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/utils/hitl.py",
"license": "Apache License 2.0",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/workflows/test_hitl.py | """
Integration tests for Human-In-The-Loop (HITL) workflow functionality.
Tests cover:
- Step confirmation (requires_confirmation) - sync, async, streaming
- Step user input (requires_user_input) - sync, async, streaming
- Router user selection - sync, async, streaming
- Error handling with on_error="pause" - sync, async, streaming
- Step rejection with on_reject="skip" vs on_reject="cancel"
- Workflow pause and resume via continue_run()
- Multiple HITL pauses in a workflow
"""
import pytest
from agno.run.base import RunStatus
from agno.workflow import Router
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow
# =============================================================================
# Test Step Functions
# =============================================================================
def fetch_data(step_input: StepInput) -> StepOutput:
    """Simple fetch data function."""
    # Stand-in for a real data source; the input is ignored and a fixed
    # payload is returned so assertions can match on it.
    return StepOutput(content="Fetched data from source")
def process_data(step_input: StepInput) -> StepOutput:
    """Process data function that uses user input if available.

    Reads the HITL-provided "preference" out of additional_data["user_input"]
    and echoes it in the output so tests can assert it was applied.
    """
    additional = step_input.additional_data or {}
    preference = additional.get("user_input", {}).get("preference", "default")
    return StepOutput(content=f"Processed data with preference: {preference}")
def save_data(step_input: StepInput) -> StepOutput:
    """Save data function.

    Echoes the previous step's content so tests can verify chaining.
    """
    previous = step_input.previous_step_content or "no previous content"
    content = f"Data saved: {previous}"
    return StepOutput(content=content)
def failing_step(step_input: StepInput) -> StepOutput:
    """A step that always fails."""
    # Deliberately raises so error-handling paths (e.g. on_error) can be
    # exercised by the tests.
    raise ValueError("Intentional test failure")
def route_a(step_input: StepInput) -> StepOutput:
    """Route A function."""
    # Fixed marker content so router tests can tell which branch ran.
    return StepOutput(content="Route A executed")
def route_b(step_input: StepInput) -> StepOutput:
    """Route B function."""
    # Fixed marker content so router tests can tell which branch ran.
    return StepOutput(content="Route B executed")
def route_c(step_input: StepInput) -> StepOutput:
    """Route C function."""
    # Fixed marker content so router tests can tell which branch ran.
    return StepOutput(content="Route C executed")
# =============================================================================
# Step Confirmation Tests
# =============================================================================
class TestStepConfirmation:
    """Tests for Step confirmation HITL.

    Covers pause/confirm/reject flows in sync, async and streaming modes.
    All workflows share the same fetch -> process (gated) -> save shape.
    """

    def test_step_confirmation_pauses_workflow(self, shared_db):
        """Test that a step with requires_confirmation pauses the workflow."""
        workflow = Workflow(
            name="Confirmation Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                    confirmation_message="Proceed with processing?",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        response = workflow.run(input="test data")
        assert response.is_paused is True
        assert response.status == RunStatus.paused
        assert response.step_requirements is not None
        assert len(response.step_requirements) == 1
        assert response.step_requirements[0].step_name == "process"
        assert response.step_requirements[0].confirmation_message == "Proceed with processing?"

    def test_step_confirmation_continue_after_confirm(self, shared_db):
        """Test workflow continues after confirmation."""
        workflow = Workflow(
            name="Confirmation Continue Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until pause
        response = workflow.run(input="test data")
        assert response.is_paused is True
        # Confirm the step
        response.step_requirements[0].confirm()
        # Continue the workflow
        final_response = workflow.continue_run(response)
        assert final_response.status == RunStatus.completed
        assert "Data saved" in final_response.content

    def test_step_confirmation_reject_cancels_workflow(self, shared_db):
        """Test workflow is cancelled when confirmation is rejected (default on_reject=cancel)."""
        workflow = Workflow(
            name="Confirmation Reject Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                    on_reject="cancel",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until pause
        response = workflow.run(input="test data")
        assert response.is_paused is True
        # Reject the step
        response.step_requirements[0].reject()
        # Continue the workflow
        final_response = workflow.continue_run(response)
        assert final_response.status == RunStatus.cancelled

    def test_step_confirmation_reject_skips_step(self, shared_db):
        """Test workflow skips step when rejected with on_reject=skip."""
        workflow = Workflow(
            name="Confirmation Skip Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                    on_reject="skip",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until pause
        response = workflow.run(input="test data")
        assert response.is_paused is True
        # Reject the step
        response.step_requirements[0].reject()
        # Continue the workflow
        final_response = workflow.continue_run(response)
        # Workflow should complete, skipping process step
        assert final_response.status == RunStatus.completed
        # Save step received output from fetch step (not process step since it was skipped)
        assert "Data saved" in final_response.content

    @pytest.mark.asyncio
    async def test_step_confirmation_async(self, async_shared_db):
        """Test step confirmation with async execution."""
        workflow = Workflow(
            name="Async Confirmation Test",
            db=async_shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until pause
        response = await workflow.arun(input="test data")
        assert response.is_paused is True
        # Confirm the step
        response.step_requirements[0].confirm()
        # Continue the workflow
        final_response = await workflow.acontinue_run(response)
        assert final_response.status == RunStatus.completed

    def test_step_confirmation_streaming(self, shared_db):
        """Test step confirmation with streaming execution."""
        from agno.run.workflow import StepPausedEvent, StepStartedEvent, WorkflowStartedEvent

        workflow = Workflow(
            name="Streaming Confirmation Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run with streaming until pause - stream_events=True for step events
        events = list(workflow.run(input="test data", stream=True, stream_events=True))
        # Check we got a workflow started event
        workflow_started = [e for e in events if isinstance(e, WorkflowStartedEvent)]
        assert len(workflow_started) == 1
        # Check we got step started events (fetch should start before we pause on process)
        step_started = [e for e in events if isinstance(e, StepStartedEvent)]
        assert len(step_started) >= 1
        # Check we got a paused event
        paused_events = [e for e in events if isinstance(e, StepPausedEvent)]
        assert len(paused_events) > 0

    def test_step_confirmation_streaming_continue(self, shared_db):
        """Test step confirmation with streaming execution and continue."""
        from agno.run.workflow import StepCompletedEvent, StepStartedEvent

        workflow = Workflow(
            name="Streaming Confirmation Continue Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_confirmation=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run with streaming until pause
        _ = list(workflow.run(input="test data", stream=True, stream_events=True))
        # Streaming does not return the run output directly, so read the
        # paused run state back from the session.
        # Get run output from session
        session = workflow.get_session()
        assert session is not None
        response = session.runs[-1]
        assert response.is_paused is True
        # Confirm the step
        response.step_requirements[0].confirm()
        # Continue with streaming - stream_events=True for step events
        continue_events = list(workflow.continue_run(response, stream=True, stream_events=True))
        # Verify we got step events
        step_started = [e for e in continue_events if isinstance(e, StepStartedEvent)]
        step_completed = [e for e in continue_events if isinstance(e, StepCompletedEvent)]
        assert len(step_started) >= 1, "Should have at least one StepStartedEvent"
        assert len(step_completed) >= 1, "Should have at least one StepCompletedEvent"
        # Get final state
        session = workflow.get_session()
        final_response = session.runs[-1]
        assert final_response.status == RunStatus.completed
# =============================================================================
# Step User Input Tests
# =============================================================================
class TestStepUserInput:
    """Tests for Step user input HITL.

    Same fetch -> process -> save shape as the confirmation tests, but the
    gated step collects structured input that process_data echoes back.
    """

    def test_step_user_input_pauses_workflow(self, shared_db):
        """Test that a step with requires_user_input pauses the workflow."""
        workflow = Workflow(
            name="User Input Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_user_input=True,
                    user_input_message="Please provide your preference",
                    # Step uses List[Dict] for user_input_schema, not List[UserInputField]
                    user_input_schema=[
                        {"name": "preference", "field_type": "str", "description": "Your preference", "required": True},
                    ],
                ),
                Step(name="save", executor=save_data),
            ],
        )
        response = workflow.run(input="test data")
        assert response.is_paused is True
        assert response.step_requirements is not None
        assert len(response.step_requirements) == 1
        assert response.step_requirements[0].requires_user_input is True
        assert response.step_requirements[0].user_input_message == "Please provide your preference"

    def test_step_user_input_continue_with_input(self, shared_db):
        """Test workflow continues with user input."""
        workflow = Workflow(
            name="User Input Continue Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_user_input=True,
                    user_input_message="Please provide your preference",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until pause
        response = workflow.run(input="test data")
        assert response.is_paused is True
        # Provide user input (set_user_input takes **kwargs)
        response.step_requirements[0].set_user_input(preference="fast")
        # Continue the workflow
        final_response = workflow.continue_run(response)
        assert final_response.status == RunStatus.completed
        # The process step should have used the user input
        process_output = [r for r in final_response.step_results if r.step_name == "process"]
        assert len(process_output) == 1
        assert "fast" in process_output[0].content

    @pytest.mark.asyncio
    async def test_step_user_input_async(self, async_shared_db):
        """Test step user input with async execution."""
        workflow = Workflow(
            name="Async User Input Test",
            db=async_shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_user_input=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until pause
        response = await workflow.arun(input="test data")
        assert response.is_paused is True
        # Provide user input (set_user_input takes **kwargs)
        response.step_requirements[0].set_user_input(preference="async_value")
        # Continue the workflow
        final_response = await workflow.acontinue_run(response)
        assert final_response.status == RunStatus.completed

    def test_step_user_input_streaming(self, shared_db):
        """Test step user input with streaming execution."""
        from agno.run.workflow import StepCompletedEvent, StepPausedEvent, StepStartedEvent

        workflow = Workflow(
            name="Streaming User Input Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process",
                    executor=process_data,
                    requires_user_input=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run with streaming until pause - stream_events=True for step events
        events = list(workflow.run(input="test data", stream=True, stream_events=True))
        # Check we got step events before pause
        step_started = [e for e in events if isinstance(e, StepStartedEvent)]
        assert len(step_started) >= 1, "Should have at least one StepStartedEvent"
        # Check we got a paused event with requires_user_input
        paused_events = [e for e in events if isinstance(e, StepPausedEvent)]
        assert len(paused_events) > 0
        assert paused_events[0].requires_user_input is True
        # Streaming does not return the run output directly, so read the
        # paused run state back from the session.
        # Get run output from session
        session = workflow.get_session()
        assert session is not None
        response = session.runs[-1]
        assert response.is_paused is True
        # Provide user input
        response.step_requirements[0].set_user_input(preference="streaming_value")
        # Continue with streaming - stream_events=True for step events
        continue_events = list(workflow.continue_run(response, stream=True, stream_events=True))
        # Verify we got step events after continue
        step_started = [e for e in continue_events if isinstance(e, StepStartedEvent)]
        step_completed = [e for e in continue_events if isinstance(e, StepCompletedEvent)]
        assert len(step_started) >= 1, "Should have at least one StepStartedEvent after continue"
        assert len(step_completed) >= 1, "Should have at least one StepCompletedEvent after continue"
        # Get final state
        session = workflow.get_session()
        final_response = session.runs[-1]
        assert final_response.status == RunStatus.completed
        # Verify user input was used
        process_output = [r for r in final_response.step_results if r.step_name == "process"]
        assert len(process_output) == 1
        assert "streaming_value" in process_output[0].content
# =============================================================================
# Router User Selection Tests
# =============================================================================
class TestRouterUserSelection:
    """HITL tests for user-driven route selection on Router steps."""

    def test_router_user_selection_pauses_workflow(self, shared_db):
        """A Router configured with requires_user_input should pause the run."""
        wf = Workflow(
            name="Router Selection Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Router(
                    name="route_selector",
                    requires_user_input=True,
                    user_input_message="Select a route",
                    choices=[
                        Step(name="route_a", executor=route_a),
                        Step(name="route_b", executor=route_b),
                    ],
                ),
                Step(name="save", executor=save_data),
            ],
        )
        run_output = wf.run(input="test data")
        assert run_output.is_paused is True
        assert run_output.steps_requiring_route is not None
        assert len(run_output.steps_requiring_route) == 1
        assert run_output.steps_requiring_route[0].available_choices == ["route_a", "route_b"]

    def test_router_user_selection_continue(self, shared_db):
        """Once a route is selected, continue_run should complete the workflow."""
        wf = Workflow(
            name="Router Selection Continue Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Router(
                    name="route_selector",
                    requires_user_input=True,
                    choices=[
                        Step(name="route_a", executor=route_a),
                        Step(name="route_b", executor=route_b),
                    ],
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until the router pauses the workflow
        paused = wf.run(input="test data")
        assert paused.is_paused is True
        # Resolve the pause by picking a branch, then resume
        paused.steps_requiring_route[0].select("route_a")
        done = wf.continue_run(paused)
        assert done.status == RunStatus.completed
        # The router should show up exactly once in the step results
        router_results = [r for r in done.step_results if r.step_name == "route_selector"]
        assert len(router_results) == 1

    def test_router_multi_selection(self, shared_db):
        """allow_multiple_selections should accept several routes at once."""
        wf = Workflow(
            name="Router Multi Selection Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Router(
                    name="route_selector",
                    requires_user_input=True,
                    allow_multiple_selections=True,
                    choices=[
                        Step(name="route_a", executor=route_a),
                        Step(name="route_b", executor=route_b),
                        Step(name="route_c", executor=route_c),
                    ],
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until the router pauses the workflow
        paused = wf.run(input="test data")
        assert paused.is_paused is True
        assert paused.steps_requiring_route[0].allow_multiple_selections is True
        # Pick two of the three available branches and resume
        paused.steps_requiring_route[0].select_multiple(["route_a", "route_c"])
        done = wf.continue_run(paused)
        assert done.status == RunStatus.completed

    @pytest.mark.asyncio
    async def test_router_user_selection_async(self, async_shared_db):
        """Route selection HITL should behave the same via arun/acontinue_run."""
        wf = Workflow(
            name="Async Router Selection Test",
            db=async_shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Router(
                    name="route_selector",
                    requires_user_input=True,
                    choices=[
                        Step(name="route_a", executor=route_a),
                        Step(name="route_b", executor=route_b),
                    ],
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until the router pauses the workflow
        paused = await wf.arun(input="test data")
        assert paused.is_paused is True
        # Pick a branch and resume asynchronously
        paused.steps_requiring_route[0].select("route_b")
        done = await wf.acontinue_run(paused)
        assert done.status == RunStatus.completed

    def test_router_user_selection_streaming(self, shared_db):
        """Streaming runs should emit RouterPausedEvent and resume cleanly."""
        from agno.run.workflow import RouterPausedEvent, StepCompletedEvent, StepStartedEvent

        wf = Workflow(
            name="Streaming Router Selection Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Router(
                    name="route_selector",
                    requires_user_input=True,
                    choices=[
                        Step(name="route_a", executor=route_a),
                        Step(name="route_b", executor=route_b),
                    ],
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Stream until the pause; stream_events=True surfaces step-level events
        events = list(wf.run(input="test data", stream=True, stream_events=True))
        started = [e for e in events if isinstance(e, StepStartedEvent)]
        assert len(started) >= 1, "Should have at least one StepStartedEvent"
        # The router pause must be reported as a dedicated event
        paused_events = [e for e in events if isinstance(e, RouterPausedEvent)]
        assert len(paused_events) > 0
        assert paused_events[0].available_choices == ["route_a", "route_b"]
        # The paused run output lives in the session, not in the event stream
        session = wf.get_session()
        assert session is not None
        paused_run = session.runs[-1]
        assert paused_run.is_paused is True
        assert paused_run.steps_requiring_route is not None
        # Pick a branch and resume, still streaming
        paused_run.steps_requiring_route[0].select("route_a")
        continue_events = list(wf.continue_run(paused_run, stream=True, stream_events=True))
        started = [e for e in continue_events if isinstance(e, StepStartedEvent)]
        completed = [e for e in continue_events if isinstance(e, StepCompletedEvent)]
        assert len(started) >= 1, "Should have at least one StepStartedEvent after continue"
        assert len(completed) >= 1, "Should have at least one StepCompletedEvent after continue"
        # Final state is read back from the session
        session = wf.get_session()
        final_run = session.runs[-1]
        assert final_run.status == RunStatus.completed
# =============================================================================
# Error Handling HITL Tests
# =============================================================================
class TestErrorHandlingHITL:
    """HITL tests for steps configured with on_error='pause' / 'skip' / 'fail'."""

    def test_error_pause_workflow(self, shared_db):
        """on_error='pause' should pause the run and record an ErrorRequirement."""
        wf = Workflow(
            name="Error Pause Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="failing",
                    executor=failing_step,
                    on_error="pause",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        paused = wf.run(input="test data")
        assert paused.is_paused is True
        assert paused.error_requirements is not None
        assert len(paused.error_requirements) == 1
        assert paused.error_requirements[0].step_name == "failing"
        assert "Intentional test failure" in paused.error_requirements[0].error_message

    def test_error_pause_skip(self, shared_db):
        """Skipping a failed step after the pause should let the rest run."""
        wf = Workflow(
            name="Error Skip Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="failing",
                    executor=failing_step,
                    on_error="pause",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # The failure pauses the run rather than aborting it
        paused = wf.run(input="test data")
        assert paused.is_paused is True
        assert paused.error_requirements is not None
        # Decide to skip the broken step, then resume
        paused.error_requirements[0].skip()
        done = wf.continue_run(paused)
        assert done.status == RunStatus.completed
        # The downstream save step must still have executed
        assert "Data saved" in done.content

    def test_error_skip_without_pause(self, shared_db):
        """on_error='skip' should drop the failed step without pausing."""
        wf = Workflow(
            name="Error Skip Without Pause Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="failing",
                    executor=failing_step,
                    on_error="skip",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        result = wf.run(input="test data")
        # No pause expected: the run completes straight through
        assert result.status == RunStatus.completed
        assert "Data saved" in result.content

    def test_error_fail_raises(self, shared_db):
        """on_error='fail' should surface the step's exception to the caller."""
        wf = Workflow(
            name="Error Fail Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="failing",
                    executor=failing_step,
                    on_error="fail",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        with pytest.raises(ValueError, match="Intentional test failure"):
            wf.run(input="test data")

    @pytest.mark.asyncio
    async def test_error_pause_async(self, async_shared_db):
        """Error pause, skip and resume should also work via arun/acontinue_run."""
        wf = Workflow(
            name="Async Error Pause Test",
            db=async_shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="failing",
                    executor=failing_step,
                    on_error="pause",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Run until the failure pauses the workflow
        paused = await wf.arun(input="test data")
        assert paused.is_paused is True
        assert paused.error_requirements is not None
        # Skip the broken step and resume asynchronously
        paused.error_requirements[0].skip()
        done = await wf.acontinue_run(paused)
        assert done.status == RunStatus.completed

    def test_error_pause_streaming(self, shared_db):
        """Streaming runs should emit StepErrorEvent and resume after a skip."""
        from agno.run.workflow import StepCompletedEvent, StepErrorEvent, StepStartedEvent

        wf = Workflow(
            name="Streaming Error Pause Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="failing",
                    executor=failing_step,
                    on_error="pause",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # Stream until the failure pauses the run
        events = list(wf.run(input="test data", stream=True, stream_events=True))
        started = [e for e in events if isinstance(e, StepStartedEvent)]
        assert len(started) >= 1, "Should have at least one StepStartedEvent"
        # The failure must be surfaced as a StepErrorEvent
        errors = [e for e in events if isinstance(e, StepErrorEvent)]
        assert len(errors) > 0
        assert errors[0].step_name == "failing"
        # The paused run output lives in the session, not in the event stream
        session = wf.get_session()
        assert session is not None
        paused_run = session.runs[-1]
        assert paused_run.is_paused is True
        assert paused_run.error_requirements is not None
        # Skip the broken step and resume, still streaming
        paused_run.error_requirements[0].skip()
        continue_events = list(wf.continue_run(paused_run, stream=True, stream_events=True))
        started = [e for e in continue_events if isinstance(e, StepStartedEvent)]
        completed = [e for e in continue_events if isinstance(e, StepCompletedEvent)]
        assert len(started) >= 1, "Should have at least one StepStartedEvent after continue"
        assert len(completed) >= 1, "Should have at least one StepCompletedEvent after continue"
        # Final state is read back from the session
        session = wf.get_session()
        final_run = session.runs[-1]
        assert final_run.status == RunStatus.completed
        assert "Data saved" in final_run.content
# =============================================================================
# Multiple HITL Pauses Tests
# =============================================================================
class TestMultipleHITLPauses:
    """HITL tests for workflows that pause more than once in a single run."""

    def test_multiple_confirmation_steps(self, shared_db):
        """Two confirmation steps should produce two pause/continue cycles."""
        wf = Workflow(
            name="Multiple Confirmations Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="process1",
                    executor=process_data,
                    requires_confirmation=True,
                    confirmation_message="Confirm step 1?",
                ),
                Step(
                    name="process2",
                    executor=process_data,
                    requires_confirmation=True,
                    confirmation_message="Confirm step 2?",
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # First pause: process1
        run_output = wf.run(input="test data")
        assert run_output.is_paused is True
        assert run_output.step_requirements[0].step_name == "process1"
        run_output.step_requirements[0].confirm()
        # Second pause: process2
        run_output = wf.continue_run(run_output)
        assert run_output.is_paused is True
        assert run_output.step_requirements[0].step_name == "process2"
        run_output.step_requirements[0].confirm()
        # Final continue completes the run
        done = wf.continue_run(run_output)
        assert done.status == RunStatus.completed

    def test_confirmation_then_user_input(self, shared_db):
        """A confirmation pause followed by a user-input pause should both resolve."""
        wf = Workflow(
            name="Confirm Then Input Test",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                Step(
                    name="confirm_step",
                    executor=process_data,
                    requires_confirmation=True,
                ),
                Step(
                    name="input_step",
                    executor=process_data,
                    requires_user_input=True,
                ),
                Step(name="save", executor=save_data),
            ],
        )
        # First pause: confirmation
        run_output = wf.run(input="test data")
        assert run_output.is_paused is True
        assert run_output.step_requirements[0].requires_confirmation is True
        run_output.step_requirements[0].confirm()
        # Second pause: user input
        run_output = wf.continue_run(run_output)
        assert run_output.is_paused is True
        assert run_output.step_requirements[0].requires_user_input is True
        # set_user_input accepts the fields as keyword arguments
        run_output.step_requirements[0].set_user_input(preference="final")
        done = wf.continue_run(run_output)
        assert done.status == RunStatus.completed
# =============================================================================
# Test Step Immutability (Regression tests for step mutation bug)
# =============================================================================
class TestStepImmutability:
    """Tests to ensure step configuration is not mutated after HITL resolution.

    These tests verify that the workflow step definitions remain unchanged after
    HITL pauses are resolved. This is critical for workflow reusability - the same
    workflow instance should work correctly for multiple runs.

    Bug context: Previously, continue_run() would mutate step.requires_confirmation
    and step.requires_user_input to False after resolution, breaking subsequent runs.
    """

    def test_step_confirmation_not_mutated_after_continue(self, shared_db):
        """Verify step.requires_confirmation is not mutated after continue_run."""
        # Create workflow with confirmation step, kept in a local so its
        # configuration can be inspected after the run resolves
        confirm_step = Step(
            name="confirm_step",
            executor=process_data,
            requires_confirmation=True,
            confirmation_message="Please confirm",
        )
        workflow = Workflow(
            name="test_confirmation_immutable",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                confirm_step,
                Step(name="save", executor=save_data),
            ],
        )
        # Verify initial state
        assert confirm_step.requires_confirmation is True
        # First run - pauses at confirmation
        run1 = workflow.run(input="first run")
        assert run1.is_paused is True
        # Confirm and continue
        run1.step_requirements[0].confirm()
        result1 = workflow.continue_run(run1)
        assert result1.status == RunStatus.completed
        # CRITICAL: Step configuration should NOT be mutated
        assert confirm_step.requires_confirmation is True, (
            "Step.requires_confirmation was mutated after continue_run! This breaks workflow reusability."
        )

    def test_step_user_input_not_mutated_after_continue(self, shared_db):
        """Verify step.requires_user_input is not mutated after continue_run."""
        # Create workflow with user input step
        input_step = Step(
            name="input_step",
            executor=process_data,
            requires_user_input=True,
            user_input_message="Enter preference",
        )
        workflow = Workflow(
            name="test_user_input_immutable",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                input_step,
                Step(name="save", executor=save_data),
            ],
        )
        # Verify initial state
        assert input_step.requires_user_input is True
        # First run - pauses at user input
        run1 = workflow.run(input="first run")
        assert run1.is_paused is True
        # Provide input and continue
        run1.step_requirements[0].set_user_input(preference="test")
        result1 = workflow.continue_run(run1)
        assert result1.status == RunStatus.completed
        # CRITICAL: Step configuration should NOT be mutated
        assert input_step.requires_user_input is True, (
            "Step.requires_user_input was mutated after continue_run! This breaks workflow reusability."
        )

    def test_workflow_reusable_after_hitl_confirmation(self, shared_db):
        """Verify workflow can be reused after HITL confirmation is resolved.

        This is the primary regression test for the step mutation bug.
        """
        # Create workflow with confirmation step
        confirm_step = Step(
            name="confirm_step",
            executor=process_data,
            requires_confirmation=True,
            confirmation_message="Please confirm",
        )
        workflow = Workflow(
            name="test_reusable_confirmation",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                confirm_step,
                Step(name="save", executor=save_data),
            ],
        )
        # === First run ===
        run1 = workflow.run(input="first run data")
        assert run1.is_paused is True, "First run should pause at confirmation step"
        run1.step_requirements[0].confirm()
        result1 = workflow.continue_run(run1)
        assert result1.status == RunStatus.completed
        # === Second run with same workflow instance ===
        run2 = workflow.run(input="second run data")
        # CRITICAL: Second run should ALSO pause at confirmation
        assert run2.is_paused is True, (
            "Second run should pause at confirmation step! If this fails, step.requires_confirmation was mutated."
        )
        assert len(run2.step_requirements) == 1
        assert run2.step_requirements[0].step_name == "confirm_step"
        # Complete second run
        run2.step_requirements[0].confirm()
        result2 = workflow.continue_run(run2)
        assert result2.status == RunStatus.completed

    def test_workflow_reusable_after_hitl_user_input(self, shared_db):
        """Verify workflow can be reused after HITL user input is resolved."""
        # Create workflow with user input step
        input_step = Step(
            name="input_step",
            executor=process_data,
            requires_user_input=True,
            user_input_message="Enter preference",
        )
        workflow = Workflow(
            name="test_reusable_user_input",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                input_step,
                Step(name="save", executor=save_data),
            ],
        )
        # === First run ===
        run1 = workflow.run(input="first run data")
        assert run1.is_paused is True
        run1.step_requirements[0].set_user_input(preference="first")
        result1 = workflow.continue_run(run1)
        assert result1.status == RunStatus.completed
        # === Second run with same workflow instance ===
        run2 = workflow.run(input="second run data")
        # CRITICAL: Second run should ALSO pause at user input
        assert run2.is_paused is True, (
            "Second run should pause at user input step! If this fails, step.requires_user_input was mutated."
        )
        assert run2.step_requirements[0].step_name == "input_step"
        # Complete second run
        run2.step_requirements[0].set_user_input(preference="second")
        result2 = workflow.continue_run(run2)
        assert result2.status == RunStatus.completed

    def test_router_not_mutated_after_continue(self, shared_db):
        """Verify Router.requires_user_input is not mutated after continue_run."""
        # Create router with user input
        router = Router(
            name="test_router",
            choices=[
                Step(name="option_a", executor=process_data),
                Step(name="option_b", executor=save_data),
            ],
            requires_user_input=True,
            user_input_message="Select option",
        )
        workflow = Workflow(
            name="test_router_immutable",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                router,
                Step(name="final", executor=save_data),
            ],
        )
        # Verify initial state
        assert router.requires_user_input is True
        # First run - pauses at router
        run1 = workflow.run(input="first run")
        assert run1.is_paused is True
        # Select and continue
        run1.steps_requiring_route[0].select("option_a")
        result1 = workflow.continue_run(run1)
        assert result1.status == RunStatus.completed
        # CRITICAL: Router configuration should NOT be mutated
        assert router.requires_user_input is True, (
            "Router.requires_user_input was mutated after continue_run! This breaks workflow reusability."
        )

    def test_workflow_reusable_after_router_selection(self, shared_db):
        """Verify workflow can be reused after Router HITL selection."""
        router = Router(
            name="test_router",
            choices=[
                Step(name="option_a", executor=process_data),
                Step(name="option_b", executor=save_data),
            ],
            requires_user_input=True,
            user_input_message="Select option",
        )
        workflow = Workflow(
            name="test_reusable_router",
            db=shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                router,
                Step(name="final", executor=save_data),
            ],
        )
        # === First run ===
        run1 = workflow.run(input="first run")
        assert run1.is_paused is True
        run1.steps_requiring_route[0].select("option_a")
        result1 = workflow.continue_run(run1)
        assert result1.status == RunStatus.completed
        # === Second run with same workflow instance ===
        run2 = workflow.run(input="second run")
        # CRITICAL: Second run should ALSO pause at router
        assert run2.is_paused is True, (
            "Second run should pause at router! If this fails, Router.requires_user_input was mutated."
        )
        assert len(run2.steps_requiring_route) == 1
        # Complete second run
        run2.steps_requiring_route[0].select("option_b")
        result2 = workflow.continue_run(run2)
        assert result2.status == RunStatus.completed

    @pytest.mark.asyncio
    async def test_workflow_reusable_after_hitl_async(self, async_shared_db):
        """Verify workflow reusability works with async continue_run."""
        confirm_step = Step(
            name="confirm_step",
            executor=process_data,
            requires_confirmation=True,
            confirmation_message="Please confirm",
        )
        workflow = Workflow(
            name="test_reusable_async",
            db=async_shared_db,
            steps=[
                Step(name="fetch", executor=fetch_data),
                confirm_step,
                Step(name="save", executor=save_data),
            ],
        )
        # === First run ===
        run1 = await workflow.arun(input="first run")
        assert run1.is_paused is True
        run1.step_requirements[0].confirm()
        result1 = await workflow.acontinue_run(run1)
        assert result1.status == RunStatus.completed
        # Step should NOT be mutated
        assert confirm_step.requires_confirmation is True
        # === Second run ===
        run2 = await workflow.arun(input="second run")
        assert run2.is_paused is True, "Second async run should also pause"
        run2.step_requirements[0].confirm()
        result2 = await workflow.acontinue_run(run2)
        assert result2.status == RunStatus.completed

    def test_multiple_runs_sequential(self, shared_db):
        """Verify workflow works correctly for many sequential runs."""
        confirm_step = Step(
            name="confirm_step",
            executor=process_data,
            requires_confirmation=True,
        )
        workflow = Workflow(
            name="test_many_runs",
            db=shared_db,
            steps=[confirm_step],
        )
        # Run the workflow 5 times sequentially
        for i in range(5):
            run = workflow.run(input=f"run {i}")
            assert run.is_paused is True, f"Run {i} should pause"
            assert confirm_step.requires_confirmation is True, f"Step mutated on run {i}"
            run.step_requirements[0].confirm()
            result = workflow.continue_run(run)
            assert result.status == RunStatus.completed, f"Run {i} should complete"
            # Verify step still has correct configuration
            assert confirm_step.requires_confirmation is True, f"Step was mutated after run {i}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_hitl.py",
"license": "Apache License 2.0",
"lines": 1031,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_hitl.py | """
Unit tests for Human-In-The-Loop (HITL) workflow functionality.
Tests cover:
- Step confirmation (requires_confirmation)
- Step user input (requires_user_input)
- Router user selection (Router.requires_user_input)
- Error handling with on_error="pause"
- Step rejection with on_reject="skip"
- Workflow pause and resume via continue_run()
- StepRequirement (including route selection), ErrorRequirement dataclasses
- Serialization/deserialization of HITL requirements
"""
from agno.run.base import RunStatus
from agno.run.workflow import WorkflowRunOutput
from agno.workflow.step import Step
from agno.workflow.types import (
ErrorRequirement,
StepInput,
StepOutput,
StepRequirement,
UserInputField,
)
# =============================================================================
# Test Step Functions
# =============================================================================
def fetch_data(step_input: StepInput) -> StepOutput:
    """Stand-in fetch step: returns a canned payload, ignoring its input."""
    output = StepOutput(content="Fetched data from source")
    return output
def process_data(step_input: StepInput) -> StepOutput:
    """Stand-in processing step that echoes any HITL-provided preference."""
    collected = {}
    if step_input.additional_data:
        collected = step_input.additional_data.get("user_input", {})
    preference = collected.get("preference", "default")
    return StepOutput(content=f"Processed data with preference: {preference}")
def save_data(step_input: StepInput) -> StepOutput:
    """Stand-in persistence step: always reports success."""
    output = StepOutput(content="Data saved successfully")
    return output
def failing_step(step_input: StepInput) -> StepOutput:
    """Stand-in broken step: always raises to drive error-handling paths."""
    message = "Intentional test failure"
    raise ValueError(message)
# =============================================================================
# StepRequirement Tests
# =============================================================================
class TestStepRequirement:
    """Tests for the StepRequirement dataclass."""

    def test_step_requirement_creation(self):
        """A fresh confirmation requirement should expose its configuration."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
            confirmation_message="Please confirm this step",
        )
        assert req.step_id == "step-1"
        assert req.step_name == "test_step"
        assert req.step_index == 0
        assert req.requires_confirmation is True
        assert req.confirmation_message == "Please confirm this step"
        assert req.confirmed is None
        assert req.on_reject == "cancel"

    def test_step_requirement_confirm(self):
        """confirm() should flip confirmed from None to True."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
        )
        assert req.confirmed is None
        req.confirm()
        assert req.confirmed is True

    def test_step_requirement_reject(self):
        """reject() should flip confirmed from None to False."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
        )
        assert req.confirmed is None
        req.reject()
        assert req.confirmed is False

    def test_step_requirement_on_reject_skip(self):
        """The on_reject policy should be stored exactly as given."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
            on_reject="skip",
        )
        assert req.on_reject == "skip"

    def test_step_requirement_user_input(self):
        """A user-input requirement should carry its message and schema."""
        schema = [
            UserInputField(name="preference", field_type="str", description="Your preference", required=True),
        ]
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_user_input=True,
            user_input_message="Please provide your preference",
            user_input_schema=schema,
        )
        assert req.requires_user_input is True
        assert req.user_input_message == "Please provide your preference"
        assert len(req.user_input_schema) == 1

    def test_step_requirement_set_user_input(self):
        """set_user_input() should collect keyword arguments into a dict."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_user_input=True,
        )
        # NOTE: the API takes **kwargs rather than a dict
        req.set_user_input(preference="fast")
        assert req.user_input == {"preference": "fast"}

    def test_step_requirement_to_dict(self):
        """to_dict() should serialize confirmation state and reject policy."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
            confirmation_message="Confirm?",
            on_reject="skip",
        )
        req.confirm()
        serialized = req.to_dict()
        assert serialized["step_id"] == "step-1"
        assert serialized["step_name"] == "test_step"
        assert serialized["step_index"] == 0
        assert serialized["requires_confirmation"] is True
        assert serialized["confirmation_message"] == "Confirm?"
        assert serialized["confirmed"] is True
        assert serialized["on_reject"] == "skip"

    def test_step_requirement_from_dict(self):
        """from_dict() should rebuild a StepRequirement from plain data."""
        payload = {
            "step_id": "step-1",
            "step_name": "test_step",
            "step_index": 0,
            "requires_confirmation": True,
            "confirmation_message": "Confirm?",
            "confirmed": True,
            "on_reject": "skip",
        }
        req = StepRequirement.from_dict(payload)
        assert req.step_id == "step-1"
        assert req.step_name == "test_step"
        assert req.confirmed is True
        assert req.on_reject == "skip"

    def test_step_requirement_roundtrip(self):
        """to_dict() followed by from_dict() should preserve all HITL state."""
        original = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
            confirmation_message="Confirm?",
            requires_user_input=True,
            user_input_message="Enter preference",
            on_reject="skip",
        )
        original.confirm()
        # set_user_input takes **kwargs
        original.set_user_input(preference="fast")
        restored = StepRequirement.from_dict(original.to_dict())
        assert restored.step_id == original.step_id
        assert restored.step_name == original.step_name
        assert restored.confirmed == original.confirmed
        assert restored.user_input == original.user_input
        assert restored.on_reject == original.on_reject
# =============================================================================
# StepRequirement Route Selection Tests (formerly RouterRequirement)
# =============================================================================
class TestStepRequirementRouteSelection:
    """Tests for StepRequirement when it carries route-selection fields."""

    def test_route_selection_requirement_creation(self):
        """A route-selection requirement should expose its choices unselected."""
        req = StepRequirement(
            step_id="router-1",
            step_name="test_router",
            step_type="Router",
            requires_route_selection=True,
            available_choices=["option_a", "option_b", "option_c"],
            user_input_message="Select a route",
        )
        assert req.step_id == "router-1"
        assert req.step_name == "test_router"
        assert req.requires_route_selection is True
        assert req.available_choices == ["option_a", "option_b", "option_c"]
        assert req.selected_choices is None
        assert req.allow_multiple_selections is False

    def test_route_selection_select_single(self):
        """select() should record exactly one chosen route."""
        req = StepRequirement(
            step_id="router-1",
            step_name="test_router",
            step_type="Router",
            requires_route_selection=True,
            available_choices=["option_a", "option_b"],
        )
        req.select("option_a")
        assert req.selected_choices == ["option_a"]

    def test_route_selection_select_multiple(self):
        """select_multiple() should record every chosen route in order."""
        req = StepRequirement(
            step_id="router-1",
            step_name="test_router",
            step_type="Router",
            requires_route_selection=True,
            available_choices=["option_a", "option_b", "option_c"],
            allow_multiple_selections=True,
        )
        req.select_multiple(["option_a", "option_c"])
        assert req.selected_choices == ["option_a", "option_c"]

    def test_route_selection_to_dict(self):
        """to_dict() should serialize choices, selections and the multi flag."""
        req = StepRequirement(
            step_id="router-1",
            step_name="test_router",
            step_type="Router",
            requires_route_selection=True,
            available_choices=["option_a", "option_b"],
            allow_multiple_selections=True,
        )
        req.select_multiple(["option_a", "option_b"])
        serialized = req.to_dict()
        assert serialized["step_id"] == "router-1"
        assert serialized["step_name"] == "test_router"
        assert serialized["available_choices"] == ["option_a", "option_b"]
        assert serialized["selected_choices"] == ["option_a", "option_b"]
        assert serialized["allow_multiple_selections"] is True

    def test_route_selection_from_dict(self):
        """from_dict() should rebuild a route-selection requirement."""
        payload = {
            "step_id": "router-1",
            "step_name": "test_router",
            "step_type": "Router",
            "requires_route_selection": True,
            "available_choices": ["option_a", "option_b"],
            "selected_choices": ["option_a"],
            "allow_multiple_selections": False,
        }
        req = StepRequirement.from_dict(payload)
        assert req.step_id == "router-1"
        assert req.requires_route_selection is True
        assert req.selected_choices == ["option_a"]
# =============================================================================
# ErrorRequirement Tests
# =============================================================================
class TestErrorRequirement:
    """Tests for the ErrorRequirement dataclass."""

    def test_error_requirement_creation(self):
        """A fresh ErrorRequirement should carry error details and no decision."""
        req = ErrorRequirement(
            step_id="step-1",
            step_name="failing_step",
            step_index=0,
            error_message="Something went wrong",
            error_type="ValueError",
        )
        assert req.step_id == "step-1"
        assert req.step_name == "failing_step"
        assert req.error_message == "Something went wrong"
        assert req.error_type == "ValueError"
        assert req.retry_count == 0
        assert req.decision is None

    def test_error_requirement_retry(self):
        """retry() should resolve the requirement with a retry decision."""
        req = ErrorRequirement(
            step_id="step-1",
            step_name="failing_step",
            step_index=0,
            error_message="Error",
        )
        assert req.needs_decision is True
        req.retry()
        assert req.decision == "retry"
        assert req.should_retry is True
        assert req.is_resolved is True

    def test_error_requirement_skip(self):
        """skip() should resolve the requirement with a skip decision."""
        req = ErrorRequirement(
            step_id="step-1",
            step_name="failing_step",
            step_index=0,
            error_message="Error",
        )
        req.skip()
        assert req.decision == "skip"
        assert req.should_skip is True
        assert req.is_resolved is True

    def test_error_requirement_to_dict(self):
        """to_dict() should serialize error details, retry count and decision."""
        req = ErrorRequirement(
            step_id="step-1",
            step_name="failing_step",
            step_index=0,
            error_message="Test error",
            error_type="ValueError",
            retry_count=1,
        )
        req.retry()
        serialized = req.to_dict()
        assert serialized["step_id"] == "step-1"
        assert serialized["step_name"] == "failing_step"
        assert serialized["error_message"] == "Test error"
        assert serialized["error_type"] == "ValueError"
        assert serialized["retry_count"] == 1
        assert serialized["decision"] == "retry"

    def test_error_requirement_from_dict(self):
        """from_dict() should rebuild an ErrorRequirement with its decision."""
        payload = {
            "step_id": "step-1",
            "step_name": "failing_step",
            "step_index": 0,
            "error_message": "Test error",
            "error_type": "ValueError",
            "retry_count": 2,
            "decision": "skip",
        }
        req = ErrorRequirement.from_dict(payload)
        assert req.step_id == "step-1"
        assert req.error_message == "Test error"
        assert req.retry_count == 2
        assert req.should_skip is True
# =============================================================================
# Step HITL Configuration Tests
# =============================================================================
class TestStepHITLConfiguration:
    """Tests for Step class HITL configuration.

    Each test only verifies that constructor arguments are stored on the
    Step (or serialized by to_dict); no workflow is executed here.
    """

    def test_step_requires_confirmation(self):
        """Test Step with requires_confirmation."""
        step = Step(
            name="confirm_step",
            executor=fetch_data,
            requires_confirmation=True,
            confirmation_message="Please confirm this step",
        )
        assert step.requires_confirmation is True
        assert step.confirmation_message == "Please confirm this step"

    def test_step_requires_user_input(self):
        """Test Step with requires_user_input."""
        # Step uses List[Dict] for user_input_schema, not List[UserInputField]
        step = Step(
            name="input_step",
            executor=process_data,
            requires_user_input=True,
            user_input_message="Please provide your preference",
            user_input_schema=[
                {"name": "preference", "field_type": "str", "description": "Your preference", "required": True},
            ],
        )
        assert step.requires_user_input is True
        assert step.user_input_message == "Please provide your preference"
        assert len(step.user_input_schema) == 1

    def test_step_on_reject_cancel(self):
        """Test Step with on_reject=cancel (default)."""
        step = Step(
            name="cancel_step",
            executor=fetch_data,
            requires_confirmation=True,
            on_reject="cancel",
        )
        assert step.on_reject == "cancel"

    def test_step_on_reject_skip(self):
        """Test Step with on_reject=skip."""
        step = Step(
            name="skip_step",
            executor=fetch_data,
            requires_confirmation=True,
            on_reject="skip",
        )
        assert step.on_reject == "skip"

    def test_step_on_error_fail(self):
        """Test Step with on_error=fail (default)."""
        step = Step(
            name="fail_step",
            executor=failing_step,
            on_error="fail",
        )
        assert step.on_error == "fail"

    def test_step_on_error_skip(self):
        """Test Step with on_error=skip."""
        step = Step(
            name="skip_error_step",
            executor=failing_step,
            on_error="skip",
        )
        assert step.on_error == "skip"

    def test_step_on_error_pause(self):
        """Test Step with on_error=pause."""
        step = Step(
            name="pause_error_step",
            executor=failing_step,
            on_error="pause",
        )
        assert step.on_error == "pause"

    def test_step_hitl_to_dict(self):
        """Test serializing Step with HITL config to dict."""
        # Exercise every HITL knob at once so to_dict coverage is complete.
        step = Step(
            name="hitl_step",
            executor=fetch_data,
            requires_confirmation=True,
            confirmation_message="Confirm?",
            requires_user_input=True,
            user_input_message="Input?",
            on_reject="skip",
            on_error="pause",
        )
        data = step.to_dict()
        assert data["requires_confirmation"] is True
        assert data["confirmation_message"] == "Confirm?"
        assert data["requires_user_input"] is True
        assert data["user_input_message"] == "Input?"
        assert data["on_reject"] == "skip"
        assert data["on_error"] == "pause"
# =============================================================================
# WorkflowRunOutput HITL Properties Tests
# =============================================================================
class TestWorkflowRunOutputHITL:
    """Tests for WorkflowRunOutput HITL-related properties.

    These tests build WorkflowRunOutput objects directly (no workflow run)
    and check the pause/requirement bookkeeping and its (de)serialization.
    """

    def test_workflow_output_is_paused(self):
        """Test is_paused property."""
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
        )
        assert output.is_paused is True

    def test_workflow_output_step_requirements(self):
        """Test step_requirements handling."""
        req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
        )
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            step_requirements=[req],
        )
        assert len(output.step_requirements) == 1
        assert output.step_requirements[0].step_id == "step-1"

    def test_workflow_output_router_requirements(self):
        """Test router_requirements handling (now via step_requirements with requires_route_selection)."""
        req = StepRequirement(
            step_id="router-1",
            step_name="test_router",
            step_type="Router",
            requires_route_selection=True,
            available_choices=["a", "b"],
        )
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            step_requirements=[req],
        )
        assert len(output.step_requirements) == 1
        assert output.step_requirements[0].step_id == "router-1"

    def test_workflow_output_error_requirements(self):
        """Test error_requirements handling."""
        req = ErrorRequirement(
            step_id="step-1",
            step_name="failing_step",
            step_index=0,
            error_message="Error occurred",
        )
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            error_requirements=[req],
        )
        assert len(output.error_requirements) == 1
        assert output.error_requirements[0].step_id == "step-1"

    def test_workflow_output_active_step_requirements(self):
        """Test active_step_requirements property."""
        # One resolved (confirmed) and one still pending requirement;
        # only the pending one should be reported as active.
        confirmed_req = StepRequirement(
            step_id="step-1",
            step_name="confirmed_step",
            step_index=0,
            requires_confirmation=True,
        )
        confirmed_req.confirm()
        pending_req = StepRequirement(
            step_id="step-2",
            step_name="pending_step",
            step_index=1,
            requires_confirmation=True,
        )
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            step_requirements=[confirmed_req, pending_req],
        )
        active = output.active_step_requirements
        assert len(active) == 1
        assert active[0].step_id == "step-2"

    def test_workflow_output_to_dict_with_requirements(self):
        """Test serializing WorkflowRunOutput with HITL requirements."""
        step_req = StepRequirement(
            step_id="step-1",
            step_name="test_step",
            step_index=0,
            requires_confirmation=True,
        )
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            step_requirements=[step_req],
        )
        data = output.to_dict()
        # Status is serialized as the enum value string
        assert data["status"] == RunStatus.paused.value
        assert "step_requirements" in data
        assert len(data["step_requirements"]) == 1

    def test_workflow_output_from_dict_with_requirements(self):
        """Test deserializing WorkflowRunOutput with HITL requirements."""
        data = {
            "run_id": "run-1",
            "session_id": "session-1",
            "workflow_name": "test_workflow",
            "status": RunStatus.paused.value,  # Use enum value
            "step_requirements": [
                {
                    "step_id": "step-1",
                    "step_name": "test_step",
                    "step_index": 0,
                    "requires_confirmation": True,
                    "confirmed": None,
                    "on_reject": "cancel",
                }
            ],
        }
        output = WorkflowRunOutput.from_dict(data)
        # Status may be string or enum after deserialization
        assert str(output.status) == RunStatus.paused.value or output.status == RunStatus.paused
        assert len(output.step_requirements) == 1
        assert output.step_requirements[0].step_id == "step-1"

    def test_workflow_output_paused_step_info(self):
        """Test paused_step_index and paused_step_name fields."""
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            paused_step_index=2,
            paused_step_name="process_data",
        )
        assert output.paused_step_index == 2
        assert output.paused_step_name == "process_data"

    def test_workflow_output_paused_step_info_serialization(self):
        """Test serialization of paused_step_index and paused_step_name."""
        output = WorkflowRunOutput(
            run_id="run-1",
            session_id="session-1",
            workflow_name="test_workflow",
            status=RunStatus.paused,
            paused_step_index=1,
            paused_step_name="confirm_step",
        )
        data = output.to_dict()
        assert data["paused_step_index"] == 1
        assert data["paused_step_name"] == "confirm_step"

    def test_workflow_output_paused_step_info_deserialization(self):
        """Test deserialization of paused_step_index and paused_step_name."""
        data = {
            "run_id": "run-1",
            "session_id": "session-1",
            "workflow_name": "test_workflow",
            "status": RunStatus.paused.value,
            "paused_step_index": 3,
            "paused_step_name": "final_step",
        }
        output = WorkflowRunOutput.from_dict(data)
        assert output.paused_step_index == 3
        assert output.paused_step_name == "final_step"
# =============================================================================
# UserInputField Tests
# =============================================================================
class TestUserInputField:
    """Tests for the UserInputField dataclass."""

    def test_user_input_field_creation(self):
        """All constructor arguments are stored on the field."""
        input_field = UserInputField(
            name="preference",
            field_type="str",
            description="Your preference",
            required=True,
        )
        assert input_field.name == "preference"
        assert input_field.description == "Your preference"
        assert input_field.field_type == "str"
        assert input_field.required is True

    def test_user_input_field_defaults(self):
        """An optional field starts out with no captured value."""
        input_field = UserInputField(
            name="optional_field",
            field_type="str",
            description="Optional field",
            required=False,
        )
        assert input_field.field_type == "str"
        assert input_field.required is False
        assert input_field.value is None

    def test_user_input_field_to_dict(self):
        """to_dict() serializes the declared field metadata."""
        input_field = UserInputField(
            name="preference",
            field_type="str",
            description="Your preference",
            required=True,
        )
        serialized = input_field.to_dict()
        for key, expected in (
            ("name", "preference"),
            ("description", "Your preference"),
            ("field_type", "str"),
        ):
            assert serialized[key] == expected
        assert serialized["required"] is True

    def test_user_input_field_from_dict(self):
        """from_dict() restores metadata and any captured value."""
        payload = {
            "name": "preference",
            "description": "Your preference",
            "field_type": "str",
            "required": True,
            "value": "fast",
        }
        restored = UserInputField.from_dict(payload)
        assert restored.name == "preference"
        assert restored.description == "Your preference"
        assert restored.required is True
        assert restored.value == "fast"
# =============================================================================
# Condition on_reject Tests
# =============================================================================
class TestConditionOnReject:
    """Tests for Condition on_reject behavior (else, skip, cancel).

    Covers configuration, StepRequirement creation, direct execute()
    branching, and (de)serialization of the on_reject field. Imports of
    Condition/OnReject are kept local to each test, matching the file's
    existing convention for these optional workflow types.
    """

    def test_condition_on_reject_else_default(self):
        """Test that Condition defaults to on_reject='else'."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            else_steps=[Step(name="else_step", executor=save_data)],
            requires_confirmation=True,
        )
        assert condition.on_reject == OnReject.else_branch

    def test_condition_on_reject_skip(self):
        """Test Condition with on_reject='skip'."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            else_steps=[Step(name="else_step", executor=save_data)],
            requires_confirmation=True,
            on_reject=OnReject.skip,
        )
        assert condition.on_reject == OnReject.skip

    def test_condition_on_reject_cancel(self):
        """Test Condition with on_reject='cancel'."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            requires_confirmation=True,
            on_reject=OnReject.cancel,
        )
        assert condition.on_reject == OnReject.cancel

    def test_condition_create_step_requirement_on_reject_else(self):
        """Test that create_step_requirement includes on_reject='else'."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            else_steps=[Step(name="else_step", executor=save_data)],
            requires_confirmation=True,
            on_reject=OnReject.else_branch,
        )
        step_input = StepInput(input="test")
        req = condition.create_step_requirement(0, step_input)
        # The requirement carries the enum as its string value.
        assert req.step_type == "Condition"
        assert req.on_reject == "else"
        assert req.requires_confirmation is True

    def test_condition_create_step_requirement_on_reject_skip(self):
        """Test that create_step_requirement includes on_reject='skip'."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            requires_confirmation=True,
            on_reject=OnReject.skip,
        )
        step_input = StepInput(input="test")
        req = condition.create_step_requirement(0, step_input)
        assert req.step_type == "Condition"
        assert req.on_reject == "skip"

    def test_condition_create_step_requirement_on_reject_cancel(self):
        """Test that create_step_requirement includes on_reject='cancel'."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            requires_confirmation=True,
            on_reject=OnReject.cancel,
        )
        step_input = StepInput(input="test")
        req = condition.create_step_requirement(0, step_input)
        assert req.step_type == "Condition"
        assert req.on_reject == "cancel"

    def test_condition_evaluator_defaults_to_true(self):
        """Test that Condition evaluator defaults to True."""
        from agno.workflow.condition import Condition

        # Should not need to specify evaluator when using requires_confirmation
        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            requires_confirmation=True,
        )
        assert condition.evaluator is True

    def test_condition_force_else_branch_with_else_steps(self):
        """Test Condition.execute with force_else_branch=True when else_steps exist."""
        from agno.workflow.condition import Condition

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def else_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="ELSE branch executed")

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=if_step_func)],
            else_steps=[Step(name="else_step", executor=else_step_func)],
        )
        step_input = StepInput(input="test")
        result = condition.execute(step_input, force_else_branch=True)
        # Should execute else branch
        assert "else branch" in result.content.lower()
        assert result.success is True

    def test_condition_force_else_branch_without_else_steps(self):
        """Test Condition.execute with force_else_branch=True when no else_steps."""
        from agno.workflow.condition import Condition

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=if_step_func)],
            # No else_steps
        )
        step_input = StepInput(input="test")
        result = condition.execute(step_input, force_else_branch=True)
        # Should skip since no else_steps
        assert "skipped" in result.content.lower() or "no else branch" in result.content.lower()
        assert result.success is True

    def test_condition_normal_execution_if_branch(self):
        """Test Condition normal execution takes if branch when evaluator is True."""
        from agno.workflow.condition import Condition

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def else_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="ELSE branch executed")

        condition = Condition(
            name="test_condition",
            evaluator=True,
            steps=[Step(name="if_step", executor=if_step_func)],
            else_steps=[Step(name="else_step", executor=else_step_func)],
        )
        step_input = StepInput(input="test")
        result = condition.execute(step_input, force_else_branch=False)
        # Should execute if branch
        assert "if branch" in result.content.lower()

    def test_condition_normal_execution_else_branch(self):
        """Test Condition normal execution takes else branch when evaluator is False."""
        from agno.workflow.condition import Condition

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def else_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="ELSE branch executed")

        condition = Condition(
            name="test_condition",
            evaluator=False,
            steps=[Step(name="if_step", executor=if_step_func)],
            else_steps=[Step(name="else_step", executor=else_step_func)],
        )
        step_input = StepInput(input="test")
        result = condition.execute(step_input, force_else_branch=False)
        # Should execute else branch
        assert "else branch" in result.content.lower()

    def test_condition_to_dict_includes_on_reject(self):
        """Test that Condition.to_dict includes on_reject field."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        condition = Condition(
            name="test_condition",
            steps=[Step(name="if_step", executor=fetch_data)],
            requires_confirmation=True,
            on_reject=OnReject.else_branch,
        )
        data = condition.to_dict()
        assert "on_reject" in data
        # Tolerate either the raw enum repr or the enum's string value.
        assert data["on_reject"] == "OnReject.else_branch" or data["on_reject"] == "else"

    def test_condition_from_dict_restores_on_reject(self):
        """Test that Condition.from_dict restores on_reject field."""
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject

        data = {
            "type": "Condition",
            "name": "test_condition",
            "evaluator": True,
            "evaluator_type": "bool",
            "steps": [],
            "else_steps": [],
            "requires_confirmation": True,
            "on_reject": "skip",
        }
        condition = Condition.from_dict(data)
        # Tolerate either the raw string or the coerced enum member.
        assert condition.on_reject == "skip" or condition.on_reject == OnReject.skip

    def test_step_requirement_on_reject_else_only_for_condition(self):
        """Test that on_reject='else' is validated for Condition step type."""
        # Create a StepRequirement with on_reject='else' but non-Condition type
        req = StepRequirement(
            step_id="step-1",
            step_name="regular_step",
            step_index=0,
            step_type="Step",  # Not a Condition
            requires_confirmation=True,
            on_reject="else",  # This should be invalid for non-Condition
        )
        # The requirement can be created, but workflow.py should warn/fallback
        assert req.on_reject == "else"
        assert req.step_type == "Step"
class TestConditionOnRejectWorkflowIntegration:
    """Integration tests for Condition on_reject with Workflow.

    Each test drives a full run -> pause -> confirm/reject -> continue_run
    cycle against a SqliteDb-backed Workflow, so statement order matters:
    the requirement decision must be set on the paused run output before
    continue_run is called.
    """

    def test_condition_on_reject_else_executes_else_branch(self):
        """Test that rejecting a Condition with on_reject='else' executes else_steps."""
        from agno.db.sqlite import SqliteDb
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject
        from agno.workflow.workflow import Workflow

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def else_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="ELSE branch executed")

        def final_step_func(step_input: StepInput) -> StepOutput:
            previous = step_input.previous_step_content or "nothing"
            return StepOutput(content=f"Final step. Previous: {previous}")

        condition = Condition(
            name="user_choice",
            steps=[Step(name="if_step", executor=if_step_func)],
            else_steps=[Step(name="else_step", executor=else_step_func)],
            requires_confirmation=True,
            on_reject=OnReject.else_branch,
        )
        workflow = Workflow(
            name="test_workflow",
            steps=[condition, Step(name="final_step", executor=final_step_func)],
            db=SqliteDb(db_file="tmp/test_condition_on_reject.db"),
        )
        # Run workflow - should pause at condition
        run_output = workflow.run("test input")
        assert run_output.is_paused
        assert len(run_output.steps_requiring_confirmation) == 1
        req = run_output.steps_requiring_confirmation[0]
        assert req.step_name == "user_choice"
        assert req.on_reject == "else"
        # Reject the condition
        req.reject()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
        # Should complete and else branch should have been executed
        assert run_output.status == RunStatus.completed
        # The final step should have received content from else_step
        assert "ELSE branch executed" in str(run_output.content) or any(
            "ELSE branch executed" in str(r.content) for r in (run_output.step_results or [])
        )

    def test_condition_on_reject_skip_skips_entire_condition(self):
        """Test that rejecting a Condition with on_reject='skip' skips both branches."""
        from agno.db.sqlite import SqliteDb
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject
        from agno.workflow.workflow import Workflow

        def setup_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Setup complete")

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def else_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="ELSE branch executed")

        def final_step_func(step_input: StepInput) -> StepOutput:
            previous = step_input.previous_step_content or "nothing"
            return StepOutput(content=f"Final step. Previous: {previous}")

        condition = Condition(
            name="skippable_condition",
            steps=[Step(name="if_step", executor=if_step_func)],
            else_steps=[Step(name="else_step", executor=else_step_func)],
            requires_confirmation=True,
            on_reject=OnReject.skip,
        )
        workflow = Workflow(
            name="test_workflow",
            steps=[
                Step(name="setup_step", executor=setup_step_func),
                condition,
                Step(name="final_step", executor=final_step_func),
            ],
            db=SqliteDb(db_file="tmp/test_condition_skip.db"),
        )
        # Run workflow - should pause at condition
        run_output = workflow.run("test input")
        assert run_output.is_paused
        req = run_output.steps_requiring_confirmation[0]
        assert req.on_reject == "skip"
        # Reject the condition
        req.reject()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
        # Should complete
        assert run_output.status == RunStatus.completed
        # Neither if nor else branch should have executed
        step_names = [r.step_name for r in (run_output.step_results or [])]
        assert "if_step" not in step_names
        assert "else_step" not in step_names
        # final_step should have "Setup complete" as previous (skipped condition)
        assert "Setup complete" in str(run_output.content)

    def test_condition_on_reject_cancel_cancels_workflow(self):
        """Test that rejecting a Condition with on_reject='cancel' cancels the workflow."""
        from agno.db.sqlite import SqliteDb
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject
        from agno.workflow.workflow import Workflow

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        condition = Condition(
            name="critical_condition",
            steps=[Step(name="if_step", executor=if_step_func)],
            requires_confirmation=True,
            on_reject=OnReject.cancel,
        )
        workflow = Workflow(
            name="test_workflow",
            steps=[condition],
            db=SqliteDb(db_file="tmp/test_condition_cancel.db"),
        )
        # Run workflow - should pause at condition
        run_output = workflow.run("test input")
        assert run_output.is_paused
        req = run_output.steps_requiring_confirmation[0]
        assert req.on_reject == "cancel"
        # Reject the condition
        req.reject()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
        # Should be cancelled
        assert run_output.status == RunStatus.cancelled
        assert "rejected" in str(run_output.content).lower() or "cancelled" in str(run_output.content).lower()

    def test_condition_confirm_executes_if_branch(self):
        """Test that confirming a Condition executes the if branch."""
        from agno.db.sqlite import SqliteDb
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject
        from agno.workflow.workflow import Workflow

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def else_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="ELSE branch executed")

        condition = Condition(
            name="user_choice",
            steps=[Step(name="if_step", executor=if_step_func)],
            else_steps=[Step(name="else_step", executor=else_step_func)],
            requires_confirmation=True,
            on_reject=OnReject.else_branch,
        )
        workflow = Workflow(
            name="test_workflow",
            steps=[condition],
            db=SqliteDb(db_file="tmp/test_condition_confirm.db"),
        )
        # Run workflow - should pause at condition
        run_output = workflow.run("test input")
        assert run_output.is_paused
        req = run_output.steps_requiring_confirmation[0]
        # Confirm the condition
        req.confirm()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
        # Should complete and if branch should have been executed
        assert run_output.status == RunStatus.completed
        # Check if_step was executed
        step_contents = [str(r.content) for r in (run_output.step_results or [])]
        assert any("IF branch executed" in c for c in step_contents) or "if branch" in str(run_output.content).lower()

    def test_condition_on_reject_else_no_else_steps_skips(self):
        """Test on_reject='else' with no else_steps skips the condition."""
        from agno.db.sqlite import SqliteDb
        from agno.workflow.condition import Condition
        from agno.workflow.types import OnReject
        from agno.workflow.workflow import Workflow

        def if_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="IF branch executed")

        def final_step_func(step_input: StepInput) -> StepOutput:
            return StepOutput(content="Final step executed")

        condition = Condition(
            name="no_else_condition",
            steps=[Step(name="if_step", executor=if_step_func)],
            # No else_steps!
            requires_confirmation=True,
            on_reject=OnReject.else_branch,
        )
        workflow = Workflow(
            name="test_workflow",
            steps=[condition, Step(name="final_step", executor=final_step_func)],
            db=SqliteDb(db_file="tmp/test_condition_no_else.db"),
        )
        # Run workflow - should pause at condition
        run_output = workflow.run("test input")
        assert run_output.is_paused
        req = run_output.steps_requiring_confirmation[0]
        # Reject the condition (with on_reject='else' but no else_steps)
        req.reject()
        # Continue the workflow
        run_output = workflow.continue_run(run_output)
        # Should complete - condition should be skipped since no else_steps
        assert run_output.status == RunStatus.completed
        # if_step should NOT have executed
        step_names = [r.step_name for r in (run_output.step_results or [])]
        assert "if_step" not in step_names
        # final_step should have executed
        assert "final_step" in step_names or "Final step executed" in str(run_output.content)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_hitl.py",
"license": "Apache License 2.0",
"lines": 1046,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/03_teams/02_modes/tasks/11_streaming_events.py | """
Task Mode Streaming Events
==========================
Demonstrates how to consume streaming events programmatically in `mode=tasks`.
This example shows how to:
1. Use `stream=True` with `run()` to get an iterator of events
2. Handle task iteration events (started/completed)
3. Handle task state updates
4. Process content deltas as they arrive
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.run.agent import RunContentEvent as AgentRunContentEvent
from agno.run.team import (
RunContentEvent,
TaskIterationCompletedEvent,
TaskIterationStartedEvent,
TaskStateUpdatedEvent,
TeamRunEvent,
ToolCallCompletedEvent,
ToolCallStartedEvent,
)
from agno.team.mode import TeamMode
from agno.team.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
# Member agent: gathers factual information on the requested topic.
researcher = Agent(
    name="Researcher",
    role="Researches topics and gathers information",
    model=OpenAIResponses(id="gpt-5.1"),
    instructions=[
        "Research the given topic thoroughly.",
        "Provide factual information.",
    ],
)

# Member agent: condenses the researcher's output into key points.
summarizer = Agent(
    name="Summarizer",
    role="Summarizes information into concise points",
    model=OpenAIResponses(id="gpt-5.1"),
    instructions=["Create clear, concise summaries.", "Highlight key points."],
)

# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
# Task-mode team: the leader model creates/executes tasks for members and
# calls mark_all_complete when done; max_iterations bounds the task loop.
team = Team(
    name="Research Team",
    mode=TeamMode.tasks,
    model=OpenAIResponses(id="gpt-5.1"),
    members=[researcher, summarizer],
    instructions=[
        "You are a research team leader. Follow these steps exactly:",
        "1. Create a task for the Researcher to gather information.",
        "2. Execute the Researcher's task.",
        "3. Create a task for the Summarizer to summarize the research.",
        "4. Execute the Summarizer's task.",
        "5. Call mark_all_complete with a final summary when all tasks are done.",
    ],
    max_iterations=3,
)
# ---------------------------------------------------------------------------
# Sync streaming with event handling
# ---------------------------------------------------------------------------
def _preview(text, limit=100):
    """Return a display-friendly preview of possibly-missing event text.

    The original inline expression always appended "..." to the slice,
    which printed misleading strings such as "None..." when the summary
    was absent and added an ellipsis even when nothing was truncated.
    """
    if not text:
        return "None"
    return text[:limit] + ("..." if len(text) > limit else "")


def streaming_with_events() -> None:
    """Demonstrates sync streaming with programmatic event handling.

    Consumes the event iterator returned by `team.run(stream=True)` and
    dispatches on the concrete event type to show task-iteration progress,
    task-state updates, tool calls, and streamed content deltas.
    """
    print("\n--- Sync Streaming with Event Handling ---\n")

    # Use stream=True to get an iterator of events
    response_stream = team.run(
        "What are the key benefits of microservices architecture?",
        stream=True,
        stream_events=True,
    )

    for event in response_stream:
        # Handle task iteration started - show all fields
        if isinstance(event, TaskIterationStartedEvent):
            print("\n" + "=" * 60)
            print("TASK ITERATION STARTED")
            print("=" * 60)
            print(f"  event: {event.event}")
            print(f"  iteration: {event.iteration}")
            print(f"  max_iterations: {event.max_iterations}")
            print("=" * 60)
        # Handle task iteration completed - show all fields
        elif isinstance(event, TaskIterationCompletedEvent):
            print("\n" + "=" * 60)
            print("TASK ITERATION COMPLETED")
            print("=" * 60)
            print(f"  event: {event.event}")
            print(f"  iteration: {event.iteration}")
            print(f"  max_iterations: {event.max_iterations}")
            print(f"  task_summary: {_preview(event.task_summary)}")
            print("=" * 60)
        # Handle task state updates - show all fields
        elif isinstance(event, TaskStateUpdatedEvent):
            print("\n" + "-" * 60)
            print("TASK STATE UPDATED")
            print("-" * 60)
            print(f"  event: {event.event}")
            print(f"  task_summary: {_preview(event.task_summary)}")
            print(f"  goal_complete: {event.goal_complete}")
            print("-" * 60)
        # Handle tool call events (shows when tasks are being executed)
        elif isinstance(event, ToolCallStartedEvent):
            if event.tool and event.tool.tool_name:
                print(f"\n[Tool: {event.tool.tool_name}]", end="")
        elif isinstance(event, ToolCallCompletedEvent):
            pass  # Tool completed
        # Handle member agent content streaming
        elif isinstance(event, AgentRunContentEvent):
            if event.content:
                print(event.content, end="", flush=True)
        # Handle team content deltas
        elif isinstance(event, RunContentEvent):
            if event.content:
                print(event.content, end="", flush=True)
        # Handle other events by their event type
        elif hasattr(event, "event"):
            if event.event == TeamRunEvent.run_started.value:
                print("[Run Started]")
            elif event.event == TeamRunEvent.run_completed.value:
                print("\n[Run Completed]")
    print()
# Run the streaming demo only when this file is executed as a script.
if __name__ == "__main__":
    streaming_with_events()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/02_modes/tasks/11_streaming_events.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/teams/test_tasks_mode_streaming.py | """
Integration tests for TeamMode.tasks streaming functionality.
Tests verify that task mode emits proper events during streaming:
- TaskIterationStartedEvent
- TaskIterationCompletedEvent
- TaskStateUpdatedEvent
- Tool call events for task tools (create_task, execute_task, mark_all_complete)
"""
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.team import (
TaskIterationCompletedEvent,
TaskIterationStartedEvent,
TaskStateUpdatedEvent,
TeamRunEvent,
)
from agno.team.mode import TeamMode
from agno.team.team import Team
@pytest.fixture
def researcher_agent():
    """Build the Researcher member agent used by the tasks-mode team fixture."""
    agent = Agent(
        name="Researcher",
        role="Researches topics and gathers information",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Research the given topic.", "Provide factual information."],
    )
    return agent
@pytest.fixture
def summarizer_agent():
    """Build the Summarizer member agent used by the tasks-mode team fixture."""
    agent = Agent(
        name="Summarizer",
        role="Summarizes information into concise points",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Create clear, concise summaries.", "Highlight key points."],
    )
    return agent
@pytest.fixture
def tasks_team(researcher_agent, summarizer_agent):
    """Build a two-member team running in tasks mode for the streaming tests."""
    # Explicit step-by-step leader instructions keep the model's tool usage
    # predictable across test runs.
    leader_steps = [
        "You are a research team leader. Follow these steps exactly:",
        "1. Create a task for the Researcher to gather information.",
        "2. Execute the Researcher's task.",
        "3. Create a task for the Summarizer to summarize the research.",
        "4. Execute the Summarizer's task.",
        "5. Call mark_all_complete with a final summary when all tasks are done.",
    ]
    return Team(
        name="Research Team",
        mode=TeamMode.tasks,
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[researcher_agent, summarizer_agent],
        instructions=leader_steps,
        max_iterations=3,
        telemetry=False,
    )
def test_tasks_mode_emits_iteration_events(tasks_team):
    """Verify tasks mode emits both iteration started and completed events."""
    started = []
    completed = []
    for chunk in tasks_team.run(
        "What are 3 benefits of exercise?",
        stream=True,
        stream_events=True,
    ):
        if isinstance(chunk, TaskIterationStartedEvent):
            started.append(chunk)
        elif isinstance(chunk, TaskIterationCompletedEvent):
            completed.append(chunk)
    # At least one iteration must have begun.
    assert len(started) >= 1, "Should have at least one iteration started event"
    # Spot-check the payload of the first started event.
    first_started = started[0]
    assert first_started.event == TeamRunEvent.task_iteration_started.value
    assert first_started.iteration >= 1
    assert first_started.max_iterations == 3
    # And at least one iteration must have finished.
    assert len(completed) >= 1, "Should have at least one iteration completed event"
    # Spot-check the payload of the first completed event.
    first_completed = completed[0]
    assert first_completed.event == TeamRunEvent.task_iteration_completed.value
    assert first_completed.iteration >= 1
    assert first_completed.max_iterations == 3
def test_tasks_mode_emits_task_state_updated(tasks_team):
    """Verify tasks mode emits TaskStateUpdatedEvent as the task board changes."""
    observed = [
        chunk
        for chunk in tasks_team.run(
            "List 2 programming languages and their main use cases.",
            stream=True,
            stream_events=True,
        )
        if isinstance(chunk, TaskStateUpdatedEvent)
    ]
    # At least one state update must have been streamed.
    assert len(observed) >= 1, "Should have at least one task state updated event"
    for state_event in observed:
        assert state_event.event == TeamRunEvent.task_state_updated.value
        # Each update carries a textual summary and/or the goal flag.
        assert state_event.task_summary is not None or state_event.goal_complete is not None
def test_tasks_mode_emits_tool_call_events(tasks_team):
    """Test that tasks mode emits tool call events for task tools.

    Groups every event that carries an ``event`` attribute by its event
    name, then verifies that ``tool_call_started`` fired and that at least
    one of the task-orchestration tools was invoked.
    """
    events = {}
    for event in tasks_team.run(
        "Name one country and its capital.",
        stream=True,
        stream_events=True,
    ):
        if hasattr(event, "event"):
            # setdefault replaces the manual "key not in dict" initialization.
            events.setdefault(event.event, []).append(event)
    # Should have tool_call_started events
    assert TeamRunEvent.tool_call_started.value in events, "Should emit tool_call_started events"
    tool_started_events = events[TeamRunEvent.tool_call_started.value]
    # Check that task-related tools are called
    tool_names = [e.tool.tool_name for e in tool_started_events if e.tool]
    task_tools = {"create_task", "execute_task", "mark_all_complete", "update_task_status"}
    # At least one task tool should be called
    assert any(name in task_tools for name in tool_names), f"Should call task tools, got: {tool_names}"
def test_tasks_mode_basic_streaming(tasks_team):
    """Verify that content deltas are streamed in tasks mode."""
    content_chunks = [
        chunk
        for chunk in tasks_team.run(
            "What is 2+2?",
            stream=True,
            stream_events=True,
        )
        if getattr(chunk, "event", None) == TeamRunEvent.run_content.value
    ]
    # Should have content events
    assert len(content_chunks) > 0, "Should emit run_content events"
@pytest.mark.asyncio
async def test_tasks_mode_async_streaming(tasks_team):
    """Verify the async run path also emits iteration lifecycle events."""
    iteration_starts = []
    iteration_ends = []
    async for chunk in tasks_team.arun(
        "What color is the sky?",
        stream=True,
        stream_events=True,
    ):
        if isinstance(chunk, TaskIterationStartedEvent):
            iteration_starts.append(chunk)
        elif isinstance(chunk, TaskIterationCompletedEvent):
            iteration_ends.append(chunk)
    # Both ends of the iteration lifecycle must appear in the async stream.
    assert len(iteration_starts) >= 1, "Async should emit TaskIterationStartedEvent"
    assert len(iteration_ends) >= 1, "Async should emit TaskIterationCompletedEvent"
def test_tasks_mode_run_started_and_completed_events(tasks_team):
    """Test that tasks mode emits run_started and run_completed events.

    Each of the two lifecycle events must appear exactly once per run.
    """
    events = {}
    for event in tasks_team.run(
        "Say hello",
        stream=True,
        stream_events=True,
    ):
        if hasattr(event, "event"):
            # setdefault replaces the manual "key not in dict" initialization.
            events.setdefault(event.event, []).append(event)
    # Should have run_started
    assert TeamRunEvent.run_started.value in events, "Should emit run_started"
    assert len(events[TeamRunEvent.run_started.value]) == 1
    # Should have run_completed
    assert TeamRunEvent.run_completed.value in events, "Should emit run_completed"
    assert len(events[TeamRunEvent.run_completed.value]) == 1
def test_tasks_mode_iteration_numbers_increment():
    """Verify iteration numbers count up 1, 2, 3, ... across iterations."""
    helper = Agent(
        name="Helper",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Help with the task."],
    )
    team = Team(
        name="Simple Team",
        mode=TeamMode.tasks,
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[helper],
        instructions=[
            "Create one task for Helper and execute it.",
            "Then call mark_all_complete.",
        ],
        max_iterations=5,
        telemetry=False,
    )
    observed = [
        chunk
        for chunk in team.run(
            "Say hi",
            stream=True,
            stream_events=True,
        )
        if isinstance(chunk, TaskIterationStartedEvent)
    ]
    # At least one iteration must have started.
    assert len(observed) >= 1
    # Numbering is 1-based.
    assert observed[0].iteration == 1
    # Successive iterations carry strictly incrementing numbers.
    for expected, started_event in enumerate(observed, start=1):
        assert started_event.iteration == expected, f"Iteration {expected - 1} should have iteration number {expected}"
def test_tasks_mode_max_iterations_in_events():
    """Test that max_iterations is correctly reported in events.

    The original loop asserted inside the stream and ``break``-ed on the
    first TaskIterationStartedEvent, so the test passed vacuously when no
    such event was emitted at all. Collect the events first, assert that at
    least one arrived, then check max_iterations on every one.
    """
    max_iter = 4
    team = Team(
        name="Test Team",
        mode=TeamMode.tasks,
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        instructions=["Answer directly."],
        max_iterations=max_iter,
        telemetry=False,
    )
    iteration_events = []
    for event in team.run(
        "Hi",
        stream=True,
        stream_events=True,
    ):
        if isinstance(event, TaskIterationStartedEvent):
            iteration_events.append(event)
    # Guard against a vacuous pass: the event must actually be emitted.
    assert len(iteration_events) >= 1, "Should emit at least one TaskIterationStartedEvent"
    for event in iteration_events:
        assert event.max_iterations == max_iter, f"max_iterations should be {max_iter}"
def test_tasks_mode_goal_complete_in_state_event():
    """Verify TaskStateUpdatedEvent exposes the goal_complete/task_summary fields."""
    worker = Agent(
        name="Worker",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions=["Complete the task."],
    )
    team = Team(
        name="Goal Team",
        mode=TeamMode.tasks,
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[worker],
        instructions=[
            "Create a task for Worker, execute it, then call mark_all_complete with summary.",
        ],
        max_iterations=3,
        telemetry=False,
    )
    captured = [
        chunk
        for chunk in team.run(
            "Say done",
            stream=True,
            stream_events=True,
        )
        if isinstance(chunk, TaskStateUpdatedEvent)
    ]
    # Should have at least one state event
    assert len(captured) >= 1, "Should have TaskStateUpdatedEvent"
    # Check if any state event has goal_complete (model may or may not call mark_all_complete)
    # The test verifies the event structure is correct
    for state_event in captured:
        assert hasattr(state_event, "goal_complete"), "TaskStateUpdatedEvent should have goal_complete field"
        assert hasattr(state_event, "task_summary"), "TaskStateUpdatedEvent should have task_summary field"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_tasks_mode_streaming.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/02_agents/14_advanced/combined_metrics.py | """
Combined Metrics
=============================
When an agent uses multiple background features, each model's
calls are tracked under separate detail keys:
- "model" for the agent's own calls
- "reasoning_model" for reasoning manager calls
- "compression_model" for compression manager calls
- "output_model" for output model calls
- "memory_model" for memory manager calls
- "culture_model" for culture manager calls
- "session_summary_model" for session summary calls
- "eval_model" for evaluation hook calls
This example shows all detail keys and session-level metrics.
"""
from typing import List
from agno.agent import Agent
from agno.compression.manager import CompressionManager
from agno.culture.manager import CultureManager
from agno.db.postgres import PostgresDb
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.memory.manager import MemoryManager
from agno.models.openai import OpenAIChat
from agno.session.summary import SessionSummaryManager
from agno.tools.yfinance import YFinanceTools
from pydantic import BaseModel, Field
from rich.pretty import pprint
class StockSummary(BaseModel):
    """Structured output schema the agent is configured to produce (via output_schema)."""

    ticker: str = Field(..., description="Stock ticker symbol")
    summary: str = Field(..., description="Brief summary of the stock")
    key_metrics: List[str] = Field(..., description="Key financial metrics")
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Shared Postgres backend for sessions, memories, and cultural knowledge.
db = PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai")
# Post-run evaluation hook; its model calls land under the "eval_model" key.
eval_hook = AgentAsJudgeEval(
    name="Quality Check",
    model=OpenAIChat(id="gpt-4o-mini"),
    criteria="Response should be helpful and accurate",
    scoring_strategy="binary",
)
# Agent wired with every background feature so each detail key appears
# in run_response.metrics.details (see the key list in the module docstring).
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),  # tracked under "model"
    tools=[YFinanceTools(enable_stock_price=True, enable_company_info=True)],
    reasoning_model=OpenAIChat(id="gpt-4o-mini"),  # tracked under "reasoning_model"
    reasoning=True,
    compression_manager=CompressionManager(
        model=OpenAIChat(id="gpt-4o-mini"),  # tracked under "compression_model"
        compress_tool_results_limit=1,
    ),
    output_model=OpenAIChat(id="gpt-4o-mini"),  # tracked under "output_model"
    output_schema=StockSummary,
    structured_outputs=True,
    memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=db),  # "memory_model"
    update_memory_on_run=True,
    culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=db),  # "culture_model"
    update_cultural_knowledge=True,
    session_summary_manager=SessionSummaryManager(model=OpenAIChat(id="gpt-4o-mini")),  # "session_summary_model"
    enable_session_summaries=True,
    post_hooks=[eval_hook],
    db=db,
    session_id="combined-metrics-demo",
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # One run that exercises every configured background model.
    output = agent.run(
        "Get the stock price and company info for NVDA and summarize it."
    )

    def _banner(title: str) -> None:
        # Section header matching the cookbook's output style.
        print("=" * 50)
        print(title)
        print("=" * 50)

    _banner("RUN METRICS")
    pprint(output.metrics)
    _banner("MODEL DETAILS")
    # Per-model breakdown: one detail key per background feature.
    metrics = output.metrics
    if metrics and metrics.details:
        for detail_key, metric_entries in metrics.details.items():
            print(f"\n{detail_key}:")
            for entry in metric_entries:
                pprint(entry)
    _banner("SESSION METRICS")
    session_metrics = agent.get_session_metrics()
    if session_metrics:
        pprint(session_metrics)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/combined_metrics.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/02_agents/14_advanced/culture_metrics.py | """
Culture Manager Metrics
=============================
When an agent uses a CultureManager, the culture model's
calls are tracked under the "culture_model" detail key.
"""
from agno.agent import Agent
from agno.culture.manager import CultureManager
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db = PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai")
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    # Culture model calls are tracked under the "culture_model" detail key.
    culture_manager=CultureManager(model=OpenAIChat(id="gpt-4o-mini"), db=db),
    update_cultural_knowledge=True,
    db=db,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Message containing team practices the culture manager can extract.
    run_response = agent.run(
        "Our team always does code reviews before merging. We pair program on complex features."
    )
    print("=" * 50)
    print("RUN METRICS")
    print("=" * 50)
    # Aggregated metrics for the whole run.
    pprint(run_response.metrics)
    print("=" * 50)
    print("MODEL DETAILS")
    print("=" * 50)
    # Per-model breakdown; expect "model" and "culture_model" entries here.
    if run_response.metrics and run_response.metrics.details:
        for model_type, model_metrics_list in run_response.metrics.details.items():
            print(f"\n{model_type}:")
            for model_metric in model_metrics_list:
                pprint(model_metric)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/culture_metrics.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/02_agents/14_advanced/multi_model_metrics.py | """
Multi-Model Metrics
=============================
When an agent uses a MemoryManager, each manager's model calls
are tracked under separate detail keys in metrics.details.
This example shows the "model" vs "memory_model" breakdown.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.memory.manager import MemoryManager
from agno.models.openai import OpenAIChat
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db = PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai")
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    # Memory model calls are tracked under the "memory_model" detail key.
    memory_manager=MemoryManager(model=OpenAIChat(id="gpt-4o-mini"), db=db),
    update_memory_on_run=True,
    db=db,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Personal facts the memory manager can persist as user memories.
    run_response = agent.run(
        "My name is Alice and I work at Google as a senior engineer."
    )
    print("=" * 50)
    print("RUN METRICS")
    print("=" * 50)
    # Aggregated metrics for the whole run.
    pprint(run_response.metrics)
    print("=" * 50)
    print("MODEL DETAILS")
    print("=" * 50)
    # Per-model breakdown; expect separate "model" and "memory_model" entries.
    if run_response.metrics and run_response.metrics.details:
        for model_type, model_metrics_list in run_response.metrics.details.items():
            print(f"\n{model_type}:")
            for model_metric in model_metrics_list:
                pprint(model_metric)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/multi_model_metrics.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/02_agents/14_advanced/session_metrics.py | """
Demonstrates session-level metrics that accumulate across multiple runs.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url, session_table="agent_metrics_sessions")
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    db=db,
    # Fixed session_id so both runs below land in the same session.
    session_id="session_metrics_demo",
    add_history_to_context=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # First run
    run_output_1 = agent.run("What is the capital of France?")
    print("=" * 50)
    print("RUN 1 METRICS")
    print("=" * 50)
    pprint(run_output_1.metrics)
    # Second run on the same session
    run_output_2 = agent.run("What about Germany?")
    print("=" * 50)
    print("RUN 2 METRICS")
    print("=" * 50)
    pprint(run_output_2.metrics)
    # Session metrics aggregate both runs
    print("=" * 50)
    print("SESSION METRICS (accumulated)")
    print("=" * 50)
    session_metrics = agent.get_session_metrics()
    pprint(session_metrics)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/session_metrics.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/02_agents/14_advanced/session_summary_metrics.py | """
Session Summary Metrics
=============================
When an agent uses a SessionSummaryManager, the summary model's token
usage is tracked separately under the "session_summary_model" detail key.
This lets you see how many tokens are spent summarizing the session
versus the agent's own model calls.
The session summary runs after each interaction to maintain a concise
summary of the conversation so far.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.session.summary import SessionSummaryManager
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db = PostgresDb(db_url="postgresql+psycopg://ai:ai@localhost:5532/ai")
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    # Summary model calls are tracked under "session_summary_model".
    session_summary_manager=SessionSummaryManager(
        model=OpenAIChat(id="gpt-4o-mini"),
    ),
    enable_session_summaries=True,
    db=db,
    session_id="session-summary-metrics-demo",
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # First run
    run_response_1 = agent.run("My name is Alice and I work at Google.")
    print("=" * 50)
    print("RUN 1 METRICS")
    print("=" * 50)
    pprint(run_response_1.metrics)
    # Second run - triggers session summary
    run_response_2 = agent.run("I also enjoy hiking on weekends.")
    print("=" * 50)
    print("RUN 2 METRICS")
    print("=" * 50)
    pprint(run_response_2.metrics)
    print("=" * 50)
    print("MODEL DETAILS (Run 2)")
    print("=" * 50)
    # Per-model breakdown; expect "model" plus "session_summary_model" keys.
    if run_response_2.metrics and run_response_2.metrics.details:
        for model_type, model_metrics_list in run_response_2.metrics.details.items():
            print(f"\n{model_type}:")
            for model_metric in model_metrics_list:
                pprint(model_metric)
    print("=" * 50)
    print("SESSION METRICS (accumulated)")
    print("=" * 50)
    session_metrics = agent.get_session_metrics()
    if session_metrics:
        pprint(session_metrics)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/session_summary_metrics.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/02_agents/14_advanced/streaming_metrics.py | """
Streaming Metrics
=============================
Demonstrates how to capture metrics from streaming responses.
Use yield_run_output=True to receive a RunOutput at the end of the stream.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.agent import RunOutput
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
)
# ---------------------------------------------------------------------------
# Run Agent (Streaming)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # With yield_run_output=True the stream's final item is the RunOutput.
    final_output = None
    for chunk in agent.run("Count from 1 to 10.", stream=True, yield_run_output=True):
        if isinstance(chunk, RunOutput):
            final_output = chunk
    if final_output and final_output.metrics:
        banner = "=" * 50
        print(banner)
        print("STREAMING RUN METRICS")
        print(banner)
        pprint(final_output.metrics)
        print(banner)
        print("MODEL DETAILS")
        print(banner)
        # Per-model breakdown of the streamed run.
        details = final_output.metrics.details
        if details:
            for detail_key, metric_entries in details.items():
                print(f"\n{detail_key}:")
                for entry in metric_entries:
                    pprint(entry)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/streaming_metrics.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/02_agents/14_advanced/tool_call_metrics.py | """
Tool Call Metrics
=============================
Demonstrates tool execution timing metrics.
Each tool call records start_time, end_time, and duration.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.yfinance import YFinanceTools
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    tools=[YFinanceTools()],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    run_output = agent.run("What is the stock price of AAPL and NVDA?")
    banner = "=" * 50
    # Run-level metrics show total tokens across all model calls
    print(banner)
    print("RUN METRICS")
    print(banner)
    pprint(run_output.metrics)
    # Each tool call in the run carries its own timing metrics
    print(banner)
    print("TOOL CALL METRICS")
    print(banner)
    if run_output.tools:
        for executed_tool in run_output.tools:
            print(f"Tool: {executed_tool.tool_name}")
            if executed_tool.metrics:
                pprint(executed_tool.metrics)
            print("-" * 40)
    # Per-model breakdown from details
    print(banner)
    print("MODEL DETAILS")
    print(banner)
    if run_output.metrics and run_output.metrics.details:
        for detail_key, metric_entries in run_output.metrics.details.items():
            print(f"\n{detail_key}:")
            for entry in metric_entries:
                pprint(entry)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/14_advanced/tool_call_metrics.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/22_metrics/02_team_streaming_metrics.py | """
Team Streaming Metrics
=============================
Demonstrates how to capture metrics from team streaming responses.
Use yield_run_output=True to receive a TeamRunOutput at the end of the stream.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.team import TeamRunOutput
from agno.team import Team
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
assistant = Agent(
    name="Assistant",
    model=OpenAIChat(id="gpt-4o-mini"),
    role="Helpful assistant that answers questions.",
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
team = Team(
    name="Streaming Team",
    model=OpenAIChat(id="gpt-4o-mini"),
    members=[assistant],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Team (Streaming)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # With yield_run_output=True the stream's final item is the TeamRunOutput.
    final_output = None
    for chunk in team.run("Count from 1 to 5.", stream=True, yield_run_output=True):
        if isinstance(chunk, TeamRunOutput):
            final_output = chunk
    if final_output and final_output.metrics:
        banner = "=" * 50
        print(banner)
        print("STREAMING TEAM METRICS")
        print(banner)
        pprint(final_output.metrics)
        print(banner)
        print("MODEL DETAILS")
        print(banner)
        # Per-model breakdown of the streamed team run.
        details = final_output.metrics.details
        if details:
            for detail_key, metric_entries in details.items():
                print(f"\n{detail_key}:")
                for entry in metric_entries:
                    pprint(entry)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/22_metrics/02_team_streaming_metrics.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/22_metrics/03_team_session_metrics.py | """
Team Session Metrics
=============================
Demonstrates session-level metrics for teams with PostgreSQL persistence.
Metrics accumulate across multiple team runs within the same session.
Run: ./cookbook/scripts/run_pgvector.sh
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.team import Team
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url, session_table="team_metrics_sessions")
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
assistant = Agent(
    name="Assistant",
    model=OpenAIChat(id="gpt-4o-mini"),
    role="Helpful assistant that answers questions.",
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
team = Team(
    name="Research Team",
    model=OpenAIChat(id="gpt-4o-mini"),
    members=[assistant],
    db=db,
    # Fixed session_id so both runs below accumulate into one session.
    session_id="team_session_metrics_demo",
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # First run
    run_output_1 = team.run("What is the capital of Japan?")
    print("=" * 50)
    print("RUN 1 METRICS")
    print("=" * 50)
    pprint(run_output_1.metrics)
    # Second run on the same session
    run_output_2 = team.run("What about South Korea?")
    print("=" * 50)
    print("RUN 2 METRICS")
    print("=" * 50)
    pprint(run_output_2.metrics)
    # Session metrics aggregate both runs
    print("=" * 50)
    print("SESSION METRICS (accumulated)")
    print("=" * 50)
    session_metrics = team.get_session_metrics()
    pprint(session_metrics)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/22_metrics/03_team_session_metrics.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/22_metrics/04_team_tool_metrics.py | """
Team Tool Metrics
=============================
Demonstrates metrics for teams where members use tools.
Shows leader metrics, member metrics, and tool execution timing.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team import Team
from agno.tools.yfinance import YFinanceTools
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
stock_searcher = Agent(
    name="Stock Searcher",
    model=OpenAIChat(id="gpt-4o-mini"),
    role="Searches for stock information.",
    tools=[YFinanceTools()],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
team = Team(
    name="Stock Research Team",
    model=OpenAIChat(id="gpt-4o-mini"),
    members=[stock_searcher],
    markdown=True,
    show_members_responses=True,
    store_member_responses=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    run_output = team.run("What is the stock price of NVDA?")
    banner = "=" * 50
    # Aggregated team metrics (leader + all members)
    print(banner)
    print("AGGREGATED TEAM METRICS")
    print(banner)
    pprint(run_output.metrics)
    # Member-level metrics and tool calls
    print(banner)
    print("MEMBER METRICS AND TOOL CALLS")
    print(banner)
    if run_output.member_responses:
        for member in run_output.member_responses:
            print(f"\nMember: {member.agent_name}")
            print("-" * 40)
            pprint(member.metrics)
            if member.tools:
                print(f"\nTool calls ({len(member.tools)}):")
                for executed_tool in member.tools:
                    print(f"  Tool: {executed_tool.tool_name}")
                    if executed_tool.metrics:
                        pprint(executed_tool.metrics)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/22_metrics/04_team_tool_metrics.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/09_evals/accuracy/accuracy_eval_metrics.py | """
Accuracy Eval Metrics
=====================
Demonstrates that eval model metrics can be accumulated into the original
agent's run_output using the run_response parameter on evaluate_answer.
The evaluator agent's token usage appears under "eval_model" in
run_output.metrics.details alongside the agent's own "model" entries.
"""
from agno.agent import Agent
from agno.eval.accuracy import AccuracyEval
from agno.models.openai import OpenAIChat
from rich.pretty import pprint

# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions="Answer factual questions concisely.",
)

evaluation = AccuracyEval(
    name="Capital Cities",
    model=OpenAIChat(id="gpt-4o-mini"),
    agent=agent,
    input="What is the capital of Japan?",
    expected_output="Tokyo",
    num_iterations=1,
)

# ---------------------------------------------------------------------------
# Run
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # First, run the agent to get a response
    run_output = agent.run("What is the capital of Japan?")
    agent_output = str(run_output.content)

    # Run the evaluator, passing run_output so eval metrics accumulate into it
    evaluator_agent = evaluation.get_evaluator_agent()
    eval_input = evaluation.get_eval_input()
    eval_expected = evaluation.get_eval_expected_output()
    # Build the evaluator prompt from input / expected / actual output
    evaluation_input = (
        f"<agent_input>\n{eval_input}\n</agent_input>\n\n"
        f"<expected_output>\n{eval_expected}\n</expected_output>\n\n"
        f"<agent_output>\n{agent_output}\n</agent_output>"
    )
    result = evaluation.evaluate_answer(
        input=eval_input,
        evaluator_agent=evaluator_agent,
        evaluation_input=evaluation_input,
        evaluator_expected_output=eval_expected,
        agent_output=agent_output,
        run_response=run_output,
    )
    if result:
        print(f"Score: {result.score}/10")
        print(f"Reason: {result.reason[:200]}")

    # The run_output now has both agent + eval metrics
    if run_output.metrics:
        print("\nTotal tokens (agent + eval):", run_output.metrics.total_tokens)
        if run_output.metrics.details:
            if "model" in run_output.metrics.details:
                agent_tokens = sum(
                    metric.total_tokens
                    for metric in run_output.metrics.details["model"]
                )
                print("Agent model tokens:", agent_tokens)
            if "eval_model" in run_output.metrics.details:
                eval_tokens = sum(
                    metric.total_tokens
                    for metric in run_output.metrics.details["eval_model"]
                )
                print("Eval model tokens:", eval_tokens)
        print("\nFull metrics breakdown:")
        pprint(run_output.metrics.to_dict())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/09_evals/accuracy/accuracy_eval_metrics.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/09_evals/agent_as_judge/agent_as_judge_eval_metrics.py | """
Agent-as-Judge Eval Metrics
============================
Demonstrates that eval model metrics are accumulated back into the
original agent's run_output when AgentAsJudgeEval is used as a post_hook.
After the agent runs, the evaluator agent makes its own model call.
Those eval tokens show up under "eval_model" in run_output.metrics.details.
"""
from agno.agent import Agent
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.models.openai import OpenAIChat
from rich.pretty import pprint

# ---------------------------------------------------------------------------
# Create eval as a post-hook
# ---------------------------------------------------------------------------
eval_hook = AgentAsJudgeEval(
    name="Quality Check",
    model=OpenAIChat(id="gpt-4o-mini"),
    criteria="Response should be accurate, clear, and concise",
    scoring_strategy="binary",
)

agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    instructions="Answer questions concisely.",
    post_hooks=[eval_hook],
)

# ---------------------------------------------------------------------------
# Run
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    result = agent.run("What is the capital of France?")

    # The run metrics now include both agent model + eval model tokens
    if result.metrics:
        print("Total tokens (agent + eval):", result.metrics.total_tokens)
        if result.metrics.details:
            # Agent's own model call
            if "model" in result.metrics.details:
                agent_tokens = sum(
                    metric.total_tokens for metric in result.metrics.details["model"]
                )
                print("Agent model tokens:", agent_tokens)
            # Eval model call (accumulated from evaluator agent)
            if "eval_model" in result.metrics.details:
                eval_tokens = sum(
                    metric.total_tokens
                    for metric in result.metrics.details["eval_model"]
                )
                print("Eval model tokens:", eval_tokens)
                for metric in result.metrics.details["eval_model"]:
                    print(f"  Evaluator: {metric.id} ({metric.provider})")
        print("\nFull metrics details:")
        pprint(result.metrics.to_dict())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/09_evals/agent_as_judge/agent_as_judge_eval_metrics.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/metrics.py | from dataclasses import asdict, dataclass
from dataclasses import fields as dc_fields
from enum import Enum
from time import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
from agno.utils.timer import Timer
class ModelType(str, Enum):
    """Identifies the functional role of a model within an agent run.

    The string values are used as keys of RunMetrics.details (see
    accumulate_model_metrics), so per-model usage can be broken down
    by role. Subclassing str lets the members compare equal to plain
    string keys.
    """

    MODEL = "model"
    OUTPUT_MODEL = "output_model"
    PARSER_MODEL = "parser_model"
    MEMORY_MODEL = "memory_model"
    REASONING_MODEL = "reasoning_model"
    SESSION_SUMMARY_MODEL = "session_summary_model"
    CULTURE_MODEL = "culture_model"
    LEARNING_MODEL = "learning_model"
    COMPRESSION_MODEL = "compression_model"
if TYPE_CHECKING:
from agno.models.base import Model
from agno.models.response import ModelResponse
# ---------------------------------------------------------------------------
# Base Metrics
# ---------------------------------------------------------------------------
@dataclass
class BaseMetrics:
    """Token consumption metrics shared across all metric types."""

    # Core token counters — default to 0 so accumulation can always use +=.
    input_tokens: int = 0
    output_tokens: int = 0
    total_tokens: int = 0
    # Audio modality token counters
    audio_input_tokens: int = 0
    audio_output_tokens: int = 0
    audio_total_tokens: int = 0
    # Prompt-cache token counters
    cache_read_tokens: int = 0
    cache_write_tokens: int = 0
    reasoning_tokens: int = 0
    # Monetary cost; None (not 0) means "unknown/not reported"
    cost: Optional[float] = None
# ---------------------------------------------------------------------------
# Model Metrics
# ---------------------------------------------------------------------------
@dataclass
class ModelMetrics(BaseMetrics):
    """Metrics for a specific model, aggregated by (provider, id).

    At run level: one entry per unique model in details[model_type].
    At session level: same structure, tokens summed across runs.
    """

    id: str = ""
    provider: str = ""
    # Provider-specific extras; on accumulate, numeric values are summed
    # and non-numeric values are overwritten by the newer entry.
    provider_metrics: Optional[Dict[str, Any]] = None

    def accumulate(self, other: "ModelMetrics") -> None:
        """Add token counts and cost from another ModelMetrics into this one."""
        # `or 0` guards against counters that arrive as None
        self.input_tokens += other.input_tokens or 0
        self.output_tokens += other.output_tokens or 0
        self.total_tokens += other.total_tokens or 0
        self.audio_input_tokens += other.audio_input_tokens or 0
        self.audio_output_tokens += other.audio_output_tokens or 0
        self.audio_total_tokens += other.audio_total_tokens or 0
        self.cache_read_tokens += other.cache_read_tokens or 0
        self.cache_write_tokens += other.cache_write_tokens or 0
        self.reasoning_tokens += other.reasoning_tokens or 0
        if other.cost is not None:
            self.cost = (self.cost or 0) + other.cost
        # Merge provider_metrics (sum numeric values, keep latest for others)
        if other.provider_metrics is not None:
            if self.provider_metrics is None:
                self.provider_metrics = {}
            for k, v in other.provider_metrics.items():
                if (
                    k in self.provider_metrics
                    and isinstance(v, (int, float))
                    and isinstance(self.provider_metrics[k], (int, float))
                ):
                    self.provider_metrics[k] += v
                else:
                    self.provider_metrics[k] = v

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, dropping None values, zero ints, and empty dicts."""
        metrics_dict = asdict(self)
        return {
            k: v
            for k, v in metrics_dict.items()
            if v is not None and (not isinstance(v, int) or v != 0) and (not isinstance(v, dict) or len(v) > 0)
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ModelMetrics":
        """Build from a dict, ignoring unknown keys."""
        valid = {f.name for f in dc_fields(cls)}
        return cls(**{k: v for k, v in data.items() if k in valid})
# ---------------------------------------------------------------------------
# Tool Call Metrics
# ---------------------------------------------------------------------------
@dataclass
class ToolCallMetrics:
    """Metrics for tool execution - only time-related fields."""

    # Timer is runtime-only and excluded from serialization
    timer: Optional[Timer] = None
    start_time: Optional[float] = None  # epoch seconds
    end_time: Optional[float] = None  # epoch seconds
    duration: Optional[float] = None  # seconds, set by stop_timer

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, dropping the timer, None values, and zero ints."""
        metrics_dict = asdict(self)
        metrics_dict.pop("timer", None)
        return {k: v for k, v in metrics_dict.items() if v is not None and (not isinstance(v, int) or v != 0)}

    def start_timer(self):
        """Start the timer and record start time."""
        if self.timer is None:
            self.timer = Timer()
        self.timer.start()
        # Only record the first start — repeated calls don't reset start_time
        if self.start_time is None:
            self.start_time = time()

    def stop_timer(self, set_duration: bool = True):
        """Stop the timer and record end time."""
        if self.timer is not None:
            self.timer.stop()
            if set_duration:
                self.duration = self.timer.elapsed
        if self.end_time is None:
            self.end_time = time()

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ToolCallMetrics":
        """Create ToolCallMetrics from dict, handling ISO format strings for start_time and end_time."""
        from datetime import datetime

        metrics_data = data.copy()
        for field_name in ("start_time", "end_time"):
            if field_name in metrics_data and isinstance(metrics_data[field_name], str):
                # Prefer ISO-8601; fall back to a plain float string; else None
                try:
                    metrics_data[field_name] = datetime.fromisoformat(metrics_data[field_name]).timestamp()
                except (ValueError, AttributeError):
                    try:
                        metrics_data[field_name] = float(metrics_data[field_name])
                    except (ValueError, TypeError):
                        metrics_data[field_name] = None
        valid_fields = {f.name for f in dc_fields(cls)}
        metrics_data = {k: v for k, v in metrics_data.items() if k in valid_fields}
        return cls(**metrics_data)
# ---------------------------------------------------------------------------
# Message Metrics
# ---------------------------------------------------------------------------
@dataclass
class MessageMetrics(BaseMetrics):
    """Message-level metrics — token counts and timing. Used by Message.metrics."""

    # Timer is runtime-only and excluded from serialization
    timer: Optional[Timer] = None
    duration: Optional[float] = None
    time_to_first_token: Optional[float] = None
    # Transit field: set by providers, consumed by accumulate_model_metrics → ModelMetrics
    provider_metrics: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, dropping the timer, None values, zero ints, and empty dicts."""
        metrics_dict = asdict(self)
        metrics_dict.pop("timer", None)
        return {
            k: v
            for k, v in metrics_dict.items()
            if v is not None and (not isinstance(v, int) or v != 0) and (not isinstance(v, dict) or len(v) > 0)
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MessageMetrics":
        """Build from a dict, ignoring unknown keys and the non-serializable timer."""
        valid = {f.name for f in dc_fields(cls)} - {"timer"}
        return cls(**{k: v for k, v in data.items() if k in valid})

    def __add__(self, other: "MessageMetrics") -> "MessageMetrics":
        """Return a new MessageMetrics with summed counters.

        Tokens, cost, and duration are summed; TTFT keeps the first
        non-None value; self's timer is carried over; provider_metrics
        are merged (numeric values summed, others replaced by other's).
        """
        result = MessageMetrics(
            input_tokens=self.input_tokens + getattr(other, "input_tokens", 0),
            output_tokens=self.output_tokens + getattr(other, "output_tokens", 0),
            total_tokens=self.total_tokens + getattr(other, "total_tokens", 0),
            audio_input_tokens=self.audio_input_tokens + getattr(other, "audio_input_tokens", 0),
            audio_output_tokens=self.audio_output_tokens + getattr(other, "audio_output_tokens", 0),
            audio_total_tokens=self.audio_total_tokens + getattr(other, "audio_total_tokens", 0),
            cache_read_tokens=self.cache_read_tokens + getattr(other, "cache_read_tokens", 0),
            cache_write_tokens=self.cache_write_tokens + getattr(other, "cache_write_tokens", 0),
            reasoning_tokens=self.reasoning_tokens + getattr(other, "reasoning_tokens", 0),
        )
        # Sum cost
        self_cost = self.cost
        other_cost = getattr(other, "cost", None)
        if self_cost is not None and other_cost is not None:
            result.cost = self_cost + other_cost
        elif self_cost is not None:
            result.cost = self_cost
        elif other_cost is not None:
            result.cost = other_cost
        # Sum duration
        self_duration = self.duration
        other_duration = getattr(other, "duration", None)
        if self_duration is not None and other_duration is not None:
            result.duration = self_duration + other_duration
        elif self_duration is not None:
            result.duration = self_duration
        elif other_duration is not None:
            result.duration = other_duration
        # Preserve timer from self
        if self.timer is not None:
            result.timer = self.timer
        # Keep first non-None TTFT
        self_ttft = self.time_to_first_token
        other_ttft = getattr(other, "time_to_first_token", None)
        if self_ttft is not None:
            result.time_to_first_token = self_ttft
        elif other_ttft is not None:
            result.time_to_first_token = other_ttft
        # Merge provider_metrics (sum numeric values, keep latest for others)
        self_provider_metrics = self.provider_metrics
        other_provider_metrics = getattr(other, "provider_metrics", None)
        if self_provider_metrics is not None or other_provider_metrics is not None:
            merged_pm: Dict[str, Any] = {}
            if self_provider_metrics:
                merged_pm.update(self_provider_metrics)
            if other_provider_metrics:
                for k, v in other_provider_metrics.items():
                    if k in merged_pm and isinstance(v, (int, float)) and isinstance(merged_pm[k], (int, float)):
                        merged_pm[k] += v
                    else:
                        merged_pm[k] = v
            result.provider_metrics = merged_pm
        return result

    def __radd__(self, other: Any) -> "MessageMetrics":
        # Supports sum() which starts from 0
        if other == 0:
            return self
        return self + other

    def start_timer(self):
        """Start (or restart) the elapsed timer."""
        if self.timer is None:
            self.timer = Timer()
        self.timer.start()

    def stop_timer(self, set_duration: bool = True):
        """Stop the timer; optionally record elapsed time as duration."""
        if self.timer is not None:
            self.timer.stop()
            if set_duration:
                self.duration = self.timer.elapsed

    def set_time_to_first_token(self):
        """Record TTFT from the running timer, only the first time."""
        if self.timer is not None and self.time_to_first_token is None:
            self.time_to_first_token = self.timer.elapsed
# ---------------------------------------------------------------------------
# Run Metrics
# ---------------------------------------------------------------------------
@dataclass
class RunMetrics(BaseMetrics):
    """Run-level metrics with per-model breakdown.

    Used by RunOutput.metrics and TeamRunOutput.metrics.
    """

    # Timer is runtime-only and excluded from serialization
    timer: Optional[Timer] = None
    time_to_first_token: Optional[float] = None
    duration: Optional[float] = None
    # Per-model metrics breakdown
    # Keys: "model", "output_model", "memory_model", "eval_model", etc.
    # Values: List of ModelMetrics (one per unique provider+id)
    details: Optional[Dict[str, List[ModelMetrics]]] = None
    # Any additional metrics (e.g., eval_duration)
    additional_metrics: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, dropping the timer, None values, zero ints, and empty containers."""
        metrics_dict = asdict(self)
        metrics_dict.pop("timer", None)
        # Convert details dicts properly
        if metrics_dict.get("details") is not None:
            details_dict = {}
            valid_model_metrics_fields = {f.name for f in dc_fields(ModelMetrics)}
            for model_type, model_metrics_list in metrics_dict["details"].items():
                details_dict[model_type] = [
                    {
                        k: v
                        for k, v in model_metric.items()
                        if k in valid_model_metrics_fields and v is not None and (not isinstance(v, int) or v != 0)
                    }
                    for model_metric in model_metrics_list
                ]
            metrics_dict["details"] = details_dict
        return {
            k: v
            for k, v in metrics_dict.items()
            if v is not None and (not isinstance(v, int) or v != 0) and (not isinstance(v, (dict, list)) or len(v) > 0)
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "RunMetrics":
        """Create RunMetrics from a dict, filtering to valid fields and converting details."""
        valid = {f.name for f in dc_fields(cls)} - {"timer"}
        filtered = {k: v for k, v in data.items() if k in valid}
        # Convert details dicts to ModelMetrics objects
        if "details" in filtered and filtered["details"] is not None:
            converted_details: Dict[str, List[ModelMetrics]] = {}
            for model_type, model_metrics_list in filtered["details"].items():
                converted_details[model_type] = [
                    ModelMetrics.from_dict(model_metric) if isinstance(model_metric, dict) else model_metric
                    for model_metric in model_metrics_list
                ]
            filtered["details"] = converted_details
        return cls(**filtered)

    def __add__(self, other: "RunMetrics") -> "RunMetrics":
        """Return a new RunMetrics with summed counters and merged details.

        Tokens, cost, and duration are summed; details are merged by
        (model_type, provider, id); TTFT keeps the earlier value;
        additional_metrics are merged (numeric values summed).
        """
        result = RunMetrics(
            input_tokens=self.input_tokens + getattr(other, "input_tokens", 0),
            output_tokens=self.output_tokens + getattr(other, "output_tokens", 0),
            total_tokens=self.total_tokens + getattr(other, "total_tokens", 0),
            audio_input_tokens=self.audio_input_tokens + getattr(other, "audio_input_tokens", 0),
            audio_output_tokens=self.audio_output_tokens + getattr(other, "audio_output_tokens", 0),
            audio_total_tokens=self.audio_total_tokens + getattr(other, "audio_total_tokens", 0),
            cache_read_tokens=self.cache_read_tokens + getattr(other, "cache_read_tokens", 0),
            cache_write_tokens=self.cache_write_tokens + getattr(other, "cache_write_tokens", 0),
            reasoning_tokens=self.reasoning_tokens + getattr(other, "reasoning_tokens", 0),
        )
        # Preserve timer from self
        if self.timer is not None:
            result.timer = self.timer
        # Merge details — aggregate by (model_type, provider, id)
        self_details = self.details
        other_details = getattr(other, "details", None)
        if self_details or other_details:
            lookup: Dict[str, Dict[Tuple[str, str], ModelMetrics]] = {}
            for source_details in (self_details, other_details):
                if source_details:
                    for model_type, model_metrics_list in source_details.items():
                        if model_type not in lookup:
                            lookup[model_type] = {}
                        for mm in model_metrics_list:
                            key = (mm.provider, mm.id)
                            if key in lookup[model_type]:
                                lookup[model_type][key].accumulate(mm)
                            else:
                                # to_dict/from_dict round-trip makes a copy (no aliasing)
                                lookup[model_type][key] = ModelMetrics.from_dict(mm.to_dict())
            result.details = {model_type: list(entries.values()) for model_type, entries in lookup.items()}
        # Sum durations
        self_duration = self.duration
        other_duration = getattr(other, "duration", None)
        if self_duration is not None and other_duration is not None:
            result.duration = self_duration + other_duration
        elif self_duration is not None:
            result.duration = self_duration
        elif other_duration is not None:
            result.duration = other_duration
        # Keep earliest TTFT
        self_ttft = self.time_to_first_token
        other_ttft = getattr(other, "time_to_first_token", None)
        if self_ttft is not None and other_ttft is not None:
            result.time_to_first_token = min(self_ttft, other_ttft)
        elif self_ttft is not None:
            result.time_to_first_token = self_ttft
        elif other_ttft is not None:
            result.time_to_first_token = other_ttft
        # Sum cost
        self_cost = self.cost
        other_cost = getattr(other, "cost", None)
        if self_cost is not None and other_cost is not None:
            result.cost = self_cost + other_cost
        elif self_cost is not None:
            result.cost = self_cost
        elif other_cost is not None:
            result.cost = other_cost
        # Merge additional_metrics (sum numeric values, keep latest for others)
        self_am = self.additional_metrics
        other_am = getattr(other, "additional_metrics", None)
        if self_am is not None or other_am is not None:
            result.additional_metrics = {}
            if self_am:
                result.additional_metrics.update(self_am)
            if other_am:
                for k, v in other_am.items():
                    if (
                        k in result.additional_metrics
                        and isinstance(v, (int, float))
                        and isinstance(result.additional_metrics[k], (int, float))
                    ):
                        result.additional_metrics[k] += v
                    else:
                        result.additional_metrics[k] = v
        return result

    def __radd__(self, other: Any) -> "RunMetrics":
        # Supports sum() which starts from 0
        if other == 0:
            return self
        return self + other

    def start_timer(self):
        """Start (or restart) the elapsed timer."""
        if self.timer is None:
            self.timer = Timer()
        self.timer.start()

    def stop_timer(self, set_duration: bool = True):
        """Stop the timer; optionally record elapsed time as duration."""
        if self.timer is not None:
            self.timer.stop()
            if set_duration:
                self.duration = self.timer.elapsed

    def set_time_to_first_token(self):
        """Record TTFT from the running timer, only the first time."""
        if self.timer is not None and self.time_to_first_token is None:
            self.time_to_first_token = self.timer.elapsed
# Backward-compat alias
Metrics = RunMetrics
# ---------------------------------------------------------------------------
# Session metrics – aggregated across runs
# ---------------------------------------------------------------------------
@dataclass
class SessionMetrics(BaseMetrics):
    """Session-level aggregated metrics across runs.

    details has the same type as RunMetrics.details: Dict[str, List[ModelMetrics]].
    Tokens in each ModelMetrics entry are summed across all runs.
    """

    # Same type as RunMetrics.details — Dict keyed by model type
    details: Optional[Dict[str, List[ModelMetrics]]] = None
    # Carried from runs
    additional_metrics: Optional[Dict[str, Any]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, dropping None values, zero ints, and empty containers."""
        metrics_dict = asdict(self)
        if metrics_dict.get("details") is not None:
            valid_model_metrics_fields = {f.name for f in dc_fields(ModelMetrics)}
            details_dict = {}
            for model_type, model_metrics_list in metrics_dict["details"].items():
                details_dict[model_type] = [
                    {
                        k: v
                        for k, v in model_metric.items()
                        if k in valid_model_metrics_fields and v is not None and (not isinstance(v, int) or v != 0)
                    }
                    for model_metric in model_metrics_list
                ]
            metrics_dict["details"] = details_dict
        return {
            k: v
            for k, v in metrics_dict.items()
            if v is not None and (not isinstance(v, int) or v != 0) and (not isinstance(v, (dict, list)) or len(v) > 0)
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SessionMetrics":
        """Create SessionMetrics from a dict, converting details dicts to ModelMetrics objects."""
        valid = {f.name for f in dc_fields(cls)}
        filtered = {k: v for k, v in data.items() if k in valid}
        if "details" in filtered and filtered["details"] is not None:
            details_raw = filtered["details"]
            if isinstance(details_raw, dict):
                converted: Dict[str, List[ModelMetrics]] = {}
                for model_type, model_metrics_list in details_raw.items():
                    if isinstance(model_metrics_list, list):
                        converted[model_type] = [
                            ModelMetrics.from_dict(model_metric) if isinstance(model_metric, dict) else model_metric
                            for model_metric in model_metrics_list
                        ]
                filtered["details"] = converted if converted else None
            else:
                # Malformed details (not a dict) — drop rather than fail
                filtered.pop("details", None)
        return cls(**filtered)

    def accumulate_from_run(self, run_metrics: "RunMetrics") -> None:
        """Accumulate run-level RunMetrics into this SessionMetrics."""
        # Accumulate token metrics
        self.input_tokens += run_metrics.input_tokens
        self.output_tokens += run_metrics.output_tokens
        self.total_tokens += run_metrics.total_tokens
        self.audio_input_tokens += run_metrics.audio_input_tokens
        self.audio_output_tokens += run_metrics.audio_output_tokens
        self.audio_total_tokens += run_metrics.audio_total_tokens
        self.cache_read_tokens += run_metrics.cache_read_tokens
        self.cache_write_tokens += run_metrics.cache_write_tokens
        self.reasoning_tokens += run_metrics.reasoning_tokens
        # Accumulate cost
        if run_metrics.cost is not None:
            self.cost = (self.cost or 0) + run_metrics.cost
        # Merge additional_metrics (sum numeric values, keep latest for others)
        if run_metrics.additional_metrics is not None:
            if self.additional_metrics is None:
                self.additional_metrics = {}
            for k, v in run_metrics.additional_metrics.items():
                if (
                    k in self.additional_metrics
                    and isinstance(v, (int, float))
                    and isinstance(self.additional_metrics[k], (int, float))
                ):
                    self.additional_metrics[k] += v
                else:
                    self.additional_metrics[k] = v
        # Merge per-model details: Dict[str, List[ModelMetrics]] -> Dict[str, List[ModelMetrics]]
        if run_metrics.details:
            if self.details is None:
                self.details = {}
            for model_type, model_metrics_list in run_metrics.details.items():
                if model_type not in self.details:
                    self.details[model_type] = []
                existing_list = self.details[model_type]
                for model_metrics in model_metrics_list:
                    # Find existing entry by (provider, id)
                    found = False
                    for existing in existing_list:
                        if existing.provider == model_metrics.provider and existing.id == model_metrics.id:
                            existing.accumulate(model_metrics)
                            found = True
                            break
                    if not found:
                        # Create a copy so we don't alias the run-level entry
                        existing_list.append(ModelMetrics.from_dict(model_metrics.to_dict()))

    def __add__(self, other: "SessionMetrics") -> "SessionMetrics":
        """Sum two SessionMetrics objects."""
        # Merge details dicts — aggregate by (model_type, provider, id)
        merged_details: Optional[Dict[str, List[ModelMetrics]]] = None
        other_details = getattr(other, "details", None)
        if self.details or other_details:
            # Build lookup: model_type -> (provider, id) -> ModelMetrics
            lookup: Dict[str, Dict[Tuple[str, str], ModelMetrics]] = {}
            for source_details in (self.details, other_details):
                if source_details:
                    for model_type, model_metrics_list in source_details.items():
                        if model_type not in lookup:
                            lookup[model_type] = {}
                        for mm in model_metrics_list:
                            key = (mm.provider, mm.id)
                            if key in lookup[model_type]:
                                lookup[model_type][key].accumulate(mm)
                            else:
                                lookup[model_type][key] = ModelMetrics.from_dict(mm.to_dict())
            merged_details = {mt: list(entries.values()) for mt, entries in lookup.items()}
        # Sum cost
        cost = None
        other_cost = getattr(other, "cost", None)
        if self.cost is not None and other_cost is not None:
            cost = self.cost + other_cost
        elif self.cost is not None:
            cost = self.cost
        elif other_cost is not None:
            cost = other_cost
        # Merge additional_metrics (sum numeric values, keep latest for others)
        merged_am = None
        other_am = getattr(other, "additional_metrics", None)
        if self.additional_metrics is not None or other_am is not None:
            merged_am = {}
            if self.additional_metrics:
                merged_am.update(self.additional_metrics)
            if other_am:
                for k, v in other_am.items():
                    if k in merged_am and isinstance(v, (int, float)) and isinstance(merged_am[k], (int, float)):
                        merged_am[k] += v
                    else:
                        merged_am[k] = v
        return SessionMetrics(
            input_tokens=self.input_tokens + getattr(other, "input_tokens", 0),
            output_tokens=self.output_tokens + getattr(other, "output_tokens", 0),
            total_tokens=self.total_tokens + getattr(other, "total_tokens", 0),
            audio_input_tokens=self.audio_input_tokens + getattr(other, "audio_input_tokens", 0),
            audio_output_tokens=self.audio_output_tokens + getattr(other, "audio_output_tokens", 0),
            audio_total_tokens=self.audio_total_tokens + getattr(other, "audio_total_tokens", 0),
            cache_read_tokens=self.cache_read_tokens + getattr(other, "cache_read_tokens", 0),
            cache_write_tokens=self.cache_write_tokens + getattr(other, "cache_write_tokens", 0),
            reasoning_tokens=self.reasoning_tokens + getattr(other, "reasoning_tokens", 0),
            cost=cost,
            details=merged_details,
            additional_metrics=merged_am,
        )

    def __radd__(self, other: Any) -> "SessionMetrics":
        # Supports sum() which starts from 0
        if other == 0:
            return self
        return self + other
# ---------------------------------------------------------------------------
# Accumulation helpers
# ---------------------------------------------------------------------------
def accumulate_model_metrics(
    model_response: "ModelResponse",
    model: "Model",
    model_type: "Union[ModelType, str]",
    run_metrics: Optional[RunMetrics] = None,
) -> None:
    """Accumulate metrics from a model response into run_metrics.

    Finds or creates a ModelMetrics entry in details[model_type] by (provider, id).
    Sums tokens into the existing entry if found, otherwise creates a new one.
    Also accumulates top-level token counts and cost.
    """
    if run_metrics is None or model_response.response_usage is None:
        return

    usage = model_response.response_usage
    if run_metrics.details is None:
        run_metrics.details = {}

    # Normalize possibly-None usage counters to ints once, up front.
    token_counts = {
        field_name: getattr(usage, field_name) or 0
        for field_name in (
            "input_tokens",
            "output_tokens",
            "total_tokens",
            "audio_input_tokens",
            "audio_output_tokens",
            "audio_total_tokens",
            "cache_read_tokens",
            "cache_write_tokens",
            "reasoning_tokens",
        )
    }

    model_id = model.id
    model_provider = model.get_provider()

    # Per-model entry for this response
    new_entry = ModelMetrics(
        id=model_id,
        provider=model_provider,
        cost=usage.cost,
        provider_metrics=usage.provider_metrics,
        **token_counts,
    )

    # Merge into the existing (provider, id) entry under this model type,
    # or append a brand-new entry.
    details_key = model_type.value if isinstance(model_type, ModelType) else model_type
    bucket = run_metrics.details.setdefault(details_key, [])
    match = next(
        (entry for entry in bucket if entry.id == model_id and entry.provider == model_provider),
        None,
    )
    if match is not None:
        match.accumulate(new_entry)
    else:
        bucket.append(new_entry)

    # Roll the same counters into the run-level totals.
    for field_name, count in token_counts.items():
        setattr(run_metrics, field_name, getattr(run_metrics, field_name) + count)

    # Accumulate cost (None means "unknown", so only add when reported)
    if usage.cost is not None:
        run_metrics.cost = (run_metrics.cost or 0) + usage.cost
def accumulate_eval_metrics(
    eval_metrics: Optional[RunMetrics] = None,
    run_metrics: Optional[RunMetrics] = None,
    prefix: str = "eval",
) -> None:
    """Accumulate child agent/eval metrics into run_metrics.

    Merges a child agent's metrics under "{prefix}_model" keys in details.
    """
    if run_metrics is None or eval_metrics is None:
        return

    if run_metrics.details is None:
        run_metrics.details = {}

    # Re-key the child's per-model details under "{prefix}_<model_type>"
    # (leaving already-prefixed keys alone) and merge by (provider, id).
    for model_type, child_entries in (eval_metrics.details or {}).items():
        prefixed_key = model_type if model_type.startswith(f"{prefix}_") else f"{prefix}_{model_type}"
        target_list = run_metrics.details.setdefault(prefixed_key, [])
        for child in child_entries:
            match = next(
                (entry for entry in target_list if entry.provider == child.provider and entry.id == child.id),
                None,
            )
            if match is not None:
                match.accumulate(child)
            else:
                # to_dict/from_dict round-trip copies the entry (no aliasing)
                target_list.append(ModelMetrics.from_dict(child.to_dict()))

    # Roll the child's totals into the run-level counters.
    for field_name in (
        "input_tokens",
        "output_tokens",
        "total_tokens",
        "audio_input_tokens",
        "audio_output_tokens",
        "audio_total_tokens",
        "cache_read_tokens",
        "cache_write_tokens",
        "reasoning_tokens",
    ):
        setattr(run_metrics, field_name, getattr(run_metrics, field_name) + getattr(eval_metrics, field_name))

    # Accumulate cost (None means "unknown", so only add when reported)
    if eval_metrics.cost is not None:
        run_metrics.cost = (run_metrics.cost if run_metrics.cost is not None else 0) + eval_metrics.cost

    # Track eval duration separately (default prefix only)
    if prefix == "eval" and eval_metrics.duration is not None:
        if run_metrics.additional_metrics is None:
            run_metrics.additional_metrics = {}
        previous_duration = run_metrics.additional_metrics.get("eval_duration", 0)
        run_metrics.additional_metrics["eval_duration"] = previous_duration + eval_metrics.duration
def merge_background_metrics(
    run_metrics: Optional[RunMetrics],
    background_metrics: "Sequence[Optional[RunMetrics]]",
) -> None:
    """Merge background task metrics into run_metrics on the main thread.

    Each background task (memory, culture, learning) accumulates metrics into its
    own isolated RunMetrics collector. After all tasks complete, this function
    merges those collectors into the real run_metrics — avoiding concurrent
    mutation of shared state.
    """
    if run_metrics is None:
        return

    # Counters summed field-by-field for every background collector.
    token_fields = (
        "input_tokens",
        "output_tokens",
        "total_tokens",
        "audio_input_tokens",
        "audio_output_tokens",
        "audio_total_tokens",
        "cache_read_tokens",
        "cache_write_tokens",
        "reasoning_tokens",
    )

    for collector in background_metrics:
        if collector is None:
            continue

        # Accumulate top-level token counts
        for field in token_fields:
            setattr(run_metrics, field, getattr(run_metrics, field) + getattr(collector, field))

        # Accumulate cost (treat a missing total as zero before adding)
        if collector.cost is not None:
            run_metrics.cost = (run_metrics.cost or 0) + collector.cost

        # Merge per-model details: fold into an existing (provider, id) entry
        # when present, otherwise append a deep copy of the incoming metrics.
        if collector.details:
            if run_metrics.details is None:
                run_metrics.details = {}
            for model_type, incoming in collector.details.items():
                bucket = run_metrics.details.setdefault(model_type, [])
                for mm in incoming:
                    match = next(
                        (entry for entry in bucket if entry.provider == mm.provider and entry.id == mm.id),
                        None,
                    )
                    if match is not None:
                        match.accumulate(mm)
                    else:
                        bucket.append(ModelMetrics.from_dict(mm.to_dict()))

        # Merge additional_metrics (sum numeric values, keep latest for others)
        if collector.additional_metrics:
            if run_metrics.additional_metrics is None:
                run_metrics.additional_metrics = {}
            for key, value in collector.additional_metrics.items():
                current = run_metrics.additional_metrics.get(key)
                if isinstance(value, (int, float)) and isinstance(current, (int, float)):
                    run_metrics.additional_metrics[key] = current + value
                else:
                    run_metrics.additional_metrics[key] = value
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/metrics.py",
"license": "Apache License 2.0",
"lines": 711,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/test_model_type.py | """
Unit tests for the ModelType enum and its integration with metrics accumulation.
Tests cover:
- ModelType enum values and str behavior
- Model base class model_type attribute
- accumulate_model_metrics with ModelType enum
- Dict key storage uses string values
- Agent init sets model_type on resolved models
- Backward compatibility with string model_type values
"""
from unittest.mock import MagicMock
from agno.metrics import (
MessageMetrics,
Metrics,
ModelMetrics,
ModelType,
SessionMetrics,
accumulate_eval_metrics,
accumulate_model_metrics,
)
# ---------------------------------------------------------------------------
# ModelType enum basics
# ---------------------------------------------------------------------------
class TestModelTypeEnum:
    """Sanity checks on the ModelType enum itself."""

    def test_enum_values(self):
        expected = {
            ModelType.MODEL: "model",
            ModelType.OUTPUT_MODEL: "output_model",
            ModelType.PARSER_MODEL: "parser_model",
            ModelType.MEMORY_MODEL: "memory_model",
            ModelType.REASONING_MODEL: "reasoning_model",
            ModelType.SESSION_SUMMARY_MODEL: "session_summary_model",
            ModelType.CULTURE_MODEL: "culture_model",
            ModelType.LEARNING_MODEL: "learning_model",
            ModelType.COMPRESSION_MODEL: "compression_model",
        }
        for member, literal in expected.items():
            assert member.value == literal

    def test_str_enum_equality_with_strings(self):
        """ModelType(str, Enum) should compare equal to its string value."""
        for member, literal in (
            (ModelType.MODEL, "model"),
            (ModelType.OUTPUT_MODEL, "output_model"),
            (ModelType.REASONING_MODEL, "reasoning_model"),
        ):
            assert member == literal

    def test_enum_members_are_unique(self):
        distinct_values = {member.value for member in ModelType}
        assert len(distinct_values) == len(list(ModelType))

    def test_enum_is_hashable(self):
        """Can be used as dict keys."""
        mapping = {ModelType.MODEL: "main", ModelType.OUTPUT_MODEL: "output"}
        assert mapping[ModelType.MODEL] == "main"
# ---------------------------------------------------------------------------
# Model base class integration
# ---------------------------------------------------------------------------
class TestModelTypeOnModel:
    """model_type attribute behavior on a concrete Model subclass."""

    def test_default_model_type(self):
        """Model instances default to ModelType.MODEL."""
        from agno.models.openai.chat import OpenAIChat

        instance = OpenAIChat(id="gpt-4o-mini")
        assert instance.model_type == ModelType.MODEL

    def test_model_type_can_be_overridden(self):
        """model_type can be set to a different ModelType value."""
        from agno.models.openai.chat import OpenAIChat

        instance = OpenAIChat(id="gpt-4o-mini")
        instance.model_type = ModelType.OUTPUT_MODEL
        assert instance.model_type == ModelType.OUTPUT_MODEL
# ---------------------------------------------------------------------------
# accumulate_model_metrics with ModelType
# ---------------------------------------------------------------------------
def _make_model_response(input_tokens=10, output_tokens=5, total_tokens=15, cost=None, ttft=None):
    """Create a mock ModelResponse with response_usage."""
    mock_response = MagicMock()
    mock_response.response_usage = MessageMetrics(
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        cost=cost,
        time_to_first_token=ttft,
    )
    return mock_response
def _make_model(model_id="gpt-4o-mini", provider="OpenAI", model_type=ModelType.MODEL):
    """Create a mock Model with the given attributes."""
    mock_model = MagicMock()
    mock_model.configure_mock(id=model_id, model_type=model_type)
    mock_model.get_provider.return_value = provider
    return mock_model
class TestAccumulateModelMetrics:
    """accumulate_model_metrics: dict-key derivation from the model_type
    argument, token accumulation across calls, and no-op edge cases."""

    def test_enum_model_type_creates_correct_dict_key(self):
        """Using ModelType enum should store under the string value key."""
        run_metrics = Metrics()
        model_response = _make_model_response()
        model = _make_model()
        accumulate_model_metrics(model_response, model, ModelType.MODEL, run_metrics)
        assert "model" in run_metrics.details
        assert len(run_metrics.details["model"]) == 1

    def test_output_model_type_key(self):
        # OUTPUT_MODEL enum member maps to the "output_model" details key
        run_metrics = Metrics()
        model_response = _make_model_response()
        model = _make_model()
        accumulate_model_metrics(model_response, model, ModelType.OUTPUT_MODEL, run_metrics)
        assert "output_model" in run_metrics.details

    def test_memory_model_type_key(self):
        # MEMORY_MODEL enum member maps to the "memory_model" details key
        run_metrics = Metrics()
        model_response = _make_model_response()
        model = _make_model()
        accumulate_model_metrics(model_response, model, ModelType.MEMORY_MODEL, run_metrics)
        assert "memory_model" in run_metrics.details

    def test_string_model_type_still_works(self):
        """Backward compatibility: raw strings should still work."""
        run_metrics = Metrics()
        model_response = _make_model_response()
        model = _make_model()
        accumulate_model_metrics(model_response, model, "model", run_metrics)
        assert "model" in run_metrics.details

    def test_tokens_accumulate_correctly(self):
        # Two calls against the same collector should sum the token counts
        run_metrics = Metrics()
        model = _make_model()
        accumulate_model_metrics(_make_model_response(10, 5, 15), model, ModelType.MODEL, run_metrics)
        accumulate_model_metrics(_make_model_response(20, 10, 30), model, ModelType.MODEL, run_metrics)
        assert run_metrics.input_tokens == 30
        assert run_metrics.output_tokens == 15
        assert run_metrics.total_tokens == 45

    def test_multiple_model_types_in_same_run(self):
        """Simulates an agent run using model + output_model."""
        run_metrics = Metrics()
        main_model = _make_model("gpt-4o", "OpenAI")
        output_model = _make_model("gpt-4o-mini", "OpenAI")
        accumulate_model_metrics(_make_model_response(100, 50, 150), main_model, ModelType.MODEL, run_metrics)
        accumulate_model_metrics(_make_model_response(20, 10, 30), output_model, ModelType.OUTPUT_MODEL, run_metrics)
        details = run_metrics.details
        # Each model type lands in its own bucket; totals sum across both
        assert "model" in details
        assert "output_model" in details
        assert details["model"][0].id == "gpt-4o"
        assert details["output_model"][0].id == "gpt-4o-mini"
        assert run_metrics.total_tokens == 180

    def test_accumulate_does_not_set_run_ttft(self):
        """Run TTFT is set by providers via set_time_to_first_token(), not by accumulate_model_metrics."""
        run_metrics = Metrics()
        model = _make_model()
        accumulate_model_metrics(_make_model_response(ttft=0.5), model, ModelType.MODEL, run_metrics)
        assert run_metrics.time_to_first_token is None

    def test_none_response_usage_is_no_op(self):
        # A response without usage data must leave the collector untouched
        run_metrics = Metrics()
        model = _make_model()
        response = MagicMock()
        response.response_usage = None
        accumulate_model_metrics(response, model, ModelType.MODEL, run_metrics)
        # No details added when response_usage is None
        assert run_metrics.details is None
# ---------------------------------------------------------------------------
# accumulate_eval_metrics with enum-keyed details
# ---------------------------------------------------------------------------
class TestAccumulateEvalMetrics:
    """Key-prefixing behavior when folding eval metrics into run metrics."""

    def test_eval_prefixes_string_keys_correctly(self):
        """accumulate_eval_metrics should create 'eval_model' from 'model' key."""
        source = Metrics(
            input_tokens=10,
            output_tokens=5,
            total_tokens=15,
            details={
                "model": [
                    ModelMetrics(id="gpt-4o-mini", provider="OpenAI", input_tokens=10, output_tokens=5, total_tokens=15)
                ]
            },
        )
        target = Metrics(details={})
        accumulate_eval_metrics(source, target, prefix="eval")
        assert "eval_model" in target.details
        assert target.input_tokens == 10
# ---------------------------------------------------------------------------
# Metrics.to_dict / from_dict round-trip with enum keys
# ---------------------------------------------------------------------------
class TestMetricsSerialization:
    """Round-trip serialization of Metrics details via to_dict/from_dict."""

    def test_to_dict_preserves_string_keys(self):
        """details dict keys should be strings in the serialized output."""
        metrics = Metrics(
            input_tokens=100,
            output_tokens=50,
            total_tokens=150,
            details={
                "model": [
                    ModelMetrics(id="gpt-4o", provider="OpenAI", input_tokens=100, output_tokens=50, total_tokens=150)
                ]
            },
        )
        d = metrics.to_dict()
        assert "model" in d["details"]

    def test_from_dict_round_trip(self):
        # Serialize then deserialize; both model-type buckets must survive
        metrics = Metrics(
            input_tokens=100,
            output_tokens=50,
            total_tokens=150,
            details={
                "model": [
                    ModelMetrics(id="gpt-4o", provider="OpenAI", input_tokens=100, output_tokens=50, total_tokens=150)
                ],
                "output_model": [
                    ModelMetrics(
                        id="gpt-4o-mini", provider="OpenAI", input_tokens=20, output_tokens=10, total_tokens=30
                    )
                ],
            },
        )
        d = metrics.to_dict()
        restored = Metrics.from_dict(d)
        assert "model" in restored.details
        assert "output_model" in restored.details
        assert restored.details["model"][0].id == "gpt-4o"

    def test_session_metrics_from_dict_with_string_keys(self):
        """SessionMetrics.from_dict should handle details from run Metrics (dict format)."""
        # Note: the nested ModelMetrics entries arrive as plain dicts here,
        # mirroring what a run's serialized Metrics looks like in storage.
        data = {
            "input_tokens": 100,
            "total_tokens": 150,
            "details": {"model": [{"id": "gpt-4o", "provider": "OpenAI", "input_tokens": 100, "total_tokens": 150}]},
        }
        session = SessionMetrics.from_dict(data)
        assert session.details is not None
        assert len(session.details) == 1
        assert session.details["model"][0].id == "gpt-4o"
# ---------------------------------------------------------------------------
# Agent init sets model_type
# ---------------------------------------------------------------------------
class TestAgentInitModelType:
    """Agent.__init__ should stamp the right ModelType on every resolved model."""

    @staticmethod
    def _build_agent(*extra_model_fields):
        # Build an Agent with the main model plus any extra model slots
        # ("output_model", "parser_model", ...) filled with OpenAIChat instances.
        from agno.agent import Agent
        from agno.models.openai.chat import OpenAIChat

        extras = {field: OpenAIChat(id="gpt-4o-mini") for field in extra_model_fields}
        return Agent(model=OpenAIChat(id="gpt-4o-mini"), **extras)

    def test_agent_model_gets_model_type_set(self):
        """Agent's model should have model_type=MODEL after init."""
        assert self._build_agent().model.model_type == ModelType.MODEL

    def test_agent_output_model_gets_type_set(self):
        agent = self._build_agent("output_model")
        assert agent.output_model.model_type == ModelType.OUTPUT_MODEL

    def test_agent_parser_model_gets_type_set(self):
        agent = self._build_agent("parser_model")
        assert agent.parser_model.model_type == ModelType.PARSER_MODEL

    def test_agent_reasoning_model_gets_type_set(self):
        agent = self._build_agent("reasoning_model")
        assert agent.reasoning_model.model_type == ModelType.REASONING_MODEL
# ---------------------------------------------------------------------------
# Re-export shim
# ---------------------------------------------------------------------------
class TestReExportShim:
    """agno.models.metrics must re-export the very same ModelType object."""

    def test_model_type_importable_from_models_metrics(self):
        from agno.models.metrics import ModelType as ReExported

        assert ReExported is ModelType
        assert ReExported.MODEL.value == "model"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/test_model_type.py",
"license": "Apache License 2.0",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/aws/test_bedrock_client.py | from unittest.mock import MagicMock, patch
import pytest
from boto3.session import Session
from agno.models.aws import AwsBedrock
def _make_frozen_creds(access_key="ASIATEMP", secret_key="secret", token="token"):
frozen = MagicMock()
frozen.access_key = access_key
frozen.secret_key = secret_key
frozen.token = token
return frozen
def _make_mock_session(access_key="ASIATEMP", secret_key="secret", token="token", region="us-east-1"):
    """Mock boto3 Session exposing frozen credentials and a stub client."""
    creds = MagicMock()
    creds.get_frozen_credentials.return_value = _make_frozen_creds(access_key, secret_key, token)

    session = MagicMock(spec=Session)
    session.region_name = region
    session.profile_name = None
    session.get_credentials.return_value = creds

    stub_client = MagicMock()
    session.client.return_value = stub_client
    return session, creds, stub_client
class TestSessionClientNotCached:
    """In session mode, a fresh boto3 client is created on every get_client()."""

    def test_sync_client_recreated_each_call(self):
        session, _, _ = _make_mock_session()
        model = AwsBedrock(id="anthropic.claude-3-sonnet-20240229-v1:0", session=session)
        for _ in range(2):
            model.get_client()
        assert session.client.call_count == 2

    def test_sync_client_passes_region(self):
        session, _, _ = _make_mock_session(region="eu-west-1")
        model = AwsBedrock(id="anthropic.claude-3-sonnet-20240229-v1:0", session=session)
        model.get_client()
        session.client.assert_called_with("bedrock-runtime", region_name="eu-west-1")
class TestStaticKeyClientCached:
    """Static-credential mode caches the client across calls."""

    def test_sync_client_cached(self):
        model = AwsBedrock(
            id="anthropic.claude-3-sonnet-20240229-v1:0",
            aws_access_key_id="AKIA_STATIC",
            aws_secret_access_key="secret",
            aws_region="us-east-1",
        )
        with patch("agno.models.aws.bedrock.AwsClient") as MockClient:
            MockClient.return_value = MagicMock()
            first = model.get_client()
            second = model.get_client()
        # Only one underlying client is ever constructed and it is reused
        assert MockClient.call_count == 1
        assert first is second
class TestSessionTokenEnv:
    """How aws_session_token reaches the client: env var, explicit param, or absent."""

    def test_session_token_read_from_env(self, monkeypatch):
        # Temporary credentials ("ASIATEMP") with a session token in the env
        monkeypatch.setenv("AWS_ACCESS_KEY_ID", "ASIATEMP")
        monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
        monkeypatch.setenv("AWS_SESSION_TOKEN", "my-session-token")
        monkeypatch.setenv("AWS_REGION", "us-west-2")
        model = AwsBedrock(id="anthropic.claude-3-sonnet-20240229-v1:0")
        with patch("agno.models.aws.bedrock.AwsClient") as MockClient:
            MockClient.return_value = MagicMock()
            model.get_client()
            # Inspect the keyword arguments the AwsClient was built with
            call_kwargs = MockClient.call_args[1]
            assert call_kwargs["aws_session_token"] == "my-session-token"
            assert call_kwargs["aws_access_key_id"] == "ASIATEMP"

    def test_session_token_explicit_param(self):
        # Token passed directly to the constructor is forwarded as-is
        model = AwsBedrock(
            id="anthropic.claude-3-sonnet-20240229-v1:0",
            aws_access_key_id="ASIATEMP",
            aws_secret_access_key="secret",
            aws_session_token="explicit-token",
            aws_region="us-east-1",
        )
        with patch("agno.models.aws.bedrock.AwsClient") as MockClient:
            MockClient.return_value = MagicMock()
            model.get_client()
            call_kwargs = MockClient.call_args[1]
            assert call_kwargs["aws_session_token"] == "explicit-token"

    def test_no_session_token_when_not_set(self, monkeypatch):
        # Static keys ("AKIA_STATIC") and no session token -> token must be None
        monkeypatch.setenv("AWS_ACCESS_KEY_ID", "AKIA_STATIC")
        monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
        monkeypatch.setenv("AWS_REGION", "us-east-1")
        monkeypatch.delenv("AWS_SESSION_TOKEN", raising=False)
        model = AwsBedrock(id="anthropic.claude-3-sonnet-20240229-v1:0")
        with patch("agno.models.aws.bedrock.AwsClient") as MockClient:
            MockClient.return_value = MagicMock()
            model.get_client()
            call_kwargs = MockClient.call_args[1]
            assert call_kwargs["aws_session_token"] is None
class TestSessionNullCredentials:
    """A session with no resolvable credentials must raise for async clients."""

    def test_async_raises_on_null_credentials(self):
        try:
            import aioboto3  # noqa: F401
        except ImportError:
            pytest.skip("aioboto3 not installed")

        empty_session = MagicMock(spec=Session)
        empty_session.region_name = "us-east-1"
        empty_session.get_credentials.return_value = None

        model = AwsBedrock(id="anthropic.claude-3-sonnet-20240229-v1:0", session=empty_session)
        with pytest.raises(ValueError, match="boto3 session has no credentials"):
            model.get_async_client()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/aws/test_bedrock_client.py",
"license": "Apache License 2.0",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/aws/test_claude_client.py | from unittest.mock import MagicMock, patch
import pytest
from boto3.session import Session
from agno.models.aws import Claude
def _make_frozen_creds(access_key="ASIATEMP", secret_key="secret", token="token"):
frozen = MagicMock()
frozen.access_key = access_key
frozen.secret_key = secret_key
frozen.token = token
return frozen
def _make_mock_session(access_key="ASIATEMP", secret_key="secret", token="token", region="us-east-1"):
    """Mock boto3 Session whose frozen credentials are controllable by the test."""
    creds = MagicMock()
    creds.get_frozen_credentials.return_value = _make_frozen_creds(access_key, secret_key, token)

    session = MagicMock(spec=Session)
    session.region_name = region
    session.profile_name = None
    session.get_credentials.return_value = creds
    return session, creds
class TestSessionClientNotCached:
    """Session-mode Claude builds a fresh Anthropic client on every call."""

    @staticmethod
    def _open_client():
        # A mock client that reports itself as still open
        client = MagicMock()
        client.is_closed.return_value = False
        return client

    def test_sync_client_recreated_each_call(self):
        session, _ = _make_mock_session()
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=session)
        with patch("agno.models.aws.claude.AnthropicBedrock") as MockBedrock:
            MockBedrock.return_value = self._open_client()
            model.get_client()
            model.get_client()
            assert MockBedrock.call_count == 2

    def test_async_client_recreated_each_call(self):
        session, _ = _make_mock_session()
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=session)
        with patch("agno.models.aws.claude.AsyncAnthropicBedrock") as MockAsyncBedrock:
            MockAsyncBedrock.return_value = self._open_client()
            model.get_async_client()
            model.get_async_client()
            assert MockAsyncBedrock.call_count == 2
class TestSessionCredsReadEachTime:
    """Session mode must pull fresh frozen credentials on every client build,
    so rotated session credentials are picked up without recreating the model."""

    def test_fresh_creds_on_each_sync_get_client(self):
        mock_session, mock_creds = _make_mock_session(access_key="KEY_V1", token="TOKEN_V1")
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=mock_session)
        with patch("agno.models.aws.claude.AnthropicBedrock") as MockBedrock:
            mock_client = MagicMock()
            mock_client.is_closed.return_value = False
            MockBedrock.return_value = mock_client
            model.get_client()
            # First build must use the V1 snapshot
            first_call_kwargs = MockBedrock.call_args
            assert first_call_kwargs[1]["aws_access_key"] == "KEY_V1"
            assert first_call_kwargs[1]["aws_session_token"] == "TOKEN_V1"
            # Simulate credential rotation
            mock_creds.get_frozen_credentials.return_value = _make_frozen_creds("KEY_V2", "secret", "TOKEN_V2")
            model.get_client()
            # Second build must pick up the rotated V2 snapshot
            second_call_kwargs = MockBedrock.call_args
            assert second_call_kwargs[1]["aws_access_key"] == "KEY_V2"
            assert second_call_kwargs[1]["aws_session_token"] == "TOKEN_V2"

    def test_fresh_creds_on_each_async_get_client(self):
        # Same rotation scenario for the async client factory
        mock_session, mock_creds = _make_mock_session(access_key="KEY_V1", token="TOKEN_V1")
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=mock_session)
        with patch("agno.models.aws.claude.AsyncAnthropicBedrock") as MockAsyncBedrock:
            mock_client = MagicMock()
            mock_client.is_closed.return_value = False
            MockAsyncBedrock.return_value = mock_client
            model.get_async_client()
            first_call_kwargs = MockAsyncBedrock.call_args
            assert first_call_kwargs[1]["aws_access_key"] == "KEY_V1"
            assert first_call_kwargs[1]["aws_session_token"] == "TOKEN_V1"
            # Simulate credential rotation
            mock_creds.get_frozen_credentials.return_value = _make_frozen_creds("KEY_V2", "secret", "TOKEN_V2")
            model.get_async_client()
            second_call_kwargs = MockAsyncBedrock.call_args
            assert second_call_kwargs[1]["aws_access_key"] == "KEY_V2"
            assert second_call_kwargs[1]["aws_session_token"] == "TOKEN_V2"
class TestStaticKeyClientCached:
    """Static-key mode caches both the sync and the async client."""

    @staticmethod
    def _static_model():
        # Claude configured with fixed (non-rotating) credentials
        return Claude(
            id="anthropic.claude-3-sonnet-20240229-v1:0",
            aws_access_key="AKIA_STATIC",
            aws_secret_key="secret",
            aws_region="us-east-1",
        )

    def test_sync_client_cached(self):
        model = self._static_model()
        with patch("agno.models.aws.claude.AnthropicBedrock") as MockBedrock:
            cached = MagicMock()
            cached.is_closed.return_value = False
            MockBedrock.return_value = cached
            assert model.get_client() is model.get_client()
            assert MockBedrock.call_count == 1

    def test_async_client_cached(self):
        model = self._static_model()
        with patch("agno.models.aws.claude.AsyncAnthropicBedrock") as MockAsyncBedrock:
            cached = MagicMock()
            cached.is_closed.return_value = False
            MockAsyncBedrock.return_value = cached
            assert model.get_async_client() is model.get_async_client()
            assert MockAsyncBedrock.call_count == 1
class TestAsyncIsClosedCheck:
    """A cached async client is replaced once closed, reused while open."""

    @staticmethod
    def _static_model():
        return Claude(
            id="anthropic.claude-3-sonnet-20240229-v1:0",
            aws_access_key="AKIA_STATIC",
            aws_secret_key="secret",
            aws_region="us-east-1",
        )

    def test_closed_async_client_is_recreated(self):
        model = self._static_model()
        with patch("agno.models.aws.claude.AsyncAnthropicBedrock") as MockAsyncBedrock:
            # Pre-seed the cache with a client that reports itself closed
            stale = MagicMock()
            stale.is_closed.return_value = True
            model.async_client = stale

            fresh = MagicMock()
            fresh.is_closed.return_value = False
            MockAsyncBedrock.return_value = fresh

            assert model.get_async_client() is fresh
            assert MockAsyncBedrock.call_count == 1

    def test_open_async_client_is_reused(self):
        model = self._static_model()
        live = MagicMock()
        live.is_closed.return_value = False
        model.async_client = live
        assert model.get_async_client() is live
class TestSessionTokenEnv:
    """How aws_session_token reaches _get_client_params: env, explicit, or None."""

    def test_session_token_read_from_env(self, monkeypatch):
        monkeypatch.setenv("AWS_ACCESS_KEY_ID", "ASIATEMP")
        monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
        monkeypatch.setenv("AWS_SESSION_TOKEN", "my-session-token")
        monkeypatch.setenv("AWS_REGION", "us-west-2")
        # Clear the Bedrock API key so the key/secret code path is exercised
        monkeypatch.delenv("AWS_BEDROCK_API_KEY", raising=False)
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0")
        params = model._get_client_params()
        assert params["aws_session_token"] == "my-session-token"
        assert params["aws_access_key"] == "ASIATEMP"
        assert params["aws_region"] == "us-west-2"

    def test_session_token_explicit_param(self, monkeypatch):
        monkeypatch.delenv("AWS_BEDROCK_API_KEY", raising=False)
        model = Claude(
            id="anthropic.claude-3-sonnet-20240229-v1:0",
            aws_access_key="ASIATEMP",
            aws_secret_key="secret",
            aws_session_token="explicit-token",
            aws_region="us-east-1",
        )
        params = model._get_client_params()
        assert params["aws_session_token"] == "explicit-token"

    def test_no_session_token_when_not_set(self, monkeypatch):
        # Static keys and no session token -> params carry None for the token
        monkeypatch.setenv("AWS_ACCESS_KEY_ID", "AKIA_STATIC")
        monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "secret")
        monkeypatch.setenv("AWS_REGION", "us-east-1")
        monkeypatch.delenv("AWS_SESSION_TOKEN", raising=False)
        monkeypatch.delenv("AWS_BEDROCK_API_KEY", raising=False)
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0")
        params = model._get_client_params()
        assert params["aws_session_token"] is None
class TestApiKeyPath:
    """AWS_BEDROCK_API_KEY selects the api-key client configuration."""

    def test_api_key_client_cached(self, monkeypatch):
        monkeypatch.setenv("AWS_BEDROCK_API_KEY", "br-api-key-123")
        monkeypatch.setenv("AWS_REGION", "us-west-2")
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0")
        with patch("agno.models.aws.claude.AnthropicBedrock") as MockBedrock:
            cached = MagicMock()
            cached.is_closed.return_value = False
            MockBedrock.return_value = cached
            assert model.get_client() is model.get_client()
            assert MockBedrock.call_count == 1

    def test_api_key_params(self, monkeypatch):
        monkeypatch.setenv("AWS_BEDROCK_API_KEY", "br-api-key-123")
        monkeypatch.setenv("AWS_REGION", "us-west-2")
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0")
        params = model._get_client_params()
        assert params["api_key"] == "br-api-key-123"
        assert params["aws_region"] == "us-west-2"
        assert "aws_session_token" not in params
class TestSessionNullCredentials:
    """_get_client_params must raise when the boto3 session has no credentials."""

    def test_raises_on_null_credentials(self):
        empty_session = MagicMock(spec=Session)
        empty_session.region_name = "us-east-1"
        empty_session.profile_name = None
        empty_session.get_credentials.return_value = None

        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=empty_session)
        with pytest.raises(ValueError, match="boto3 session has no credentials"):
            model._get_client_params()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/aws/test_claude_client.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/aws/test_session_concurrency.py | import threading
from unittest.mock import MagicMock, patch
from boto3.session import Session
from agno.models.aws import Claude
def _make_frozen_creds(access_key="ASIATEMP", secret_key="secret", token="token"):
frozen = MagicMock()
frozen.access_key = access_key
frozen.secret_key = secret_key
frozen.token = token
return frozen
class TestSessionNoSharedState:
    """Session-mode get_client() must not mutate self.client."""

    @staticmethod
    def _session_model():
        # Claude backed by a mock boto3 session with resolvable credentials
        session = MagicMock(spec=Session)
        session.region_name = "us-east-1"
        session.profile_name = None
        creds = MagicMock()
        creds.get_frozen_credentials.return_value = _make_frozen_creds()
        session.get_credentials.return_value = creds
        return Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=session)

    def test_session_get_client_does_not_mutate_self_client(self):
        model = self._session_model()
        assert model.client is None
        with patch("agno.models.aws.claude.AnthropicBedrock") as MockBedrock:
            built = MagicMock()
            built.is_closed.return_value = False
            MockBedrock.return_value = built
            assert model.get_client() is built
            # The freshly built client must not be stored on the model
            assert model.client is None

    def test_session_get_async_client_does_not_mutate_self_async_client(self):
        model = self._session_model()
        assert model.async_client is None
        with patch("agno.models.aws.claude.AsyncAnthropicBedrock") as MockAsync:
            built = MagicMock()
            built.is_closed.return_value = False
            MockAsync.return_value = built
            assert model.get_async_client() is built
            assert model.async_client is None
class TestSessionConcurrencySafe:
    """Concurrent get_client() calls each get their own credentials."""

    def test_concurrent_calls_get_correct_credentials(self):
        # Each get_frozen_credentials() call hands out a new KEY_n/TOKEN_n pair,
        # so we can tell which snapshot each thread's client was built from.
        call_count = {"n": 0}
        event_a_created = threading.Event()
        event_b_done = threading.Event()

        def rotating_frozen_creds():
            call_count["n"] += 1
            return _make_frozen_creds(
                access_key=f"KEY_{call_count['n']}",
                token=f"TOKEN_{call_count['n']}",
            )

        mock_session = MagicMock(spec=Session)
        mock_session.region_name = "us-east-1"
        mock_session.profile_name = None
        mock_creds = MagicMock()
        mock_creds.get_frozen_credentials.side_effect = rotating_frozen_creds
        mock_session.get_credentials.return_value = mock_creds
        model = Claude(id="anthropic.claude-3-sonnet-20240229-v1:0", session=mock_session)
        results = {}
        with patch("agno.models.aws.claude.AnthropicBedrock") as MockBedrock:
            create_count = {"n": 0}

            def make_client(**kwargs):
                # The first construction (thread A) blocks until thread B has
                # finished, forcing the two get_client() calls to overlap.
                create_count["n"] += 1
                client = MagicMock()
                client.is_closed.return_value = False
                client._test_key = kwargs.get("aws_access_key", "unknown")
                if create_count["n"] == 1:
                    event_a_created.set()
                    event_b_done.wait(timeout=5)
                return client

            MockBedrock.side_effect = make_client

            def call_a():
                results["a"] = model.get_client()

            def call_b():
                # B starts its call only once A is mid-construction
                event_a_created.wait(timeout=5)
                results["b"] = model.get_client()
                event_b_done.set()

            ta = threading.Thread(target=call_a)
            tb = threading.Thread(target=call_b)
            ta.start()
            tb.start()
            ta.join(timeout=10)
            tb.join(timeout=10)
            # Each thread must see the credentials snapshot taken for its own
            # call, and the two overlapping calls must not share a client.
            assert "a" in results and "b" in results
            assert results["a"]._test_key == "KEY_1", f"Thread A expected KEY_1 but got {results['a']._test_key}"
            assert results["b"]._test_key == "KEY_2", f"Thread B expected KEY_2 but got {results['b']._test_key}"
            assert results["a"] is not results["b"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/aws/test_session_concurrency.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/90_models/openai/chat/pdf_input_bytes.py | """
Openai Pdf Input Bytes
=========================
"""
from pathlib import Path
from agno.agent import Agent
from agno.media import File
from agno.models.openai.chat import OpenAIChat
from agno.utils.media import download_file
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
recipes_pdf = Path(__file__).parent.joinpath("ThaiRecipes.pdf")

# Fetch the sample PDF next to this script
download_file(
    "https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf", str(recipes_pdf)
)

agent = Agent(
    model=OpenAIChat(id="gpt-5-mini"),
    markdown=True,
)

# Attach the PDF to the request as raw bytes rather than a path or URL
agent.print_response(
    "Summarize the contents of the attached file.",
    files=[File(content=recipes_pdf.read_bytes())],
)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/chat/pdf_input_bytes.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/91_tools/duckduckgo_tools_advanced.py | """
DuckDuckGo Tools - Advanced Configuration
==========================================
Demonstrates advanced DuckDuckGoTools configuration with timelimit, region,
and backend parameters for customized search behavior.
Parameters:
- timelimit: Filter results by time ("d" = day, "w" = week, "m" = month, "y" = year)
- region: Localize results (e.g., "us-en", "uk-en", "de-de", "fr-fr", "ru-ru")
- backend: Search backend ("api", "html", "lite")
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.duckduckgo import DuckDuckGoTools
# ---------------------------------------------------------------------------
# Example 1: Time-limited search (results from past week)
# ---------------------------------------------------------------------------
# Useful for finding recent news, updates, or time-sensitive information
weekly_search_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
timelimit="w", # Results from past week only
enable_search=True,
enable_news=True,
)
],
instructions=["Search for recent information from the past week."],
)
# ---------------------------------------------------------------------------
# Example 2: Region-specific search (US English results)
# ---------------------------------------------------------------------------
# Useful for localized results based on user's region
us_region_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
region="us-en", # US English results
enable_search=True,
enable_news=True,
)
],
instructions=["Search for information with US-localized results."],
)
# ---------------------------------------------------------------------------
# Example 3: Different backend options
# ---------------------------------------------------------------------------
# The backend parameter controls how DuckDuckGo is queried
# API backend - uses DuckDuckGo's API
api_backend_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
backend="api",
enable_search=True,
enable_news=True,
)
],
)
# HTML backend - parses HTML results
html_backend_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
backend="html",
enable_search=True,
enable_news=True,
)
],
)
# Lite backend - lightweight parsing
lite_backend_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
backend="lite",
enable_search=True,
enable_news=True,
)
],
)
# ---------------------------------------------------------------------------
# Example 4: Combined configuration - Full customization
# ---------------------------------------------------------------------------
# Combine all parameters for maximum control over search behavior
fully_configured_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
timelimit="w", # Results from past week
region="us-en", # US English results
backend="api", # Use API backend
enable_search=True,
enable_news=True,
fixed_max_results=10, # Limit to 10 results
timeout=15, # 15 second timeout
)
],
instructions=[
"You are a research assistant that finds recent US news and information.",
"Always provide sources for your findings.",
],
)
# ---------------------------------------------------------------------------
# Example 5: European region search with monthly timelimit
# ---------------------------------------------------------------------------
eu_monthly_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
timelimit="m", # Results from past month
region="de-de", # German results
enable_search=True,
enable_news=True,
)
],
instructions=["Search for information with German-localized results."],
)
# ---------------------------------------------------------------------------
# Example 6: Daily news search
# ---------------------------------------------------------------------------
# Perfect for finding breaking news and today's updates
daily_news_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
DuckDuckGoTools(
timelimit="d", # Results from past day only
enable_search=False, # Disable web search
enable_news=True, # Enable news only
)
],
instructions=[
"You are a news assistant that finds today's breaking news.",
"Focus on the most recent and relevant stories.",
],
)
# ---------------------------------------------------------------------------
# Run Examples
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Example 1: Weekly search
print("\n" + "=" * 60)
print("Example 1: Time-limited search (past week)")
print("=" * 60)
weekly_search_agent.print_response(
"What are the latest developments in AI?", markdown=True
)
# Example 2: US region search
print("\n" + "=" * 60)
print("Example 2: Region-specific search (US English)")
print("=" * 60)
us_region_agent.print_response("What are the trending tech topics?", markdown=True)
# Example 3: API backend
print("\n" + "=" * 60)
print("Example 3: API backend")
print("=" * 60)
api_backend_agent.print_response("What is quantum computing?", markdown=True)
# Example 4: Fully configured agent
print("\n" + "=" * 60)
print("Example 4: Fully configured agent (weekly, US, API backend)")
print("=" * 60)
fully_configured_agent.print_response(
"Find recent news about renewable energy in the US", markdown=True
)
# Example 5: European region with monthly timelimit
print("\n" + "=" * 60)
print("Example 5: European region (German) with monthly timelimit")
print("=" * 60)
eu_monthly_agent.print_response(
"What are the latest technology trends?", markdown=True
)
# Example 6: Daily news
print("\n" + "=" * 60)
print("Example 6: Daily news search")
print("=" * 60)
daily_news_agent.print_response(
"What are today's top headlines in technology?", markdown=True
)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/duckduckgo_tools_advanced.py",
"license": "Apache License 2.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/91_tools/websearch_tools_advanced.py | """
WebSearch Tools - Advanced Configuration
=========================================
Demonstrates advanced WebSearchTools configuration with timelimit, region,
and backend parameters for customized search behavior across multiple
search engines.
Parameters:
- timelimit: Filter results by time ("d" = day, "w" = week, "m" = month, "y" = year)
- region: Localize results (e.g., "us-en", "uk-en", "de-de", "fr-fr", "ru-ru")
- backend: Search backend ("auto", "duckduckgo", "google", "bing", "brave", "yandex", "yahoo")
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Example 1: Time-limited search with auto backend
# ---------------------------------------------------------------------------
# Filter results to specific time periods
# Past day - for breaking news
daily_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
timelimit="d", # Results from past day
backend="auto",
)
],
instructions=["Search for the most recent information from today."],
)
# Past week - for recent developments
weekly_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
timelimit="w", # Results from past week
backend="auto",
)
],
instructions=["Search for recent information from the past week."],
)
# Past month - for broader recent context
monthly_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
timelimit="m", # Results from past month
backend="auto",
)
],
instructions=["Search for information from the past month."],
)
# Past year - for yearly trends
yearly_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
timelimit="y", # Results from past year
backend="auto",
)
],
instructions=["Search for information from the past year."],
)
# ---------------------------------------------------------------------------
# Example 2: Region-specific searches
# ---------------------------------------------------------------------------
# Localize search results based on region
# US English
us_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
region="us-en",
backend="auto",
)
],
instructions=["Provide US-localized search results."],
)
# UK English
uk_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
region="uk-en",
backend="auto",
)
],
instructions=["Provide UK-localized search results."],
)
# German
de_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
region="de-de",
backend="auto",
)
],
instructions=["Provide German-localized search results."],
)
# French
fr_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
region="fr-fr",
backend="auto",
)
],
instructions=["Provide French-localized search results."],
)
# Russian
ru_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
region="ru-ru",
backend="auto",
)
],
instructions=["Provide Russian-localized search results."],
)
# ---------------------------------------------------------------------------
# Example 3: Different backend options
# ---------------------------------------------------------------------------
# Use specific search engines as backends
# DuckDuckGo backend
duckduckgo_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="duckduckgo",
timelimit="w",
region="us-en",
)
],
)
# Google backend
google_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="google",
timelimit="w",
region="us-en",
)
],
)
# Bing backend
bing_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="bing",
timelimit="w",
region="us-en",
)
],
)
# Brave backend
brave_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="brave",
timelimit="w",
region="us-en",
)
],
)
# Yandex backend
yandex_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="yandex",
timelimit="w",
region="ru-ru", # Yandex works well with Russian region
)
],
)
# Yahoo backend
yahoo_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="yahoo",
timelimit="w",
region="us-en",
)
],
)
# ---------------------------------------------------------------------------
# Example 4: Combined configuration - Research assistant
# ---------------------------------------------------------------------------
# Combine all parameters for a powerful research assistant
research_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="auto", # Auto-select best available backend
timelimit="w", # Focus on recent results
region="us-en", # US English results
fixed_max_results=10, # Get more results
timeout=20, # Longer timeout for thorough search
)
],
instructions=[
"You are a research assistant that finds comprehensive, recent information.",
"Always cite your sources and provide context for your findings.",
"Focus on authoritative and reliable sources.",
],
)
# ---------------------------------------------------------------------------
# Example 5: News-focused agent with time and region filters
# ---------------------------------------------------------------------------
news_agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
WebSearchTools(
backend="auto",
timelimit="d", # Today's news only
region="us-en",
enable_search=False, # Disable general search
enable_news=True, # Enable news search only
)
],
instructions=[
"You are a news assistant that finds today's breaking news.",
"Summarize the key points and provide source links.",
],
)
# ---------------------------------------------------------------------------
# Example 6: Multi-region comparison agent
# ---------------------------------------------------------------------------
# Create agents for different regions to compare perspectives
def create_regional_agent(region: str, region_name: str) -> Agent:
    """Build a search agent localized to a single region.

    Args:
        region: Region code passed to the search tool (e.g. "us-en").
        region_name: Human-readable region label used in the instructions.

    Returns:
        An Agent configured with a region-scoped, week-limited web search tool.
    """
    regional_search = WebSearchTools(
        backend="auto",
        timelimit="w",
        region=region,
    )
    return Agent(
        model=OpenAIChat(id="gpt-4o"),
        tools=[regional_search],
        instructions=[
            f"You are a search assistant for {region_name}.",
            "Provide localized search results and perspectives.",
        ],
    )
# Create regional agents
us_regional = create_regional_agent("us-en", "United States")
uk_regional = create_regional_agent("uk-en", "United Kingdom")
de_regional = create_regional_agent("de-de", "Germany")
# ---------------------------------------------------------------------------
# Run Examples
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Example 1: Time-limited search
print("\n" + "=" * 60)
print("Example 1: Weekly time-limited search")
print("=" * 60)
weekly_agent.print_response("What are the latest AI developments?", markdown=True)
# Example 2: Region-specific search (US)
print("\n" + "=" * 60)
print("Example 2: US region search")
print("=" * 60)
us_agent.print_response("What are trending tech topics?", markdown=True)
# Example 3: DuckDuckGo backend with filters
print("\n" + "=" * 60)
print("Example 3: DuckDuckGo backend with time and region filters")
print("=" * 60)
duckduckgo_agent.print_response("What is quantum computing?", markdown=True)
# Example 4: Research assistant
print("\n" + "=" * 60)
print("Example 4: Research assistant (combined configuration)")
print("=" * 60)
research_agent.print_response(
"Find recent research on large language models", markdown=True
)
# Example 5: News agent
print("\n" + "=" * 60)
print("Example 5: News-focused agent (daily news)")
print("=" * 60)
news_agent.print_response("What are today's top tech headlines?", markdown=True)
# Example 6: Regional comparison
print("\n" + "=" * 60)
print("Example 6: US regional agent")
print("=" * 60)
us_regional.print_response("What is the economic outlook?", markdown=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/websearch_tools_advanced.py",
"license": "Apache License 2.0",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_duckduckgo.py | """Unit tests for DuckDuckGoTools class."""
import json
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.duckduckgo import DuckDuckGoTools
@pytest.fixture
def mock_ddgs():
    """Patch DDGS and yield (mock search instance, patched class)."""
    with patch("agno.tools.websearch.DDGS") as patched_cls:
        search_mock = MagicMock()
        # The tools use DDGS as a context manager, so wire up __enter__/__exit__.
        ctx = patched_cls.return_value
        ctx.__enter__ = MagicMock(return_value=search_mock)
        ctx.__exit__ = MagicMock(return_value=False)
        yield search_mock, patched_cls
# ============================================================================
# INITIALIZATION TESTS
# ============================================================================
def test_init_defaults():
    """DuckDuckGoTools() should expose the documented default settings."""
    with patch("agno.tools.websearch.DDGS"):
        tools = DuckDuckGoTools()
    # Transport defaults
    assert tools.backend == "duckduckgo"
    assert tools.timeout == 10
    assert tools.proxy is None
    assert tools.verify_ssl is True
    # Search-behavior defaults
    assert tools.fixed_max_results is None
    assert tools.modifier is None
    assert tools.timelimit is None
    assert tools.region is None
def test_init_with_timelimit():
"""Test initialization with timelimit parameter."""
with patch("agno.tools.websearch.DDGS"):
tools = DuckDuckGoTools(timelimit="d")
assert tools.timelimit == "d"
def test_init_with_region():
"""Test initialization with region parameter."""
with patch("agno.tools.websearch.DDGS"):
tools = DuckDuckGoTools(region="us-en")
assert tools.region == "us-en"
def test_init_with_backend():
"""Test initialization with custom backend parameter."""
with patch("agno.tools.websearch.DDGS"):
tools = DuckDuckGoTools(backend="html")
assert tools.backend == "html"
def test_init_backend_defaults_to_duckduckgo():
"""Test that backend defaults to duckduckgo when not specified."""
with patch("agno.tools.websearch.DDGS"):
tools = DuckDuckGoTools()
assert tools.backend == "duckduckgo"
def test_init_with_all_new_params():
"""Test initialization with all new parameters."""
with patch("agno.tools.websearch.DDGS"):
tools = DuckDuckGoTools(
timelimit="w",
region="uk-en",
backend="lite",
)
assert tools.timelimit == "w"
assert tools.region == "uk-en"
assert tools.backend == "lite"
def test_init_with_all_params():
"""Test initialization with all parameters."""
with patch("agno.tools.websearch.DDGS"):
tools = DuckDuckGoTools(
enable_search=True,
enable_news=True,
modifier="site:example.com",
fixed_max_results=20,
proxy="http://proxy:8080",
timeout=60,
verify_ssl=False,
timelimit="m",
region="ru-ru",
backend="api",
)
assert tools.backend == "api"
assert tools.proxy == "http://proxy:8080"
assert tools.timeout == 60
assert tools.fixed_max_results == 20
assert tools.modifier == "site:example.com"
assert tools.verify_ssl is False
assert tools.timelimit == "m"
assert tools.region == "ru-ru"
# ============================================================================
# BACKWARD COMPATIBILITY TESTS
# ============================================================================
def test_backward_compat_aliases():
    """Legacy method names should resolve to the renamed implementations."""
    with patch("agno.tools.websearch.DDGS"):
        tools = DuckDuckGoTools()
    alias_map = {
        "duckduckgo_search": "web_search",
        "duckduckgo_news": "search_news",
    }
    for legacy, current in alias_map.items():
        assert getattr(tools, legacy) == getattr(tools, current)
# ============================================================================
# SEARCH WITH TIMELIMIT TESTS
# ============================================================================
def test_web_search_with_timelimit(mock_ddgs):
    """A configured timelimit must be forwarded to ddgs.text()."""
    search_mock, _ = mock_ddgs
    search_mock.text.return_value = []
    DuckDuckGoTools(timelimit="d").web_search("test query")
    search_mock.text.assert_called_once_with(
        query="test query", max_results=5, backend="duckduckgo", timelimit="d"
    )
def test_search_news_with_timelimit(mock_ddgs):
"""Test that timelimit is passed to ddgs.news()."""
mock_instance, _ = mock_ddgs
mock_instance.news.return_value = []
tools = DuckDuckGoTools(timelimit="w")
tools.search_news("test news")
mock_instance.news.assert_called_once_with(query="test news", max_results=5, backend="duckduckgo", timelimit="w")
def test_web_search_without_timelimit(mock_ddgs):
"""Test that timelimit is not passed when None."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools()
tools.web_search("test query")
mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="duckduckgo")
# ============================================================================
# SEARCH WITH REGION TESTS
# ============================================================================
def test_web_search_with_region(mock_ddgs):
"""Test that region is passed to ddgs.text()."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools(region="us-en")
tools.web_search("test query")
mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="duckduckgo", region="us-en")
def test_search_news_with_region(mock_ddgs):
"""Test that region is passed to ddgs.news()."""
mock_instance, _ = mock_ddgs
mock_instance.news.return_value = []
tools = DuckDuckGoTools(region="uk-en")
tools.search_news("test news")
mock_instance.news.assert_called_once_with(query="test news", max_results=5, backend="duckduckgo", region="uk-en")
def test_web_search_without_region(mock_ddgs):
"""Test that region is not passed when None."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools()
tools.web_search("test query")
mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="duckduckgo")
# ============================================================================
# SEARCH WITH BACKEND TESTS
# ============================================================================
def test_web_search_with_custom_backend(mock_ddgs):
"""Test that custom backend is passed to ddgs.text()."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools(backend="html")
tools.web_search("test query")
mock_instance.text.assert_called_once_with(query="test query", max_results=5, backend="html")
def test_search_news_with_custom_backend(mock_ddgs):
"""Test that custom backend is passed to ddgs.news()."""
mock_instance, _ = mock_ddgs
mock_instance.news.return_value = []
tools = DuckDuckGoTools(backend="lite")
tools.search_news("test news")
mock_instance.news.assert_called_once_with(query="test news", max_results=5, backend="lite")
# ============================================================================
# COMBINED PARAMETERS TESTS
# ============================================================================
def test_web_search_with_all_params(mock_ddgs):
    """Web search should combine modifier, max results, backend, timelimit and region."""
    search_mock, _ = mock_ddgs
    search_mock.text.return_value = [
        {"title": "Result 1", "href": "https://example.com", "body": "Description 1"},
    ]
    tools = DuckDuckGoTools(
        timelimit="m",
        region="us-en",
        backend="api",
        fixed_max_results=10,
        modifier="site:github.com",
    )
    payload = json.loads(tools.web_search("python frameworks"))
    assert len(payload) == 1
    # The modifier is prepended to the query; every option flows through.
    search_mock.text.assert_called_once_with(
        query="site:github.com python frameworks",
        max_results=10,
        backend="api",
        timelimit="m",
        region="us-en",
    )
def test_search_news_with_all_params(mock_ddgs):
"""Test news search with timelimit, region, and custom backend."""
mock_instance, _ = mock_ddgs
mock_instance.news.return_value = [
{"title": "News 1", "url": "https://news.com", "body": "News body 1"},
]
tools = DuckDuckGoTools(
timelimit="d",
region="uk-en",
backend="html",
fixed_max_results=3,
)
result = tools.search_news("breaking news")
result_data = json.loads(result)
assert len(result_data) == 1
mock_instance.news.assert_called_once_with(
query="breaking news",
max_results=3,
backend="html",
timelimit="d",
region="uk-en",
)
# ============================================================================
# TIMELIMIT VALUES TESTS
# ============================================================================
def test_timelimit_day(mock_ddgs):
"""Test timelimit with 'd' for day."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools(timelimit="d")
tools.web_search("test")
call_kwargs = mock_instance.text.call_args[1]
assert call_kwargs["timelimit"] == "d"
def test_timelimit_week(mock_ddgs):
"""Test timelimit with 'w' for week."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools(timelimit="w")
tools.web_search("test")
call_kwargs = mock_instance.text.call_args[1]
assert call_kwargs["timelimit"] == "w"
def test_timelimit_month(mock_ddgs):
"""Test timelimit with 'm' for month."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools(timelimit="m")
tools.web_search("test")
call_kwargs = mock_instance.text.call_args[1]
assert call_kwargs["timelimit"] == "m"
def test_timelimit_year(mock_ddgs):
"""Test timelimit with 'y' for year."""
mock_instance, _ = mock_ddgs
mock_instance.text.return_value = []
tools = DuckDuckGoTools(timelimit="y")
tools.web_search("test")
call_kwargs = mock_instance.text.call_args[1]
assert call_kwargs["timelimit"] == "y"
# ============================================================================
# TIMELIMIT VALIDATION TESTS
# ============================================================================
def test_invalid_timelimit_raises_error():
    """Constructing with an unsupported timelimit should fail fast."""
    with patch("agno.tools.websearch.DDGS"):
        with pytest.raises(ValueError, match="Invalid timelimit 'invalid'"):
            DuckDuckGoTools(timelimit="invalid")
def test_invalid_timelimit_empty_string():
"""Test that empty string timelimit raises ValueError."""
with patch("agno.tools.websearch.DDGS"):
with pytest.raises(ValueError) as exc_info:
DuckDuckGoTools(timelimit="")
assert "Invalid timelimit ''" in str(exc_info.value)
def test_invalid_timelimit_uppercase():
"""Test that uppercase timelimit raises ValueError (case-sensitive)."""
with patch("agno.tools.websearch.DDGS"):
with pytest.raises(ValueError) as exc_info:
DuckDuckGoTools(timelimit="W")
assert "Invalid timelimit 'W'" in str(exc_info.value)
def test_invalid_timelimit_full_word():
"""Test that full word timelimit raises ValueError."""
with patch("agno.tools.websearch.DDGS"):
with pytest.raises(ValueError) as exc_info:
DuckDuckGoTools(timelimit="week")
assert "Invalid timelimit 'week'" in str(exc_info.value)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_duckduckgo.py",
"license": "Apache License 2.0",
"lines": 266,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/vector_db/pgvector/pgvector_hybrid_similarity_threshold.py | from agno.knowledge.knowledge import Knowledge
from agno.vectordb.pgvector import PgVector
from agno.vectordb.search import SearchType
# Local pgvector instance (see cookbook docker setup); credentials ai/ai.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# Hybrid search with a similarity threshold: chunks scoring below 0.2 are
# dropped. NOTE(review): assumes scores are normalized to [0, 1] — confirm
# against PgVector's scoring behavior.
vector_db = PgVector(
    table_name="vectors_hybrid",
    db_url=db_url,
    search_type=SearchType.hybrid,
    similarity_threshold=0.2,
)
knowledge = Knowledge(
    name="Thai Recipes",
    description="Knowledge base with Thai recipes",
    vector_db=vector_db,
)
# Seed two on-topic chunks and one off-topic (weather) chunk so the
# threshold has something to filter; skip_if_exists avoids duplicate
# inserts on reruns (per its name — confirm exact semantics).
knowledge.insert(
    name="thai_curry",
    text_content="Thai green curry is a spicy dish made with coconut milk and green chilies.",
    skip_if_exists=True,
)
knowledge.insert(
    name="pad_thai",
    text_content="Pad Thai is a stir-fried rice noodle dish commonly served as street food in Thailand.",
    skip_if_exists=True,
)
knowledge.insert(
    name="weather",
    text_content="The weather forecast shows sunny skies with temperatures around 75 degrees.",
    skip_if_exists=True,
)
# Query and print each retrieved chunk with its similarity score to make
# the threshold's effect visible.
query = "What is the weather today?"
results = vector_db.search(query, limit=5)
print(f"Query: '{query}'")
print(f"Chunks retrieved: {len(results)}")
for i, doc in enumerate(results):
    score = doc.meta_data.get("similarity_score", 0)
    print(f"{i + 1}. score={score:.3f}, {doc.content}")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/vector_db/pgvector/pgvector_hybrid_similarity_threshold.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/vector_db/pgvector/pgvector_similarity_threshold.py | from agno.knowledge.knowledge import Knowledge
from agno.vectordb.pgvector import PgVector
# Local pgvector instance (see cookbook docker setup); credentials ai/ai.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# similarity_threshold=0.2 drops chunks scoring below 0.2. NOTE(review):
# assumes scores are normalized to [0, 1] — confirm against PgVector docs.
vector_db = PgVector(
    table_name="vectors",
    db_url=db_url,
    similarity_threshold=0.2,
)
knowledge = Knowledge(
    name="Thai Recipes",
    description="Knowledge base with Thai recipes",
    vector_db=vector_db,
)
# Two on-topic chunks plus one off-topic (weather) chunk give the threshold
# something to filter; skip_if_exists avoids duplicate inserts on reruns.
knowledge.insert(
    name="thai_curry",
    text_content="Thai green curry is a spicy dish made with coconut milk and green chilies.",
    skip_if_exists=True,
)
knowledge.insert(
    name="pad_thai",
    text_content="Pad Thai is a stir-fried rice noodle dish commonly served as street food in Thailand.",
    skip_if_exists=True,
)
knowledge.insert(
    name="weather",
    text_content="The weather forecast shows sunny skies with temperatures around 75 degrees.",
    skip_if_exists=True,
)
# Run an off-topic query and print per-chunk similarity scores.
query = "What is the weather in Tokyo?"
results = vector_db.search(query, limit=5)
print(f"Query: '{query}'")
print(f"Chunks retrieved: {len(results)}")
for i, doc in enumerate(results):
    score = doc.meta_data.get("similarity_score", 0)
    print(f"{i + 1}. score={score:.3f}, {doc.content}")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/vector_db/pgvector/pgvector_similarity_threshold.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/vectordb/score.py | """Score normalization utilities for vector database distance metrics."""
import math
from agno.vectordb.distance import Distance
def normalize_cosine(distance: float) -> float:
    """Map a cosine distance onto a similarity score.

    Distance 0 (identical vectors) scores 1.0; distances of 1 (orthogonal)
    and beyond clamp to 0.0, and negative distances clamp to 1.0.

    Args:
        distance: Cosine distance (0=identical, 1=orthogonal, 2=opposite).

    Returns:
        Similarity score in [0.0, 1.0]; NaN/inf inputs yield 0.0.
    """
    if not math.isfinite(distance):
        return 0.0
    score = 1.0 - distance
    if score < 0.0:
        return 0.0
    return score if score <= 1.0 else 1.0
def normalize_l2(distance: float) -> float:
    """Map an L2 (Euclidean) distance onto a similarity score.

    Applies the 1 / (1 + d) transform: distance 0 gives 1.0 and larger
    distances decay toward 0.

    Args:
        distance: Euclidean distance (0 = identical vectors).

    Returns:
        Similarity score; 0.0 for NaN/inf inputs, (0.0, 1.0] for
        non-negative finite distances.
    """
    if math.isfinite(distance):
        return 1.0 / (1.0 + distance)
    return 0.0
def normalize_max_inner_product(inner_product: float) -> float:
    """Map an inner-product score onto a [0, 1] similarity score.

    Applies the affine map (x + 1) / 2 and clamps, so 1 -> 1.0,
    0 -> 0.5, -1 -> 0.0.

    Args:
        inner_product: Inner product (1=identical, 0=orthogonal, -1=opposite).

    Returns:
        Similarity score in [0.0, 1.0]. NaN yields 0.0; +inf saturates
        to 1.0 and -inf to 0.0.
    """
    if math.isnan(inner_product):
        return 0.0
    if math.isinf(inner_product):
        return 1.0 if inner_product > 0 else 0.0
    shifted = (inner_product + 1.0) / 2.0
    return min(1.0, max(0.0, shifted))
def normalize_score(distance: float, metric: Distance) -> float:
    """Normalize a raw distance into a similarity score for the given metric.

    Dispatches to the metric-specific normalizer.

    Args:
        distance: Raw distance or score value.
        metric: Distance metric that produced ``distance``.

    Returns:
        Similarity score in [0.0, 1.0].

    Raises:
        ValueError: If ``metric`` is not a supported Distance member.
    """
    dispatch = {
        Distance.cosine: normalize_cosine,
        Distance.l2: normalize_l2,
        Distance.max_inner_product: normalize_max_inner_product,
    }
    normalizer = dispatch.get(metric)
    if normalizer is None:
        raise ValueError(f"Unknown distance metric: {metric}")
    return normalizer(distance)
def score_to_cosine_distance(similarity: float) -> float:
    """Invert a similarity score into a cosine distance threshold."""
    distance = 1.0 - similarity
    return distance
def score_to_l2_distance(similarity: float) -> float:
    """Invert a similarity score into an L2 distance threshold.

    Inverse of the 1 / (1 + d) transform, which is only defined for
    strictly positive scores.

    Raises:
        ValueError: If ``similarity`` is zero or negative.
    """
    if similarity <= 0:
        raise ValueError("similarity must be > 0 for L2 distance conversion")
    return 1.0 / similarity - 1.0
def score_to_max_inner_product(similarity: float) -> float:
    """Invert a similarity score into an inner-product threshold."""
    # Inverse of the (x + 1) / 2 normalization.
    return (similarity * 2.0) - 1.0
def score_to_distance_threshold(similarity: float, metric: Distance) -> float:
    """Translate a similarity score into a raw distance threshold.

    Inverse of ``normalize_score``: the returned value can be compared
    against raw distances to filter results below ``similarity``.

    Args:
        similarity: Minimum similarity score (0.0-1.0).
        metric: Distance metric the threshold will be compared against.

    Returns:
        Raw distance threshold for filtering.

    Raises:
        ValueError: If ``metric`` is not a supported Distance member.
    """
    converters = {
        Distance.cosine: score_to_cosine_distance,
        Distance.l2: score_to_l2_distance,
        Distance.max_inner_product: score_to_max_inner_product,
    }
    converter = converters.get(metric)
    if converter is None:
        raise ValueError(f"Unknown distance metric: {metric}")
    return converter(similarity)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/score.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/unit/vectordb/test_score.py | """Unit tests for vectordb score normalization functions."""
import pytest
from agno.vectordb.distance import Distance
from agno.vectordb.score import (
normalize_cosine,
normalize_l2,
normalize_max_inner_product,
normalize_score,
score_to_cosine_distance,
score_to_distance_threshold,
score_to_l2_distance,
score_to_max_inner_product,
)
def test_normalize_cosine_identical_vectors():
"""Distance 0 should return similarity 1.0."""
assert normalize_cosine(0.0) == 1.0
def test_normalize_cosine_orthogonal_vectors():
"""Distance 1 should return similarity 0.0."""
assert normalize_cosine(1.0) == 0.0
def test_normalize_cosine_opposite_vectors():
"""Distance 2 should return similarity 0.0 (clamped)."""
assert normalize_cosine(2.0) == 0.0
def test_normalize_cosine_typical_distance():
    """A cosine distance of 0.3 maps to a similarity of 0.7."""
    similarity = normalize_cosine(0.3)
    assert similarity == pytest.approx(0.7)
def test_normalize_cosine_nan_returns_zero():
"""NaN input should return 0.0."""
assert normalize_cosine(float("nan")) == 0.0
def test_normalize_cosine_inf_returns_zero():
"""Infinite input should return 0.0."""
assert normalize_cosine(float("inf")) == 0.0
assert normalize_cosine(float("-inf")) == 0.0
def test_normalize_cosine_negative_distance_clamped():
"""Negative distance should be clamped to 1.0."""
assert normalize_cosine(-0.5) == 1.0
def test_normalize_l2_identical_vectors():
    """Zero L2 distance maps to a perfect similarity of 1.0."""
    score = normalize_l2(0.0)
    assert score == 1.0


def test_normalize_l2_distance_one():
    """An L2 distance of 1 maps to a similarity of exactly 0.5."""
    score = normalize_l2(1.0)
    assert score == 0.5


def test_normalize_l2_large_distance():
    """Very large L2 distances push the similarity towards zero."""
    assert normalize_l2(100.0) == pytest.approx(1 / 101)


def test_normalize_l2_nan_returns_zero():
    """A NaN distance is treated as no similarity at all."""
    assert normalize_l2(float("nan")) == 0.0


def test_normalize_l2_inf_returns_zero():
    """An infinite distance is treated as no similarity at all."""
    assert normalize_l2(float("inf")) == 0.0
def test_normalize_max_inner_product_identical_vectors():
    """An inner product of 1 maps to a perfect similarity of 1.0."""
    score = normalize_max_inner_product(1.0)
    assert score == 1.0


def test_normalize_max_inner_product_orthogonal_vectors():
    """An inner product of 0 sits at the midpoint similarity of 0.5."""
    score = normalize_max_inner_product(0.0)
    assert score == 0.5


def test_normalize_max_inner_product_opposite_vectors():
    """An inner product of -1 maps to a similarity of 0.0."""
    score = normalize_max_inner_product(-1.0)
    assert score == 0.0


def test_normalize_max_inner_product_typical():
    """An inner product of 0.8 maps to a similarity of 0.9."""
    assert normalize_max_inner_product(0.8) == pytest.approx(0.9)


def test_normalize_max_inner_product_nan_returns_zero():
    """A NaN inner product is treated as no similarity at all."""
    assert normalize_max_inner_product(float("nan")) == 0.0


def test_normalize_max_inner_product_positive_inf_returns_one():
    """A positive-infinite inner product saturates at similarity 1.0."""
    assert normalize_max_inner_product(float("inf")) == 1.0


def test_normalize_max_inner_product_negative_inf_returns_zero():
    """A negative-infinite inner product saturates at similarity 0.0."""
    assert normalize_max_inner_product(float("-inf")) == 0.0
def test_normalize_score_cosine_dispatch():
    """The cosine metric routes through normalize_cosine."""
    assert normalize_score(0.3, Distance.cosine) == pytest.approx(0.7)


def test_normalize_score_l2_dispatch():
    """The L2 metric routes through normalize_l2."""
    assert normalize_score(1.0, Distance.l2) == 0.5


def test_normalize_score_max_inner_product_dispatch():
    """The max-inner-product metric routes through normalize_max_inner_product."""
    assert normalize_score(0.8, Distance.max_inner_product) == pytest.approx(0.9)


def test_normalize_score_unknown_metric_raises():
    """An unrecognized metric is rejected with a ValueError."""
    with pytest.raises(ValueError, match="Unknown distance metric"):
        normalize_score(0.5, "invalid")  # type: ignore
def test_score_to_cosine_distance_similarity_one():
    """A perfect similarity of 1.0 maps back to zero distance."""
    assert score_to_cosine_distance(1.0) == 0.0


def test_score_to_cosine_distance_similarity_zero():
    """A similarity of 0.0 maps back to a distance of 1."""
    assert score_to_cosine_distance(0.0) == 1.0


def test_score_to_cosine_distance_typical():
    """A similarity of 0.7 maps back to a distance of 0.3."""
    assert score_to_cosine_distance(0.7) == pytest.approx(0.3)


def test_score_to_cosine_distance_roundtrip():
    """score -> distance -> score recovers the starting similarity."""
    original = 0.65
    recovered = normalize_cosine(score_to_cosine_distance(original))
    assert recovered == pytest.approx(original)
def test_score_to_l2_distance_similarity_one():
    """A perfect similarity of 1.0 maps back to zero distance."""
    assert score_to_l2_distance(1.0) == 0.0


def test_score_to_l2_distance_similarity_half():
    """A similarity of 0.5 maps back to a distance of 1."""
    assert score_to_l2_distance(0.5) == 1.0


def test_score_to_l2_distance_zero_raises():
    """A similarity of exactly 0 cannot be inverted and raises."""
    with pytest.raises(ValueError, match="must be > 0"):
        score_to_l2_distance(0.0)


def test_score_to_l2_distance_negative_raises():
    """Negative similarities cannot be inverted and raise."""
    with pytest.raises(ValueError, match="must be > 0"):
        score_to_l2_distance(-0.5)


def test_score_to_l2_distance_roundtrip():
    """score -> distance -> score recovers the starting similarity."""
    original = 0.8
    recovered = normalize_l2(score_to_l2_distance(original))
    assert recovered == pytest.approx(original)
def test_score_to_max_inner_product_similarity_one():
    """A perfect similarity of 1.0 maps back to an inner product of 1."""
    assert score_to_max_inner_product(1.0) == 1.0


def test_score_to_max_inner_product_similarity_half():
    """The midpoint similarity of 0.5 maps back to an inner product of 0."""
    assert score_to_max_inner_product(0.5) == 0.0


def test_score_to_max_inner_product_similarity_zero():
    """A similarity of 0.0 maps back to an inner product of -1."""
    assert score_to_max_inner_product(0.0) == -1.0


def test_score_to_max_inner_product_typical():
    """A similarity of 0.9 maps back to an inner product of 0.8."""
    assert score_to_max_inner_product(0.9) == pytest.approx(0.8)


def test_score_to_max_inner_product_roundtrip():
    """score -> inner product -> score recovers the starting similarity."""
    original = 0.75
    recovered = normalize_max_inner_product(score_to_max_inner_product(original))
    assert recovered == pytest.approx(original)
def test_score_to_distance_threshold_cosine():
    """The cosine metric routes through score_to_cosine_distance."""
    assert score_to_distance_threshold(0.7, Distance.cosine) == pytest.approx(0.3)


def test_score_to_distance_threshold_l2():
    """The L2 metric routes through score_to_l2_distance."""
    assert score_to_distance_threshold(0.5, Distance.l2) == 1.0


def test_score_to_distance_threshold_max_inner_product():
    """The max-inner-product metric routes through score_to_max_inner_product."""
    assert score_to_distance_threshold(0.9, Distance.max_inner_product) == pytest.approx(0.8)


def test_score_to_distance_threshold_unknown_raises():
    """An unrecognized metric is rejected with a ValueError."""
    with pytest.raises(ValueError, match="Unknown distance metric"):
        score_to_distance_threshold(0.5, "invalid")  # type: ignore
@pytest.mark.parametrize("similarity", [0.1, 0.25, 0.5, 0.75, 0.9, 0.99])
def test_cosine_roundtrip(similarity):
    """score -> threshold -> score is lossless for the cosine metric."""
    threshold = score_to_distance_threshold(similarity, Distance.cosine)
    assert normalize_score(threshold, Distance.cosine) == pytest.approx(similarity)


@pytest.mark.parametrize("similarity", [0.1, 0.25, 0.5, 0.75, 0.9, 0.99])
def test_l2_roundtrip(similarity):
    """score -> threshold -> score is lossless for the L2 metric."""
    threshold = score_to_distance_threshold(similarity, Distance.l2)
    assert normalize_score(threshold, Distance.l2) == pytest.approx(similarity)


@pytest.mark.parametrize("similarity", [0.1, 0.25, 0.5, 0.75, 0.9, 0.99])
def test_max_inner_product_roundtrip(similarity):
    """score -> threshold -> score is lossless for the max-inner-product metric."""
    threshold = score_to_distance_threshold(similarity, Distance.max_inner_product)
    assert normalize_score(threshold, Distance.max_inner_product) == pytest.approx(similarity)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/vectordb/test_score.py",
"license": "Apache License 2.0",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/levels_of_agentic_software/level_1_tools.py | """
Level 1: Agent with Tools
======================================
The simplest useful agent. A model, tools, and clear instructions.
No memory, no persistence — pure stateless tool calling.
This is where every agent should start. You'd be surprised how much
a single agent with good instructions and the right tools can accomplish.
Run standalone:
python cookbook/levels_of_agentic_software/level_1_tools.py
Run via Agent OS:
python cookbook/levels_of_agentic_software/run.py
Then visit https://os.agno.com and select "L1 Coding Agent"
Example prompt:
"Write a Fibonacci function, save it to fib.py, and run it to verify"
"""
from pathlib import Path
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
# ---------------------------------------------------------------------------
# Workspace
# ---------------------------------------------------------------------------
# Directory the agent reads/writes code files in; created eagerly on import.
WORKSPACE = Path(__file__).parent / "workspace"
WORKSPACE.mkdir(parents=True, exist_ok=True)
# ---------------------------------------------------------------------------
# Agent Instructions
# ---------------------------------------------------------------------------
# System prompt for the agent. The trailing backslashes keep the string
# literal free of leading/trailing blank lines.
instructions = """\
You are a coding agent. You write clean, well-documented Python code.
## Workflow
1. Understand the task
2. Write the code and save it to a file
3. Run the file to verify it works
4. If there are errors, fix them and re-run
## Rules
- Always save code to files before running
- Include type hints on function signatures
- Add a brief docstring to each function
- Test with at least 2-3 example inputs
- No emojis\
"""
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Level 1: stateless agent — model + tools + instructions only, no db,
# knowledge, or memory (see the module docstring above).
l1_coding_agent = Agent(
    name="L1 Coding Agent",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions=instructions,
    # Coding tools rooted at WORKSPACE; all=True presumably enables every
    # CodingTools operation — confirm against CodingTools docs.
    tools=[CodingTools(base_dir=WORKSPACE, all=True)],
    add_datetime_to_context=True,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Standalone demo: streams the response for the example task from the
    # module docstring.
    l1_coding_agent.print_response(
        "Write a Fibonacci function that returns the nth Fibonacci number. "
        "Save it to fib.py with a main block that prints the first 10 values, "
        "then run it to verify.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/levels_of_agentic_software/level_1_tools.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:cookbook/levels_of_agentic_software/level_2_storage_knowledge.py | """
Level 2: Agent with storage and knowledge
======================================
Add persistent storage and a searchable knowledge base.
The agent can recall conversations and use domain knowledge.
This builds on Level 1 by adding:
- Storage: SqliteDb for conversation history across sessions
- Knowledge: ChromaDb with hybrid search for domain knowledge
Run standalone:
python cookbook/levels_of_agentic_software/level_2_storage_knowledge.py
Run via Agent OS:
python cookbook/levels_of_agentic_software/run.py
Then visit https://os.agno.com and select "L2 Coding Agent"
Example prompt:
"Write a CSV parser following our coding conventions"
"""
from pathlib import Path
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.knowledge import Knowledge
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
from agno.vectordb.chroma import ChromaDb, SearchType
# ---------------------------------------------------------------------------
# Workspace
# ---------------------------------------------------------------------------
# Workspace directory for generated code and local databases; created on import.
WORKSPACE = Path(__file__).parent.joinpath("workspace")
WORKSPACE.mkdir(parents=True, exist_ok=True)
# ---------------------------------------------------------------------------
# Storage and Knowledge
# ---------------------------------------------------------------------------
# One SQLite file serves both as the agent's session db (passed as db=db
# below) and as the contents_db backing the knowledge base.
db = SqliteDb(db_file=str(WORKSPACE / "agents.db"))
knowledge = Knowledge(
    name="L2 Coding Agent Knowledge",
    vector_db=ChromaDb(
        collection="coding-standards",
        # Chroma index lives under the workspace directory
        path=str(WORKSPACE / "chromadb"),
        # persistent_client=True presumably persists the index at `path`
        # instead of keeping it in memory — confirm against ChromaDb docs.
        persistent_client=True,
        # Hybrid search mode, per the module docstring
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,
)
# ---------------------------------------------------------------------------
# Agent Instructions
# ---------------------------------------------------------------------------
# System prompt: the workflow instructs the agent to search the knowledge
# base before writing code. Trailing backslashes avoid blank edge lines.
instructions = """\
You are a coding agent with access to domain knowledge.
## Workflow
1. Search your knowledge base for relevant domain knowledge
2. Understand the task
3. Write code that follows the domain knowledge
4. Save the code to a file and run it to verify
5. If there are errors, fix them and re-run
## Rules
- Always search knowledge before writing code
- Follow domain knowledge found in the knowledge base
- Save code to files before running
- Include type hints and docstrings
- No emojis\
"""
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Level 2 = Level 1 plus storage (db) and a knowledge base — see the
# module docstring.
l2_coding_agent = Agent(
    name="L2 Coding Agent",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions=instructions,
    tools=[CodingTools(base_dir=WORKSPACE, all=True)],
    knowledge=knowledge,
    search_knowledge=True,
    db=db,
    # Include prior turns of the same session in context, capped at the
    # last 3 runs by num_history_runs
    add_history_to_context=True,
    num_history_runs=3,
    markdown=True,
    add_datetime_to_context=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Step 1: Load project conventions into the knowledge base
    print("Loading domain knowledge into knowledge base...")
    knowledge.insert(
        text_content="""\
## Domain Knowledge
### Style
- Use snake_case for all function and variable names
- Use type hints on all function signatures
- Write docstrings in Google style format
- Prefer list comprehensions over map/filter
- Maximum line length: 88 characters (Black formatter default)
### Error Handling
- Use specific exception types, never bare except
- Always include a meaningful error message
- Use logging instead of print for non-output messages
### File I/O
- Use pathlib.Path instead of os.path
- Use context managers (with statements) for file operations
- Default encoding: utf-8
### Testing
- Include example usage in a __main__ block
- Test edge cases: empty input, single element, large input
""",
    )
    # Step 2: Ask the agent to write code following conventions
    print("\n--- Session 1: Write code following conventions ---\n")
    l2_coding_agent.print_response(
        "Write a CSV parser that reads a CSV file and returns a list of "
        "dictionaries. Follow our project conventions. Save it to csv_parser.py "
        "and test it with sample data.",
        user_id="dev@example.com",
        session_id="session_1",
        stream=True,
    )
    # Step 3: Follow-up in the same session (agent has context).
    # Reuses session_1 so add_history_to_context supplies the prior exchange.
    print("\n--- Session 1: Follow-up question ---\n")
    l2_coding_agent.print_response(
        "Add a function to write dictionaries back to CSV format.",
        user_id="dev@example.com",
        session_id="session_1",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/levels_of_agentic_software/level_2_storage_knowledge.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:cookbook/levels_of_agentic_software/level_3_memory_learning.py | """
Level 3: Agent with memory and learning
====================================
The agent now learns from interactions and improves over time.
Interaction 1,000 should be better than interaction 1.
This builds on Level 2 by adding:
- LearningMachine: Captures insights and user preferences
- LearnedKnowledge (AGENTIC mode): Agent decides what to save and retrieve
- Agentic memory: Builds user profiles over time
- ReasoningTools: The think tool for structured reasoning
Run standalone:
python cookbook/levels_of_agentic_software/level_3_memory_learning.py
Run via Agent OS:
python cookbook/levels_of_agentic_software/run.py
Then visit https://os.agno.com and select "L3 Coding Agent"
Example prompt:
"Write a data pipeline using functional programming style"
"""
from pathlib import Path
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.knowledge import Knowledge
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.learn import LearnedKnowledgeConfig, LearningMachine, LearningMode
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
from agno.tools.reasoning import ReasoningTools
from agno.vectordb.chroma import ChromaDb, SearchType
# ---------------------------------------------------------------------------
# Workspace
# ---------------------------------------------------------------------------
# Workspace directory for generated code and local databases; created on import.
WORKSPACE = Path(__file__).parent.joinpath("workspace")
WORKSPACE.mkdir(parents=True, exist_ok=True)
# ---------------------------------------------------------------------------
# Storage
# ---------------------------------------------------------------------------
# Shared SQLite database: session storage plus contents_db for both
# knowledge bases below.
db = SqliteDb(db_file=str(WORKSPACE / "agents.db"))
# ---------------------------------------------------------------------------
# Knowledge: Static docs (project conventions)
# ---------------------------------------------------------------------------
docs_knowledge = Knowledge(
    name="L3 Coding Agent Knowledge",
    vector_db=ChromaDb(
        collection="coding-standards",
        path=str(WORKSPACE / "chromadb"),
        persistent_client=True,
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,
)
# ---------------------------------------------------------------------------
# Knowledge: Dynamic learnings (agent learns over time)
# ---------------------------------------------------------------------------
# Separate Chroma collection so learned insights stay apart from the static
# project-convention docs above (same on-disk Chroma path).
learned_knowledge = Knowledge(
    vector_db=ChromaDb(
        collection="coding-learnings",
        path=str(WORKSPACE / "chromadb"),
        persistent_client=True,
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,
)
# ---------------------------------------------------------------------------
# Agent Instructions
# ---------------------------------------------------------------------------
# System prompt: adds preference-checking and insight-saving steps on top
# of the Level 2 workflow. Trailing backslashes avoid blank edge lines.
instructions = """\
You are a coding agent that learns and improves over time.
## Workflow
1. Search your knowledge and learnings for relevant context
2. Check if the user has preferences you should follow
3. Write code that follows conventions and user preferences
4. Save the code to a file and run it to verify
5. Save any valuable insights or patterns for future use
## Rules
- Always search knowledge and learnings before writing code
- Apply user preferences from memory when writing code
- Save useful coding patterns and insights as learnings
- Follow project conventions from the knowledge base
- No emojis\
"""
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Level 3 = Level 2 plus LearningMachine (AGENTIC mode: the agent decides
# what to save/retrieve), agentic memory, and ReasoningTools — see the
# module docstring.
l3_coding_agent = Agent(
    name="L3 Coding Agent",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions=instructions,
    tools=[
        CodingTools(base_dir=WORKSPACE, all=True),
        ReasoningTools(),
    ],
    knowledge=docs_knowledge,
    search_knowledge=True,
    learning=LearningMachine(
        # Learnings are stored in the dedicated "coding-learnings" collection
        knowledge=learned_knowledge,
        learned_knowledge=LearnedKnowledgeConfig(
            mode=LearningMode.AGENTIC,
        ),
    ),
    enable_agentic_memory=True,
    db=db,
    add_history_to_context=True,
    num_history_runs=3,
    add_datetime_to_context=True,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    user_id = "dev@example.com"
    # Session 1: User teaches a preference, agent learns it
    print("\n" + "=" * 60)
    print("SESSION 1: Teaching the agent your preferences")
    print("=" * 60 + "\n")
    l3_coding_agent.print_response(
        "I strongly prefer functional programming style -- no classes, "
        "use pure functions, immutable data structures, and composition. "
        "Remember this preference for all future coding tasks. "
        "Now write a data pipeline that reads a list of numbers, filters evens, "
        "doubles them, and computes the sum. Save it to pipeline.py and run it.",
        user_id=user_id,
        session_id="session_1",
        stream=True,
    )
    # Show what the agent learned.
    # learning_machine is presumably populated from the learning= param
    # above — the guard covers the case where it is unset.
    if l3_coding_agent.learning_machine:
        print("\n--- Learned Knowledge ---")
        l3_coding_agent.learning_machine.learned_knowledge_store.print(
            query="coding preferences"
        )
    # Session 2: New task in a *different* session -- the agent should apply
    # the learned preferences without being told again
    print("\n" + "=" * 60)
    print("SESSION 2: New task -- agent should apply learned preferences")
    print("=" * 60 + "\n")
    l3_coding_agent.print_response(
        "Write a log parser that reads a log file, extracts error lines, "
        "groups them by error category, and returns a count per category. "
        "Save it to log_parser.py and run it with sample data.",
        user_id=user_id,
        session_id="session_2",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/levels_of_agentic_software/level_3_memory_learning.py",
"license": "Apache License 2.0",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/levels_of_agentic_software/level_4_team.py | """
Level 4: Multi-agent Team
===========================
Split responsibilities across specialized agents coordinated by a team leader.
Coder writes, Reviewer critiques, Tester validates.
This takes a different architectural path from the single-agent levels:
- Multiple specialized agents with distinct roles
- A Team leader that coordinates and synthesizes
Honest caveat: Multi-agent teams are powerful but less predictable than
single agents. The team leader is an LLM making delegation decisions --
sometimes brilliantly, sometimes not. For production automation, prefer
explicit workflows. Teams shine in human-supervised settings.
Run standalone:
python cookbook/levels_of_agentic_software/level_4_team.py
Run via Agent OS:
python cookbook/levels_of_agentic_software/run.py
Then visit https://os.agno.com and select "L4 Coding Team"
Example prompt:
"Build a stack data structure with full test coverage"
"""
from pathlib import Path
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIResponses
from agno.team.team import Team
from agno.tools.coding import CodingTools
# ---------------------------------------------------------------------------
# Workspace
# ---------------------------------------------------------------------------
# Workspace directory shared by all three team members; created on import.
WORKSPACE = Path(__file__).parent.joinpath("workspace")
WORKSPACE.mkdir(parents=True, exist_ok=True)
# ---------------------------------------------------------------------------
# Storage
# ---------------------------------------------------------------------------
# One SQLite database shared by every member agent and the team leader.
db = SqliteDb(db_file=str(WORKSPACE / "agents.db"))
# ---------------------------------------------------------------------------
# Coder Agent -- writes code
# ---------------------------------------------------------------------------
# Full toolset (all=True): the Coder can read, write, and run code.
coder = Agent(
    name="L4 Coder",
    role="Write code based on requirements",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions="""\
You are a senior developer. Write clean, well-documented code.
## Workflow
1. Understand the requirements
2. Write the implementation with type hints and docstrings
3. Save the code to a file
## Rules
- Write production-quality code
- Include type hints and Google-style docstrings
- Handle edge cases
- No emojis\
""",
    tools=[CodingTools(base_dir=WORKSPACE, all=True)],
    db=db,
    add_datetime_to_context=True,
)
# ---------------------------------------------------------------------------
# Reviewer Agent -- reviews code (read-only tools)
# ---------------------------------------------------------------------------
# The Reviewer gets a deliberately restricted toolset (read/grep/find/ls
# only) so it can inspect code but never modify or execute it.
reviewer = Agent(
    name="L4 Reviewer",
    role="Review code for quality, bugs, and best practices",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions="""\
You are a senior code reviewer. Provide thorough, constructive reviews.
## Workflow
1. Read the code files
2. Check for bugs, edge cases, and style issues
3. Provide specific, actionable feedback
## Review Checklist
- Correctness: Does it handle edge cases?
- Style: Consistent naming, proper type hints?
- Documentation: Clear docstrings?
- Performance: Any obvious inefficiencies?
## Rules
- Be specific -- reference line numbers and code
- Suggest fixes, not just problems
- Acknowledge what's done well
- No emojis\
""",
    tools=[
        CodingTools(
            base_dir=WORKSPACE,
            enable_read_file=True,
            enable_grep=True,
            enable_find=True,
            enable_ls=True,
            # Write/edit/shell are explicitly disabled — review is read-only
            enable_write_file=False,
            enable_edit_file=False,
            enable_run_shell=False,
        ),
    ],
    db=db,
    add_datetime_to_context=True,
)
# ---------------------------------------------------------------------------
# Tester Agent -- writes and runs tests
# ---------------------------------------------------------------------------
# The Tester needs the full toolset again: it writes test files and runs them.
tester = Agent(
    name="L4 Tester",
    role="Write and run tests for the code",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions="""\
You are a QA engineer. Write thorough tests and run them.
## Workflow
1. Read the implementation code
2. Write tests covering normal cases, edge cases, and error cases
3. Save tests to a test file
4. Run the tests and report results
## Rules
- Test both happy path and edge cases
- Test error handling
- Use assert statements with clear messages
- No emojis\
""",
    tools=[CodingTools(base_dir=WORKSPACE, all=True)],
    db=db,
    add_datetime_to_context=True,
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
# The team leader is itself an LLM that delegates to the three members per
# the process below — see the module docstring's caveat about predictability.
l4_coding_team = Team(
    name="L4 Coding Team",
    model=OpenAIResponses(id="gpt-5.2"),
    members=[coder, reviewer, tester],
    instructions="""\
You lead a coding team with a Coder, Reviewer, and Tester.
## Process
1. Send the task to the Coder to implement
2. Send the code to the Reviewer for feedback
3. If the Reviewer finds issues, send back to the Coder to fix
4. Send the final code to the Tester to write and run tests
5. Synthesize results into a final report
## Output Format
Provide a summary with:
- **Implementation**: What was built and key design decisions
- **Review**: Key findings from the code review
- **Tests**: Test results and coverage
- **Status**: Overall pass/fail assessment\
""",
    db=db,
    # Presumably surfaces each member's intermediate output in addition to
    # the leader's synthesis — confirm against Team docs.
    show_members_responses=True,
    add_datetime_to_context=True,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Standalone demo: a task that exercises the full Coder→Reviewer→Tester loop.
    l4_coding_team.print_response(
        "Build a Stack data structure in Python with push, pop, peek, "
        "is_empty, and size methods. Include proper error handling for "
        "operations on an empty stack. Save to stack.py and write tests "
        "in test_stack.py.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/levels_of_agentic_software/level_4_team.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:cookbook/levels_of_agentic_software/level_5_api.py | """
Level 5: Agentic System (Production API)
==========================================
The most complete level. Production infrastructure for agentic software.
Upgrade from development databases to PostgreSQL + PgVector, add tracing,
and expose everything as an API with AgentOS.
This builds on Level 4 by adding:
- PostgresDb: Production-grade session storage
- PgVector: Production-grade vector search
- AgentOS: FastAPI server with web UI, tracing, and session management
Prerequisites:
Start PostgreSQL with PgVector:
./cookbook/scripts/run_pgvector.sh
This starts a Postgres container on port 5532 with:
user=ai, password=ai, database=ai
Run standalone:
python cookbook/levels_of_agentic_software/level_5_api.py
Run via Agent OS:
python cookbook/levels_of_agentic_software/run.py
Then visit https://os.agno.com and select "L5 Coding Agent"
Example prompt:
"Write a function that validates email addresses using regex"
"""
from pathlib import Path
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.knowledge import Knowledge
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.learn import LearnedKnowledgeConfig, LearningMachine, LearningMode
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
from agno.tools.reasoning import ReasoningTools
from agno.vectordb.pgvector import PgVector, SearchType
# ---------------------------------------------------------------------------
# Workspace
# ---------------------------------------------------------------------------
# Scratch directory for generated code; note Level 5 uses tmp/code rather
# than the workspace/ directory used by Levels 1-4.
WORKSPACE = Path(__file__).parent.joinpath("tmp/code")
WORKSPACE.mkdir(parents=True, exist_ok=True)
# ---------------------------------------------------------------------------
# Production Database
# ---------------------------------------------------------------------------
# Connection string matches the container started by run_pgvector.sh
# (user=ai, password=ai, database=ai, port 5532) — see the module docstring.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# ---------------------------------------------------------------------------
# Knowledge: Static docs (PgVector for production)
# ---------------------------------------------------------------------------
docs_knowledge = Knowledge(
    name="L5 Coding Agent Knowledge",
    vector_db=PgVector(
        db_url=db_url,
        table_name="coding_standards",
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,
)
# ---------------------------------------------------------------------------
# Knowledge: Dynamic learnings (PgVector for production)
# ---------------------------------------------------------------------------
# Separate table so learned insights stay apart from the static docs.
learned_knowledge = Knowledge(
    name="L5 Coding Agent Learnings",
    vector_db=PgVector(
        db_url=db_url,
        table_name="coding_learnings",
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,
)
# ---------------------------------------------------------------------------
# Agent Instructions
# ---------------------------------------------------------------------------
# System prompt — the same learning-oriented workflow as Level 3; trailing
# backslashes avoid blank edge lines in the literal.
instructions = """\
You are a production coding agent that learns and improves over time.
## Workflow
1. Search your knowledge and learnings for relevant context
2. Check if the user has preferences you should follow
3. Write code that follows conventions and user preferences
4. Save the code to a file and run it to verify
5. Save any valuable insights or patterns for future use
## Rules
- Always search knowledge and learnings before writing code
- Apply user preferences from memory when writing code
- Save useful coding patterns and insights as learnings
- Follow project conventions from the knowledge base
- No emojis\
"""
# ---------------------------------------------------------------------------
# Create Production Agent
# ---------------------------------------------------------------------------
# Same wiring as the Level 3 agent, but backed by Postgres/PgVector instead
# of SQLite/Chroma — see the module docstring.
l5_coding_agent = Agent(
    name="L5 Coding Agent",
    model=OpenAIResponses(id="gpt-5.2"),
    instructions=instructions,
    tools=[
        CodingTools(base_dir=WORKSPACE, all=True),
        ReasoningTools(),
    ],
    knowledge=docs_knowledge,
    search_knowledge=True,
    learning=LearningMachine(
        knowledge=learned_knowledge,
        learned_knowledge=LearnedKnowledgeConfig(
            mode=LearningMode.AGENTIC,
        ),
    ),
    enable_agentic_memory=True,
    db=db,
    add_history_to_context=True,
    num_history_runs=3,
    add_datetime_to_context=True,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo (standalone)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Requires the Postgres container from the module docstring to be running.
    l5_coding_agent.print_response(
        "Write a function that validates email addresses using regex. "
        "Save it to email_validator.py and test it with valid and invalid examples.",
        user_id="dev@example.com",
        session_id="production_test",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/levels_of_agentic_software/level_5_api.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/levels_of_agentic_software/run.py | """
Agent OS - Web Interface for the 5 Levels of Agentic Software
===============================================================
This file starts an Agent OS server that provides a web interface for
all 5 levels of agentic software from this cookbook.
All levels are available in the Agent OS UI. Level 5 is the most complete,
with production databases, learning, and tracing. Levels 1-4 are included
so you can compare the progression and test each stage interactively.
How to Use
----------
1. Start PostgreSQL (required for Level 5):
./cookbook/scripts/run_pgvector.sh
2. Start the server:
python cookbook/levels_of_agentic_software/run.py
3. Visit https://os.agno.com in your browser
4. Add your local endpoint: http://localhost:7777
5. Select any agent or team and start chatting:
- L1 Coding Agent: Stateless tool calling (no setup needed)
- L2 Coding Agent: Knowledge + storage (ChromaDb + SQLite)
- L3 Coding Agent: Memory + learning (learns from interactions)
- L4 Coding Team: Multi-agent team (Coder/Reviewer/Tester)
- L5 Coding Agent: Production system (PostgreSQL + PgVector + tracing)
Prerequisites
-------------
- PostgreSQL with PgVector running on port 5532 (for Level 5)
- OPENAI_API_KEY environment variable set
"""
from pathlib import Path
from agno.os import AgentOS
from level_1_tools import l1_coding_agent
from level_2_storage_knowledge import l2_coding_agent
from level_3_memory_learning import l3_coding_agent
from level_4_team import l4_coding_team
from level_5_api import l5_coding_agent
# ---------------------------------------------------------------------------
# AgentOS Config
# ---------------------------------------------------------------------------
# Resolve config.yaml relative to this file so the script works from any CWD.
config_path = str(Path(__file__).parent / "config.yaml")

# ---------------------------------------------------------------------------
# Create AgentOS
# ---------------------------------------------------------------------------
# Every level is registered so users can compare the progression side by side;
# Level 5 is the most complete — start there for the full experience.
agent_os = AgentOS(
    id="Coding Agent OS",
    agents=[
        l1_coding_agent,
        l2_coding_agent,
        l3_coding_agent,
        l5_coding_agent,
    ],
    teams=[l4_coding_team],
    config=config_path,
    tracing=True,
)
app = agent_os.get_app()

# ---------------------------------------------------------------------------
# Run AgentOS
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # "run:app" points uvicorn at the module-level `app` above;
    # reload=True enables hot-reload during development.
    agent_os.serve(app="run:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/levels_of_agentic_software/run.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/unit/os/test_client_user_id.py | from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from agno.client import AgentOSClient
from agno.db.base import SessionType
# ---------------------------------------------------------------------------
# SDK Client tests — verify user_id is serialized into HTTP request params
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_run_agent_serializes_empty_string_user_id():
    """Empty-string ids are real values and must be sent, not dropped."""
    client = AgentOSClient(base_url="http://localhost:7777")
    fake_response = {"run_id": "run-1", "agent_id": "a-1", "content": "ok", "created_at": 0}
    with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
        mock_post.return_value = fake_response
        await client.run_agent(agent_id="a-1", message="hi", user_id="", session_id="")
        sent_form = mock_post.call_args[0][1]
        assert sent_form["user_id"] == ""
        assert sent_form["session_id"] == ""


@pytest.mark.asyncio
async def test_run_agent_omits_none_user_id():
    """When ids are not supplied at all, the form must not contain the keys."""
    client = AgentOSClient(base_url="http://localhost:7777")
    fake_response = {"run_id": "run-1", "agent_id": "a-1", "content": "ok", "created_at": 0}
    with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
        mock_post.return_value = fake_response
        await client.run_agent(agent_id="a-1", message="hi")
        sent_form = mock_post.call_args[0][1]
        assert "user_id" not in sent_form
        assert "session_id" not in sent_form
@pytest.mark.asyncio
async def test_run_team_serializes_empty_string_user_id():
    """Team runs must also forward empty-string ids instead of dropping them."""
    client = AgentOSClient(base_url="http://localhost:7777")
    fake_response = {"run_id": "run-1", "team_id": "t-1", "content": "ok", "created_at": 0}
    with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
        mock_post.return_value = fake_response
        await client.run_team(team_id="t-1", message="hi", user_id="", session_id="")
        sent_form = mock_post.call_args[0][1]
        assert sent_form["user_id"] == ""
        assert sent_form["session_id"] == ""


@pytest.mark.asyncio
async def test_run_workflow_serializes_empty_string_user_id():
    """Workflow runs must also forward empty-string ids instead of dropping them."""
    client = AgentOSClient(base_url="http://localhost:7777")
    fake_response = {"run_id": "run-1", "workflow_id": "w-1", "content": "ok", "created_at": 0}
    with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
        mock_post.return_value = fake_response
        await client.run_workflow(workflow_id="w-1", message="hi", user_id="", session_id="")
        sent_form = mock_post.call_args[0][1]
        assert sent_form["user_id"] == ""
        assert sent_form["session_id"] == ""
@pytest.mark.asyncio
async def test_delete_session_includes_user_id_in_params():
    """delete_session forwards user_id as an HTTP query parameter."""
    client = AgentOSClient(base_url="http://localhost:7777")
    with patch.object(client, "_adelete", new_callable=AsyncMock) as mock_delete:
        await client.delete_session("sess-1", user_id="alice")
        query_params = mock_delete.call_args.kwargs["params"]
        assert query_params["user_id"] == "alice"


@pytest.mark.asyncio
async def test_delete_session_omits_user_id_when_none():
    """delete_session leaves user_id out of the query when not provided."""
    client = AgentOSClient(base_url="http://localhost:7777")
    with patch.object(client, "_adelete", new_callable=AsyncMock) as mock_delete:
        await client.delete_session("sess-1")
        query_params = mock_delete.call_args.kwargs.get("params", {})
        assert "user_id" not in query_params


@pytest.mark.asyncio
async def test_delete_sessions_includes_user_id_in_params():
    """Bulk delete forwards user_id as an HTTP query parameter."""
    client = AgentOSClient(base_url="http://localhost:7777")
    with patch.object(client, "_adelete", new_callable=AsyncMock) as mock_delete:
        await client.delete_sessions(
            session_ids=["s-1"],
            session_types=[SessionType.AGENT],
            user_id="alice",
        )
        query_params = mock_delete.call_args.kwargs["params"]
        assert query_params["user_id"] == "alice"


@pytest.mark.asyncio
async def test_rename_session_includes_user_id_in_params():
    """rename_session forwards user_id as an HTTP query parameter."""
    client = AgentOSClient(base_url="http://localhost:7777")
    renamed_payload = {
        "agent_session_id": "as-1",
        "session_id": "sess-1",
        "session_name": "Renamed",
        "agent_id": "a-1",
        "user_id": "alice",
    }
    with patch.object(client, "_apost", new_callable=AsyncMock) as mock_post:
        mock_post.return_value = renamed_payload
        await client.rename_session("sess-1", "Renamed", user_id="alice")
        query_params = mock_post.call_args.kwargs["params"]
        assert query_params["user_id"] == "alice"
# ---------------------------------------------------------------------------
# FastAPI Router tests — verify Query(user_id) actually binds from ?user_id=
# ---------------------------------------------------------------------------
@pytest.fixture
def mock_db():
    """Fake DB exposing the three session operations the router invokes."""
    from agno.session.agent import AgentSession

    db = MagicMock()
    db.delete_session = MagicMock()
    db.delete_sessions = MagicMock()
    # rename_session must return a real AgentSession because the router
    # serializes the result into the HTTP response body.
    db.rename_session = MagicMock(
        return_value=AgentSession(
            session_id="sess-1",
            agent_id="a-1",
            user_id="alice",
            session_data={"session_name": "Renamed"},
            runs=[],
            created_at=0,
            updated_at=0,
        )
    )
    return db


@pytest.fixture
def test_app(mock_db, monkeypatch):
    """Session router mounted on a bare FastAPI app, with no auth configured."""
    # Ensure no security key is active so requests hit the router directly.
    monkeypatch.delenv("OS_SECURITY_KEY", raising=False)
    from agno.os.routers.session.session import get_session_router
    from agno.os.settings import AgnoAPISettings

    settings = AgnoAPISettings()
    dbs = {"default": [mock_db]}
    router = get_session_router(dbs, settings)
    app = FastAPI()
    app.include_router(router)
    return app
@pytest.fixture
def test_app_with_jwt(mock_db, monkeypatch):
    """Test app with simulated JWT middleware that sets request.state.user_id."""
    monkeypatch.delenv("OS_SECURITY_KEY", raising=False)
    from starlette.middleware.base import BaseHTTPMiddleware

    from agno.os.routers.session.session import get_session_router
    from agno.os.settings import AgnoAPISettings

    settings = AgnoAPISettings()
    dbs = {"default": [mock_db]}
    router = get_session_router(dbs, settings)
    app = FastAPI()
    app.include_router(router)

    class FakeJWTMiddleware(BaseHTTPMiddleware):
        # Stamp a fixed identity on every request, as the real JWT
        # middleware would after validating a token.
        async def dispatch(self, request, call_next):
            request.state.user_id = "jwt_alice"
            return await call_next(request)

    app.add_middleware(FakeJWTMiddleware)
    return app
def test_router_delete_session_jwt_overrides_query_user_id(test_app_with_jwt, mock_db):
    """JWT user_id must always override client-supplied ?user_id (IDOR protection)."""
    http = TestClient(test_app_with_jwt)
    response = http.delete("/sessions/sess-1?user_id=attacker")
    assert response.status_code == 204
    db_kwargs = mock_db.delete_session.call_args.kwargs
    assert db_kwargs["user_id"] == "jwt_alice"
    assert db_kwargs["session_id"] == "sess-1"


def test_router_delete_sessions_jwt_overrides_query_user_id(test_app_with_jwt, mock_db):
    """JWT user_id must override client-supplied ?user_id for bulk delete."""
    http = TestClient(test_app_with_jwt)
    response = http.request(
        "DELETE",
        "/sessions?user_id=attacker",
        json={"session_ids": ["s-1"], "session_types": ["agent"]},
    )
    assert response.status_code == 204
    db_kwargs = mock_db.delete_sessions.call_args.kwargs
    assert db_kwargs["user_id"] == "jwt_alice"


def test_router_rename_session_jwt_overrides_query_user_id(test_app_with_jwt, mock_db):
    """JWT user_id must override client-supplied ?user_id for rename."""
    http = TestClient(test_app_with_jwt)
    response = http.post(
        "/sessions/sess-1/rename?user_id=attacker",
        json={"session_name": "Hacked"},
    )
    assert response.status_code == 200
    db_kwargs = mock_db.rename_session.call_args.kwargs
    assert db_kwargs["user_id"] == "jwt_alice"
    assert db_kwargs["session_id"] == "sess-1"
def test_router_delete_session_receives_user_id_from_query(test_app, mock_db):
    """FastAPI must bind ?user_id=alice to the endpoint's user_id parameter."""
    http = TestClient(test_app)
    response = http.delete("/sessions/sess-1?user_id=alice")
    assert response.status_code == 204
    mock_db.delete_session.assert_called_once()
    db_kwargs = mock_db.delete_session.call_args.kwargs
    assert db_kwargs["user_id"] == "alice"
    assert db_kwargs["session_id"] == "sess-1"


def test_router_delete_session_user_id_defaults_to_none(test_app, mock_db):
    """Without ?user_id=, the param should be None (no user scoping)."""
    http = TestClient(test_app)
    response = http.delete("/sessions/sess-1")
    assert response.status_code == 204
    db_kwargs = mock_db.delete_session.call_args.kwargs
    assert db_kwargs["user_id"] is None


def test_router_delete_sessions_receives_user_id_from_query(test_app, mock_db):
    """Bulk delete must bind user_id from the query string."""
    http = TestClient(test_app)
    response = http.request(
        "DELETE",
        "/sessions?user_id=alice",
        json={"session_ids": ["s-1"], "session_types": ["agent"]},
    )
    assert response.status_code == 204
    mock_db.delete_sessions.assert_called_once()
    db_kwargs = mock_db.delete_sessions.call_args.kwargs
    assert db_kwargs["user_id"] == "alice"


def test_router_rename_session_receives_user_id_from_query(test_app, mock_db):
    """Rename must bind user_id from the query string and pass it to the DB."""
    http = TestClient(test_app)
    response = http.post(
        "/sessions/sess-1/rename?user_id=alice",
        json={"session_name": "Renamed"},
    )
    assert response.status_code == 200
    mock_db.rename_session.assert_called_once()
    db_kwargs = mock_db.rename_session.call_args.kwargs
    assert db_kwargs["user_id"] == "alice"
    assert db_kwargs["session_id"] == "sess-1"
    assert db_kwargs["session_name"] == "Renamed"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/os/test_client_user_id.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_datetime_serialization.py | """
Unit tests for datetime serialization in database utilities.
These tests verify the fix for GitHub issue #6327:
TypeError: Object of type datetime is not JSON serializable when saving agent sessions.
"""
import json
from datetime import date, datetime, timezone
from uuid import uuid4
from agno.db.utils import CustomJSONEncoder, json_serializer, serialize_session_json_fields
from agno.session.agent import AgentSession
class TestCustomJSONEncoder:
    """CustomJSONEncoder must turn datetime/date/UUID/type values into JSON-safe strings."""

    def test_encode_datetime(self):
        """Aware datetimes encode to their ISO-8601 representation."""
        aware = datetime(2025, 1, 15, 10, 30, 0, tzinfo=timezone.utc)
        encoded = json.dumps({"timestamp": aware}, cls=CustomJSONEncoder)
        assert '"2025-01-15T10:30:00+00:00"' in encoded

    def test_encode_datetime_naive(self):
        """Naive datetimes encode to ISO format without a UTC offset."""
        naive = datetime(2025, 1, 15, 10, 30, 0)
        encoded = json.dumps({"timestamp": naive}, cls=CustomJSONEncoder)
        assert '"2025-01-15T10:30:00"' in encoded

    def test_encode_date(self):
        """Dates encode to ISO format."""
        day = date(2025, 1, 15)
        encoded = json.dumps({"date": day}, cls=CustomJSONEncoder)
        assert '"2025-01-15"' in encoded

    def test_encode_uuid(self):
        """UUIDs encode to their string form."""
        unique_id = uuid4()
        encoded = json.dumps({"id": unique_id}, cls=CustomJSONEncoder)
        assert str(unique_id) in encoded

    def test_encode_nested_datetime(self):
        """Datetimes are converted at any nesting depth, not just the top level."""
        payload = {
            "created_at": datetime(2025, 1, 15, 10, 0, 0, tzinfo=timezone.utc),
            "nested": {
                "updated_at": datetime(2025, 1, 16, 12, 0, 0, tzinfo=timezone.utc),
                "items": [
                    {"date": date(2025, 1, 17)},
                ],
            },
        }
        round_tripped = json.loads(json.dumps(payload, cls=CustomJSONEncoder))
        assert round_tripped["created_at"] == "2025-01-15T10:00:00+00:00"
        assert round_tripped["nested"]["updated_at"] == "2025-01-16T12:00:00+00:00"
        assert round_tripped["nested"]["items"][0]["date"] == "2025-01-17"

    def test_encode_type(self):
        """Type objects encode to their repr string."""
        encoded = json.dumps({"type": str}, cls=CustomJSONEncoder)
        assert "<class 'str'>" in encoded
class TestJsonSerializer:
    """json_serializer (passed to the SQLAlchemy engine) must handle datetimes."""

    def test_serializer_with_datetime(self):
        """A top-level datetime is converted to its ISO string."""
        payload = {"timestamp": datetime(2025, 1, 15, 10, 0, 0, tzinfo=timezone.utc)}
        encoded = json_serializer(payload)
        assert '"2025-01-15T10:00:00+00:00"' in encoded

    def test_serializer_with_nested_datetime(self):
        """Nested datetimes must not raise TypeError."""
        payload = {
            "metadata": {
                "created_at": datetime.now(timezone.utc),
                "nested": {
                    "updated_at": datetime.now(timezone.utc),
                },
            }
        }
        encoded = json_serializer(payload)
        assert isinstance(encoded, str)

    def test_serializer_returns_valid_json(self):
        """Output is parseable JSON containing all original keys."""
        payload = {
            "id": uuid4(),
            "timestamp": datetime.now(timezone.utc),
            "date": date.today(),
        }
        decoded = json.loads(json_serializer(payload))
        assert "id" in decoded
        assert "timestamp" in decoded
        assert "date" in decoded
class TestSerializeSessionJsonFields:
    """Tests for serialize_session_json_fields function used by SQLite."""

    def test_serialize_metadata_with_datetime(self):
        """Test that metadata with datetime is serialized correctly."""
        session = {
            "session_id": "test-123",
            "metadata": {
                "created_at": datetime(2025, 1, 15, 10, 0, 0, tzinfo=timezone.utc),
                "environment": "test",
            },
        }
        result = serialize_session_json_fields(session)
        # metadata should now be a JSON string
        assert isinstance(result["metadata"], str)
        # Parse it back and verify datetime was converted
        parsed = json.loads(result["metadata"])
        assert parsed["created_at"] == "2025-01-15T10:00:00+00:00"
        assert parsed["environment"] == "test"

    def test_serialize_session_data_with_datetime(self):
        """Test that session_data with datetime is serialized correctly."""
        session = {
            "session_id": "test-123",
            "session_data": {
                "last_updated": datetime.now(timezone.utc),
            },
        }
        result = serialize_session_json_fields(session)
        assert isinstance(result["session_data"], str)
        parsed = json.loads(result["session_data"])
        assert "last_updated" in parsed

    def test_serialize_agent_data_with_datetime(self):
        """Test that agent_data with datetime is serialized correctly."""
        session = {
            "session_id": "test-123",
            "agent_data": {
                "agent_id": "agent-1",
                "initialized_at": datetime.now(timezone.utc),
            },
        }
        result = serialize_session_json_fields(session)
        assert isinstance(result["agent_data"], str)
        parsed = json.loads(result["agent_data"])
        assert parsed["agent_id"] == "agent-1"
        assert "initialized_at" in parsed

    def test_serialize_all_fields_with_datetime(self):
        """Test that all JSON fields can contain datetime objects."""
        now = datetime.now(timezone.utc)
        # Every JSON-typed column the SQLite layer serializes is exercised here.
        session = {
            "session_id": "test-123",
            "session_data": {"ts": now},
            "agent_data": {"ts": now},
            "team_data": {"ts": now},
            "workflow_data": {"ts": now},
            "metadata": {"ts": now},
            "chat_history": [{"ts": now}],
            "summary": {"ts": now},
            "runs": [{"ts": now}],
        }
        # Should not raise TypeError
        result = serialize_session_json_fields(session)
        # All fields should be JSON strings now
        for field in [
            "session_data",
            "agent_data",
            "team_data",
            "workflow_data",
            "metadata",
            "chat_history",
            "summary",
            "runs",
        ]:
            assert isinstance(result[field], str), f"{field} should be a string"

    def test_serialize_none_fields(self):
        """Test that None fields are handled correctly."""
        # None must pass through untouched (NULL column), not become "null".
        session = {
            "session_id": "test-123",
            "metadata": None,
            "session_data": None,
        }
        result = serialize_session_json_fields(session)
        assert result["metadata"] is None
        assert result["session_data"] is None
class TestAgentSessionWithDatetime:
    """AgentSession.to_dict output must remain encodable via CustomJSONEncoder."""

    def test_session_to_dict_with_datetime_metadata(self):
        """to_dict preserves datetime metadata; CustomJSONEncoder can then encode it."""
        session = AgentSession(
            session_id="test-123",
            agent_id="agent-1",
            metadata={
                "created_at": datetime(2025, 1, 15, 10, 0, 0, tzinfo=timezone.utc),
                "nested": {
                    "updated_at": datetime.now(timezone.utc),
                },
            },
        )
        as_dict = session.to_dict()
        assert "metadata" in as_dict
        encoded = json.dumps(as_dict["metadata"], cls=CustomJSONEncoder)
        assert isinstance(encoded, str)

    def test_session_to_dict_with_datetime_session_data(self):
        """Same guarantee for datetime values inside session_data."""
        session = AgentSession(
            session_id="test-123",
            agent_id="agent-1",
            session_data={
                "last_activity": datetime.now(timezone.utc),
            },
        )
        as_dict = session.to_dict()
        encoded = json.dumps(as_dict["session_data"], cls=CustomJSONEncoder)
        assert isinstance(encoded, str)
class TestDatetimeSerializationRegression:
    """Regression tests for GitHub issue #6327."""

    def test_issue_6327_metadata_with_datetime(self):
        """
        Regression test for issue #6327.

        When using datetime objects in agent metadata, the session save
        should not fail with "TypeError: Object of type datetime is not JSON serializable".
        """
        # This is the exact scenario from the bug report
        session_metadata = {
            "created_at": datetime.now(timezone.utc),
            "environment": "test",
            "nested": {
                "last_updated": datetime.now(timezone.utc),
            },
        }
        session = AgentSession(
            session_id="test-session-123",
            agent_id="test-agent",
            user_id="test-user",
            metadata=session_metadata,
        )
        session_dict = session.to_dict()
        # This should NOT raise TypeError
        # (copy() so the original dict is left untouched for clarity)
        serialized = serialize_session_json_fields(session_dict.copy())
        # Verify metadata was serialized
        assert isinstance(serialized["metadata"], str)
        parsed = json.loads(serialized["metadata"])
        assert "created_at" in parsed
        assert "nested" in parsed
        assert "last_updated" in parsed["nested"]

    def test_issue_6327_json_serializer_for_postgres(self):
        """
        Test that json_serializer works for PostgreSQL JSONB columns.

        PostgreSQL uses json_serializer parameter on create_engine() to handle
        non-JSON-serializable types in JSONB columns.
        """
        # Simulate what PostgreSQL would store in JSONB
        data = {
            "created_at": datetime.now(timezone.utc),
            "nested": {
                "timestamp": datetime.now(timezone.utc),
            },
        }
        # json_serializer is what SQLAlchemy calls for JSONB serialization
        result = json_serializer(data)
        # Should be valid JSON
        parsed = json.loads(result)
        assert isinstance(parsed["created_at"], str)
        assert isinstance(parsed["nested"]["timestamp"], str)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_datetime_serialization.py",
"license": "Apache License 2.0",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/04_workflows/06_advanced_concepts/file_propagation/file_generation_workflow.py | from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.tools.file_generation import FileGenerationTools
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# ---------------------------------------------------------------------------
# Step 1: Generate a Report
# ---------------------------------------------------------------------------
# NOTE(review): gpt-4o-mini here vs gpt-4o for analysis below — presumably a
# cost/quality tradeoff; confirm before changing either model id.
report_generator = Agent(
    name="Report Generator",
    model=OpenAIChat(id="gpt-4o-mini"),
    # enable_pdf_generation exposes the generate_pdf_file tool referenced
    # in the instructions below.
    tools=[FileGenerationTools(enable_pdf_generation=True)],
    instructions=[
        "You are a data analyst that generates reports.",
        "When asked to create a report, use the generate_pdf_file tool to create it.",
        "Include meaningful data in the report.",
    ],
)
generate_report_step = Step(
    name="Generate Report",
    agent=report_generator,
    description="Generate a PDF report with quarterly sales data",
)
# ---------------------------------------------------------------------------
# Step 2: Analyze the Report
# ---------------------------------------------------------------------------
# The analyzer has no file tools of its own; it consumes the PDF produced
# by Step 1 (file propagation between steps is the point of this demo).
report_analyzer = Agent(
    name="Report Analyzer",
    model=OpenAIChat(id="gpt-4o"),
    instructions=[
        "You are a business analyst.",
        "Analyze the attached PDF report and provide insights.",
        "Focus on trends, anomalies, and recommendations.",
    ],
)
analyze_report_step = Step(
    name="Analyze Report",
    agent=report_analyzer,
    description="Analyze the generated report and provide insights",
)
# ---------------------------------------------------------------------------
# Create Workflow
# ---------------------------------------------------------------------------
# SQLite session storage keeps workflow runs under tmp/.
report_workflow = Workflow(
    name="Report Generation and Analysis",
    description="Generate a report and analyze it for insights",
    db=SqliteDb(
        session_table="file_propagation_workflow",
        db_file="tmp/file_propagation_workflow.db",
    ),
    steps=[generate_report_step, analyze_report_step],
)
# ---------------------------------------------------------------------------
# Run Workflow
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    banner = "=" * 60
    rule = "-" * 60

    print(banner)
    print("File Generation and Propagation Workflow")
    print(banner)
    print()
    print("This workflow demonstrates file propagation between steps:")
    print("1. Step 1 generates a PDF report using FileGenerationTools")
    print("2. The file is automatically propagated to Step 2")
    print("3. Step 2 analyzes the report content")
    print()
    print(rule)

    result = report_workflow.run(
        input="Create a quarterly sales report for Q4 2024 with data for 4 regions (North, South, East, West) and then analyze it for insights.",
    )

    print()
    print(banner)
    print("Workflow Result")
    print(banner)
    print()
    print(result.content)
    print()

    # Show whether any generated files reached the final workflow output.
    print(rule)
    print("Files in workflow output:")
    if result.files:
        for generated in result.files:
            print(f" - {generated.filename} ({generated.mime_type}, {generated.size} bytes)")
    else:
        print(" No files in final output (files were consumed by analysis step)")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/04_workflows/06_advanced_concepts/file_propagation/file_generation_workflow.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/filters/isolate_with_filter_expressions.py | """
Demonstrates isolate_vector_search combined with list-based FilterExpr filters.
When multiple Knowledge instances share the same vector database and
isolate_vector_search=True, each instance's searches are scoped to its own data
via an auto-injected linked_to filter. This works seamlessly with user-supplied
FilterExpr filters — the linked_to filter is prepended automatically.
This cookbook shows:
1. Two Knowledge instances sharing one vector database, each isolated.
2. Inserting documents with metadata into each instance.
3. Querying via an Agent with knowledge_filters using EQ, IN, AND, NOT operators.
4. The linked_to filter is auto-injected alongside user filters.
"""
from agno.agent import Agent
from agno.filters import AND, EQ, IN, NOT
from agno.knowledge.knowledge import Knowledge
from agno.utils.media import (
SampleDataFileExtension,
download_knowledge_filters_sample_data,
)
from agno.vectordb.pgvector import PgVector
# Download sample CSV files — 4 files with sales/survey/financial data
downloaded_csv_paths = download_knowledge_filters_sample_data(
    num_files=4, file_extension=SampleDataFileExtension.CSV
)

# Shared vector database — both Knowledge instances write to the same table
vector_db = PgVector(
    table_name="isolated_filter_demo",
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
)

# -----------------------------------------------------------------------------
# Two isolated Knowledge instances sharing the same vector database
# -----------------------------------------------------------------------------
sales_knowledge = Knowledge(
    name="sales-data",
    description="Sales and financial data",
    vector_db=vector_db,
    isolate_vector_search=True,  # Scoped to sales-data documents only
)
survey_knowledge = Knowledge(
    name="survey-data",
    description="Customer survey data",
    vector_db=vector_db,
    isolate_vector_search=True,  # Scoped to survey-data documents only
)

# -----------------------------------------------------------------------------
# Insert documents into each isolated instance
# Documents are tagged with linked_to metadata automatically
# -----------------------------------------------------------------------------
# The document payloads are named up front so the insert calls stay readable.
sales_documents = [
    {
        "path": downloaded_csv_paths[0],
        "metadata": {
            "data_type": "sales",
            "quarter": "Q1",
            "year": 2024,
            "region": "north_america",
            "currency": "USD",
        },
    },
    {
        "path": downloaded_csv_paths[1],
        "metadata": {
            "data_type": "sales",
            "year": 2024,
            "region": "europe",
            "currency": "EUR",
        },
    },
    {
        "path": downloaded_csv_paths[3],
        "metadata": {
            "data_type": "financial",
            "sector": "technology",
            "year": 2024,
            "report_type": "quarterly_earnings",
        },
    },
]
survey_documents = [
    {
        "path": downloaded_csv_paths[2],
        "metadata": {
            "data_type": "survey",
            "survey_type": "customer_satisfaction",
            "year": 2024,
            "target_demographic": "mixed",
        },
    },
]

sales_knowledge.insert_many(sales_documents)
survey_knowledge.insert_many(survey_documents)
# -----------------------------------------------------------------------------
# Query with list-based FilterExpr filters
# The linked_to filter is auto-injected alongside any user-supplied filters
# -----------------------------------------------------------------------------
sales_agent = Agent(
    knowledge=sales_knowledge,
    search_knowledge=True,
)
survey_agent = Agent(
    knowledge=survey_knowledge,
    search_knowledge=True,
)

# Each tuple: (banner, prompt, filters). Effective filters always include the
# auto-injected linked_to="sales-data" scope in addition to those listed.
sales_queries = [
    (
        "--- Sales agent: EQ filter (North America only) ---",
        "Describe revenue performance for the region",
        [EQ("region", "north_america")],
    ),
    (
        "--- Sales agent: IN filter (multiple regions) ---",
        "Compare revenue across regions",
        [IN("region", ["north_america", "europe"])],
    ),
    (
        "--- Sales agent: AND + NOT compound filter ---",
        "Describe revenue performance excluding Europe",
        [AND(EQ("data_type", "sales"), NOT(EQ("region", "europe")))],
    ),
]
for banner, prompt, filters in sales_queries:
    print(banner)
    sales_agent.print_response(prompt, knowledge_filters=filters, markdown=True)

# Survey agent — isolated to survey-data only, even on the shared vector DB.
# Effective filters: linked_to="survey-data" AND survey_type="customer_satisfaction"
print("--- Survey agent: EQ filter (customer satisfaction) ---")
survey_agent.print_response(
    "Summarize the customer satisfaction survey results",
    knowledge_filters=[EQ("survey_type", "customer_satisfaction")],
    markdown=True,
)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/filters/isolate_with_filter_expressions.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/custom_retriever/team_retriever.py | from typing import Optional
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.team.team import Team
from agno.vectordb.qdrant import Qdrant
from qdrant_client import QdrantClient
# ---------------------------------------------------------
# This section loads the knowledge base. Skip if your knowledge base was populated elsewhere.
# Define the embedder
embedder = OpenAIEmbedder(id="text-embedding-3-small")
# Initialize vector database connection
# NOTE(review): assumes a local Qdrant instance on port 6333 — confirm before running.
vector_db = Qdrant(
    collection="thai-recipes", url="http://localhost:6333", embedder=embedder
)
# Load the knowledge base
knowledge = Knowledge(
    vector_db=vector_db,
)
# Download and index the sample recipe PDF into the "thai-recipes" collection.
knowledge.insert(
    url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf",
)
# ---------------------------------------------------------
# Define the custom knowledge retriever
def knowledge_retriever(
    query: str, team: Optional[Team] = None, num_documents: int = 5, **kwargs
) -> Optional[list[dict]]:
    """
    Custom knowledge retriever function for a Team.

    Embeds the query with the module-level embedder and searches the
    "thai-recipes" Qdrant collection directly.

    Args:
        query (str): The search query string
        team (Optional[Team]): The team instance making the query
        num_documents (int): Number of documents to retrieve (default: 5)
        **kwargs: Additional keyword arguments (ignored)

    Returns:
        Optional[list[dict]]: List of retrieved documents or None if search fails
    """
    qdrant_client = None
    try:
        qdrant_client = QdrantClient(url="http://localhost:6333")
        query_embedding = embedder.get_embedding(query)
        results = qdrant_client.query_points(
            collection_name="thai-recipes",
            query=query_embedding,
            limit=num_documents,
        )
        # query_points returns a QueryResponse; its "points" are the hits.
        # .get() returns None when the key is absent, matching the old branch.
        return results.model_dump().get("points")
    except Exception as e:
        # Best-effort retriever: log and return None so the team can continue.
        print(f"Error during vector database search: {str(e)}")
        return None
    finally:
        # Fix: close the per-call client so connections are not leaked on
        # every retrieval (the original never closed it).
        if qdrant_client is not None:
            qdrant_client.close()
def main():
    """Demonstrate a Team that searches knowledge via a custom retriever."""
    # Member agent responsible only for presenting retrieved results.
    summary_agent = Agent(
        name="Summary Agent",
        role="Summarize and format recipe information into clear, readable responses",
    )

    # The team performs the knowledge search itself (through the custom
    # retriever) and delegates only the formatting work to the summary agent.
    recipe_team = Team(
        name="Recipe Team",
        members=[summary_agent],
        knowledge=knowledge,
        knowledge_retriever=knowledge_retriever,
        search_knowledge=True,
        instructions=[
            "Always use the search_knowledge_base tool to find recipe information before delegating to members.",
            "Delegate to the Summary Agent only for formatting the results.",
        ],
    )

    recipe_team.print_response(
        "List down the ingredients to make Massaman Gai", markdown=True
    )


if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/custom_retriever/team_retriever.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/agent/test_knowledge_retriever_tool_priority.py | """Tests for unified knowledge search tool registration.
Regression test for https://github.com/agno-agi/agno/issues/6533
All knowledge search goes through a single unified path via
get_relevant_docs_from_knowledge(), which checks knowledge_retriever
first and falls back to knowledge.search().
"""
from unittest.mock import MagicMock
import pytest
from agno.agent import Agent
from agno.agent._tools import aget_tools, get_tools
from agno.models.base import Function
from agno.run.agent import RunOutput
from agno.run.base import RunContext
from agno.session.agent import AgentSession
class MockKnowledge:
    """Stand-in knowledge base exposing only the attributes tool registration reads."""

    def __init__(self):
        # The registration path inspects just these two attributes.
        self.max_results, self.vector_db = 5, None
def _make_run_context():
    # Minimal RunContext fixture shared by every test in this module.
    return RunContext(run_id="test-run", session_id="test-session")
def _make_session():
    # Minimal AgentSession fixture; only the session_id is required here.
    return AgentSession(session_id="test-session")
def _make_run_response():
    # Minimal RunOutput fixture matching the ids used by the other fixtures.
    return RunOutput(run_id="test-run", session_id="test-session", agent_id="test-agent")
def _get_knowledge_tools(tools):
    """Return only the registered knowledge-search Function(s) from *tools*."""
    matches = []
    for candidate in tools:
        if isinstance(candidate, Function) and candidate.name == "search_knowledge_base":
            matches.append(candidate)
    return matches
def test_get_tools_registers_search_tool_when_both_knowledge_and_retriever_set():
    """Both knowledge and knowledge_retriever set -> exactly one search tool."""

    def fake_retriever(query, agent=None, num_documents=None, **kwargs):
        return [{"content": "from retriever"}]

    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = fake_retriever  # type: ignore
    registered = get_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    assert len(_get_knowledge_tools(registered)) == 1
def test_get_tools_registers_search_tool_when_only_knowledge_set():
    """Knowledge without a custom retriever still yields the search tool."""
    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = None
    registered = get_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    assert len(_get_knowledge_tools(registered)) == 1
def test_get_tools_registers_search_tool_when_only_retriever_set():
    """A retriever alone (no knowledge base) is enough to register the tool."""

    def fake_retriever(query, agent=None, num_documents=None, **kwargs):
        return [{"content": "from retriever"}]

    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = None
    agent.knowledge_retriever = fake_retriever  # type: ignore
    registered = get_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    assert len(_get_knowledge_tools(registered)) == 1
def test_get_tools_no_search_tool_when_neither_knowledge_nor_retriever_set():
    """With no knowledge source at all, no search tool may appear."""
    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = None
    agent.knowledge_retriever = None
    registered = get_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    assert not _get_knowledge_tools(registered)
def test_search_tool_invokes_custom_retriever_when_both_set():
    """Invoking the registered tool must route through the custom retriever."""
    spy = MagicMock(return_value=[{"content": "from custom retriever"}])
    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = spy  # type: ignore
    registered = _get_knowledge_tools(
        get_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    )
    assert len(registered) == 1
    # Call the tool exactly as the model would.
    output = registered[0].entrypoint("test query")
    spy.assert_called_once()
    assert "from custom retriever" in output
def test_search_tool_invokes_custom_retriever_when_only_retriever_set():
    """With no knowledge base, invoking the tool still calls the retriever."""
    spy = MagicMock(return_value=[{"content": "retriever only"}])
    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = None
    agent.knowledge_retriever = spy  # type: ignore
    registered = _get_knowledge_tools(
        get_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    )
    assert len(registered) == 1
    output = registered[0].entrypoint("test query")
    spy.assert_called_once()
    assert "retriever only" in output
@pytest.mark.asyncio
async def test_aget_tools_registers_search_tool_when_both_knowledge_and_retriever_set():
    """Async variant: both knowledge and retriever set -> one search tool."""

    def fake_retriever(query, agent=None, num_documents=None, **kwargs):
        return [{"content": "from retriever"}]

    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = fake_retriever  # type: ignore
    registered = await aget_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    assert len(_get_knowledge_tools(registered)) == 1
@pytest.mark.asyncio
async def test_aget_tools_registers_search_tool_when_only_knowledge_set():
    """Async variant: knowledge alone still registers the search tool."""
    agent = Agent()
    agent.search_knowledge = True
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = None
    registered = await aget_tools(agent, _make_run_response(), _make_run_context(), _make_session())
    assert len(_get_knowledge_tools(registered)) == 1
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_knowledge_retriever_tool_priority.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_knowledge_retriever_tool_priority.py | """Tests for unified knowledge search tool registration on Team.
Regression test for https://github.com/agno-agi/agno/issues/6533
Same fix as Agent: all knowledge search goes through a single unified path
via get_relevant_docs_from_knowledge().
"""
from unittest.mock import MagicMock
from agno.models.base import Function
from agno.run.base import RunContext
from agno.run.team import TeamRunOutput
from agno.session.team import TeamSession
from agno.team.team import Team
class MockKnowledge:
    """Bare-bones knowledge stand-in; only these attributes are read during registration."""

    def __init__(self):
        # No real vector store is needed for tool-registration tests.
        self.max_results, self.vector_db = 5, None
def _make_run_context():
    # Minimal RunContext fixture shared by all tests in this module.
    return RunContext(run_id="test-run", session_id="test-session")
def _make_session():
    # Minimal TeamSession fixture; only the session_id is needed here.
    return TeamSession(session_id="test-session")
def _make_run_response():
    # Minimal TeamRunOutput fixture matching the ids used by the other fixtures.
    return TeamRunOutput(run_id="test-run", session_id="test-session", team_id="test-team")
def _make_model():
model = MagicMock()
model.get_tools_for_api.return_value = []
model.add_tool.return_value = None
return model
def _get_knowledge_tools(tools):
    """Keep only the registered knowledge-search Function(s) from *tools*."""
    found = []
    for candidate in tools:
        if isinstance(candidate, Function) and candidate.name == "search_knowledge_base":
            found.append(candidate)
    return found
def test_team_tools_registers_search_tool_when_both_knowledge_and_retriever_set():
    """Both knowledge and knowledge_retriever set -> exactly one search tool."""
    from agno.team._tools import _determine_tools_for_model

    def fake_retriever(query, team=None, num_documents=None, **kwargs):
        return [{"content": "from retriever"}]

    team = Team(name="test-team", members=[])
    team.search_knowledge = True
    team.knowledge = MockKnowledge()  # type: ignore
    team.knowledge_retriever = fake_retriever  # type: ignore
    registered = _determine_tools_for_model(
        team=team,
        model=_make_model(),
        run_response=_make_run_response(),
        run_context=_make_run_context(),
        team_run_context={},
        session=_make_session(),
        async_mode=False,
    )
    assert len(_get_knowledge_tools(registered)) == 1
def test_team_tools_registers_search_tool_when_only_knowledge_set():
    """Knowledge without a custom retriever still yields the search tool."""
    from agno.team._tools import _determine_tools_for_model

    team = Team(name="test-team", members=[])
    team.search_knowledge = True
    team.knowledge = MockKnowledge()  # type: ignore
    team.knowledge_retriever = None
    registered = _determine_tools_for_model(
        team=team,
        model=_make_model(),
        run_response=_make_run_response(),
        run_context=_make_run_context(),
        team_run_context={},
        session=_make_session(),
        async_mode=False,
    )
    assert len(_get_knowledge_tools(registered)) == 1
def test_team_tools_registers_search_tool_when_only_retriever_set():
    """A retriever alone (no knowledge base) is enough to register the tool."""
    from agno.team._tools import _determine_tools_for_model

    def fake_retriever(query, team=None, num_documents=None, **kwargs):
        return [{"content": "from retriever"}]

    team = Team(name="test-team", members=[])
    team.search_knowledge = True
    team.knowledge = None
    team.knowledge_retriever = fake_retriever  # type: ignore
    registered = _determine_tools_for_model(
        team=team,
        model=_make_model(),
        run_response=_make_run_response(),
        run_context=_make_run_context(),
        team_run_context={},
        session=_make_session(),
        async_mode=False,
    )
    assert len(_get_knowledge_tools(registered)) == 1
def test_team_search_tool_invokes_custom_retriever_when_both_set():
    """Invoking the registered tool must route through the custom retriever."""
    from agno.team._tools import _determine_tools_for_model

    spy = MagicMock(return_value=[{"content": "from team retriever"}])
    team = Team(name="test-team", members=[])
    team.search_knowledge = True
    team.knowledge = MockKnowledge()  # type: ignore
    team.knowledge_retriever = spy  # type: ignore
    registered = _get_knowledge_tools(
        _determine_tools_for_model(
            team=team,
            model=_make_model(),
            run_response=_make_run_response(),
            run_context=_make_run_context(),
            team_run_context={},
            session=_make_session(),
            async_mode=False,
        )
    )
    assert len(registered) == 1
    # Call the tool exactly as the model would.
    output = registered[0].entrypoint("test query")
    spy.assert_called_once()
    assert "from team retriever" in output
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_knowledge_retriever_tool_priority.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_parallel_run_context_isolation.py | """Tests that each parallel step receives its own run_context copy.
Regression test for https://github.com/agno-agi/agno/issues/6590
Race condition: when Parallel steps contain agents with different output_schema
types, the shared run_context.output_schema was overwritten concurrently.
"""
import threading
from typing import Any, Dict, Optional
from pydantic import BaseModel
from agno.run.base import RunContext
from agno.workflow.parallel import Parallel
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
class SchemaA(BaseModel):
    # Output schema assigned by step_a; the test compares class identity (`is`).
    field_a: str
class SchemaB(BaseModel):
    # Output schema assigned by step_b; deliberately distinct from SchemaA.
    field_b: int
def _make_step_that_captures_run_context(name: str, captured: Dict[str, Any], barrier: threading.Barrier):
    """Create a Step whose executor captures the run_context it receives.

    Args:
        name: Step name; also the key under which the observed schema is stored.
        captured: Shared dict the executor writes its observed output_schema into.
        barrier: Barrier both executors wait on so their execution overlaps in time.
    """
    # NOTE: the keyword-only parameter list mirrors the executor signature that
    # Parallel/Step pass through — keep it in sync with the Step protocol.
    def executor(
        step_input: StepInput,
        *,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        workflow_run_response: Any = None,
        store_executor_outputs: bool = True,
        workflow_session: Any = None,
        add_workflow_history_to_steps: Optional[bool] = False,
        num_history_runs: int = 3,
        run_context: Optional[RunContext] = None,
        session_state: Optional[Dict[str, Any]] = None,
        background_tasks: Any = None,
    ) -> StepOutput:
        # Each step sets its own output_schema on the run_context it received
        if name == "step_a" and run_context is not None:
            run_context.output_schema = SchemaA
        elif name == "step_b" and run_context is not None:
            run_context.output_schema = SchemaB
        # Synchronize so both steps overlap
        barrier.wait(timeout=5)
        # Capture what this step's run_context.output_schema is after the barrier
        captured[name] = run_context.output_schema if run_context else None
        return StepOutput(step_name=name, content=f"{name} done")
    return Step(name=name, description=f"Test step {name}", executor=executor)
class TestParallelRunContextIsolation:
    """Parallel must give each step an isolated run_context copy, while still
    sharing session_state (shallow copy) across steps."""

    def test_each_parallel_step_gets_own_run_context(self):
        """Verify parallel steps do not share the same run_context object."""
        # Barrier of 2 forces both step executors to be in-flight at once,
        # reproducing the original race.
        barrier = threading.Barrier(2)
        captured: Dict[str, Any] = {}
        step_a = _make_step_that_captures_run_context("step_a", captured, barrier)
        step_b = _make_step_that_captures_run_context("step_b", captured, barrier)
        parallel = Parallel(step_a, step_b, name="test_parallel")
        run_context = RunContext(run_id="test", session_id="test")
        step_input = StepInput(input="test input")
        parallel.execute(step_input, run_context=run_context)
        # Each step should have kept its own output_schema
        assert captured["step_a"] is SchemaA, f"step_a should have SchemaA but got {captured['step_a']}"
        assert captured["step_b"] is SchemaB, f"step_b should have SchemaB but got {captured['step_b']}"

    def test_parallel_steps_share_session_state(self):
        """Verify that session_state is still shared across parallel steps (shallow copy)."""
        shared_state: Dict[str, Any] = {"counter": 0}
        captured_states: Dict[str, Any] = {}
        barrier = threading.Barrier(2)
        def make_state_step(name: str):
            # Executor records the identity of the session_state dict it sees.
            def executor(
                step_input: StepInput,
                *,
                session_id: Optional[str] = None,
                user_id: Optional[str] = None,
                workflow_run_response: Any = None,
                store_executor_outputs: bool = True,
                workflow_session: Any = None,
                add_workflow_history_to_steps: Optional[bool] = False,
                num_history_runs: int = 3,
                run_context: Optional[RunContext] = None,
                session_state: Optional[Dict[str, Any]] = None,
                background_tasks: Any = None,
            ) -> StepOutput:
                # id() comparison: same object => shared, not a deep copy.
                captured_states[name] = id(run_context.session_state) if run_context else None
                barrier.wait(timeout=5)
                return StepOutput(step_name=name, content=f"{name} done")
            return Step(name=name, description=f"State test {name}", executor=executor)
        step_a = make_state_step("a")
        step_b = make_state_step("b")
        parallel = Parallel(step_a, step_b, name="state_test")
        run_context = RunContext(run_id="test", session_id="test", session_state=shared_state)
        step_input = StepInput(input="test input")
        parallel.execute(step_input, run_context=run_context)
        # session_state should be the same object (shared) across steps
        assert captured_states["a"] == captured_states["b"], "session_state should be shared across parallel steps"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_parallel_run_context_isolation.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/02_agents/10_human_in_the_loop/confirmation_with_session_state.py | """
Confirmation with Session State
===============================
HITL confirmation where the tool modifies session_state before pausing.
Verifies that state changes survive the pause/continue round-trip.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.run import RunContext, RunStatus
from agno.tools import tool
from agno.utils import pprint
from rich.console import Console
from rich.prompt import Prompt
console = Console()
@tool(requires_confirmation=True)
def add_to_watchlist(run_context: RunContext, symbol: str) -> str:
    """Add a stock symbol to the user's watchlist. Requires confirmation.
    Args:
        symbol: Stock ticker symbol (e.g. AAPL, TSLA)
    Returns:
        Confirmation message with updated watchlist
    """
    # Mutate session_state here so the pause/continue round-trip in the
    # example can verify the change survives.
    if run_context.session_state is None:
        run_context.session_state = {}
    state = run_context.session_state
    ticker = symbol.upper()
    watchlist = state.get("watchlist", [])
    if ticker not in watchlist:
        watchlist.append(ticker)
    state["watchlist"] = watchlist
    return f"Added {ticker} to watchlist. Current watchlist: {watchlist}"
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    tools=[add_to_watchlist],
    # Initial per-session state; the tool mutates it via run_context.session_state.
    session_state={"watchlist": []},
    # NOTE(review): {watchlist} appears to be templated from session_state — confirm.
    instructions="You MUST use the add_to_watchlist tool when the user asks to add a stock. The user's watchlist is: {watchlist}",
    db=SqliteDb(db_file="tmp/hitl_state.db"),
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Step 1: trigger the confirmation-gated tool; the run should pause.
    console.print(
        "[bold]Step 1:[/] Asking agent to add AAPL to watchlist (will pause for confirmation)"
    )
    run_response = agent.run("Add AAPL to my watchlist using the add_to_watchlist tool")
    console.print(f"[dim]Status: {run_response.status}[/]")
    console.print(f"[dim]Session state after pause: {agent.get_session_state()}[/]")
    if run_response.status != RunStatus.paused:
        # The model is not guaranteed to call the tool; bail out gracefully.
        console.print(
            "[yellow]Agent did not pause (model may not have called the tool). Try re-running.[/]"
        )
    else:
        # Resolve each pending confirmation interactively.
        for requirement in run_response.active_requirements:
            if requirement.needs_confirmation:
                console.print(
                    f"Tool [bold blue]{requirement.tool_execution.tool_name}({requirement.tool_execution.tool_args})[/] requires confirmation."
                )
                message = (
                    Prompt.ask(
                        "Do you want to continue?", choices=["y", "n"], default="y"
                    )
                    .strip()
                    .lower()
                )
                if message == "n":
                    requirement.reject()
                else:
                    requirement.confirm()
        # Step 2: resume the paused run with the resolved requirements.
        console.print("\n[bold]Step 2:[/] Continuing run after confirmation")
        run_response = agent.continue_run(
            run_id=run_response.run_id,
            requirements=run_response.requirements,
        )
        pprint.pprint_run_response(run_response)
        # The watchlist mutation made before the pause should still be here.
        final_state = agent.get_session_state()
        console.print(f"\n[bold green]Final session state:[/] {final_state}")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/10_human_in_the_loop/confirmation_with_session_state.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/team/test_team_run_regressions.py | import inspect
from typing import Any
import pytest
from agno.agent.agent import Agent
from agno.run import RunContext
from agno.run.base import RunStatus
from agno.run.team import TeamRunOutput
from agno.session import TeamSession
from agno.team import _hooks
from agno.team import _run as team_run
from agno.team.team import Team
def test_all_team_pause_handlers_accept_run_context():
    """Every pause handler (sync/async, stream/non-stream) must accept run_context."""
    handlers = (
        _hooks.handle_team_run_paused,
        _hooks.handle_team_run_paused_stream,
        _hooks.ahandle_team_run_paused,
        _hooks.ahandle_team_run_paused_stream,
    )
    for handler in handlers:
        signature = inspect.signature(handler)
        assert "run_context" in signature.parameters, f"{handler.__name__} missing run_context param"
def test_handle_team_run_paused_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """Sync pause handler must forward run_context to _cleanup_and_store."""
    captured: dict[str, Any] = {}
    def spy_cleanup(team, run_response, session, run_context=None):
        captured["run_context"] = run_context
    monkeypatch.setattr(team_run, "_cleanup_and_store", spy_cleanup)
    # Stub out approval creation; only the cleanup forwarding is under test.
    monkeypatch.setattr("agno.run.approval.create_approval_from_pause", lambda **kwargs: None)
    team = Team(name="test-team", members=[Agent(name="m1")])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    _hooks.handle_team_run_paused(
        team=team,
        run_response=TeamRunOutput(run_id="r1", session_id="s1", messages=[]),
        session=TeamSession(session_id="s1"),
        run_context=run_context,
    )
    # Identity check: the very same object must be forwarded, not a copy.
    assert captured["run_context"] is run_context
@pytest.mark.asyncio
async def test_ahandle_team_run_paused_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """Async pause handler must forward run_context to _acleanup_and_store."""
    captured: dict[str, Any] = {}
    async def spy_acleanup(team, run_response, session, run_context=None):
        captured["run_context"] = run_context
    async def noop_acreate_approval(**kwargs):
        return None
    monkeypatch.setattr(team_run, "_acleanup_and_store", spy_acleanup)
    # Stub out approval creation; only the cleanup forwarding is under test.
    monkeypatch.setattr("agno.run.approval.acreate_approval_from_pause", noop_acreate_approval)
    team = Team(name="test-team", members=[Agent(name="m1")])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    await _hooks.ahandle_team_run_paused(
        team=team,
        run_response=TeamRunOutput(run_id="r1", session_id="s1", messages=[]),
        session=TeamSession(session_id="s1"),
        run_context=run_context,
    )
    # Identity check: the very same object must be forwarded, not a copy.
    assert captured["run_context"] is run_context
def test_handle_team_run_paused_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Pausing must copy run_context.session_state into the session and the result."""
    # Neutralize storage/metrics/approval side effects; only state propagation is under test.
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    monkeypatch.setattr("agno.run.approval.create_approval_from_pause", lambda **kwargs: None)
    team = Team(name="test-team", members=[Agent(name="m1")])
    monkeypatch.setattr(team, "save_session", lambda session: None)
    session = TeamSession(session_id="s1", session_data={})
    run_response = TeamRunOutput(run_id="r1", session_id="s1", messages=[])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"watchlist": ["AAPL"]})
    result = _hooks.handle_team_run_paused(
        team=team,
        run_response=run_response,
        session=session,
        run_context=run_context,
    )
    assert result.status == RunStatus.paused
    # State must land both on the persisted session and on the returned output.
    assert session.session_data["session_state"] == {"watchlist": ["AAPL"]}
    assert result.session_state == {"watchlist": ["AAPL"]}
def test_handle_team_run_paused_without_run_context_does_not_set_state(monkeypatch: pytest.MonkeyPatch):
    """Without a run_context the handler must not invent session_state."""
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    monkeypatch.setattr("agno.run.approval.create_approval_from_pause", lambda **kwargs: None)
    team = Team(name="test-team", members=[Agent(name="m1")])
    monkeypatch.setattr(team, "save_session", lambda session: None)
    session = TeamSession(session_id="s1", session_data={})
    result = _hooks.handle_team_run_paused(
        team=team,
        run_response=TeamRunOutput(run_id="r1", session_id="s1", messages=[]),
        session=session,
    )
    assert result.status == RunStatus.paused
    # No run_context was passed, so no session_state key may be created.
    assert "session_state" not in session.session_data
def test_handle_team_run_paused_persists_state_when_session_data_is_none(monkeypatch: pytest.MonkeyPatch):
    """A None session_data must be initialized (not crash) when state is persisted."""
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    monkeypatch.setattr("agno.run.approval.create_approval_from_pause", lambda **kwargs: None)
    team = Team(name="test-team", members=[Agent(name="m1")])
    monkeypatch.setattr(team, "save_session", lambda session: None)
    # session_data deliberately None — the edge case under test.
    session = TeamSession(session_id="s1", session_data=None)
    run_response = TeamRunOutput(run_id="r1", session_id="s1", messages=[])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"watchlist": ["AAPL"]})
    result = _hooks.handle_team_run_paused(
        team=team,
        run_response=run_response,
        session=session,
        run_context=run_context,
    )
    assert result.status == RunStatus.paused
    assert result.session_state == {"watchlist": ["AAPL"]}
    assert session.session_data == {"session_state": {"watchlist": ["AAPL"]}}
@pytest.mark.asyncio
async def test_ahandle_team_run_paused_persists_state_when_session_data_is_none(monkeypatch: pytest.MonkeyPatch):
    """Async: a None session_data must be initialized when state is persisted."""
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    async def noop_acreate_approval(**kwargs):
        return None
    monkeypatch.setattr("agno.run.approval.acreate_approval_from_pause", noop_acreate_approval)
    team = Team(name="test-team", members=[Agent(name="m1")])
    async def noop_asave(session):
        return None
    monkeypatch.setattr(team, "asave_session", noop_asave)
    # session_data deliberately None — the edge case under test.
    session = TeamSession(session_id="s1", session_data=None)
    run_response = TeamRunOutput(run_id="r1", session_id="s1", messages=[])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"cart": ["item-1"]})
    result = await _hooks.ahandle_team_run_paused(
        team=team,
        run_response=run_response,
        session=session,
        run_context=run_context,
    )
    assert result.status == RunStatus.paused
    assert result.session_state == {"cart": ["item-1"]}
    assert session.session_data == {"session_state": {"cart": ["item-1"]}}
@pytest.mark.asyncio
async def test_ahandle_team_run_paused_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Async pause must copy run_context.session_state into session and result."""
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    async def noop_acreate_approval(**kwargs):
        return None
    monkeypatch.setattr("agno.run.approval.acreate_approval_from_pause", noop_acreate_approval)
    team = Team(name="test-team", members=[Agent(name="m1")])
    async def noop_asave(session):
        return None
    monkeypatch.setattr(team, "asave_session", noop_asave)
    session = TeamSession(session_id="s1", session_data={})
    run_response = TeamRunOutput(run_id="r1", session_id="s1", messages=[])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"cart": ["item-1"]})
    result = await _hooks.ahandle_team_run_paused(
        team=team,
        run_response=run_response,
        session=session,
        run_context=run_context,
    )
    assert result.status == RunStatus.paused
    # State must land both on the persisted session and on the returned output.
    assert session.session_data["session_state"] == {"cart": ["item-1"]}
    assert result.session_state == {"cart": ["item-1"]}
def test_handle_team_run_paused_stream_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """Streaming pause handler must forward run_context to _cleanup_and_store."""
    captured: dict[str, Any] = {}
    def spy_cleanup(team, run_response, session, run_context=None):
        captured["run_context"] = run_context
    monkeypatch.setattr(team_run, "_cleanup_and_store", spy_cleanup)
    monkeypatch.setattr("agno.run.approval.create_approval_from_pause", lambda **kwargs: None)
    team = Team(name="test-team", members=[Agent(name="m1")])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    # The handler is a generator; drain it fully so cleanup actually runs.
    events = list(
        _hooks.handle_team_run_paused_stream(
            team=team,
            run_response=TeamRunOutput(run_id="r1", session_id="s1", messages=[]),
            session=TeamSession(session_id="s1"),
            run_context=run_context,
        )
    )
    assert captured["run_context"] is run_context
    assert len(events) >= 1
@pytest.mark.asyncio
async def test_ahandle_team_run_paused_stream_forwards_run_context_to_cleanup(monkeypatch: pytest.MonkeyPatch):
    """Async streaming pause handler must forward run_context to _acleanup_and_store."""
    captured: dict[str, Any] = {}
    async def spy_acleanup(team, run_response, session, run_context=None):
        captured["run_context"] = run_context
    async def noop_acreate_approval(**kwargs):
        return None
    monkeypatch.setattr(team_run, "_acleanup_and_store", spy_acleanup)
    monkeypatch.setattr("agno.run.approval.acreate_approval_from_pause", noop_acreate_approval)
    team = Team(name="test-team", members=[Agent(name="m1")])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"key": "val"})
    events = []
    # Drain the async generator fully so cleanup actually runs.
    async for event in _hooks.ahandle_team_run_paused_stream(
        team=team,
        run_response=TeamRunOutput(run_id="r1", session_id="s1", messages=[]),
        session=TeamSession(session_id="s1"),
        run_context=run_context,
    ):
        events.append(event)
    assert captured["run_context"] is run_context
    assert len(events) >= 1
def test_handle_team_run_paused_stream_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Streaming pause must persist run_context.session_state like the sync path."""
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    monkeypatch.setattr("agno.run.approval.create_approval_from_pause", lambda **kwargs: None)
    team = Team(name="test-team", members=[Agent(name="m1")])
    monkeypatch.setattr(team, "save_session", lambda session: None)
    session = TeamSession(session_id="s1", session_data={})
    run_response = TeamRunOutput(run_id="r1", session_id="s1", messages=[])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"watchlist": ["AAPL"]})
    # Drain the generator fully so persistence actually runs.
    events = list(
        _hooks.handle_team_run_paused_stream(
            team=team,
            run_response=run_response,
            session=session,
            run_context=run_context,
        )
    )
    assert len(events) >= 1
    assert session.session_data["session_state"] == {"watchlist": ["AAPL"]}
    assert run_response.session_state == {"watchlist": ["AAPL"]}
@pytest.mark.asyncio
async def test_ahandle_team_run_paused_stream_persists_session_state(monkeypatch: pytest.MonkeyPatch):
    """Async streaming pause must persist run_context.session_state as well."""
    monkeypatch.setattr(team_run, "scrub_run_output_for_storage", lambda team, run_response: None)
    monkeypatch.setattr("agno.team._session.update_session_metrics", lambda team, session, run_response: None)
    async def noop_acreate_approval(**kwargs):
        return None
    monkeypatch.setattr("agno.run.approval.acreate_approval_from_pause", noop_acreate_approval)
    team = Team(name="test-team", members=[Agent(name="m1")])
    async def noop_asave(session):
        return None
    monkeypatch.setattr(team, "asave_session", noop_asave)
    session = TeamSession(session_id="s1", session_data={})
    run_response = TeamRunOutput(run_id="r1", session_id="s1", messages=[])
    run_context = RunContext(run_id="r1", session_id="s1", session_state={"cart": ["item-1"]})
    events = []
    # Drain the async generator fully so persistence actually runs.
    async for event in _hooks.ahandle_team_run_paused_stream(
        team=team,
        run_response=run_response,
        session=session,
        run_context=run_context,
    ):
        events.append(event)
    assert len(events) >= 1
    assert session.session_data["session_state"] == {"cart": ["item-1"]}
    assert run_response.session_state == {"cart": ["item-1"]}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_team_run_regressions.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/90_models/google/gemini/csv_input.py | """
Google Gemini CSV Input
=======================
Cookbook example for `google/gemini/csv_input.py`.
"""
from pathlib import Path
from agno.agent import Agent
from agno.media import File
from agno.models.google import Gemini
from agno.utils.media import download_file
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Path where the sample IMDB dataset is stored next to this script.
csv_path = Path(__file__).parent.joinpath("IMDB-Movie-Data.csv")

agent = Agent(
    model=Gemini(id="gemini-2.5-flash"),
    markdown=True,
)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
# BUG FIX: the download and the agent call previously ran at import time while
# this "Run Agent" section held only `pass`; execution now lives under the guard.
if __name__ == "__main__":
    # Fetch the sample dataset before sending it to the model.
    download_file(
        "https://agno-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv",
        str(csv_path),
    )
    agent.print_response(
        "Analyze the top 10 highest-grossing movies in this dataset. Which genres perform best at the box office?",
        files=[
            File(
                filepath=csv_path,
                mime_type="text/csv",
            ),
        ],
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/google/gemini/csv_input.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/02_modes/broadcast/04_structured_debate.py | """Broadcast Mode
Same task is sent to every agent in the team. Moderator synthesizes the answer.
"""
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.models.openai import OpenAIResponses
from agno.team.team import Team, TeamMode
# Two debaters take opposite sides of the same proposition.
proponent = Agent(
    name="Proponent",
    role="Argue FOR the proposition. Be concise: thesis, 2-3 points, conclusion.",
    model=Claude(id="claude-opus-4-6"),
)
opponent = Agent(
    name="Opponent",
    role="Argue AGAINST the proposition. Be concise: thesis, 2-3 points, conclusion.",
    model=OpenAIResponses(id="gpt-5.2"),
)
# Broadcast mode sends the same task to every member; the team model
# (the moderator) then synthesizes both arguments into a verdict.
team = Team(
    name="Structured Debate",
    mode=TeamMode.broadcast,
    model=Claude(id="claude-sonnet-4-6"),
    members=[proponent, opponent],
    instructions=[
        "Synthesize responses: highlight points for, against, areas of agreement, and the verdict"
    ],
    show_members_responses=True,
    markdown=True,
)
if __name__ == "__main__":
    team.print_response(
        "Remote work is better than in-office work for software teams.", stream=True
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/02_modes/broadcast/04_structured_debate.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/azure_blob.py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from agno.knowledge.remote_content.base import BaseStorageConfig
if TYPE_CHECKING:
from agno.knowledge.remote_content.remote_content import AzureBlobContent
class AzureBlobConfig(BaseStorageConfig):
    """Configuration for Azure Blob Storage content source.

    Authenticates via the Azure AD client-credentials flow. The Azure AD
    app registration must hold the Storage Blob Data Reader (or
    Contributor) role on the storage account.

    Example:
        ```python
        config = AzureBlobConfig(
            id="company-docs",
            name="Company Documents",
            tenant_id=os.getenv("AZURE_TENANT_ID"),
            client_id=os.getenv("AZURE_CLIENT_ID"),
            client_secret=os.getenv("AZURE_CLIENT_SECRET"),
            storage_account=os.getenv("AZURE_STORAGE_ACCOUNT_NAME"),
            container=os.getenv("AZURE_CONTAINER_NAME"),
        )
        ```
    """

    tenant_id: str
    client_id: str
    client_secret: str
    storage_account: str
    container: str
    prefix: Optional[str] = None

    def file(self, blob_name: str) -> "AzureBlobContent":
        """Build a content reference pointing at a single blob.

        Args:
            blob_name: The blob name (path to file in container).

        Returns:
            AzureBlobContent resolving its credentials via this config's id.
        """
        from agno.knowledge.remote_content.remote_content import AzureBlobContent

        content = AzureBlobContent(config_id=self.id, blob_name=blob_name)
        return content

    def folder(self, prefix: str) -> "AzureBlobContent":
        """Build a content reference covering every blob under a prefix.

        Args:
            prefix: The blob prefix (folder path).

        Returns:
            AzureBlobContent resolving its credentials via this config's id.
        """
        from agno.knowledge.remote_content.remote_content import AzureBlobContent

        content = AzureBlobContent(config_id=self.id, prefix=prefix)
        return content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/azure_blob.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/base.py | from __future__ import annotations
from dataclasses import dataclass, field
from typing import List, Optional
from pydantic import BaseModel, ConfigDict
@dataclass
class ListFilesResult:
    """Result of listing files from a remote source."""

    # Files on the requested page; each entry is a provider-specific dict
    # (e.g. key/name/size/last_modified for S3).
    files: List[dict] = field(default_factory=list)
    # Immediate sub-folders found at the listed prefix.
    folders: List[dict] = field(default_factory=list)
    # 1-indexed page number this result corresponds to.
    page: int = 1
    # Maximum number of files per page.
    limit: int = 100
    # Files counted during listing (may be partial if listing stopped early).
    total_count: int = 0
    # Total pages available; 0 when empty/unknown.
    total_pages: int = 0
class BaseStorageConfig(BaseModel):
    """Base configuration for remote content sources."""

    # Identifier that content objects use to resolve this config (see the
    # `config_id=` arguments in subclasses' file()/folder() helpers).
    id: str
    # Human-readable display name for the source.
    name: str
    # Optional user metadata attached to this source.
    metadata: Optional[dict] = None

    # Allow provider-specific extra fields beyond those declared here.
    model_config = ConfigDict(extra="allow")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/base.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/gcs.py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from agno.knowledge.remote_content.base import BaseStorageConfig
if TYPE_CHECKING:
from agno.knowledge.remote_content.remote_content import GCSContent
class GcsConfig(BaseStorageConfig):
    """Configuration for Google Cloud Storage content source."""

    bucket_name: str
    project: Optional[str] = None
    credentials_path: Optional[str] = None
    prefix: Optional[str] = None

    def file(self, blob_name: str) -> "GCSContent":
        """Build a content reference for one GCS object.

        Args:
            blob_name: The GCS blob name (path to file).

        Returns:
            GCSContent bound to this source's bucket and credentials.
        """
        from agno.knowledge.remote_content.remote_content import GCSContent

        return GCSContent(
            config_id=self.id,
            bucket_name=self.bucket_name,
            blob_name=blob_name,
        )

    def folder(self, prefix: str) -> "GCSContent":
        """Build a content reference covering every object under a prefix.

        Args:
            prefix: The GCS prefix (folder path).

        Returns:
            GCSContent bound to this source's bucket and credentials.
        """
        from agno.knowledge.remote_content.remote_content import GCSContent

        return GCSContent(
            config_id=self.id,
            bucket_name=self.bucket_name,
            prefix=prefix,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/gcs.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/github.py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union
from pydantic import model_validator
from agno.knowledge.remote_content.base import BaseStorageConfig
if TYPE_CHECKING:
from agno.knowledge.remote_content.remote_content import GitHubContent
class GitHubConfig(BaseStorageConfig):
    """Configuration for GitHub content source.

    Two authentication methods are supported:
    - Personal Access Token: set ``token`` to a fine-grained PAT
    - GitHub App: set ``app_id``, ``installation_id``, and ``private_key``

    With GitHub App auth, the loader generates a JWT and exchanges it for an
    installation access token automatically. Requires ``PyJWT[crypto]``.
    """

    repo: str
    token: Optional[str] = None
    branch: Optional[str] = None
    path: Optional[str] = None
    # GitHub App authentication (alternative to token)
    app_id: Optional[Union[str, int]] = None
    installation_id: Optional[Union[str, int]] = None
    private_key: Optional[str] = None

    @model_validator(mode="after")
    def _validate_app_auth_fields(self) -> "GitHubConfig":
        """Ensure all three GitHub App fields are set together and private_key is PEM-formatted."""
        app_auth = {
            "app_id": self.app_id,
            "installation_id": self.installation_id,
            "private_key": self.private_key,
        }
        missing = [name for name, value in app_auth.items() if value is None]
        # A partial set of App credentials is ambiguous: either provide none
        # (PAT auth) or all three.
        if 0 < len(missing) < 3:
            raise ValueError(
                f"GitHub App authentication requires all three fields: app_id, installation_id, private_key. "
                f"Missing: {', '.join(missing)}"
            )
        if self.private_key is not None and not self.private_key.strip().startswith("-----BEGIN"):
            raise ValueError(
                "private_key must be a PEM-formatted RSA private key "
                "(starting with '-----BEGIN RSA PRIVATE KEY-----' or '-----BEGIN PRIVATE KEY-----')"
            )
        return self

    def file(self, file_path: str, branch: Optional[str] = None) -> "GitHubContent":
        """Build a content reference for one file in the repository.

        Args:
            file_path: Path to the file in the repository.
            branch: Optional branch override.

        Returns:
            GitHubContent bound to this source's credentials.
        """
        from agno.knowledge.remote_content.remote_content import GitHubContent

        effective_branch = branch if branch else self.branch
        return GitHubContent(
            config_id=self.id,
            file_path=file_path,
            branch=effective_branch,
        )

    def folder(self, folder_path: str, branch: Optional[str] = None) -> "GitHubContent":
        """Build a content reference for a folder in the repository.

        Args:
            folder_path: Path to the folder in the repository.
            branch: Optional branch override.

        Returns:
            GitHubContent bound to this source's credentials.
        """
        from agno.knowledge.remote_content.remote_content import GitHubContent

        effective_branch = branch if branch else self.branch
        return GitHubContent(
            config_id=self.id,
            folder_path=folder_path,
            branch=effective_branch,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/github.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/s3.py | from __future__ import annotations
import mimetypes
from typing import TYPE_CHECKING, List, Optional, Tuple
from agno.knowledge.remote_content.base import BaseStorageConfig, ListFilesResult
if TYPE_CHECKING:
from agno.knowledge.remote_content.remote_content import S3Content
class S3Config(BaseStorageConfig):
    """Configuration for AWS S3 content source."""

    # Target bucket. When the optional credential/region fields are unset,
    # boto3's default credential/region resolution chain applies.
    bucket_name: str
    region: Optional[str] = None
    aws_access_key_id: Optional[str] = None
    aws_secret_access_key: Optional[str] = None
    # Default key prefix used by list_files/alist_files when no explicit
    # prefix argument is given.
    prefix: Optional[str] = None

    def list_files(
        self,
        prefix: Optional[str] = None,
        delimiter: str = "/",
        limit: int = 100,
        page: int = 1,
    ) -> ListFilesResult:
        """List files and folders in this S3 source with pagination.

        Uses S3's native continuation-token pagination to avoid loading all
        objects into memory. Only fetches the objects needed for the requested
        page (plus objects to skip for earlier pages).

        Args:
            prefix: Path prefix to filter files (e.g., "reports/2024/").
                Overrides the config's prefix when provided.
            delimiter: Folder delimiter (default "/")
            limit: Max files to return per request (1-1000, clamped)
            page: Page number (1-indexed)

        Returns:
            ListFilesResult with files, folders, and pagination info
        """
        try:
            import boto3
        except ImportError:
            raise ImportError("The `boto3` package is not installed. Please install it via `pip install boto3`.")
        # Clamp to S3's 1..1000 per-request bounds.
        limit = max(1, min(limit, 1000))
        session_kwargs, client_kwargs = self._build_session_and_client_kwargs()
        # Explicit argument wins over the configured default prefix.
        effective_prefix = prefix if prefix is not None else (self.prefix or "")
        # Number of matching objects to skip to reach the requested page.
        skip_count = (page - 1) * limit
        skipped = 0
        collected: list = []
        folders: list = []
        folders_seen = False
        total_count = 0
        has_more = False
        list_kwargs = self._build_list_kwargs(effective_prefix, delimiter)
        session = boto3.Session(**session_kwargs)
        s3_client = session.client("s3", **client_kwargs)
        while True:
            response = s3_client.list_objects_v2(**list_kwargs)
            folders, folders_seen, collected, skipped, total_count, page_has_more = self._process_list_response(
                response,
                effective_prefix,
                folders,
                folders_seen,
                collected,
                limit,
                skip_count,
                skipped,
                total_count,
            )
            if page_has_more:
                # Page is full and S3 has more objects: stop early.
                # total_count is partial in this case (see _build_result).
                has_more = True
                break
            if response.get("IsTruncated"):
                list_kwargs["ContinuationToken"] = response["NextContinuationToken"]
            else:
                break
        return self._build_result(collected, folders, page, limit, total_count, has_more)

    def _build_session_and_client_kwargs(self) -> Tuple[dict, dict]:
        """Build boto3/aioboto3 session and client kwargs from config."""
        session_kwargs: dict = {}
        if self.region:
            session_kwargs["region_name"] = self.region
        client_kwargs: dict = {}
        # Only pass explicit credentials when both halves are configured;
        # otherwise fall back to boto3's default credential chain.
        if self.aws_access_key_id and self.aws_secret_access_key:
            client_kwargs["aws_access_key_id"] = self.aws_access_key_id
            client_kwargs["aws_secret_access_key"] = self.aws_secret_access_key
        return session_kwargs, client_kwargs

    def _build_list_kwargs(self, effective_prefix: str, delimiter: str) -> dict:
        """Build kwargs for list_objects_v2."""
        list_kwargs: dict = {"Bucket": self.bucket_name, "MaxKeys": 1000}
        if effective_prefix:
            list_kwargs["Prefix"] = effective_prefix
        if delimiter:
            list_kwargs["Delimiter"] = delimiter
        return list_kwargs

    @staticmethod
    def _process_list_response(
        response: dict,
        effective_prefix: str,
        folders: List[dict],
        folders_seen: bool,
        collected: List[dict],
        limit: int,
        skip_count: int,
        skipped: int,
        total_count: int,
    ) -> Tuple[List[dict], bool, List[dict], int, int, bool]:
        """Process a single list_objects_v2 response page.

        Returns (folders, folders_seen, collected, skipped, total_count, has_more).
        """
        has_more = False
        # NOTE(review): only the first API response's CommonPrefixes are
        # captured; prefixes appearing in later continuation responses are
        # dropped. Presumably acceptable since folders are only returned for
        # page 1 (see _build_result) — confirm against S3 pagination behavior.
        if not folders_seen:
            for prefix_obj in response.get("CommonPrefixes", []):
                folder_prefix = prefix_obj.get("Prefix", "")
                folder_name = folder_prefix.rstrip("/").rsplit("/", 1)[-1]
                if folder_name:
                    folders.append(
                        {
                            "prefix": folder_prefix,
                            "name": folder_name,
                            "is_empty": False,
                        }
                    )
            folders_seen = True
        for obj in response.get("Contents", []):
            key = obj.get("Key", "")
            # Skip the zero-byte placeholder object representing the folder
            # itself (key equal to the listed prefix).
            if key == effective_prefix:
                continue
            name = key.rsplit("/", 1)[-1] if "/" in key else key
            if not name:
                continue
            total_count += 1
            # Skip objects belonging to earlier pages.
            if skipped < skip_count:
                skipped += 1
                continue
            if len(collected) < limit:
                collected.append(
                    {
                        "key": key,
                        "name": name,
                        "size": obj.get("Size"),
                        "last_modified": obj.get("LastModified"),
                        "content_type": mimetypes.guess_type(name)[0],
                    }
                )
        # Page filled and S3 reports more objects beyond this response:
        # signal the caller to stop fetching.
        if response.get("IsTruncated") and len(collected) >= limit:
            has_more = True
        return folders, folders_seen, collected, skipped, total_count, has_more

    @staticmethod
    def _build_result(
        collected: list,
        folders: list,
        page: int,
        limit: int,
        total_count: int,
        has_more: bool,
    ) -> ListFilesResult:
        """Build the final ListFilesResult from accumulated data."""
        if has_more:
            # Listing stopped early, so the real total is unknown; report at
            # least one more page. total_count is likewise a lower bound here.
            total_pages = page + 1
        else:
            total_pages = (total_count + limit - 1) // limit if limit > 0 else 0
        # Folders are only surfaced on the first page.
        if page > 1:
            folders = []
        return ListFilesResult(
            files=collected,
            folders=folders,
            page=page,
            limit=limit,
            total_count=total_count,
            total_pages=total_pages,
        )

    async def alist_files(
        self,
        prefix: Optional[str] = None,
        delimiter: str = "/",
        limit: int = 100,
        page: int = 1,
    ) -> ListFilesResult:
        """Async version of list_files using aioboto3.

        Args:
            prefix: Path prefix to filter files (e.g., "reports/2024/").
                Overrides the config's prefix when provided.
            delimiter: Folder delimiter (default "/")
            limit: Max files to return per request (1-1000, clamped)
            page: Page number (1-indexed)

        Returns:
            ListFilesResult with files, folders, and pagination info
        """
        try:
            import aioboto3
        except ImportError:
            raise ImportError("The `aioboto3` package is not installed. Please install it via `pip install aioboto3`.")
        # Mirrors list_files exactly; keep the two in sync.
        limit = max(1, min(limit, 1000))
        session_kwargs, client_kwargs = self._build_session_and_client_kwargs()
        effective_prefix = prefix if prefix is not None else (self.prefix or "")
        skip_count = (page - 1) * limit
        skipped = 0
        collected: list = []
        folders: list = []
        folders_seen = False
        total_count = 0
        has_more = False
        list_kwargs = self._build_list_kwargs(effective_prefix, delimiter)
        session = aioboto3.Session(**session_kwargs)
        async with session.client("s3", **client_kwargs) as s3_client:
            while True:
                response = await s3_client.list_objects_v2(**list_kwargs)
                folders, folders_seen, collected, skipped, total_count, page_has_more = self._process_list_response(
                    response,
                    effective_prefix,
                    folders,
                    folders_seen,
                    collected,
                    limit,
                    skip_count,
                    skipped,
                    total_count,
                )
                if page_has_more:
                    has_more = True
                    break
                if response.get("IsTruncated"):
                    list_kwargs["ContinuationToken"] = response["NextContinuationToken"]
                else:
                    break
        return self._build_result(collected, folders, page, limit, total_count, has_more)

    def file(self, key: str) -> "S3Content":
        """Create a content reference for a specific file.

        Args:
            key: The S3 object key (path to file).

        Returns:
            S3Content configured with this source's credentials.
        """
        from agno.knowledge.remote_content.remote_content import S3Content

        return S3Content(
            bucket_name=self.bucket_name,
            key=key,
            config_id=self.id,
        )

    def folder(self, prefix: str) -> "S3Content":
        """Create a content reference for a folder (prefix).

        Args:
            prefix: The S3 prefix (folder path).

        Returns:
            S3Content configured with this source's credentials.
        """
        from agno.knowledge.remote_content.remote_content import S3Content

        return S3Content(
            bucket_name=self.bucket_name,
            prefix=prefix,
            config_id=self.id,
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/s3.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/remote_content/sharepoint.py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from agno.knowledge.remote_content.base import BaseStorageConfig
if TYPE_CHECKING:
from agno.knowledge.remote_content.remote_content import SharePointContent
class SharePointConfig(BaseStorageConfig):
    """Configuration for SharePoint content source."""

    tenant_id: str
    client_id: str
    client_secret: str
    hostname: str
    site_path: Optional[str] = None
    site_id: Optional[str] = None  # Full site ID (e.g., "contoso.sharepoint.com,guid1,guid2")
    folder_path: Optional[str] = None

    def file(self, file_path: str, site_path: Optional[str] = None) -> "SharePointContent":
        """Build a content reference for one SharePoint file.

        Args:
            file_path: Path to the file in SharePoint.
            site_path: Optional site path override.

        Returns:
            SharePointContent bound to this source's credentials.
        """
        from agno.knowledge.remote_content.remote_content import SharePointContent

        effective_site = site_path if site_path else self.site_path
        return SharePointContent(
            config_id=self.id,
            file_path=file_path,
            site_path=effective_site,
        )

    def folder(self, folder_path: str, site_path: Optional[str] = None) -> "SharePointContent":
        """Build a content reference for a SharePoint folder.

        Args:
            folder_path: Path to the folder in SharePoint.
            site_path: Optional site path override.

        Returns:
            SharePointContent bound to this source's credentials.
        """
        from agno.knowledge.remote_content.remote_content import SharePointContent

        effective_site = site_path if site_path else self.site_path
        return SharePointContent(
            config_id=self.id,
            folder_path=folder_path,
            site_path=effective_site,
        )

    def _get_access_token(self) -> Optional[str]:
        """Get an access token for Microsoft Graph API."""
        try:
            from msal import ConfidentialClientApplication  # type: ignore
        except ImportError:
            raise ImportError("The `msal` package is not installed. Please install it via `pip install msal`.")

        app = ConfidentialClientApplication(
            self.client_id,
            authority=f"https://login.microsoftonline.com/{self.tenant_id}",
            client_credential=self.client_secret,
        )
        result = app.acquire_token_for_client(scopes=["https://graph.microsoft.com/.default"])
        # On failure msal returns an error payload without "access_token".
        if "access_token" not in result:
            return None
        return result["access_token"]

    def _get_site_id(self, access_token: str) -> Optional[str]:
        """Get the SharePoint site ID."""
        import httpx

        # An explicitly configured site ID short-circuits the Graph lookup.
        if self.site_id:
            return self.site_id

        base = "https://graph.microsoft.com/v1.0/sites"
        if self.site_path:
            url = f"{base}/{self.hostname}:/{self.site_path}"
        else:
            url = f"{base}/{self.hostname}"

        try:
            response = httpx.get(url, headers={"Authorization": f"Bearer {access_token}"})
        except httpx.HTTPError:
            return None
        if response.status_code == 200:
            return response.json().get("id")
        return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_content/sharepoint.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_metadata_utils.py | """Unit tests for metadata utility functions in agno.knowledge.utils."""
from agno.knowledge.utils import (
RESERVED_AGNO_KEY,
get_agno_metadata,
merge_user_metadata,
set_agno_metadata,
strip_agno_metadata,
)
# =============================================================================
# merge_user_metadata
# =============================================================================
class TestMergeUserMetadata:
    """Behavioral tests for merge_user_metadata."""

    def test_none_existing_returns_incoming(self):
        new_meta = {"key": "value"}
        assert merge_user_metadata(None, new_meta) == new_meta

    def test_empty_existing_returns_incoming(self):
        new_meta = {"key": "value"}
        assert merge_user_metadata({}, new_meta) == new_meta

    def test_none_incoming_returns_existing(self):
        current = {"key": "value"}
        assert merge_user_metadata(current, None) == current

    def test_empty_incoming_returns_existing(self):
        current = {"key": "value"}
        assert merge_user_metadata(current, {}) == current

    def test_both_none_returns_none(self):
        assert merge_user_metadata(None, None) is None

    def test_incoming_overwrites_existing_keys(self):
        merged = merge_user_metadata({"a": 1, "b": 2}, {"b": 99, "c": 3})
        assert merged == {"a": 1, "b": 99, "c": 3}

    def test_agno_keys_are_deep_merged(self):
        current = {"_agno": {"source_type": "s3", "bucket": "my-bucket"}, "user_key": "x"}
        update = {"_agno": {"source_url": "https://example.com"}, "user_key": "y"}
        merged = merge_user_metadata(current, update)
        expected_agno = {
            "source_type": "s3",
            "bucket": "my-bucket",
            "source_url": "https://example.com",
        }
        assert merged["_agno"] == expected_agno
        assert merged["user_key"] == "y"

    def test_agno_incoming_overwrites_conflicting_agno_keys(self):
        merged = merge_user_metadata({"_agno": {"status": "old"}}, {"_agno": {"status": "new"}})
        assert merged["_agno"]["status"] == "new"

    def test_agno_non_dict_incoming_treated_as_empty(self):
        merged = merge_user_metadata({"_agno": {"key": "value"}}, {"_agno": "not-a-dict"})
        assert merged["_agno"] == {"key": "value"}

    def test_does_not_mutate_existing(self):
        current = {"a": 1, "_agno": {"x": 1}}
        snapshot = {"a": 1, "_agno": {"x": 1}}
        merge_user_metadata(current, {"a": 2, "_agno": {"y": 2}})
        assert current == snapshot
# =============================================================================
# set_agno_metadata
# =============================================================================
class TestSetAgnoMetadata:
    """Behavioral tests for set_agno_metadata."""

    def test_sets_key_on_none_metadata(self):
        assert set_agno_metadata(None, "source_type", "url") == {"_agno": {"source_type": "url"}}

    def test_sets_key_on_empty_metadata(self):
        assert set_agno_metadata({}, "source_type", "s3") == {"_agno": {"source_type": "s3"}}

    def test_sets_key_preserving_existing_user_metadata(self):
        updated = set_agno_metadata({"user_key": "value"}, "source_type", "gcs")
        assert updated["user_key"] == "value"
        assert updated["_agno"]["source_type"] == "gcs"

    def test_sets_key_preserving_existing_agno_keys(self):
        updated = set_agno_metadata({"_agno": {"existing_key": "keep"}}, "new_key", "added")
        assert updated["_agno"] == {"existing_key": "keep", "new_key": "added"}

    def test_overwrites_existing_agno_key(self):
        updated = set_agno_metadata({"_agno": {"status": "old"}}, "status", "new")
        assert updated["_agno"]["status"] == "new"

    def test_returns_same_dict_reference(self):
        original = {"key": "value"}
        assert set_agno_metadata(original, "x", 1) is original

    def test_handles_none_agno_value_in_metadata(self):
        updated = set_agno_metadata({"_agno": None}, "key", "value")
        assert updated["_agno"] == {"key": "value"}
# =============================================================================
# get_agno_metadata
# =============================================================================
class TestGetAgnoMetadata:
    """Behavioral tests for get_agno_metadata."""

    def test_returns_none_for_none_metadata(self):
        assert get_agno_metadata(None, "key") is None

    def test_returns_none_for_empty_metadata(self):
        assert get_agno_metadata({}, "key") is None

    def test_returns_none_when_agno_missing(self):
        assert get_agno_metadata({"user_key": "value"}, "key") is None

    def test_returns_none_when_agno_is_not_dict(self):
        assert get_agno_metadata({"_agno": "string"}, "key") is None

    def test_returns_none_when_key_not_in_agno(self):
        assert get_agno_metadata({"_agno": {"other": "value"}}, "key") is None

    def test_returns_value_for_existing_key(self):
        meta = {"_agno": {"source_type": "url", "source_url": "https://example.com"}}
        assert get_agno_metadata(meta, "source_type") == "url"
        assert get_agno_metadata(meta, "source_url") == "https://example.com"

    def test_returns_falsy_values_correctly(self):
        # Falsy stored values must round-trip as-is, not collapse to None.
        meta = {"_agno": {"count": 0, "flag": False, "empty": ""}}
        assert get_agno_metadata(meta, "count") == 0
        assert get_agno_metadata(meta, "flag") is False
        assert get_agno_metadata(meta, "empty") == ""
# =============================================================================
# strip_agno_metadata
# =============================================================================
class TestStripAgnoMetadata:
    """Behavioral tests for strip_agno_metadata."""

    def test_returns_none_for_none(self):
        assert strip_agno_metadata(None) is None

    def test_returns_empty_for_empty(self):
        stripped = strip_agno_metadata({})
        assert stripped is not None
        assert stripped == {}

    def test_strips_agno_key(self):
        stripped = strip_agno_metadata({"user_key": "value", "_agno": {"source_type": "s3"}})
        assert stripped == {"user_key": "value"}
        assert "_agno" not in stripped

    def test_returns_copy_not_original(self):
        original = {"key": "value", "_agno": {"x": 1}}
        assert strip_agno_metadata(original) is not original

    def test_preserves_all_non_agno_keys(self):
        stripped = strip_agno_metadata({"a": 1, "b": "two", "c": [3], "_agno": {"internal": True}})
        assert stripped == {"a": 1, "b": "two", "c": [3]}

    def test_no_agno_key_returns_copy(self):
        original = {"key": "value"}
        stripped = strip_agno_metadata(original)
        assert stripped == {"key": "value"}
        assert stripped is not original

    def test_does_not_mutate_original(self):
        original = {"key": "value", "_agno": {"x": 1}}
        strip_agno_metadata(original)
        assert "_agno" in original
# =============================================================================
# RESERVED_AGNO_KEY constant
# =============================================================================
def test_reserved_agno_key_value():
    # The reserved namespace key is part of the public contract; changing it
    # would orphan existing stored metadata.
    assert RESERVED_AGNO_KEY == "_agno"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_metadata_utils.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/anthropic/test_streaming_metrics.py | """
Regression test for Anthropic streaming metrics double-counting bug (#6537).
Both MessageStartEvent and MessageStopEvent carry .message.usage with full
token counts. The old code extracted usage from both, causing input_tokens
to be summed twice via the += accumulation in base.py._populate_stream_data.
The fix restricts usage extraction to MessageStopEvent only.
"""
from unittest.mock import MagicMock
from anthropic.types import MessageStartEvent, MessageStopEvent, Usage
from agno.models.anthropic.claude import Claude
def _make_usage(input_tokens: int, output_tokens: int) -> Usage:
    """Build an Anthropic Usage object with zeroed cache counters."""
    return Usage(
        cache_creation_input_tokens=0,
        cache_read_input_tokens=0,
        server_tool_use=None,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
    )
def _make_message_event(event_cls, input_tokens: int, output_tokens: int):
    """Mock a MessageStartEvent/MessageStopEvent whose message carries usage."""
    mock_event = MagicMock(spec=event_cls)
    mock_event.message = MagicMock()
    mock_event.message.content = []
    mock_event.message.usage = _make_usage(input_tokens, output_tokens)
    return mock_event
def test_message_start_event_does_not_emit_usage():
    """MessageStartEvent should NOT produce response_usage (would cause double-counting)."""
    model = Claude(id="claude-sonnet-4-5-20250929")
    event = _make_message_event(MessageStartEvent, input_tokens=50000, output_tokens=0)
    parsed = model._parse_provider_response_delta(event)
    assert parsed.response_usage is None, (
        "MessageStartEvent should not emit response_usage; "
        "only MessageStopEvent should, to prevent double-counting input_tokens"
    )
def test_message_stop_event_emits_usage():
    """MessageStopEvent SHOULD produce response_usage with correct token counts."""
    model = Claude(id="claude-sonnet-4-5-20250929")
    event = _make_message_event(MessageStopEvent, input_tokens=50000, output_tokens=1200)
    usage = model._parse_provider_response_delta(event).response_usage
    assert usage is not None, "MessageStopEvent should emit response_usage"
    assert usage.input_tokens == 50000
    assert usage.output_tokens == 1200
    assert usage.total_tokens == 51200
def test_streaming_metrics_not_doubled():
    """Simulate a full streaming sequence and verify input_tokens is NOT doubled."""
    model = Claude(id="claude-sonnet-4-5-20250929")
    start = _make_message_event(MessageStartEvent, input_tokens=50000, output_tokens=0)
    stop = _make_message_event(MessageStopEvent, input_tokens=50000, output_tokens=1200)
    parsed_start = model._parse_provider_response_delta(start)
    parsed_stop = model._parse_provider_response_delta(stop)
    # Exactly one of the two events may carry usage: if both did, the +=
    # accumulation downstream would report 100000 input tokens.
    assert parsed_start.response_usage is None
    assert parsed_stop.response_usage is not None
    assert parsed_stop.response_usage.input_tokens == 50000
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/anthropic/test_streaming_metrics.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/03_teams/03_tools/member_information.py | """
Member Information
==================
Demonstrates enabling the `get_member_information_tool` capability on a Team.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
technical_agent = Agent(
    name="Technical Analyst",
    role="Technical investigations",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Handle technical implementation questions.",
        "Keep responses grounded and testable.",
    ],
)
billing_agent = Agent(
    name="Billing Specialist",
    role="Billing and invoicing",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Handle billing disputes and payment-related questions.",
        "Return clear next steps for account resolution.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
support_team = Team(
    name="Support Coordination Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[technical_agent, billing_agent],
    # Enable the member-information capability this example demonstrates, so
    # the team can answer questions about its members' names and roles.
    get_member_information_tool=True,
    instructions=[
        "Use team members as the source of truth for routing questions.",
        "Choose the most relevant member for each request.",
    ],
    show_members_responses=True,  # include member responses in the output
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    support_team.print_response(
        "I have a payment chargeback and also a bug in the mobile app. Which member is relevant for this?",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/03_tools/member_information.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/03_tools/tool_call_limit.py | """
Tool Call Limit
===============
Demonstrates constraining how many tool calls a Team can make in a single run.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
from agno.tools import tool
@tool()
def lookup_product_price(product_name: str) -> str:
    """Get a static price for supported products."""
    prices = {
        "camera": "$699 USD",
        "drone": "$899 USD",
        "laptop": "$1,249 USD",
    }
    normalized = product_name.lower()
    return prices.get(normalized, "This product is not in the catalog")
@tool()
def lookup_shipping_time(country: str) -> str:
    """Return the static shipping estimate for a destination zone.

    Args:
        country: Destination zone code (e.g. "US", "EU"); case-insensitive.

    Returns:
        A delivery-window string, or a fallback message for unknown zones.
    """
    estimates = {
        "us": "3-5 business days",
        "eu": "5-7 business days",
        "asia": "7-14 business days",
    }
    try:
        return estimates[country.lower()]
    except KeyError:
        return "Unknown shipping zone"
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
order_agent = Agent(
    name="Order Planner",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Create accurate order summaries for the requested products.",
        "If info is missing, ask for clarification.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
orders_team = Team(
    name="Order Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[order_agent],
    tools=[lookup_product_price, lookup_shipping_time],
    # Cap the run at a single tool call even though the demo prompt asks for
    # two pieces of information (price AND shipping time).
    tool_call_limit=1,
    instructions=[
        "You are a retail assistant.",
        "Use tools only when needed and keep responses concise.",
        "Remember that only one tool call is allowed in this run.",
    ],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    orders_team.print_response(
        "For the camera sale, tell me the price and shipping time to EU.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/03_tools/tool_call_limit.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/03_tools/tool_choice.py | """
Tool Choice
===========
Demonstrates using `tool_choice` to force the Team to execute a specific tool.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
from agno.tools import tool
@tool()
def get_city_timezone(city: str) -> str:
    """Resolve a supported city name to its IANA timezone identifier.

    Args:
        city: City name; matching is case-insensitive.

    Returns:
        The timezone identifier, or a fallback message for unknown cities.
    """
    zones = {
        "new york": "America/New_York",
        "london": "Europe/London",
        "tokyo": "Asia/Tokyo",
        "sydney": "Australia/Sydney",
    }
    try:
        return zones[city.lower()]
    except KeyError:
        return "Unsupported city for this example"
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
agent = Agent(
    name="Operations Analyst",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Use the tool output to answer timezone questions.",
        "Do not invent values that are not in the tool output.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
teams_timezone = Team(
    name="Tool Choice Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[agent],
    tools=[get_city_timezone],
    # OpenAI-style tool_choice payload: forces the model to call this
    # specific function instead of deciding on its own.
    tool_choice={
        "type": "function",
        "function": {"name": "get_city_timezone"},
    },
    instructions=[
        "You are a logistics assistant.",
        "For every request, resolve the city timezone using the available tool.",
        "Return the timezone identifier only in one sentence.",
    ],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    teams_timezone.print_response("What is the timezone for London?", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/03_tools/tool_choice.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/04_structured_input_output/expected_output.py | """
Expected Output
===============
Demonstrates setting a team-level `expected_output` to describe the desired
run result shape.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
incident_analyst = Agent(
    name="Incident Analyst",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Extract outcomes and risks clearly.",
        "Avoid unnecessary speculation.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
incident_team = Team(
    name="Incident Reporting Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[incident_analyst],
    # Free-form description of the desired run result shape (not a strict
    # structured-output schema).
    expected_output=(
        "Three sections: Summary, Impact, and Next Step. "
        "Keep each section to one sentence."
    ),
    instructions=[
        "Summarize incidents in a clear operational style.",
        "Prefer plain language over technical jargon.",
    ],
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    incident_team.print_response(
        (
            "A deployment changed the auth callback behavior, login requests increased by 12%, "
            "and a rollback script is already prepared."
        ),
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/04_structured_input_output/expected_output.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/05_knowledge/05_team_update_knowledge.py | """
Team Update Knowledge
=====================
Demonstrates enabling `update_knowledge` so teams can persist new facts.
"""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIResponses
from agno.team import Team
from agno.vectordb.lancedb import LanceDb
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
team_knowledge = Knowledge(
    vector_db=LanceDb(
        table_name="team_update_knowledge",
        uri="tmp/lancedb",
    ),
)
# Seed the vector store with one starter fact for later retrieval.
team_knowledge.insert(
    text_content=(
        "Agno teams can coordinate multiple specialist agents for operational tasks "
        "and can use shared memory utilities to stay aligned."
    )
)
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
ops_agent = Agent(
    name="Operations Team Member",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Store reliable facts when users ask to remember them.",
        "When asked, retrieve from knowledge first, then answer succinctly.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
operations_team = Team(
    name="Knowledge Ops Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[ops_agent],
    knowledge=team_knowledge,
    # Allow the team to write new facts back into the knowledge base.
    update_knowledge=True,
    add_knowledge_to_context=True,
    instructions=[
        "You maintain an operations playbook for the team.",
        "Use knowledge tools to remember and recall short business facts.",
    ],
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # First run stores a fact; second run should recall it from knowledge.
    operations_team.print_response(
        "Remember: incident triage runs every weekday at 9:30 local time.",
        stream=True,
    )
    operations_team.print_response(
        "What does our playbook say about incident triage timing?",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/05_knowledge/05_team_update_knowledge.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/06_memory/03_memories_in_context.py | """
Memories in Context
===================
Demonstrates `add_memories_to_context` with team memory capture.
"""
from pprint import pprint
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.memory import MemoryManager
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
db_file = "tmp/team_memories.db"
team_db = SqliteDb(
    db_file=db_file, session_table="team_sessions", memory_table="team_memories"
)
# Dedicated manager/model for extracting user memories from conversations.
memory_manager = MemoryManager(
    model=OpenAIResponses(id="gpt-5-mini"),
    db=team_db,
)
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
assistant_agent = Agent(
    name="Personal Assistant",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Use recent memories to personalize responses.",
        "When unsure, ask for clarification.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
personal_team = Team(
    name="Personal Memory Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[assistant_agent],
    db=team_db,
    memory_manager=memory_manager,
    # Capture new memories on every run and inject stored ones into context.
    update_memory_on_run=True,
    add_memories_to_context=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    user_id = "jane.doe@example.com"
    # First run: share facts for the memory manager to capture.
    personal_team.print_response(
        "My preferred coding language is Python and I like weekend hikes.",
        stream=True,
        user_id=user_id,
    )
    # Second run: captured memories should inform the answer.
    personal_team.print_response(
        "What do you know about my preferences?",
        stream=True,
        user_id=user_id,
    )
    memories = personal_team.get_user_memories(user_id=user_id)
    print("\nCaptured memories:")
    pprint(memories)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/06_memory/03_memories_in_context.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/07_session/custom_session_summary.py | """
Custom Session Summary
======================
Demonstrates configuring a custom session summary manager and reusing summaries in
context.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIResponses
from agno.session import SessionSummaryManager
from agno.team import Team
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
db = SqliteDb(
    db_file="tmp/team_session_summary.db",
    session_table="team_summary_sessions",
)
# Custom manager: controls which model produces the session summaries.
summary_manager = SessionSummaryManager(model=OpenAIResponses(id="gpt-5-mini"))
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
planner = Agent(
    name="Sprint Planner",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Build concise, sequenced plan summaries.",
        "Keep recommendations practical.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
sprint_team = Team(
    name="Sprint Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[planner],
    db=db,
    session_summary_manager=summary_manager,
    # Feed the rolling session summary back into subsequent runs.
    add_session_summary_to_context=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    session_id = "sprint-planning-session"
    sprint_team.print_response(
        "Plan a two-week sprint for a small team shipping a documentation portal.",
        stream=True,
        session_id=session_id,
    )
    sprint_team.print_response(
        "Now add testing and rollout milestones to that plan.",
        stream=True,
        session_id=session_id,
    )
    # Inspect the summary generated for this session, if one exists yet.
    summary = sprint_team.get_session_summary(session_id=session_id)
    if summary is not None:
        print(f"\nSession summary: {summary.summary}")
        if summary.topics:
            print(f"Topics: {', '.join(summary.topics)}")
    sprint_team.print_response(
        "Using what we discussed, suggest the most important next action.",
        stream=True,
        session_id=session_id,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/07_session/custom_session_summary.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/09_context_management/additional_context.py | """
Additional Context
==================
Demonstrates adding custom `additional_context` and resolving placeholders at
run time through Team context resolution.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
ops_agent = Agent(
    name="Ops Copilot",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Follow operational policy and include ownership guidance.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
policy_team = Team(
    name="Policy Team",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[ops_agent],
    # Extra context appended to the prompt; {role} and {region} are
    # placeholders filled in at run time.
    additional_context=(
        "The requester is a {role} in the {region}. Use language suitable for an "
        "internal process update and include owner + timeline whenever possible."
    ),
    # Resolve the placeholders above from `dependencies` during the run.
    resolve_in_context=True,
    dependencies={"role": "support lead", "region": "EMEA"},
    instructions=["Answer as a practical operational policy assistant."],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    policy_team.print_response(
        "A partner asked for a temporary extension on compliance docs.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/09_context_management/additional_context.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/09_context_management/custom_system_message.py | """
Custom Team System Message
==========================
Demonstrates setting a custom system message, role, and including the team
name in context.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
coach = Agent(
    name="Coaching Agent",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Offer practical, concise improvements.",
        "Keep advice actionable and realistic.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
coaching_team = Team(
    name="Team Coach",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[coach],
    instructions=["Focus on high-leverage behavior changes."],
    # Custom system message supplied directly instead of the framework's
    # default-built one.
    system_message=(
        "You are a performance coach for remote teams. "
        "Every answer must end with one concrete next action."
    ),
    # Chat role used for the message above.
    system_message_role="system",
    # Include the team name in the context sent to the model.
    add_name_to_context=True,
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    coaching_team.print_response(
        "How should my team improve meeting quality this week?",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/09_context_management/custom_system_message.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/03_teams/09_context_management/location_context.py | """
Location Context
================
Demonstrates adding location and timezone context to team prompts.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Members
# ---------------------------------------------------------------------------
planner = Agent(
    name="Travel Planner",
    model=OpenAIResponses(id="gpt-5-mini"),
    instructions=[
        "Use location context in recommendations.",
        "Keep suggestions concise and practical.",
    ],
)
# ---------------------------------------------------------------------------
# Create Team
# ---------------------------------------------------------------------------
trip_planner_team = Team(
    name="Trip Planner",
    model=OpenAIResponses(id="gpt-5-mini"),
    members=[planner],
    # Add location info to the prompt, pinned to an explicit IANA timezone
    # rather than the host machine's locale.
    add_location_to_context=True,
    timezone_identifier="America/Chicago",
    instructions=[
        "Plan recommendations around local time and season.",
        "Mention when local timing may affect itinerary decisions.",
    ],
)
# ---------------------------------------------------------------------------
# Run Team
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    trip_planner_team.print_response(
        "What should I pack for a weekend trip based on local time and climate context?",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/03_teams/09_context_management/location_context.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/01_demo/agents/gcode/agent.py | """
Gcode - Lightweight Coding Agent
==================================
A lightweight coding agent that writes, reviews, and iterates on code.
No bloat, no IDE -- just a fast agent that gets sharper the more you use it.
Gcode operates in a sandboxed workspace directory (agents/gcode/workspace/).
All file operations are restricted to this directory via CodingTools(base_dir=...).
Test:
python -m agents.gcode.agent
"""
from pathlib import Path
from agno.agent import Agent
from agno.learn import (
LearnedKnowledgeConfig,
LearningMachine,
LearningMode,
)
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
from agno.tools.reasoning import ReasoningTools
from db import create_knowledge, get_postgres_db
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
agent_db = get_postgres_db()
# Sandboxed workspace: all CodingTools file operations are rooted here.
WORKSPACE = Path(__file__).parent / "workspace"
WORKSPACE.mkdir(exist_ok=True)
# Dual knowledge system: curated reference knowledge plus learnings the
# agent accumulates at runtime (see LearningMachine below).
gcode_knowledge = create_knowledge("Gcode Knowledge", "gcode_knowledge")
gcode_learnings = create_knowledge("Gcode Learnings", "gcode_learnings")
# ---------------------------------------------------------------------------
# Instructions
# ---------------------------------------------------------------------------
instructions = """\
You are Gcode, a lightweight coding agent.
## Your Purpose
You write, review, and iterate on code. No bloat, no IDE. You have
a small set of powerful tools and you use them well. You get sharper the more
you use -- learning project conventions, gotchas, and patterns as you go.
You operate in a sandboxed workspace directory. All files you create, read,
and edit live there. Use relative paths (e.g. "app.py", "src/main.py").
## Coding Workflow
### 0. Recall
- Run `search_knowledge_base` and `search_learnings` FIRST -- you may already know
this project's conventions, gotchas, test setup, or past fixes.
- Check what projects already exist in the workspace with `ls`.
### 1. Read First
- Always read a file before editing it. No exceptions.
- Use `grep` and `find` to orient yourself in an unfamiliar codebase.
- Use `ls` to understand directory structure.
- Read related files to understand context: imports, callers, tests.
- Use `think` from ReasoningTools for complex debugging chains.
### 2. Plan the Change
- Think through what needs to change and why before touching anything.
- Identify all files that need modification.
- Consider edge cases, error handling, and existing tests.
### 3. Make Surgical Edits
- Use `edit_file` for targeted changes with enough surrounding context.
- If an edit fails (no match or multiple matches), re-read the file and adjust.
### 4. Verify
- Run tests after making changes. Always.
- If there are no tests, suggest or write them.
- Use `run_shell` for git operations, linting, type checking, builds.
### 5. Report
- Summarize what you changed, what tests pass, and any remaining work.
## Shell Safety
You have full shell access inside the workspace. Use it responsibly:
- No `rm -rf` on directories -- delete specific files only
- No `sudo` commands
- No network calls (curl, wget, pip install) -- you're sandboxed
- No operations outside the workspace directory
- If unsure whether a command is safe, use `think` to reason through it first
## When to save_learning
After discovering project conventions:
```
save_learning(
title="todo-app uses pytest with fixtures in conftest.py",
learning="This project uses pytest. Fixtures are in conftest.py, not inline. Run tests with: pytest -v"
)
```
After fixing an error caused by a codebase quirk:
```
save_learning(
title="math-parser: regex edge case with negative numbers",
learning="The tokenizer breaks on negative numbers like -3.14. Must handle unary minus in the parser, not the tokenizer."
)
```
After learning a user's coding preferences:
```
save_learning(
title="User prefers relative imports within project folders",
learning="Use 'from .utils import helper' not absolute paths. User also prefers type hints on all function signatures."
)
```
After discovering a useful pattern:
```
save_learning(
title="Python CLI apps: use argparse with subcommands",
learning="User prefers argparse with subcommands over click. Structure: main.py with subcommand functions, each in its own module."
)
```
## Workspace
Your workspace is a directory where all your projects live. Each task gets
its own project folder:
- When starting a new task, create a descriptively named subdirectory
(e.g. "todo-app/", "math-parser/", "url-shortener/")
- All files for that task go inside its project folder
- Always `ls` the workspace root first to see existing projects before creating a new one
- If the user asks to continue working on something, find the existing
project folder first -- don't create a duplicate
## Personality
Direct and competent. No filler, no flattery. Reads before editing, tests
after changing. Honest about uncertainty -- says "I'm not sure" rather than
guessing.\
"""
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
gcode = Agent(
    id="gcode",
    name="Gcode",
    model=OpenAIResponses(id="gpt-5.2"),
    db=agent_db,
    instructions=instructions,
    knowledge=gcode_knowledge,
    search_knowledge=True,
    # Learnings live in their own knowledge store and are saved by the agent
    # itself (agentic mode) via save_learning.
    learning=LearningMachine(
        knowledge=gcode_learnings,
        learned_knowledge=LearnedKnowledgeConfig(mode=LearningMode.AGENTIC),
    ),
    # all=True also enables the exploration tools (grep, find, ls); file
    # access is restricted to WORKSPACE via base_dir.
    tools=[CodingTools(base_dir=WORKSPACE, all=True), ReasoningTools()],
    enable_agentic_memory=True,
    add_datetime_to_context=True,
    add_history_to_context=True,
    read_chat_history=True,
    num_history_runs=20,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    gcode.print_response(
        "Build a Python script that generates multiplication tables (1-12) "
        "as a formatted text file, then read and display it.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/01_demo/agents/gcode/agent.py",
"license": "Apache License 2.0",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:cookbook/02_agents/10_human_in_the_loop/user_feedback.py | """
User Feedback (Structured Questions)
=====================================
Human-in-the-Loop: Presenting structured questions with predefined options.
Uses UserFeedbackTools to pause the agent and collect user selections.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIResponses
from agno.tools.user_feedback import UserFeedbackTools
from agno.utils import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIResponses(id="gpt-5.2"),
    # UserFeedbackTools provides ask_user, which pauses the run until the
    # user's selections are supplied (see the __main__ loop below the file).
    tools=[UserFeedbackTools()],
    instructions=[
        "You are a helpful travel assistant.",
        "When the user asks you to plan a trip, use the ask_user tool to clarify their preferences.",
    ],
    markdown=True,
    # Persist sessions so paused runs can be continued.
    db=SqliteDb(db_file="tmp/user_feedback.db"),
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    run_response = agent.run("Help me plan a vacation")
    # The run pauses whenever the agent requests user feedback; keep
    # collecting answers and resuming until the run completes.
    while run_response.is_paused:
        for requirement in run_response.active_requirements:
            if requirement.needs_user_feedback:
                feedback_schema = requirement.user_feedback_schema
                if not feedback_schema:
                    continue
                # Maps question text -> list of chosen option labels.
                selections = {}
                for question in feedback_schema:
                    print(f"\n{question.header or 'Question'}: {question.question}")
                    if question.options:
                        # Display options as a 1-based menu.
                        for i, opt in enumerate(question.options, 1):
                            desc = f" - {opt.description}" if opt.description else ""
                            print(f" {i}. {opt.label}{desc}")
                    if question.multi_select:
                        raw = input("Select options (comma-separated numbers): ")
                        # 1-based menu numbers -> 0-based indices; non-numeric
                        # entries are silently dropped.
                        indices = [
                            int(x.strip()) - 1
                            for x in raw.split(",")
                            if x.strip().isdigit()
                        ]
                        # Bounds check guards against out-of-range numbers.
                        selected = [
                            question.options[i].label
                            for i in indices
                            if question.options and 0 <= i < len(question.options)
                        ]
                    else:
                        raw = input("Select an option (number): ")
                        # -1 marks invalid input; the range check below rejects it.
                        idx = int(raw.strip()) - 1 if raw.strip().isdigit() else -1
                        selected = (
                            [question.options[idx].label]
                            if question.options and 0 <= idx < len(question.options)
                            else []
                        )
                    selections[question.question] = selected
                    print(f" -> Selected: {selected}")
                requirement.provide_user_feedback(selections)
        # Resume the paused run with the recorded feedback attached.
        run_response = agent.continue_run(
            run_id=run_response.run_id,
            requirements=run_response.requirements,
        )
    if not run_response.is_paused:
        pprint.pprint_run_response(run_response)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/02_agents/10_human_in_the_loop/user_feedback.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/91_tools/coding_tools/01_basic_usage.py | """
CodingTools: Minimal Tools for Coding Agents
=============================================
A single toolkit with 4 core tools (read, edit, write, shell) that lets
an agent perform any coding task. Inspired by the Pi coding agent's
philosophy: a small number of composable tools is more powerful than
many specialized ones.
Core tools (enabled by default):
- read_file: Read files with line numbers and pagination
- edit_file: Exact text find-and-replace with diff output
- write_file: Create or overwrite files
- run_shell: Execute shell commands with timeout
Exploration tools (opt-in):
- grep: Search file contents
- find: Search for files by glob pattern
- ls: List directory contents
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
# ---------------------------------------------------------------------------
# Create Agent with CodingTools
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIResponses(id="gpt-5.2"),
    # Default toolset only (read_file, edit_file, write_file, run_shell);
    # file operations are rooted at base_dir.
    tools=[CodingTools(base_dir=".")],
    instructions="You are a coding assistant. Use the coding tools to help the user.",
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    agent.print_response(
        "List the files in the current directory and read the README.md file if it exists.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/coding_tools/01_basic_usage.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:cookbook/91_tools/coding_tools/02_all_tools.py | """
CodingTools: All 7 Tools Enabled
=================================
Enable all tools including the exploration tools (grep, find, ls)
by setting all=True or enabling them individually.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.coding import CodingTools
# ---------------------------------------------------------------------------
# Create Agent with all CodingTools
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIResponses(id="gpt-5.2"),
    # all=True enables the opt-in exploration tools (grep, find, ls) on top
    # of the four core tools.
    tools=[CodingTools(base_dir=".", all=True)],
    instructions="You are a coding assistant. Use the coding tools to help the user.",
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    agent.print_response(
        "Find all Python files in this directory, then grep for any import statements.",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/coding_tools/02_all_tools.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/tools/coding.py | import functools
import shlex
import subprocess
import tempfile
from pathlib import Path
from textwrap import dedent
from typing import Any, List, Optional, Union
from agno.tools import Toolkit
from agno.utils.log import log_error, log_info, logger
@functools.lru_cache(maxsize=None)
def _warn_coding_tools() -> None:
    # lru_cache on a zero-arg function memoizes the (None) result, so the
    # warning is emitted at most once per process.
    logger.warning("CodingTools can run arbitrary shell commands, please provide human supervision.")
class CodingTools(Toolkit):
    """A minimal, powerful toolkit for coding agents.
    Provides four core tools (read, edit, write, shell) and three optional
    exploration tools (grep, find, ls). With these primitives, an agent can
    perform any file operation, run tests, use git, install packages, search
    codebases, and more.
    Inspired by the Pi coding agent's philosophy: a small number of composable
    tools is more powerful than many specialized ones.
    """
    # Command names permitted in restricted mode (restrict_to_base_dir=True)
    # when the caller does not supply an explicit allowlist; compared against
    # the basename of the first shell token in _check_command.
    DEFAULT_ALLOWED_COMMANDS: List[str] = [
        "python",
        "python3",
        "pytest",
        "pip",
        "pip3",
        "cat",
        "head",
        "tail",
        "wc",
        "ls",
        "find",
        "grep",
        "mkdir",
        "rm",
        "mv",
        "cp",
        "touch",
        "echo",
        "printf",
        "git",
        "chmod",
        "diff",
        "sort",
        "uniq",
        "tr",
        "cut",
    ]
    # Guidance for the four core tools; appended to the agent's system
    # message when add_instructions=True and no custom instructions given.
    DEFAULT_INSTRUCTIONS = dedent("""\
        You have access to coding tools: read_file, edit_file, write_file, and run_shell.
        With these tools, you can perform any coding task including reading code, making edits,
        creating files, running tests, using git, installing packages, and searching codebases.
        ## Tool Usage Guidelines
        **read_file** - Read files with line numbers. Use offset and limit to paginate large files.
        - Always read a file before editing it to understand its current contents.
        - Use the line numbers in the output to understand the file structure.
        **edit_file** - Make precise edits using exact text matching (find and replace).
        - The old_text must match exactly one location in the file, including whitespace and indentation.
        - Include enough surrounding context in old_text to ensure a unique match.
        - Prefer small, focused edits over rewriting entire files.
        - If an edit fails due to multiple matches, include more surrounding lines in old_text.
        **write_file** - Create new files or overwrite existing ones entirely.
        - Use this for creating new files. For modifying existing files, prefer edit_file.
        - Parent directories are created automatically.
        **run_shell** - Execute shell commands with timeout protection.
        - Use this for: running tests, git operations, installing packages, searching files (grep/find),
        checking system state, compiling code, and any other command-line task.
        - Commands run from the base directory.
        - Output is truncated if too long; the full output is saved to a temp file.
        ## Best Practices
        - Read before editing: always read_file before edit_file to see current contents.
        - Make small, incremental edits rather than rewriting entire files.
        - Run tests after making changes to verify correctness.\
        """)
    # Extra guidance appended only when at least one of grep/find/ls is on
    # (see has_exploration in __init__).
    EXPLORATION_INSTRUCTIONS = dedent("""\
        **grep** - Search file contents for a pattern with line numbers.
        - Use for finding code patterns, function definitions, imports, etc.
        - Supports regex patterns and case-insensitive search.
        - Use the include parameter to filter by file type (e.g. "*.py").
        **find** - Search for files by glob pattern.
        - Use for discovering files in the project structure.
        - Supports recursive patterns like "**/*.py".
        **ls** - List directory contents.
        - Use for quick directory exploration.
        - Directories are shown with a trailing /.\
        """)
def __init__(
    self,
    base_dir: Optional[Union[Path, str]] = None,
    restrict_to_base_dir: bool = True,
    max_lines: int = 2000,
    max_bytes: int = 50_000,
    shell_timeout: int = 120,
    enable_read_file: bool = True,
    enable_edit_file: bool = True,
    enable_write_file: bool = True,
    enable_run_shell: bool = True,
    enable_grep: bool = False,
    enable_find: bool = False,
    enable_ls: bool = False,
    instructions: Optional[str] = None,
    add_instructions: bool = True,
    all: bool = False,  # shadows builtins.all; name kept for API compatibility
    allowed_commands: Optional[List[str]] = None,
    **kwargs: Any,
):
    """Initialize CodingTools.
    Args:
        base_dir: Root directory for file operations. Defaults to cwd.
        restrict_to_base_dir: If True, file and shell operations cannot escape base_dir.
        max_lines: Maximum lines to return before truncating (default 2000).
        max_bytes: Maximum bytes to return before truncating (default 50KB).
        shell_timeout: Timeout in seconds for shell commands (default 120).
        enable_read_file: Enable the read_file tool.
        enable_edit_file: Enable the edit_file tool.
        enable_write_file: Enable the write_file tool.
        enable_run_shell: Enable the run_shell tool.
        enable_grep: Enable the grep tool (disabled by default).
        enable_find: Enable the find tool (disabled by default).
        enable_ls: Enable the ls tool (disabled by default).
        instructions: Custom instructions for the LLM. Uses defaults if None.
        add_instructions: Whether to add instructions to the agent's system message.
        all: Enable all tools regardless of individual flags.
        allowed_commands: List of allowed shell command names when restrict_to_base_dir is True.
            Defaults to DEFAULT_ALLOWED_COMMANDS. Set to None explicitly after init to disable.
    """
    # Resolve once so all later containment checks compare absolute paths.
    self.base_dir: Path = Path(base_dir).resolve() if base_dir else Path.cwd().resolve()
    self.restrict_to_base_dir = restrict_to_base_dir
    self.allowed_commands: Optional[List[str]] = (
        allowed_commands if allowed_commands is not None else self.DEFAULT_ALLOWED_COMMANDS
    )
    self.max_lines = max_lines
    self.max_bytes = max_bytes
    self.shell_timeout = shell_timeout
    # Paths of temp files holding full (untruncated) run_shell output.
    self._temp_files: List[str] = []
    import atexit
    # NOTE(review): registering the bound method keeps this instance alive
    # until interpreter shutdown (atexit holds a reference to it).
    atexit.register(self._cleanup_temp_files)
    # Exploration docs are only added when at least one of grep/find/ls is on.
    has_exploration = all or enable_grep or enable_find or enable_ls
    if instructions is None:
        resolved_instructions = self.DEFAULT_INSTRUCTIONS
        if has_exploration:
            resolved_instructions += self.EXPLORATION_INSTRUCTIONS
    else:
        resolved_instructions = instructions
    # Register only the enabled tool callables; all=True overrides flags.
    tools: List[Any] = []
    if all or enable_read_file:
        tools.append(self.read_file)
    if all or enable_edit_file:
        tools.append(self.edit_file)
    if all or enable_write_file:
        tools.append(self.write_file)
    if all or enable_run_shell:
        tools.append(self.run_shell)
    if all or enable_grep:
        tools.append(self.grep)
    if all or enable_find:
        tools.append(self.find)
    if all or enable_ls:
        tools.append(self.ls)
    super().__init__(
        name="coding_tools",
        tools=tools,
        instructions=resolved_instructions,
        add_instructions=add_instructions,
        **kwargs,
    )
def _truncate_output(self, text: str) -> tuple:
    """Clamp text to the configured line and byte limits.

    Line limit is applied first, then the byte limit; byte truncation keeps
    only whole lines that fit within max_bytes.

    Returns:
        Tuple of (possibly truncated text, was_truncated, total_line_count).
    """
    all_lines = text.split("\n")
    total = len(all_lines)
    truncated = total > self.max_lines
    kept = all_lines[: self.max_lines]
    joined = "\n".join(kept)
    if len(joined.encode("utf-8", errors="replace")) > self.max_bytes:
        # Over the byte budget: keep whole lines while they still fit.
        truncated = True
        budget = self.max_bytes
        prefix: List[str] = []
        for line in kept:
            cost = len((line + "\n").encode("utf-8", errors="replace"))
            if cost > budget:
                break
            prefix.append(line)
            budget -= cost
        joined = "\n".join(prefix)
    return joined, truncated, total
def _cleanup_temp_files(self) -> None:
    """Delete any temp files produced by truncated shell output.

    Best-effort: OS-level failures are ignored, and the tracking list
    is left empty afterwards.
    """
    while self._temp_files:
        stale = self._temp_files.pop()
        try:
            Path(stale).unlink(missing_ok=True)
        except OSError:
            pass
# Shell operators that enable command chaining or substitution.
# NOTE: ">" also matches ">>" and "|" also matches "||"; the longer forms
# are kept in the list for documentation purposes.
_DANGEROUS_PATTERNS: List[str] = ["&&", "||", ";", "|", "$(", "`", ">", ">>", "<"]

def _check_command(self, command: str) -> Optional[str]:
    """Check if a shell command is safe to execute.

    When restrict_to_base_dir is True, this method:
    1. Blocks shell metacharacters that enable chaining/substitution.
    2. Validates the command name against the allowed_commands list (if set).
    3. Rejects tokens the shell would expand ('~...' and '$VAR') — the
       command later runs with shell=True, so such tokens can resolve to
       paths outside base_dir AFTER this static check.
    4. Checks that path-like tokens don't escape the base directory.

    Returns an error message if a violation is found, None if safe.
    """
    if not self.restrict_to_base_dir:
        return None
    # Block shell operators that enable chaining/substitution
    for pattern in self._DANGEROUS_PATTERNS:
        if pattern in command:
            return f"Error: Shell operator '{pattern}' is not allowed in restricted mode."
    try:
        tokens = shlex.split(command)
    except ValueError:
        return "Error: Could not parse shell command."
    # Validate command against allowlist
    if self.allowed_commands is not None and tokens:
        cmd = tokens[0]
        cmd_base = Path(cmd).name  # Handle /usr/bin/python -> python
        if cmd_base not in self.allowed_commands:
            return f"Error: Command '{cmd_base}' is not in the allowed commands list."
    for i, token in enumerate(tokens):
        # Skip the command itself (already validated by allowlist above)
        if i == 0:
            continue
        # Skip flags
        if token.startswith("-"):
            continue
        # Security fix: the shell expands a leading '~' to the user's home
        # directory and '$VAR' to arbitrary values after this check runs,
        # so resolving them literally against base_dir (below) would miss
        # the escape (e.g. 'cat ~/secret'). Reject them outright.
        if token.startswith("~") or "$" in token:
            return f"Error: Token '{token}' uses shell expansion, which is not allowed in restricted mode."
        # Check tokens that look like paths
        if "/" in token or token == "..":
            try:
                # Absolute paths resolve as-is; relative paths resolve
                # against base_dir (matching the subprocess cwd).
                if token.startswith("/"):
                    resolved = Path(token).resolve()
                else:
                    resolved = (self.base_dir / token).resolve()
                # Check if resolved path is within base_dir
                try:
                    resolved.relative_to(self.base_dir)
                except ValueError:
                    return f"Error: Command references path outside base directory: {token}"
            except (OSError, RuntimeError):
                continue
    return None
def read_file(self, file_path: str, offset: int = 0, limit: Optional[int] = None) -> str:
    """Read the contents of a file with line numbers.
    Returns file contents with line numbers prefixed to each line. Supports
    pagination via offset and limit for large files. Output is truncated if
    it exceeds the configured limits.
    :param file_path: Path to the file to read (relative to base_dir or absolute).
    :param offset: Line number to start reading from (0-indexed). Default 0.
    :param limit: Maximum number of lines to read. Defaults to max_lines setting.
    :return: File contents with line numbers, or an error message.
    """
    try:
        # _check_path enforces the base_dir sandbox (inherited helper).
        safe, resolved_path = self._check_path(file_path, self.base_dir, self.restrict_to_base_dir)
        if not safe:
            return f"Error: Path '{file_path}' is outside the allowed base directory"
        if not resolved_path.exists():
            return f"Error: File not found: {file_path}"
        if not resolved_path.is_file():
            return f"Error: Not a file: {file_path}"
        # Detect binary files: a NUL byte in the first 8KB is treated as
        # proof the file is not text.
        try:
            with open(resolved_path, "rb") as f:
                chunk = f.read(8192)
                if b"\x00" in chunk:
                    return f"Error: Binary file detected: {file_path}"
        except Exception:
            # Best-effort sniff only; fall through to the text read below.
            pass
        contents = resolved_path.read_text(encoding="utf-8", errors="replace")
        if not contents:
            return f"File is empty: {file_path}"
        lines = contents.split("\n")
        total_lines = len(lines)
        # Apply offset and limit
        effective_limit = limit if limit is not None else self.max_lines
        selected_lines = lines[offset : offset + effective_limit]
        # Format with line numbers
        # Calculate width for line number alignment (minimum 4 columns)
        max_line_num = offset + len(selected_lines)
        num_width = max(len(str(max_line_num)), 4)
        formatted_lines = []
        for i, line in enumerate(selected_lines):
            line_num = offset + i + 1  # 1-based
            formatted_lines.append(f"{line_num:>{num_width}} | {line}")
        output = "\n".join(formatted_lines)
        # Apply truncation
        output, was_truncated, _ = self._truncate_output(output)
        # Add summary footer whenever the caller is not seeing the whole file.
        shown_start = offset + 1
        shown_end = offset + len(selected_lines)
        if was_truncated or shown_end < total_lines or offset > 0:
            output += f"\n[Showing lines {shown_start}-{shown_end} of {total_lines} total]"
        return output
    except UnicodeDecodeError:
        return f"Error: Cannot decode file as text: {file_path}"
    except PermissionError:
        return f"Error: Permission denied: {file_path}"
    except Exception as e:
        log_error(f"Error reading file: {e}")
        return f"Error reading file: {e}"
def edit_file(self, file_path: str, old_text: str, new_text: str) -> str:
    """Edit a file by replacing an exact text match with new text.
    The old_text must match exactly one location in the file. If it matches
    zero or multiple locations, the edit is rejected with an error message.
    Returns a unified diff showing the change.
    :param file_path: Path to the file to edit (relative to base_dir or absolute).
    :param old_text: The exact text to find and replace. Must match uniquely.
    :param new_text: The text to replace old_text with.
    :return: A unified diff of the change, or an error message.
    """
    try:
        # _check_path enforces the base_dir sandbox (inherited helper).
        safe, resolved_path = self._check_path(file_path, self.base_dir, self.restrict_to_base_dir)
        if not safe:
            return f"Error: Path '{file_path}' is outside the allowed base directory"
        if not resolved_path.exists():
            return f"Error: File not found: {file_path}"
        if not resolved_path.is_file():
            return f"Error: Not a file: {file_path}"
        if not old_text:
            return "Error: old_text cannot be empty"
        if old_text == new_text:
            return "No changes needed: old_text and new_text are identical"
        contents = resolved_path.read_text(encoding="utf-8")
        # Count occurrences: the edit is applied only on a unique match.
        count = contents.count(old_text)
        if count == 0:
            return (
                f"Error: old_text not found in {file_path}. "
                "Make sure the text matches exactly (including whitespace and indentation)."
            )
        if count > 1:
            return (
                f"Error: old_text matches {count} locations in {file_path}. "
                "Provide more surrounding context to make the match unique."
            )
        # Perform the replacement (count=1: only the unique occurrence)
        new_contents = contents.replace(old_text, new_text, 1)
        # Write the file
        resolved_path.write_text(new_contents, encoding="utf-8")
        # Generate unified diff; difflib is only needed on this success path
        import difflib
        old_lines = contents.splitlines(keepends=True)
        new_lines = new_contents.splitlines(keepends=True)
        diff = difflib.unified_diff(
            old_lines,
            new_lines,
            fromfile=f"a/{file_path}",
            tofile=f"b/{file_path}",
            n=3,
        )
        diff_output = "".join(diff)
        if not diff_output:
            return "Edit applied but no visible diff generated"
        # Truncate if needed
        diff_output, was_truncated, total_lines = self._truncate_output(diff_output)
        if was_truncated:
            diff_output += f"\n[Diff truncated: {total_lines} lines total]"
        log_info(f"Edited {file_path}")
        return diff_output
    except PermissionError:
        return f"Error: Permission denied: {file_path}"
    except Exception as e:
        log_error(f"Error editing file: {e}")
        return f"Error editing file: {e}"
def write_file(self, file_path: str, contents: str) -> str:
    """Create or overwrite a file with the given contents.
    Parent directories are created automatically if they do not exist.
    :param file_path: Path to the file to write (relative to base_dir or absolute).
    :param contents: The full contents to write to the file.
    :return: A success message with the file path, or an error message.
    """
    try:
        safe, resolved_path = self._check_path(file_path, self.base_dir, self.restrict_to_base_dir)
        if not safe:
            return f"Error: Path '{file_path}' is outside the allowed base directory"
        # exist_ok=True makes this a no-op when the parent already exists.
        resolved_path.parent.mkdir(parents=True, exist_ok=True)
        resolved_path.write_text(contents, encoding="utf-8")
        line_count = contents.count("\n") + 1
        log_info(f"Wrote {file_path}")
        return f"Wrote {line_count} lines to {file_path}"
    except PermissionError:
        return f"Error: Permission denied: {file_path}"
    except Exception as e:
        log_error(f"Error writing file: {e}")
        return f"Error writing file: {e}"
def run_shell(self, command: str, timeout: Optional[int] = None) -> str:
    """Execute a shell command and return its output.
    Runs the command as a string via the system shell. Output (stdout + stderr)
    is truncated if it exceeds the configured limits. When output is truncated,
    the full output is saved to a temporary file and its path is included in
    the response.
    :param command: The shell command to execute as a single string.
    :param timeout: Timeout in seconds. Defaults to the toolkit's shell_timeout.
    :return: Command output (stdout and stderr combined), or an error message.
    """
    try:
        # Warns at most once per process (lru_cache on the helper).
        _warn_coding_tools()
        log_info(f"Running shell command: {command}")
        # In restricted mode this blocks chaining operators, disallowed
        # commands, and path tokens that escape base_dir.
        path_error = self._check_command(command)
        if path_error:
            return path_error
        effective_timeout = timeout if timeout is not None else self.shell_timeout
        # shell=True so callers can pass a single command string; the
        # restricted-mode checks above run before anything executes.
        result = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=effective_timeout,
            cwd=str(self.base_dir),
        )
        # Combine stdout and stderr (stdout first)
        output = result.stdout
        if result.stderr:
            output += result.stderr
        header = f"Exit code: {result.returncode}\n"
        # Apply truncation
        truncated_output, was_truncated, total_lines = self._truncate_output(output)
        if was_truncated:
            # Save full output to temp file; the path is tracked in
            # _temp_files so the atexit handler can delete it at shutdown.
            tmp = tempfile.NamedTemporaryFile(
                mode="w",
                delete=False,
                suffix=".txt",
                prefix="coding_tools_",
            )
            tmp.write(output)
            tmp.close()
            self._temp_files.append(tmp.name)
            truncated_output += f"\n[Output truncated: {total_lines} lines total. Full output saved to: {tmp.name}]"
        return header + truncated_output
    except subprocess.TimeoutExpired:
        # Recomputed here rather than relying on a name bound in the try body.
        effective_timeout = timeout if timeout is not None else self.shell_timeout
        return f"Error: Command timed out after {effective_timeout} seconds"
    except Exception as e:
        log_error(f"Error running shell command: {e}")
        return f"Error running shell command: {e}"
def grep(
    self,
    pattern: str,
    path: Optional[str] = None,
    ignore_case: bool = False,
    include: Optional[str] = None,
    context: int = 0,
    limit: int = 100,
) -> str:
    """Search file contents for a pattern.
    Returns matching lines with file paths and line numbers. Output is
    truncated if it exceeds limits.
    NOTE(review): plain `grep -r` does not respect .gitignore — verify
    before documenting that behavior to users.
    :param pattern: Search pattern (regex by default).
    :param path: Directory or file to search in (default: base directory).
    :param ignore_case: Case-insensitive search (default: False).
    :param include: Filter files by glob pattern, e.g. '*.py'.
    :param context: Number of lines to show before and after each match (default: 0).
    :param limit: Maximum number of matches to return (default: 100).
    :return: Matching lines with file paths and line numbers, or an error message.
    """
    try:
        if not pattern:
            return "Error: Pattern cannot be empty"
        # Resolve search path (defaults to the sandbox root)
        if path:
            safe, resolved_path = self._check_path(path, self.base_dir, self.restrict_to_base_dir)
            if not safe:
                return f"Error: Path '{path}' is outside the allowed base directory"
        else:
            resolved_path = self.base_dir
        if not resolved_path.exists():
            return f"Error: Path not found: {path or '.'}"
        # Build grep command: -r recurses, -n prefixes line numbers
        cmd = ["grep", "-rn"]
        if ignore_case:
            cmd.append("-i")
        if context > 0:
            cmd.extend(["-C", str(context)])
        if include:
            cmd.extend(["--include", include])
        cmd.append(pattern)
        cmd.append(str(resolved_path))
        # List-form argv (no shell) so pattern/path can't inject shell syntax
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=30,
            cwd=str(self.base_dir),
        )
        output = result.stdout
        if not output:
            # grep exits 1 for "no matches", which is not an error
            if result.returncode == 1:
                return f"No matches found for pattern: {pattern}"
            if result.stderr:
                return f"Error: {result.stderr.strip()}"
            return f"No matches found for pattern: {pattern}"
        # Make paths relative to base_dir
        base_str = str(self.base_dir) + "/"
        output = output.replace(base_str, "")
        # Enforce global match limit.
        # NOTE(review): the limit is applied to output lines, so when
        # context > 0 the context lines count toward it as well.
        output_lines = output.split("\n")
        if len(output_lines) > limit:
            output = "\n".join(output_lines[:limit])
            output += f"\n[Results limited to {limit} matches]"
        # Apply truncation
        output, was_truncated, total_lines = self._truncate_output(output)
        if was_truncated:
            output += f"\n[Output truncated: {total_lines} lines total]"
        return output
    except subprocess.TimeoutExpired:
        return "Error: grep timed out after 30 seconds"
    except FileNotFoundError:
        return "Error: grep command not found. Install grep to use this tool."
    except Exception as e:
        log_error(f"Error running grep: {e}")
        return f"Error running grep: {e}"
def find(self, pattern: str, path: Optional[str] = None, limit: int = 500) -> str:
    """Search for files by glob pattern.
    Returns matching paths relative to the base directory, sorted
    alphabetically, with a '/' suffix on directories.
    :param pattern: Glob pattern to match files, e.g. '*.py', '**/*.json'.
    :param path: Directory to search in (default: base directory).
    :param limit: Maximum number of results (default: 500).
    :return: Matching file paths, one per line, or an error message.
    """
    try:
        if not pattern:
            return "Error: Pattern cannot be empty"
        # Resolve the directory to search, defaulting to the sandbox root.
        if path:
            safe, search_root = self._check_path(path, self.base_dir, self.restrict_to_base_dir)
            if not safe:
                return f"Error: Path '{path}' is outside the allowed base directory"
        else:
            search_root = self.base_dir
        if not search_root.exists():
            return f"Error: Path not found: {path or '.'}"
        if not search_root.is_dir():
            return f"Error: Not a directory: {path}"
        hits: List[str] = []
        for candidate in search_root.glob(pattern):
            try:
                relative = candidate.relative_to(self.base_dir)
            except ValueError:
                # Matches that resolve outside base_dir are skipped.
                continue
            hits.append(f"{relative}/" if candidate.is_dir() else str(relative))
            if len(hits) >= limit:
                break
        if not hits:
            return f"No files found matching pattern: {pattern}"
        listing = "\n".join(sorted(hits))
        if len(hits) >= limit:
            listing += f"\n[Results limited to {limit} entries]"
        return listing
    except Exception as e:
        log_error(f"Error finding files: {e}")
        return f"Error finding files: {e}"
def ls(self, path: Optional[str] = None, limit: int = 500) -> str:
    """List directory contents.
    Entries are sorted case-insensitively and directories carry a trailing
    '/'. Dotfiles are included.
    :param path: Directory to list (default: base directory).
    :param limit: Maximum number of entries to return (default: 500).
    :return: Directory listing, one entry per line, or an error message.
    """
    try:
        # Resolve the target directory, defaulting to the sandbox root.
        if path:
            safe, target = self._check_path(path, self.base_dir, self.restrict_to_base_dir)
            if not safe:
                return f"Error: Path '{path}' is outside the allowed base directory"
        else:
            target = self.base_dir
        if not target.exists():
            return f"Error: Path not found: {path or '.'}"
        if not target.is_dir():
            return f"Error: Not a directory: {path}"
        names: List[str] = []
        for child in sorted(target.iterdir(), key=lambda p: p.name.lower()):
            names.append(child.name + ("/" if child.is_dir() else ""))
            if len(names) >= limit:
                break
        if not names:
            return f"Directory is empty: {path or '.'}"
        listing = "\n".join(names)
        if len(names) >= limit:
            listing += f"\n[Listing limited to {limit} entries]"
        return listing
    except PermissionError:
        return f"Error: Permission denied: {path or '.'}"
    except Exception as e:
        log_error(f"Error listing directory: {e}")
        return f"Error listing directory: {e}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/coding.py",
"license": "Apache License 2.0",
"lines": 602,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.