sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/tests/unit/agent/test_basic.py | from agno.agent.agent import Agent
from agno.utils.string import is_valid_uuid
def test_set_id():
    """An explicitly provided agent id must survive set_id()."""
    agent = Agent(id="test_id")
    agent.set_id()
    assert agent.id == "test_id"
def test_set_id_from_name():
    """When only a name is given, set_id() derives a slug-style id from it."""
    agent = Agent(name="Test Name")
    agent.set_id()
    first_id = agent.id
    assert first_id == "test-name"
    # Calling set_id() again must be deterministic and keep the same id.
    agent.set_id()
    assert agent.id == first_id
def test_set_id_auto_generated():
    """Without an id or a name, set_id() falls back to a generated UUID."""
    agent = Agent()
    agent.set_id()
    assert is_valid_uuid(agent.id)
def test_deep_copy():
    """Test that Agent.deep_copy() works with all dataclass fields.

    deep_copy() feeds every dataclass field back into __init__, so this
    guards against TypeError for unexpected keyword arguments whenever
    fields with defaults are added to the Agent dataclass.
    """
    source = Agent(name="test-agent")

    # Must not raise TypeError about unexpected keyword arguments.
    clone = source.deep_copy()

    # A distinct instance is returned, carrying the same values and defaults.
    assert clone is not source
    assert clone.name == source.name
    assert clone.user_message_role == "user"
    assert clone.system_message_role == "system"

    # deep_copy(update=...) applies the overrides on the new copy.
    renamed = source.deep_copy(update={"name": "updated-agent"})
    assert renamed.name == "updated-agent"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_basic.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_basic.py | from unittest.mock import MagicMock
import pytest
pytest.importorskip("duckduckgo_search")
pytest.importorskip("yfinance")
from agno.agent import Agent
from agno.models.message import Message
from agno.models.metrics import RunMetrics
from agno.models.openai import OpenAIChat
from agno.run import RunContext
from agno.run.team import TeamRunOutput
from agno.session.team import TeamSession
from agno.team._run import _asetup_session
from agno.team.team import Team
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
from agno.utils.string import is_valid_uuid
@pytest.fixture
def team():
    """A two-member router team (web search + finance) backed by gpt-4o."""
    members = [
        Agent(
            name="Web Agent",
            model=OpenAIChat("gpt-4o"),
            role="Search the web for information",
            tools=[WebSearchTools(cache_results=True)],
        ),
        Agent(
            name="Finance Agent",
            model=OpenAIChat("gpt-4o"),
            role="Get financial data",
            tools=[YFinanceTools(include_tools=["get_current_stock_price"])],
        ),
    ]
    return Team(name="Router Team", model=OpenAIChat("gpt-4o"), members=members)
def test_team_system_message_content(team):
    """Test basic functionality of a route team."""
    members_content = team.get_members_system_message_content()
    # Every member must be listed with its id, name and role.
    expected_snippets = (
        "Agent 1:",
        "ID: web-agent",
        "Name: Web Agent",
        "Role: Search the web for information",
        "Agent 2:",
        "ID: finance-agent",
        "Name: Finance Agent",
        "Role: Get financial data",
    )
    for snippet in expected_snippets:
        assert snippet in members_content
def test_delegate_to_wrong_member(team):
    # Delegating to an unknown member id should surface an error message in
    # the yielded output rather than raising.
    function = team._get_delegate_task_function(
        session=TeamSession(session_id="test-session"),
        run_response=TeamRunOutput(content="Hello, world!"),
        run_context=RunContext(session_state={}, run_id="test-run", session_id="test-session"),
        team_run_context={},
    )
    # entrypoint returns an iterable; the first item carries the error text.
    response = list(function.entrypoint(member_id="wrong-agent", task="Get the current stock price of AAPL"))
    assert "Member with ID wrong-agent not found in the team or any subteams" in response[0]
def test_set_id():
    """An explicitly provided team id must survive set_id()."""
    team = Team(id="test_id", members=[])
    team.set_id()
    assert team.id == "test_id"
def test_set_id_from_name():
    """set_id() derives a deterministic slug-style id from the team name."""
    team = Team(name="Test Name", members=[])
    team.set_id()
    generated = team.id
    assert generated is not None
    assert generated == "test-name"
    # Clearing the id and regenerating must produce the same value.
    team.id = None
    team.set_id()
    assert team.id == generated
def test_set_id_auto_generated():
    """Without an id or a name, set_id() assigns a generated UUID."""
    team = Team(members=[])
    team.set_id()
    assert team.id is not None
    assert is_valid_uuid(team.id)
def test_team_calculate_metrics_preserves_duration(team):
    """Test that _calculate_metrics preserves the duration from current_run_metrics."""
    run_metrics = RunMetrics()
    run_metrics.duration = 5.5
    run_metrics.time_to_first_token = 0.5

    msg_metrics = RunMetrics()
    msg_metrics.input_tokens = 10
    msg_metrics.output_tokens = 20
    messages = [Message(role="assistant", content="Response", metrics=msg_metrics)]

    # Feed the metrics that already carry the run duration into the calculation.
    result = team._calculate_metrics(messages, current_run_metrics=run_metrics)

    # Token counts come from the message metrics (0 initial + 10/20).
    assert result.input_tokens == 10
    assert result.output_tokens == 20
    # Timing fields must be carried over from the initial run metrics untouched.
    assert result.duration == 5.5
    assert result.time_to_first_token == 0.5
def test_team_update_session_metrics_accumulates(team):
    """Test that _update_session_metrics correctly accumulates metrics using run_response."""
    session = TeamSession(session_id="test_session")
    session.session_data = {}
    # First Run: seeds session_data["session_metrics"] from an empty session
    run1 = TeamRunOutput(content="run 1")
    run1.metrics = RunMetrics()
    run1.metrics.duration = 2.0
    run1.metrics.input_tokens = 100
    team._update_session_metrics(session, run_response=run1)
    metrics1 = session.session_data["session_metrics"]
    assert metrics1.duration == 2.0
    assert metrics1.input_tokens == 100
    # Second Run
    run2 = TeamRunOutput(content="run 2")
    run2.metrics = RunMetrics()
    run2.metrics.duration = 3.0
    run2.metrics.input_tokens = 50
    # Should accumulate with previous session metrics
    team._update_session_metrics(session, run_response=run2)
    metrics2 = session.session_data["session_metrics"]
    assert metrics2.duration == 5.0  # 2.0 + 3.0
    assert metrics2.input_tokens == 150  # 100 + 50
@pytest.mark.asyncio
async def test_asetup_session_resolves_deps_after_state_loaded():
    """Verify callable dependencies are resolved AFTER session state is loaded from DB.

    This is a regression test: if dependency resolution runs before state loading,
    the callable won't see DB-stored session state values.
    """
    from unittest.mock import patch

    import agno.team._run as run_module

    # Create a session with DB-stored state
    db_session = TeamSession(session_id="test-session")
    db_session.session_data = {"session_state": {"from_db": "loaded"}}

    # Track the session_state snapshot at the time _aresolve_run_dependencies is called
    captured_state = {}

    async def capture_state_on_resolve(team, run_context):
        """Record the session_state visible at dependency-resolution time.

        Replaces _aresolve_run_dependencies entirely (it only captures; it
        does not perform the real resolution).
        """
        captured_state.update(run_context.session_state or {})

    # Create a minimal Team mock (only used to pass to the functions)
    team = MagicMock()
    run_context = RunContext(
        run_id="test-run",
        session_id="test-session",
        session_state={},
        dependencies={"some_dep": lambda: "value"},
    )

    # Mock the submodule functions at their source modules (where they're imported FROM)
    with (
        patch("agno.team._init._has_async_db", return_value=False),
        patch("agno.team._storage._read_or_create_session", return_value=db_session),
        patch("agno.team._storage._update_metadata", return_value=None),
        patch("agno.team._init._initialize_session_state", side_effect=lambda team, session_state, **kw: session_state),
        patch(
            "agno.team._storage._load_session_state",
            # Merge DB-stored state over the in-memory state, like the real loader.
            side_effect=lambda team, session, session_state: {
                **session_state,
                **session.session_data.get("session_state", {}),
            },
        ),
        patch.object(run_module, "_aresolve_run_dependencies", side_effect=capture_state_on_resolve),
    ):
        result_session = await _asetup_session(
            team=team,
            run_context=run_context,
            session_id="test-session",
            user_id=None,
            run_id="test-run",
        )

    assert result_session == db_session
    # At the time deps were resolved, session_state should already contain DB values
    assert captured_state.get("from_db") == "loaded"
    # And run_context.session_state should have the loaded value
    assert run_context.session_state["from_db"] == "loaded"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_basic.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/telemetry/test_agent_telemetry.py | from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.agent.agent import Agent
def test_agent_telemetry():
    """Test that telemetry logging is called during sync agent run."""
    agent = Agent()
    # Telemetry must be enabled by default.
    assert agent.telemetry

    # Patch the logging function in the _telemetry module, where _run.py looks it up.
    with patch("agno.agent._telemetry.log_agent_telemetry") as mock_log:
        agent.model = MagicMock()
        agent.run("This is a test run")

        # Exactly one telemetry call, with both ids passed by keyword and populated.
        mock_log.assert_called_once()
        kwargs = mock_log.call_args.kwargs
        for key in ("session_id", "run_id"):
            assert key in kwargs
            assert kwargs[key] is not None
@pytest.mark.asyncio
async def test_agent_telemetry_async():
    """Test that telemetry logging is called during async agent run."""
    agent = Agent()
    # Telemetry must be enabled by default.
    assert agent.telemetry

    # Patch the async logging function in the _telemetry module (used by _run.py).
    with patch("agno.agent._telemetry.alog_agent_telemetry") as mock_alog:
        model = AsyncMock()
        # These two accessors are called synchronously, so use plain mocks.
        model.get_instructions_for_model = MagicMock(return_value=None)
        model.get_system_message_for_model = MagicMock(return_value=None)
        agent.model = model
        await agent.arun("This is a test run")

        # Exactly one telemetry call, with both ids passed by keyword and populated.
        mock_alog.assert_called_once()
        kwargs = mock_alog.call_args.kwargs
        for key in ("session_id", "run_id"):
            assert key in kwargs
            assert kwargs[key] is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/telemetry/test_agent_telemetry.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/telemetry/test_eval_telemetry.py | from unittest.mock import MagicMock, patch
from agno.agent.agent import Agent
from agno.eval.accuracy import AccuracyEval
from agno.eval.agent_as_judge import AgentAsJudgeEval
from agno.eval.performance import PerformanceEval
from agno.eval.reliability import ReliabilityEval
def test_accuracy_evals_telemetry():
    """Test that telemetry logging is called during sync accuracy eval run."""
    agent = Agent()
    accuracy_eval = AccuracyEval(agent=agent, input="What is the capital of France?", expected_output="Paris")
    # Telemetry must be enabled by default.
    assert accuracy_eval.telemetry

    # Intercept the API call that is made when telemetry is enabled.
    with patch("agno.api.evals.create_eval_run_telemetry") as mock_create:
        agent.model = MagicMock()
        accuracy_eval.run(print_summary=False, print_results=False)

        # The run is reported once, carrying the eval's id and type.
        mock_create.assert_called_once()
        eval_run = mock_create.call_args[1]["eval_run"]
        assert eval_run.run_id == accuracy_eval.eval_id
        assert eval_run.eval_type.value == "accuracy"
def test_performance_evals_telemetry():
    """Test that telemetry works for performance evaluations."""

    def sample_func():
        return "test result"

    performance_eval = PerformanceEval(func=sample_func)
    # Telemetry must be enabled by default.
    assert performance_eval.telemetry

    # Intercept the API call that is made when telemetry is enabled.
    with patch("agno.api.evals.create_eval_run_telemetry") as mock_create:
        performance_eval.run()

        # The run is reported once, carrying the eval's id and type.
        mock_create.assert_called_once()
        eval_run = mock_create.call_args[1]["eval_run"]
        assert eval_run.run_id == performance_eval.eval_id
        assert eval_run.eval_type.value == "performance"
def test_reliability_evals_telemetry():
    """Test that telemetry works for reliability evaluations."""
    from agno.models.message import Message
    from agno.run.agent import RunOutput

    # Build a fake agent response containing exactly one tool call.
    tool_call = {"id": "call_1", "type": "function", "function": {"name": "test_tool", "arguments": "{}"}}
    message = Message(role="assistant", content="Test response", tool_calls=[tool_call])
    agent_response = RunOutput(content="Test response", messages=[message])
    reliability_eval = ReliabilityEval(agent_response=agent_response, expected_tool_calls=["test_tool"])
    # Telemetry must be enabled by default.
    assert reliability_eval.telemetry

    # Intercept the API call that is made when telemetry is enabled.
    with patch("agno.api.evals.create_eval_run_telemetry") as mock_create:
        reliability_eval.run(print_results=False)

        # The run is reported once, carrying the eval's id and type.
        mock_create.assert_called_once()
        eval_run = mock_create.call_args[1]["eval_run"]
        assert eval_run.run_id == reliability_eval.eval_id
        assert eval_run.eval_type.value == "reliability"
def test_agent_as_judge_numeric_telemetry():
    """Test that telemetry works for agent-as-judge evaluations (numeric mode)."""
    # Named `judge_eval` (not `eval`) to avoid shadowing the `eval` builtin.
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be helpful",
        scoring_strategy="numeric",
        threshold=7,
    )
    # Assert telemetry is active by default
    assert judge_eval.telemetry
    evaluator = judge_eval.get_evaluator_agent()
    evaluator.model = MagicMock()
    # Mock the API call that gets made when telemetry is enabled
    with patch("agno.api.evals.create_eval_run_telemetry") as mock_create:
        judge_eval.run(input="What is Python?", output="Python is a programming language.", print_results=False)
        # Verify API was called with correct parameters
        mock_create.assert_called_once()
        eval_run = mock_create.call_args[1]["eval_run"]
        assert eval_run.eval_type.value == "agent_as_judge"
def test_agent_as_judge_binary_telemetry():
    """Test that telemetry works for agent-as-judge evaluations (binary mode)."""
    # Named `judge_eval` (not `eval`) to avoid shadowing the `eval` builtin.
    judge_eval = AgentAsJudgeEval(
        criteria="Response must not contain personal info",
        scoring_strategy="binary",
    )
    # Assert telemetry is active by default
    assert judge_eval.telemetry
    evaluator = judge_eval.get_evaluator_agent()
    evaluator.model = MagicMock()
    # Mock the API call that gets made when telemetry is enabled
    with patch("agno.api.evals.create_eval_run_telemetry") as mock_create:
        judge_eval.run(input="Tell me about privacy", output="Privacy is important.", print_results=False)
        # Verify API was called with correct parameters
        mock_create.assert_called_once()
        eval_run = mock_create.call_args[1]["eval_run"]
        assert eval_run.eval_type.value == "agent_as_judge"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/telemetry/test_eval_telemetry.py",
"license": "Apache License 2.0",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/telemetry/test_os_telemetry.py | from unittest.mock import patch
from agno.agent.agent import Agent
from agno.os import AgentOS
def test_accuracy_evals_telemetry():
    """Test that telemetry logging is called when initializing an AgentOS instance.

    NOTE(review): the function name looks copy-pasted from the eval telemetry
    tests; this actually exercises AgentOS launch telemetry. Kept unchanged
    to avoid churning test ids, but worth renaming to test_os_telemetry.
    """
    agent = Agent()
    # Mock the API call that gets made when telemetry is enabled
    with patch("agno.api.os.log_os_telemetry") as mock_create:
        # `agent_os` rather than `os`, so the stdlib module name is not shadowed.
        agent_os = AgentOS(id="test", agents=[agent])
        # Assert telemetry is active by default
        assert agent_os.telemetry
        # Verify API was called with the configured OS id
        mock_create.assert_called_once()
        launch = mock_create.call_args[1]["launch"]
        assert launch.os_id == "test"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/telemetry/test_os_telemetry.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/telemetry/test_team_telemetry.py | from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.agent.agent import Agent
from agno.team.team import Team
def test_team_telemetry():
    """Test that telemetry logging is called during sync team run."""
    agent = Agent()
    team = Team(members=[agent])
    # Telemetry must be enabled by default.
    assert team.telemetry

    # Patch the logging function in the _telemetry module, where _run.py looks it up.
    with patch("agno.team._telemetry.log_team_telemetry") as mock_log:
        agent.model = MagicMock()
        team.model = MagicMock()
        team.run("This is a test run")

        # Exactly one telemetry call, with both ids passed by keyword and populated.
        mock_log.assert_called_once()
        kwargs = mock_log.call_args.kwargs
        for key in ("session_id", "run_id"):
            assert key in kwargs
            assert kwargs[key] is not None
@pytest.mark.asyncio
async def test_team_telemetry_async():
    """Test that telemetry logging is called during async team run."""
    agent = Agent()
    team = Team(members=[agent])
    # Telemetry must be enabled by default.
    assert team.telemetry

    # Patch the async logging function in the _telemetry module (used by _run.py).
    with patch("agno.team._telemetry.alog_team_telemetry") as mock_alog:
        model = AsyncMock()
        # These two accessors are called synchronously, so use plain mocks.
        model.get_instructions_for_model = MagicMock(return_value=None)
        model.get_system_message_for_model = MagicMock(return_value=None)
        agent.model = model
        team.model = model
        await team.arun("This is a test run")

        # Exactly one telemetry call, with both ids passed by keyword and populated.
        mock_alog.assert_called_once()
        kwargs = mock_alog.call_args.kwargs
        for key in ("session_id", "run_id"):
            assert key in kwargs
            assert kwargs[key] is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/telemetry/test_team_telemetry.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/telemetry/test_workflow_telemetry.py | from unittest.mock import patch
import pytest
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
def dummy_step(step_input):
    """Simple dummy step for testing: ignores its input and returns a fixed StepOutput."""
    # Imported locally so the module can be collected without workflow extras loaded.
    from agno.workflow.types import StepOutput

    return StepOutput(content="Test step executed")
def test_workflow_telemetry():
    """Test that telemetry logging is called during sync workflow run."""
    workflow = Workflow(steps=[Step(executor=dummy_step)])
    # Telemetry must be enabled by default.
    assert workflow.telemetry

    # Patch the telemetry logging method on this workflow instance.
    with patch.object(workflow, "_log_workflow_telemetry") as mock_log:
        workflow.run("This is a test run")

        # Exactly one telemetry call, with both ids passed by keyword and populated.
        mock_log.assert_called_once()
        kwargs = mock_log.call_args.kwargs
        for key in ("session_id", "run_id"):
            assert key in kwargs
            assert kwargs[key] is not None
@pytest.mark.asyncio
async def test_workflow_telemetry_async():
    """Test that telemetry logging is called during async workflow run."""
    workflow = Workflow(steps=[Step(executor=dummy_step)])
    # Telemetry must be enabled by default.
    assert workflow.telemetry

    # Patch the async telemetry logging method on this workflow instance.
    with patch.object(workflow, "_alog_workflow_telemetry") as mock_alog:
        await workflow.arun("This is a test run")

        # Exactly one telemetry call, with both ids passed by keyword and populated.
        mock_alog.assert_called_once()
        kwargs = mock_alog.call_args.kwargs
        for key in ("session_id", "run_id"):
            assert key in kwargs
            assert kwargs[key] is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/telemetry/test_workflow_telemetry.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_logging.py | from unittest.mock import Mock
from agno.utils import log as log_module
from agno.utils.log import configure_agno_logging
def test_configure_agno_logging_sets_global_logger():
    """Test that configure_agno_logging can set the global logger."""
    mock_logger = Mock()
    # Remember the original so we can restore global state after the test.
    original_logger = log_module.logger
    try:
        # Configure Agno to use the custom logger and assert it works
        configure_agno_logging(custom_default_logger=mock_logger)
        assert log_module.logger is mock_logger
        # Import our general log_info, call it and assert it used our custom logger
        from agno.utils.log import log_info
        log_info("Test message")
        mock_logger.info.assert_called_once_with("Test message")
    finally:
        # Restore even on assertion failure so other tests see the real logger.
        log_module.logger = original_logger
def test_configure_agno_logging_sets_specialized_loggers():
    """Test that configure_agno_logging can set agent_logger, team_logger, and workflow_logger."""
    mock_agent_logger = Mock()
    mock_team_logger = Mock()
    mock_workflow_logger = Mock()
    # Remember the originals so global state can be restored after the test.
    original_agent_logger = log_module.agent_logger
    original_team_logger = log_module.team_logger
    original_workflow_logger = log_module.workflow_logger
    try:
        configure_agno_logging(
            custom_agent_logger=mock_agent_logger,
            custom_team_logger=mock_team_logger,
            custom_workflow_logger=mock_workflow_logger,
        )
        # Each specialized logger must be replaced with the provided mock.
        assert log_module.agent_logger is mock_agent_logger
        assert log_module.team_logger is mock_team_logger
        assert log_module.workflow_logger is mock_workflow_logger
    finally:
        # Restore even on assertion failure so other tests see the real loggers.
        log_module.agent_logger = original_agent_logger
        log_module.team_logger = original_team_logger
        log_module.workflow_logger = original_workflow_logger
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_logging.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_team.py | import uuid
from unittest.mock import patch
import pytest
from agno.agent.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team.team import Team
from agno.utils.team import get_member_id
@pytest.fixture
def team_show_member_responses_true():
    """Team configured with show_members_responses enabled."""
    member = Agent(name="Test Agent", model=OpenAIChat(id="gpt-4o-mini"))
    return Team(
        name="Test Team",
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        show_members_responses=True,
    )
@pytest.fixture
def team_show_member_responses_false():
    """Team configured with show_members_responses disabled."""
    member = Agent(name="Test Agent", model=OpenAIChat(id="gpt-4o-mini"))
    return Team(
        name="Test Team",
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        show_members_responses=False,
    )
def test_show_member_responses_fallback(team_show_member_responses_true):
    """Test fallback to team.show_members_responses"""
    with patch("agno.team._cli.print_response") as mock_print:
        team_show_member_responses_true.print_response("test", stream=False)
    # With no explicit argument, the team's own setting is forwarded.
    assert mock_print.call_args[1]["show_member_responses"] is True
def test_show_member_responses_override_false(team_show_member_responses_true):
    """Test parameter overrides team default"""
    with patch("agno.team._cli.print_response") as mock_print:
        team_show_member_responses_true.print_response("test", stream=False, show_member_responses=False)
    # The explicit False wins over the team's True default.
    assert mock_print.call_args[1]["show_member_responses"] is False
def test_show_member_responses_override_true(team_show_member_responses_false):
    """Test parameter overrides team default"""
    with patch("agno.team._cli.print_response") as mock_print:
        team_show_member_responses_false.print_response("test", stream=False, show_member_responses=True)
    # The explicit True wins over the team's False default.
    assert mock_print.call_args[1]["show_member_responses"] is True
def test_show_member_responses_streaming(team_show_member_responses_true):
    """Test parameter with streaming"""
    with patch("agno.team._cli.print_response_stream") as mock_stream:
        team_show_member_responses_true.print_response("test", stream=True, show_member_responses=False)
    # The override is forwarded to the streaming printer as well.
    assert mock_stream.call_args[1]["show_member_responses"] is False
@pytest.mark.asyncio
async def test_async_show_member_responses_fallback(team_show_member_responses_true):
    """Test fallback to team.show_members_responses"""
    with patch("agno.team._cli.aprint_response") as mock_print:
        await team_show_member_responses_true.aprint_response("test", stream=False)
    # With no explicit argument, the team's own setting is forwarded.
    assert mock_print.call_args[1]["show_member_responses"] is True
@pytest.mark.asyncio
async def test_async_show_member_responses_override_false(team_show_member_responses_true):
    """Test parameter overrides team default"""
    with patch("agno.team._cli.aprint_response") as mock_print:
        await team_show_member_responses_true.aprint_response("test", stream=False, show_member_responses=False)
    # The explicit False wins over the team's True default.
    assert mock_print.call_args[1]["show_member_responses"] is False
@pytest.mark.asyncio
async def test_async_show_member_responses_override_true(team_show_member_responses_false):
    """Test parameter overrides team default"""
    with patch("agno.team._cli.aprint_response") as mock_print:
        await team_show_member_responses_false.aprint_response("test", stream=False, show_member_responses=True)
    # The explicit True wins over the team's False default.
    assert mock_print.call_args[1]["show_member_responses"] is True
@pytest.mark.asyncio
async def test_async_show_member_responses_streaming(team_show_member_responses_true):
    """Test parameter override with streaming"""
    with patch("agno.team._cli.aprint_response_stream") as mock_stream:
        await team_show_member_responses_true.aprint_response("test", stream=True, show_member_responses=False)
    # The override is forwarded to the async streaming printer as well.
    assert mock_stream.call_args[1]["show_member_responses"] is False
def test_get_member_id():
    """get_member_id precedence: an explicit id (custom string or UUID) always
    wins over the name; a bare name is slugified; Agent and Team behave alike."""
    # Agent with only name -> use name
    member = Agent(name="Test Agent")
    assert get_member_id(member) == "test-agent"
    # Agent with custom (non-UUID) id -> use id (takes priority over name)
    member = Agent(name="Test Agent", id="123")
    assert get_member_id(member) == "123"
    # Agent with UUID id -> use the UUID id (takes priority over name)
    uuid_id = str(uuid.uuid4())
    member = Agent(name="Test Agent", id=uuid_id)
    assert get_member_id(member) == uuid_id
    # Agent with only UUID id (no name) -> use the UUID id
    member = Agent(id=str(uuid.uuid4()))
    assert get_member_id(member) == member.id
    # Team with only name -> use name
    member = Agent(name="Test Agent")
    inner_team = Team(name="Test Team", members=[member])
    assert get_member_id(inner_team) == "test-team"
    # Team with custom (non-UUID) id -> use id (takes priority over name)
    inner_team = Team(name="Test Team", id="123", members=[member])
    assert get_member_id(inner_team) == "123"
    # Team with UUID id -> use the UUID id (takes priority over name)
    uuid_id = str(uuid.uuid4())
    inner_team = Team(name="Test Team", id=uuid_id, members=[member])
    assert get_member_id(inner_team) == uuid_id
    # Team with only UUID id (no name) -> use the UUID id
    inner_team = Team(id=str(uuid.uuid4()), members=[member])
    assert get_member_id(inner_team) == inner_team.id
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_team.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/vectordb/test_upstashdb.py | import os
from typing import List
from unittest.mock import MagicMock, Mock, patch
import pytest
from agno.knowledge.document import Document
from agno.vectordb.upstashdb import UpstashVectorDb
@pytest.fixture
def mock_embedder():
    """Fixture to create a mock embedder producing 384-dim vectors."""
    fake = MagicMock()
    fake.dimensions = 384
    fake.embedding_dim = 384
    fake.get_embedding.return_value = [0.1] * 384
    return fake
@pytest.fixture
def mock_upstash_index():
    """Fixture to create a mock Upstash index.

    Patches upstash_vector.Index so no network calls are made, and wires
    canned responses for every index operation the tests exercise.
    """
    with patch("upstash_vector.Index") as mock_index_class:
        mock_index = Mock()
        mock_index_class.return_value = mock_index
        # Mock info response
        mock_info = Mock()
        mock_info.vector_count = 0
        mock_info.dimension = 384
        mock_index.info.return_value = mock_info
        # Mock upsert response
        mock_index.upsert.return_value = "Success"
        # Mock query response
        mock_index.query.return_value = []
        # Mock delete response
        mock_delete_result = Mock()
        mock_delete_result.deleted = 0
        mock_index.delete.return_value = mock_delete_result
        # Mock fetch response
        mock_index.fetch.return_value = []
        # Mock reset response
        mock_index.reset.return_value = "Success"
        # Yield (not return) so the patch stays active for the whole test.
        yield mock_index
@pytest.fixture
def upstash_db(mock_upstash_index):
    """Fixture to create an UpstashVectorDb instance with mocked dependencies.

    embedder=None selects Upstash-hosted embeddings; the private _index is
    replaced with the mocked index so no real requests are made.
    """
    with patch.dict(
        os.environ,
        {"UPSTASH_VECTOR_REST_URL": "https://test-url.upstash.io", "UPSTASH_VECTOR_REST_TOKEN": "test-token"},
    ):
        db = UpstashVectorDb(
            url="https://test-url.upstash.io",
            token="test-token",
            embedder=None,  # Use Upstash embeddings
        )
        db._index = mock_upstash_index
        yield db
@pytest.fixture
def upstash_db_with_custom_embedder(mock_upstash_index, mock_embedder):
    """Fixture to create an UpstashVectorDb instance with a custom (mock) embedder.

    The private _index is replaced with the mocked index so no real requests
    are made.
    """
    with patch.dict(
        os.environ,
        {"UPSTASH_VECTOR_REST_URL": "https://test-url.upstash.io", "UPSTASH_VECTOR_REST_TOKEN": "test-token"},
    ):
        db = UpstashVectorDb(url="https://test-url.upstash.io", token="test-token", embedder=mock_embedder)
        db._index = mock_upstash_index
        yield db
@pytest.fixture
def sample_documents() -> List[Document]:
    """Fixture to create sample documents (three Thai dishes)."""
    specs = [
        ("doc_1", "tom_kha", "Tom Kha Gai is a Thai coconut soup with chicken", "soup"),
        ("doc_2", "pad_thai", "Pad Thai is a stir-fried rice noodle dish", "noodles"),
        ("doc_3", "green_curry", "Green curry is a spicy Thai curry with coconut milk", "curry"),
    ]
    return [
        Document(
            content=content,
            meta_data={"cuisine": "Thai", "type": dish_type},
            name=name,
            id=doc_id,
        )
        for doc_id, name, content, dish_type in specs
    ]
def test_initialization_with_embedder(mock_embedder):
    """Test UpstashVectorDb initialization with custom embedder"""
    db = UpstashVectorDb(url="https://test-url.upstash.io", token="test-token", embedder=mock_embedder)
    assert db.url == "https://test-url.upstash.io"
    assert db.token == "test-token"
    assert db.embedder == mock_embedder
    # Supplying an embedder disables Upstash-hosted embeddings.
    assert db.use_upstash_embeddings is False
def test_initialization_without_embedder():
    """Test UpstashVectorDb initialization without embedder (Upstash embeddings)"""
    db = UpstashVectorDb(url="https://test-url.upstash.io", token="test-token")
    assert db.url == "https://test-url.upstash.io"
    assert db.token == "test-token"
    # No embedder supplied -> fall back to Upstash-hosted embeddings.
    assert db.embedder is None
    assert db.use_upstash_embeddings is True
def test_exists(upstash_db):
    """Test index existence check"""
    # A healthy info() call means the index exists.
    assert upstash_db.exists() is True
    # An info() failure is treated as "index does not exist".
    upstash_db.index.info.side_effect = Exception("Index not found")
    assert upstash_db.exists() is False
def test_upsert_with_upstash_embeddings(upstash_db, sample_documents):
    """Test upserting documents with Upstash embeddings"""
    upstash_db.upsert(documents=sample_documents, content_hash="test_hash")

    # A single upsert call should carry all documents as vectors.
    upstash_db.index.upsert.assert_called_once()
    vectors = upstash_db.index.upsert.call_args[0][0]  # first positional argument
    assert len(vectors) == 3
    for document, vector in zip(sample_documents, vectors):
        # Raw content is sent as `data` so Upstash embeds it server-side.
        assert vector.id == document.id
        assert vector.data == document.content
        assert vector.metadata["name"] == document.name
        assert vector.metadata["cuisine"] == document.meta_data["cuisine"]
def test_upsert_with_filters(upstash_db, sample_documents):
    """Filters passed to upsert should be merged into every vector's metadata."""
    upstash_db.upsert(
        documents=sample_documents, content_hash="test_hash", filters={"source": "test", "version": "1.0"}
    )

    # Every upserted vector must carry the filter keys in its metadata
    upserted = upstash_db.index.upsert.call_args[0][0]
    for vector in upserted:
        assert vector.metadata["source"] == "test"
        assert vector.metadata["version"] == "1.0"
def test_upsert_with_content_id(upstash_db, sample_documents):
    """A document's content_id should be propagated into the vector metadata."""
    # Attach a distinct content_id to each sample document
    for index, doc in enumerate(sample_documents, start=1):
        doc.content_id = f"content_{index}"

    upstash_db.upsert(documents=sample_documents, content_hash="test_hash")

    upserted = upstash_db.index.upsert.call_args[0][0]
    for index, vector in enumerate(upserted, start=1):
        assert vector.metadata["content_id"] == f"content_{index}"
def test_search_with_upstash_embeddings(upstash_db):
    """search() should forward raw query text and map index hits back to Documents."""
    # One fake hit coming back from the index
    hit = Mock()
    hit.id = "doc_1"
    hit.data = "Tom Kha Gai is a Thai coconut soup"
    hit.metadata = {"cuisine": "Thai"}
    hit.vector = [0.1] * 384
    upstash_db.index.query.return_value = [hit]

    documents = upstash_db.search("Thai soup", limit=5)

    assert len(documents) == 1
    assert documents[0].id == "doc_1"
    assert documents[0].content == "Tom Kha Gai is a Thai coconut soup"
    assert documents[0].meta_data["cuisine"] == "Thai"

    # The query is sent as plain text so Upstash embeds it server-side
    upstash_db.index.query.assert_called_once_with(
        data="Thai soup",
        namespace="",
        top_k=5,
        filter="",
        include_data=True,
        include_metadata=True,
        include_vectors=True,
    )
def test_search_with_custom_embeddings(upstash_db_with_custom_embedder):
    """search() should embed the query locally when a custom embedder is configured."""
    db = upstash_db_with_custom_embedder

    # One fake hit coming back from the index
    hit = Mock()
    hit.id = "doc_1"
    hit.data = "Tom Kha Gai is a Thai coconut soup"
    hit.metadata = {"cuisine": "Thai"}
    hit.vector = [0.1] * 384
    db.index.query.return_value = [hit]

    matches = db.search("Thai soup", limit=5)
    assert len(matches) == 1

    # The configured embedder must be invoked exactly once with the query text
    db.embedder.get_embedding.assert_called_once_with("Thai soup")
def test_delete_by_id(upstash_db):
    """delete_by_id() should issue an id-based delete and report success."""
    upstash_db.index.delete.return_value = Mock(deleted=1)

    assert upstash_db.delete_by_id("doc_1") is True
    upstash_db.index.delete.assert_called_once_with(ids=["doc_1"], namespace="")
def test_delete_by_name(upstash_db):
    """delete_by_name() should translate the name into a metadata filter expression."""
    upstash_db.index.delete.return_value = Mock(deleted=2)

    assert upstash_db.delete_by_name("tom_kha") is True
    upstash_db.index.delete.assert_called_once_with(filter='name = "tom_kha"', namespace="")
def test_delete_by_metadata(upstash_db):
    """delete_by_metadata() should AND-join string fields into a quoted filter."""
    upstash_db.index.delete.return_value = Mock(deleted=3)

    assert upstash_db.delete_by_metadata({"cuisine": "Thai", "type": "soup"}) is True
    upstash_db.index.delete.assert_called_once_with(filter='cuisine = "Thai" AND type = "soup"', namespace="")
def test_delete_by_metadata_with_numbers(upstash_db):
    """Numeric and boolean metadata values should appear unquoted in the filter."""
    upstash_db.index.delete.return_value = Mock(deleted=1)

    assert upstash_db.delete_by_metadata({"rating": 5, "spicy": True}) is True
    upstash_db.index.delete.assert_called_once_with(filter="rating = 5 AND spicy = True", namespace="")
def test_delete_by_content_id(upstash_db):
    """delete_by_content_id() should filter on the content_id metadata field."""
    upstash_db.index.delete.return_value = Mock(deleted=1)

    assert upstash_db.delete_by_content_id("content_123") is True
    upstash_db.index.delete.assert_called_once_with(filter='content_id = "content_123"', namespace="")
def test_delete_all(upstash_db):
    """delete(delete_all=True) should reset the entire index."""
    upstash_db.index.reset.return_value = "Success"

    assert upstash_db.delete(delete_all=True) is True
    upstash_db.index.reset.assert_called_once_with(namespace="", all=True)
def test_delete_namespace(upstash_db):
    """delete(namespace=...) should reset only the given namespace."""
    upstash_db.index.reset.return_value = "Success"

    assert upstash_db.delete(namespace="test_namespace") is True
    upstash_db.index.reset.assert_called_once_with(namespace="test_namespace", all=False)
def test_get_index_info(upstash_db):
    """get_index_info() should surface the stats object returned by the index."""
    stats = Mock(vector_count=100, dimension=384)
    upstash_db.index.info.return_value = stats

    info = upstash_db.get_index_info()
    assert info.vector_count == 100
    assert info.dimension == 384
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/vectordb/test_upstashdb.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno_infra/agno/aws/api_client.py | from typing import Any, Optional
from agno.utilities.logging import logger
class AwsApiClient:
    """Thin wrapper around a lazily-created boto3 Session for a region/profile pair."""

    def __init__(
        self,
        aws_region: Optional[str] = None,
        aws_profile: Optional[str] = None,
    ):
        """Store the AWS region/profile; the boto3 session is created on first use.

        Args:
            aws_region: AWS region name (e.g. "us-east-1"); boto3 defaults apply when None.
            aws_profile: Named AWS CLI profile; the default profile is used when None.
        """
        super().__init__()
        self.aws_region: Optional[str] = aws_region
        self.aws_profile: Optional[str] = aws_profile
        # aws boto3 session, created lazily by the boto3_session property
        self._boto3_session: Optional[Any] = None
        logger.debug("**-+-** AwsApiClient created")

    def create_boto3_session(self) -> Optional[Any]:
        """Create (and cache) a boto3 session; exits the process if AWS is unreachable."""
        import boto3

        logger.debug("Creating boto3.Session")
        try:
            self._boto3_session = boto3.Session(
                region_name=self.aws_region,
                profile_name=self.aws_profile,
            )
            logger.debug("**-+-** boto3.Session created")
            logger.debug(f"\taws_region: {self._boto3_session.region_name}")
            logger.debug(f"\taws_profile: {self._boto3_session.profile_name}")
        except Exception as e:
            logger.error("Could not connect to aws. Please confirm aws cli is installed and configured")
            logger.error(e)
            # Bug fix: exit with a non-zero status. The previous exit(0) signalled
            # success to the calling shell even though session creation failed.
            exit(1)
        return self._boto3_session

    @property
    def boto3_session(self) -> Optional[Any]:
        """Return the cached boto3 session, creating it on first access."""
        if self._boto3_session is None:
            self._boto3_session = self.create_boto3_session()
        return self._boto3_session
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/api_client.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno_infra/agno/aws/app/base.py | from typing import TYPE_CHECKING, Any, Dict, List, Optional
from pydantic import Field, field_validator
from pydantic_core.core_schema import ValidationInfo
from agno.aws.context import AwsBuildContext
from agno.base.app import InfraApp
from agno.base.context import ContainerContext
from agno.utilities.logging import logger
if TYPE_CHECKING:
from agno.aws.resource.base import AwsResource
from agno.aws.resource.ec2.security_group import SecurityGroup
from agno.aws.resource.ecs.cluster import EcsCluster
from agno.aws.resource.ecs.container import EcsContainer
from agno.aws.resource.ecs.service import EcsService
from agno.aws.resource.ecs.task_definition import EcsTaskDefinition
from agno.aws.resource.elb.listener import Listener
from agno.aws.resource.elb.load_balancer import LoadBalancer
from agno.aws.resource.elb.target_group import TargetGroup
class AwsApp(InfraApp):
    """Base class for applications deployed to AWS as an ECS service.

    Declares networking, ECS, load-balancer, target-group, listener and optional
    NGINX-sidecar configuration, and assembles the corresponding AwsResource
    objects in build_resources().
    """

    # -*- OS Configuration
    # Path to the OS directory inside the container
    infra_dir_container_path: str = "/app"
    # Read secret variables from AWS Secrets
    aws_secrets: Optional[Any] = None
    # -*- Networking Configuration
    # List of subnets for the app: Type: Union[str, Subnet]
    # Added to the load balancer, target group, and ECS service
    subnets: Optional[List[Any]] = None
    # -*- ECS Configuration
    ecs_cluster: Optional[Any] = None
    # Create a cluster if ecs_cluster is None
    create_ecs_cluster: bool = True
    # Name of the ECS cluster
    ecs_cluster_name: Optional[str] = None
    ecs_launch_type: str = "FARGATE"
    ecs_task_cpu: str = "1024"
    ecs_task_memory: str = "2048"
    ecs_service_count: int = 1
    ecs_enable_service_connect: bool = False
    ecs_service_connect_protocol: Optional[str] = None
    ecs_service_connect_namespace: str = "default"
    assign_public_ip: Optional[bool] = None
    # IAM access toggles applied to the task definition
    ecs_bedrock_access: bool = True
    ecs_exec_access: bool = True
    ecs_secret_access: bool = True
    ecs_s3_access: bool = True
    # -*- ECS Volume Configuration
    # List of EcsVolume objects to attach to the task definition
    ecs_volumes: Optional[List[Any]] = None
    # Mount points for the container: [{"sourceVolume": "name", "containerPath": "/path"}]
    ecs_container_mount_points: Optional[List[Dict[str, Any]]] = None
    # -*- Security Group Configuration
    # List of security groups for the ECS Service. Type: SecurityGroup
    security_groups: Optional[List[Any]] = None
    # If create_security_groups=True,
    # Create security groups for the app and load balancer
    create_security_groups: bool = True
    # inbound_security_groups to add to the app security group
    inbound_security_groups: Optional[List[Any]] = None
    # inbound_security_group_ids to add to the app security group
    inbound_security_group_ids: Optional[List[str]] = None
    # -*- LoadBalancer Configuration
    load_balancer: Optional[Any] = None
    # Create a load balancer if load_balancer is None
    create_load_balancer: bool = False
    # Enable HTTPS on the load balancer
    load_balancer_enable_https: bool = False
    # ACM certificate for HTTPS
    # load_balancer_certificate or load_balancer_certificate_arn
    # is required if enable_https is True
    load_balancer_certificate: Optional[Any] = None
    # ARN of the certificate for HTTPS, required if enable_https is True
    load_balancer_certificate_arn: Optional[str] = None
    # Security groups for the load balancer: List[SecurityGroup]
    # The App creates a security group for the load balancer if:
    # load_balancer_security_groups is None
    # and create_load_balancer is True
    # and create_security_groups is True
    load_balancer_security_groups: Optional[List[Any]] = None
    # -*- Listener Configuration
    listeners: Optional[List[Any]] = None
    # Create a listener if listener is None
    create_listeners: Optional[bool] = Field(None, validate_default=True)
    # -*- TargetGroup Configuration
    target_group: Optional[Any] = None
    # Create a target group if target_group is None
    create_target_group: Optional[bool] = Field(None, validate_default=True)
    # HTTP or HTTPS. Recommended to use HTTP because HTTPS is handled by the load balancer
    target_group_protocol: str = "HTTP"
    # Port number for the target group
    # If target_group_port is None, then use container_port
    target_group_port: Optional[int] = None
    target_group_type: str = "ip"
    health_check_protocol: Optional[str] = None
    health_check_port: Optional[str] = None
    health_check_enabled: Optional[bool] = None
    health_check_path: Optional[str] = None
    health_check_interval_seconds: Optional[int] = None
    health_check_timeout_seconds: Optional[int] = None
    healthy_threshold_count: Optional[int] = None
    unhealthy_threshold_count: Optional[int] = None
    # -*- Add NGINX reverse proxy
    enable_nginx: bool = False
    nginx_image: Optional[Any] = None
    nginx_image_name: str = "nginx"
    nginx_image_tag: str = "1.25.2-alpine"
    nginx_container_port: int = 80

    def set_aws_env_vars(self, env_dict: Dict[str, str], aws_region: Optional[str] = None) -> None:
        """Populate env_dict (in place) with AWS_REGION / AWS_DEFAULT_REGION.

        Uses the explicit aws_region when given, otherwise falls back to
        infra_settings.aws_region; leaves env_dict untouched if neither is set.
        """
        from agno.constants import (
            AWS_DEFAULT_REGION_ENV_VAR,
            AWS_REGION_ENV_VAR,
        )

        if aws_region is not None:
            # logger.debug(f"Setting AWS Region to {aws_region}")
            env_dict[AWS_REGION_ENV_VAR] = aws_region
            env_dict[AWS_DEFAULT_REGION_ENV_VAR] = aws_region
        elif self.infra_settings is not None and self.infra_settings.aws_region is not None:
            # logger.debug(f"Setting AWS Region to {aws_region} using os_settings")
            env_dict[AWS_REGION_ENV_VAR] = self.infra_settings.aws_region
            env_dict[AWS_DEFAULT_REGION_ENV_VAR] = self.infra_settings.aws_region

    @field_validator("create_listeners", mode="before")
    def update_create_listeners(cls, create_listeners, info: ValidationInfo):
        """Default create_listeners to create_load_balancer when not explicitly enabled."""
        if create_listeners:
            return create_listeners

        # If create_listener is False, then create a listener if create_load_balancer is True
        return info.data.get("create_load_balancer", None)

    @field_validator("create_target_group", mode="before")
    def update_create_target_group(cls, create_target_group, info: ValidationInfo):
        """Default create_target_group to create_load_balancer when not explicitly enabled."""
        if create_target_group:
            return create_target_group

        # If create_target_group is False, then create a target group if create_load_balancer is True
        return info.data.get("create_load_balancer", None)

    def get_container_context(self) -> Optional[ContainerContext]:
        """Build (and cache) the ContainerContext describing paths inside the container.

        Raises:
            Exception: If infra_name or the in-container infra root cannot be determined.
        """
        logger.debug("Building ContainerContext")

        if self.container_context is not None:  # type: ignore
            return self.container_context  # type: ignore

        infra_name = self.infra_name
        if infra_name is None:
            raise Exception("Could not determine infra_name")

        infra_root_in_container = self.infra_dir_container_path
        if infra_root_in_container is None:
            raise Exception("Could not determine infra_root in container")

        # Parent directory of the infra root inside the container
        infra_parent_paths = infra_root_in_container.split("/")[0:-1]
        infra_parent_in_container = "/".join(infra_parent_paths)

        self.container_context = ContainerContext(
            infra_name=infra_name,
            infra_root=infra_root_in_container,
            infra_parent=infra_parent_in_container,
        )

        if self.requirements_file is not None:
            self.container_context.requirements_file = f"{infra_root_in_container}/{self.requirements_file}"  # type: ignore

        return self.container_context

    def get_container_env(self, container_context: ContainerContext, build_context: AwsBuildContext) -> Dict[str, str]:
        """Assemble the container environment variables.

        Precedence (later entries overwrite earlier ones): base app settings,
        env_file, secrets_file, then user-provided env_vars.
        """
        from agno.constants import (
            AGNO_INFRA_ROOT,
            AGNO_RUNTIME_ENV_VAR,
            PYTHONPATH_ENV_VAR,
            REQUIREMENTS_FILE_PATH_ENV_VAR,
        )

        # Container Environment
        container_env: Dict[str, str] = self.container_env or {}
        container_env.update(
            {
                "INSTALL_REQUIREMENTS": str(self.install_requirements),
                "PRINT_ENV_ON_LOAD": str(self.print_env_on_load),
                AGNO_RUNTIME_ENV_VAR: "ecs",
                REQUIREMENTS_FILE_PATH_ENV_VAR: container_context.requirements_file or "",
                AGNO_INFRA_ROOT: container_context.infra_root or "",
            }
        )

        if self.set_python_path:
            python_path = self.python_path
            if python_path is None:
                python_path = container_context.infra_root
                # NOTE: add_python_paths are only appended when python_path was
                # derived from infra_root (not when self.python_path is set)
                if self.add_python_paths is not None:
                    python_path = "{}:{}".format(python_path, ":".join(self.add_python_paths))
            if python_path is not None:
                container_env[PYTHONPATH_ENV_VAR] = python_path

        # Set aws region and profile
        self.set_aws_env_vars(env_dict=container_env, aws_region=build_context.aws_region)

        # Update the container env using env_file
        env_data_from_file = self.get_env_file_data()
        if env_data_from_file is not None:
            container_env.update({k: str(v) for k, v in env_data_from_file.items() if v is not None})

        # Update the container env using secrets_file
        secret_data_from_file = self.get_secret_file_data()
        if secret_data_from_file is not None:
            container_env.update({k: str(v) for k, v in secret_data_from_file.items() if v is not None})

        # Update the container env with user provided env_vars
        # this overwrites any existing variables with the same key
        if self.env_vars is not None and isinstance(self.env_vars, dict):
            container_env.update({k: v for k, v in self.env_vars.items() if v is not None})

        # logger.debug("Container Environment: {}".format(container_env))
        return container_env

    def get_load_balancer_security_groups(self) -> Optional[List["SecurityGroup"]]:
        """Return the LB security groups, creating a default internet-open one when allowed."""
        from agno.aws.resource.ec2.security_group import InboundRule, SecurityGroup

        load_balancer_security_groups: Optional[List[SecurityGroup]] = self.load_balancer_security_groups
        if load_balancer_security_groups is None:
            # Create security group for the load balancer
            if self.create_load_balancer and self.create_security_groups:
                load_balancer_security_groups = []
                lb_sg = SecurityGroup(
                    name=f"{self.get_app_name()}-lb-security-group",
                    description=f"Security group for {self.get_app_name()} load balancer",
                    inbound_rules=[
                        InboundRule(
                            description="Allow HTTP traffic from the internet",
                            port=80,
                            cidr_ip="0.0.0.0/0",
                        ),
                    ],
                )
                if self.load_balancer_enable_https:
                    if lb_sg.inbound_rules is None:
                        lb_sg.inbound_rules = []
                    lb_sg.inbound_rules.append(
                        InboundRule(
                            description="Allow HTTPS traffic from the internet",
                            port=443,
                            cidr_ip="0.0.0.0/0",
                        )
                    )
                load_balancer_security_groups.append(lb_sg)
        return load_balancer_security_groups

    def security_group_definition(self) -> "SecurityGroup":
        """Build the app security group, admitting traffic from the LB and any inbound SGs."""
        from agno.aws.resource.ec2.security_group import InboundRule, SecurityGroup
        from agno.aws.resource.reference import AwsReference

        # Create security group for the app
        app_sg = SecurityGroup(
            name=f"{self.get_app_name()}-security-group",
            description=f"Security group for {self.get_app_name()}",
        )

        # Add inbound rules for the app security group
        # Allow traffic from the load balancer security groups
        load_balancer_security_groups = self.get_load_balancer_security_groups()
        if load_balancer_security_groups is not None:
            if app_sg.inbound_rules is None:
                app_sg.inbound_rules = []
            if app_sg.depends_on is None:
                app_sg.depends_on = []
            for lb_sg in load_balancer_security_groups:
                app_sg.inbound_rules.append(
                    InboundRule(
                        description=f"Allow traffic from {lb_sg.name} to the {self.get_app_name()}",
                        port=self.container_port,
                        source_security_group_id=AwsReference(lb_sg.get_security_group_id),
                    )
                )
                # LB SG must exist before this rule can reference its id
                app_sg.depends_on.append(lb_sg)

        # Allow traffic from inbound_security_groups
        if self.inbound_security_groups is not None:
            if app_sg.inbound_rules is None:
                app_sg.inbound_rules = []
            if app_sg.depends_on is None:
                app_sg.depends_on = []
            for inbound_sg in self.inbound_security_groups:
                app_sg.inbound_rules.append(
                    InboundRule(
                        description=f"Allow traffic from {inbound_sg.name} to the {self.get_app_name()}",
                        port=self.container_port,
                        source_security_group_id=AwsReference(inbound_sg.get_security_group_id),
                    )
                )

        # Allow traffic from inbound_security_group_ids
        if self.inbound_security_group_ids is not None:
            if app_sg.inbound_rules is None:
                app_sg.inbound_rules = []
            if app_sg.depends_on is None:
                app_sg.depends_on = []
            for inbound_sg_id in self.inbound_security_group_ids:
                app_sg.inbound_rules.append(
                    InboundRule(
                        description=f"Allow traffic from {inbound_sg_id} to the {self.get_app_name()}",
                        port=self.container_port,
                        source_security_group_id=inbound_sg_id,
                    )
                )

        return app_sg

    def get_security_groups(self) -> Optional[List["SecurityGroup"]]:
        """Return the service security groups, creating the default app SG when allowed."""
        from agno.aws.resource.ec2.security_group import SecurityGroup

        security_groups: Optional[List[SecurityGroup]] = self.security_groups
        if security_groups is None:
            # Create security group for the service
            if self.create_security_groups:
                security_groups = []
                app_security_group = self.security_group_definition()
                if app_security_group is not None:
                    security_groups.append(app_security_group)
        return security_groups

    def get_all_security_groups(self) -> Optional[List["SecurityGroup"]]:
        """Collect LB and service security groups; returns None when there are none."""
        from agno.aws.resource.ec2.security_group import SecurityGroup

        security_groups: List[SecurityGroup] = []

        load_balancer_security_groups = self.get_load_balancer_security_groups()
        if load_balancer_security_groups is not None:
            for lb_sg in load_balancer_security_groups:
                if isinstance(lb_sg, SecurityGroup):
                    security_groups.append(lb_sg)

        service_security_groups = self.get_security_groups()
        if service_security_groups is not None:
            for sg in service_security_groups:
                if isinstance(sg, SecurityGroup):
                    security_groups.append(sg)

        return security_groups if len(security_groups) > 0 else None

    def ecs_cluster_definition(self) -> "EcsCluster":
        """Build the default EcsCluster for this app."""
        from agno.aws.resource.ecs.cluster import EcsCluster

        ecs_cluster = EcsCluster(
            name=f"{self.get_app_name()}-cluster",
            ecs_cluster_name=self.ecs_cluster_name or self.get_app_name(),
            capacity_providers=[self.ecs_launch_type],
        )
        if self.ecs_enable_service_connect:
            ecs_cluster.service_connect_namespace = self.ecs_service_connect_namespace
        return ecs_cluster

    def get_ecs_cluster(self) -> "EcsCluster":
        """Return the user-provided EcsCluster or build one; raises if neither is possible."""
        from agno.aws.resource.ecs.cluster import EcsCluster

        if self.ecs_cluster is None:
            if self.create_ecs_cluster:
                return self.ecs_cluster_definition()
            raise Exception("Please provide ECSCluster or set create_ecs_cluster to True")
        elif isinstance(self.ecs_cluster, EcsCluster):
            return self.ecs_cluster
        else:
            raise Exception(f"Invalid ECSCluster: {self.ecs_cluster} - Must be of type EcsCluster")

    def get_ecs_volumes(self) -> Optional[List[Any]]:
        """Validate and return user-provided EcsVolume objects (or None)."""
        from agno.aws.resource.ecs.volume import EcsVolume

        if self.ecs_volumes is None:
            return None
        for vol in self.ecs_volumes:
            if not isinstance(vol, EcsVolume):
                raise Exception(f"Invalid EcsVolume: {vol} - Must be of type EcsVolume")
        return self.ecs_volumes

    def load_balancer_definition(self) -> "LoadBalancer":
        """Build the default LoadBalancer (HTTPS when load_balancer_enable_https)."""
        from agno.aws.resource.elb.load_balancer import LoadBalancer

        return LoadBalancer(
            name=f"{self.get_app_name()}-lb",
            subnets=self.subnets,
            security_groups=self.get_load_balancer_security_groups(),
            protocol="HTTPS" if self.load_balancer_enable_https else "HTTP",
        )

    def get_load_balancer(self) -> Optional["LoadBalancer"]:
        """Return the user-provided LoadBalancer, build one, or None."""
        from agno.aws.resource.elb.load_balancer import LoadBalancer

        if self.load_balancer is None:
            if self.create_load_balancer:
                return self.load_balancer_definition()
            return None
        elif isinstance(self.load_balancer, LoadBalancer):
            return self.load_balancer
        else:
            raise Exception(f"Invalid LoadBalancer: {self.load_balancer} - Must be of type LoadBalancer")

    def target_group_definition(self) -> "TargetGroup":
        """Build the default TargetGroup using the configured health-check settings."""
        from agno.aws.resource.elb.target_group import TargetGroup

        return TargetGroup(
            name=f"{self.get_app_name()}-tg",
            port=self.target_group_port or self.container_port,
            protocol=self.target_group_protocol,
            subnets=self.subnets,
            target_type=self.target_group_type,
            health_check_protocol=self.health_check_protocol,
            health_check_port=self.health_check_port,
            health_check_enabled=self.health_check_enabled,
            health_check_path=self.health_check_path,
            health_check_interval_seconds=self.health_check_interval_seconds,
            health_check_timeout_seconds=self.health_check_timeout_seconds,
            healthy_threshold_count=self.healthy_threshold_count,
            unhealthy_threshold_count=self.unhealthy_threshold_count,
        )

    def get_target_group(self) -> Optional["TargetGroup"]:
        """Return the user-provided TargetGroup, build one, or None."""
        from agno.aws.resource.elb.target_group import TargetGroup

        if self.target_group is None:
            if self.create_target_group:
                return self.target_group_definition()
            return None
        elif isinstance(self.target_group, TargetGroup):
            return self.target_group
        else:
            raise Exception(f"Invalid TargetGroup: {self.target_group} - Must be of type TargetGroup")

    def listeners_definition(
        self, load_balancer: Optional["LoadBalancer"], target_group: Optional["TargetGroup"]
    ) -> List["Listener"]:
        """Build the default listener(s); adds an HTTP-to-HTTPS redirect listener when HTTPS is on."""
        from agno.aws.resource.elb.listener import Listener

        listener = Listener(
            name=f"{self.get_app_name()}-listener",
            load_balancer=load_balancer,
            target_group=target_group,
        )
        if self.load_balancer_certificate_arn is not None:
            listener.certificates = [{"CertificateArn": self.load_balancer_certificate_arn}]
        if self.load_balancer_certificate is not None:
            listener.acm_certificates = [self.load_balancer_certificate]
        listeners: List[Listener] = [listener]

        if self.load_balancer_enable_https:
            # Add a listener to redirect HTTP to HTTPS
            listeners.append(
                Listener(
                    name=f"{self.get_app_name()}-redirect-listener",
                    port=80,
                    protocol="HTTP",
                    load_balancer=load_balancer,
                    default_actions=[
                        {
                            "Type": "redirect",
                            "RedirectConfig": {
                                "Protocol": "HTTPS",
                                "Port": "443",
                                "StatusCode": "HTTP_301",
                                "Host": "#{host}",
                                "Path": "/#{path}",
                                "Query": "#{query}",
                            },
                        }
                    ],
                )
            )
        return listeners

    def get_listeners(
        self, load_balancer: Optional["LoadBalancer"], target_group: Optional["TargetGroup"]
    ) -> Optional[List["Listener"]]:
        """Return user-provided listeners (validated), build defaults, or None."""
        from agno.aws.resource.elb.listener import Listener

        if self.listeners is None:
            if self.create_listeners:
                return self.listeners_definition(load_balancer, target_group)
            return None
        elif isinstance(self.listeners, list):
            for listener in self.listeners:
                if not isinstance(listener, Listener):
                    raise Exception(f"Invalid Listener: {listener} - Must be of type Listener")
            return self.listeners
        else:
            raise Exception(f"Invalid Listener: {self.listeners} - Must be of type List[Listener]")

    def get_container_command(self) -> Optional[List[str]]:
        """Return the container command as an argv list (a string is split on single spaces)."""
        if isinstance(self.command, str):
            return self.command.strip().split(" ")
        return self.command

    def get_ecs_container_port_mappings(self) -> List[Dict[str, Any]]:
        """Build the container port mapping, naming the port when Service Connect is enabled."""
        port_mapping: Dict[str, Any] = {"containerPort": self.container_port}
        # To enable service connect, we need to set the port name to the app name
        if self.ecs_enable_service_connect:
            port_mapping["name"] = self.get_app_name()
            if self.ecs_service_connect_protocol is not None:
                port_mapping["appProtocol"] = self.ecs_service_connect_protocol
        return [port_mapping]

    def get_ecs_container(self, container_context: ContainerContext, build_context: AwsBuildContext) -> "EcsContainer":
        """Build the EcsContainer for this app with env, command, ports and awslogs config."""
        from agno.aws.resource.ecs.container import EcsContainer

        # -*- Get Container Environment
        container_env: Dict[str, str] = self.get_container_env(
            container_context=container_context, build_context=build_context
        )

        # -*- Get Container Command
        container_cmd: Optional[List[str]] = self.get_container_command()
        if container_cmd:
            logger.debug("Command: {}".format(" ".join(container_cmd)))

        aws_region = build_context.aws_region or (self.infra_settings.aws_region if self.infra_settings else None)
        return EcsContainer(
            name=self.get_app_name(),
            image=self.get_image_str(),
            port_mappings=self.get_ecs_container_port_mappings(),
            command=container_cmd,
            essential=True,
            environment=[{"name": k, "value": v} for k, v in container_env.items()],
            log_configuration={
                "logDriver": "awslogs",
                "options": {
                    "awslogs-group": self.get_app_name(),
                    "awslogs-region": aws_region,
                    "awslogs-create-group": "true",
                    "awslogs-stream-prefix": self.get_app_name(),
                },
            },
            linux_parameters={"initProcessEnabled": True},
            env_from_secrets=self.aws_secrets,
        )

    def get_ecs_task_definition(self, ecs_container: "EcsContainer") -> "EcsTaskDefinition":
        """Build the awsvpc-mode task definition wrapping the given container."""
        from agno.aws.resource.ecs.task_definition import EcsTaskDefinition

        return EcsTaskDefinition(
            name=f"{self.get_app_name()}-td",
            family=self.get_app_name(),
            network_mode="awsvpc",
            cpu=self.ecs_task_cpu,
            memory=self.ecs_task_memory,
            containers=[ecs_container],
            requires_compatibilities=[self.ecs_launch_type],
            add_bedrock_access_to_task=self.ecs_bedrock_access,
            add_exec_access_to_task=self.ecs_exec_access,
            add_secret_access_to_ecs=self.ecs_secret_access,
            add_secret_access_to_task=self.ecs_secret_access,
            add_s3_access_to_task=self.ecs_s3_access,
        )

    def get_ecs_service(
        self,
        ecs_container: "EcsContainer",
        ecs_task_definition: "EcsTaskDefinition",
        ecs_cluster: "EcsCluster",
        target_group: Optional["TargetGroup"],
    ) -> Optional["EcsService"]:
        """Build the EcsService, wiring cluster, task definition, target group and networking."""
        from agno.aws.resource.ecs.service import EcsService

        service_security_groups = self.get_security_groups()
        ecs_service = EcsService(
            name=f"{self.get_app_name()}-service",
            desired_count=self.ecs_service_count,
            launch_type=self.ecs_launch_type,
            cluster=ecs_cluster,
            task_definition=ecs_task_definition,
            target_group=target_group,
            target_container_name=ecs_container.name,
            target_container_port=self.container_port,
            subnets=self.subnets,
            security_groups=service_security_groups,
            assign_public_ip=self.assign_public_ip,
            # Force delete the service.
            force_delete=True,
            # Force a new deployment of the service on update.
            force_new_deployment=True,
            enable_execute_command=self.ecs_exec_access,
        )
        if self.ecs_enable_service_connect:
            # namespace is used from the cluster
            ecs_service.service_connect_configuration = {
                "enabled": True,
                "services": [
                    {
                        "portName": self.get_app_name(),
                        "clientAliases": [
                            {
                                "port": self.container_port,
                                "dnsName": self.get_app_name(),
                            }
                        ],
                    },
                ],
            }
        return ecs_service

    def build_resources(self, build_context: AwsBuildContext) -> List["AwsResource"]:
        """Build the full, ordered list of AwsResources for this app.

        Order: security groups, load balancer, target group, listeners, cluster,
        task definition, service — so dependencies are created before dependents.
        """
        from agno.aws.resource.base import AwsResource
        from agno.aws.resource.ec2.security_group import SecurityGroup
        from agno.aws.resource.ecs.cluster import EcsCluster
        from agno.aws.resource.ecs.container import EcsContainer
        from agno.aws.resource.ecs.service import EcsService
        from agno.aws.resource.ecs.task_definition import EcsTaskDefinition
        from agno.aws.resource.ecs.volume import EcsVolume
        from agno.aws.resource.elb.listener import Listener
        from agno.aws.resource.elb.load_balancer import LoadBalancer
        from agno.aws.resource.elb.target_group import TargetGroup
        from agno.docker.resource.image import DockerImage
        from agno.utilities.defaults import get_default_volume_name

        logger.debug(f"------------ Building {self.get_app_name()} ------------")

        # -*- Get Container Context
        container_context: Optional[ContainerContext] = self.get_container_context()
        if container_context is None:
            raise Exception("Could not build ContainerContext")
        logger.debug(f"ContainerContext: {container_context.model_dump_json(indent=2)}")

        # -*- Get Security Groups
        security_groups: Optional[List[SecurityGroup]] = self.get_all_security_groups()

        # -*- Get ECS cluster
        ecs_cluster: EcsCluster = self.get_ecs_cluster()

        # -*- Get Load Balancer
        load_balancer: Optional[LoadBalancer] = self.get_load_balancer()

        # -*- Get Target Group
        target_group: Optional[TargetGroup] = self.get_target_group()
        # Point the target group to the nginx container port if:
        # - nginx is enabled
        # - user provided target_group is None
        # - user provided target_group_port is None
        if self.enable_nginx and self.target_group is None and self.target_group_port is None:
            if target_group is not None:
                target_group.port = self.nginx_container_port

        # -*- Get Listener
        listeners: Optional[List[Listener]] = self.get_listeners(load_balancer=load_balancer, target_group=target_group)

        # -*- Get ECSContainer
        ecs_container: EcsContainer = self.get_ecs_container(
            container_context=container_context, build_context=build_context
        )

        # -*- Add nginx container if nginx is enabled
        nginx_container: Optional[EcsContainer] = None
        nginx_shared_volume: Optional[EcsVolume] = None
        if self.enable_nginx and ecs_container is not None:
            nginx_container_name = f"{self.get_app_name()}-nginx"
            nginx_shared_volume = EcsVolume(name=get_default_volume_name(self.get_app_name()))
            nginx_image_str = f"{self.nginx_image_name}:{self.nginx_image_tag}"
            if self.nginx_image and isinstance(self.nginx_image, DockerImage):
                nginx_image_str = self.nginx_image.get_image_str()

            nginx_container = EcsContainer(
                name=nginx_container_name,
                image=nginx_image_str,
                essential=True,
                port_mappings=[{"containerPort": self.nginx_container_port}],
                environment=ecs_container.environment,
                log_configuration={
                    "logDriver": "awslogs",
                    "options": {
                        "awslogs-group": self.get_app_name(),
                        "awslogs-region": build_context.aws_region
                        or (self.infra_settings.aws_region if self.infra_settings else None),
                        "awslogs-create-group": "true",
                        "awslogs-stream-prefix": nginx_container_name,
                    },
                },
                mount_points=[
                    {
                        "sourceVolume": nginx_shared_volume.name,
                        "containerPath": container_context.infra_root,
                    }
                ],
                linux_parameters=ecs_container.linux_parameters,
                env_from_secrets=ecs_container.env_from_secrets,
                save_output=ecs_container.save_output,
                output_dir=ecs_container.output_dir,
                skip_create=ecs_container.skip_create,
                skip_delete=ecs_container.skip_delete,
                wait_for_create=ecs_container.wait_for_create,
                wait_for_delete=ecs_container.wait_for_delete,
            )

            # Add shared volume to ecs_container (copy list to avoid aliasing)
            ecs_container.mount_points = list(nginx_container.mount_points) if nginx_container.mount_points else []

        # -*- Add user-defined mount points to ecs_container
        if self.ecs_container_mount_points and ecs_container is not None:
            if ecs_container.mount_points:
                ecs_container.mount_points.extend(self.ecs_container_mount_points)
            else:
                ecs_container.mount_points = list(self.ecs_container_mount_points)

        # -*- Get ECS Task Definition
        ecs_task_definition: EcsTaskDefinition = self.get_ecs_task_definition(ecs_container=ecs_container)
        # -*- Add nginx container to ecs_task_definition if nginx is enabled
        if self.enable_nginx:
            if ecs_task_definition is not None:
                if nginx_container is not None:
                    if ecs_task_definition.containers:
                        ecs_task_definition.containers.append(nginx_container)
                    else:
                        logger.error("While adding Nginx container, found TaskDefinition.containers to be None")
                else:
                    logger.error("While adding Nginx container, found nginx_container to be None")
                if nginx_shared_volume:
                    ecs_task_definition.volumes = [nginx_shared_volume]

        # -*- Add user-defined volumes to ecs_task_definition
        ecs_volumes = self.get_ecs_volumes()
        if ecs_volumes and ecs_task_definition is not None:
            if ecs_task_definition.volumes:
                ecs_task_definition.volumes.extend(ecs_volumes)
            else:
                ecs_task_definition.volumes = list(ecs_volumes)

        # -*- Get ECS Service
        ecs_service: Optional[EcsService] = self.get_ecs_service(
            ecs_cluster=ecs_cluster,
            ecs_task_definition=ecs_task_definition,
            target_group=target_group,
            ecs_container=ecs_container,
        )
        # -*- Add nginx container as target_container if nginx is enabled
        if self.enable_nginx:
            if ecs_service is not None:
                if nginx_container is not None:
                    ecs_service.target_container_name = nginx_container.name
                    ecs_service.target_container_port = self.nginx_container_port
                else:
                    logger.error("While adding Nginx container as target_container, found nginx_container to be None")

        # -*- List of AwsResources created by this App
        app_resources: List[AwsResource] = []
        if security_groups:
            app_resources.extend(security_groups)
        if load_balancer:
            app_resources.append(load_balancer)
        if target_group:
            app_resources.append(target_group)
        if listeners:
            app_resources.extend(listeners)
        if ecs_cluster:
            app_resources.append(ecs_cluster)
        if ecs_task_definition:
            app_resources.append(ecs_task_definition)
        if ecs_service:
            app_resources.append(ecs_service)

        logger.debug(f"------------ {self.get_app_name()} Built ------------")
        return app_resources
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/app/base.py",
"license": "Apache License 2.0",
"lines": 672,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/app/celery/worker.py | from typing import List, Optional, Union
from agno.aws.app.base import AwsApp, AwsBuildContext, ContainerContext # noqa: F401
class CeleryWorker(AwsApp):
    """Runs a Celery worker on AWS ECS.

    Ships with the ``agnohq/celery-worker`` image and starts the worker via
    the default ``command``; override ``command`` to point at your own
    Celery application.
    """

    # -*- App Name (used to derive AWS resource names)
    name: str = "celery-worker"

    # -*- Image Configuration
    image_name: str = "agnohq/celery-worker"
    image_tag: str = "latest"
    # Container start command
    command: Optional[Union[str, List[str]]] = "celery -A tasks.celery worker --loglevel=info"

    # -*- OS Configuration
    # Path to the os directory inside the container
    infra_dir_container_path: str = "/app"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/app/celery/worker.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno_infra/agno/aws/app/fastapi/fastapi.py | from typing import Dict, List, Optional, Union
from agno.aws.app.base import AwsApp, AwsBuildContext, ContainerContext # noqa: F401
class FastApi(AwsApp):
    """Runs a FastAPI server (served by uvicorn) on AWS ECS."""

    # -*- App Name (used to derive AWS resource names)
    name: str = "fastapi"

    # -*- Image Configuration
    image_name: str = "agnohq/fastapi"
    image_tag: str = "0.104"
    command: Optional[Union[str, List[str]]] = "uvicorn main:app --reload"

    # -*- App Ports
    # Open a container port if open_port=True
    open_port: bool = True
    port_number: int = 8000

    # -*- OS Configuration
    # Path to the os directory inside the container
    infra_dir_container_path: str = "/app"

    # -*- ECS Configuration
    ecs_task_cpu: str = "1024"
    ecs_task_memory: str = "2048"
    ecs_service_count: int = 1
    assign_public_ip: Optional[bool] = True

    # -*- Uvicorn Configuration
    uvicorn_host: str = "0.0.0.0"
    # Defaults to port_number when unset
    uvicorn_port: Optional[int] = None
    uvicorn_reload: Optional[bool] = None
    uvicorn_log_level: Optional[str] = None
    web_concurrency: Optional[int] = None

    def get_container_env(self, container_context: ContainerContext, build_context: AwsBuildContext) -> Dict[str, str]:
        """Return the container environment, extended with UVICORN_* settings.

        Starts from the base AwsApp environment and layers the uvicorn
        configuration on top; unset (None) options are simply omitted.
        """
        env: Dict[str, str] = super().get_container_env(
            container_context=container_context, build_context=build_context
        )

        if self.uvicorn_host is not None:
            env["UVICORN_HOST"] = self.uvicorn_host

        # Port resolution: an explicit uvicorn_port wins, otherwise fall back
        # to the app's port_number.
        effective_port = self.uvicorn_port if self.uvicorn_port is not None else self.port_number
        if effective_port is not None:
            env["UVICORN_PORT"] = str(effective_port)

        if self.uvicorn_reload is not None:
            env["UVICORN_RELOAD"] = str(self.uvicorn_reload)
        if self.uvicorn_log_level is not None:
            env["UVICORN_LOG_LEVEL"] = self.uvicorn_log_level
        if self.web_concurrency is not None:
            env["WEB_CONCURRENCY"] = str(self.web_concurrency)

        return env
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/app/fastapi/fastapi.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno_infra/agno/aws/app/streamlit/streamlit.py | from typing import Dict, List, Optional, Union
from agno.aws.app.base import AwsApp, AwsBuildContext, ContainerContext # noqa: F401
class Streamlit(AwsApp):
    """Runs a Streamlit server on AWS ECS."""

    # -*- App Name (used to derive AWS resource names)
    name: str = "streamlit"

    # -*- Image Configuration
    image_name: str = "agnohq/streamlit"
    image_tag: str = "1.27"
    command: Optional[Union[str, List[str]]] = "streamlit hello"

    # -*- App Ports
    # Open a container port if open_port=True
    open_port: bool = True
    port_number: int = 8501

    # -*- OS Configuration
    # Path to the os directory inside the container
    infra_dir_container_path: str = "/app"

    # -*- ECS Configuration
    ecs_task_cpu: str = "1024"
    ecs_task_memory: str = "2048"
    ecs_service_count: int = 1
    assign_public_ip: Optional[bool] = True

    # -*- Streamlit Configuration
    # Server settings; streamlit_server_port defaults to port_number when unset
    streamlit_server_port: Optional[int] = None
    streamlit_server_headless: bool = True
    streamlit_server_run_on_save: Optional[bool] = None
    streamlit_server_max_upload_size: Optional[int] = None
    streamlit_browser_gather_usage_stats: bool = False
    # Browser settings
    streamlit_browser_server_port: Optional[str] = None
    streamlit_browser_server_address: Optional[str] = None

    def get_container_env(self, container_context: ContainerContext, build_context: AwsBuildContext) -> Dict[str, str]:
        """Return the container environment, extended with STREAMLIT_* settings.

        Starts from the base AwsApp environment and layers the streamlit
        configuration on top; unset (None) options are simply omitted.
        """
        env: Dict[str, str] = super().get_container_env(
            container_context=container_context, build_context=build_context
        )

        # Port resolution: an explicit streamlit_server_port wins, otherwise
        # fall back to the app's port_number.
        server_port = self.streamlit_server_port
        if server_port is None:
            server_port = self.port_number
        if server_port is not None:
            env["STREAMLIT_SERVER_PORT"] = str(server_port)

        if self.streamlit_server_headless is not None:
            env["STREAMLIT_SERVER_HEADLESS"] = str(self.streamlit_server_headless)
        if self.streamlit_server_run_on_save is not None:
            env["STREAMLIT_SERVER_RUN_ON_SAVE"] = str(self.streamlit_server_run_on_save)
        if self.streamlit_server_max_upload_size is not None:
            env["STREAMLIT_SERVER_MAX_UPLOAD_SIZE"] = str(self.streamlit_server_max_upload_size)
        if self.streamlit_browser_gather_usage_stats is not None:
            env["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = str(self.streamlit_browser_gather_usage_stats)
        if self.streamlit_browser_server_port is not None:
            env["STREAMLIT_BROWSER_SERVER_PORT"] = self.streamlit_browser_server_port
        if self.streamlit_browser_server_address is not None:
            env["STREAMLIT_BROWSER_SERVER_ADDRESS"] = self.streamlit_browser_server_address

        return env
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/app/streamlit/streamlit.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno_infra/agno/aws/resource/base.py | from typing import Any, Optional
from agno.aws.api_client import AwsApiClient
from agno.base.resource import InfraResource
from agno.cli.console import print_info
from agno.utilities.logging import logger
class AwsResource(InfraResource):
    """Base class for AWS Resources.

    Implements the generic create/read/update/delete lifecycle shared by all
    AWS resources: skip flags, read-caching, output files and post-operation
    hooks. Subclasses implement ``_create``, ``_read``, ``_update`` and
    ``_delete`` against the boto3 client for ``service_name``.
    """

    # boto3 service name, e.g. "ec2" or "ecs"
    service_name: str
    # Cached boto3 client / service resource for service_name
    service_client: Optional[Any] = None
    service_resource: Optional[Any] = None

    # AWS region and profile used when building the AwsApiClient
    aws_region: Optional[str] = None
    aws_profile: Optional[str] = None
    # Cached AwsApiClient
    aws_client: Optional[AwsApiClient] = None

    def get_aws_region(self) -> Optional[str]:
        """Return the AWS region: resource value > infra settings > environment."""
        # Priority 1: Use aws_region from resource
        if self.aws_region:
            return self.aws_region

        # Priority 2: Get aws_region from infra settings
        if self.infra_settings is not None and self.infra_settings.aws_region is not None:
            self.aws_region = self.infra_settings.aws_region
            return self.aws_region

        # Priority 3: Get aws_region from env
        from os import getenv

        from agno.constants import AWS_REGION_ENV_VAR

        aws_region_env = getenv(AWS_REGION_ENV_VAR)
        if aws_region_env is not None:
            logger.debug(f"{AWS_REGION_ENV_VAR}: {aws_region_env}")
            self.aws_region = aws_region_env
        return self.aws_region

    def get_aws_profile(self) -> Optional[str]:
        """Return the AWS profile: resource value > infra settings > environment."""
        # Priority 1: Use aws_profile from resource
        # (fixed comment: previously said "aws_region", a copy-paste slip)
        if self.aws_profile:
            return self.aws_profile

        # Priority 2: Get aws_profile from infra settings
        if self.infra_settings is not None and self.infra_settings.aws_profile is not None:
            self.aws_profile = self.infra_settings.aws_profile
            return self.aws_profile

        # Priority 3: Get aws_profile from env
        from os import getenv

        from agno.constants import AWS_PROFILE_ENV_VAR

        aws_profile_env = getenv(AWS_PROFILE_ENV_VAR)
        if aws_profile_env is not None:
            logger.debug(f"{AWS_PROFILE_ENV_VAR}: {aws_profile_env}")
            self.aws_profile = aws_profile_env
        return self.aws_profile

    def get_service_client(self, aws_client: AwsApiClient):
        """Return (and cache) the boto3 client for ``service_name``."""
        # Annotate with the actual Session class instead of the boto3.session
        # module (the module object is not a valid type for the annotation).
        from boto3.session import Session

        if self.service_client is None:
            boto3_session: Session = aws_client.boto3_session
            self.service_client = boto3_session.client(service_name=self.service_name)
        return self.service_client

    def get_service_resource(self, aws_client: AwsApiClient):
        """Return (and cache) the boto3 service resource for ``service_name``."""
        from boto3.session import Session

        if self.service_resource is None:
            boto3_session: Session = aws_client.boto3_session
            self.service_resource = boto3_session.resource(service_name=self.service_name)
        return self.service_resource

    def get_aws_client(self) -> AwsApiClient:
        """Return (and cache) an AwsApiClient built from this resource's region/profile."""
        if self.aws_client is not None:
            return self.aws_client
        self.aws_client = AwsApiClient(aws_region=self.get_aws_region(), aws_profile=self.get_aws_profile())
        return self.aws_client

    def _read(self, aws_client: AwsApiClient) -> Any:
        # Subclasses override; the base implementation is a no-op.
        logger.warning(f"@_read method not defined for {self.get_resource_name()}")
        return True

    def read(self, aws_client: Optional[AwsApiClient] = None) -> Any:
        """Reads the resource from Aws"""
        # Step 1: Use cached value if available
        if self.use_cache and self.active_resource is not None:
            return self.active_resource

        # Step 2: Skip resource read if skip_read = True
        if self.skip_read:
            print_info(f"Skipping read: {self.get_resource_name()}")
            return True

        # Step 3: Read resource
        client: AwsApiClient = aws_client or self.get_aws_client()
        return self._read(client)

    def is_active(self, aws_client: AwsApiClient) -> bool:
        """Returns True if the resource is active on Aws"""
        return self.read(aws_client=aws_client) is not None

    def _create(self, aws_client: AwsApiClient) -> bool:
        # Subclasses override; the base implementation is a no-op.
        logger.warning(f"@_create method not defined for {self.get_resource_name()}")
        return True

    def create(self, aws_client: Optional[AwsApiClient] = None) -> bool:
        """Creates the resource on Aws"""
        # Step 1: Skip resource creation if skip_create = True
        if self.skip_create:
            print_info(f"Skipping create: {self.get_resource_name()}")
            return True

        # Step 2: Check if resource is active and use_cache = True
        client: AwsApiClient = aws_client or self.get_aws_client()
        if self.use_cache and self.is_active(client):
            self.resource_created = True
            print_info(f"{self.get_resource_type()}: {self.get_resource_name()} already exists")
        # Step 3: Create the resource
        else:
            self.resource_created = self._create(client)
            if self.resource_created:
                print_info(f"{self.get_resource_type()}: {self.get_resource_name()} created")

        # Step 4: Run post create steps
        if self.resource_created:
            if self.save_output:
                self.save_output_file()
            logger.debug(f"Running post-create for {self.get_resource_type()}: {self.get_resource_name()}")
            return self.post_create(client)
        logger.error(f"Failed to create {self.get_resource_type()}: {self.get_resource_name()}")
        return self.resource_created

    def post_create(self, aws_client: AwsApiClient) -> bool:
        # Hook for subclasses (e.g. waiters); default is success.
        return True

    def _update(self, aws_client: AwsApiClient) -> Any:
        # Subclasses override; the base implementation is a no-op.
        logger.warning(f"@_update method not defined for {self.get_resource_name()}")
        return True

    def update(self, aws_client: Optional[AwsApiClient] = None) -> bool:
        """Updates the resource on Aws"""
        # Step 1: Skip resource update if skip_update = True
        if self.skip_update:
            print_info(f"Skipping update: {self.get_resource_name()}")
            return True

        # Step 2: Update the resource
        client: AwsApiClient = aws_client or self.get_aws_client()
        if self.is_active(client):
            self.resource_updated = self._update(client)
        else:
            # Nothing to update; treated as success.
            print_info(f"{self.get_resource_type()}: {self.get_resource_name()} does not exist")
            return True

        # Step 3: Run post update steps
        if self.resource_updated:
            print_info(f"{self.get_resource_type()}: {self.get_resource_name()} updated")
            if self.save_output:
                self.save_output_file()
            logger.debug(f"Running post-update for {self.get_resource_type()}: {self.get_resource_name()}")
            return self.post_update(client)
        logger.error(f"Failed to update {self.get_resource_type()}: {self.get_resource_name()}")
        return self.resource_updated

    def post_update(self, aws_client: AwsApiClient) -> bool:
        # Hook for subclasses; default is success.
        return True

    def _delete(self, aws_client: AwsApiClient) -> Any:
        # Subclasses override; the base implementation is a no-op.
        logger.warning(f"@_delete method not defined for {self.get_resource_name()}")
        return True

    def delete(self, aws_client: Optional[AwsApiClient] = None) -> bool:
        """Deletes the resource from Aws"""
        # Step 1: Skip resource deletion if skip_delete = True
        if self.skip_delete:
            print_info(f"Skipping delete: {self.get_resource_name()}")
            return True

        # Step 2: Delete the resource
        client: AwsApiClient = aws_client or self.get_aws_client()
        if self.is_active(client):
            self.resource_deleted = self._delete(client)
        else:
            # Nothing to delete; treated as success.
            print_info(f"{self.get_resource_type()}: {self.get_resource_name()} does not exist")
            return True

        # Step 3: Run post delete steps
        if self.resource_deleted:
            print_info(f"{self.get_resource_type()}: {self.get_resource_name()} deleted")
            if self.save_output:
                self.delete_output_file()
            logger.debug(f"Running post-delete for {self.get_resource_type()}: {self.get_resource_name()}.")
            return self.post_delete(client)
        logger.error(f"Failed to delete {self.get_resource_type()}: {self.get_resource_name()}")
        return self.resource_deleted

    def post_delete(self, aws_client: AwsApiClient) -> bool:
        # Hook for subclasses; default is success.
        return True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/base.py",
"license": "Apache License 2.0",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/ec2/volume.py | from typing import Any, Dict, Optional
from typing_extensions import Literal
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.cli.console import print_info
from agno.utilities.logging import logger
class EbsVolume(AwsResource):
    """
    Manages an Amazon EBS Volume.

    Volumes have no native name; the name is stored as a tag (``name_tag``)
    and used to find the volume on read.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#volume
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_volume
    """

    resource_type: Optional[str] = "EbsVolume"
    service_name: str = "ec2"

    # The unique name to give to your volume.
    name: str
    # The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size.
    # If you specify a snapshot, the default is the snapshot size. You can specify a volume size that is
    # equal to or larger than the snapshot size.
    #
    # The following are the supported volumes sizes for each volume type:
    # gp2 and gp3 : 1-16,384
    # io1 and io2 : 4-16,384
    # st1 and sc1 : 125-16,384
    # standard : 1-1,024
    size: int
    # The Availability Zone in which to create the volume.
    availability_zone: str
    # Indicates whether the volume should be encrypted. The effect of setting the encryption state to
    # true depends on the volume origin (new or from a snapshot), starting encryption state, ownership,
    # and whether encryption by default is enabled.
    # Encrypted Amazon EBS volumes must be attached to instances that support Amazon EBS encryption.
    encrypted: Optional[bool] = None
    # The number of I/O operations per second (IOPS). For gp3 , io1 , and io2 volumes, this represents the
    # number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline
    # performance of the volume and the rate at which the volume accumulates I/O credits for bursting.
    #
    # The following are the supported values for each volume type:
    # gp3 : 3,000-16,000 IOPS
    # io1 : 100-64,000 IOPS
    # io2 : 100-64,000 IOPS
    #
    # This parameter is required for io1 and io2 volumes.
    # The default for gp3 volumes is 3,000 IOPS.
    # This parameter is not supported for gp2 , st1 , sc1 , or standard volumes.
    iops: Optional[int] = None
    # The identifier of the Key Management Service (KMS) KMS key to use for Amazon EBS encryption.
    # If this parameter is not specified, your KMS key for Amazon EBS is used. If KmsKeyId is specified,
    # the encrypted state must be true .
    kms_key_id: Optional[str] = None
    # The Amazon Resource Name (ARN) of the Outpost.
    outpost_arn: Optional[str] = None
    # The snapshot from which to create the volume. You must specify either a snapshot ID or a volume size.
    snapshot_id: Optional[str] = None
    # The volume type. The EC2 API expects the un-underscored form (e.g. "gp3");
    # the legacy underscore spellings are accepted for backward compatibility and
    # normalized before being sent to AWS.
    #
    # General Purpose SSD: gp2 | gp3
    # Provisioned IOPS SSD: io1 | io2
    # Throughput Optimized HDD: st1
    # Cold HDD: sc1
    # Magnetic: standard
    #
    # Default: gp2
    volume_type: Optional[
        Literal[
            "standard",
            "io1",
            "io2",
            "gp2",
            "sc1",
            "st1",
            "gp3",
            "io_1",
            "io_2",
            "gp_2",
            "sc_1",
            "st_1",
            "gp_3",
        ]
    ] = None
    # Checks whether you have the required permissions for the action, without actually making the request,
    # and provides an error response. If you have the required permissions, the error response is DryRunOperation.
    # Otherwise, it is UnauthorizedOperation .
    dry_run: Optional[bool] = None
    # The tags to apply to the volume during creation.
    tags: Optional[Dict[str, str]] = None
    # The tag to use for volume name
    name_tag: str = "Name"
    # Indicates whether to enable Amazon EBS Multi-Attach. If you enable Multi-Attach, you can attach the volume to
    # up to 16 Instances built on the Nitro System in the same Availability Zone. This parameter is supported with
    # io1 and io2 volumes only.
    multi_attach_enabled: Optional[bool] = None
    # The throughput to provision for a volume, with a maximum of 1,000 MiB/s.
    # This parameter is valid only for gp3 volumes.
    # Valid Range: Minimum value of 125. Maximum value of 1000.
    throughput: Optional[int] = None
    # Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
    # This field is autopopulated if not provided.
    client_token: Optional[str] = None
    # Wait for the volume to become available after create
    wait_for_create: bool = False
    # VolumeId assigned by AWS, populated on create/read
    volume_id: Optional[str] = None

    def _normalized_volume_type(self) -> Optional[str]:
        """Return volume_type in the form the EC2 API accepts (e.g. "gp_3" -> "gp3")."""
        if self.volume_type is None:
            return None
        return self.volume_type.replace("_", "")

    def _create(self, aws_client: AwsApiClient) -> bool:
        """Creates the EbsVolume

        Args:
            aws_client: The AwsApiClient for the current volume
        """
        print_info(f"Creating {self.get_resource_type()}: {self.get_resource_name()}")

        # Step 1: Build Volume configuration
        # Add name as a tag because volumes do not have names
        tags = {self.name_tag: self.name}
        if self.tags is not None and isinstance(self.tags, dict):
            tags.update(self.tags)

        # create a dict of args which are not null, otherwise aws type validation fails
        not_null_args: Dict[str, Any] = {}
        if self.encrypted:
            not_null_args["Encrypted"] = self.encrypted
        if self.iops:
            not_null_args["Iops"] = self.iops
        if self.kms_key_id:
            not_null_args["KmsKeyId"] = self.kms_key_id
        if self.outpost_arn:
            not_null_args["OutpostArn"] = self.outpost_arn
        if self.snapshot_id:
            not_null_args["SnapshotId"] = self.snapshot_id
        if self.volume_type:
            not_null_args["VolumeType"] = self._normalized_volume_type()
        if self.dry_run:
            not_null_args["DryRun"] = self.dry_run
        if tags:
            not_null_args["TagSpecifications"] = [
                {
                    "ResourceType": "volume",
                    "Tags": [{"Key": k, "Value": v} for k, v in tags.items()],
                },
            ]
        if self.multi_attach_enabled:
            not_null_args["MultiAttachEnabled"] = self.multi_attach_enabled
        if self.throughput:
            not_null_args["Throughput"] = self.throughput
        if self.client_token:
            not_null_args["ClientToken"] = self.client_token

        # Step 2: Create Volume
        service_client = self.get_service_client(aws_client)
        try:
            create_response = service_client.create_volume(
                AvailabilityZone=self.availability_zone,
                Size=self.size,
                **not_null_args,
            )
            logger.debug(f"create_response: {create_response}")

            # Validate Volume creation
            if create_response is not None:
                create_time = create_response.get("CreateTime", None)
                self.volume_id = create_response.get("VolumeId", None)
                logger.debug(f"create_time: {create_time}")
                logger.debug(f"volume_id: {self.volume_id}")
                if create_time is not None:
                    self.active_resource = create_response
                    return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be created.")
            logger.error(e)
        return False

    def post_create(self, aws_client: AwsApiClient) -> bool:
        """Optionally waits for the volume to reach the "available" state."""
        # Wait for Volume to be created
        if self.wait_for_create:
            try:
                if self.volume_id is not None:
                    print_info(f"Waiting for {self.get_resource_type()} to be created.")
                    waiter = self.get_service_client(aws_client).get_waiter("volume_available")
                    waiter.wait(
                        VolumeIds=[self.volume_id],
                        WaiterConfig={
                            "Delay": self.waiter_delay,
                            "MaxAttempts": self.waiter_max_attempts,
                        },
                    )
                else:
                    logger.warning("Skipping waiter, no volume_id found")
            except Exception as e:
                # Best-effort wait; creation itself already succeeded.
                logger.error("Waiter failed.")
                logger.error(e)
        return True

    def _read(self, aws_client: AwsApiClient) -> Optional[Any]:
        """Returns the EbsVolume

        Looks the volume up by its name tag (volumes have no native name).

        Args:
            aws_client: The AwsApiClient for the current volume
        """
        logger.debug(f"Reading {self.get_resource_type()}: {self.get_resource_name()}")

        from botocore.exceptions import ClientError

        service_client = self.get_service_client(aws_client)
        try:
            volume = None
            describe_volumes = service_client.describe_volumes(
                Filters=[
                    {
                        "Name": "tag:" + self.name_tag,
                        "Values": [self.name],
                    },
                ],
            )
            # logger.debug(f"describe_volumes: {describe_volumes}")
            for _volume in describe_volumes.get("Volumes", []):
                _volume_tags = _volume.get("Tags", None)
                if _volume_tags is not None and isinstance(_volume_tags, list):
                    for _tag in _volume_tags:
                        if _tag["Key"] == self.name_tag and _tag["Value"] == self.name:
                            volume = _volume
                            break
                # found volume, break loop
                if volume is not None:
                    break

            if volume is not None:
                create_time = volume.get("CreateTime", None)
                logger.debug(f"create_time: {create_time}")
                self.volume_id = volume.get("VolumeId", None)
                logger.debug(f"volume_id: {self.volume_id}")
                self.active_resource = volume
        except ClientError as ce:
            logger.debug(f"ClientError: {ce}")
        except Exception as e:
            logger.error(f"Error reading {self.get_resource_type()}.")
            logger.error(e)
        return self.active_resource

    def _delete(self, aws_client: AwsApiClient) -> bool:
        """Deletes the EbsVolume

        Detaches the volume from any instances first, then deletes it.

        Args:
            aws_client: The AwsApiClient for the current volume
        """
        print_info(f"Deleting {self.get_resource_type()}: {self.get_resource_name()}")

        self.active_resource = None
        service_client = self.get_service_client(aws_client)
        try:
            volume = self._read(aws_client)
            logger.debug(f"EbsVolume: {volume}")
            if volume is None or self.volume_id is None:
                logger.warning(f"No {self.get_resource_type()} to delete")
                return True

            # detach the volume from all instances
            # NOTE(review): detach_volume is asynchronous on AWS; delete_volume
            # below can fail if a detach has not completed — consider a
            # "volume_available" waiter between the two calls.
            for attachment in volume.get("Attachments", []):
                device = attachment.get("Device", None)
                instance_id = attachment.get("InstanceId", None)
                print_info(f"Detaching volume from device: {device}, instance_id: {instance_id}")
                service_client.detach_volume(
                    Device=device,
                    InstanceId=instance_id,
                    VolumeId=self.volume_id,
                )

            # delete volume
            service_client.delete_volume(VolumeId=self.volume_id)
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be deleted.")
            logger.error("Please try again or delete resources manually.")
            logger.error(e)
        return False

    def _update(self, aws_client: AwsApiClient) -> bool:
        """Updates the EbsVolume

        Uses ec2:ModifyVolume, which only supports Size, VolumeType, Iops,
        Throughput, MultiAttachEnabled and DryRun. Tags cannot be changed here
        (the previous code passed TagSpecifications, which ModifyVolume
        rejects with a parameter-validation error).

        Args:
            aws_client: The AwsApiClient for the current volume
        """
        print_info(f"Updating {self.get_resource_type()}: {self.get_resource_name()}")

        # create a dict of args which are not null, otherwise aws type validation fails
        not_null_args: Dict[str, Any] = {}
        if self.iops:
            not_null_args["Iops"] = self.iops
        if self.volume_type:
            not_null_args["VolumeType"] = self._normalized_volume_type()
        if self.dry_run:
            not_null_args["DryRun"] = self.dry_run
        if self.multi_attach_enabled:
            not_null_args["MultiAttachEnabled"] = self.multi_attach_enabled
        if self.throughput:
            not_null_args["Throughput"] = self.throughput

        service_client = self.get_service_client(aws_client)
        try:
            volume = self._read(aws_client)
            logger.debug(f"EbsVolume: {volume}")
            if volume is None or self.volume_id is None:
                logger.warning(f"No {self.get_resource_type()} to update")
                return True

            # update volume
            update_response = service_client.modify_volume(
                VolumeId=self.volume_id,
                **not_null_args,
            )
            logger.debug(f"update_response: {update_response}")

            # Validate Volume update
            volume_modification = update_response.get("VolumeModification", None)
            if volume_modification is not None:
                volume_id_after_modification = volume_modification.get("VolumeId", None)
                logger.debug(f"volume_id: {volume_id_after_modification}")
                if volume_id_after_modification is not None:
                    return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be updated.")
            logger.error("Please try again or update resources manually.")
            logger.error(e)
        return False

    def get_volume_id(self, aws_client: Optional[AwsApiClient] = None) -> Optional[str]:
        """Returns the volume_id of the EbsVolume"""
        client = aws_client or self.get_aws_client()
        if client is not None:
            self._read(client)
        return self.volume_id
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/ec2/volume.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/ecs/cluster.py | from typing import Any, Dict, List, Optional
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.cli.console import print_info
from agno.utilities.logging import logger
class EcsCluster(AwsResource):
    """
    Manages an Amazon ECS Cluster.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
    """

    resource_type: Optional[str] = "EcsCluster"
    service_name: str = "ecs"

    # Name of the cluster.
    name: str
    # Name for the cluster.
    # Use name if not provided.
    ecs_cluster_name: Optional[str] = None

    tags: Optional[List[Dict[str, str]]] = None
    # The setting to use when creating a cluster.
    settings: Optional[List[Dict[str, Any]]] = None
    # The execute command configuration for the cluster.
    configuration: Optional[Dict[str, Any]] = None
    # The short name of one or more capacity providers to associate with the cluster.
    # A capacity provider must be associated with a cluster before it can be included as part of the default capacity
    # provider strategy of the cluster or used in a capacity provider strategy when calling the CreateService/RunTask.
    capacity_providers: Optional[List[str]] = None
    # The capacity provider strategy to set as the default for the cluster. After a default capacity provider strategy
    # is set for a cluster, when you call the RunTask or CreateService APIs with no capacity provider strategy or
    # launch type specified, the default capacity provider strategy for the cluster is used.
    default_capacity_provider_strategy: Optional[List[Dict[str, Any]]] = None
    # Use this parameter to set a default Service Connect namespace.
    # After you set a default Service Connect namespace, any new services with Service Connect turned on that are
    # created in the cluster are added as client services in the namespace.
    service_connect_namespace: Optional[str] = None

    def get_ecs_cluster_name(self):
        """Return the cluster name to use on AWS (falls back to ``name``)."""
        return self.ecs_cluster_name or self.name

    def _create(self, aws_client: AwsApiClient) -> bool:
        """Creates the EcsCluster

        Args:
            aws_client: The AwsApiClient for the current cluster
        """
        print_info(f"Creating {self.get_resource_type()}: {self.get_resource_name()}")

        # create a dict of args which are not null, otherwise aws type validation fails
        not_null_args: Dict[str, Any] = {}
        if self.tags is not None:
            not_null_args["tags"] = self.tags
        if self.settings is not None:
            not_null_args["settings"] = self.settings
        if self.configuration is not None:
            not_null_args["configuration"] = self.configuration
        if self.capacity_providers is not None:
            not_null_args["capacityProviders"] = self.capacity_providers
        if self.default_capacity_provider_strategy is not None:
            not_null_args["defaultCapacityProviderStrategy"] = self.default_capacity_provider_strategy
        if self.service_connect_namespace is not None:
            not_null_args["serviceConnectDefaults"] = {
                "namespace": self.service_connect_namespace,
            }

        # Create EcsCluster
        service_client = self.get_service_client(aws_client)
        try:
            create_response = service_client.create_cluster(
                clusterName=self.get_ecs_cluster_name(),
                **not_null_args,
            )
            logger.debug(f"EcsCluster: {create_response}")
            resource_dict = create_response.get("cluster", {})

            # Validate resource creation
            if resource_dict is not None:
                self.active_resource = create_response
                return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be created.")
            logger.error(e)
        return False

    def _read(self, aws_client: AwsApiClient) -> Optional[Any]:
        """Returns the EcsCluster

        Only an ACTIVE cluster with a matching name is considered found.

        Args:
            aws_client: The AwsApiClient for the current cluster
        """
        logger.debug(f"Reading {self.get_resource_type()}: {self.get_resource_name()}")

        from botocore.exceptions import ClientError

        service_client = self.get_service_client(aws_client)
        try:
            cluster_name = self.get_ecs_cluster_name()
            describe_response = service_client.describe_clusters(clusters=[cluster_name])
            logger.debug(f"EcsCluster: {describe_response}")
            resource_list = describe_response.get("clusters", None)

            if resource_list is not None and isinstance(resource_list, list):
                for resource in resource_list:
                    _cluster_identifier = resource.get("clusterName", None)
                    if _cluster_identifier == cluster_name:
                        _cluster_status = resource.get("status", None)
                        if _cluster_status == "ACTIVE":
                            self.active_resource = resource
                        break
        except ClientError as ce:
            logger.debug(f"ClientError: {ce}")
        except Exception as e:
            logger.error(f"Error reading {self.get_resource_type()}.")
            logger.error(e)
        return self.active_resource

    def _delete(self, aws_client: AwsApiClient) -> bool:
        """Deletes the EcsCluster

        Args:
            aws_client: The AwsApiClient for the current cluster
        """
        print_info(f"Deleting {self.get_resource_type()}: {self.get_resource_name()}")

        service_client = self.get_service_client(aws_client)
        self.active_resource = None
        try:
            delete_response = service_client.delete_cluster(cluster=self.get_ecs_cluster_name())
            logger.debug(f"EcsCluster: {delete_response}")
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be deleted.")
            logger.error("Please try again or delete resources manually.")
            logger.error(e)
        return False

    def get_arn(self, aws_client: AwsApiClient) -> Optional[str]:
        """Return the ARN of the cluster, or None if it cannot be read."""
        cluster = self._read(aws_client)
        if cluster is None:
            return None
        # describe_clusters returns the ARN under "clusterArn".
        # (The previous "ListenerArn" key was a copy-paste from the ELB
        # listener resource and always returned None.)
        return cluster.get("clusterArn", None)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/ecs/cluster.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/ecs/container.py | from typing import Any, Dict, List, Optional, Union
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.aws.resource.secret.manager import SecretsManager
from agno.aws.resource.secret.reader import read_secrets
from agno.utilities.logging import logger
class EcsContainer(AwsResource):
    """An ECS container definition, rendered into the dict format expected by
    register_task_definition.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
    """

    resource_type: Optional[str] = "EcsContainer"
    service_name: str = "ecs"

    # The name of a container.
    # If you're linking multiple containers together in a task definition, the name of one container can be entered in
    # the links of another container to connect the containers.
    name: str
    # The image used to start a container.
    image: str
    # The private repository authentication credentials to use.
    repository_credentials: Optional[Dict[str, Any]] = None
    # The number of cpu units reserved for the container.
    cpu: Optional[int] = None
    # The amount (in MiB) of memory to present to the container.
    memory: Optional[int] = None
    # The soft limit (in MiB) of memory to reserve for the container.
    memory_reservation: Optional[int] = None
    # The links parameter allows containers to communicate with each other without the need for port mappings.
    links: Optional[List[str]] = None
    # The list of port mappings for the container. Port mappings allow containers to access ports on the host container
    # instance to send or receive traffic.
    port_mappings: Optional[List[Dict[str, Any]]] = None
    # If the essential parameter of a container is marked as true, and that container fails or stops for any reason,
    # all other containers that are part of the task are stopped. If omitted, a container is assumed to be essential.
    essential: Optional[bool] = None
    # The entry point that's passed to the container.
    entry_point: Optional[List[str]] = None
    # The command that's passed to the container.
    command: Optional[List[str]] = None
    # The environment variables to pass to a container.
    environment: Optional[List[Dict[str, Any]]] = None
    # A list of files containing the environment variables to pass to a container.
    environment_files: Optional[List[Dict[str, Any]]] = None
    # Read environment variables from AWS Secrets.
    env_from_secrets: Optional[Union[SecretsManager, List[SecretsManager]]] = None
    # The mount points for data volumes in your container.
    mount_points: Optional[List[Dict[str, Any]]] = None
    # Data volumes to mount from another container.
    volumes_from: Optional[List[Dict[str, Any]]] = None
    # Linux-specific modifications that are applied to the container, such as Linux kernel capabilities.
    linux_parameters: Optional[Dict[str, Any]] = None
    # The secrets to pass to the container.
    secrets: Optional[List[Dict[str, Any]]] = None
    # The dependencies defined for container startup and shutdown.
    depends_on: Optional[List[Dict[str, Any]]] = None
    # Time duration (in seconds) to wait before giving up on resolving dependencies for a container.
    start_timeout: Optional[int] = None
    # Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally.
    stop_timeout: Optional[int] = None
    # The hostname to use for your container.
    hostname: Optional[str] = None
    # The user to use inside the container.
    user: Optional[str] = None
    # The working directory to run commands inside the container in.
    working_directory: Optional[str] = None
    # When this parameter is true, networking is disabled within the container.
    disable_networking: Optional[bool] = None
    # When this parameter is true, the container is given elevated privileges
    # on the host container instance (similar to the root user).
    privileged: Optional[bool] = None
    readonly_root_filesystem: Optional[bool] = None
    dns_servers: Optional[List[str]] = None
    dns_search_domains: Optional[List[str]] = None
    extra_hosts: Optional[List[Dict[str, Any]]] = None
    docker_security_options: Optional[List[str]] = None
    interactive: Optional[bool] = None
    pseudo_terminal: Optional[bool] = None
    docker_labels: Optional[Dict[str, Any]] = None
    ulimits: Optional[List[Dict[str, Any]]] = None
    log_configuration: Optional[Dict[str, Any]] = None
    health_check: Optional[Dict[str, Any]] = None
    system_controls: Optional[List[Dict[str, Any]]] = None
    resource_requirements: Optional[List[Dict[str, Any]]] = None
    firelens_configuration: Optional[Dict[str, Any]] = None

    def get_container_definition(self, aws_client: Optional[AwsApiClient] = None) -> Dict[str, Any]:
        """Build the containerDefinition dict for this container.

        Only attributes that are not None are included. The "environment" key
        is always set first (possibly to an empty list), matching the
        previous behavior of this method.

        Args:
            aws_client: Used to resolve AwsReference env values and secrets.
        """
        container_definition: Dict[str, Any] = {}

        # Build container environment (always present, even when empty).
        container_definition["environment"] = self.build_container_environment(aws_client=aws_client)

        # (attribute value, API key) pairs, in the same insertion order as the
        # old 40-branch if-chain this table replaces.
        field_to_key = (
            (self.name, "name"),
            (self.image, "image"),
            (self.repository_credentials, "repositoryCredentials"),
            (self.cpu, "cpu"),
            (self.memory, "memory"),
            (self.memory_reservation, "memoryReservation"),
            (self.links, "links"),
            (self.port_mappings, "portMappings"),
            (self.essential, "essential"),
            (self.entry_point, "entryPoint"),
            (self.command, "command"),
            (self.environment_files, "environmentFiles"),
            (self.mount_points, "mountPoints"),
            (self.volumes_from, "volumesFrom"),
            (self.linux_parameters, "linuxParameters"),
            (self.secrets, "secrets"),
            (self.depends_on, "dependsOn"),
            (self.start_timeout, "startTimeout"),
            (self.stop_timeout, "stopTimeout"),
            (self.hostname, "hostname"),
            (self.user, "user"),
            (self.working_directory, "workingDirectory"),
            (self.disable_networking, "disableNetworking"),
            (self.privileged, "privileged"),
            (self.readonly_root_filesystem, "readonlyRootFilesystem"),
            (self.dns_servers, "dnsServers"),
            (self.dns_search_domains, "dnsSearchDomains"),
            (self.extra_hosts, "extraHosts"),
            (self.docker_security_options, "dockerSecurityOptions"),
            (self.interactive, "interactive"),
            (self.pseudo_terminal, "pseudoTerminal"),
            (self.docker_labels, "dockerLabels"),
            (self.ulimits, "ulimits"),
            (self.log_configuration, "logConfiguration"),
            (self.health_check, "healthCheck"),
            (self.system_controls, "systemControls"),
            (self.resource_requirements, "resourceRequirements"),
            (self.firelens_configuration, "firelensConfiguration"),
        )
        for value, key in field_to_key:
            if value is not None:
                container_definition[key] = value

        return container_definition

    def build_container_environment(self, aws_client: Optional[AwsApiClient] = None) -> List[Dict[str, Any]]:
        """Build the [{"name": ..., "value": ...}] environment list.

        AwsReference values are resolved via aws_client; entries read from
        env_from_secrets are appended after the explicit environment.
        Unresolvable or unconvertible values are logged and skipped.
        """
        logger.debug("Building container environment")
        container_environment: List[Dict[str, Any]] = []

        if self.environment is not None:
            # Imported lazily to avoid a module-level import cycle.
            from agno.aws.resource.reference import AwsReference

            for env in self.environment:
                env_name = env.get("name", None)
                env_value = env.get("value", None)
                env_value_parsed = None
                if isinstance(env_value, AwsReference):
                    logger.debug(f"{env_name} is an AwsReference")
                    try:
                        env_value_parsed = env_value.get_reference(aws_client=aws_client)
                    except Exception as e:
                        logger.error(f"Error while parsing {env_name}: {e}")
                else:
                    env_value_parsed = env_value
                if env_value_parsed is not None:
                    try:
                        env_val_str = str(env_value_parsed)
                        container_environment.append({"name": env_name, "value": env_val_str})
                    except Exception as e:
                        logger.error(f"Error while converting {env_value} to str: {e}")

        if self.env_from_secrets is not None:
            secrets: Dict[str, Any] = read_secrets(self.env_from_secrets, aws_client=aws_client)
            for secret_name, secret_value in secrets.items():
                try:
                    container_environment.append({"name": secret_name, "value": str(secret_value)})
                except Exception as e:
                    logger.error(f"Error while converting {secret_value} to str: {e}")

        return container_environment
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/ecs/container.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/ecs/volume.py | from typing import Any, Dict, Optional
from agno.aws.resource.base import AwsResource
from agno.utilities.logging import logger
class EcsVolume(AwsResource):
    """An ECS task-definition volume, rendered into the dict format expected
    by register_task_definition.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
    """

    resource_type: Optional[str] = "EcsVolume"
    service_name: str = "ecs"

    # Name of the volume, referenced from container mountPoints.
    name: str
    host: Optional[Dict[str, Any]] = None
    docker_volume_configuration: Optional[Dict[str, Any]] = None
    efs_volume_configuration: Optional[Dict[str, Any]] = None
    fsx_windows_file_server_volume_configuration: Optional[Dict[str, Any]] = None

    def _field_key_pairs(self):
        """Return (attribute value, API key) pairs for every volume field.

        Shared by get_volume_definition and volume_definition_up_to_date so
        the field/key mapping lives in exactly one place (it used to be
        duplicated across both methods).
        """
        return (
            (self.name, "name"),
            (self.host, "host"),
            (self.docker_volume_configuration, "dockerVolumeConfiguration"),
            (self.efs_volume_configuration, "efsVolumeConfiguration"),
            (
                self.fsx_windows_file_server_volume_configuration,
                "fsxWindowsFileServerVolumeConfiguration",
            ),
        )

    def get_volume_definition(self) -> Dict[str, Any]:
        """Build the volume definition dict, including only non-None fields."""
        return {key: value for value, key in self._field_key_pairs() if value is not None}

    def volume_definition_up_to_date(self, volume_definition: Dict[str, Any]) -> bool:
        """Return True if volume_definition matches every non-None field.

        Fields that are None on this resource are not compared; the first
        mismatch is logged and ends the check.
        """
        for value, key in self._field_key_pairs():
            if value is not None and volume_definition.get(key) != value:
                logger.debug("{} != {}".format(value, volume_definition.get(key)))
                return False
        return True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/ecs/volume.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/elasticache/cluster.py | from pathlib import Path
from typing import Any, Dict, List, Optional
from typing_extensions import Literal
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.aws.resource.ec2.security_group import SecurityGroup
from agno.aws.resource.elasticache.subnet_group import CacheSubnetGroup
from agno.cli.console import print_info
from agno.utilities.logging import logger
class CacheCluster(AwsResource):
"""
Reference:
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elasticache.html
"""
resource_type: Optional[str] = "CacheCluster"
service_name: str = "elasticache"
# Name of the cluster.
name: str
# The node group (shard) identifier. This parameter is stored as a lowercase string.
# If None, use the name as the cache_cluster_id
# Constraints:
# A name must contain from 1 to 50 alphanumeric characters or hyphens.
# The first character must be a letter.
# A name cannot end with a hyphen or contain two consecutive hyphens.
cache_cluster_id: Optional[str] = None
# The name of the cache engine to be used for this cluster.
engine: Literal["memcached", "redis"]
# Compute and memory capacity of the nodes in the node group (shard).
cache_node_type: str
# The initial number of cache nodes that the cluster has.
# For clusters running Redis, this value must be 1.
# For clusters running Memcached, this value must be between 1 and 40.
num_cache_nodes: int
# The ID of the replication group to which this cluster should belong.
# If this parameter is specified, the cluster is added to the specified replication group as a read replica;
# otherwise, the cluster is a standalone primary that is not part of any replication group.
replication_group_id: Optional[str] = None
# Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or
# created across multiple Availability Zones in the cluster's region.
# This parameter is only supported for Memcached clusters.
az_mode: Optional[Literal["single-az", "cross-az"]] = None
# The EC2 Availability Zone in which the cluster is created.
# All nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your
# nodes across multiple Availability Zones, use PreferredAvailabilityZones .
# Default: System chosen Availability Zone.
preferred_availability_zone: Optional[str] = None
# A list of the Availability Zones in which cache nodes are created. The order of the zones is not important.
# This option is only supported on Memcached.
preferred_availability_zones: Optional[List[str]] = None
# The version number of the cache engine to be used for this cluster.
engine_version: Optional[str] = None
cache_parameter_group_name: Optional[str] = None
# The name of the subnet group to be used for the cluster.
cache_subnet_group_name: Optional[str] = None
# If cache_subnet_group_name is None,
# Read the cache_subnet_group_name from cache_subnet_group
cache_subnet_group: Optional[CacheSubnetGroup] = None
# A list of security group names to associate with this cluster.
# Use this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).
cache_security_group_names: Optional[List[str]] = None
# One or more VPC security groups associated with the cluster.
# Use this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).
cache_security_group_ids: Optional[List[str]] = None
# If cache_security_group_ids is None
# Read the security_group_id from cache_security_groups
cache_security_groups: Optional[List[SecurityGroup]] = None
tags: Optional[List[Dict[str, str]]] = None
snapshot_arns: Optional[List[str]] = None
snapshot_name: Optional[str] = None
preferred_maintenance_window: Optional[str] = None
# The version number of the cache engine to be used for this cluster.
port: Optional[int] = None
notification_topic_arn: Optional[str] = None
auto_minor_version_upgrade: Optional[bool] = None
snapshot_retention_limit: Optional[int] = None
snapshot_window: Optional[str] = None
# The password used to access a password protected server.
# Password constraints:
# - Must be only printable ASCII characters.
# - Must be at least 16 characters and no more than 128 characters in length.
# - The only permitted printable special characters are !, &, #, $, ^, <, >, and -.
# Other printable special characters cannot be used in the AUTH token.
# - For more information, see AUTH password at http://redis.io/commands/AUTH.
# Provide AUTH_TOKEN here or as AUTH_TOKEN in secrets_file
auth_token: Optional[str] = None
outpost_mode: Optional[Literal["single-outpost", "cross-outpost"]] = None
preferred_outpost_arn: Optional[str] = None
preferred_outpost_arns: Optional[List[str]] = None
log_delivery_configurations: Optional[List[Dict[str, Any]]] = None
transit_encryption_enabled: Optional[bool] = None
network_type: Optional[Literal["ipv4", "ipv6", "dual_stack"]] = None
ip_discovery: Optional[Literal["ipv4", "ipv6"]] = None
# The user-supplied name of a final cluster snapshot
final_snapshot_identifier: Optional[str] = None
# Read secrets from a file in yaml format
secrets_file: Optional[Path] = None
# The following attributes are used for update function
cache_node_ids_to_remove: Optional[List[str]] = None
new_availability_zone: Optional[List[str]] = None
security_group_ids: Optional[List[str]] = None
notification_topic_status: Optional[str] = None
apply_immediately: Optional[bool] = None
auth_token_update_strategy: Optional[Literal["SET", "ROTATE", "DELETE"]] = None
def get_cache_cluster_id(self):
    """Return the cluster id to use with the ElastiCache API.

    Falls back to the resource name when cache_cluster_id is unset.
    """
    cluster_id = self.cache_cluster_id
    if not cluster_id:
        cluster_id = self.name
    return cluster_id
def get_auth_token(self) -> Optional[str]:
    """Return the AUTH token.

    Prefers the explicit auth_token attribute; otherwise reads the
    AUTH_TOKEN key from the secrets_file, when one is configured.
    """
    if self.auth_token is not None:
        return self.auth_token
    if self.secrets_file is None:
        return None
    # read from secrets_file
    secret_data = self.get_secret_file_data()
    if secret_data is None:
        return None
    return secret_data.get("AUTH_TOKEN", None)
def _create(self, aws_client: AwsApiClient) -> bool:
    """Creates the CacheCluster.

    Resolves the subnet group and security groups from linked resources when
    not provided explicitly, builds the non-null argument dict and calls
    create_cache_cluster.

    Args:
        aws_client: The AwsApiClient for the current cluster
    """
    print_info(f"Creating {self.get_resource_type()}: {self.get_resource_name()}")

    # create a dict of args which are not null, otherwise aws type validation fails
    not_null_args: Dict[str, Any] = {}

    # Get the CacheSubnetGroupName from the linked CacheSubnetGroup if needed
    cache_subnet_group_name = self.cache_subnet_group_name
    if cache_subnet_group_name is None and self.cache_subnet_group is not None:
        cache_subnet_group_name = self.cache_subnet_group.name
        logger.debug(f"Using CacheSubnetGroup: {cache_subnet_group_name}")
    if cache_subnet_group_name is not None:
        not_null_args["CacheSubnetGroupName"] = cache_subnet_group_name

    # Resolve security group ids from linked SecurityGroup resources if needed
    cache_security_group_ids = self.cache_security_group_ids
    if cache_security_group_ids is None and self.cache_security_groups is not None:
        sg_ids = []
        for sg in self.cache_security_groups:
            sg_id = sg.get_security_group_id(aws_client)
            if sg_id is not None:
                sg_ids.append(sg_id)
        if len(sg_ids) > 0:
            cache_security_group_ids = sg_ids
            logger.debug(f"Using SecurityGroups: {cache_security_group_ids}")
    if cache_security_group_ids is not None:
        not_null_args["SecurityGroupIds"] = cache_security_group_ids

    # Fix: resolve the AUTH token via get_auth_token() so a token stored only
    # in the secrets_file is also applied. The old guard on self.auth_token
    # skipped the secrets_file lookup entirely.
    auth_token = self.get_auth_token()
    if auth_token is not None:
        not_null_args["AuthToken"] = auth_token

    # Map the remaining optional attributes to their API argument names
    # (replaces the long chain of per-attribute None checks).
    optional_args: Dict[str, Any] = {
        "ReplicationGroupId": self.replication_group_id,
        "AZMode": self.az_mode,
        "PreferredAvailabilityZone": self.preferred_availability_zone,
        "PreferredAvailabilityZones": self.preferred_availability_zones,
        "NumCacheNodes": self.num_cache_nodes,
        "CacheNodeType": self.cache_node_type,
        "Engine": self.engine,
        "EngineVersion": self.engine_version,
        "CacheParameterGroupName": self.cache_parameter_group_name,
        "CacheSecurityGroupNames": self.cache_security_group_names,
        "Tags": self.tags,
        "SnapshotArns": self.snapshot_arns,
        "SnapshotName": self.snapshot_name,
        "PreferredMaintenanceWindow": self.preferred_maintenance_window,
        "Port": self.port,
        "NotificationTopicArn": self.notification_topic_arn,
        "AutoMinorVersionUpgrade": self.auto_minor_version_upgrade,
        "SnapshotRetentionLimit": self.snapshot_retention_limit,
        "SnapshotWindow": self.snapshot_window,
        "OutpostMode": self.outpost_mode,
        "PreferredOutpostArn": self.preferred_outpost_arn,
        "PreferredOutpostArns": self.preferred_outpost_arns,
        "LogDeliveryConfigurations": self.log_delivery_configurations,
        "TransitEncryptionEnabled": self.transit_encryption_enabled,
        "NetworkType": self.network_type,
        "IpDiscovery": self.ip_discovery,
    }
    for arg_name, arg_value in optional_args.items():
        if arg_value is not None:
            not_null_args[arg_name] = arg_value

    # Create CacheCluster
    service_client = self.get_service_client(aws_client)
    try:
        create_response = service_client.create_cache_cluster(
            CacheClusterId=self.get_cache_cluster_id(),
            **not_null_args,
        )
        logger.debug(f"CacheCluster: {create_response}")
        resource_dict = create_response.get("CacheCluster", {})
        # Validate resource creation
        if resource_dict is not None:
            print_info(f"CacheCluster created: {self.get_cache_cluster_id()}")
            self.active_resource = create_response
            return True
    except Exception as e:
        logger.error(f"{self.get_resource_type()} could not be created.")
        logger.error(e)
    return False
def post_create(self, aws_client: AwsApiClient) -> bool:
    """Optionally block until the CacheCluster is available.

    Uses the boto3 "cache_cluster_available" waiter when wait_for_create is
    set; waiter failures are logged but never fail the create flow.
    """
    if not self.wait_for_create:
        return True
    try:
        print_info(f"Waiting for {self.get_resource_type()} to be active.")
        waiter = self.get_service_client(aws_client).get_waiter("cache_cluster_available")
        waiter.wait(
            CacheClusterId=self.get_cache_cluster_id(),
            WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
        )
    except Exception as e:
        logger.error("Waiter failed.")
        logger.error(e)
    return True
def _read(self, aws_client: AwsApiClient) -> Optional[Any]:
    """Returns the CacheCluster.

    Caches the matching cluster dict on self.active_resource; a missing
    cluster (ClientError) is logged at debug level only.

    Args:
        aws_client: The AwsApiClient for the current cluster
    """
    logger.debug(f"Reading {self.get_resource_type()}: {self.get_resource_name()}")

    from botocore.exceptions import ClientError

    service_client = self.get_service_client(aws_client)
    try:
        cache_cluster_id = self.get_cache_cluster_id()
        describe_response = service_client.describe_cache_clusters(CacheClusterId=cache_cluster_id)
        logger.debug(f"CacheCluster: {describe_response}")

        clusters = describe_response.get("CacheClusters", None)
        if isinstance(clusters, list):
            for cluster in clusters:
                if cluster.get("CacheClusterId", None) == cache_cluster_id:
                    self.active_resource = cluster
                    break
    except ClientError as ce:
        logger.debug(f"ClientError: {ce}")
    except Exception as e:
        logger.error(f"Error reading {self.get_resource_type()}.")
        logger.error(e)
    return self.active_resource
def _update(self, aws_client: AwsApiClient) -> bool:
    """Updates the CacheCluster.

    Builds the non-null argument dict and calls modify_cache_cluster.

    Args:
        aws_client: The AwsApiClient for the current cluster
    """
    logger.debug(f"Updating {self.get_resource_type()}: {self.get_resource_name()}")

    cache_cluster_id = self.get_cache_cluster_id()
    if cache_cluster_id is None:
        logger.error("CacheClusterId is None")
        return False

    # create a dict of args which are not null, otherwise aws type validation fails
    not_null_args: Dict[str, Any] = {}

    # Fix: resolve the AUTH token via get_auth_token() so a token stored only
    # in the secrets_file is also applied. The old guard on self.auth_token
    # skipped the secrets_file lookup entirely.
    auth_token = self.get_auth_token()
    if auth_token is not None:
        not_null_args["AuthToken"] = auth_token

    # Map the remaining optional attributes to their API argument names
    # (replaces the long chain of per-attribute None checks).
    optional_args: Dict[str, Any] = {
        "NumCacheNodes": self.num_cache_nodes,
        "CacheNodeIdsToRemove": self.cache_node_ids_to_remove,
        "AZMode": self.az_mode,
        "NewAvailabilityZone": self.new_availability_zone,
        "CacheSecurityGroupNames": self.cache_security_group_names,
        "SecurityGroupIds": self.security_group_ids,
        "PreferredMaintenanceWindow": self.preferred_maintenance_window,
        "NotificationTopicArn": self.notification_topic_arn,
        "CacheParameterGroupName": self.cache_parameter_group_name,
        "NotificationTopicStatus": self.notification_topic_status,
        "ApplyImmediately": self.apply_immediately,
        "EngineVersion": self.engine_version,
        "AutoMinorVersionUpgrade": self.auto_minor_version_upgrade,
        "SnapshotRetentionLimit": self.snapshot_retention_limit,
        "SnapshotWindow": self.snapshot_window,
        "CacheNodeType": self.cache_node_type,
        "AuthTokenUpdateStrategy": self.auth_token_update_strategy,
        "LogDeliveryConfigurations": self.log_delivery_configurations,
    }
    for arg_name, arg_value in optional_args.items():
        if arg_value is not None:
            not_null_args[arg_name] = arg_value

    service_client = self.get_service_client(aws_client)
    try:
        modify_response = service_client.modify_cache_cluster(
            CacheClusterId=cache_cluster_id,
            **not_null_args,
        )
        logger.debug(f"CacheCluster: {modify_response}")
        resource_dict = modify_response.get("CacheCluster", {})
        # Validate resource update
        if resource_dict is not None:
            print_info(f"CacheCluster updated: {self.get_cache_cluster_id()}")
            self.active_resource = modify_response
            return True
    except Exception as e:
        logger.error(f"{self.get_resource_type()} could not be updated.")
        logger.error(e)
    return False
def _delete(self, aws_client: AwsApiClient) -> bool:
    """Deletes the CacheCluster.

    Passes FinalSnapshotIdentifier when configured so a final snapshot is
    taken before deletion.

    Args:
        aws_client: The AwsApiClient for the current cluster
    """
    print_info(f"Deleting {self.get_resource_type()}: {self.get_resource_name()}")

    # create a dict of args which are not null, otherwise aws type validation fails
    not_null_args: Dict[str, Any] = {}
    if self.final_snapshot_identifier:
        not_null_args["FinalSnapshotIdentifier"] = self.final_snapshot_identifier

    elasticache_client = self.get_service_client(aws_client)
    # Drop the cached resource up front; it is stale either way.
    self.active_resource = None
    try:
        response = elasticache_client.delete_cache_cluster(
            CacheClusterId=self.get_cache_cluster_id(),
            **not_null_args,
        )
        logger.debug(f"CacheCluster: {response}")
    except Exception as e:
        logger.error(f"{self.get_resource_type()} could not be deleted.")
        logger.error("Please try again or delete resources manually.")
        logger.error(e)
        return False
    return True
def post_delete(self, aws_client: AwsApiClient) -> bool:
    """Optionally block until the CacheCluster is gone.

    Uses the boto3 "cache_cluster_deleted" waiter when wait_for_delete is
    set; waiter failures are logged but never fail the delete flow.
    """
    if not self.wait_for_delete:
        return True
    try:
        print_info(f"Waiting for {self.get_resource_type()} to be deleted.")
        waiter = self.get_service_client(aws_client).get_waiter("cache_cluster_deleted")
        waiter.wait(
            CacheClusterId=self.get_cache_cluster_id(),
            WaiterConfig={"Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts},
        )
    except Exception as e:
        logger.error("Waiter failed.")
        logger.error(e)
    return True
def get_cache_endpoint(self, aws_client: Optional[AwsApiClient] = None) -> Optional[str]:
    """Returns the CacheCluster endpoint.

    Describes the cluster with ShowCacheNodeInfo and returns the first cache
    node's endpoint address, when one is available.

    Args:
        aws_client: The AwsApiClient for the current cluster
    """
    cache_endpoint = None
    try:
        client: AwsApiClient = aws_client or self.get_aws_client()
        cache_cluster_id = self.get_cache_cluster_id()
        describe_response = self.get_service_client(client).describe_cache_clusters(
            CacheClusterId=cache_cluster_id, ShowCacheNodeInfo=True
        )
        clusters = describe_response.get("CacheClusters", None)
        if isinstance(clusters, list):
            for cluster in clusters:
                if cluster.get("CacheClusterId", None) != cache_cluster_id:
                    continue
                # Return the first node that reports a string endpoint address.
                for node in cluster.get("CacheNodes", []):
                    cache_endpoint = node.get("Endpoint", {}).get("Address", None)
                    if cache_endpoint is not None and isinstance(cache_endpoint, str):
                        return cache_endpoint
                break
    except Exception as e:
        logger.error(f"Error reading {self.get_resource_type()}.")
        logger.error(e)
    return cache_endpoint
def get_cache_port(self, aws_client: Optional[AwsApiClient] = None) -> Optional[int]:
    """Returns the CacheCluster port.

    Describes the cluster with ShowCacheNodeInfo and returns the first cache
    node's endpoint port, when one is available.

    Args:
        aws_client: The AwsApiClient for the current cluster
    """
    cache_port = None
    try:
        client: AwsApiClient = aws_client or self.get_aws_client()
        cache_cluster_id = self.get_cache_cluster_id()
        describe_response = self.get_service_client(client).describe_cache_clusters(
            CacheClusterId=cache_cluster_id, ShowCacheNodeInfo=True
        )
        clusters = describe_response.get("CacheClusters", None)
        if isinstance(clusters, list):
            for cluster in clusters:
                if cluster.get("CacheClusterId", None) != cache_cluster_id:
                    continue
                # Return the first node that reports an integer endpoint port.
                for node in cluster.get("CacheNodes", []):
                    cache_port = node.get("Endpoint", {}).get("Port", None)
                    if cache_port is not None and isinstance(cache_port, int):
                        return cache_port
                break
    except Exception as e:
        logger.error(f"Error reading {self.get_resource_type()}.")
        logger.error(e)
    return cache_port
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/elasticache/cluster.py",
"license": "Apache License 2.0",
"lines": 416,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/emr/cluster.py | from typing import Any, Dict, List, Optional
from typing_extensions import Literal
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.cli.console import print_info
from agno.utilities.logging import logger
class EmrCluster(AwsResource):
    """
    Manages an AWS EMR cluster (a "job flow") as an Agno resource.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/emr.html
    """

    resource_type: Optional[str] = "EmrCluster"
    service_name: str = "emr"

    # Name of the cluster.
    name: str
    # The location in Amazon S3 to write the log files of the job flow.
    # If a value is not provided, logs are not created.
    log_uri: Optional[str] = None
    # The KMS key used for encrypting log files. If a value is not provided, the logs remain encrypted by AES-256.
    # This attribute is only available with Amazon EMR version 5.30.0 and later, excluding Amazon EMR 6.0.0.
    log_encryption_kms_key_id: Optional[str] = None
    # A JSON string for selecting additional features.
    additional_info: Optional[str] = None
    # The Amazon EMR release label, which determines the version of open-source application packages installed on the
    # cluster. Release labels are in the form emr-x.x.x,
    # where x.x.x is an Amazon EMR release version such as emr-5.14.0 .
    release_label: Optional[str] = None
    # A specification of the number and type of Amazon EC2 instances.
    instances: Optional[Dict[str, Any]] = None
    # A list of steps to run.
    steps: Optional[List[Dict[str, Any]]] = None
    # A list of bootstrap actions to run before Hadoop starts on the cluster nodes.
    bootstrap_actions: Optional[List[Dict[str, Any]]] = None
    # For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and later, use Applications.
    # A list of strings that indicates third-party software to use.
    supported_products: Optional[List[str]] = None
    # For Amazon EMR releases 3.x and 2.x: third-party software with arguments.
    new_supported_products: Optional[List[Dict[str, Any]]] = None
    # Applies to Amazon EMR releases 4.0 and later.
    # A case-insensitive list of applications for Amazon EMR to install and configure when launching the cluster.
    applications: Optional[List[Dict[str, Any]]] = None
    # For Amazon EMR releases 4.0 and later. The list of configurations supplied for the EMR cluster you are creating.
    configurations: Optional[List[Dict[str, Any]]] = None
    # Also called instance profile and EC2 role. An IAM role for an EMR cluster.
    # The EC2 instances of the cluster assume this role. The default role is EMR_EC2_DefaultRole.
    # In order to use the default role, you must have already created it using the CLI or console.
    job_flow_role: Optional[str] = None
    # The IAM role that Amazon EMR assumes in order to access Amazon Web Services resources on your behalf.
    service_role: Optional[str] = None
    # A list of tags to associate with a cluster and propagate to Amazon EC2 instances.
    tags: Optional[List[Dict[str, str]]] = None
    # The name of a security configuration to apply to the cluster.
    security_configuration: Optional[str] = None
    # An IAM role for automatic scaling policies. The default role is EMR_AutoScaling_DefaultRole.
    # The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2
    # instances in an instance group.
    auto_scaling_role: Optional[str] = None
    # How running instances are treated when the cluster scales in.
    scale_down_behavior: Optional[Literal["TERMINATE_AT_INSTANCE_HOUR", "TERMINATE_AT_TASK_COMPLETION"]] = None
    # A custom Amazon Linux AMI id, when the default EMR AMI should not be used.
    custom_ami_id: Optional[str] = None
    # The size, in GiB, of the Amazon EBS root device volume of the Linux AMI that is used for each EC2 instance.
    ebs_root_volume_size: Optional[int] = None
    # Whether Amazon Linux security updates are applied on boot.
    repo_upgrade_on_boot: Optional[Literal["SECURITY", "NONE"]] = None
    # Attributes for Kerberos configuration when Kerberos authentication is enabled using a security configuration.
    kerberos_attributes: Optional[Dict[str, str]] = None
    # Specifies the number of steps that can be executed concurrently.
    # The default value is 1 . The maximum value is 256 .
    step_concurrency_level: Optional[int] = None
    # The specified managed scaling policy for an Amazon EMR cluster.
    managed_scaling_policy: Optional[Dict[str, Any]] = None
    # Placement group configuration for the cluster's instance roles.
    placement_group_configs: Optional[List[Dict[str, Any]]] = None
    # The auto-termination policy defines the amount of idle time in seconds after which a cluster terminates.
    auto_termination_policy: Optional[Dict[str, int]] = None

    # provided by api on create
    # A unique identifier for the job flow.
    job_flow_id: Optional[str] = None
    # The Amazon Resource Name (ARN) of the cluster.
    cluster_arn: Optional[str] = None
    # ClusterSummary returned on read
    cluster_summary: Optional[Dict] = None

    def _create(self, aws_client: AwsApiClient) -> bool:
        """Creates the EmrCluster via the run_job_flow API.

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            True when the API call succeeded, False on any exception.
        """
        print_info(f"Creating {self.get_resource_type()}: {self.get_resource_name()}")
        try:
            # create a dict of args which are not null, otherwise aws type validation fails
            not_null_args: Dict[str, Any] = {}
            if self.log_uri:
                not_null_args["LogUri"] = self.log_uri
            if self.log_encryption_kms_key_id:
                not_null_args["LogEncryptionKmsKeyId"] = self.log_encryption_kms_key_id
            if self.additional_info:
                not_null_args["AdditionalInfo"] = self.additional_info
            if self.release_label:
                not_null_args["ReleaseLabel"] = self.release_label
            if self.instances:
                not_null_args["Instances"] = self.instances
            if self.steps:
                not_null_args["Steps"] = self.steps
            if self.bootstrap_actions:
                not_null_args["BootstrapActions"] = self.bootstrap_actions
            if self.supported_products:
                not_null_args["SupportedProducts"] = self.supported_products
            if self.new_supported_products:
                not_null_args["NewSupportedProducts"] = self.new_supported_products
            if self.applications:
                not_null_args["Applications"] = self.applications
            if self.configurations:
                not_null_args["Configurations"] = self.configurations
            if self.job_flow_role:
                not_null_args["JobFlowRole"] = self.job_flow_role
            if self.service_role:
                not_null_args["ServiceRole"] = self.service_role
            if self.tags:
                not_null_args["Tags"] = self.tags
            if self.security_configuration:
                not_null_args["SecurityConfiguration"] = self.security_configuration
            if self.auto_scaling_role:
                not_null_args["AutoScalingRole"] = self.auto_scaling_role
            if self.scale_down_behavior:
                not_null_args["ScaleDownBehavior"] = self.scale_down_behavior
            if self.custom_ami_id:
                not_null_args["CustomAmiId"] = self.custom_ami_id
            if self.ebs_root_volume_size:
                not_null_args["EbsRootVolumeSize"] = self.ebs_root_volume_size
            if self.repo_upgrade_on_boot:
                not_null_args["RepoUpgradeOnBoot"] = self.repo_upgrade_on_boot
            if self.kerberos_attributes:
                not_null_args["KerberosAttributes"] = self.kerberos_attributes
            if self.step_concurrency_level:
                not_null_args["StepConcurrencyLevel"] = self.step_concurrency_level
            if self.managed_scaling_policy:
                not_null_args["ManagedScalingPolicy"] = self.managed_scaling_policy
            if self.placement_group_configs:
                not_null_args["PlacementGroupConfigs"] = self.placement_group_configs
            if self.auto_termination_policy:
                not_null_args["AutoTerminationPolicy"] = self.auto_termination_policy

            # Get the service_client
            service_client = self.get_service_client(aws_client)

            # Create EmrCluster
            create_response = service_client.run_job_flow(
                Name=self.name,
                **not_null_args,
            )
            logger.debug(f"create_response type: {type(create_response)}")
            logger.debug(f"create_response: {create_response}")

            # Capture the identifiers returned by the API for later reads/deletes.
            self.job_flow_id = create_response.get("JobFlowId", None)
            self.cluster_arn = create_response.get("ClusterArn", None)
            self.active_resource = create_response
            if self.active_resource is not None:
                print_info(f"{self.get_resource_type()}: {self.get_resource_name()} created")
                logger.debug(f"JobFlowId: {self.job_flow_id}")
                logger.debug(f"ClusterArn: {self.cluster_arn}")
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be created.")
            logger.error(e)
        return False

    def post_create(self, aws_client: AwsApiClient) -> bool:
        """Optionally block until the cluster reaches the running state.

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            Always True; waiter failures are logged but do not fail the flow.
        """
        ## Wait for Cluster to be created
        if self.wait_for_create:
            try:
                print_info("Waiting for EmrCluster to be active.")
                if self.job_flow_id is not None:
                    waiter = self.get_service_client(aws_client).get_waiter("cluster_running")
                    waiter.wait(
                        ClusterId=self.job_flow_id,
                        WaiterConfig={
                            "Delay": self.waiter_delay,
                            "MaxAttempts": self.waiter_max_attempts,
                        },
                    )
                else:
                    logger.warning("Skipping waiter, No ClusterId found")
            except Exception as e:
                logger.error("Waiter failed.")
                logger.error(e)
        return True

    def _read(self, aws_client: AwsApiClient) -> Optional[Any]:
        """Returns the EmrCluster summary matching self.name, or None.

        Args:
            aws_client: The AwsApiClient for the current cluster
        """
        from botocore.exceptions import ClientError

        logger.debug(f"Reading {self.get_resource_type()}: {self.get_resource_name()}")
        try:
            service_client = self.get_service_client(aws_client)
            list_response = service_client.list_clusters()
            # logger.debug(f"list_response type: {type(list_response)}")
            # logger.debug(f"list_response: {list_response}")
            # NOTE(review): list_clusters is paginated and includes terminated
            # clusters; matching a single unpaginated page by name may miss or
            # mismatch clusters — confirm this is acceptable for this workflow.
            cluster_summary_list = list_response.get("Clusters", None)
            if cluster_summary_list is not None and isinstance(cluster_summary_list, list):
                for _cluster_summary in cluster_summary_list:
                    cluster_name = _cluster_summary.get("Name", None)
                    if cluster_name == self.name:
                        self.active_resource = _cluster_summary
                        break

            if self.active_resource is None:
                logger.debug(f"No {self.get_resource_type()} found")
                return None

            # logger.debug(f"EmrCluster: {self.active_resource}")
            self.job_flow_id = self.active_resource.get("Id", None)
            self.cluster_arn = self.active_resource.get("ClusterArn", None)
        except ClientError as ce:
            logger.debug(f"ClientError: {ce}")
        except Exception as e:
            logger.error(f"Error reading {self.get_resource_type()}.")
            logger.error(e)
        return self.active_resource

    def _delete(self, aws_client: AwsApiClient) -> bool:
        """Deletes the EmrCluster by terminating its job flow.

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            True when the terminate call was issued (or no cluster id was
            found), False when the API call raised.
        """
        print_info(f"Deleting {self.get_resource_type()}: {self.get_resource_name()}")
        try:
            # populate self.job_flow_id
            self._read(aws_client)

            service_client = self.get_service_client(aws_client)
            self.active_resource = None
            if self.job_flow_id:
                service_client.terminate_job_flows(JobFlowIds=[self.job_flow_id])
                print_info(f"{self.get_resource_type()}: {self.get_resource_name()} deleted")
            else:
                # NOTE(review): returns True even when no cluster id was found —
                # confirm this "nothing to delete" outcome is intentional.
                logger.error("Could not find cluster id")
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be deleted.")
            logger.error("Please try again or delete resources manually.")
            logger.error(e)
        return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/emr/cluster.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/s3/bucket.py | from typing import Any, Dict, List, Optional
from typing_extensions import Literal
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.aws.resource.s3.object import S3Object
from agno.cli.console import print_info
from agno.utilities.logging import logger
class S3Bucket(AwsResource):
    """
    Manages an S3 bucket as an Agno resource.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#service-resource
    """

    resource_type: str = "s3"
    service_name: str = "s3"

    # Name of the bucket
    name: str
    # The canned ACL to apply to the bucket.
    acl: Optional[Literal["private", "public-read", "public-read-write", "authenticated-read"]] = None
    # Explicit grantee permissions (see the CreateBucket API for the grant syntax).
    grant_full_control: Optional[str] = None
    grant_read: Optional[str] = None
    grant_read_ACP: Optional[str] = None
    grant_write: Optional[str] = None
    grant_write_ACP: Optional[str] = None
    # Whether S3 Object Lock is enabled for the new bucket.
    object_lock_enabled_for_bucket: Optional[bool] = None
    # Object-ownership setting applied at bucket creation.
    object_ownership: Optional[Literal["BucketOwnerPreferred", "ObjectWriter", "BucketOwnerEnforced"]] = None

    @property
    def uri(self) -> str:
        """Returns the URI of the s3.Bucket

        Returns:
            str: The URI of the s3.Bucket
        """
        return f"s3://{self.name}"

    def get_resource(self, aws_client: Optional[AwsApiClient] = None) -> Optional[Any]:
        """Returns the boto3 s3.Bucket resource handle.

        Args:
            aws_client: The AwsApiClient for the current cluster
        """
        client: AwsApiClient = aws_client or self.get_aws_client()
        service_resource = self.get_service_resource(client)
        return service_resource.Bucket(name=self.name)

    def _create(self, aws_client: AwsApiClient) -> bool:
        """Creates the s3.Bucket

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            True when the bucket was created, False on any exception.
        """
        print_info(f"Creating {self.get_resource_type()}: {self.get_resource_name()}")

        # Step 1: Build bucket configuration
        # Bucket names are GLOBALLY unique!
        # AWS will give you the IllegalLocationConstraintException if you collide
        # with an already existing bucket if you've specified a region different than
        # the region of the already existing bucket. If you happen to guess the correct region of the
        # existing bucket it will give you the BucketAlreadyExists exception.
        bucket_configuration = None
        # us-east-1 is the default region and must NOT be sent as a LocationConstraint.
        if aws_client.aws_region is not None and aws_client.aws_region != "us-east-1":
            bucket_configuration = {"LocationConstraint": aws_client.aws_region}

        # create a dict of args which are not null, otherwise aws type validation fails
        not_null_args: Dict[str, Any] = {}
        if bucket_configuration:
            not_null_args["CreateBucketConfiguration"] = bucket_configuration
        if self.acl:
            not_null_args["ACL"] = self.acl
        if self.grant_full_control:
            not_null_args["GrantFullControl"] = self.grant_full_control
        if self.grant_read:
            not_null_args["GrantRead"] = self.grant_read
        if self.grant_read_ACP:
            not_null_args["GrantReadACP"] = self.grant_read_ACP
        if self.grant_write:
            not_null_args["GrantWrite"] = self.grant_write
        if self.grant_write_ACP:
            not_null_args["GrantWriteACP"] = self.grant_write_ACP
        if self.object_lock_enabled_for_bucket:
            not_null_args["ObjectLockEnabledForBucket"] = self.object_lock_enabled_for_bucket
        if self.object_ownership:
            not_null_args["ObjectOwnership"] = self.object_ownership

        # Step 2: Create Bucket
        service_client = self.get_service_client(aws_client)
        try:
            response = service_client.create_bucket(
                Bucket=self.name,
                **not_null_args,
            )
            logger.debug(f"Response: {response}")
            bucket_location = response.get("Location")
            if bucket_location is not None:
                logger.debug(f"Bucket created: {bucket_location}")
            self.active_resource = response
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be created.")
            logger.error(e)
        return False

    def post_create(self, aws_client: AwsApiClient) -> bool:
        """Optionally block until the bucket exists.

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            Always True; waiter failures are logged but do not fail the flow.
        """
        # Wait for Bucket to be created
        if self.wait_for_create:
            try:
                print_info(f"Waiting for {self.get_resource_type()} to be created.")
                waiter = self.get_service_client(aws_client).get_waiter("bucket_exists")
                waiter.wait(
                    Bucket=self.name,
                    WaiterConfig={
                        "Delay": self.waiter_delay,
                        "MaxAttempts": self.waiter_max_attempts,
                    },
                )
            except Exception as e:
                logger.error("Waiter failed.")
                logger.error(e)
        return True

    def _read(self, aws_client: AwsApiClient) -> Optional[Any]:
        """Returns the s3.Bucket details if it exists, else None.

        Args:
            aws_client: The AwsApiClient for the current cluster
        """
        logger.debug(f"Reading {self.get_resource_type()}: {self.get_resource_name()}")
        from botocore.exceptions import ClientError

        try:
            service_resource = self.get_service_resource(aws_client)
            bucket = service_resource.Bucket(name=self.name)
            bucket.load()
            # A bucket that does not exist has no creation_date after load().
            creation_date = bucket.creation_date
            logger.debug(f"Bucket creation_date: {creation_date}")
            if creation_date is not None:
                logger.debug(f"Bucket found: {bucket.name}")
                self.active_resource = {
                    "name": bucket.name,
                    "creation_date": creation_date,
                }
        except ClientError as ce:
            logger.debug(f"ClientError: {ce}")
        except Exception as e:
            logger.error(f"Error reading {self.get_resource_type()}.")
            logger.error(e)
        return self.active_resource

    def _delete(self, aws_client: AwsApiClient) -> bool:
        """Deletes the s3.Bucket

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            True when the delete call succeeded, False otherwise.
        """
        print_info(f"Deleting {self.get_resource_type()}: {self.get_resource_name()}")
        service_client = self.get_service_client(aws_client)
        self.active_resource = None
        try:
            response = service_client.delete_bucket(Bucket=self.name)
            logger.debug(f"Response: {response}")
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be deleted.")
            logger.error("Please try again or delete resources manually.")
            logger.error(e)
        return False

    def get_objects(self, aws_client: Optional[AwsApiClient] = None, prefix: Optional[str] = None) -> List[Any]:
        """Returns a list of s3.Object objects for the s3.Bucket

        Args:
            aws_client: The AwsApiClient for the current cluster
            prefix: Prefix to filter objects by

        Returns:
            A list of S3Object instances, one per matching key.
        """
        bucket = self.get_resource(aws_client)
        if bucket is None:
            logger.warning(f"Could not get bucket: {self.name}")
            return []
        logger.debug(f"Getting objects for bucket: {bucket.name}")
        # Filter server-side when a prefix is given: S3 then returns only the
        # matching keys instead of forcing us to paginate the entire bucket
        # and discard non-matching keys client-side.
        if prefix is not None:
            object_summaries = bucket.objects.filter(Prefix=prefix)
        else:
            object_summaries = bucket.objects.all()
        return [S3Object(bucket_name=bucket.name, key=object_summary.key) for object_summary in object_summaries]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/s3/bucket.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resource/s3/object.py | from pathlib import Path
from typing import Any, Optional
from pydantic import Field
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.utilities.logging import logger
class S3Object(AwsResource):
    """
    A single object stored in an S3 bucket.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3/object/index.html
    """

    resource_type: str = "s3"
    service_name: str = "s3"

    # The Object's bucket_name identifier. This must be set.
    bucket_name: str
    # The Object's key identifier. This must be set.
    name: str = Field(..., alias="key")

    @property
    def uri(self) -> str:
        """The s3:// URI addressing this object.

        Returns:
            str: The URI of the s3.Object
        """
        return f"s3://{self.bucket_name}/{self.name}"

    def get_resource(self, aws_client: Optional[AwsApiClient] = None) -> Any:
        """Build and return the boto3 s3.Object handle for this resource.

        Args:
            aws_client: The AwsApiClient for the current cluster

        Returns:
            The s3.Object
        """
        api_client: AwsApiClient = aws_client or self.get_aws_client()
        resource = self.get_service_resource(api_client)
        return resource.Object(bucket_name=self.bucket_name, key=self.name)

    def download(self, path: Path, aws_client: Optional[AwsApiClient] = None) -> None:
        """Download this object's contents to the given local path.

        Parent directories of *path* are created if missing.

        Args:
            path: The path to download the s3.Object to
            aws_client: The AwsApiClient for the current cluster
        """
        logger.info(f"Downloading {self.uri} to {path}")
        s3_object = self.get_resource(aws_client=aws_client)
        path.parent.mkdir(parents=True, exist_ok=True)
        with path.open(mode="wb") as destination:
            s3_object.download_fileobj(destination)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/s3/object.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno_infra/agno/aws/resource/secret/manager.py | import json
from pathlib import Path
from typing import Any, Dict, List, Optional
from agno.aws.api_client import AwsApiClient
from agno.aws.resource.base import AwsResource
from agno.cli.console import print_info
from agno.utilities.logging import logger
class SecretsManager(AwsResource):
    """
    Manages a secret in AWS Secrets Manager, with optional key/value
    ingestion from local YAML files and an in-memory read cache.

    Reference:
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/secretsmanager.html
    """

    resource_type: Optional[str] = "Secret"
    service_name: str = "secretsmanager"

    # The name of the secret.
    name: str
    # Idempotency token for the create call.
    client_request_token: Optional[str] = None
    # The description of the secret.
    description: Optional[str] = None
    # KMS key to encrypt the secret with (account default when None).
    kms_key_id: Optional[str] = None
    # The binary data to encrypt and store in the new version of the secret.
    # We recommend that you store your binary data in a file and then pass the contents of the file as a parameter.
    secret_binary: Optional[bytes] = None
    # The text data to encrypt and store in this new version of the secret.
    # We recommend you use a JSON structure of key/value pairs for your secret value.
    # Either SecretString or SecretBinary must have a value, but not both.
    secret_string: Optional[str] = None
    # A list of tags to attach to the secret.
    tags: Optional[List[Dict[str, str]]] = None
    # A list of Regions and KMS keys to replicate secrets.
    add_replica_regions: Optional[List[Dict[str, str]]] = None
    # Specifies whether to overwrite a secret with the same name in the destination Region.
    # NOTE(review): typed Optional[str] but the API parameter is boolean — confirm.
    force_overwrite_replica_secret: Optional[str] = None
    # Read secret key/value pairs from yaml files
    secret_files: Optional[List[Path]] = None
    # Read secret key/value pairs from yaml files in a directory
    secrets_dir: Optional[Path] = None
    # Force delete the secret without recovery
    force_delete: Optional[bool] = True

    # Provided by api on create
    secret_arn: Optional[str] = None
    secret_name: Optional[str] = None
    # Raw get_secret_value response from the last read.
    secret_value: Optional[dict] = None
    # In-memory cache of the decoded secret key/value pairs.
    cached_secret: Optional[Dict[str, Any]] = None

    def read_secrets_from_files(self) -> Dict[str, Any]:
        """Reads secrets from files.

        Merges key/value pairs from every file in `secret_files` and every
        *.yaml / *.yml file in `secrets_dir`; later files override earlier keys.
        """
        from agno.utilities.yaml_io import read_yaml_file

        secret_dict: Dict[str, Any] = {}
        if self.secret_files:
            for f in self.secret_files:
                _s = read_yaml_file(f)
                if _s is not None:
                    secret_dict.update(_s)
        if self.secrets_dir:
            for f in self.secrets_dir.glob("*.yaml"):
                _s = read_yaml_file(f)
                if _s is not None:
                    secret_dict.update(_s)
            for f in self.secrets_dir.glob("*.yml"):
                _s = read_yaml_file(f)
                if _s is not None:
                    secret_dict.update(_s)
        return secret_dict

    def _create(self, aws_client: AwsApiClient) -> bool:
        """Creates the SecretsManager secret.

        Args:
            aws_client: The AwsApiClient for the current secret

        Returns:
            True when the create call succeeded, False otherwise.
        """
        print_info(f"Creating {self.get_resource_type()}: {self.get_resource_name()}")

        # Step 1: Read secrets from files
        secret_dict: Dict[str, Any] = self.read_secrets_from_files()

        # Step 2: Add secret_string if provided (overrides file values)
        if self.secret_string is not None:
            secret_dict.update(json.loads(self.secret_string))

        # Step 3: Build secret_string
        secret_string: Optional[str] = json.dumps(secret_dict) if len(secret_dict) > 0 else None

        # Step 4: Build SecretsManager configuration
        # create a dict of args which are not null, otherwise aws type validation fails
        not_null_args: Dict[str, Any] = {}
        if self.client_request_token:
            not_null_args["ClientRequestToken"] = self.client_request_token
        if self.description:
            not_null_args["Description"] = self.description
        if self.kms_key_id:
            not_null_args["KmsKeyId"] = self.kms_key_id
        if self.secret_binary:
            not_null_args["SecretBinary"] = self.secret_binary
        if secret_string:
            not_null_args["SecretString"] = secret_string
        if self.tags:
            not_null_args["Tags"] = self.tags
        if self.add_replica_regions:
            not_null_args["AddReplicaRegions"] = self.add_replica_regions
        if self.force_overwrite_replica_secret:
            not_null_args["ForceOverwriteReplicaSecret"] = self.force_overwrite_replica_secret

        # Step 5: Create SecretsManager
        service_client = self.get_service_client(aws_client)
        try:
            created_resource = service_client.create_secret(
                Name=self.name,
                **not_null_args,
            )
            logger.debug(f"SecretsManager: {created_resource}")

            # Validate SecretsManager creation
            self.secret_arn = created_resource.get("ARN", None)
            self.secret_name = created_resource.get("Name", None)
            logger.debug(f"secret_arn: {self.secret_arn}")
            logger.debug(f"secret_name: {self.secret_name}")
            if self.secret_arn is not None:
                # Cache the secret we just wrote to avoid an immediate re-read.
                self.cached_secret = secret_dict
                self.active_resource = created_resource
                return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be created.")
            logger.error(e)
        return False

    def _read(self, aws_client: AwsApiClient) -> Optional[Any]:
        """Returns the SecretsManager secret description, or None if missing.

        Args:
            aws_client: The AwsApiClient for the current secret
        """
        logger.debug(f"Reading {self.get_resource_type()}: {self.get_resource_name()}")
        from botocore.exceptions import ClientError

        service_client = self.get_service_client(aws_client)
        try:
            describe_response = service_client.describe_secret(SecretId=self.name)
            logger.debug(f"SecretsManager: {describe_response}")
            self.secret_arn = describe_response.get("ARN", None)
            self.secret_name = describe_response.get("Name", None)
            # NOTE(review): the DeletedDate lookup result is discarded —
            # presumably intended to detect secrets scheduled for deletion; confirm.
            describe_response.get("DeletedDate", None)
            logger.debug(f"secret_arn: {self.secret_arn}")
            logger.debug(f"secret_name: {self.secret_name}")
            # logger.debug(f"secret_deleted_date: {secret_deleted_date}")
            if self.secret_arn is not None:
                # print_info(f"SecretsManager available: {self.name}")
                self.active_resource = describe_response
        except ClientError as ce:
            logger.debug(f"ClientError: {ce}")
        except Exception as e:
            logger.error(f"Error reading {self.get_resource_type()}.")
            logger.error(e)
        return self.active_resource

    def _delete(self, aws_client: AwsApiClient) -> bool:
        """Deletes the SecretsManager secret.

        Args:
            aws_client: The AwsApiClient for the current secret

        Returns:
            True when the delete call succeeded, False otherwise.
        """
        print_info(f"Deleting {self.get_resource_type()}: {self.get_resource_name()}")
        service_client = self.get_service_client(aws_client)
        self.active_resource = None
        self.secret_value = None
        try:
            delete_response = service_client.delete_secret(
                SecretId=self.name, ForceDeleteWithoutRecovery=self.force_delete
            )
            logger.debug(f"SecretsManager: {delete_response}")
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be deleted.")
            logger.error("Please try again or delete resources manually.")
            logger.error(e)
        return False

    def _update(self, aws_client: AwsApiClient) -> bool:
        """Update SecretsManager: merge existing values, file values and
        secret_string (in that precedence order) and write them back."""
        print_info(f"Updating {self.get_resource_type()}: {self.get_resource_name()}")

        # Initialize final secret_dict
        secret_dict: Dict[str, Any] = {}

        # Step 1: Read secrets from AWS SecretsManager
        existing_secret_dict = self.get_secrets_as_dict()
        # logger.debug(f"existing_secret_dict: {existing_secret_dict}")
        if existing_secret_dict is not None:
            secret_dict.update(existing_secret_dict)

        # Step 2: Read secrets from files
        new_secret_dict: Dict[str, Any] = self.read_secrets_from_files()
        if len(new_secret_dict) > 0:
            secret_dict.update(new_secret_dict)

        # Step 3: Add secret_string if provided
        if self.secret_string is not None:
            secret_dict.update(json.loads(self.secret_string))

        # Step 4: Update AWS SecretsManager
        service_client = self.get_service_client(aws_client)
        # Drop stale state; it will be repopulated on the next read.
        self.active_resource = None
        self.secret_value = None
        try:
            create_response = service_client.update_secret(
                SecretId=self.name,
                SecretString=json.dumps(secret_dict),
            )
            logger.debug(f"SecretsManager: {create_response}")
            return True
        except Exception as e:
            logger.error(f"{self.get_resource_type()} could not be Updated.")
            logger.error(e)
        return False

    def get_secrets_as_dict(self, aws_client: Optional[AwsApiClient] = None) -> Optional[Dict[str, Any]]:
        """Get secret value as a decoded dict, using the in-memory cache.

        Args:
            aws_client: The AwsApiClient for the current secret

        Returns:
            The decoded key/value pairs, or None when unavailable.
        """
        from botocore.exceptions import ClientError

        # Serve from cache when a previous call already decoded the secret.
        if self.cached_secret is not None:
            return self.cached_secret

        logger.debug(f"Getting {self.get_resource_type()}: {self.get_resource_name()}")
        client: AwsApiClient = aws_client or self.get_aws_client()
        service_client = self.get_service_client(client)
        try:
            secret_value = service_client.get_secret_value(SecretId=self.name)
            # logger.debug(f"SecretsManager: {secret_value}")
            if secret_value is None:
                logger.warning(f"Secret Empty: {self.name}")
                return None

            self.secret_value = secret_value
            self.secret_arn = secret_value.get("ARN", None)
            self.secret_name = secret_value.get("Name", None)

            # The secret is stored either as a JSON string or JSON-encoded binary.
            secret_string = secret_value.get("SecretString", None)
            if secret_string is not None:
                self.cached_secret = json.loads(secret_string)
                return self.cached_secret

            secret_binary = secret_value.get("SecretBinary", None)
            if secret_binary is not None:
                self.cached_secret = json.loads(secret_binary.decode("utf-8"))
                return self.cached_secret
        except ClientError as ce:
            logger.debug(f"ClientError: {ce}")
        except Exception as e:
            logger.error(f"Error reading {self.get_resource_type()}.")
            logger.error(e)
        return None

    def get_secret_value(self, secret_name: str, aws_client: Optional[AwsApiClient] = None) -> Optional[Any]:
        """Return a single value from the secret by key, or None if absent."""
        secret_dict = self.get_secrets_as_dict(aws_client=aws_client)
        if secret_dict is not None:
            return secret_dict.get(secret_name, None)
        return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resource/secret/manager.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/aws/resources.py | from typing import List, Optional, Tuple
from pydantic import Field, PrivateAttr
from agno.aws.api_client import AwsApiClient
from agno.aws.app.base import AwsApp
from agno.aws.context import AwsBuildContext
from agno.aws.resource.base import AwsResource
from agno.base.resources import InfraResources
from agno.utilities.logging import logger
class AwsResources(InfraResources):
infra: str = Field(default="aws", init=False)
apps: Optional[List[AwsApp]] = None
resources: Optional[List[AwsResource]] = None
aws_region: Optional[str] = None
aws_profile: Optional[str] = None
# -*- Cached Data
_api_client: Optional[AwsApiClient] = PrivateAttr(default_factory=lambda: None)
def get_aws_region(self) -> Optional[str]:
    """Resolve the AWS region, caching the result on self.aws_region.

    Resolution order: explicit/cached value on this resource group,
    then the infra settings, then the environment variable.
    """
    # Priority 1: explicit value on the ResourceGroup (or a cached lookup)
    if self.aws_region:
        return self.aws_region

    # Priority 2: value from the infra settings, cached for next time
    settings = self.infra_settings
    if settings is not None and settings.aws_region is not None:
        self.aws_region = settings.aws_region
        return self.aws_region

    # Priority 3: value from the environment, cached for next time
    from os import getenv

    from agno.constants import AWS_REGION_ENV_VAR

    env_region = getenv(AWS_REGION_ENV_VAR)
    if env_region is not None:
        logger.debug(f"{AWS_REGION_ENV_VAR}: {env_region}")
        self.aws_region = env_region
    return self.aws_region
def get_aws_profile(self) -> Optional[str]:
    """Resolve the AWS profile, caching the result on self.aws_profile.

    Resolution order: explicit/cached value on this resource group,
    then the infra settings, then the environment variable.
    """
    # Priority 1: explicit value on the ResourceGroup (or a cached lookup)
    if self.aws_profile:
        return self.aws_profile

    # Priority 2: value from the infra settings, cached for next time
    settings = self.infra_settings
    if settings is not None and settings.aws_profile is not None:
        self.aws_profile = settings.aws_profile
        return self.aws_profile

    # Priority 3: value from the environment, cached for next time
    from os import getenv

    from agno.constants import AWS_PROFILE_ENV_VAR

    env_profile = getenv(AWS_PROFILE_ENV_VAR)
    if env_profile is not None:
        logger.debug(f"{AWS_PROFILE_ENV_VAR}: {env_profile}")
        self.aws_profile = env_profile
    return self.aws_profile
@property
def aws_client(self) -> AwsApiClient:
    """Return the AwsApiClient for this group, constructing it on first access."""
    client = self._api_client
    if client is None:
        client = AwsApiClient(aws_region=self.get_aws_region(), aws_profile=self.get_aws_profile())
        self._api_client = client
    return client
def create_resources(
    self,
    group_filter: Optional[str] = None,
    name_filter: Optional[str] = None,
    type_filter: Optional[str] = None,
    dry_run: Optional[bool] = False,
    auto_confirm: Optional[bool] = False,
    force: Optional[bool] = None,
    pull: Optional[bool] = None,
) -> Tuple[int, int]:
    """Create the AwsResources and AwsApp-derived resources in this group.

    Resources are collected from self.resources and from each AwsApp,
    deduplicated, sorted in install order, then ordered so dependencies
    are created before their dependents. The user is prompted to confirm
    unless auto_confirm is True.

    Args:
        group_filter: Only create resources/apps whose group matches.
        name_filter: Only create resources whose name matches.
        type_filter: Only create resources whose type matches.
        dry_run: If True, only print what would be created; returns (0, 0).
        auto_confirm: If True, skip the interactive confirmation prompt.
        force: If True, set force=True on every resource before creating.
        pull: Unused here; accepted for interface compatibility.

    Returns:
        Tuple of (number of resources created, number of resources to create).
    """
    from agno.aws.resource.types import AwsResourceInstallOrder
    from agno.cli.console import confirm_yes_no, print_heading, print_info

    logger.debug("-*- Creating AwsResources")
    # Build a list of AwsResources to create
    resources_to_create: List[AwsResource] = []
    # Add resources to resources_to_create
    if self.resources is not None:
        for r in self.resources:
            r.set_infra_settings(infra_settings=self.infra_settings)
            # Resources without a group inherit the group name of this ResourceGroup
            if r.group is None and self.name is not None:
                r.group = self.name
            if r.should_create(
                group_filter=group_filter,
                name_filter=name_filter,
                type_filter=type_filter,
            ):
                r.set_infra_settings(infra_settings=self.infra_settings)
                resources_to_create.append(r)
    # Build a list of AwsApps to create
    apps_to_create: List[AwsApp] = []
    if self.apps is not None:
        for app in self.apps:
            if app.group is None and self.name is not None:
                app.group = self.name
            if app.should_create(group_filter=group_filter):
                apps_to_create.append(app)
    # Get the list of AwsResources from the AwsApps
    if len(apps_to_create) > 0:
        logger.debug(f"Found {len(apps_to_create)} apps to create")
        for app in apps_to_create:
            app.set_infra_settings(infra_settings=self.infra_settings)
            app_resources = app.get_resources(
                build_context=AwsBuildContext(aws_region=self.get_aws_region(), aws_profile=self.get_aws_profile())
            )
            if len(app_resources) > 0:
                # If the app has dependencies, add the resources from the
                # dependencies first to the list of resources to create
                if app.depends_on is not None:
                    for dep in app.depends_on:
                        if isinstance(dep, AwsApp):
                            dep.set_infra_settings(infra_settings=self.infra_settings)
                            dep_resources = dep.get_resources(
                                build_context=AwsBuildContext(
                                    aws_region=self.get_aws_region(), aws_profile=self.get_aws_profile()
                                )
                            )
                            if len(dep_resources) > 0:
                                for dep_resource in dep_resources:
                                    if isinstance(dep_resource, AwsResource):
                                        resources_to_create.append(dep_resource)
                # Add the resources from the app to the list of resources to create
                for app_resource in app_resources:
                    if isinstance(app_resource, AwsResource) and app_resource.should_create(
                        group_filter=group_filter, name_filter=name_filter, type_filter=type_filter
                    ):
                        resources_to_create.append(app_resource)
    # Sort the AwsResources in install order (unknown types sort last: 5000)
    resources_to_create.sort(key=lambda x: AwsResourceInstallOrder.get(x.__class__.__name__, 5000))
    # Deduplicate AwsResources (order-preserving)
    deduped_resources_to_create: List[AwsResource] = []
    for r in resources_to_create:
        if r not in deduped_resources_to_create:
            deduped_resources_to_create.append(r)
    # Implement dependency sorting: dependencies are created before dependents
    final_aws_resources: List[AwsResource] = []
    logger.debug("-*- Building AwsResources dependency graph")
    for aws_resource in deduped_resources_to_create:
        # Logic to follow if resource has dependencies
        if aws_resource.depends_on is not None and len(aws_resource.depends_on) > 0:
            # Add the dependencies before the resource itself
            for dep in aws_resource.depends_on:
                if isinstance(dep, AwsResource):
                    if dep not in final_aws_resources:
                        logger.debug(f"-*- Adding {dep.name}, dependency of {aws_resource.name}")
                        final_aws_resources.append(dep)
            # Add the resource to be created after its dependencies
            if aws_resource not in final_aws_resources:
                logger.debug(f"-*- Adding {aws_resource.name}")
                final_aws_resources.append(aws_resource)
        else:
            # Add the resource to be created if it has no dependencies
            if aws_resource not in final_aws_resources:
                logger.debug(f"-*- Adding {aws_resource.name}")
                final_aws_resources.append(aws_resource)
    # Track the total number of AwsResources to create for validation
    num_resources_to_create: int = len(final_aws_resources)
    num_resources_created: int = 0
    if num_resources_to_create == 0:
        return 0, 0

    # Dry run: print the plan and exit without creating anything
    if dry_run:
        print_heading("--**- AWS resources to create:")
        for resource in final_aws_resources:
            print_info(f" -+-> {resource.get_resource_type()}: {resource.get_resource_name()}")
        print_info("")
        if self.get_aws_region():
            print_info(f"Region: {self.get_aws_region()}")
        if self.get_aws_profile():
            print_info(f"Profile: {self.get_aws_profile()}")
        print_info(f"Total {num_resources_to_create} resources")
        return 0, 0

    # Validate resources to be created (interactive confirmation)
    if not auto_confirm:
        print_heading("\n--**-- Confirm resources to create:")
        for resource in final_aws_resources:
            print_info(f" -+-> {resource.get_resource_type()}: {resource.get_resource_name()}")
        print_info("")
        if self.get_aws_region():
            print_info(f"Region: {self.get_aws_region()}")
        if self.get_aws_profile():
            print_info(f"Profile: {self.get_aws_profile()}")
        print_info(f"Total {num_resources_to_create} resources")
        confirm = confirm_yes_no("\nConfirm deploy")
        if not confirm:
            print_info("-*-")
            print_info("-*- Skipping create")
            print_info("-*-")
            return 0, 0

    for resource in final_aws_resources:
        print_info(f"\n-==+==- {resource.get_resource_type()}: {resource.get_resource_name()}")
        if force is True:
            resource.force = True
        # logger.debug(resource)
        try:
            _resource_created = resource.create(aws_client=self.aws_client)
            if _resource_created:
                num_resources_created += 1
            else:
                # A non-exception failure stops the run unless configured to continue
                if self.infra_settings is not None and not self.infra_settings.continue_on_create_failure:
                    return num_resources_created, num_resources_to_create
        except Exception as e:
            # NOTE(review): exceptions do not honor continue_on_create_failure;
            # the loop always continues after logging — confirm this is intended
            logger.error(f"Failed to create {resource.get_resource_type()}: {resource.get_resource_name()}")
            logger.error(e)
            logger.error("Please fix and try again...")

    print_heading(f"\n--**-- Resources created: {num_resources_created}/{num_resources_to_create}")
    if num_resources_to_create != num_resources_created:
        logger.error(
            f"Resources created: {num_resources_created} do not match resources required: {num_resources_to_create}"
        )  # noqa: E501
    return num_resources_created, num_resources_to_create
def delete_resources(
    self,
    group_filter: Optional[str] = None,
    name_filter: Optional[str] = None,
    type_filter: Optional[str] = None,
    dry_run: Optional[bool] = False,
    auto_confirm: Optional[bool] = False,
    force: Optional[bool] = None,
) -> Tuple[int, int]:
    """Delete the AwsResources and AwsApp-derived resources in this group.

    Resources are collected from self.resources and from each AwsApp,
    deduplicated, sorted in reverse install order, then ordered so each
    resource is deleted before its dependencies. The user is prompted to
    confirm unless auto_confirm is True.

    Args:
        group_filter: Only delete resources/apps whose group matches.
        name_filter: Only delete resources whose name matches.
        type_filter: Only delete resources whose type matches.
        dry_run: If True, only print what would be deleted; returns (0, 0).
        auto_confirm: If True, skip the interactive confirmation prompt.
        force: If True, set force=True on every resource before deleting.

    Returns:
        Tuple of (number of resources deleted, number of resources to delete).
    """
    from agno.aws.resource.types import AwsResourceInstallOrder
    from agno.cli.console import confirm_yes_no, print_heading, print_info

    logger.debug("-*- Deleting AwsResources")
    # Build a list of AwsResources to delete
    resources_to_delete: List[AwsResource] = []
    if self.resources is not None:
        for r in self.resources:
            r.set_infra_settings(infra_settings=self.infra_settings)
            # Resources without a group inherit the group name of this ResourceGroup
            if r.group is None and self.name is not None:
                r.group = self.name
            if r.should_delete(
                group_filter=group_filter,
                name_filter=name_filter,
                type_filter=type_filter,
            ):
                r.set_infra_settings(infra_settings=self.infra_settings)
                resources_to_delete.append(r)
    # Build a list of AwsApps to delete
    apps_to_delete: List[AwsApp] = []
    if self.apps is not None:
        for app in self.apps:
            if app.group is None and self.name is not None:
                app.group = self.name
            if app.should_delete(group_filter=group_filter):
                apps_to_delete.append(app)
    # Get the list of AwsResources from the AwsApps
    if len(apps_to_delete) > 0:
        logger.debug(f"Found {len(apps_to_delete)} apps to delete")
        for app in apps_to_delete:
            app.set_infra_settings(infra_settings=self.infra_settings)
            app_resources = app.get_resources(
                build_context=AwsBuildContext(aws_region=self.get_aws_region(), aws_profile=self.get_aws_profile())
            )
            if len(app_resources) > 0:
                for app_resource in app_resources:
                    if isinstance(app_resource, AwsResource) and app_resource.should_delete(
                        group_filter=group_filter, name_filter=name_filter, type_filter=type_filter
                    ):
                        resources_to_delete.append(app_resource)
    # Sort the AwsResources in reverse install order (delete dependents first)
    resources_to_delete.sort(key=lambda x: AwsResourceInstallOrder.get(x.__class__.__name__, 5000), reverse=True)
    # Deduplicate AwsResources (order-preserving)
    deduped_resources_to_delete: List[AwsResource] = []
    for r in resources_to_delete:
        if r not in deduped_resources_to_delete:
            deduped_resources_to_delete.append(r)
    # Implement dependency sorting: a resource is deleted before its dependencies
    final_aws_resources: List[AwsResource] = []
    logger.debug("-*- Building AwsResources dependency graph")
    for aws_resource in deduped_resources_to_delete:
        # Logic to follow if resource has dependencies
        if aws_resource.depends_on is not None and len(aws_resource.depends_on) > 0:
            # BUG FIX: iterate a reversed COPY of depends_on. The previous code
            # called aws_resource.depends_on.reverse(), which mutated the
            # user-provided dependency list in place — flipping its order on
            # every call and corrupting ordering for later create/update runs.
            deps_in_delete_order = list(reversed(aws_resource.depends_on))
            # 1. Remove any dependencies already queued; they must come after
            #    this resource in delete order
            for dep in deps_in_delete_order:
                if dep in final_aws_resources:
                    logger.debug(f"-*- Removing {dep.name}, dependency of {aws_resource.name}")
                    final_aws_resources.remove(dep)
            # 2. Add the resource to be deleted before its dependencies
            if aws_resource not in final_aws_resources:
                logger.debug(f"-*- Adding {aws_resource.name}")
                final_aws_resources.append(aws_resource)
            # 3. Add the dependencies back in reverse order
            for dep in deps_in_delete_order:
                if isinstance(dep, AwsResource):
                    if dep not in final_aws_resources:
                        logger.debug(f"-*- Adding {dep.name}, dependency of {aws_resource.name}")
                        final_aws_resources.append(dep)
        else:
            # Add the resource to be deleted if it has no dependencies
            if aws_resource not in final_aws_resources:
                logger.debug(f"-*- Adding {aws_resource.name}")
                final_aws_resources.append(aws_resource)
    # Track the total number of AwsResources to delete for validation
    num_resources_to_delete: int = len(final_aws_resources)
    num_resources_deleted: int = 0
    if num_resources_to_delete == 0:
        return 0, 0

    # Dry run: print the plan and exit without deleting anything
    if dry_run:
        print_heading("--**- AWS resources to delete:")
        for resource in final_aws_resources:
            print_info(f" -+-> {resource.get_resource_type()}: {resource.get_resource_name()}")
        print_info("")
        if self.get_aws_region():
            print_info(f"Region: {self.get_aws_region()}")
        if self.get_aws_profile():
            print_info(f"Profile: {self.get_aws_profile()}")
        print_info(f"Total {num_resources_to_delete} resources")
        return 0, 0

    # Validate resources to be deleted (interactive confirmation)
    if not auto_confirm:
        print_heading("\n--**-- Confirm resources to delete:")
        for resource in final_aws_resources:
            print_info(f" -+-> {resource.get_resource_type()}: {resource.get_resource_name()}")
        print_info("")
        if self.get_aws_region():
            print_info(f"Region: {self.get_aws_region()}")
        if self.get_aws_profile():
            print_info(f"Profile: {self.get_aws_profile()}")
        print_info(f"Total {num_resources_to_delete} resources")
        confirm = confirm_yes_no("\nConfirm delete")
        if not confirm:
            print_info("-*-")
            print_info("-*- Skipping delete")
            print_info("-*-")
            return 0, 0

    for resource in final_aws_resources:
        print_info(f"\n-==+==- {resource.get_resource_type()}: {resource.get_resource_name()}")
        if force is True:
            resource.force = True
        # logger.debug(resource)
        try:
            _resource_deleted = resource.delete(aws_client=self.aws_client)
            if _resource_deleted:
                num_resources_deleted += 1
            else:
                # A non-exception failure stops the run unless configured to continue
                if self.infra_settings is not None and not self.infra_settings.continue_on_delete_failure:
                    return num_resources_deleted, num_resources_to_delete
        except Exception as e:
            logger.error(f"Failed to delete {resource.get_resource_type()}: {resource.get_resource_name()}")
            logger.error(e)
            logger.error("Please fix and try again...")

    print_heading(f"\n--**-- Resources deleted: {num_resources_deleted}/{num_resources_to_delete}")
    if num_resources_to_delete != num_resources_deleted:
        logger.error(
            f"Resources deleted: {num_resources_deleted} do not match resources required: {num_resources_to_delete}"
        )  # noqa: E501
    return num_resources_deleted, num_resources_to_delete
def update_resources(
    self,
    group_filter: Optional[str] = None,
    name_filter: Optional[str] = None,
    type_filter: Optional[str] = None,
    dry_run: Optional[bool] = False,
    auto_confirm: Optional[bool] = False,
    force: Optional[bool] = None,
    pull: Optional[bool] = None,
) -> Tuple[int, int]:
    """Update (patch) the AwsResources and AwsApp-derived resources in this group.

    Resources are collected from self.resources and from each AwsApp,
    deduplicated, sorted in install order, then ordered so dependencies
    are updated before their dependents. The user is prompted to confirm
    unless auto_confirm is True.

    Args:
        group_filter: Only update resources/apps whose group matches.
        name_filter: Only update resources whose name matches.
        type_filter: Only update resources whose type matches.
        dry_run: If True, only print what would be updated; returns (0, 0).
        auto_confirm: If True, skip the interactive confirmation prompt.
        force: If True, set force=True on every resource before updating.
        pull: Unused here; accepted for interface compatibility.

    Returns:
        Tuple of (number of resources updated, number of resources to update).
    """
    from agno.aws.resource.types import AwsResourceInstallOrder
    from agno.cli.console import confirm_yes_no, print_heading, print_info

    logger.debug("-*- Updating AwsResources")
    # Build a list of AwsResources to update
    resources_to_update: List[AwsResource] = []
    # Add resources to resources_to_update
    if self.resources is not None:
        for r in self.resources:
            r.set_infra_settings(infra_settings=self.infra_settings)
            # Resources without a group inherit the group name of this ResourceGroup
            if r.group is None and self.name is not None:
                r.group = self.name
            if r.should_update(
                group_filter=group_filter,
                name_filter=name_filter,
                type_filter=type_filter,
            ):
                r.set_infra_settings(infra_settings=self.infra_settings)
                resources_to_update.append(r)
    # Build a list of AwsApps to update
    apps_to_update: List[AwsApp] = []
    if self.apps is not None:
        for app in self.apps:
            if app.group is None and self.name is not None:
                app.group = self.name
            if app.should_update(group_filter=group_filter):
                apps_to_update.append(app)
    # Get the list of AwsResources from the AwsApps
    if len(apps_to_update) > 0:
        logger.debug(f"Found {len(apps_to_update)} apps to update")
        for app in apps_to_update:
            app.set_infra_settings(infra_settings=self.infra_settings)
            app_resources = app.get_resources(
                build_context=AwsBuildContext(aws_region=self.get_aws_region(), aws_profile=self.get_aws_profile())
            )
            if len(app_resources) > 0:
                for app_resource in app_resources:
                    if isinstance(app_resource, AwsResource) and app_resource.should_update(
                        group_filter=group_filter, name_filter=name_filter, type_filter=type_filter
                    ):
                        resources_to_update.append(app_resource)
    # Sort the AwsResources in install order (unknown types sort last: 5000)
    resources_to_update.sort(key=lambda x: AwsResourceInstallOrder.get(x.__class__.__name__, 5000))
    # Deduplicate AwsResources (order-preserving)
    deduped_resources_to_update: List[AwsResource] = []
    for r in resources_to_update:
        if r not in deduped_resources_to_update:
            deduped_resources_to_update.append(r)
    # Implement dependency sorting: dependencies are updated before dependents
    final_aws_resources: List[AwsResource] = []
    logger.debug("-*- Building AwsResources dependency graph")
    for aws_resource in deduped_resources_to_update:
        # Logic to follow if resource has dependencies
        if aws_resource.depends_on is not None and len(aws_resource.depends_on) > 0:
            # Add the dependencies before the resource itself
            for dep in aws_resource.depends_on:
                if isinstance(dep, AwsResource):
                    if dep not in final_aws_resources:
                        logger.debug(f"-*- Adding {dep.name}, dependency of {aws_resource.name}")
                        final_aws_resources.append(dep)
            # Add the resource to be created after its dependencies
            if aws_resource not in final_aws_resources:
                logger.debug(f"-*- Adding {aws_resource.name}")
                final_aws_resources.append(aws_resource)
        else:
            # Add the resource to be created if it has no dependencies
            if aws_resource not in final_aws_resources:
                logger.debug(f"-*- Adding {aws_resource.name}")
                final_aws_resources.append(aws_resource)
    # Track the total number of AwsResources to update for validation
    num_resources_to_update: int = len(final_aws_resources)
    num_resources_updated: int = 0
    if num_resources_to_update == 0:
        return 0, 0

    # Dry run: print the plan and exit without updating anything
    if dry_run:
        print_heading("--**- AWS resources to update:")
        for resource in final_aws_resources:
            print_info(f" -+-> {resource.get_resource_type()}: {resource.get_resource_name()}")
        print_info("")
        if self.get_aws_region():
            print_info(f"Region: {self.get_aws_region()}")
        if self.get_aws_profile():
            print_info(f"Profile: {self.get_aws_profile()}")
        print_info(f"Total {num_resources_to_update} resources")
        return 0, 0

    # Validate resources to be updated (interactive confirmation)
    if not auto_confirm:
        print_heading("\n--**-- Confirm resources to update:")
        for resource in final_aws_resources:
            print_info(f" -+-> {resource.get_resource_type()}: {resource.get_resource_name()}")
        print_info("")
        if self.get_aws_region():
            print_info(f"Region: {self.get_aws_region()}")
        if self.get_aws_profile():
            print_info(f"Profile: {self.get_aws_profile()}")
        print_info(f"Total {num_resources_to_update} resources")
        confirm = confirm_yes_no("\nConfirm patch")
        if not confirm:
            print_info("-*-")
            print_info("-*- Skipping patch")
            print_info("-*-")
            return 0, 0

    for resource in final_aws_resources:
        print_info(f"\n-==+==- {resource.get_resource_type()}: {resource.get_resource_name()}")
        if force is True:
            resource.force = True
        # logger.debug(resource)
        try:
            _resource_updated = resource.update(aws_client=self.aws_client)
            if _resource_updated:
                num_resources_updated += 1
            else:
                # A non-exception failure stops the run unless configured to continue
                if self.infra_settings is not None and not self.infra_settings.continue_on_patch_failure:
                    return num_resources_updated, num_resources_to_update
        except Exception as e:
            logger.error(f"Failed to update {resource.get_resource_type()}: {resource.get_resource_name()}")
            logger.error(e)
            logger.error("Please fix and try again...")

    print_heading(f"\n--**-- Resources updated: {num_resources_updated}/{num_resources_to_update}")
    if num_resources_to_update != num_resources_updated:
        logger.error(
            f"Resources updated: {num_resources_updated} do not match resources required: {num_resources_to_update}"
        )  # noqa: E501
    return num_resources_updated, num_resources_to_update
def save_resources(
    self,
    group_filter: Optional[str] = None,
    name_filter: Optional[str] = None,
    type_filter: Optional[str] = None,
) -> Tuple[int, int]:
    """Persist resource output to disk.

    Not supported for AWS resource groups; always raises NotImplementedError.
    """
    raise NotImplementedError
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/aws/resources.py",
"license": "Apache License 2.0",
"lines": 481,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/base/app.py | from typing import Any, Dict, List, Optional, Union
from pydantic import Field, ValidationInfo, field_validator
from agno.base.base import InfraBase
from agno.base.context import ContainerContext
from agno.base.resource import InfraResource
from agno.utilities.logging import logger
class InfraApp(InfraBase):
    """Base class for Infrastructure Apps.

    An app describes a containerized workload (image, ports, python env)
    and knows how to build the InfraResources required to run it.
    """

    # -*- App Name (required)
    name: str

    # -*- Image Configuration
    # Image can be provided as a DockerImage object
    image: Optional[Any] = None
    # OR as image_name:image_tag str
    image_str: Optional[str] = None
    # OR as image_name and image_tag
    image_name: Optional[str] = None
    image_tag: Optional[str] = None
    # Entrypoint for the container
    entrypoint: Optional[Union[str, List[str]]] = None
    # Command for the container
    command: Optional[Union[str, List[str]]] = None

    # -*- Python Configuration
    # Install python dependencies using a requirements.txt file
    install_requirements: bool = False
    # Path to the requirements.txt file relative to the infra_root
    requirements_file: str = "requirements.txt"
    # Set the PYTHONPATH env var
    set_python_path: bool = True
    # Manually provide the PYTHONPATH.
    # If None, PYTHONPATH is set to infra_root
    python_path: Optional[str] = None
    # Add paths to the PYTHONPATH env var
    # If python_path is provided, this value is ignored
    add_python_paths: Optional[List[str]] = None

    # -*- App Ports
    # Open a container port if open_port=True
    open_port: bool = False
    # If open_port=True, port_number is used to set the
    # container_port if container_port is None and host_port if host_port is None
    port_number: int = 80
    # Port number on the Container to open
    # Preferred over port_number if both are set
    container_port: Optional[int] = Field(None, validate_default=True)
    # Port name for the opened port
    container_port_name: str = "http"
    # Port number on the Host to map to the Container port
    # Preferred over port_number if both are set
    host_port: Optional[int] = Field(None, validate_default=True)

    # -*- Extra Resources created "before" the App resources
    resources: Optional[List[InfraResource]] = None

    # -*- Other args
    print_env_on_load: bool = False

    # -*- App specific args. Not to be set by the user.
    # Container Environment that can be set by subclasses
    # which is used as a starting point for building the container_env
    # Any variables set in container_env will be overridden by values
    # in the env_vars dict or env_file
    container_env: Optional[Dict[str, Any]] = None
    # Variable used to cache the container context
    container_context: Optional[ContainerContext] = None

    # -*- Cached Data
    cached_resources: Optional[List[Any]] = None

    @field_validator("container_port", mode="before")
    def set_container_port(cls, v, info: ValidationInfo):
        """Default container_port to port_number when not explicitly set."""
        port_number = info.data.get("port_number")
        if v is None and port_number is not None:
            v = port_number
        return v

    @field_validator("host_port", mode="before")
    def set_host_port(cls, v, info: ValidationInfo):
        """Default host_port to port_number when not explicitly set."""
        port_number = info.data.get("port_number")
        if v is None and port_number is not None:
            v = port_number
        return v

    def get_app_name(self) -> str:
        """Return the name of this app."""
        return self.name

    def get_image_str(self) -> str:
        """Return the container image as a "name:tag" string.

        Resolution order: image object, then image_str, then
        image_name[:image_tag] (tag defaults to "latest").
        Returns "" when no image information is available.
        """
        if self.image:
            return f"{self.image.name}:{self.image.tag}"
        elif self.image_str:
            return self.image_str
        elif self.image_name and self.image_tag:
            return f"{self.image_name}:{self.image_tag}"
        elif self.image_name:
            return f"{self.image_name}:latest"
        else:
            return ""

    def build_resources(self, build_context: Any) -> Optional[Any]:
        """Hook for subclasses to build app-specific resources. Returns None by default."""
        logger.debug(f"@build_resource_group not defined for {self.get_app_name()}")
        return None

    def get_dependencies(self) -> Optional[List[InfraResource]]:
        """Return the InfraResource dependencies of this app, or None when depends_on is unset."""
        return (
            [dep for dep in self.depends_on if isinstance(dep, InfraResource)] if self.depends_on is not None else None
        )

    def add_app_properties_to_resources(self, resources: List[InfraResource]) -> List[InfraResource]:
        """Propagate app-level settings onto the given resources.

        Each resource-control flag is copied from the app only when the
        resource does not set it explicitly (resource value is None) and the
        app value is not None. The app's infra_settings, group, output_dir
        and dependencies are applied as well.
        """
        # Flags a resource inherits from the app when unset on the resource.
        inherited_flags = (
            "skip_create",
            "skip_read",
            "skip_update",
            "skip_delete",
            "recreate_on_update",
            "use_cache",
            "force",
            "debug_mode",
            "wait_for_create",
            "wait_for_update",
            "wait_for_delete",
            "save_output",
        )
        app_group = self.get_group_name()
        app_output_dir = self.get_app_name()
        app_dependencies = self.get_dependencies()

        updated_resources = []
        for resource in resources:
            # Copy each app-level flag when the resource leaves it unset
            for flag in inherited_flags:
                app_value = getattr(self, flag)
                if getattr(resource, flag, None) is None and app_value is not None:
                    setattr(resource, flag, app_value)
            # If infra_settings on resource is not set, use app level infra_settings (if set on app)
            if resource.infra_settings is None and self.infra_settings is not None:
                resource.set_infra_settings(self.infra_settings)
            # If group on resource is not set, use app level group (if set on app)
            if resource.group is None and app_group is not None:
                resource.group = app_group
            # Always set output_dir on resource to app level output_dir
            resource.output_dir = app_output_dir
            # Merge the app's dependencies into the resource's dependencies
            if app_dependencies is not None:
                if resource.depends_on is None:
                    # Copy so resources do not share one dependency list object
                    resource.depends_on = list(app_dependencies)
                else:
                    resource.depends_on.extend(app_dependencies)
            updated_resources.append(resource)
        return updated_resources

    def get_resources(self, build_context: Any) -> List[InfraResource]:
        """Return the resources for this app, building and caching them on first call.

        Combines the user-provided `resources` with the resources built by
        `build_resources()` and applies app-level properties to each.
        """
        if self.cached_resources is not None and len(self.cached_resources) > 0:
            return self.cached_resources

        # BUG FIX: copy the user-provided list. The previous code extended
        # self.resources in place, mutating the caller's list as a side effect.
        base_resources: List[InfraResource] = list(self.resources) if self.resources else []
        app_resources = self.build_resources(build_context)
        if app_resources is not None:
            base_resources.extend(app_resources)

        self.cached_resources = self.add_app_properties_to_resources(base_resources)
        # logger.debug(f"Resources: {self.cached_resources}")
        return self.cached_resources

    def matches_filters(self, group_filter: Optional[str] = None) -> bool:
        """Return True when this app's group name contains group_filter (or no filter is given)."""
        if group_filter is not None:
            group_name = self.get_group_name()
            logger.debug(f"{self.get_app_name()}: Checking {group_filter} in {group_name}")
            if group_name is None or group_filter not in group_name:
                return False
        return True

    def should_create(self, group_filter: Optional[str] = None) -> bool:
        """Return True when this app is enabled, not skipping create, and matches the filter."""
        if not self.enabled or self.skip_create:
            return False
        return self.matches_filters(group_filter)

    def should_delete(self, group_filter: Optional[str] = None) -> bool:
        """Return True when this app is enabled, not skipping delete, and matches the filter."""
        if not self.enabled or self.skip_delete:
            return False
        return self.matches_filters(group_filter)

    def should_update(self, group_filter: Optional[str] = None) -> bool:
        """Return True when this app is enabled, not skipping update, and matches the filter."""
        if not self.enabled or self.skip_update:
            return False
        return self.matches_filters(group_filter)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/base/app.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/base/base.py | from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, ConfigDict
from agno.infra.settings import InfraSettings
class InfraBase(BaseModel):
"""Base class for all InfraResource, InfraApp and InfraResources objects."""
# Name of the infrastructure resource
name: Optional[str] = None
# Group for the infrastructure resource
# Used for filtering infrastructure resources by group
group: Optional[str] = None
# Environment filter for this resource
env: Optional[str] = None
# Infrastructure filter for this resource
infra: Optional[str] = None
# Whether this resource is enabled
enabled: bool = True
# Resource Control
skip_create: bool = False
skip_read: bool = False
skip_update: bool = False
skip_delete: bool = False
recreate_on_update: bool = False
# Skip create if resource with the same name is active
use_cache: bool = True
# Force create/update/delete even if a resource with the same name is active
force: Optional[bool] = None
# Wait for resource to be created, updated or deleted
wait_for_create: bool = True
wait_for_update: bool = True
wait_for_delete: bool = True
waiter_delay: int = 30
waiter_max_attempts: int = 50
# Environment Variables for the resource (if applicable)
# Add env variables to resource where applicable
env_vars: Optional[Dict[str, Any]] = None
# Read env from a file in yaml format
env_file: Optional[Path] = None
# Add secret variables to resource where applicable
# secrets_dict: Optional[Dict[str, Any]] = None
# Read secrets from a file in yaml format
secrets_file: Optional[Path] = None
# Debug Mode
debug_mode: bool = False
# Store resource to output directory
# If True, save resource output to json files
save_output: bool = False
# The directory for the input files in the infra directory
input_dir: Optional[str] = None
# The directory for the output files in the infra directory
output_dir: Optional[str] = None
# Dependencies for the resource
depends_on: Optional[List[Any]] = None
# Infra Settings
infra_settings: Optional[InfraSettings] = None
# Cached Data
cached_infra_dir: Optional[Path] = None
cached_env_file_data: Optional[Dict[str, Any]] = None
cached_secret_file_data: Optional[Dict[str, Any]] = None
model_config = ConfigDict(arbitrary_types_allowed=True)
def get_group_name(self) -> Optional[str]:
return self.group or self.name
@property
def infra_root(self) -> Optional[Path]:
return self.infra_settings.infra_root if self.infra_settings is not None else None
@property
def infra_name(self) -> Optional[str]:
return self.infra_settings.infra_name if self.infra_settings is not None else None
@property
def infra_dir(self) -> Optional[Path]:
if self.cached_infra_dir is not None:
return self.cached_infra_dir
if self.infra_root is not None:
from agno.infra.helpers import get_infra_dir_path
infra_dir = get_infra_dir_path(self.infra_root)
if infra_dir is not None:
self.cached_infra_dir = infra_dir
return infra_dir
return None
def set_infra_settings(self, infra_settings: Optional[InfraSettings] = None) -> None:
if infra_settings is not None:
self.infra_settings = infra_settings
def get_env_file_data(self) -> Optional[Dict[str, Any]]:
if self.cached_env_file_data is None:
from agno.utilities.yaml_io import read_yaml_file
self.cached_env_file_data = read_yaml_file(file_path=self.env_file)
return self.cached_env_file_data
def get_secret_file_data(self) -> Optional[Dict[str, Any]]:
if self.cached_secret_file_data is None:
from agno.utilities.yaml_io import read_yaml_file
self.cached_secret_file_data = read_yaml_file(file_path=self.secrets_file)
return self.cached_secret_file_data
def get_secret_from_file(self, secret_name: str) -> Optional[str]:
secret_file_data = self.get_secret_file_data()
if secret_file_data is not None:
return secret_file_data.get(secret_name)
return None
def get_infra_resources(self) -> Optional[Any]:
    """Build the InfraResources object for this resource.

    The base class has no generic implementation; subclasses must override.
    """
    raise NotImplementedError("get_infra_resources method not implemented")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/base/base.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno_infra/agno/base/context.py | from typing import Optional
from pydantic import BaseModel
class ContainerContext(BaseModel):
    """ContainerContext is a context object passed when creating containers.

    All paths are expressed as they appear *inside* the container (note the
    str types rather than Path).
    """

    # Infra name
    infra_name: str
    # Path to the infra directory inside the container
    infra_root: str
    # Path to the infra parent directory inside the container
    infra_parent: str
    # Path to the requirements.txt file relative to the infra_root
    requirements_file: Optional[str] = None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/base/context.py",
"license": "Apache License 2.0",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno_infra/agno/base/resources.py | from typing import Any, List, Optional, Tuple
from agno.base.base import InfraBase
class InfraResources(InfraBase):
    """InfraResources is a group of InfraResource and InfraApp objects
    that are managed together.

    Concrete infra backends subclass this and implement the four lifecycle
    operations below. Each operation returns a Tuple[int, int] — presumably
    (succeeded, failed) counts; confirm against the concrete backends.
    """

    # Apps managed as part of this group (loosely typed to avoid backend imports)
    apps: Optional[List[Any]] = None
    # Individual resources managed as part of this group
    resources: Optional[List[Any]] = None

    def create_resources(
        self,
        group_filter: Optional[str] = None,
        name_filter: Optional[str] = None,
        type_filter: Optional[str] = None,
        dry_run: Optional[bool] = False,
        auto_confirm: Optional[bool] = False,
        force: Optional[bool] = None,
        pull: Optional[bool] = None,
    ) -> Tuple[int, int]:
        """Create the resources matching the filters. Must be implemented by subclasses."""
        raise NotImplementedError

    def delete_resources(
        self,
        group_filter: Optional[str] = None,
        name_filter: Optional[str] = None,
        type_filter: Optional[str] = None,
        dry_run: Optional[bool] = False,
        auto_confirm: Optional[bool] = False,
        force: Optional[bool] = None,
    ) -> Tuple[int, int]:
        """Delete the resources matching the filters. Must be implemented by subclasses."""
        raise NotImplementedError

    def update_resources(
        self,
        group_filter: Optional[str] = None,
        name_filter: Optional[str] = None,
        type_filter: Optional[str] = None,
        dry_run: Optional[bool] = False,
        auto_confirm: Optional[bool] = False,
        force: Optional[bool] = None,
        pull: Optional[bool] = None,
    ) -> Tuple[int, int]:
        """Update the resources matching the filters. Must be implemented by subclasses."""
        raise NotImplementedError

    def save_resources(
        self,
        group_filter: Optional[str] = None,
        name_filter: Optional[str] = None,
        type_filter: Optional[str] = None,
    ) -> Tuple[int, int]:
        """Save the resources matching the filters. Must be implemented by subclasses."""
        raise NotImplementedError
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno_infra/agno/base/resources.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/integration/models/anthropic/test_format_tools.py | from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, Field
from agno.tools.function import Function
from agno.utils.models.claude import format_tools_for_model
def test_none_input():
    """A None tool list formats to None."""
    assert format_tools_for_model(None) is None
def test_empty_list_input():
    """An empty tool list formats to None."""
    assert format_tools_for_model([]) is None
def test_non_function_tool_passthrough():
    """Tools that are not of type 'function' come back exactly as supplied."""
    computer_tool = {
        "type": "computer_20241022",
        "name": "computer",
        "display_width_px": 1024,
        "display_height_px": 768,
    }
    assert format_tools_for_model([computer_tool]) == [computer_tool]
def test_simple_function_tool():
    """A plain function tool converts to Anthropic's input_schema format."""

    def get_weather(location: str, units: str):
        """Get weather information for a location

        Args:
            location: The location to get weather for
            units: Temperature units (celsius or fahrenheit)
        """
        return f"The weather in {location} is {units}"

    weather_fn = Function.from_callable(get_weather)
    formatted = format_tools_for_model([{"type": "function", "function": weather_fn.to_dict()}])
    assert formatted == [
        {
            "name": "get_weather",
            "description": "Get weather information for a location",
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "The location to get weather for"},
                    "units": {"type": "string", "description": "Temperature units (celsius or fahrenheit)"},
                },
                "required": ["location", "units"],
                "additionalProperties": False,
            },
        }
    ]
def test_optional_parameters_with_null_type():
    """Parameters whose type includes 'null' must not appear in 'required'."""

    def search_database(query: str, limit: Optional[int] = None):
        """Search database with optional filters

        Args:
            query: Search query
            limit (Optional): Maximum results to return
        """
        return f"Searching database for {query} with limit {limit}"

    payload = [{"type": "function", "function": Function.from_callable(search_database).to_dict()}]
    formatted = format_tools_for_model(payload)
    required = formatted[0]["input_schema"]["required"]
    assert required == ["query"]
    assert "limit" not in required
def test_optional_parameters_with_null_union():
    """PEP 604 `X | None` parameters must not appear in 'required'."""

    def search_database(query: str, limit: int | None = None):
        """Search database with optional filters

        Args:
            query: Search query
            limit (Optional): Maximum results to return
        """
        return f"Searching database for {query} with limit {limit}"

    payload = [{"type": "function", "function": Function.from_callable(search_database).to_dict()}]
    formatted = format_tools_for_model(payload)
    required = formatted[0]["input_schema"]["required"]
    assert required == ["query"]
    assert "limit" not in required
def test_parameters_with_anyof_schema():
    """Test handling of parameters with anyOf schemas.

    Union-typed parameters should be expressed via `anyOf` and must not carry
    a conflicting top-level "type" key.
    """

    def process_data(data: Union[str, Dict[str, Any]]):
        """Process data with flexible input

        Args:
            data: Data to process
        """
        return f"Processing data: {data}"

    function = Function.from_callable(process_data)
    tools = [
        {
            "type": "function",
            "function": function.to_dict(),
        }
    ]
    # NOTE: removed a leftover `print(tools)` debug statement.
    result = format_tools_for_model(tools)
    data_property = result[0]["input_schema"]["properties"]["data"]
    assert "anyOf" in data_property
    assert "type" not in data_property
    assert data_property["anyOf"] == [
        {"type": "string"},
        {
            "type": "object",
            "propertyNames": {"type": "string"},
            "additionalProperties": {"type": "object", "properties": {}, "additionalProperties": False},
        },
    ]
def test_parameter_with_list_type_containing_null():
    """Union[..., None] parameters are treated as optional."""

    def flexible_func(required_param: str, optional_param: Union[str, None] = None):
        """Function with flexible parameters

        Args:
            required_param: Required parameter
            optional_param: Optional parameter
        """
        return f"Required parameter: {required_param}, Optional parameter: {optional_param}"

    formatted = format_tools_for_model(
        [{"type": "function", "function": Function.from_callable(flexible_func).to_dict()}]
    )
    required = formatted[0]["input_schema"]["required"]
    assert "required_param" in required
    assert "optional_param" not in required
def test_parameter_missing_description():
    """A parameter with no Args entry gets an empty description string."""

    def test_func(param1: str):
        """Test function"""
        return f"Test function: {param1}"

    formatted = format_tools_for_model(
        [{"type": "function", "function": Function.from_callable(test_func).to_dict()}]
    )
    param_schema = formatted[0]["input_schema"]["properties"]["param1"]
    assert param_schema["description"] == ""
    assert param_schema["type"] == "string"
def test_complex_nested_schema():
    """Nested pydantic models survive conversion with their full schema intact."""

    class NestedParam(BaseModel):
        nested_field: bool

    class ComplexParam(BaseModel):
        simple_param: str = Field(description="A simple string parameter")
        array_param: List[int] = Field(description="An array of integers")
        object_param: Dict[str, Any] = Field(description="An object parameter")
        nested_param: NestedParam = Field(description="A nested parameter")

    def complex_func(param: ComplexParam):
        """Function with complex parameters"""
        return f"Complex parameter: {param}"

    formatted = format_tools_for_model(
        [{"type": "function", "function": Function.from_callable(complex_func).to_dict()}]
    )
    schema_properties = formatted[0]["input_schema"]["properties"]
    assert "param" in schema_properties
    param_properties = schema_properties["param"]["properties"]
    assert param_properties["simple_param"] == {
        "title": "Simple Param",
        "type": "string",
        "description": "A simple string parameter",
    }
    assert param_properties["array_param"] == {
        "title": "Array Param",
        "type": "array",
        "items": {"type": "integer"},
        "description": "An array of integers",
    }
    assert param_properties["object_param"] == {
        "title": "Object Param",
        "type": "object",
        "description": "An object parameter",
        "additionalProperties": True,
    }
    assert param_properties["nested_param"] == {
        "title": "NestedParam",
        "type": "object",
        "properties": {"nested_field": {"title": "Nested Field", "type": "boolean"}},
        "required": ["nested_field"],
    }
    # Spot-check the nested field's own schema as well.
    assert param_properties["nested_param"]["properties"]["nested_field"] == {
        "title": "Nested Field",
        "type": "boolean",
    }
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/anthropic/test_format_tools.py",
"license": "Apache License 2.0",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/neo4j.py | import os
from typing import Any, List, Optional
try:
from neo4j import GraphDatabase
except ImportError:
raise ImportError("`neo4j` not installed. Please install using `pip install neo4j`")
from agno.tools import Toolkit
from agno.utils.log import log_debug, logger
class Neo4jTools(Toolkit):
    """Agno toolkit exposing read/query helpers for a Neo4j database."""

    def __init__(
        self,
        uri: Optional[str] = None,
        user: Optional[str] = None,
        password: Optional[str] = None,
        database: Optional[str] = None,
        # Enable flags for <6 functions
        enable_list_labels: bool = True,
        enable_list_relationships: bool = True,
        enable_get_schema: bool = True,
        enable_run_cypher: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """
        Initialize the Neo4jTools toolkit.

        Connection parameters can be provided directly; any that are omitted
        fall back to the NEO4J_URI, NEO4J_USERNAME and NEO4J_PASSWORD
        environment variables.

        Args:
            uri (Optional[str]): Neo4j connection URI. Falls back to NEO4J_URI,
                then to "bolt://localhost:7687".
            user (Optional[str]): Username. Falls back to NEO4J_USERNAME.
            password (Optional[str]): Password. Falls back to NEO4J_PASSWORD.
            database (Optional[str]): Database to run queries against.
                Defaults to "neo4j".
            enable_list_labels (bool): Register the list_labels tool.
            enable_list_relationships (bool): Register the
                list_relationship_types tool.
            enable_get_schema (bool): Register the get_schema tool.
            enable_run_cypher (bool): Register the run_cypher_query tool.
            all (bool): Register every tool regardless of the individual
                enable flags. (Shadows the builtin ``all`` inside this method.)
            **kwargs: Additional keyword arguments passed to Toolkit.

        Raises:
            ValueError: If a username or password cannot be resolved.
            Exception: Re-raised if the driver cannot connect to the server.
        """
        # Determine the connection URI and credentials
        uri = uri or os.getenv("NEO4J_URI", "bolt://localhost:7687")
        user = user or os.getenv("NEO4J_USERNAME")
        password = password or os.getenv("NEO4J_PASSWORD")
        if user is None or password is None:
            raise ValueError("Username or password for Neo4j not provided")
        # Create the Neo4j driver and fail fast if the server is unreachable
        try:
            self.driver = GraphDatabase.driver(uri, auth=(user, password))  # type: ignore
            self.driver.verify_connectivity()
            log_debug("Connected to Neo4j database")
        except Exception as e:
            logger.error(f"Failed to connect to Neo4j: {e}")
            raise
        self.database = database or "neo4j"
        # Register toolkit methods as tools
        tools: List[Any] = []
        if all or enable_list_labels:
            tools.append(self.list_labels)
        if all or enable_list_relationships:
            tools.append(self.list_relationship_types)
        if all or enable_get_schema:
            tools.append(self.get_schema)
        if all or enable_run_cypher:
            tools.append(self.run_cypher_query)
        super().__init__(name="neo4j_tools", tools=tools, **kwargs)

    def list_labels(self) -> list:
        """
        Retrieve all node labels present in the connected Neo4j database.

        Returns an empty list if the query fails.
        """
        try:
            log_debug("Listing node labels in Neo4j database")
            with self.driver.session(database=self.database) as session:
                result = session.run("CALL db.labels()")
                labels = [record["label"] for record in result]
                return labels
        except Exception as e:
            logger.error(f"Error listing labels: {e}")
            return []

    def list_relationship_types(self) -> list:
        """
        Retrieve all relationship types present in the connected Neo4j database.

        Returns an empty list if the query fails.
        """
        try:
            log_debug("Listing relationship types in Neo4j database")
            with self.driver.session(database=self.database) as session:
                result = session.run("CALL db.relationshipTypes()")
                types = [record["relationshipType"] for record in result]
                return types
        except Exception as e:
            logger.error(f"Error listing relationship types: {e}")
            return []

    def get_schema(self) -> list:
        """
        Retrieve a visualization of the database schema, including nodes and relationships.

        Returns an empty list if the query fails.
        """
        try:
            log_debug("Retrieving Neo4j schema visualization")
            with self.driver.session(database=self.database) as session:
                result = session.run("CALL db.schema.visualization()")
                schema_data = result.data()
                return schema_data
        except Exception as e:
            logger.error(f"Error getting Neo4j schema: {e}")
            return []

    def run_cypher_query(self, query: str) -> list:
        """
        Execute an arbitrary Cypher query against the connected Neo4j database.

        Args:
            query (str): The Cypher query string to execute.

        Returns an empty list if the query fails.
        """
        try:
            log_debug(f"Running Cypher query: {query}")
            with self.driver.session(database=self.database) as session:
                result = session.run(query)  # type: ignore[arg-type]
                data = result.data()
                return data
        except Exception as e:
            logger.error(f"Error running Cypher query: {e}")
            return []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/neo4j.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_neo4j.py | import os
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.neo4j import Neo4jTools
def test_list_labels():
    """Labels from CALL db.labels() are unpacked into a plain list."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        # The session is used as a context manager; route __enter__ back to it.
        session.__enter__.return_value = session
        session.run.return_value = [{"label": "Person"}, {"label": "Movie"}]
        toolkit = Neo4jTools("uri", "user", "password")
        assert toolkit.list_labels() == ["Person", "Movie"]
        session.run.assert_called_with("CALL db.labels()")
def test_list_labels_connection_error():
    """A driver-construction failure propagates out of the constructor."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        driver_mock.side_effect = Exception("Connection error")
        with pytest.raises(Exception, match="Connection error"):
            Neo4jTools("uri", "user", "password")
def test_list_labels_runtime_error():
    """Query failures are swallowed and reported as an empty list."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        session.run.side_effect = Exception("Query failed")
        assert Neo4jTools("uri", "user", "password").list_labels() == []
def test_list_relationship_types():
    """Relationship types from CALL db.relationshipTypes() become a list."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        session.run.return_value = [{"relationshipType": "ACTED_IN"}, {"relationshipType": "DIRECTED"}]
        toolkit = Neo4jTools("uri", "user", "password")
        assert toolkit.list_relationship_types() == ["ACTED_IN", "DIRECTED"]
        session.run.assert_called_with("CALL db.relationshipTypes()")
def test_list_relationship_types_error():
    """Query failures yield an empty relationship-type list."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        session.run.side_effect = Exception("Query failed")
        assert Neo4jTools("uri", "user", "password").list_relationship_types() == []
def test_get_schema():
    """Schema visualization rows are surfaced via result.data()."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        query_result = MagicMock()
        query_result.data.return_value = [
            {"nodes": [{"id": 1, "labels": ["Person"]}], "relationships": [{"id": 1, "type": "ACTED_IN"}]}
        ]
        session.run.return_value = query_result
        schema = Neo4jTools("uri", "user", "password").get_schema()
        assert len(schema) == 1
        assert "nodes" in schema[0]
        assert "relationships" in schema[0]
        session.run.assert_called_with("CALL db.schema.visualization()")
def test_get_schema_error():
    """A failing schema query yields an empty list."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        session.run.side_effect = Exception("Schema query failed")
        assert Neo4jTools("uri", "user", "password").get_schema() == []
def test_run_cypher_query():
    """Arbitrary Cypher is forwarded verbatim; rows come from data()."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        query_result = MagicMock()
        query_result.data.return_value = [{"name": "John", "age": 30}, {"name": "Jane", "age": 25}]
        session.run.return_value = query_result
        cypher = "MATCH (p:Person) RETURN p.name as name, p.age as age"
        rows = Neo4jTools("uri", "user", "password").run_cypher_query(cypher)
        assert len(rows) == 2
        assert rows[0]["name"] == "John"
        assert rows[1]["name"] == "Jane"
        session.run.assert_called_with(cypher)
def test_run_cypher_query_error():
    """A failing Cypher query yields an empty result list."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        session = driver_mock.return_value.session.return_value
        session.__enter__.return_value = session
        session.run.side_effect = Exception("Cypher query failed")
        assert Neo4jTools("uri", "user", "password").run_cypher_query("INVALID QUERY") == []
def test_initialization_with_env_vars():
    """Omitted arguments are resolved from the NEO4J_* environment variables."""
    env = {"NEO4J_URI": "bolt://test-host:7687", "NEO4J_USERNAME": "test_user", "NEO4J_PASSWORD": "test_pass"}
    with patch("neo4j.GraphDatabase.driver") as driver_mock, patch.dict(os.environ, env):
        Neo4jTools()
        driver_mock.assert_called_with("bolt://test-host:7687", auth=("test_user", "test_pass"))
def test_initialization_missing_credentials():
    """With no credentials anywhere, the constructor raises ValueError."""
    with (
        patch("neo4j.GraphDatabase.driver"),
        patch.dict(os.environ, {}, clear=True),
        pytest.raises(ValueError, match="Username or password for Neo4j not provided"),
    ):
        Neo4jTools()
def test_initialization_custom_database():
    """An explicit database name is stored on the toolkit."""
    with patch("neo4j.GraphDatabase.driver"):
        assert Neo4jTools("uri", "user", "password", database="custom_db").database == "custom_db"
def test_initialization_default_database():
    """When no database is given, the toolkit defaults to 'neo4j'."""
    with patch("neo4j.GraphDatabase.driver"):
        assert Neo4jTools("uri", "user", "password").database == "neo4j"
def test_initialization_selective_tools():
    """Only the tools whose enable flags are True get registered."""
    with patch("neo4j.GraphDatabase.driver"):
        toolkit = Neo4jTools(
            "uri",
            "user",
            "password",
            enable_list_labels=True,
            enable_list_relationships=False,
            enable_get_schema=False,
            enable_run_cypher=True,
        )
        registered = [tool.__name__ for tool in toolkit.tools]
        assert "list_labels" in registered
        assert "run_cypher_query" in registered
        assert "list_relationship_types" not in registered
        assert "get_schema" not in registered
def test_driver_connectivity_verification():
    """verify_connectivity() failures abort construction."""
    with patch("neo4j.GraphDatabase.driver") as driver_mock:
        driver_mock.return_value.verify_connectivity.side_effect = Exception("Connection failed")
        with pytest.raises(Exception, match="Connection failed"):
            Neo4jTools("uri", "user", "password")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_neo4j.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/brandfetch.py | """
Brandfetch API toolkit for retrieving brand data and searching brands.
"""
import warnings
from os import getenv
from typing import Any, List, Optional
try:
import httpx
except ImportError:
raise ImportError("`httpx` not installed.")
from agno.tools import Toolkit
class BrandfetchTools(Toolkit):
    """
    Brandfetch API toolkit for retrieving brand data and searching brands.

    Supports both the Brand API (retrieve comprehensive brand data by
    identifier) and the Brand Search API (find brands by name).

    Args:
        api_key: Brandfetch API key for the Brand API. Falls back to the
            BRANDFETCH_API_KEY environment variable.
        client_id: Brandfetch Client ID for the Brand Search API. Falls back
            to the BRANDFETCH_CLIENT_ID environment variable.
        base_url: Base URL of the Brandfetch API.
        timeout: HTTP timeout in seconds for all requests.
        enable_search_by_identifier: Register the search_by_identifier tool.
        enable_search_by_brand: Register the search_by_brand tool.
        all: Register every tool regardless of the individual enable flags.
        async_tools: Deprecated; async variants are now selected automatically.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        client_id: Optional[str] = None,
        base_url: str = "https://api.brandfetch.io/v2",
        timeout: Optional[float] = 20.0,
        enable_search_by_identifier: bool = True,
        enable_search_by_brand: bool = False,
        all: bool = False,
        async_tools: bool = False,  # Deprecated
        **kwargs,
    ):
        # Handle deprecated async_tools parameter
        if async_tools:
            warnings.warn(
                "The 'async_tools' parameter is deprecated and will be removed in a future version. "
                "Async tools are now automatically used when calling agent.arun() or agent.aprint_response().",
                DeprecationWarning,
                stacklevel=2,
            )
        self.api_key = api_key or getenv("BRANDFETCH_API_KEY")
        self.client_id = client_id or getenv("BRANDFETCH_CLIENT_ID")
        self.base_url = base_url
        self.timeout = httpx.Timeout(timeout)
        self.search_url = f"{self.base_url}/search"
        self.brand_url = f"{self.base_url}/brands"
        # Build tools lists
        # sync tools: used by agent.run() and agent.print_response()
        # async tools: used by agent.arun() and agent.aprint_response()
        tools: List[Any] = []
        async_tools_list: List[tuple] = []
        if all or enable_search_by_identifier:
            tools.append(self.search_by_identifier)
            async_tools_list.append((self.asearch_by_identifier, "search_by_identifier"))
        if all or enable_search_by_brand:
            tools.append(self.search_by_brand)
            async_tools_list.append((self.asearch_by_brand, "search_by_brand"))
        name = kwargs.pop("name", "brandfetch_tools")
        super().__init__(name=name, tools=tools, async_tools=async_tools_list, **kwargs)

    @staticmethod
    def _http_error_payload(status_code: int, not_found_error: str, auth_error: str) -> dict[str, Any]:
        """Map an HTTP error status to the toolkit's error payload.

        Centralizes the 404/401/429/other handling that was previously
        duplicated across all four request methods.

        Args:
            status_code: HTTP status code of the failed response.
            not_found_error: Message to return for a 404 response.
            auth_error: Message to return for a 401 response.
        """
        if status_code == 404:
            return {"error": not_found_error}
        if status_code == 401:
            return {"error": auth_error}
        if status_code == 429:
            return {"error": "Rate limit exceeded"}
        return {"error": f"API error: {status_code}"}

    async def asearch_by_identifier(self, identifier: str) -> dict[str, Any]:
        """
        Search for brand data by identifier (domain, brand id, isin, stock ticker).

        Args:
            identifier: Options are you can use: Domain (nike.com), Brand ID (id_0dwKPKT), ISIN (US6541061031), Stock Ticker (NKE)

        Returns:
            Dict containing brand data including logos, colors, fonts, and other brand assets

        Raises:
            ValueError: If no API key is provided
        """
        if not self.api_key:
            raise ValueError("API key is required for brand search by identifier")
        url = f"{self.brand_url}/{identifier}"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.get(url, headers=headers)
                response.raise_for_status()
                return response.json()
        except httpx.HTTPStatusError as e:
            return self._http_error_payload(
                e.response.status_code,
                not_found_error=f"Brand not found for identifier: {identifier}",
                auth_error="Invalid API key",
            )
        except httpx.RequestError as e:
            return {"error": f"Request failed: {str(e)}"}

    def search_by_identifier(self, identifier: str) -> dict[str, Any]:
        """
        Search for brand data by identifier (domain, brand id, isin, stock ticker).

        Args:
            identifier: Options are you can use: Domain (nike.com), Brand ID (id_0dwKPKT), ISIN (US6541061031), Stock Ticker (NKE)

        Returns:
            Dict containing brand data including logos, colors, fonts, and other brand assets

        Raises:
            ValueError: If no API key is provided
        """
        if not self.api_key:
            raise ValueError("API key is required for brand search by identifier")
        url = f"{self.brand_url}/{identifier}"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        try:
            with httpx.Client(timeout=self.timeout) as client:
                response = client.get(url, headers=headers)
                response.raise_for_status()
                return response.json()
        except httpx.HTTPStatusError as e:
            return self._http_error_payload(
                e.response.status_code,
                not_found_error=f"Brand not found for identifier: {identifier}",
                auth_error="Invalid API key",
            )
        except httpx.RequestError as e:
            return {"error": f"Request failed: {str(e)}"}

    async def asearch_by_brand(self, name: str) -> dict[str, Any]:
        """
        Search for brands by name using the Brand Search API - can give you the right brand id to use for the brand api.

        Args:
            name: Brand name to search for (e.g., 'Google', 'Apple')

        Returns:
            Dict containing search results with brand matches

        Raises:
            ValueError: If no client ID is provided
        """
        if not self.client_id:
            raise ValueError("Client ID is required for brand search by name")
        url = f"{self.search_url}/{name}"
        params = {"c": self.client_id}
        try:
            async with httpx.AsyncClient(timeout=self.timeout) as client:
                response = await client.get(url, params=params)
                response.raise_for_status()
                return response.json()
        except httpx.HTTPStatusError as e:
            return self._http_error_payload(
                e.response.status_code,
                not_found_error=f"No brands found for name: {name}",
                auth_error="Invalid client ID",
            )
        except httpx.RequestError as e:
            return {"error": f"Request failed: {str(e)}"}

    def search_by_brand(self, name: str) -> dict[str, Any]:
        """
        Search for brands by name using the Brand Search API - can give you the right brand id to use for the brand api.

        Args:
            name: Brand name to search for (e.g., 'Google', 'Apple')

        Returns:
            Dict containing search results with brand matches

        Raises:
            ValueError: If no client ID is provided
        """
        if not self.client_id:
            raise ValueError("Client ID is required for brand search by name")
        url = f"{self.search_url}/{name}"
        params = {"c": self.client_id}
        try:
            with httpx.Client(timeout=self.timeout) as client:
                response = client.get(url, params=params)
                response.raise_for_status()
                return response.json()
        except httpx.HTTPStatusError as e:
            return self._http_error_payload(
                e.response.status_code,
                not_found_error=f"No brands found for name: {name}",
                auth_error="Invalid client ID",
            )
        except httpx.RequestError as e:
            return {"error": f"Request failed: {str(e)}"}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/brandfetch.py",
"license": "Apache License 2.0",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/models/dashscope/dashscope.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class DashScope(OpenAILike):
    """
    A class for interacting with Qwen models via the DashScope API.

    Attributes:
        id (str): The model id. Defaults to "qwen-plus".
        name (str): The model name. Defaults to "Qwen".
        provider (str): The provider name. Defaults to "Dashscope".
        api_key (Optional[str]): The DashScope API key. Resolved from
            DASHSCOPE_API_KEY or QWEN_API_KEY once, at class-definition
            (import) time.
        base_url (str): The base URL. Defaults to
            "https://dashscope-intl.aliyuncs.com/compatible-mode/v1".
        enable_thinking (bool): Enable thinking process (DashScope native parameter). Defaults to False.
        include_thoughts (Optional[bool]): Include thinking process in response
            (alternative parameter); when set it overrides enable_thinking. Defaults to None.
        thinking_budget (Optional[int]): Forwarded as
            extra_body["thinking_budget"] when set.
    """

    id: str = "qwen-plus"
    name: str = "Qwen"
    provider: str = "Dashscope"
    # NOTE: getenv() runs when this class body is evaluated, not per-instance.
    api_key: Optional[str] = getenv("DASHSCOPE_API_KEY") or getenv("QWEN_API_KEY")
    base_url: str = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
    # Thinking parameters
    enable_thinking: bool = False
    include_thoughts: Optional[bool] = None
    thinking_budget: Optional[int] = None
    # DashScope supports structured outputs
    supports_native_structured_outputs: bool = True
    supports_json_schema_outputs: bool = True

    def _get_client_params(self) -> Dict[str, Any]:
        """Build the kwargs used to construct the OpenAI-compatible client.

        Raises:
            ModelAuthenticationError: If no API key is set and
                DASHSCOPE_API_KEY is not present in the environment.
        """
        if not self.api_key:
            self.api_key = getenv("DASHSCOPE_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="DASHSCOPE_API_KEY not set. Please set the DASHSCOPE_API_KEY environment variable.",
                model_name=self.name,
            )
        # Define base client params
        base_params = {
            "api_key": self.api_key,
            "organization": self.organization,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        # Create client_params dict with non-None values
        client_params = {k: v for k, v in base_params.items() if v is not None}
        # Add additional client params if provided
        if self.client_params:
            client_params.update(self.client_params)
        return client_params

    def get_request_params(
        self,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """Extend the base request params with DashScope thinking options.

        NOTE(review): **kwargs are accepted but not forwarded to super() —
        confirm this is intentional.
        """
        params = super().get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice)
        # include_thoughts, when set, overwrites enable_thinking (mutates self).
        if self.include_thoughts is not None:
            self.enable_thinking = self.include_thoughts
        # enable_thinking is a bool (never None), so extra_body is always attached.
        if self.enable_thinking is not None:
            params["extra_body"] = {
                "enable_thinking": self.enable_thinking,
            }
            if self.thinking_budget is not None:
                params["extra_body"]["thinking_budget"] = self.thinking_budget
        return params
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/dashscope/dashscope.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/dashscope/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput # noqa
from agno.db.sqlite import SqliteDb
from agno.models.dashscope import DashScope
def _assert_metrics(response: RunOutput):
    """Sanity-check that token metrics are present, positive, and consistent."""
    metrics = response.metrics
    assert metrics is not None
    in_tokens = metrics.input_tokens
    out_tokens = metrics.output_tokens
    total = metrics.total_tokens
    assert in_tokens > 0
    assert out_tokens > 0
    assert total > 0
    assert total == in_tokens + out_tokens
def test_basic():
    """A plain run returns content plus system/user/assistant messages."""
    agent = Agent(model=DashScope(id="qwen-plus"), markdown=True, telemetry=False)
    result: RunOutput = agent.run("Share a 2 sentence horror story")
    assert result.content is not None
    assert result.messages is not None
    assert len(result.messages) == 3
    assert [message.role for message in result.messages] == ["system", "user", "assistant"]
    _assert_metrics(result)
def test_basic_stream():
    """Every streamed chunk carries content."""
    agent = Agent(model=DashScope(id="qwen-plus"), markdown=True, telemetry=False)
    for chunk in agent.run("Say 'hi'", stream=True):
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """The async run path mirrors the sync behaviour."""
    agent = Agent(model=DashScope(id="qwen-plus"), markdown=True, telemetry=False)
    result = await agent.arun("Share a 2 sentence horror story")
    assert result.content is not None
    assert result.messages is not None
    assert len(result.messages) == 3
    assert [message.role for message in result.messages] == ["system", "user", "assistant"]
    _assert_metrics(result)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Async streaming: each emitted chunk must carry content."""
    agent = Agent(model=DashScope(id="qwen-plus"), markdown=True, telemetry=False)
    async for chunk in agent.arun("Share a 2 sentence horror story", stream=True):
        assert chunk.content is not None
def test_with_memory():
    """An agent with a SQLite-backed session should recall facts across runs.

    Also verifies the stored message roles and the response metrics.
    """
    agent = Agent(
        # Fixed path: was "tmp/dashcope/..." (typo), inconsistent with the
        # "tmp/dashscope" directory used by test_history in this module.
        db=SqliteDb(db_file="tmp/dashscope/test_with_memory.db"),
        model=DashScope(id="qwen-plus"),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )

    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None

    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content

    # Verify memories were created: two full turns plus the system message
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]

    # Test metrics structure and types
    _assert_metrics(response2)
def test_output_schema():
    """The agent should parse its reply into the supplied Pydantic schema."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=DashScope(id="qwen-plus"),
        output_schema=MovieScript,
        telemetry=False,
    )
    result = agent.run(
        "Create a movie about time travel. Please return a JSON object with the title, genre, and plot."
    )

    # Verify structured output: instance of the schema with all fields populated.
    assert isinstance(result.content, MovieScript)
    for field_name in ("title", "genre", "plot"):
        assert getattr(result.content, field_name) is not None
def test_json_response_mode():
    """use_json_mode should still yield a parsed MovieScript instance."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=DashScope(id="qwen-plus"),
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )
    movie = agent.run("Create a movie about time travel.").content

    # Verify structured output
    assert isinstance(movie, MovieScript)
    assert movie.title is not None
    assert movie.genre is not None
    assert movie.plot is not None
def test_history():
    """Each successive run should add one user + one assistant message to the context."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        db=SqliteDb(db_file="tmp/dashscope/test_history.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    # Four turns; after turn N the context holds 2*N messages.
    for turn, prompt in enumerate(["Hello", "Hello 2", "Hello 3", "Hello 4"], start=1):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        assert len(run_output.messages) == 2 * turn
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/dashscope/test_basic.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/dashscope/test_multimodal.py | from pathlib import Path
import pytest
from agno.agent.agent import Agent
from agno.media import Image
from agno.models.dashscope import DashScope
def test_image_input_url():
    """Vision model should describe a remote image referenced by URL."""
    agent = Agent(model=DashScope(id="qwen-vl-plus"), markdown=True, telemetry=False)
    reply = agent.run(
        "Tell me about this image.",
        images=[Image(url="https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg")],
    )

    lowered = reply.content.lower()
    assert "golden" in lowered
    assert "bridge" in lowered
def test_image_input_bytes():
    """Vision model should describe an image passed in as raw bytes."""
    agent = Agent(model=DashScope(id="qwen-vl-plus"), telemetry=False)

    # Read the image file content as bytes
    raw = Path(__file__).parent.parent.joinpath("sample_image.jpg").read_bytes()

    reply = agent.run(
        "Tell me about this image.",
        images=[Image(content=raw)],
    )

    text = reply.content.lower()
    assert "golden" in text
    assert "bridge" in text
@pytest.mark.asyncio
async def test_async_image_input_stream():
    """Async streaming with an image input: chunks arrive and describe the bridge."""
    agent = Agent(model=DashScope(id="qwen-vl-plus"), markdown=True, telemetry=False)
    img_bytes = Path(__file__).parent.parent.joinpath("sample_image.jpg").read_bytes()

    stream = await agent.arun(
        "Describe this image in detail.", images=[Image(content=img_bytes, format="jpeg")], stream=True
    )

    chunks = []
    async for piece in stream:
        chunks.append(piece)
        assert piece.content is not None

    assert len(chunks) > 0
    combined = "".join(c.content or "" for c in chunks)
    assert "bridge" in combined.lower()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/dashscope/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/dashscope/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent
from agno.models.dashscope import DashScope
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """A stock-price question should trigger at least one tool call."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    reply = agent.run("What is the current price of TSLA?")

    # Verify tool usage
    assert any(m.tool_calls for m in reply.messages)
    assert reply.content is not None
def test_tool_use_stream():
    """Streaming tool use: at least one tool-call event must appear in the stream."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )

    events = []
    saw_tool_call = False
    # Check for ToolCallStartedEvent or ToolCallCompletedEvent
    for event in agent.run("What is the current price of TSLA?", stream=True, stream_events=True):
        events.append(event)
        if getattr(event, "tool", None) and event.tool.tool_name:
            saw_tool_call = True

    assert len(events) > 0
    assert saw_tool_call, "No tool calls observed in stream"
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async variant: assistant messages should include tool calls."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    reply = await agent.arun("What is the current price of TSLA?")

    # Verify tool usage (only assistant messages can carry tool calls here)
    assistant_msgs = [m for m in reply.messages if m.role == "assistant"]
    assert any(m.tool_calls for m in assistant_msgs)
    assert reply.content is not None
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming tool use: a tool-call event must appear in the stream."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    stream = await agent.arun("What is the current price of TSLA?", stream=True, stream_events=True)

    seen = []
    tool_seen = False
    async for ev in stream:
        seen.append(ev)
        if getattr(ev, "tool", None) and ev.tool.tool_name:
            tool_seen = True

    assert len(seen) > 0
    assert tool_seen, "No tool calls observed in stream"
def test_parallel_tool_calls():
    """Asking about two tickers should produce at least two function calls."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    reply = agent.run("What is the current price of TSLA and AAPL?")

    # Verify tool usage: flatten all tool calls across messages.
    calls = [tc for m in reply.messages if m.tool_calls for tc in m.tool_calls]
    function_calls = [c for c in calls if c.get("type", "") == "function"]
    assert len(function_calls) >= 2
    assert reply.content is not None
def test_multiple_tool_calls():
    """A price + news question should exercise two different toolkits."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    reply = agent.run("What is the current price of TSLA and what is the latest news about it?")

    # Verify tool usage: flatten all tool calls across messages.
    calls = [tc for m in reply.messages if m.tool_calls for tc in m.tool_calls]
    function_calls = [c for c in calls if c.get("type", "") == "function"]
    assert len(function_calls) >= 2
    assert reply.content is not None
def test_tool_call_custom_tool_no_parameters():
    """A zero-argument custom function tool should be callable by the model."""

    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )
    reply = agent.run("What is the weather in Tokyo?")

    # Verify tool usage
    assert any(m.tool_calls for m in reply.messages)
    assert reply.content is not None
    assert "Tokyo" in reply.content
def test_tool_call_custom_tool_optional_parameters():
    """A custom tool with an optional parameter should be callable by the model."""

    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city

        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        else:
            return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )
    reply = agent.run("What is the weather in Paris?")

    # Verify tool usage
    assert any(m.tool_calls for m in reply.messages)
    assert reply.content is not None
    assert "70" in reply.content
def test_tool_call_list_parameters():
    """Tools taking list parameters (multiple URLs) should be invoked correctly."""
    agent = Agent(
        model=DashScope(id="qwen-plus"),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )
    reply = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )

    # Verify tool usage
    assert any(m.tool_calls for m in reply.messages)
    calls = [tc for m in reply.messages if m.tool_calls for tc in m.tool_calls]
    for call in calls:
        if call.get("type", "") == "function":
            assert call["function"]["name"] in ["get_contents", "exa_answer", "search_exa"]
    assert reply.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/dashscope/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 156,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/openai/test_openai_responses_id_handling.py | from typing import Any, Dict, List, Optional
from agno.models.message import Message
from agno.models.openai.responses import OpenAIResponses
from agno.models.response import ModelResponse
class _FakeError:
def __init__(self, message: str):
self.message = message
class _FakeOutputFunctionCall:
def __init__(self, *, _id: str, call_id: Optional[str], name: str, arguments: str):
self.type = "function_call"
self.id = _id
self.call_id = call_id
self.name = name
self.arguments = arguments
class _FakeResponse:
    """Fake top-level Responses-API response: id, output items, text, usage, error."""

    def __init__(
        self,
        *,
        _id: str,
        output: List[Any],
        output_text: str = "",
        usage: Optional[Dict[str, Any]] = None,
        error: Optional[_FakeError] = None,
    ):
        self.__dict__.update(
            id=_id, output=output, output_text=output_text, usage=usage, error=error
        )
class _FakeStreamItem:
def __init__(self, *, _id: str, call_id: Optional[str], name: str, arguments: str):
self.type = "function_call"
self.id = _id
self.call_id = call_id
self.name = name
self.arguments = arguments
class _FakeStreamEvent:
    """Fake streaming event: an event type plus optional item/delta/response payloads."""

    def __init__(
        self,
        *,
        type: str,
        item: Optional[_FakeStreamItem] = None,
        delta: str = "",
        response: Any = None,
        annotation: Any = None,
    ):
        self.__dict__.update(
            type=type, item=item, delta=delta, response=response, annotation=annotation
        )
def test_format_messages_maps_tool_output_fc_to_call_id():
    """Tool outputs that reference an fc_* id must be normalized to the call_* id."""
    model = OpenAIResponses(id="gpt-4.1-mini")

    # Assistant emitted a function_call with both fc_* and call_* ids
    assistant_with_tool_call = Message(
        role="assistant",
        tool_calls=[
            {
                "id": "fc_abc123",
                "call_id": "call_def456",
                "type": "function",
                "function": {"name": "execute_shell_command", "arguments": '{"command": "ls -la"}'},
            }
        ],
    )
    # Tool output referring to the fc_* id should be normalized to call_*
    tool_output = Message(role="tool", tool_call_id="fc_abc123", content="ok")

    formatted = model._format_messages(
        messages=[
            Message(role="system", content="s"),
            Message(role="user", content="u"),
            assistant_with_tool_call,
            tool_output,
        ]
    )

    # Expect one function_call and one function_call_output normalized
    function_calls = [item for item in formatted if item.get("type") == "function_call"]
    outputs = [item for item in formatted if item.get("type") == "function_call_output"]
    assert len(function_calls) == 1
    assert function_calls[0]["id"] == "fc_abc123"
    assert function_calls[0]["call_id"] == "call_def456"
    assert len(outputs) == 1
    assert outputs[0]["call_id"] == "call_def456"
def test_parse_provider_response_maps_ids():
    """Non-streaming parse should surface both the fc_* and call_* identifiers."""
    model = OpenAIResponses(id="gpt-4.1-mini")
    fake_resp = _FakeResponse(
        _id="resp_1",
        output=[_FakeOutputFunctionCall(_id="fc_abc123", call_id="call_def456", name="execute", arguments="{}")],
        output_text="",
        usage=None,
        error=None,
    )

    parsed: ModelResponse = model._parse_provider_response(fake_resp)  # type: ignore[arg-type]

    assert parsed.tool_calls is not None and len(parsed.tool_calls) == 1
    call = parsed.tool_calls[0]
    assert call["id"] == "fc_abc123"
    assert call["call_id"] == "call_def456"
    # The call_* id is also recorded in the extra tool_call_ids bookkeeping.
    assert parsed.extra is not None and "tool_call_ids" in parsed.extra
    assert parsed.extra["tool_call_ids"][0] == "call_def456"
def test_process_stream_response_builds_tool_calls():
    """Streamed add/delta/done events should accumulate into one finished tool call."""
    model = OpenAIResponses(id="gpt-4.1-mini")
    assistant_message = Message(role="assistant")

    # Simulate function_call added and then completed
    added_event = _FakeStreamEvent(
        type="response.output_item.added",
        item=_FakeStreamItem(_id="fc_abc123", call_id="call_def456", name="execute", arguments="{}"),
    )
    resp, tool_use = model._parse_provider_response_delta(added_event, assistant_message, {})  # type: ignore[arg-type]
    assert resp is not None
    assert resp.role is None
    assert resp.content is None
    assert resp.tool_calls == []

    # Optional: simulate args delta
    args_event = _FakeStreamEvent(type="response.function_call_arguments.delta", delta='{"k":1}')
    resp, tool_use = model._parse_provider_response_delta(args_event, assistant_message, tool_use)  # type: ignore[arg-type]
    assert resp is not None
    assert resp.role is None
    assert resp.content is None
    assert resp.tool_calls == []

    # The done event finalizes the accumulated tool call.
    done_event = _FakeStreamEvent(type="response.output_item.done")
    resp, tool_use = model._parse_provider_response_delta(done_event, assistant_message, tool_use)  # type: ignore[arg-type]
    assert resp is not None
    assert resp.tool_calls is not None and len(resp.tool_calls) == 1
    finished = resp.tool_calls[0]
    assert finished["id"] == "fc_abc123"
    assert finished["call_id"] == "call_def456"
    assert assistant_message.tool_calls is not None and len(assistant_message.tool_calls) == 1
def test_reasoning_previous_response_skips_prior_function_call_items(monkeypatch):
    """Reasoning models with a previous_response_id must not re-send old function calls."""
    model = OpenAIResponses(id="o4-mini")  # reasoning
    # Force _using_reasoning_model to True
    monkeypatch.setattr(model, "_using_reasoning_model", lambda: True)

    prior_assistant = Message(role="assistant")
    prior_assistant.provider_data = {"response_id": "resp_123"}  # type: ignore[attr-defined]

    assistant_with_tool_call = Message(
        role="assistant",
        tool_calls=[
            {
                "id": "fc_abc123",
                "call_id": "call_def456",
                "type": "function",
                "function": {"name": "execute_shell_command", "arguments": "{}"},
            }
        ],
    )

    formatted = model._format_messages(
        messages=[
            Message(role="system", content="s"),
            Message(role="user", content="u"),
            prior_assistant,
            assistant_with_tool_call,
        ]
    )

    # Expect no re-sent function_call when previous_response_id is present for reasoning models
    assert all(item.get("type") != "function_call" for item in formatted)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/openai/test_openai_responses_id_handling.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/trafilatura.py | import json
from typing import Any, Callable, Dict, List, Optional, Set
from agno.tools import Toolkit
from agno.utils.log import log_debug, logger
try:
from trafilatura import (
extract,
extract_metadata,
fetch_url,
html2txt,
)
from trafilatura.meta import reset_caches
# Import spider functionality
try:
from trafilatura.spider import focused_crawler
SPIDER_AVAILABLE = True
except ImportError:
SPIDER_AVAILABLE = False
logger.warning("Trafilatura spider module not available. Web crawling functionality will be disabled.")
except ImportError:
raise ImportError("`trafilatura` not installed. Please install using `pip install trafilatura`")
class TrafilaturaTools(Toolkit):
    """
    TrafilaturaTools is a toolkit for web scraping and text extraction.

    Args:
        output_format (str): Default output format for extractions. Options: 'txt', 'json', 'xml', 'markdown', 'csv', 'html', 'xmltei'.
        include_comments (bool): Whether to extract comments along with main text by default.
        include_tables (bool): Whether to include table content by default.
        include_images (bool): Whether to include image information by default (experimental).
        include_formatting (bool): Whether to preserve formatting by default.
        include_links (bool): Whether to preserve links by default (experimental).
        with_metadata (bool): Whether to include metadata in extractions by default.
        favor_precision (bool): Whether to prefer precision over recall by default.
        favor_recall (bool): Whether to prefer recall over precision by default.
        target_language (Optional[str]): Default target language filter (ISO 639-1 format).
        deduplicate (bool): Whether to remove duplicate segments by default.
        max_tree_size (Optional[int]): Maximum tree size for processing.
        max_crawl_urls (int): Maximum number of URLs to crawl per website.
        max_known_urls (int): Maximum number of known URLs during crawling.
    """

    def __init__(
        self,
        output_format: str = "txt",
        include_comments: bool = True,
        include_tables: bool = True,
        include_images: bool = False,
        include_formatting: bool = False,
        include_links: bool = False,
        with_metadata: bool = False,
        favor_precision: bool = False,
        favor_recall: bool = False,
        target_language: Optional[str] = None,
        deduplicate: bool = False,
        max_tree_size: Optional[int] = None,
        max_crawl_urls: int = 10,
        max_known_urls: int = 100000,
        # Tool enable flags for <6 functions
        enable_extract_text: bool = True,
        enable_extract_metadata_only: bool = True,
        enable_html_to_text: bool = True,
        enable_extract_batch: bool = True,
        enable_crawl_website: bool = True,
        # NOTE: `all` shadows the builtin; kept for backward compatibility.
        all: bool = False,
        **kwargs,
    ):
        self.output_format = output_format
        self.include_comments = include_comments
        self.include_tables = include_tables
        self.include_images = include_images
        self.include_formatting = include_formatting
        self.include_links = include_links
        self.with_metadata = with_metadata
        self.favor_precision = favor_precision
        self.favor_recall = favor_recall
        self.target_language = target_language
        self.deduplicate = deduplicate
        self.max_tree_size = max_tree_size
        self.max_crawl_urls = max_crawl_urls
        self.max_known_urls = max_known_urls

        tools: List[Callable] = []
        if all or enable_extract_text:
            tools.append(self.extract_text)
        if all or enable_extract_metadata_only:
            tools.append(self.extract_metadata_only)
        if all or enable_html_to_text:
            tools.append(self.html_to_text)
        if all or enable_extract_batch:
            tools.append(self.extract_batch)
        if all or enable_crawl_website:
            # Crawling is only registered when the optional spider module imported.
            if not SPIDER_AVAILABLE:
                logger.warning("Web crawling requested but spider module not available. Skipping crawler tool.")
            else:
                tools.append(self.crawl_website)

        super().__init__(name="trafilatura_tools", tools=tools, **kwargs)

    def _get_extraction_params(
        self,
        output_format: Optional[str] = None,
        include_comments: Optional[bool] = None,
        include_tables: Optional[bool] = None,
        include_images: Optional[bool] = None,
        include_formatting: Optional[bool] = None,
        include_links: Optional[bool] = None,
        with_metadata: Optional[bool] = None,
        favor_precision: Optional[bool] = None,
        favor_recall: Optional[bool] = None,
        target_language: Optional[str] = None,
        deduplicate: Optional[bool] = None,
        max_tree_size: Optional[int] = None,
        url_blacklist: Optional[Set[str]] = None,
        author_blacklist: Optional[Set[str]] = None,
    ) -> Dict[str, Any]:
        """Helper method to build extraction parameters with fallbacks to instance defaults."""
        return {
            "output_format": output_format if output_format is not None else self.output_format,
            "include_comments": include_comments if include_comments is not None else self.include_comments,
            "include_tables": include_tables if include_tables is not None else self.include_tables,
            "include_images": include_images if include_images is not None else self.include_images,
            "include_formatting": include_formatting if include_formatting is not None else self.include_formatting,
            "include_links": include_links if include_links is not None else self.include_links,
            "with_metadata": with_metadata if with_metadata is not None else self.with_metadata,
            "favor_precision": favor_precision if favor_precision is not None else self.favor_precision,
            "favor_recall": favor_recall if favor_recall is not None else self.favor_recall,
            "target_language": target_language if target_language is not None else self.target_language,
            "deduplicate": deduplicate if deduplicate is not None else self.deduplicate,
            "max_tree_size": max_tree_size if max_tree_size is not None else self.max_tree_size,
            "url_blacklist": url_blacklist,
            "author_blacklist": author_blacklist,
        }

    def extract_text(
        self,
        url: str,
        output_format: Optional[str] = None,
    ) -> str:
        """
        Extract main text content from a web page URL using Trafilatura.

        Args:
            url (str): The URL to extract content from.
            output_format (Optional[str]): Output format. Options: 'txt', 'json', 'xml', 'markdown', 'csv', 'html', 'xmltei'.

        Returns:
            str: Extracted content in the specified format, or error message if extraction fails.
        """
        try:
            log_debug(f"Extracting text from URL: {url}")

            # Fetch the webpage content
            html_content = fetch_url(url)
            if not html_content:
                return f"Error: Could not fetch content from URL: {url}"

            # Get extraction parameters
            params = self._get_extraction_params(output_format=output_format)
            result = extract(html_content, url=url, **params)
            if result is None:
                return f"Error: Could not extract readable content from URL: {url}"

            # Reset caches
            reset_caches()
            return result
        except Exception as e:
            logger.warning(f"Error extracting text from {url}: {e}")
            return f"Error extracting text from {url}: {e}"

    def extract_metadata_only(
        self,
        url: str,
        as_json: bool = True,
    ) -> str:
        """
        Extract only metadata from a web page URL.

        Args:
            url (str): The URL to extract metadata from.
            as_json (bool): Whether to return metadata as JSON string.

        Returns:
            str: Extracted metadata as JSON string or formatted text.
        """
        try:
            log_debug(f"Extracting metadata from URL: {url}")

            # Fetch the webpage content
            html_content = fetch_url(url)
            if not html_content:
                return f"Error: Could not fetch content from URL: {url}"

            # Extract metadata
            metadata_doc = extract_metadata(
                html_content,
                default_url=url,
                extensive=True,  # default
                author_blacklist=None,
            )
            if metadata_doc is None:
                return f"Error: Could not extract metadata from URL: {url}"

            metadata_dict = metadata_doc.as_dict()

            # Reset caches
            reset_caches()

            if as_json:
                return json.dumps(metadata_dict, indent=2, default=str)
            else:
                return "\n".join(f"{key}: {value}" for key, value in metadata_dict.items())
        except Exception as e:
            logger.warning(f"Error extracting metadata from {url}: {e}")
            return f"Error extracting metadata from {url}: {e}"

    def crawl_website(
        self,
        homepage_url: str,
        extract_content: bool = False,
    ) -> str:
        """
        Crawl a website and optionally extract content from discovered pages.

        Args:
            homepage_url (str): The starting URL (preferably homepage) to crawl from.
            extract_content (bool): Whether to extract content from discovered URLs.

        Returns:
            str: JSON containing crawl results and optionally extracted content.
        """
        if not SPIDER_AVAILABLE:
            return "Error: Web crawling functionality not available. Trafilatura spider module could not be imported."

        try:
            log_debug(f"Starting website crawl from: {homepage_url}")

            # Use instance configuration
            max_seen = self.max_crawl_urls
            max_known = self.max_known_urls
            lang = self.target_language

            # Perform focused crawling
            to_visit, known_links = focused_crawler(
                homepage=homepage_url,
                max_seen_urls=max_seen,
                max_known_urls=max_known,
                lang=lang,
            )

            crawl_results = {
                "homepage": homepage_url,
                "to_visit": list(to_visit) if to_visit else [],
                "known_links": list(known_links) if known_links else [],
                "stats": {
                    "urls_to_visit": len(to_visit) if to_visit else 0,
                    "known_links_count": len(known_links) if known_links else 0,
                },
            }

            # Optionally extract content from discovered URLs
            if extract_content and known_links:
                log_debug("Extracting content from discovered URLs")
                extracted_content = {}
                # Limit extraction to avoid overwhelming responses
                urls_to_extract = list(known_links)[: min(10, len(known_links))]
                for url in urls_to_extract:
                    try:
                        params = self._get_extraction_params()
                        html_content = fetch_url(url)
                        if html_content:
                            content = extract(html_content, url=url, **params)
                            if content:
                                extracted_content[url] = content
                    except Exception as e:
                        extracted_content[url] = f"Error extracting content: {e}"
                crawl_results["extracted_content"] = extracted_content

            # Reset caches
            reset_caches()

            return json.dumps(crawl_results, indent=2, default=str)
        except Exception as e:
            logger.warning(f"Error crawling website {homepage_url}: {e}")
            return f"Error crawling website {homepage_url}: {e}"

    def html_to_text(
        self,
        html_content: str,
        clean: bool = True,
    ) -> str:
        """
        Convert HTML content to plain text using Trafilatura's html2txt function.

        Args:
            html_content (str): The HTML content to convert.
            clean (bool): Whether to remove potentially undesirable elements.

        Returns:
            str: Plain text extracted from HTML.
        """
        try:
            log_debug("Converting HTML to text")
            result = html2txt(html_content, clean=clean)

            # Reset caches
            reset_caches()

            return result if result else "Error: Could not extract text from HTML content"
        except Exception as e:
            logger.warning(f"Error converting HTML to text: {e}")
            return f"Error converting HTML to text: {e}"

    def extract_batch(
        self,
        urls: List[str],
    ) -> str:
        """
        Extract content from multiple URLs in batch.

        Args:
            urls (List[str]): List of URLs to extract content from.

        Returns:
            str: JSON containing batch extraction results.
        """
        try:
            log_debug(f"Starting batch extraction for {len(urls)} URLs")

            results = {}
            failed_urls = []
            successful_count = 0

            for url in urls:
                try:
                    params = self._get_extraction_params()
                    html_content = fetch_url(url)
                    if html_content:
                        content = extract(html_content, url=url, **params)
                        if content:
                            results[url] = content
                            successful_count += 1
                        else:
                            failed_urls.append(url)
                    else:
                        failed_urls.append(url)
                except Exception as e:
                    # Keep the error message in results for context, but count the URL as failed.
                    failed_urls.append(url)
                    results[url] = f"Error: {e}"

            # Reset caches after batch processing
            reset_caches()

            batch_results = {
                # FIX: count successes directly instead of sniffing values for an
                # "Error:" prefix, which miscounted legitimate content that happens
                # to start with "Error:".
                "successful_extractions": successful_count,
                "failed_extractions": len(failed_urls),
                "total_urls": len(urls),
                "results": results,
                "failed_urls": failed_urls,
            }

            return json.dumps(batch_results, indent=2, default=str)
        except Exception as e:
            logger.warning(f"Error in batch extraction: {e}")
            return f"Error in batch extraction: {e}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/trafilatura.py",
"license": "Apache License 2.0",
"lines": 322,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_trafilatura.py | import json
from unittest.mock import Mock, patch
import pytest
from agno.tools.trafilatura import TrafilaturaTools
@pytest.fixture
def mock_trafilatura_modules():
    """Mock all trafilatura module imports."""
    # Patch every trafilatura entry point the toolkit module imported, so no
    # fixture-driven test performs real network fetches or extraction work.
    with (
        patch("agno.tools.trafilatura.extract") as extract_mock,
        patch("agno.tools.trafilatura.extract_metadata") as extract_metadata_mock,
        patch("agno.tools.trafilatura.fetch_url") as fetch_url_mock,
        patch("agno.tools.trafilatura.html2txt") as html2txt_mock,
        patch("agno.tools.trafilatura.reset_caches") as reset_caches_mock,
        patch("agno.tools.trafilatura.focused_crawler", create=True) as crawler_mock,
    ):
        yield {
            "extract": extract_mock,
            "extract_metadata": extract_metadata_mock,
            "fetch_url": fetch_url_mock,
            "html2txt": html2txt_mock,
            "reset_caches": reset_caches_mock,
            "focused_crawler": crawler_mock,
        }
@pytest.fixture
def trafilatura_tools(mock_trafilatura_modules):
    """Create a TrafilaturaTools instance with default settings."""
    # The mocked trafilatura functions (fixture argument) keep construction offline.
    tools = TrafilaturaTools()
    return tools
@pytest.fixture
def custom_trafilatura_tools(mock_trafilatura_modules):
    """Create a TrafilaturaTools instance with custom settings."""
    settings = dict(
        output_format="json",
        include_comments=False,
        include_tables=True,
        include_images=True,
        include_formatting=True,
        include_links=True,
        with_metadata=True,
        favor_precision=True,
        target_language="en",
        deduplicate=True,
        max_tree_size=5000,
        max_crawl_urls=20,
        max_known_urls=50000,
    )
    return TrafilaturaTools(**settings)
def create_mock_metadata_document(
    title="Test Title", author="Test Author", date="2024-01-01", url="https://example.com"
):
    """Helper function to create mock metadata document."""
    # The mock only needs to answer as_dict(), which is what the toolkit calls.
    fields = {
        "title": title,
        "author": author,
        "date": date,
        "url": url,
        "text": "Sample text content",
    }
    doc = Mock()
    doc.as_dict.return_value = fields
    return doc
class TestTrafilaturaToolsInitialization:
    """Test class for TrafilaturaTools initialization."""

    def test_initialization_default(self, trafilatura_tools):
        """Test initialization with default values."""
        # Each assertion mirrors a default declared in TrafilaturaTools.__init__.
        assert trafilatura_tools.name == "trafilatura_tools"
        assert trafilatura_tools.output_format == "txt"
        assert trafilatura_tools.include_comments is True
        assert trafilatura_tools.include_tables is True
        assert trafilatura_tools.include_images is False
        assert trafilatura_tools.include_formatting is False
        assert trafilatura_tools.include_links is False
        assert trafilatura_tools.with_metadata is False
        assert trafilatura_tools.favor_precision is False
        assert trafilatura_tools.favor_recall is False
        assert trafilatura_tools.target_language is None
        assert trafilatura_tools.deduplicate is False
        assert trafilatura_tools.max_tree_size is None
        assert trafilatura_tools.max_crawl_urls == 10
        assert trafilatura_tools.max_known_urls == 100000
        # Check registered functions - all tools included by default
        function_names = [func.name for func in trafilatura_tools.functions.values()]
        assert "extract_text" in function_names
        assert "extract_metadata_only" in function_names
        assert "html_to_text" in function_names
        assert "extract_batch" in function_names

    def test_initialization_custom(self, custom_trafilatura_tools):
        """Test initialization with custom values."""
        # Values must match those passed by the custom_trafilatura_tools fixture.
        tools = custom_trafilatura_tools
        assert tools.output_format == "json"
        assert tools.include_comments is False
        assert tools.include_tables is True
        assert tools.include_images is True
        assert tools.include_formatting is True
        assert tools.include_links is True
        assert tools.with_metadata is True
        assert tools.favor_precision is True
        assert tools.target_language == "en"
        assert tools.deduplicate is True
        assert tools.max_tree_size == 5000
        assert tools.max_crawl_urls == 20
        assert tools.max_known_urls == 50000
        # Check all functions are registered
        function_names = [func.name for func in tools.functions.values()]
        assert "extract_text" in function_names
        assert "extract_metadata_only" in function_names
        assert "html_to_text" in function_names
        assert "extract_batch" in function_names

    def test_initialization_include_tools(self, mock_trafilatura_modules):
        """Test initialization with include_tools parameter."""
        # Only the explicitly included tools should be registered.
        tools = TrafilaturaTools(include_tools=["extract_text", "extract_batch"])
        function_names = [func.name for func in tools.functions.values()]
        assert "extract_text" in function_names
        assert "extract_batch" in function_names
        assert "extract_metadata_only" not in function_names
        assert "html_to_text" not in function_names

    def test_initialization_exclude_tools(self, mock_trafilatura_modules):
        """Test initialization with exclude_tools parameter."""
        # Excluded tools are dropped; the rest remain registered.
        tools = TrafilaturaTools(exclude_tools=["crawl_website", "html_to_text"])
        function_names = [func.name for func in tools.functions.values()]
        assert "extract_text" in function_names
        assert "extract_metadata_only" in function_names
        assert "extract_batch" in function_names
        assert "crawl_website" not in function_names
        assert "html_to_text" not in function_names

    @patch("agno.tools.trafilatura.SPIDER_AVAILABLE", False)
    def test_initialization_without_spider(self, mock_trafilatura_modules):
        """Test initialization when spider module is not available."""
        tools = TrafilaturaTools()
        function_names = [func.name for func in tools.functions.values()]
        # crawl_website should not be in functions when spider is not available
        assert "crawl_website" not in function_names
class TestExtractTextMethod:
    """Tests for TrafilaturaTools.extract_text."""

    def test_extract_text_success(self, trafilatura_tools, mock_trafilatura_modules):
        """A fetched page is passed to extract() and the extracted text returned."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = "<html><body>Test content</body></html>"
        mocks["extract"].return_value = "Extracted text content"

        output = trafilatura_tools.extract_text("https://example.com")

        assert output == "Extracted text content"
        mocks["fetch_url"].assert_called_once_with("https://example.com")
        mocks["extract"].assert_called_once()
        mocks["reset_caches"].assert_called_once()

    def test_extract_text_fetch_failure(self, trafilatura_tools, mock_trafilatura_modules):
        """When the page cannot be fetched, an error string is returned."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = None

        output = trafilatura_tools.extract_text("https://example.com")

        assert "Error: Could not fetch content from URL" in output
        mocks["fetch_url"].assert_called_once_with("https://example.com")
        mocks["extract"].assert_not_called()

    def test_extract_text_with_custom_params(self, trafilatura_tools, mock_trafilatura_modules):
        """An explicit output_format is forwarded to extract()."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = "<html><body>Test content</body></html>"
        mocks["extract"].return_value = "Extracted text"

        output = trafilatura_tools.extract_text("https://example.com", output_format="json")

        assert output == "Extracted text"
        # call_args unpacks into (positional args, keyword args).
        _, kwargs = mocks["extract"].call_args
        assert kwargs["output_format"] == "json"

    def test_extract_text_exception_handling(self, trafilatura_tools, mock_trafilatura_modules):
        """Unexpected exceptions are caught and reported in the return value."""
        mock_trafilatura_modules["fetch_url"].side_effect = Exception("Network error")

        output = trafilatura_tools.extract_text("https://example.com")

        assert "Error extracting text from https://example.com: Network error" in output
class TestExtractMetadataOnlyMethod:
    """Tests for TrafilaturaTools.extract_metadata_only."""

    def test_extract_metadata_only_success(self, trafilatura_tools, mock_trafilatura_modules):
        """Metadata extracted from the fetched page is returned as JSON."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = "<html><body>Test content</body></html>"
        mocks["extract_metadata"].return_value = create_mock_metadata_document()

        payload = json.loads(trafilatura_tools.extract_metadata_only("https://example.com"))

        assert payload["title"] == "Test Title"
        assert payload["author"] == "Test Author"
        assert payload["url"] == "https://example.com"
        mocks["reset_caches"].assert_called_once()

    def test_extract_metadata_only_fetch_failure(self, trafilatura_tools, mock_trafilatura_modules):
        """When the page cannot be fetched, an error string is returned."""
        mock_trafilatura_modules["fetch_url"].return_value = None

        result = trafilatura_tools.extract_metadata_only("https://example.com")

        assert "Error: Could not fetch content from URL" in result

    def test_extract_metadata_only_extraction_failure(self, trafilatura_tools, mock_trafilatura_modules):
        """A None metadata result is reported as an extraction error."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = "<html><body>Test content</body></html>"
        mocks["extract_metadata"].return_value = None

        result = trafilatura_tools.extract_metadata_only("https://example.com")

        assert "Error: Could not extract metadata" in result

    def test_extract_metadata_only_non_json_format(self, trafilatura_tools, mock_trafilatura_modules):
        """With as_json=False a plain string representation is returned."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = "<html><body>Test content</body></html>"
        mocks["extract_metadata"].return_value = create_mock_metadata_document()

        result = trafilatura_tools.extract_metadata_only("https://example.com", as_json=False)

        assert "Test Title" in result  # plain string representation
        assert not result.startswith("{")  # not JSON-encoded
class TestCrawlWebsiteMethod:
    """Tests for TrafilaturaTools.crawl_website."""

    @patch("agno.tools.trafilatura.SPIDER_AVAILABLE", True)
    def test_crawl_website_success(self, trafilatura_tools, mock_trafilatura_modules):
        """Crawl results are summarised as JSON with link lists and stats."""
        pending = ["https://example.com/page1", "https://example.com/page2"]
        discovered = ["https://example.com", "https://example.com/page1", "https://example.com/page2"]
        mock_trafilatura_modules["focused_crawler"].return_value = (pending, discovered)

        report = json.loads(trafilatura_tools.crawl_website("https://example.com"))

        assert report["homepage"] == "https://example.com"
        assert len(report["to_visit"]) == 2
        assert len(report["known_links"]) == 3
        assert report["stats"]["urls_to_visit"] == 2
        assert report["stats"]["known_links_count"] == 3
        mock_trafilatura_modules["reset_caches"].assert_called_once()

    @patch("agno.tools.trafilatura.SPIDER_AVAILABLE", False)
    def test_crawl_website_spider_unavailable(self, trafilatura_tools, mock_trafilatura_modules):
        """Without the spider extra, crawling reports an availability error."""
        result = trafilatura_tools.crawl_website("https://example.com")

        assert "Error: Web crawling functionality not available" in result

    @patch("agno.tools.trafilatura.SPIDER_AVAILABLE", True)
    def test_crawl_website_with_content_extraction(self, trafilatura_tools, mock_trafilatura_modules):
        """extract_content=True adds per-URL extracted text to the report."""
        mocks = mock_trafilatura_modules
        mocks["focused_crawler"].return_value = ([], ["https://example.com/page1"])
        mocks["fetch_url"].return_value = "<html><body>Content</body></html>"
        mocks["extract"].return_value = "Extracted content"

        report = json.loads(trafilatura_tools.crawl_website("https://example.com", extract_content=True))

        assert "extracted_content" in report
        assert report["extracted_content"]["https://example.com/page1"] == "Extracted content"
class TestHtmlToTextMethod:
    """Tests for TrafilaturaTools.html_to_text."""

    def test_html_to_text_success(self, trafilatura_tools, mock_trafilatura_modules):
        """HTML is converted via html2txt with clean=True by default."""
        mock_trafilatura_modules["html2txt"].return_value = "Converted text content"

        markup = "<html><body><h1>Title</h1><p>Paragraph</p></body></html>"
        output = trafilatura_tools.html_to_text(markup)

        assert output == "Converted text content"
        mock_trafilatura_modules["html2txt"].assert_called_once_with(markup, clean=True)
        mock_trafilatura_modules["reset_caches"].assert_called_once()

    def test_html_to_text_with_clean_false(self, trafilatura_tools, mock_trafilatura_modules):
        """clean=False is forwarded untouched to html2txt."""
        mock_trafilatura_modules["html2txt"].return_value = "Raw text content"

        markup = "<html><body>Content</body></html>"
        output = trafilatura_tools.html_to_text(markup, clean=False)

        assert output == "Raw text content"
        mock_trafilatura_modules["html2txt"].assert_called_once_with(markup, clean=False)

    def test_html_to_text_empty_result(self, trafilatura_tools, mock_trafilatura_modules):
        """An empty conversion result is reported as an error."""
        mock_trafilatura_modules["html2txt"].return_value = ""

        output = trafilatura_tools.html_to_text("<html></html>")

        assert "Error: Could not extract text from HTML content" in output

    def test_html_to_text_exception_handling(self, trafilatura_tools, mock_trafilatura_modules):
        """Conversion exceptions are caught and reported in the return value."""
        mock_trafilatura_modules["html2txt"].side_effect = Exception("Conversion error")

        output = trafilatura_tools.html_to_text("<html></html>")

        assert "Error converting HTML to text: Conversion error" in output
class TestExtractBatchMethod:
    """Tests for TrafilaturaTools.extract_batch."""

    def test_extract_batch_success(self, trafilatura_tools, mock_trafilatura_modules):
        """Every URL succeeding yields a fully successful batch report."""
        mocks = mock_trafilatura_modules
        mocks["fetch_url"].return_value = "<html><body>Content</body></html>"
        mocks["extract"].return_value = "Extracted content"

        batch = ["https://example1.com", "https://example2.com"]
        report = json.loads(trafilatura_tools.extract_batch(batch))

        assert report["total_urls"] == 2
        assert report["successful_extractions"] == 2
        assert report["failed_extractions"] == 0
        assert len(report["results"]) == 2
        mocks["reset_caches"].assert_called_once()

    def test_extract_batch_partial_failure(self, trafilatura_tools, mock_trafilatura_modules):
        """URLs that fail to fetch are counted and listed separately."""
        mocks = mock_trafilatura_modules
        # The first URL fetches fine; the second returns nothing.
        mocks["fetch_url"].side_effect = (
            lambda url: "<html><body>Content</body></html>" if "example1" in url else None
        )
        mocks["extract"].return_value = "Extracted content"

        batch = ["https://example1.com", "https://example2.com"]
        report = json.loads(trafilatura_tools.extract_batch(batch))

        assert report["total_urls"] == 2
        assert report["successful_extractions"] == 1
        assert report["failed_extractions"] == 1
        assert len(report["failed_urls"]) == 1
class TestUtilityMethods:
    """Tests for internal helpers of TrafilaturaTools."""

    def test_get_extraction_params_defaults(self, trafilatura_tools):
        """With no overrides, toolkit-level defaults are used."""
        params = trafilatura_tools._get_extraction_params()

        assert params["output_format"] == "txt"
        assert params["include_comments"] is True
        assert params["with_metadata"] is False

    def test_get_extraction_params_overrides(self, trafilatura_tools):
        """Explicit keyword overrides take precedence over defaults."""
        params = trafilatura_tools._get_extraction_params(
            output_format="json", include_comments=False, with_metadata=True
        )

        assert params["output_format"] == "json"
        assert params["include_comments"] is False
        assert params["with_metadata"] is True
class TestToolkitIntegration:
    """Tests for toolkit registration and naming."""

    def test_toolkit_registration_default(self, trafilatura_tools):
        """The default configuration registers every core tool."""
        registered = {func.name for func in trafilatura_tools.functions.values()}
        # crawl_website is only present when the spider extra is installed,
        # so it is deliberately not part of this check.
        assert {"extract_text", "extract_metadata_only", "html_to_text", "extract_batch"} <= registered

    def test_toolkit_registration_custom(self, custom_trafilatura_tools):
        """A custom configuration still registers every enabled tool."""
        registered = {func.name for func in custom_trafilatura_tools.functions.values()}
        assert {"extract_text", "extract_metadata_only", "html_to_text", "extract_batch"} <= registered

    def test_toolkit_name(self, trafilatura_tools):
        """The toolkit reports its canonical name."""
        assert trafilatura_tools.name == "trafilatura_tools"
# Allow running this test module directly (outside of a pytest invocation).
if __name__ == "__main__":
    pytest.main([__file__])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_trafilatura.py",
"license": "Apache License 2.0",
"lines": 371,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_output_model.py | from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.models.openai import OpenAIChat
from agno.run.agent import IntermediateRunContentEvent, RunContentEvent
def test_claude_with_openai_output_model():
    """Claude generates the content; OpenAI reformats it via output_model."""
    park_agent = Agent(
        model=Claude(id="claude-sonnet-4-20250514"),  # Content-generating model
        description="You are an expert on national parks and provide concise guides.",
        output_model=OpenAIChat(id="gpt-4o"),  # Model that parses the output
        telemetry=False,
    )
    response = park_agent.run("Tell me about Yosemite National Park.")

    # The run must produce a non-empty string.
    assert response.content is not None
    assert isinstance(response.content, str)
    assert len(response.content) > 0
    # A message history must exist with exactly one assistant message.
    assert response.messages is not None
    assert len(response.messages) > 0
    assistant_messages = [m for m in response.messages if m.role == "assistant"]
    assert len(assistant_messages) == 1
    # The run content mirrors the final assistant message.
    assert response.content == response.messages[-1].content
def test_openai_with_claude_output_model():
    """OpenAI generates the content; Claude reformats it via output_model."""
    park_agent = Agent(
        model=OpenAIChat(id="gpt-4o"),  # Content-generating model
        description="You are an expert on national parks and provide concise guides.",
        output_model=Claude(id="claude-sonnet-4-20250514"),  # Model that parses the output
        telemetry=False,
    )
    response = park_agent.run("Tell me about Yosemite National Park.")

    # The run must produce a non-empty string.
    assert response.content is not None
    assert isinstance(response.content, str)
    assert len(response.content) > 0
    # A message history must exist with exactly one assistant message.
    assert response.messages is not None
    assert len(response.messages) > 0
    assistant_messages = [m for m in response.messages if m.role == "assistant"]
    assert len(assistant_messages) == 1
    # The run content mirrors the final assistant message.
    assert response.content == response.messages[-1].content
async def test_openai_with_claude_output_model_async():
    """Async variant: OpenAI generates the content; Claude reformats it."""
    park_agent = Agent(
        model=OpenAIChat(id="gpt-4o"),  # Content-generating model
        description="You are an expert on national parks and provide concise guides.",
        output_model=Claude(id="claude-sonnet-4-20250514"),  # Model that parses the output
        telemetry=False,
    )
    response = await park_agent.arun("Tell me about Yosemite National Park.")

    # The run must produce a non-empty string.
    assert response.content is not None
    assert isinstance(response.content, str)
    assert len(response.content) > 0
    # A message history must exist with exactly one assistant message.
    assert response.messages is not None
    assert len(response.messages) > 0
    assistant_messages = [m for m in response.messages if m.role == "assistant"]
    assert len(assistant_messages) == 1
    # The run content mirrors the final assistant message.
    assert response.content == response.messages[-1].content
def test_claude_with_openai_output_model_stream(shared_db):
    """Streaming with an output model emits intermediate and final content events."""
    agent = Agent(
        model=Claude(id="claude-sonnet-4-20250514"),  # Content-generating model
        db=shared_db,
        description="You are an expert on national parks and provide concise guides.",
        output_model=OpenAIChat(id="gpt-4o"),  # Model that parses the output
        stream_events=True,
        telemetry=False,
    )
    stream = agent.run("Tell me about Yosemite National Park.", session_id="test_session_id", stream=True)

    saw_intermediate_content = False
    saw_final_content = False
    run_id = None
    for event in stream:
        if not run_id:
            run_id = event.run_id
        if isinstance(event, IntermediateRunContentEvent):
            assert isinstance(event.content, str)
            saw_intermediate_content = True
        elif isinstance(event, RunContentEvent):
            assert isinstance(event.content, str)
            saw_final_content = True

    # Both event types must have been emitted during the stream.
    assert saw_intermediate_content
    assert saw_final_content
    assert run_id is not None

    run_output = agent.get_run_output(session_id="test_session_id", run_id=run_id)

    # The persisted run output is populated correctly.
    assert run_output is not None
    assert run_output.content is not None
    assert isinstance(run_output.content, str)
    assert len(run_output.content) > 0
    assert run_output.messages is not None
    assert len(run_output.messages) > 0
    # Exactly one assistant message, matching the final content.
    assistant_messages = [m for m in run_output.messages if m.role == "assistant"]
    assert len(assistant_messages) == 1
    assert run_output.content == run_output.messages[-1].content
async def test_openai_with_claude_output_model_stream_async(shared_db):
    """Async streaming with an output model emits intermediate and final content events."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),  # Content-generating model
        db=shared_db,
        description="You are an expert on national parks and provide concise guides.",
        output_model=Claude(id="claude-sonnet-4-20250514"),  # Model that parses the output
        stream_events=True,
        telemetry=False,
    )

    saw_intermediate_content = False
    saw_final_content = False
    run_id = None
    async for event in agent.arun("Tell me about Yosemite National Park.", stream=True, session_id="test_session_id"):
        if not run_id:
            run_id = event.run_id
        if isinstance(event, IntermediateRunContentEvent):
            assert isinstance(event.content, str)
            saw_intermediate_content = True
        elif isinstance(event, RunContentEvent):
            assert isinstance(event.content, str)
            saw_final_content = True

    # Both event types must have been emitted during the stream.
    assert saw_intermediate_content
    assert saw_final_content
    assert run_id is not None

    run_output = agent.get_run_output(session_id="test_session_id", run_id=run_id)

    # The persisted run output is populated correctly.
    assert run_output is not None
    assert run_output.content is not None
    assert isinstance(run_output.content, str)
    assert len(run_output.content) > 0
    assert run_output.messages is not None
    assert len(run_output.messages) > 0
    # Exactly one assistant message, matching the final content.
    assistant_messages = [m for m in run_output.messages if m.role == "assistant"]
    assert len(assistant_messages) == 1
    assert run_output.content == run_output.messages[-1].content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_output_model.py",
"license": "Apache License 2.0",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_message_ordering.py | from agno.agent import Agent
from agno.models.openai import OpenAIChat
def test_message_ordering_run():
    """Historical messages must precede the current user message chronologically."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        user_id="test_user",
        session_id="test_session",
        add_history_to_context=True,
        telemetry=False,
    )
    # Two historical turns plus the new user message.
    conversation = [
        {"role": "user", "content": "What is 5 + 3?"},
        {"role": "assistant", "content": "5 + 3 = 8"},
        {"role": "user", "content": "and if I add 7 to that result?"},
    ]
    response = agent.run(input=conversation, session_id="test_session", user_id="test_user")

    result_messages = response.messages
    assert result_messages is not None
    assert len(result_messages) == 4
    # The three input messages come back first, in their original order,
    # each with an assigned id.
    for message, expected in zip(result_messages[:3], conversation):
        assert message.role == expected["role"]
        assert message.content == expected["content"]
        assert message.id is not None
    # The model's new reply is appended last.
    assert result_messages[3].role == "assistant"
    assert result_messages[3].id is not None
def test_message_ordering(shared_db):
    """Message ordering is preserved when the agent is backed by storage.

    Sends a five-turn conversation as input and verifies that the returned
    message list keeps every turn in its original chronological position.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        telemetry=False,
    )
    # Realistic conversation history
    historical_messages = [
        {"role": "user", "content": "Hello, I need help with math"},
        {"role": "assistant", "content": "I'd be happy to help you with math! What do you need assistance with?"},
        {"role": "user", "content": "Can you solve 15 * 7?"},
        {"role": "assistant", "content": "15 * 7 = 105"},
        {"role": "user", "content": "Great! Now what about 105 divided by 3?"},
    ]
    run_output = agent.run(
        input=historical_messages,
        session_id="test_session_storage",
        user_id="test_user",
    )
    messages = run_output.messages
    assert messages is not None
    assert len(messages) == 6

    # Verify chronological order is maintained. The final message is the new
    # assistant reply and is excluded from the comparison.
    expected_contents = [turn["content"] for turn in historical_messages]
    for index, (message, expected) in enumerate(zip(messages[:-1], expected_contents)):
        # Fix: the previous failure message printed the actual content where
        # the message position was intended, duplicating the "Got" value and
        # making mismatches impossible to locate.
        assert message.content == expected, (
            f"Message {index} content mismatch. Expected: {expected!r}, Got: {message.content!r}"
        )
def test_message_ordering_with_system_message():
    """The system message precedes history, which precedes the current turn."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        description="You are a helpful math assistant.",
        telemetry=False,
    )
    conversation = [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "2 + 2 = 4"},
        {"role": "user", "content": "What about 4 + 4?"},
    ]
    run_output = agent.run(input=conversation, session_id="test_session")

    messages = run_output.messages
    assert messages is not None
    assert len(messages) == 5
    # The system prompt comes first.
    assert messages[0].role == "system"
    assert messages[0].content is not None
    assert "You are a helpful math assistant." == messages[0].content
    # Then the conversation, in order: history first, the current turn last.
    for message, expected in zip(messages[1:4], conversation):
        assert message.role == expected["role"]
        assert message.content == expected["content"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_message_ordering.py",
"license": "Apache License 2.0",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_team_with_output_model.py | from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.models.openai import OpenAIChat
from agno.run.team import IntermediateRunContentEvent, RunContentEvent
from agno.team import Team
# Member agent: Claude writes the content and gpt-4o re-parses its output.
agent = Agent(
    model=Claude(id="claude-sonnet-4-20250514"),
    description="You are an expert on national parks and provide concise guides.",
    output_model=OpenAIChat(id="gpt-4o"),
    telemetry=False,
)

# Team under test, shared by every test in this module. It has a single
# member but is instructed to answer directly; streaming events are enabled
# so the stream tests can observe intermediate content.
team = Team(
    name="National Park Expert",
    members=[agent],
    output_model=OpenAIChat(id="gpt-4o"),
    instructions="You have no members, answer directly",
    description="You are an expert on national parks and provide concise guides.",
    stream_events=True,
    telemetry=False,
)
def test_team_with_output_model():
    """A synchronous team run returns non-empty content about the topic."""
    response = team.run("Tell me about Yosemite National Park.")

    assert response.content is not None
    assert isinstance(response.content, str)
    assert len(response.content) > 0
    assert response.messages is not None
    assert len(response.messages) > 0
    # The output model may reformat the text, so only check that the
    # topic actually appears in the answer.
    assert "Yosemite" in response.content
async def test_team_with_output_model_async():
    """An async team run returns non-empty content about the topic."""
    response = await team.arun("Tell me about Yosemite National Park.")

    assert response.content is not None
    assert isinstance(response.content, str)
    assert len(response.content) > 0
    assert response.messages is not None
    assert len(response.messages) > 0
    # The output model may reformat the text, so only check that the
    # topic actually appears in the answer.
    assert "Yosemite" in response.content
def test_team_with_output_model_stream():
    """A streamed team run must emit both intermediate and final content events.

    Fix: removed leftover debug ``print`` calls and replaced the weak
    ``if final_response:`` guard with an assertion — once a RunContentEvent
    has been seen, the final event must have been captured.
    """
    response = team.run("Tell me about Yosemite National Park.", stream=True)
    run_response_content_event: bool = False
    intermediate_run_response_content_event: bool = False
    final_response = None
    for event in response:
        if isinstance(event, RunContentEvent):
            run_response_content_event = True
            assert isinstance(event.content, str)
            final_response = event  # Capture the final content event
        if isinstance(event, IntermediateRunContentEvent):
            intermediate_run_response_content_event = True
            assert isinstance(event.content, str)

    # Both event types must have been emitted during the stream.
    assert run_response_content_event
    assert intermediate_run_response_content_event

    # Validate the final response content from the last content event.
    assert final_response is not None
    assert final_response.content is not None
    assert isinstance(final_response.content, str)
    assert len(final_response.content) > 0
async def test_team_with_output_model_stream_async():
    """An async streamed team run emits intermediate and final content events."""
    saw_final_content = False
    saw_intermediate_content = False
    last_content_event = None
    async for event in team.arun("Tell me about Yosemite National Park.", stream=True):
        if isinstance(event, RunContentEvent):
            saw_final_content = True
            assert isinstance(event.content, str)
            last_content_event = event  # remember the final content event
        if isinstance(event, IntermediateRunContentEvent):
            saw_intermediate_content = True
            assert isinstance(event.content, str)

    # Both event types must have been emitted during the stream.
    assert saw_final_content
    assert saw_intermediate_content
    # Validate the content carried by the last content event, if any.
    if last_content_event:
        assert last_content_event.content is not None
        assert isinstance(last_content_event.content, str)
        assert len(last_content_event.content) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_team_with_output_model.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_google_calendar.py | """Unit tests for Google Calendar Tools."""
import json
import os
import tempfile
from unittest.mock import MagicMock, Mock, patch
import pytest
from google.oauth2.credentials import Credentials
from agno.tools.google.calendar import GoogleCalendarTools
@pytest.fixture
def mock_credentials():
    """Return OAuth2 credentials stubbed as valid and unexpired."""
    creds = Mock(spec=Credentials)
    creds.valid = True
    creds.expired = False
    creds.to_json.return_value = '{"token": "test_token"}'
    return creds
@pytest.fixture
def mock_calendar_service():
    """Return a stand-in for the Google Calendar API service object."""
    return MagicMock()
@pytest.fixture
def calendar_tools(mock_credentials, mock_calendar_service):
    """Create GoogleCalendarTools instance with mocked dependencies."""
    # Patch both build and the authenticate decorator to completely bypass auth.
    # `authenticate` is replaced with an identity decorator so tool methods run
    # without triggering the OAuth flow; `build` returns the mock service so no
    # network call is ever made.
    with (
        patch("agno.tools.google.calendar.build") as mock_build,
        patch("agno.tools.google.calendar.authenticate", lambda func: func),
    ):
        mock_build.return_value = mock_calendar_service
        tools = GoogleCalendarTools(access_token="test_token")
        # Inject the mocks directly so the instance looks authenticated.
        tools.creds = mock_credentials
        tools.service = mock_calendar_service
        return tools
class TestGoogleCalendarToolsInitialization:
    """Tests for construction and configuration of GoogleCalendarTools."""

    def test_init_with_access_token(self):
        """An access token is stored; auth state stays unset until first use."""
        toolkit = GoogleCalendarTools(access_token="test_token")
        assert toolkit.access_token == "test_token"
        assert toolkit.calendar_id == "primary"
        assert toolkit.creds is None  # Not set until authentication
        assert toolkit.service is None

    def test_init_with_credentials_path(self):
        """A credentials file path is stored without being read eagerly."""
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
            json.dump({"installed": {"client_id": "test"}}, handle)
            creds_path = handle.name
        try:
            toolkit = GoogleCalendarTools(credentials_path=creds_path)
            assert toolkit.credentials_path == creds_path
            assert toolkit.calendar_id == "primary"
            assert toolkit.creds is None
            assert toolkit.service is None
        finally:
            os.unlink(creds_path)

    def test_init_missing_credentials(self):
        """Construction without any credentials succeeds; auth happens later."""
        toolkit = GoogleCalendarTools()
        assert toolkit.access_token is None
        assert toolkit.credentials_path is None
        assert toolkit.token_path == "token.json"  # default value

    def test_init_invalid_credentials_path(self):
        """A nonexistent credentials path is accepted at construction time."""
        toolkit = GoogleCalendarTools(credentials_path="./nonexistent.json")
        assert toolkit.credentials_path == "./nonexistent.json"
        assert toolkit.service is None

    def test_init_with_existing_token_path(self):
        """An existing token file path is stored as-is."""
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as handle:
            json.dump({"token": "test_token"}, handle)
            token_path = handle.name
        try:
            toolkit = GoogleCalendarTools(token_path=token_path)
            assert toolkit.token_path == token_path
            assert toolkit.calendar_id == "primary"
        finally:
            os.unlink(token_path)

    def test_init_with_custom_calendar_id(self):
        """A custom calendar ID overrides the 'primary' default."""
        toolkit = GoogleCalendarTools(access_token="test_token", calendar_id="custom@example.com")
        assert toolkit.calendar_id == "custom@example.com"
        assert toolkit.access_token == "test_token"

    def test_init_with_all_tools_registered(self):
        """Exactly the documented set of tools is registered, with no extras."""
        toolkit = GoogleCalendarTools(access_token="test_token")
        registered = [func.name for func in toolkit.functions.values()]
        expected_tools = [
            "list_events",
            "create_event",
            "update_event",
            "delete_event",
            "fetch_all_events",
            "find_available_slots",
            "list_calendars",
        ]
        for tool_name in expected_tools:
            assert tool_name in registered, f"Tool {tool_name} should be registered"
        # No additional tools beyond the expected set.
        assert len(registered) == len(expected_tools)
class TestAuthentication:
    """Tests for authentication-related configuration."""

    def test_auth_parameters_stored(self):
        """All auth-related constructor arguments are stored verbatim."""
        toolkit = GoogleCalendarTools(
            access_token="test_token", credentials_path="test_creds.json", token_path="test_token.json", oauth_port=9090
        )
        assert toolkit.access_token == "test_token"
        assert toolkit.credentials_path == "test_creds.json"
        assert toolkit.token_path == "test_token.json"
        assert toolkit.oauth_port == 9090

    def test_scopes_configuration(self):
        """Scopes default to read-only and can be overridden."""
        default_toolkit = GoogleCalendarTools(access_token="test_token")
        assert default_toolkit.scopes == ["https://www.googleapis.com/auth/calendar.readonly"]

        custom_scopes = ["https://www.googleapis.com/auth/calendar"]
        custom_toolkit = GoogleCalendarTools(access_token="test_token", scopes=custom_scopes)
        assert custom_toolkit.scopes == custom_scopes
class TestListEvents:
    """Test list_events method."""
    def test_list_events_success(self, calendar_tools, mock_calendar_service):
        """Test successful event listing."""
        mock_events = [{"id": "1", "summary": "Test Event 1"}, {"id": "2", "summary": "Test Event 2"}]
        # Stub the terminal execute() of the events().list() call chain.
        mock_calendar_service.events().list().execute.return_value = {"items": mock_events}
        result = calendar_tools.list_events(limit=2)
        result_data = json.loads(result)
        assert result_data == mock_events
        # Check that the service was called (may be called multiple times due to chaining)
        assert mock_calendar_service.events().list.call_count >= 1
    def test_list_events_no_events(self, calendar_tools, mock_calendar_service):
        """Test listing events when none exist."""
        mock_calendar_service.events().list().execute.return_value = {"items": []}
        result = calendar_tools.list_events()
        result_data = json.loads(result)
        # An empty calendar yields an informational message, not an error.
        assert result_data["message"] == "No upcoming events found."
    def test_list_events_with_start_date(self, calendar_tools, mock_calendar_service):
        """Test listing events with specific start date."""
        mock_events = [{"id": "1", "summary": "Test Event"}]
        mock_calendar_service.events().list().execute.return_value = {"items": mock_events}
        result = calendar_tools.list_events(start_date="2025-07-19T10:00:00")
        result_data = json.loads(result)
        assert result_data == mock_events
    def test_list_events_invalid_date_format(self, calendar_tools):
        """Test listing events with invalid date format."""
        result = calendar_tools.list_events(start_date="invalid-date")
        result_data = json.loads(result)
        # Bad input is reported in-band as a JSON error payload, not raised.
        assert "error" in result_data
        assert "Invalid date format" in result_data["error"]
    def test_list_events_http_error(self, calendar_tools, mock_calendar_service):
        """Test handling of HTTP errors."""
        from googleapiclient.errors import HttpError
        # Create a mock HttpError
        mock_response = Mock()
        mock_response.status = 403
        mock_response.reason = "Forbidden"
        http_error = HttpError(mock_response, b'{"error": {"message": "Forbidden"}}')
        mock_calendar_service.events().list().execute.side_effect = http_error
        result = calendar_tools.list_events()
        result_data = json.loads(result)
        # API failures are converted to a JSON error payload for the agent.
        assert "error" in result_data
        assert "An error occurred" in result_data["error"]
class TestCreateEvent:
    """Test create_event method."""
    def test_create_event_success(self, calendar_tools, mock_calendar_service):
        """Test successful event creation."""
        mock_event = {"id": "test_id", "summary": "Test Event"}
        # Stub the terminal execute() of the events().insert() call chain.
        mock_calendar_service.events().insert().execute.return_value = mock_event
        result = calendar_tools.create_event(
            start_date="2025-07-19T10:00:00",
            end_date="2025-07-19T11:00:00",
            title="Test Event",
            description="Test Description",
        )
        result_data = json.loads(result)
        assert result_data == mock_event
    def test_create_event_with_attendees(self, calendar_tools, mock_calendar_service):
        """Test event creation with attendees."""
        mock_event = {"id": "test_id", "summary": "Test Event"}
        mock_calendar_service.events().insert().execute.return_value = mock_event
        result = calendar_tools.create_event(
            start_date="2025-07-19T10:00:00",
            end_date="2025-07-19T11:00:00",
            title="Test Event",
            attendees=["test1@example.com", "test2@example.com"],
        )
        result_data = json.loads(result)
        assert result_data == mock_event
    def test_create_event_with_google_meet(self, calendar_tools, mock_calendar_service):
        """Test event creation with Google Meet link."""
        mock_event = {"id": "test_id", "summary": "Test Event"}
        mock_calendar_service.events().insert().execute.return_value = mock_event
        result = calendar_tools.create_event(
            start_date="2025-07-19T10:00:00",
            end_date="2025-07-19T11:00:00",
            title="Test Event",
            add_google_meet_link=True,
        )
        result_data = json.loads(result)
        assert result_data == mock_event
        # Verify conferenceDataVersion was set
        # (call_args[1] is the kwargs dict of the insert() call).
        call_args = mock_calendar_service.events().insert.call_args
        assert call_args[1]["conferenceDataVersion"] == 1
    def test_create_event_invalid_datetime(self, calendar_tools):
        """Test event creation with invalid datetime format."""
        result = calendar_tools.create_event(
            start_date="invalid-date", end_date="2025-07-19T11:00:00", title="Test Event"
        )
        result_data = json.loads(result)
        # Invalid input is reported in-band as a JSON error payload.
        assert "error" in result_data
        assert "Invalid datetime format" in result_data["error"]
class TestUpdateEvent:
    """Test update_event method."""
    def test_update_event_success(self, calendar_tools, mock_calendar_service):
        """Test successful event update."""
        existing_event = {
            "id": "test_id",
            "summary": "Old Title",
            "start": {"dateTime": "2025-07-19T10:00:00", "timeZone": "UTC"},
            "end": {"dateTime": "2025-07-19T11:00:00", "timeZone": "UTC"},
        }
        updated_event = existing_event.copy()
        updated_event["summary"] = "New Title"
        # update_event first fetches the event, then writes the merged copy back,
        # so both get() and update() call chains need stubbed results.
        mock_calendar_service.events().get().execute.return_value = existing_event
        mock_calendar_service.events().update().execute.return_value = updated_event
        result = calendar_tools.update_event(event_id="test_id", title="New Title")
        result_data = json.loads(result)
        assert result_data["summary"] == "New Title"
    def test_update_event_datetime(self, calendar_tools, mock_calendar_service):
        """Test updating event datetime."""
        existing_event = {
            "id": "test_id",
            "summary": "Test Event",
            "start": {"dateTime": "2025-07-19T10:00:00", "timeZone": "UTC"},
            "end": {"dateTime": "2025-07-19T11:00:00", "timeZone": "UTC"},
        }
        mock_calendar_service.events().get().execute.return_value = existing_event
        mock_calendar_service.events().update().execute.return_value = existing_event
        result = calendar_tools.update_event(
            event_id="test_id", start_date="2025-07-19T14:00:00", end_date="2025-07-19T15:00:00"
        )
        result_data = json.loads(result)
        # Only asserts the call succeeded; the stub echoes the original event.
        assert "error" not in result_data
class TestDeleteEvent:
    """Tests for the delete_event method."""

    def test_delete_event_success(self, calendar_tools, mock_calendar_service):
        """A deletion should report success with a confirmation message."""
        # The Calendar API returns an empty body on a successful delete.
        mock_calendar_service.events().delete().execute.return_value = None
        payload = json.loads(calendar_tools.delete_event(event_id="test_id"))
        assert payload["success"] is True
        assert "deleted successfully" in payload["message"]
class TestFetchAllEvents:
    """Tests for the fetch_all_events method."""

    def test_fetch_all_events_success(self, calendar_tools, mock_calendar_service):
        """A single page of events should be returned verbatim."""
        events = [{"id": "1", "summary": "Event 1"}, {"id": "2", "summary": "Event 2"}]
        mock_calendar_service.events().list().execute.return_value = {"items": events, "nextPageToken": None}
        assert json.loads(calendar_tools.fetch_all_events()) == events

    def test_fetch_all_events_with_pagination(self, calendar_tools, mock_calendar_service):
        """Events spread over two pages should be concatenated in order."""
        first_page = [{"id": "1", "summary": "Event 1"}]
        second_page = [{"id": "2", "summary": "Event 2"}]
        # side_effect yields one response per call, simulating page traversal.
        mock_calendar_service.events().list().execute.side_effect = [
            {"items": first_page, "nextPageToken": "token2"},
            {"items": second_page, "nextPageToken": None},
        ]
        combined = json.loads(calendar_tools.fetch_all_events())
        assert len(combined) == 2
        assert combined == first_page + second_page
class TestFindAvailableSlots:
    """Test find_available_slots method."""
    # NOTE: with stacked @patch decorators, the bottom-most decorator maps to
    # the first mock argument, so _get_working_hours -> mock_working_hours and
    # fetch_all_events -> mock_fetch.
    @patch.object(GoogleCalendarTools, "fetch_all_events")
    @patch.object(GoogleCalendarTools, "_get_working_hours")
    def test_find_available_slots_success(self, mock_working_hours, mock_fetch, calendar_tools):
        """Test successful finding of available slots."""
        # Mock working hours response
        mock_working_hours.return_value = json.dumps(
            {"start_hour": 9, "end_hour": 17, "timezone": "UTC", "locale": "en"}
        )
        # Mock no existing events
        mock_fetch.return_value = json.dumps([])
        result = calendar_tools.find_available_slots(
            start_date="2025-07-21", end_date="2025-07-21", duration_minutes=30
        )  # Monday, 30 min
        result_data = json.loads(result)
        # Only the response shape is asserted here, not specific slot values.
        assert "available_slots" in result_data
        assert "working_hours" in result_data
        assert "events_analyzed" in result_data
        assert isinstance(result_data["available_slots"], list)
    @patch.object(GoogleCalendarTools, "fetch_all_events")
    @patch.object(GoogleCalendarTools, "_get_working_hours")
    def test_find_available_slots_with_busy_times(self, mock_working_hours, mock_fetch, calendar_tools):
        """Test finding available slots with existing events."""
        # Mock working hours response
        mock_working_hours.return_value = json.dumps(
            {"start_hour": 9, "end_hour": 17, "timezone": "UTC", "locale": "en"}
        )
        # Mock existing event that blocks 10:30-11:30 AM (shorter busy period)
        existing_events = [
            {"start": {"dateTime": "2025-07-19T10:30:00+00:00"}, "end": {"dateTime": "2025-07-19T11:30:00+00:00"}}
        ]
        mock_fetch.return_value = json.dumps(existing_events)
        result = calendar_tools.find_available_slots(
            start_date="2025-07-19", end_date="2025-07-19", duration_minutes=30
        )
        result_data = json.loads(result)
        assert "available_slots" in result_data
        assert "working_hours" in result_data
        assert "events_analyzed" in result_data
        # The single busy event must have been taken into account.
        assert result_data["events_analyzed"] == 1
        # Check that the response structure is correct (may or may not have slots)
        assert isinstance(result_data["available_slots"], list)
    @patch.object(GoogleCalendarTools, "fetch_all_events")
    @patch.object(GoogleCalendarTools, "_get_working_hours")
    def test_find_available_slots_guarantees_slots(self, mock_working_hours, mock_fetch, calendar_tools):
        """Test finding available slots when there should definitely be some."""
        # Mock working hours response
        mock_working_hours.return_value = json.dumps(
            {"start_hour": 9, "end_hour": 17, "timezone": "UTC", "locale": "en"}
        )
        # Mock no existing events (completely free day)
        mock_fetch.return_value = json.dumps([])
        result = calendar_tools.find_available_slots(
            start_date="2025-07-21",
            end_date="2025-07-21",
            duration_minutes=30,  # Monday
        )
        result_data = json.loads(result)
        assert "available_slots" in result_data
        assert "working_hours" in result_data
        assert "events_analyzed" in result_data
        assert result_data["events_analyzed"] == 0
        # With no events and a full working day, we should have multiple slots
        slots = result_data["available_slots"]
        assert isinstance(slots, list)
        # Should have many 30-minute slots between 9 AM and 5 PM
        assert len(slots) >= 10  # Conservative estimate
    def test_find_available_slots_invalid_date(self, calendar_tools):
        """Test finding available slots with invalid date format."""
        result = calendar_tools.find_available_slots(
            start_date="invalid-date", end_date="2025-07-19", duration_minutes=60
        )
        result_data = json.loads(result)
        # The datetime.fromisoformat error message is surfaced in the payload.
        assert "error" in result_data
        assert "Invalid isoformat string" in result_data["error"]
class TestListCalendars:
    """Test list_calendars method."""
    def test_list_calendars_success(self, calendar_tools, mock_calendar_service):
        """Test successful calendar listing."""
        # Payload mirrors the Calendar API calendarList.list response shape.
        mock_calendars = {
            "items": [
                {
                    "id": "primary",
                    "summary": "John Doe",
                    "description": "Personal calendar",
                    "primary": True,
                    "accessRole": "owner",
                    "backgroundColor": "#ffffff",
                },
                {
                    "id": "work@company.com",
                    "summary": "Work Calendar",
                    "description": "Company work calendar",
                    "primary": False,
                    "accessRole": "writer",
                    "backgroundColor": "#4285f4",
                },
            ]
        }
        mock_calendar_service.calendarList().list().execute.return_value = mock_calendars
        result = calendar_tools.list_calendars()
        result_data = json.loads(result)
        assert "calendars" in result_data
        assert len(result_data["calendars"]) == 2
        # current_default reflects the toolkit's configured calendar_id.
        assert result_data["current_default"] == "primary"
        # Check calendar data structure
        # (API fields are renamed: summary -> name, accessRole -> access_role).
        primary_cal = result_data["calendars"][0]
        assert primary_cal["id"] == "primary"
        assert primary_cal["name"] == "John Doe"
        assert primary_cal["primary"] is True
        assert primary_cal["access_role"] == "owner"
    def test_list_calendars_http_error(self, calendar_tools, mock_calendar_service):
        """Test handling of HTTP errors in list_calendars."""
        from googleapiclient.errors import HttpError
        mock_response = Mock()
        mock_response.status = 403
        mock_response.reason = "Forbidden"
        http_error = HttpError(mock_response, b'{"error": {"message": "Forbidden"}}')
        mock_calendar_service.calendarList().list().execute.side_effect = http_error
        result = calendar_tools.list_calendars()
        result_data = json.loads(result)
        # API failures are converted to a JSON error payload for the agent.
        assert "error" in result_data
        assert "An error occurred" in result_data["error"]
class TestErrorHandling:
    """Sanity checks that the shared fixtures wire everything together."""

    def test_method_integration_works(self, calendar_tools):
        """The fixture should yield a fully wired toolkit (service + creds present)."""
        assert calendar_tools.creds is not None
        assert calendar_tools.service is not None
        assert calendar_tools.calendar_id == "primary"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_google_calendar.py",
"license": "Apache License 2.0",
"lines": 407,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/agent/test_agent_knowledge.py | import pytest
from agno.agent import Agent
# Create a mock knowledge object
class MockKnowledge:
    """Minimal stand-in for an agno Knowledge object.

    Mirrors the small surface the agent touches in these tests:
    ``max_results``, ``vector_db``, filter validation (sync and async),
    and async search.
    """

    def __init__(self):
        # Default search size; asearch asserts callers fall back to this.
        self.max_results = 5
        self.vector_db = None

    def validate_filters(self, filters):
        """Return (valid_filters, invalid_keys); None means "no filters"."""
        return filters or {}, []

    async def avalidate_filters(self, filters):
        """Async variant of validate_filters with the same contract."""
        return filters or {}, []

    async def asearch(self, query, max_results, filters):
        # Verify the caller propagated the knowledge default instead of a
        # hard-coded value (was `== 5`, duplicating self.max_results).
        assert max_results == self.max_results
        return []
@pytest.mark.asyncio
async def test_agent_aget_relevant_docs_from_knowledge_with_none_num_documents():
    """num_documents=None must fall back to the knowledge default when a retriever is set."""
    docs = [{"content": "test document"}]

    def fake_retriever(agent, query, num_documents, **kwargs):
        # None should have been replaced by the knowledge default (5).
        assert num_documents == 5
        return docs

    agent = Agent()
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = fake_retriever  # type: ignore

    result = await agent.aget_relevant_docs_from_knowledge(query="test query", num_documents=None)
    assert result == docs
@pytest.mark.asyncio
async def test_agent_aget_relevant_docs_from_knowledge_with_specific_num_documents():
    """An explicit num_documents must be forwarded unchanged to the retriever."""
    docs = [{"content": "test document"}]

    def fake_retriever(agent, query, num_documents, **kwargs):
        # The explicit value must be passed through untouched.
        assert num_documents == 10
        return docs

    agent = Agent()
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = fake_retriever  # type: ignore

    result = await agent.aget_relevant_docs_from_knowledge(query="test query", num_documents=10)
    assert result == docs
@pytest.mark.asyncio
async def test_agent_aget_relevant_docs_from_knowledge_without_retriever():
    """Without a retriever the agent falls back to knowledge.asearch."""
    agent = Agent()
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = None  # type: ignore

    docs = await agent.aget_relevant_docs_from_knowledge(query="test query", num_documents=None)
    # MockKnowledge.asearch returns an empty list, which the agent maps to None.
    assert docs is None
def test_agent_get_relevant_docs_from_knowledge_with_none_num_documents():
    """Sync variant: num_documents=None must fall back to the knowledge default."""
    docs = [{"content": "test document"}]

    def fake_retriever(agent, query, num_documents, **kwargs):
        # None should have been replaced by the knowledge default (5).
        assert num_documents == 5
        return docs

    agent = Agent()
    agent.knowledge = MockKnowledge()  # type: ignore
    agent.knowledge_retriever = fake_retriever  # type: ignore

    result = agent.get_relevant_docs_from_knowledge(query="test query", num_documents=None)
    assert result == docs
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/agent/test_agent_knowledge.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_jina.py | from unittest.mock import MagicMock, patch
import httpx
import pytest
from agno.tools.jina import JinaReaderTools, JinaReaderToolsConfig
@pytest.fixture()
def jina_tools():
    """JinaReaderTools instance with a dummy API key for offline tests."""
    return JinaReaderTools(api_key="test_api_key")
@pytest.fixture
def sample_read_url_response():
    """Sample response for read_url function"""
    # Mirrors the JSON envelope r.jina.ai returns for a single page read.
    return {
        "code": 200,
        "status": 20000,
        "data": {
            "title": "Example Domain",
            "description": "",
            "url": "https://example.com/",
            "content": "This domain is for use in illustrative examples in documents. You may use this domain in literature without prior coordination or asking for permission.\n\n[More information...](https://www.iana.org/domains/example)",
            "publishedTime": "Mon, 13 Jan 2025 20:11:20 GMT",
            "metadata": {"viewport": "width=device-width, initial-scale=1"},
            "external": {},
            "warning": "This is a cached snapshot of the original page, consider retry with caching opt-out.",
            "usage": {"tokens": 42},
        },
        "meta": {"usage": {"tokens": 42}},
    }
@pytest.fixture
def sample_search_query_response():
    """Sample response for search_query function"""
    # Mirrors the JSON envelope s.jina.ai returns: "data" is a list of hits.
    return {
        "code": 200,
        "status": 20000,
        "data": [
            {
                "title": "Berita Terkini Nasional - Politik - CNN Indonesia",
                "url": "https://www.cnnindonesia.com/nasional/politik",
                "description": "Istana Pastikan Undang SBY, Megawati dan Jokowi di HUT ke-80 RI · PDIP Akan Gelar Kongres di Bali Awal Agustus? · Erika Carlina Laporkan DJ Panda soal Dugaan",
                "content": "",
                "usage": {"tokens": 1000},
            }
        ],
        "meta": {"usage": {"tokens": 10000}},
    }
def test_config_default_values():
    """A bare config should carry the documented defaults."""
    cfg = JinaReaderToolsConfig()
    assert cfg.api_key is None
    assert cfg.timeout is None
    assert cfg.search_query_content is False
    assert cfg.max_content_length == 10000
    # URL fields are structured objects; compare their string forms.
    assert str(cfg.base_url) == "https://r.jina.ai/"
    assert str(cfg.search_url) == "https://s.jina.ai/"
def test_config_custom_values():
    """Explicit constructor values should override every default."""
    cfg = JinaReaderToolsConfig(
        api_key="test_key",
        base_url="https://custom.r.jina.ai/",
        search_url="https://custom.s.jina.ai/",
        max_content_length=5000,
        timeout=30,
    )
    assert cfg.api_key == "test_key"
    assert cfg.max_content_length == 5000
    assert cfg.timeout == 30
    assert str(cfg.base_url) == "https://custom.r.jina.ai/"
    assert str(cfg.search_url) == "https://custom.s.jina.ai/"
def test_init_with_api_key():
    """An explicitly supplied API key is stored on the toolkit config."""
    tools = JinaReaderTools(api_key="test_key")
    assert tools.config.api_key == "test_key"
def test_init_with_env_var(monkeypatch):
    """The JINA_API_KEY environment variable is picked up when no key is given."""
    monkeypatch.setenv("JINA_API_KEY", "env_test_key")
    # api_key=None bypasses the import-time default so the runtime getenv fallback runs.
    reader = JinaReaderTools(api_key=None)
    assert reader.config.api_key == "env_test_key"
def test_init_without_api_key(monkeypatch):
    """Construction succeeds with no key at all; the config just stays None."""
    monkeypatch.delenv("JINA_API_KEY", raising=False)
    reader = JinaReaderTools(api_key=None)
    assert reader.config.api_key is None
def test_init_with_custom_config():
    """Custom constructor values should land on the toolkit's config object."""
    reader = JinaReaderTools(
        api_key="test_key",
        base_url="https://custom.r.jina.ai/",
        search_url="https://custom.s.jina.ai/",
        max_content_length=5000,
        timeout=30,
    )
    cfg = reader.config
    assert cfg.api_key == "test_key"
    assert cfg.max_content_length == 5000
    assert cfg.timeout == 30
    assert str(cfg.base_url) == "https://custom.r.jina.ai/"
    assert str(cfg.search_url) == "https://custom.s.jina.ai/"
def test_init_tools_selection_read_only():
    """Only read_url should be registered when search is disabled."""
    reader = JinaReaderTools(api_key="test_key", enable_read_url=True, enable_search_query=False)
    registered = [tool.__name__ for tool in reader.tools]
    assert registered == ["read_url"]
def test_init_tools_selection_search_only():
    """Only search_query should be registered when reading is disabled."""
    reader = JinaReaderTools(api_key="test_key", enable_read_url=False, enable_search_query=True)
    registered = [tool.__name__ for tool in reader.tools]
    assert registered == ["search_query"]
def test_init_tools_selection_both():
    """Enabling both flags registers both tools."""
    reader = JinaReaderTools(api_key="test_key", enable_read_url=True, enable_search_query=True)
    assert len(reader.tools) == 2
    names = {tool.__name__ for tool in reader.tools}
    assert "read_url" in names
    assert "search_query" in names
def test_init_tools_selection_none():
    """Disabling both flags leaves no tools registered."""
    reader = JinaReaderTools(api_key="test_key", enable_read_url=False, enable_search_query=False)
    assert not reader.tools
@patch("agno.tools.jina.httpx.get")
def test_read_url_successful(mock_httpx_get, sample_read_url_response):
    """Test successful URL reading"""
    # Setup mock response
    mock_response = MagicMock()
    mock_response.raise_for_status.return_value = None
    mock_response.json.return_value = sample_read_url_response
    mock_httpx_get.return_value = mock_response
    tools = JinaReaderTools(api_key="test_key")
    result = tools.read_url("https://example.com")
    # Verify the call
    # read_url prepends the reader base URL to the target URL.
    expected_url = f"{tools.config.base_url}https://example.com"
    mock_httpx_get.assert_called_once_with(expected_url, headers=tools._get_headers())
    # Verify result contains the response data
    assert str(sample_read_url_response) in result
@patch("agno.tools.jina.httpx.get")
@patch("agno.tools.jina.logger")
def test_read_url_http_error(mock_logger, mock_httpx_get):
    """An HTTP status error should be reported in the result and logged once."""
    mock_httpx_get.side_effect = httpx.HTTPStatusError("HTTP Error", request=MagicMock(), response=MagicMock())
    outcome = JinaReaderTools(api_key="test_key").read_url("https://example.com")
    assert "Error reading URL" in outcome
    assert "HTTP Error" in outcome
    mock_logger.error.assert_called_once()
@patch("agno.tools.jina.httpx.get")
@patch("agno.tools.jina.logger")
def test_read_url_connection_error(mock_logger, mock_httpx_get):
    """A connection failure should be reported in the result and logged once."""
    mock_httpx_get.side_effect = httpx.ConnectError("Connection failed")
    outcome = JinaReaderTools(api_key="test_key").read_url("https://example.com")
    assert "Error reading URL" in outcome
    assert "Connection failed" in outcome
    mock_logger.error.assert_called_once()
@patch("agno.tools.jina.httpx.get")
def test_read_url_with_truncation(mock_httpx_get):
    """A payload far larger than the configured limit must be cut short."""
    oversized_payload = {"data": "x" * 15000}
    stub_response = MagicMock()
    stub_response.raise_for_status.return_value = None
    stub_response.json.return_value = oversized_payload
    mock_httpx_get.return_value = stub_response
    reader = JinaReaderTools(api_key="test_key", max_content_length=1000)
    outcome = reader.read_url("https://example.com")
    suffix = "... (content truncated)"
    assert suffix in outcome
    assert len(outcome) <= 1000 + len(suffix)
@patch("agno.tools.jina.httpx.post")
def test_search_query_successful(mock_httpx_post, sample_search_query_response):
    """Test successful search query"""
    # Setup mock response
    mock_response = MagicMock()
    mock_response.raise_for_status.return_value = None
    mock_response.json.return_value = sample_search_query_response
    mock_httpx_post.return_value = mock_response
    tools = JinaReaderTools(api_key="test_key", enable_search_query=True)
    result = tools.search_query("test query")
    # Verify the call
    # Rebuild the headers the toolkit is expected to have sent.
    expected_headers = tools._get_headers()
    if not tools.config.search_query_content:
        expected_headers["X-Respond-With"] = "no-content"  # to avoid returning full content in search results
    expected_body = {"q": "test query"}
    mock_httpx_post.assert_called_once_with(str(tools.config.search_url), headers=expected_headers, json=expected_body)
    # Verify result contains the response data
    assert str(sample_search_query_response) in result
@patch("agno.tools.jina.httpx.post")
@patch("agno.tools.jina.logger")
def test_search_query_http_error(mock_logger, mock_httpx_post):
    """An HTTP status error during search should be reported and logged once."""
    mock_httpx_post.side_effect = httpx.HTTPStatusError("HTTP Error", request=MagicMock(), response=MagicMock())
    outcome = JinaReaderTools(api_key="test_key", enable_search_query=True).search_query("test query")
    assert "Error performing search" in outcome
    assert "HTTP Error" in outcome
    mock_logger.error.assert_called_once()
@patch("agno.tools.jina.httpx.post")
def test_search_query_with_truncation(mock_httpx_post):
    """Oversized search results must be cut down to the configured limit."""
    oversized_payload = {"data": [{"content": "x" * 15000}]}
    stub_response = MagicMock()
    stub_response.raise_for_status.return_value = None
    stub_response.json.return_value = oversized_payload
    mock_httpx_post.return_value = stub_response
    reader = JinaReaderTools(api_key="test_key", enable_search_query=True, max_content_length=1000)
    outcome = reader.search_query("test query")
    suffix = "... (content truncated)"
    assert suffix in outcome
    assert len(outcome) <= 1000 + len(suffix)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_jina.py",
"license": "Apache License 2.0",
"lines": 204,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/litellm/test_multimodal.py | from typing import Any
import requests
from agno.agent.agent import Agent
from agno.media import Audio, Image
from agno.models.litellm import LiteLLM
from agno.tools.websearch import WebSearchTools
def _get_audio_input() -> bytes | Any:
    """Download a sample WAV file and return its raw bytes.

    Raises an HTTP error if the download fails. (Previous docstring claimed
    a base64-encoded string; the function returns ``response.content`` bytes.)
    """
    url = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
    response = requests.get(url)
    response.raise_for_status()
    return response.content
def test_image_input():
    """LiteLLM should describe an image URL and use web search alongside it."""
    bridge_url = "https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
    agent = Agent(
        model=LiteLLM(id="gpt-4o"),
        tools=[WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    result = agent.run(
        "Tell me about this image and give me the latest news about it.",
        images=[Image(url=bridge_url)],
    )
    # The model should recognize the Golden Gate Bridge.
    assert "golden" in result.content.lower()
def test_audio_input_bytes():
    """LiteLLM should accept raw audio bytes and produce a text answer."""
    # Download the sample clip and hand the raw bytes to the agent.
    wav_bytes = _get_audio_input()
    agent = Agent(
        model=LiteLLM(id="gpt-4o-audio-preview"),
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is in this audio?", audio=[Audio(content=wav_bytes, format="wav")])
    assert result.content is not None
def test_audio_input_url():
    """LiteLLM should accept an audio URL and produce a text answer."""
    clip_url = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
    agent = Agent(
        model=LiteLLM(id="gpt-4o-audio-preview"),
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is in this audio?", audio=[Audio(url=clip_url)])
    assert result.content is not None
def test_single_image_simple():
    """LiteLLM should return a non-empty description for a single image."""
    bridge_url = "https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
    agent = Agent(
        model=LiteLLM(id="gpt-4o"),
        markdown=True,
        telemetry=False,
    )
    result = agent.run(
        "What do you see in this image?",
        images=[Image(url=bridge_url)],
    )
    assert result.content is not None
    assert len(result.content) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/litellm/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/litellm_openai/test_multimodal.py | from typing import Any, Union
import requests
from agno.agent.agent import Agent
from agno.media import Audio, Image
from agno.models.litellm import LiteLLMOpenAI
from agno.tools.websearch import WebSearchTools
def _get_audio_input() -> Union[bytes, Any]:
    """Download a sample WAV file and return its raw bytes.

    Raises an HTTP error if the download fails. (Previous docstring claimed
    a base64-encoded string; the function returns ``response.content`` bytes.)
    """
    url = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
    response = requests.get(url)
    response.raise_for_status()
    return response.content
def test_image_input():
    """LiteLLMOpenAI should describe an image URL and use web search alongside it."""
    bridge_url = "https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
    agent = Agent(
        model=LiteLLMOpenAI(id="gpt-4o"),
        tools=[WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    result = agent.run(
        "Tell me about this image and give me the latest news about it.",
        images=[Image(url=bridge_url)],
    )
    # The model should recognize the Golden Gate Bridge.
    assert "golden" in result.content.lower()
def test_audio_input_bytes():
    """LiteLLMOpenAI should accept raw audio bytes and produce a text answer."""
    # Download the sample clip and hand the raw bytes to the agent.
    wav_bytes = _get_audio_input()
    agent = Agent(
        model=LiteLLMOpenAI(id="gpt-4o-audio-preview"),
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is in this audio?", audio=[Audio(content=wav_bytes, format="wav")])
    assert result.content is not None
def test_audio_input_url():
    """LiteLLMOpenAI should accept an audio URL and produce a text answer."""
    clip_url = "https://openaiassets.blob.core.windows.net/$web/API/docs/audio/alloy.wav"
    agent = Agent(
        model=LiteLLMOpenAI(id="gpt-4o-audio-preview"),
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is in this audio?", audio=[Audio(url=clip_url)])
    assert result.content is not None
def test_single_image_simple():
    """LiteLLMOpenAI should return a non-empty description for a single image."""
    bridge_url = "https://upload.wikimedia.org/wikipedia/commons/0/0c/GoldenGateBridge-001.jpg"
    agent = Agent(
        model=LiteLLMOpenAI(id="gpt-4o"),
        markdown=True,
        telemetry=False,
    )
    result = agent.run(
        "What do you see in this image?",
        images=[Image(url=bridge_url)],
    )
    assert result.content is not None
    assert len(result.content) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/litellm_openai/test_multimodal.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/models/morph.py | import os
from os import getenv
from textwrap import dedent
from typing import Optional
from agno.tools import Toolkit
from agno.utils.log import log_debug, log_error
try:
from openai import OpenAI
except ImportError:
raise ImportError("`openai` not installed. Please install using `pip install openai`")
class MorphTools(Toolkit):
"""Tools for interacting with Morph's Fast Apply API for code editing"""
    def __init__(
        self,
        api_key: Optional[str] = None,
        base_url: str = "https://api.morphllm.com/v1",
        instructions: Optional[str] = None,
        add_instructions: bool = True,
        model: str = "morph-v3-large",
        **kwargs,
    ):
        """Initialize Morph Fast Apply tools.

        Args:
            api_key: Morph API key. If not provided, will look for MORPH_API_KEY environment variable.
            base_url: The base URL for the Morph API.
            instructions: Toolkit instructions for the agent; defaults to DEFAULT_INSTRUCTIONS.
            add_instructions: Whether to add the instructions to the agent's prompt.
            model: The Morph model to use. Options:
                - "morph-v3-fast" (4500+ tok/sec, 96% accuracy)
                - "morph-v3-large" (2500+ tok/sec, 98% accuracy)
                - "auto" (automatic selection)
            **kwargs: Additional arguments to pass to Toolkit.

        Raises:
            ValueError: If no API key is given and MORPH_API_KEY is unset.
        """
        # Set up instructions
        # NOTE(review): DEFAULT_INSTRUCTIONS is not defined in this portion of
        # the file — presumably a class attribute declared elsewhere; confirm.
        if instructions is None:
            self.instructions = self.DEFAULT_INSTRUCTIONS
        else:
            self.instructions = instructions
        super().__init__(
            name="morph_tools",
            tools=[self.edit_file],
            instructions=self.instructions,
            add_instructions=add_instructions,
            **kwargs,
        )
        # Fail fast: the toolkit cannot call the API without credentials.
        self.api_key = api_key or getenv("MORPH_API_KEY")
        if not self.api_key:
            raise ValueError("MORPH_API_KEY not set. Please set the MORPH_API_KEY environment variable.")
        self.base_url = base_url
        self.model = model
        # Lazily-created OpenAI-compatible client; see _get_client().
        self._morph_client: Optional[OpenAI] = None
def _get_client(self):
"""Get or create the Morph OpenAI client."""
if self._morph_client is None:
self._morph_client = OpenAI(
api_key=self.api_key,
base_url=self.base_url,
)
return self._morph_client
def edit_file(
self,
target_file: str,
instructions: str,
code_edit: str,
original_code: Optional[str] = None,
) -> str:
"""
Apply code edits to a target file using Morph's Fast Apply API.
This function reads the specified file, sends its content along with
editing instructions and code edits to Morph's API, and writes the
resulting code back to the file. A backup of the original file is
created before writing changes.
Args:
target_file (str): Path to the file to be edited.
instructions (str): High-level instructions describing the intended change.
code_edit (str): Specific code edit or change to apply.
original_code (Optional[str], optional): Original content of the file.
If not provided, the function reads from target_file.
Returns:
str: Result message indicating success or failure, and details about
the backup and any errors encountered.
"""
try:
# Always read the actual file content for backup purposes
actual_file_content = None
if os.path.exists(target_file):
try:
with open(target_file, "r", encoding="utf-8") as f:
actual_file_content = f.read()
except Exception as e:
return f"Error reading {target_file} for backup: {e}"
else:
return f"Error: File {target_file} does not exist."
# Use provided original_code or fall back to file content
code_to_process = original_code if original_code is not None else actual_file_content
# Format the message for Morph's Fast Apply API
content = f"<instruction>{instructions}</instruction>\n<code>{code_to_process}</code>\n<update>{code_edit}</update>"
log_debug(f"Input to Morph: {content}")
client = self._get_client()
response = client.chat.completions.create(
model=self.model,
messages=[
{
"role": "user",
"content": content,
}
],
)
if response.choices and response.choices[0].message.content:
final_code = response.choices[0].message.content
try:
backup_file = f"{target_file}.backup"
with open(backup_file, "w", encoding="utf-8") as f:
f.write(actual_file_content)
# Write the new code
with open(target_file, "w", encoding="utf-8") as f:
f.write(final_code)
return f"Successfully applied edit to {target_file} using Morph Fast Apply! Original content backed up as {backup_file}"
except Exception as e:
return f"Successfully applied edit but failed to write back to {target_file}: {e}"
else:
return f"Failed to apply edit to {target_file}: No response from Morph API"
except Exception as e:
log_error(f"Failed to apply edit using Morph Fast Apply: {e}")
return f"Failed to apply edit to {target_file}: {e}"
DEFAULT_INSTRUCTIONS = dedent("""\
You have access to Morph Fast Apply for ultra-fast code editing with 98% accuracy at 2500+ tokens/second.
## How to use the edit_file tool:
**Critical Requirements:**
1. **Instructions Parameter**: Generate clear first-person instructions describing what you're doing
- Example: "I am adding type hints to all functions and methods"
- Example: "I am refactoring the error handling to use try-catch blocks"
2. **Code Edit Parameter**: Specify ONLY the lines you want to change
- Use `# ... existing code ...` (or `// ... existing code ...` for JS/Java) to represent unchanged sections
- NEVER write out unchanged code in the code_edit parameter
- Include sufficient context around changes to resolve ambiguity
3. **Single Edit Call**: Make ALL edits to a file in a single edit_file call. The apply model can handle many distinct edits at once.
**Example Format:**
```
# ... existing code ...
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
return a + b
# ... existing code ...
def multiply(x: int, y: int) -> int:
\"\"\"Multiply two numbers.\"\"\"
return x * y
# ... existing code ...
```
**Important Guidelines:**
- Bias towards repeating as few lines as possible while conveying the change clearly
- Each edit should contain sufficient context of unchanged lines around the code you're editing
- DO NOT omit spans of pre-existing code without using the `# ... existing code ...` comment
- If deleting a section, provide context before and after to clearly indicate the deletion
- The tool automatically creates backup files before applying changes\
""")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/models/morph.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/models/test_morph.py | from unittest.mock import MagicMock, mock_open, patch
import pytest
from agno.tools.models.morph import MorphTools
# Fixture for mock OpenAI client
@pytest.fixture
def mock_openai_client():
    """A MagicMock standing in for the OpenAI-compatible Morph client."""
    return MagicMock()
# Fixture for mock MorphTools with mock client
@pytest.fixture
def mock_morph_tools(mock_openai_client):
    """A MorphTools instance whose API client is pre-replaced by a mock."""
    with patch("agno.tools.models.morph.OpenAI", return_value=mock_openai_client):
        toolkit = MorphTools(api_key="fake_test_key")
        toolkit._morph_client = mock_openai_client
        return toolkit
# Fixture for successful API response
@pytest.fixture
def mock_successful_response():
    """A chat-completion response whose first choice carries merged code."""
    merged_code = """def add(a: int, b: int) -> int:
    \"\"\"Add two numbers together.\"\"\"
    return a + b
def multiply(x: int, y: int) -> int:
    \"\"\"Multiply two numbers.\"\"\"
    result = x * y
    return result"""
    choice = MagicMock()
    choice.message.content = merged_code
    response = MagicMock()
    response.choices = [choice]
    return response
# Fixture for failed API response (no content)
@pytest.fixture
def mock_failed_response():
    """A chat-completion response containing no choices at all."""
    response = MagicMock()
    response.choices = []
    return response
# Test Initialization
def test_morph_tools_init_with_api_key_arg():
    """An explicitly passed API key and the default settings are stored."""
    with patch("agno.tools.models.morph.OpenAI", return_value=MagicMock()):
        toolkit = MorphTools(api_key="test_api_key_arg")
    assert toolkit.api_key == "test_api_key_arg"
    assert toolkit.base_url == "https://api.morphllm.com/v1"
    assert toolkit.model == "morph-v3-large"
    assert toolkit._morph_client is None  # client is created lazily
def test_morph_tools_init_with_env_var():
    """The MORPH_API_KEY environment variable is used when no key is passed."""
    with patch("agno.tools.models.morph.getenv", return_value="test_api_key_env") as getenv_mock:
        toolkit = MorphTools()
    assert toolkit.api_key == "test_api_key_env"
    getenv_mock.assert_called_once_with("MORPH_API_KEY")
def test_morph_tools_init_no_api_key():
    """Construction without any API key raises ValueError."""
    with patch("agno.tools.models.morph.getenv", return_value=None) as getenv_mock, pytest.raises(
        ValueError, match="MORPH_API_KEY not set"
    ):
        MorphTools()
    getenv_mock.assert_called_once_with("MORPH_API_KEY")
# Test edit_file method - Success cases
def test_edit_file_success_with_file_reading(mock_morph_tools, mock_successful_response):
    """A normal edit reads the target, calls the API, and writes backup + file."""
    target_file = "test_file.py"
    file_body = "def add(a, b):\n return a + b"
    edit_instructions = "I am adding type hints to the function"
    code_edit = "def add(a: int, b: int) -> int:\n return a + b"
    mock_morph_tools._morph_client.chat.completions.create.return_value = mock_successful_response

    with patch("os.path.exists", return_value=True), patch(
        "builtins.open", mock_open(read_data=file_body)
    ) as open_mock:
        outcome = mock_morph_tools.edit_file(target_file=target_file, instructions=edit_instructions, code_edit=code_edit)

    # The file was read, backed up, and then overwritten.
    open_mock.assert_any_call(target_file, "r", encoding="utf-8")
    open_mock.assert_any_call(f"{target_file}.backup", "w", encoding="utf-8")
    open_mock.assert_any_call(target_file, "w", encoding="utf-8")

    # The API received the tagged instruction/code/update payload exactly once.
    payload = f"<instruction>{edit_instructions}</instruction>\n<code>{file_body}</code>\n<update>{code_edit}</update>"
    mock_morph_tools._morph_client.chat.completions.create.assert_called_once_with(
        model="morph-v3-large", messages=[{"role": "user", "content": payload}]
    )
    assert "Successfully applied edit" in outcome
    assert "backup" in outcome
def test_edit_file_success_with_provided_original_code(mock_morph_tools, mock_successful_response):
    """original_code is sent to the API, but the backup keeps the on-disk content."""
    target_file = "test_file.py"
    on_disk = "def old_function():\n pass"
    supplied_original = "def add(a, b):\n return a + b"
    edit_instructions = "I am adding type hints to the function"
    code_edit = "def add(a: int, b: int) -> int:\n return a + b"
    mock_morph_tools._morph_client.chat.completions.create.return_value = mock_successful_response

    with patch("os.path.exists", return_value=True), patch(
        "builtins.open", mock_open(read_data=on_disk)
    ) as open_mock:
        outcome = mock_morph_tools.edit_file(
            target_file=target_file, instructions=edit_instructions, code_edit=code_edit, original_code=supplied_original
        )

    # The API call used the supplied original code, not the on-disk content.
    payload = f"<instruction>{edit_instructions}</instruction>\n<code>{supplied_original}</code>\n<update>{code_edit}</update>"
    mock_morph_tools._morph_client.chat.completions.create.assert_called_once_with(
        model="morph-v3-large", messages=[{"role": "user", "content": payload}]
    )
    # The backup write carried the on-disk content, not the supplied original.
    handle = open_mock()
    disk_content_writes = [c for c in handle.write.call_args_list if c[0][0] == on_disk]
    assert len(disk_content_writes) == 1
    assert "Successfully applied edit" in outcome
def test_edit_file_no_response_content(mock_morph_tools, mock_failed_response):
    """An empty API response is reported as a failure."""
    target_file = "test_file.py"
    mock_morph_tools._morph_client.chat.completions.create.return_value = mock_failed_response
    with patch("os.path.exists", return_value=True), patch(
        "builtins.open", mock_open(read_data="def test(): pass")
    ):
        outcome = mock_morph_tools.edit_file(
            target_file=target_file, instructions="test instruction", code_edit="test edit"
        )
    assert f"Failed to apply edit to {target_file}: No response from Morph API" in outcome
def test_edit_file_write_error(mock_morph_tools, mock_successful_response):
    """A failure while writing the edited file back is surfaced in the result."""
    target_file = "test_file.py"
    disk_content = "def test(): pass"
    write_error = "Disk full"
    mock_morph_tools._morph_client.chat.completions.create.return_value = mock_successful_response

    def fake_open(file_path, mode, **kwargs):
        # Reads and the backup write succeed; writing the target itself blows up.
        if mode == "r":
            return mock_open(read_data=disk_content)()
        if file_path.endswith(".backup"):
            return mock_open()()
        raise Exception(write_error)

    with patch("os.path.exists", return_value=True), patch("builtins.open", side_effect=fake_open):
        outcome = mock_morph_tools.edit_file(
            target_file=target_file, instructions="test instruction", code_edit="test edit"
        )

    assert f"Successfully applied edit but failed to write back to {target_file}: {write_error}" in outcome
# Test edge cases
def test_edit_file_empty_original_code(mock_morph_tools, mock_successful_response):
    """Editing a file whose content is empty still succeeds."""
    mock_morph_tools._morph_client.chat.completions.create.return_value = mock_successful_response
    with patch("os.path.exists", return_value=True), patch("builtins.open", mock_open(read_data="")):
        outcome = mock_morph_tools.edit_file(
            target_file="empty_file.py", instructions="I am adding a new function", code_edit="def new_function(): pass"
        )
    assert "Successfully applied edit" in outcome
# Test toolkit structure
def test_morph_tools_toolkit_structure():
    """MorphTools registers exactly one callable tool under the expected name."""
    toolkit = MorphTools(api_key="test_key")
    assert toolkit.name == "morph_tools"
    assert len(toolkit.tools) == 1
    assert toolkit.tools[0] == toolkit.edit_file
    assert hasattr(toolkit, "edit_file")
    assert callable(toolkit.edit_file)
# Test method signature matches current implementation
def test_edit_file_method_signature():
    """edit_file exposes (target_file, instructions, code_edit, original_code=None)."""
    import inspect

    signature = inspect.signature(MorphTools(api_key="test_key").edit_file)
    assert list(signature.parameters.keys()) == ["target_file", "instructions", "code_edit", "original_code"]
    # original_code is optional with a None default
    assert signature.parameters["original_code"].default is None
def test_edit_file_always_writes_to_file(mock_morph_tools, mock_successful_response):
    """edit_file always persists the result: backup written, target overwritten."""
    target_file = "test_file.py"
    mock_morph_tools._morph_client.chat.completions.create.return_value = mock_successful_response
    with patch("os.path.exists", return_value=True), patch(
        "builtins.open", mock_open(read_data="def test(): pass")
    ) as open_mock:
        outcome = mock_morph_tools.edit_file(
            target_file=target_file, instructions="test instruction", code_edit="test edit"
        )
    # Backup creation followed by the target write proves file persistence happened.
    open_mock.assert_any_call(f"{target_file}.backup", "w", encoding="utf-8")
    open_mock.assert_any_call(target_file, "w", encoding="utf-8")
    assert "Successfully applied edit" in outcome
    assert "backup" in outcome
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/models/test_morph.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/linkup.py | from os import getenv
from typing import Any, List, Literal, Optional
from agno.tools import Toolkit
from agno.utils.log import logger
try:
from linkup import LinkupClient
except ImportError:
raise ImportError("`linkup-sdk` not installed. Please install using `pip install linkup-sdk`")
class LinkupTools(Toolkit):
    """Toolkit that provides realtime web search via the Linkup API."""

    def __init__(
        self,
        api_key: Optional[str] = None,
        depth: Literal["standard", "deep"] = "standard",
        output_type: Literal["sourcedAnswer", "searchResults"] = "searchResults",
        enable_web_search_with_linkup: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """Initialize the Linkup toolkit.

        Args:
            api_key: Linkup API key. Falls back to the LINKUP_API_KEY environment variable.
            depth: Default search depth ("standard" or "deep").
            output_type: Default result format ("sourcedAnswer" or "searchResults").
            enable_web_search_with_linkup: Register the web_search_with_linkup tool.
            all: Register every available tool (shadows the builtin, but it is part of
                the public interface and kept for backward compatibility).
            **kwargs: Additional arguments passed to Toolkit.
        """
        self.api_key = api_key or getenv("LINKUP_API_KEY")
        if not self.api_key:
            logger.error("LINKUP_API_KEY not set. Please set the LINKUP_API_KEY environment variable.")
        # Bug fix: pass the resolved key (argument OR env var). Previously the raw
        # `api_key` argument was forwarded, so a key supplied via LINKUP_API_KEY
        # was silently ignored and the client was built with api_key=None.
        self.linkup = LinkupClient(api_key=self.api_key)
        self.depth = depth
        self.output_type = output_type
        tools: List[Any] = []
        if all or enable_web_search_with_linkup:
            tools.append(self.web_search_with_linkup)
        super().__init__(name="linkup_tools", tools=tools, **kwargs)

    def web_search_with_linkup(self, query: str, depth: Optional[str] = None, output_type: Optional[str] = None) -> str:
        """
        Use this function to search the web for a given query.
        This function uses the Linkup API to provide realtime online information about the query.
        Args:
            query (str): Query to search for.
            depth (str): (deep|standard) Depth of the search. Defaults to 'standard'.
            output_type (str): (sourcedAnswer|searchResults) Type of output. Defaults to 'searchResults'.
        Returns:
            str: string of results related to the query.
        """
        try:
            # Per-call overrides fall back to the toolkit-level defaults.
            response = self.linkup.search(
                query=query,
                depth=depth or self.depth,  # type: ignore
                output_type=output_type or self.output_type,  # type: ignore
            )
            return response
        except Exception as e:
            # Errors are returned as strings so the agent can react to them.
            return f"Error: {str(e)}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/linkup.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/tools/evm.py | from os import getenv
from typing import Optional
from agno.tools import Toolkit
from agno.utils.log import log_debug, log_error
try:
from eth_account.account import LocalAccount
from eth_account.datastructures import SignedTransaction
from hexbytes import HexBytes
from web3 import Web3
from web3.main import Web3 as Web3Type
from web3.providers.rpc import HTTPProvider
from web3.types import TxParams, TxReceipt
except ImportError:
raise ImportError("`web3` not installed. Please install using `pip install web3`")
class EvmTools(Toolkit):
    """Toolkit for sending EIP-1559 (type 2) value transfers on EVM-compatible chains."""

    def __init__(
        self,
        private_key: Optional[str] = None,
        rpc_url: Optional[str] = None,
        enable_send_transaction: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """Initialize EVM tools for blockchain interactions.

        Args:
            private_key: Private key for signing transactions (defaults to EVM_PRIVATE_KEY env var)
            rpc_url: RPC URL for blockchain connection (defaults to EVM_RPC_URL env var)
            enable_send_transaction: Register the send_transaction tool.
            all: Register every available tool (shadows the builtin, kept for the
                toolkit's shared flag convention).
            **kwargs: Additional arguments passed to parent Toolkit class

        Raises:
            ValueError: If the private key or the RPC URL cannot be resolved.
        """
        self.private_key = private_key or getenv("EVM_PRIVATE_KEY")
        self.rpc_url = rpc_url or getenv("EVM_RPC_URL")
        if not self.private_key:
            log_error("Private Key is required")
            raise ValueError("Private Key is required")
        if not self.rpc_url:
            log_error("RPC Url is needed to interact with EVM blockchain")
            raise ValueError("RPC Url is needed to interact with EVM blockchain")
        # Ensure private key has 0x prefix
        if not self.private_key.startswith("0x"):
            self.private_key = f"0x{self.private_key}"
        # Initialize Web3 client and derive the signing account from the key
        self.web3_client: "Web3Type" = Web3(HTTPProvider(self.rpc_url))
        self.account: "LocalAccount" = self.web3_client.eth.account.from_key(self.private_key)
        log_debug(f"Your wallet address is: {self.account.address}")
        tools = []
        if all or enable_send_transaction:
            tools.append(self.send_transaction)
        super().__init__(name="evm_tools", tools=tools, **kwargs)

    def get_max_priority_fee_per_gas(self) -> int:
        """Get the max priority fee per gas for the transaction.

        Returns:
            int: The max priority fee per gas for the transaction (a flat 1 gwei tip)
        """
        max_priority_fee_per_gas = self.web3_client.to_wei(1, "gwei")
        return max_priority_fee_per_gas

    def get_max_fee_per_gas(self, max_priority_fee_per_gas: int) -> int:
        """Get the max fee per gas for the transaction.

        Computed as 2 * baseFeePerGas of the latest block plus the priority fee,
        leaving headroom for base-fee increases before the transaction is mined.

        Args:
            max_priority_fee_per_gas: The max priority fee per gas

        Returns:
            int: The max fee per gas for the transaction

        Raises:
            ValueError: If the latest block does not expose baseFeePerGas.
        """
        latest_block = self.web3_client.eth.get_block("latest")
        base_fee_per_gas = latest_block.get("baseFeePerGas")
        if base_fee_per_gas is None:
            log_error("Base fee per gas not found in the latest block.")
            raise ValueError("Base fee per gas not found in the latest block.")
        max_fee_per_gas = (2 * base_fee_per_gas) + max_priority_fee_per_gas
        return max_fee_per_gas

    def send_transaction(self, to_address: str, amount_in_wei: int) -> str:
        """Sends a transaction to the address provided.

        Args:
            to_address: The address to which you want to send ETH
            amount_in_wei: The amount of ETH to send in wei

        Returns:
            str: The transaction hash of the transaction, or an "error: ..." message
        """
        try:
            max_priority_fee_per_gas = self.get_max_priority_fee_per_gas()
            max_fee_per_gas = self.get_max_fee_per_gas(max_priority_fee_per_gas)
            transaction_params: "TxParams" = {
                "from": self.account.address,
                "to": to_address,
                "value": amount_in_wei,  # type: ignore[typeddict-item]
                "nonce": self.web3_client.eth.get_transaction_count(self.account.address),
                "gas": 21000,  # fixed gas limit for a plain ETH transfer
                "maxFeePerGas": max_fee_per_gas,  # type: ignore[typeddict-item]
                "maxPriorityFeePerGas": max_priority_fee_per_gas,  # type: ignore[typeddict-item]
                "chainId": self.web3_client.eth.chain_id,
                "type": 2,  # EIP-1559 dynamic fee transaction
            }
            signed_transaction: "SignedTransaction" = self.web3_client.eth.account.sign_transaction(
                transaction_params, self.private_key
            )
            transaction_hash: "HexBytes" = self.web3_client.eth.send_raw_transaction(signed_transaction.raw_transaction)
            log_debug(f"Ongoing Transaction hash: 0x{transaction_hash.hex()}")
            transaction_receipt: "TxReceipt" = self.web3_client.eth.wait_for_transaction_receipt(transaction_hash)
            if transaction_receipt.get("status") == 1:
                log_debug(f"Transaction successful! Transaction hash: 0x{transaction_hash.hex()}")
                return f"0x{transaction_hash.hex()}"
            # Status 0 means the transaction was mined but reverted. Return the same
            # "error: ..." string the previous raise-and-self-catch produced, without
            # the detour through our own exception handler.
            log_error("Transaction failed!")
            return "error: Transaction failed!"
        except Exception as e:
            log_error(f"Error sending transaction: {e}")
            return f"error: {e}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/evm.py",
"license": "Apache License 2.0",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_evm.py | """Unit tests for EvmTools class."""
from unittest.mock import Mock, patch
import pytest
from agno.tools.evm import EvmTools
@pytest.fixture
def mock_web3_client():
    """Build a Mock standing in for a connected Web3 client."""
    w3 = Mock()

    # Unit-conversion helpers
    w3.to_wei.return_value = 1000000000  # 1 gwei
    w3.from_wei.return_value = 1.0

    # eth namespace behaviour
    w3.eth.get_block.return_value = {"baseFeePerGas": 20000000000}  # 20 gwei
    w3.eth.get_transaction_count.return_value = 5
    w3.eth.chain_id = 11155111  # Sepolia testnet
    w3.eth.get_balance.return_value = 1000000000000000000  # 1 ETH in wei
    w3.eth.send_raw_transaction.return_value = b"0x1234567890abcdef"
    w3.eth.wait_for_transaction_receipt.return_value = {"status": 1, "transactionHash": "0x1234567890abcdef"}

    # Account derived from the private key
    account = Mock()
    account.address = "0x742d35Cc6634C0532925a3b8D2A7E1234567890A"
    w3.eth.account.from_key.return_value = account

    # Signed transaction returned by sign_transaction
    signed = Mock()
    signed.raw_transaction = b"0xsignedtransaction"
    w3.eth.account.sign_transaction.return_value = signed

    return w3
@pytest.fixture
def mock_web3_constructor():
    """Patch the Web3 constructor and HTTPProvider used inside agno.tools.evm."""
    with patch("agno.tools.evm.Web3") as web3_cls, patch("agno.tools.evm.HTTPProvider") as provider_cls:
        yield web3_cls, provider_cls
@pytest.fixture
def mock_environment_variables():
    """Inject EVM credentials into the process environment for the test's duration."""
    env = {
        "EVM_PRIVATE_KEY": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
        "EVM_RPC_URL": "https://0xrpc.io/sep",
    }
    with patch.dict("os.environ", env):
        yield
class TestEvmToolsInitialization:
    """Test cases for EvmTools initialization."""

    def test_init_with_credentials(self, mock_web3_constructor, mock_web3_client):
        """Credentials passed directly are stored and the client is built."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        assert evm.private_key == "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
        assert evm.rpc_url == "https://0xrpc.io/sep"
        assert evm.web3_client is not None
        assert evm.account is not None

    def test_init_with_env_variables(self, mock_web3_constructor, mock_web3_client, mock_environment_variables):
        """Credentials are picked up from the environment when not passed."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools()
        assert evm.private_key == "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
        assert evm.rpc_url == "https://0xrpc.io/sep"
        assert evm.web3_client is not None

    def test_init_without_private_key(self, mock_web3_constructor):
        """A missing private key raises ValueError."""
        with patch.dict("os.environ", {}, clear=True), pytest.raises(ValueError, match="Private Key is required"):
            EvmTools(rpc_url="https://0xrpc.io/sep")

    def test_init_without_rpc_url(self, mock_web3_constructor):
        """A missing RPC URL raises ValueError."""
        with patch.dict("os.environ", {}, clear=True), pytest.raises(
            ValueError, match="RPC Url is needed to interact with EVM blockchain"
        ):
            EvmTools(private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")

    def test_private_key_prefix_added(self, mock_web3_constructor, mock_web3_client):
        """A bare hex key gains the 0x prefix."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        assert evm.private_key == "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
class TestGasFeeCalculation:
    """Test cases for gas fee calculation methods."""

    def test_get_max_priority_fee_per_gas(self, mock_web3_constructor, mock_web3_client):
        """The priority fee is a flat 1 gwei obtained through to_wei."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        assert evm.get_max_priority_fee_per_gas() == 1000000000
        mock_web3_client.to_wei.assert_called_once_with(1, "gwei")

    def test_get_max_fee_per_gas(self, mock_web3_constructor, mock_web3_client):
        """max fee = 2 * base fee of the latest block + priority fee."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        priority_fee = 1000000000  # 1 gwei
        assert evm.get_max_fee_per_gas(priority_fee) == (2 * 20000000000) + priority_fee
        mock_web3_client.eth.get_block.assert_called_once_with("latest")

    def test_get_max_fee_per_gas_no_base_fee(self, mock_web3_constructor, mock_web3_client):
        """A latest block without baseFeePerGas raises ValueError."""
        web3_cls, _provider = mock_web3_constructor
        mock_web3_client.eth.get_block.return_value = {}  # No baseFeePerGas
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        with pytest.raises(ValueError, match="Base fee per gas not found"):
            evm.get_max_fee_per_gas(1000000000)
class TestSendTransaction:
    """Test cases for send_transaction method."""

    def test_send_transaction_success(self, mock_web3_constructor, mock_web3_client):
        """A transfer signs, broadcasts, waits for the receipt, and returns a hash."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        outcome = evm.send_transaction(
            to_address="0x3Dfc53E3C77bb4e30Ce333Be1a66Ce62558bE395",
            amount_in_wei=1000000000000000,  # 0.001 ETH
        )
        # The full sign -> broadcast -> wait pipeline ran exactly once.
        mock_web3_client.eth.account.sign_transaction.assert_called_once()
        mock_web3_client.eth.send_raw_transaction.assert_called_once()
        mock_web3_client.eth.wait_for_transaction_receipt.assert_called_once()
        # A transaction hash of sensible length comes back.
        assert outcome.startswith("0x")
        assert len(outcome) > 10

    def test_send_transaction_invalid_address(self, mock_web3_constructor, mock_web3_client):
        """An invalid address either succeeds (mock) or yields an error string."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        outcome = evm.send_transaction(to_address="invalid_address", amount_in_wei=1000000000000000)
        # The mock performs no address validation; real Web3 would reject this.
        assert outcome.startswith("0x") or outcome.startswith("error:")

    def test_send_transaction_zero_amount(self, mock_web3_constructor, mock_web3_client):
        """A zero-value transfer is still broadcast and returns a hash."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        outcome = evm.send_transaction(to_address="0x3Dfc53E3C77bb4e30Ce333Be1a66Ce62558bE395", amount_in_wei=0)
        assert outcome.startswith("0x")
        assert len(outcome) > 10
class TestErrorHandling:
    """Test cases for error handling."""

    def test_transaction_exception_handling(self, mock_web3_constructor, mock_web3_client):
        """Broadcast failures come back as 'error: ...' strings."""
        web3_cls, _provider = mock_web3_constructor
        mock_web3_client.eth.send_raw_transaction.side_effect = Exception("Network error")
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        outcome = evm.send_transaction(
            to_address="0x3Dfc53E3C77bb4e30Ce333Be1a66Ce62558bE395", amount_in_wei=1000000000000000
        )
        assert outcome.startswith("error:")
        assert "Network error" in outcome

    def test_gas_calculation_exception(self, mock_web3_constructor, mock_web3_client):
        """RPC failures during gas estimation also surface as error strings."""
        web3_cls, _provider = mock_web3_constructor
        mock_web3_client.eth.get_block.side_effect = Exception("RPC error")
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        outcome = evm.send_transaction(
            to_address="0x3Dfc53E3C77bb4e30Ce333Be1a66Ce62558bE395", amount_in_wei=1000000000000000
        )
        assert outcome.startswith("error:")
        assert "RPC error" in outcome
class TestTransactionParameters:
    """Test cases for transaction parameter construction."""

    def test_transaction_params_construction(self, mock_web3_constructor, mock_web3_client):
        """The transaction handed to sign_transaction carries the expected EIP-1559 fields."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        evm.send_transaction(to_address="0x3Dfc53E3C77bb4e30Ce333Be1a66Ce62558bE395", amount_in_wei=1000000000000000)
        params = mock_web3_client.eth.account.sign_transaction.call_args[0][0]
        expected_fields = {
            "from": "0x742d35Cc6634C0532925a3b8D2A7E1234567890A",
            "to": "0x3Dfc53E3C77bb4e30Ce333Be1a66Ce62558bE395",
            "value": 1000000000000000,
            "nonce": 5,
            "gas": 21000,
            "chainId": 11155111,
        }
        for field, value in expected_fields.items():
            assert params[field] == value
        # Dynamic-fee fields are present (exact values come from the fee helpers).
        assert "maxFeePerGas" in params
        assert "maxPriorityFeePerGas" in params
class TestToolkitIntegration:
    """Test cases for toolkit integration."""

    def test_tools_registration(self, mock_web3_constructor, mock_web3_client):
        """send_transaction is the single registered tool."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        assert hasattr(evm, "tools")
        assert len(evm.tools) == 1
        assert evm.tools[0] == evm.send_transaction

    def test_toolkit_name(self, mock_web3_constructor, mock_web3_client):
        """The toolkit registers under the name 'evm_tools'."""
        web3_cls, _provider = mock_web3_constructor
        web3_cls.return_value = mock_web3_client
        evm = EvmTools(
            private_key="0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            rpc_url="https://0xrpc.io/sep",
        )
        assert evm.name == "evm_tools"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_evm.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_postgres.py | from unittest.mock import Mock, mock_open, patch
import psycopg
import pytest
from agno.tools.postgres import PostgresTools
# --- Mock Data for Tests ---
# Canned query results, shaped as lists of dicts keyed by column name,
# matching what the mocked cursor's fetchall()/iteration returns below.
MOCK_TABLES_RESULT = [{"table_name": "employees"}, {"table_name": "departments"}, {"table_name": "projects"}]
MOCK_DESCRIBE_RESULT = [
    {"column_name": "id", "data_type": "integer", "is_nullable": "NO"},
    {"column_name": "name", "data_type": "character varying", "is_nullable": "YES"},
    {"column_name": "salary", "data_type": "numeric", "is_nullable": "YES"},
    {"column_name": "department_id", "data_type": "integer", "is_nullable": "YES"},
]
MOCK_COUNT_RESULT = [{"count": 3}]
MOCK_EXPORT_DATA = [
    {"id": 1, "name": "Alice", "salary": 75000, "department_id": 1},
    {"id": 2, "name": "Bob", "salary": 80000, "department_id": 2},
    {"id": 3, "name": "Charlie", "salary": 65000, "department_id": 1},
]
# EXPLAIN output rows; the leading space in the second row mimics plan indentation.
MOCK_EXPLAIN_RESULT = [
    {"QUERY PLAN": "Seq Scan on employees (cost=0.00..35.50 rows=10 width=32)"},
    {"QUERY PLAN": " Filter: (salary > 10000)"},
]
@pytest.fixture
def mock_connection():
    """Create a mock connection that behaves like psycopg connection."""
    conn = Mock()
    conn.closed = False
    conn.read_only = False
    return conn
@pytest.fixture
def mock_cursor():
    """Create a mock cursor that behaves like psycopg cursor."""
    cursor = Mock()
    cursor.description = None
    cursor.fetchall.return_value = []
    cursor.fetchone.return_value = {}
    cursor.statusmessage = "Command completed successfully"
    # Mock() does not auto-create dunder methods, so the context-manager and
    # iteration protocols are wired up explicitly here.
    cursor.__enter__ = Mock(return_value=cursor)
    cursor.__exit__ = Mock(return_value=False)
    cursor.__iter__ = Mock(return_value=iter([]))
    return cursor
@pytest.fixture
def postgres_tools(mock_connection, mock_cursor):
    """Create PostgresTools instance with mocked connection."""
    # Setup the connection to return our mock cursor
    mock_connection.cursor.return_value = mock_cursor
    # Patch psycopg.connect so no real database is contacted.
    with patch("psycopg.connect", return_value=mock_connection):
        tools = PostgresTools(
            host="localhost",
            port=5433,
            db_name="testdb",
            user="testuser",
            password="testpassword",
            table_schema="company_data",
        )
        # Override the connection property to return our mock
        tools._connection = mock_connection
        yield tools
def test_connection_properties(postgres_tools, mock_connection):
    """Test that connection is properly configured."""
    # Test connection is established with correct parameters
    assert postgres_tools._connection == mock_connection
    assert postgres_tools.db_name == "testdb"
    assert postgres_tools.host == "localhost"
    assert postgres_tools.port == 5433
    assert postgres_tools.table_schema == "company_data"
def test_show_tables_success(postgres_tools, mock_connection, mock_cursor):
    """Test show_tables returns expected table list."""
    # Setup mock responses
    mock_cursor.description = [("table_name",)]
    mock_cursor.fetchall.return_value = MOCK_TABLES_RESULT
    result = postgres_tools.show_tables()
    # Verify parameterized query was used (schema passed as a bind parameter)
    mock_cursor.execute.assert_called_with(
        "SELECT table_name FROM information_schema.tables WHERE table_schema = %s;", ("company_data",)
    )
    # Verify result format
    assert "table_name" in result
    assert "employees" in result
    assert "departments" in result
    assert "projects" in result
def test_describe_table_success(postgres_tools, mock_connection, mock_cursor):
    """Test describe_table returns expected schema information."""
    # Setup mock responses
    mock_cursor.description = [("column_name",), ("data_type",), ("is_nullable",)]
    mock_cursor.fetchall.return_value = MOCK_DESCRIBE_RESULT
    result = postgres_tools.describe_table("employees")
    # Verify parameterized query was used (check if call contains expected parameters)
    mock_cursor.execute.assert_called()
    call_args = mock_cursor.execute.call_args
    assert "table_schema = %s AND table_name = %s" in call_args[0][0]
    assert call_args[0][1] == ("company_data", "employees")
    # Verify result format (CSV-like header plus data rows)
    assert "column_name,data_type,is_nullable" in result
    assert "salary,numeric,YES" in result
def test_run_query_success(postgres_tools, mock_connection, mock_cursor):
    """Test run_query executes SQL and returns formatted results."""
    # Setup mock responses
    mock_cursor.description = [("count",)]
    mock_cursor.fetchall.return_value = MOCK_COUNT_RESULT
    result = postgres_tools.run_query("SELECT COUNT(*) FROM employees;")
    # Verify query was executed (None means no bind parameters)
    mock_cursor.execute.assert_called_with("SELECT COUNT(*) FROM employees;", None)
    # Verify result format
    lines = result.strip().split("\n")
    assert lines[0] == "count"  # Header
    assert lines[1] == "3"  # Data
def test_export_table_to_path_success(postgres_tools, mock_connection, mock_cursor):
    """Test export_table_to_path creates CSV file safely."""
    # Setup mock responses
    mock_cursor.description = [("id",), ("name",), ("salary",), ("department_id",)]
    # Override the __iter__ method to return our mock data
    # (export iterates the cursor row-by-row rather than calling fetchall)
    mock_cursor.__iter__ = Mock(return_value=iter(MOCK_EXPORT_DATA))
    # Mock file operations
    mock_file = mock_open()
    export_path = "/tmp/test_export.csv"
    with patch("builtins.open", mock_file):
        result = postgres_tools.export_table_to_path("employees", export_path)
    # Verify safe query construction (using sql.Identifier)
    mock_cursor.execute.assert_called_once()
    # Verify file was opened for writing
    mock_file.assert_called_once_with(export_path, "w", newline="", encoding="utf-8")
    # Verify success message
    assert "Successfully exported table 'employees' to '/tmp/test_export.csv'" in result
def test_inspect_query_success(postgres_tools, mock_connection, mock_cursor):
    """Test inspect_query returns execution plan."""
    # Setup mock responses
    mock_cursor.description = [("QUERY PLAN",)]
    mock_cursor.fetchall.return_value = MOCK_EXPLAIN_RESULT
    result = postgres_tools.inspect_query("SELECT name FROM employees WHERE salary > 10000;")
    # Verify the user query was wrapped in EXPLAIN before execution
    mock_cursor.execute.assert_called_with("EXPLAIN SELECT name FROM employees WHERE salary > 10000;", None)
    # Verify result contains query plan
    assert "Seq Scan on employees" in result
    assert "Filter: (salary > 10000)" in result
def test_database_error_handling(postgres_tools, mock_connection, mock_cursor):
    """Test proper error handling for database errors."""
    # Setup mock to raise psycopg error
    mock_cursor.execute.side_effect = psycopg.DatabaseError("Table does not exist")
    mock_connection.rollback = Mock()
    result = postgres_tools.show_tables()
    # Verify error is caught and returned as string (not raised)
    assert "Error executing query: Table does not exist" in result
    # Verify rollback was called
    mock_connection.rollback.assert_called_once()
def test_export_file_error_handling(postgres_tools, mock_connection, mock_cursor):
    """Test error handling when file operations fail."""
    # Setup mock responses
    mock_cursor.description = [("id",), ("name",)]
    # Mock file operations to raise IOError
    with patch("builtins.open", side_effect=IOError("Permission denied")):
        result = postgres_tools.export_table_to_path("employees", "/invalid/path/file.csv")
    # Verify error is caught and returned
    assert "Error exporting table: Permission denied" in result
def test_tools_has_close_method(mock_connection):
    """Test that PostgresTools has a close method for cleanup."""
    with patch("psycopg.connect", return_value=mock_connection):
        tools = PostgresTools(host="localhost", db_name="testdb")
        assert tools is not None
        assert hasattr(tools, "close")
        assert callable(tools.close)
def test_connection_recovery(mock_connection):
    """Test that connection is re-established if closed."""
    # Simulate closed connection
    mock_connection.closed = True
    with patch("psycopg.connect", return_value=mock_connection) as mock_connect:
        tools = PostgresTools(host="localhost", db_name="testdb")
        # Call connect() method to trigger connection
        tools.connect()
        # Verify connect was called
        mock_connect.assert_called()
def test_sql_injection_prevention(postgres_tools, mock_connection, mock_cursor):
    """Test that SQL injection attempts are safely handled."""
    # Setup mock
    mock_cursor.description = [("column_name",), ("data_type",), ("is_nullable",)]
    mock_cursor.fetchall.return_value = []
    # Attempt SQL injection
    malicious_table = "users'; DROP TABLE employees; --"
    postgres_tools.describe_table(malicious_table)
    # Verify the malicious input was passed as a parameter, not concatenated
    call_args = mock_cursor.execute.call_args
    assert call_args[0][1] == ("company_data", malicious_table)  # Parameters tuple
    assert "DROP TABLE" not in call_args[0][0]  # Not in the SQL string
def test_readonly_session_configuration(mock_connection):
    """Test that connection is configured as read-only."""
    with patch("psycopg.connect", return_value=mock_connection):
        tools = PostgresTools(host="localhost", db_name="testdb")
        tools.connect()  # Trigger connection establishment
        # Verify readonly session was set on the connection object
        assert mock_connection.read_only is True
def test_is_connected_property(postgres_tools, mock_connection):
    """Test is_connected property returns correct state."""
    # Connection is established via fixture
    mock_connection.closed = False
    assert postgres_tools.is_connected is True
    # Simulate closed connection
    mock_connection.closed = True
    assert postgres_tools.is_connected is False
    # Simulate no connection
    postgres_tools._connection = None
    assert postgres_tools.is_connected is False
def test_close_method(mock_connection):
    """Test close method properly closes connection."""
    mock_connection.closed = False
    mock_connection.close = Mock()
    with patch("psycopg.connect", return_value=mock_connection):
        tools = PostgresTools(host="localhost", db_name="testdb")
        tools._connection = mock_connection
        tools.close()
        mock_connection.close.assert_called_once()
        # close() must also drop the reference so a later call reconnects
        assert tools._connection is None
def test_close_method_already_closed(mock_connection):
    """Test close method handles already closed connection."""
    mock_connection.closed = True
    mock_connection.close = Mock()
    with patch("psycopg.connect", return_value=mock_connection):
        tools = PostgresTools(host="localhost", db_name="testdb")
        tools._connection = mock_connection
        tools.close()
        # close() should not be called on already closed connection
        mock_connection.close.assert_not_called()
def test_ensure_connection_creates_new(mock_connection):
    """Test _ensure_connection creates connection when none exists."""
    with patch("psycopg.connect", return_value=mock_connection) as mock_connect:
        tools = PostgresTools(host="localhost", db_name="testdb")
        tools._connection = None
        result = tools._ensure_connection()
        mock_connect.assert_called()
        assert result == mock_connection
def test_ensure_connection_reuses_existing(mock_connection):
    """Test _ensure_connection reuses existing open connection."""
    mock_connection.closed = False
    with patch("psycopg.connect", return_value=mock_connection) as mock_connect:
        tools = PostgresTools(host="localhost", db_name="testdb")
        tools._connection = mock_connection
        # Discard any connect() calls made during construction so the
        # assertion below only sees calls made by _ensure_connection().
        mock_connect.reset_mock()
        result = tools._ensure_connection()
        mock_connect.assert_not_called()
        assert result == mock_connection
def test_connect_reuses_existing_connection(mock_connection):
    """Test connect() reuses existing open connection."""
    mock_connection.closed = False
    with patch("psycopg.connect", return_value=mock_connection) as mock_connect:
        tools = PostgresTools(host="localhost", db_name="testdb")
        tools._connection = mock_connection
        mock_connect.reset_mock()
        result = tools.connect()
        mock_connect.assert_not_called()
        assert result == mock_connection
def test_requires_connect_attribute():
    """Test that _requires_connect class attribute is set."""
    assert PostgresTools._requires_connect is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_postgres.py",
"license": "Apache License 2.0",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/portkey/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput # noqa
from agno.db.sqlite import SqliteDb
from agno.models.portkey import Portkey
PORTKEY_MODEL_ID = "gpt-4o-mini"
def _assert_metrics(response: RunOutput):
    """Check that the run reports positive, self-consistent token metrics."""
    metrics = response.metrics
    assert metrics is not None
    # Every token counter must be strictly positive ...
    for token_count in (metrics.input_tokens, metrics.output_tokens, metrics.total_tokens):
        assert token_count > 0
    # ... and the total must equal the sum of its parts.
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens
def test_basic():
    agent = Agent(model=Portkey(id=PORTKEY_MODEL_ID), markdown=True, telemetry=False)
    # Run a simple prompt and capture the full run output
    response: RunOutput = agent.run("Share a 2 sentence comedy story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream():
    agent = Agent(model=Portkey(id=PORTKEY_MODEL_ID), markdown=True, telemetry=False)
    # Every streamed chunk must carry content
    for response in agent.run("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    agent = Agent(model=Portkey(id=PORTKEY_MODEL_ID), markdown=True, telemetry=False)
    response = await agent.arun("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]  # type: ignore
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream():
    agent = Agent(model=Portkey(id=PORTKEY_MODEL_ID), markdown=True, telemetry=False)
    async for response in agent.arun("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
def test_with_memory():
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=Portkey(id=PORTKEY_MODEL_ID),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None
    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content
    # Verify the full session transcript was persisted across both runs
    messages = agent.get_session_messages()
    assert messages is not None
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]
    # Test metrics structure and types
    _assert_metrics(response2)
def test_output_schema():
    """Structured output via the provider's native structured-output path."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        output_schema=MovieScript,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_json_response_mode():
    """Structured output with use_json_mode=True; same expectations as above."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_history():
    """Each run adds a user/assistant pair that is replayed on the next turn."""
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        db=SqliteDb(db_file="tmp/portkey/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    # With history stored and added to context, the message count observed on
    # run N should be 2*N (every prior exchange plus the current one).
    for prompt, expected_count in (("Hello", 2), ("Hello 2", 4), ("Hello 3", 6), ("Hello 4", 8)):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        assert len(run_output.messages) == expected_count
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/portkey/test_basic.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/portkey/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent
from agno.models.portkey import Portkey
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
PORTKEY_MODEL_ID = "gpt-4o-mini"
def test_tool_use():
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA?")
    # Verify tool usage: at least one message must carry tool calls
    assert any(msg.tool_calls for msg in response.messages)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response_stream = agent.run("What is the current price of TSLA?", stream=True, stream_events=True)
    responses = []
    tool_call_seen = False
    for chunk in response_stream:
        responses.append(chunk)
        # Check for ToolCallStartedEvent or ToolCallCompletedEvent
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:
            if chunk.tool.tool_name:
                tool_call_seen = True
    assert len(responses) > 0
    assert tool_call_seen, "No tool calls observed in stream"
    # Reassemble the streamed content to check the final answer
    full_content = ""
    for r in responses:
        full_content += r.content or ""
    assert "TSLA" in full_content
@pytest.mark.asyncio
async def test_async_tool_use():
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = await agent.arun("What is the current price of TSLA?")
    # Verify tool usage (only assistant messages issue tool calls)
    assert any(msg.tool_calls for msg in response.messages if msg.role == "assistant")
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response_stream = await agent.arun("What is the current price of TSLA?", stream=True, stream_events=True)
    responses = []
    tool_call_seen = False
    async for chunk in response_stream:
        responses.append(chunk)
        # Check for ToolCallStartedEvent or ToolCallCompletedEvent
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:
            if chunk.tool.tool_name:
                tool_call_seen = True
    assert len(responses) > 0
    assert tool_call_seen, "No tool calls observed in stream"
    full_content = ""
    for r in responses:
        full_content += r.content or ""
    assert "TSLA" in full_content
def test_parallel_tool_calls():
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and AAPL?")
    # Collect all tool calls across the conversation messages
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "AAPL" in response.content
def test_multiple_tool_calls():
    # Two different toolkits: prices via yfinance, news via web search
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and what is the latest news about it?")
    # Collect all tool calls across the conversation messages
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "latest news" in response.content.lower()
def test_tool_call_custom_tool_no_parameters():
    # A plain function with no arguments can be registered directly as a tool
    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    # Verify tool usage
    assert any(msg.tool_calls for msg in response.messages)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_custom_tool_optional_parameters():
    # Optional (defaulted) parameters must survive schema generation
    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city
        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        else:
            return f"It is currently 70 degrees and cloudy in {city}"
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Paris?")
    # Verify tool usage
    assert any(msg.tool_calls for msg in response.messages)
    assert response.content is not None
    assert "70" in response.content
def test_tool_call_list_parameters():
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )
    response = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )
    # Verify tool usage
    assert any(msg.tool_calls for msg in response.messages)
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    # Only Exa toolkit functions should have been invoked
    for call in tool_calls:
        if call.get("type", "") == "function":
            assert call["function"]["name"] in ["get_contents", "exa_answer", "search_exa"]
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/portkey/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 170,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_daytona.py | """Test DaytonaTools functionality."""
import sys
from unittest.mock import MagicMock, patch
import pytest
# Create a proper mock Configuration class that can be patched
class MockConfiguration:
    def __init__(self, *args, **kwargs):
        # Default mirrors the SSL-verification flag exercised by the tests below.
        self.verify_ssl = True
# Mock the daytona modules before importing DaytonaTools, so the import below
# succeeds even when the daytona SDK is not installed.
mock_daytona_module = MagicMock()
mock_daytona_api_client_module = MagicMock()
mock_daytona_api_client_module.Configuration = MockConfiguration
sys.modules["daytona"] = mock_daytona_module
sys.modules["daytona_api_client"] = mock_daytona_api_client_module
# Import after mocking to avoid import errors
from agno.tools.daytona import DaytonaTools  # noqa: E402
@pytest.fixture
def mock_agent():
    """Create a mock agent with session_state."""
    agent = MagicMock()
    # Real dict so the tools' reads/writes of session_state behave normally.
    agent.session_state = {}
    return agent
@pytest.fixture
def mock_daytona():
    """Create mock Daytona objects (client, sandbox, process, fs)."""
    with patch("agno.tools.daytona.Daytona") as mock_daytona_class:
        mock_client = mock_daytona_class.return_value
        mock_sandbox = MagicMock()
        mock_sandbox.id = "test-sandbox-123"
        mock_client.create.return_value = mock_sandbox
        # Mock process and fs
        mock_process = MagicMock()
        mock_fs = MagicMock()
        mock_sandbox.process = mock_process
        mock_sandbox.fs = mock_fs
        yield mock_client, mock_sandbox, mock_process, mock_fs
class TestDaytonaTools:
    """Test DaytonaTools class."""
    def test_initialization_with_api_key(self):
        """Test initialization with API key."""
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            assert tools.api_key == "test-key"
            assert tools.persistent is True
    def test_initialization_without_api_key(self):
        """Test initialization without API key."""
        # clear=True removes any real DAYTONA_API_KEY from the environment
        with patch.dict("os.environ", {}, clear=True):
            with pytest.raises(ValueError, match="DAYTONA_API_KEY not set"):
                DaytonaTools()
    def test_working_directory_management(self, mock_agent):
        """Test working directory get/set methods."""
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Test get with no directory set (falls back to the default)
            assert tools._get_working_directory(mock_agent) == "/home/daytona"
            # Test set (stored on the agent's session_state)
            tools._set_working_directory(mock_agent, "/tmp")
            assert mock_agent.session_state["working_directory"] == "/tmp"
            # Test get with directory set
            assert tools._get_working_directory(mock_agent) == "/tmp"
    def test_create_sandbox_persistent(self, mock_daytona, mock_agent):
        """Test persistent sandbox creation."""
        mock_client, mock_sandbox, _, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools(persistent=True)
            # Mock the get method to return the same sandbox
            mock_client.get.return_value = mock_sandbox
            # First call creates sandbox and records its id on the session
            sandbox1 = tools._get_or_create_sandbox(mock_agent)
            assert sandbox1 == mock_sandbox
            assert mock_agent.session_state["sandbox_id"] == "test-sandbox-123"
            # Second call reuses sandbox via get()
            sandbox2 = tools._get_or_create_sandbox(mock_agent)
            assert sandbox2 == mock_sandbox
            assert mock_client.create.call_count == 1  # Only called once
            assert mock_client.get.call_count >= 1  # get() called to retrieve existing sandbox
    def test_create_sandbox_non_persistent(self, mock_daytona, mock_agent):
        """Test non-persistent sandbox creation."""
        mock_client, mock_sandbox, _, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools(persistent=False)
            # Each call creates new sandbox
            _ = tools._get_or_create_sandbox(mock_agent)
            _ = tools._get_or_create_sandbox(mock_agent)
            assert mock_client.create.call_count == 2  # Called twice
    def test_run_python_code(self, mock_daytona, mock_agent):
        """Test run_code method with Python code."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock execution result
            mock_execution = MagicMock()
            mock_execution.result = "Hello, World!"
            mock_process.code_run.return_value = mock_execution
            # Test execution
            result = tools.run_code(mock_agent, "print('Hello, World!')")
            assert result == "Hello, World!"
    def test_run_shell_command(self, mock_daytona, mock_agent):
        """Test run_shell_command method."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock execution
            mock_execution = MagicMock()
            mock_execution.exit_code = 0
            mock_execution.result = "total 4\nfile1.txt\nfile2.txt"
            mock_process.exec.return_value = mock_execution
            # Test shell command
            result = tools.run_shell_command(mock_agent, "ls -la")
            assert "total 4" in result
    def test_run_shell_command_cd(self, mock_daytona, mock_agent):
        """Test run_shell_command with cd command."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Test with a simple absolute path that won't be resolved
            mock_test = MagicMock()
            mock_test.result = "exists"
            mock_process.exec.return_value = mock_test
            # Test cd command (handled specially: updates the working directory)
            result = tools.run_shell_command(mock_agent, "cd /home/test")
            assert "Changed directory to:" in result
            assert "/home/test" in result
    def test_create_file(self, mock_daytona, mock_agent):
        """Test create_file method."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock successful file creation
            mock_execution = MagicMock()
            mock_execution.exit_code = 0
            mock_process.exec.return_value = mock_execution
            # Relative path is resolved against the default working directory
            result = tools.create_file(mock_agent, "test.txt", "Hello, World!")
            assert "File created/updated: /home/daytona/test.txt" in result
    def test_read_file(self, mock_daytona, mock_agent):
        """Test read_file method."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock file read
            mock_execution = MagicMock()
            mock_execution.exit_code = 0
            mock_execution.result = "File contents"
            mock_process.exec.return_value = mock_execution
            # Test file read
            result = tools.read_file(mock_agent, "test.txt")
            assert result == "File contents"
    def test_list_files(self, mock_daytona, mock_agent):
        """Test list_files method."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock ls output
            mock_execution = MagicMock()
            mock_execution.exit_code = 0
            mock_execution.result = "file1.txt\nfile2.py\ndir1/"
            mock_process.exec.return_value = mock_execution
            # Test list files
            result = tools.list_files(mock_agent, ".")
            assert "file1.txt" in result
            assert "file2.py" in result
    def test_delete_file(self, mock_daytona, mock_agent):
        """Test delete_file method."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock file check and deletion: side_effect yields one result per
            # exec() call, in order.
            mock_check = MagicMock()
            mock_check.result = "file"
            mock_delete = MagicMock()
            mock_delete.exit_code = 0
            mock_process.exec.side_effect = [
                mock_check,  # test -d check
                mock_delete,  # rm command
            ]
            # Test file deletion
            result = tools.delete_file(mock_agent, "test.txt")
            assert result == "Deleted: /home/daytona/test.txt"
    def test_change_directory(self, mock_daytona, mock_agent):
        """Test change_directory method."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Mock the test -d response to indicate directory exists
            mock_test = MagicMock()
            mock_test.result = "exists"
            mock_process.exec.return_value = mock_test
            # Test directory change
            result = tools.change_directory(mock_agent, "/home/test")
            assert "Changed directory to:" in result
            assert "/home/test" in result
            # Check that working directory was updated
            assert mock_agent.session_state["working_directory"] == "/home/test"
    def test_ssl_configuration(self):
        """Test SSL configuration."""
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            # Test with SSL verification disabled
            tools = DaytonaTools(verify_ssl=False)
            assert tools.verify_ssl is False
            # Test with SSL verification enabled (default)
            tools = DaytonaTools(verify_ssl=True)
            assert tools.verify_ssl is True
    def test_error_handling(self, mock_daytona, mock_agent):
        """Test error handling in various methods."""
        mock_client, mock_sandbox, mock_process, _ = mock_daytona
        with patch.dict("os.environ", {"DAYTONA_API_KEY": "test-key"}):
            tools = DaytonaTools()
            # Errors during execution are returned as strings, not raised
            mock_process.code_run.side_effect = Exception("Execution error")
            result = tools.run_code(mock_agent, "print('test')")
            assert "error" in result
            assert "Execution error" in result
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_daytona.py",
"license": "Apache License 2.0",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/embedder/test_jina_embedder.py | import pytest
from agno.knowledge.embedder.jina import JinaEmbedder
@pytest.fixture
def embedder():
    """Return a JinaEmbedder built with default settings for each test."""
    return JinaEmbedder()
def test_embedder_initialization(embedder):
    """Test that the embedder initializes correctly"""
    assert embedder is not None
    assert embedder.id == "jina-embeddings-v3"  # Field is 'id' not 'model'
    assert embedder.dimensions == 1024
    assert embedder.embedding_type == "float"
    assert not embedder.late_chunking
    # Integration tests hit the live API; fail fast with a clear message if
    # no key was picked up from the environment.
    assert embedder.api_key is not None, "JINA_API_KEY env variable is not set"
def test_get_embedding(embedder):
    """A plain English sentence should embed into a full-length float vector."""
    sample = "The quick brown fox jumps over the lazy dog."
    vector = embedder.get_embedding(sample)

    # Container type, non-emptiness, element type, and exact dimensionality.
    assert isinstance(vector, list)
    assert len(vector) > 0
    assert all(isinstance(component, float) for component in vector)
    assert len(vector) == embedder.dimensions
def test_get_embedding_and_usage(embedder):
    """Test that we can get embeddings with usage information"""
    text = "Test embedding with usage information."
    embedding, usage = embedder.get_embedding_and_usage(text)

    # Check embedding
    assert isinstance(embedding, list)
    assert len(embedding) > 0
    assert all(isinstance(x, float) for x in embedding)
    assert len(embedding) == embedder.dimensions

    # Check usage (may be None if not provided by API)
    # The exact usage schema is provider-defined, so only the type is asserted.
    assert usage is None or isinstance(usage, dict)
def test_special_characters(embedder):
    """Punctuation, CJK characters and digits must still embed cleanly."""
    mixed_text = "Hello, world! こんにちは 123 @#$%"
    vector = embedder.get_embedding(mixed_text)

    assert isinstance(vector, list)
    assert len(vector) > 0
    assert len(vector) == embedder.dimensions
def test_long_text(embedder):
"""Test that long text is handled correctly"""
text = " ".join(["word"] * 1000) # Create a long text
embeddings = embedder.get_embedding(text)
assert isinstance(embeddings, list)
assert len(embeddings) > 0
assert len(embeddings) == embedder.dimensions
def test_embedding_consistency(embedder):
    """Embedding the same text twice should give (near-)identical vectors."""
    phrase = "Consistency test"
    first = embedder.get_embedding(phrase)
    second = embedder.get_embedding(phrase)

    assert len(first) == len(second)
    # Tolerate tiny numeric drift between calls, but nothing beyond 1e-3.
    for left, right in zip(first, second):
        assert abs(left - right) < 1e-3
def test_custom_configuration():
    """Test embedder with custom configuration"""
    custom_embedder = JinaEmbedder(
        id="jina-embeddings-v3",  # Field is 'id' not 'model'
        dimensions=512,  # Different dimensions
        embedding_type="float",
        late_chunking=True,
        timeout=30.0,
    )

    text = "Test with custom configuration"
    embeddings = custom_embedder.get_embedding(text)

    assert isinstance(embeddings, list)
    assert len(embeddings) > 0
    # Deliberately no exact-length assertion here, because:
    # Note: dimensions might still be 1024 if the API doesn't support 512 for this model
def test_different_embedding_types():
    """Test different embedding output types"""
    # Test with float type (default); other types (e.g. binary) are not
    # exercised here.
    float_embedder = JinaEmbedder(embedding_type="float")
    text = "Test different embedding types"
    embeddings = float_embedder.get_embedding(text)

    assert isinstance(embeddings, list)
    assert all(isinstance(x, float) for x in embeddings)
def test_late_chunking_feature():
    """Late chunking should still return a single fixed-size vector."""
    embedder_with_chunking = JinaEmbedder(late_chunking=True)

    # A repeated sentence stands in for a long document worth chunking.
    document = "This is a longer document that would benefit from late chunking. " * 50
    vector = embedder_with_chunking.get_embedding(document)

    assert isinstance(vector, list)
    assert len(vector) > 0
    assert len(vector) == embedder_with_chunking.dimensions
def test_api_key_validation():
    """Test that missing API key is handled gracefully"""
    # NOTE(review): assumes JinaEmbedder(api_key=None) does not silently fall
    # back to the JINA_API_KEY environment variable — confirm, since that env
    # var is expected to be set for the other tests in this module.
    embedder_no_key = JinaEmbedder(api_key=None)

    # The embedder should return empty list when API key is missing
    # (since the error is caught and logged as warning)
    embeddings = embedder_no_key.get_embedding("Test text")
    assert embeddings == []
def test_empty_text_handling(embedder):
    """Test handling of empty text"""
    embeddings = embedder.get_embedding("")
    # Should return empty list or handle gracefully; only the container type
    # is asserted because the API's behavior for "" is not pinned down here.
    assert isinstance(embeddings, list)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/embedder/test_jina_embedder.py",
"license": "Apache License 2.0",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/bitbucket.py | import base64
import json
from os import getenv
from typing import Any, Dict, Optional, Union
import requests
from agno.tools import Toolkit
from agno.utils.log import logger
class BitbucketTools(Toolkit):
    """Toolkit for interacting with the Bitbucket Cloud REST API.

    Scoped to a single workspace and repository slug supplied at construction
    time. Every request authenticates with an HTTP Basic header built from the
    username plus either an app password or an API token (the token wins when
    both are present).
    """

    def __init__(
        self,
        server_url: str = "api.bitbucket.org",
        username: Optional[str] = None,
        password: Optional[str] = None,
        token: Optional[str] = None,
        workspace: Optional[str] = None,
        repo_slug: Optional[str] = None,
        api_version: str = "2.0",
        **kwargs,
    ):
        # Credentials fall back to environment variables when not passed explicitly.
        self.username = username or getenv("BITBUCKET_USERNAME")
        self.password = password or getenv("BITBUCKET_PASSWORD")
        self.token = token or getenv("BITBUCKET_TOKEN")
        # The token, when available, takes priority over the password.
        self.auth_password = self.token or self.password
        self.server_url = server_url or "api.bitbucket.org"
        self.api_version = api_version or "2.0"
        # Prepend https:// unless the caller already supplied a scheme.
        self.base_url = (
            f"https://{self.server_url}/{api_version}"
            if not self.server_url.startswith(("http://", "https://"))
            else f"{self.server_url}/{api_version}"
        )
        self.workspace = workspace
        self.repo_slug = repo_slug

        if not (self.username and self.auth_password):
            raise ValueError("Username and password or token are required")
        if not self.workspace:
            raise ValueError("Workspace is required")
        if not self.repo_slug:
            raise ValueError("Repo slug is required")

        # The Basic-auth header is computed once here and reused for every request.
        self.headers = {"Accept": "application/json", "Authorization": f"Basic {self._generate_access_token()}"}

        super().__init__(
            name="bitbucket",
            tools=[
                self.list_repositories,
                self.get_repository_details,
                self.create_repository,
                self.list_repository_commits,
                self.list_all_pull_requests,
                self.get_pull_request_details,
                self.get_pull_request_changes,
                self.list_issues,
            ],
            **kwargs,
        )

    def _generate_access_token(self) -> str:
        """Return the base64-encoded "username:secret" pair for Basic auth."""
        auth_str = f"{self.username}:{self.auth_password}"
        auth_bytes = auth_str.encode("ascii")
        auth_base64 = base64.b64encode(auth_bytes).decode("ascii")
        return auth_base64

    def _make_request(
        self,
        method: str,
        endpoint: str,
        params: Optional[Dict[str, Any]] = None,
        data: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Dict[str, Any]]:
        """Issue one HTTP request against the API and decode the response.

        Returns parsed JSON for JSON responses, raw text for text/plain
        responses (e.g. pull-request diffs), and an empty dict for any other
        content type. Raises requests.HTTPError for non-2xx status codes via
        raise_for_status().
        """
        url = f"{self.base_url}{endpoint}"
        response = requests.request(method, url, headers=self.headers, json=data, params=params)
        response.raise_for_status()

        encoding_type = response.headers.get("Content-Type", "application/json")
        if encoding_type.startswith("application/json"):
            # Empty bodies decode to an empty dict rather than raising.
            return response.json() if response.text else {}
        elif encoding_type == "text/plain":
            return response.text

        logger.warning(f"Unsupported content type: {encoding_type}")
        return {}

    def list_repositories(self, count: int = 10) -> str:
        """
        Get all repositories in the workspace.

        Args:
            count (int, optional): The number of repositories to retrieve

        Returns:
            str: A JSON string containing repository list.
        """
        try:
            # Limit count to maximum of 50
            count = min(count, 50)
            # Use count directly as pagelen for simplicity, max out at 50 per our limit
            # NOTE(review): this recomputes the same min() as the line above —
            # redundant but harmless.
            pagelen = min(count, 50)

            params = {"page": 1, "pagelen": pagelen}
            repo = self._make_request("GET", f"/repositories/{self.workspace}", params=params)
            return json.dumps(repo, indent=2)
        except Exception as e:
            logger.error(f"Error retrieving repository list for workspace {self.workspace}: {str(e)}")
            return json.dumps({"error": str(e)})

    def get_repository_details(self) -> str:
        """
        Retrieves repository information.
        API Docs: https://developer.atlassian.com/cloud/bitbucket/rest/api-group-repositories/#api-repositories-workspace-repo-slug-get

        Returns:
            str: A JSON string containing repository information.
        """
        try:
            repo = self._make_request("GET", f"/repositories/{self.workspace}/{self.repo_slug}")
            return json.dumps(repo, indent=2)
        except Exception as e:
            logger.error(f"Error retrieving repository information for {self.repo_slug}: {str(e)}")
            return json.dumps({"error": str(e)})

    def create_repository(
        self,
        name: str,
        project: Optional[str] = None,
        is_private: bool = False,
        description: Optional[str] = None,
        language: Optional[str] = None,
        has_issues: bool = False,
        has_wiki: bool = False,
    ) -> str:
        """
        Creates a new repository in Bitbucket for the given workspace.

        Args:
            name (str): The name of the new repository.
            project (str, optional): The key of the project to create the repository in.
            is_private (bool, optional): Whether the repository is private.
            description (str, optional): A short description of the repository.
            language (str, optional): The primary language of the repository
            has_issues (bool, optional): Whether the repository has issues enabled.
            has_wiki (bool, optional): Whether the repository has a wiki enabled.

        Returns:
            str: A JSON string containing repository information.
        """
        try:
            payload: Dict[str, Any] = {
                "name": name,
                "scm": "git",
                "is_private": is_private,
                "description": description,
                "language": language,
                "has_issues": has_issues,
                "has_wiki": has_wiki,
            }
            if project:
                payload["project"] = {"key": project}

            # NOTE(review): the POST path reuses self.repo_slug (the slug this
            # toolkit was constructed with) rather than a slug derived from
            # `name`. Bitbucket's create-repository endpoint expects the NEW
            # repository's slug in the URL — confirm this is intentional.
            repo = self._make_request("POST", f"/repositories/{self.workspace}/{self.repo_slug}", data=payload)
            return json.dumps(repo, indent=2)
        except Exception as e:
            logger.error(f"Error creating repository {self.repo_slug} for {self.workspace}: {str(e)}")
            return json.dumps({"error": str(e)})

    def list_repository_commits(self, count: int = 10) -> str:
        """
        Retrieves all commits in a repository.

        Args:
            count (int, optional): The number of commits to retrieve. Defaults to 10. Maximum 50.

        Returns:
            str: A JSON string containing all commits.
        """
        try:
            count = min(count, 50)
            params = {"pagelen": count}
            commits = self._make_request(
                "GET", f"/repositories/{self.workspace}/{self.repo_slug}/commits", params=params
            )
            if isinstance(commits, dict) and commits.get("next"):
                # Follow "next" links until we have `count` commits or pages run out.
                collected_commits = commits.get("values", [])
                while len(collected_commits) < count and isinstance(commits, dict) and commits.get("next"):
                    next_url = commits["next"]  # type: ignore
                    # Reuse only the query string from the absolute "next" URL so
                    # the request still goes through our base_url.
                    query_param = next_url.split("?")[1] if "?" in next_url else ""
                    commits = self._make_request(
                        "GET", f"/repositories/{self.workspace}/{self.repo_slug}/commits?{query_param}"
                    )
                    if isinstance(commits, dict):
                        collected_commits.extend(commits.get("values", []))
                if isinstance(commits, dict):
                    # Trim any overshoot from the last page fetched.
                    commits["values"] = collected_commits[:count]
            return json.dumps(commits, indent=2)
        except Exception as e:
            logger.error(f"Error retrieving commits for {self.repo_slug}: {str(e)}")
            return json.dumps({"error": str(e)})

    def list_all_pull_requests(self, state: str = "OPEN") -> str:
        """
        Retrieves all pull requests for a repository.

        Args:
            state (str, optional): The state of the pull requests to retrieve.

        Returns:
            str: A JSON string containing all pull requests.
        """
        try:
            # Unknown states silently fall back to OPEN instead of erroring.
            if state not in ["OPEN", "MERGED", "DECLINED", "SUPERSEDED"]:
                logger.debug(f"Invalid pull request state: {state}. Defaulting to OPEN")
                state = "OPEN"

            params = {"state": state}
            pull_requests = self._make_request(
                "GET", f"/repositories/{self.workspace}/{self.repo_slug}/pullrequests", params=params
            )
            return json.dumps(pull_requests, indent=2)
        except Exception as e:
            logger.error(f"Error retrieving pull requests for {self.repo_slug}: {str(e)}")
            return json.dumps({"error": str(e)})

    def get_pull_request_details(self, pull_request_id: int) -> str:
        """
        Retrieves a pull request for a repository.

        Args:
            pull_request_id (int): The ID of the pull request to retrieve.

        Returns:
            str: A JSON string containing the pull request.
        """
        try:
            pull_request = self._make_request(
                "GET", f"/repositories/{self.workspace}/{self.repo_slug}/pullrequests/{pull_request_id}"
            )
            return json.dumps(pull_request, indent=2)
        except Exception as e:
            logger.error(f"Error retrieving pull requests for {self.repo_slug}: {str(e)}")
            return json.dumps({"error": str(e)})

    def get_pull_request_changes(self, pull_request_id: int) -> str:
        """
        Retrieves changes for a pull request in a repository.

        Args:
            pull_request_id (int): The ID of the pull request to retrieve.

        Returns:
            str: A markdown string containing the pull request diff.
        """
        try:
            diff = self._make_request(
                "GET", f"/repositories/{self.workspace}/{self.repo_slug}/pullrequests/{pull_request_id}/diff"
            )
            # The diff endpoint normally returns text/plain; a dict here means
            # the API returned JSON (e.g. an error body), so serialize it.
            if isinstance(diff, dict):
                return json.dumps(diff, indent=2)
            return diff
        except Exception as e:
            logger.error(f"Error retrieving changes for pull request {pull_request_id} in {self.repo_slug}: {str(e)}")
            return json.dumps({"error": str(e)})

    def list_issues(self, count: int = 10) -> str:
        """
        Retrieves all issues for a repository.

        Args:
            count (int, optional): The number of issues to retrieve. Defaults to 10. Maximum 50.

        Returns:
            str: A JSON string containing all issues.
        """
        try:
            count = min(count, 50)
            params = {"pagelen": count}
            issues = self._make_request("GET", f"/repositories/{self.workspace}/{self.repo_slug}/issues", params=params)
            return json.dumps(issues, indent=2)
        except Exception as e:
            logger.error(f"Error retrieving issues for {self.repo_slug}: {str(e)}")
            return json.dumps({"error": str(e)})
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/bitbucket.py",
"license": "Apache License 2.0",
"lines": 247,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_bitbucket.py | import json
import os
from unittest.mock import MagicMock, patch
import pytest
import requests
from agno.tools.bitbucket import BitbucketTools
class TestBitbucketTools:
    """Test suite for BitbucketTools class.

    All HTTP traffic is mocked: constructor tests patch os.environ, transport
    tests patch requests.request, and endpoint tests patch the toolkit's own
    _make_request so only the request-building logic is exercised.
    """

    @pytest.fixture
    def mock_env_vars(self):
        """Mock environment variables for testing."""
        with patch.dict(
            os.environ,
            {
                "BITBUCKET_USERNAME": "test_user",
                "BITBUCKET_PASSWORD": "test_password",
            },
        ):
            yield

    @pytest.fixture
    def bitbucket_tools(self, mock_env_vars):
        """Create BitbucketTools instance for testing."""
        return BitbucketTools(workspace="test_workspace", repo_slug="test_repo")

    @pytest.fixture
    def bitbucket_tools_with_token(self, mock_env_vars):
        """Create BitbucketTools instance with token for testing."""
        # NOTE(review): not referenced by any test below — kept for future
        # token-auth cases.
        with patch.dict(os.environ, {"BITBUCKET_TOKEN": "test_token"}):
            return BitbucketTools(workspace="test_workspace", repo_slug="test_repo")

    def test_init_with_required_params(self, mock_env_vars):
        """Test successful initialization with required parameters."""
        tools = BitbucketTools(workspace="test_workspace", repo_slug="test_repo")

        assert tools.workspace == "test_workspace"
        assert tools.repo_slug == "test_repo"
        assert tools.username == "test_user"
        assert tools.auth_password == "test_password"
        assert tools.server_url == "api.bitbucket.org"
        assert tools.api_version == "2.0"
        assert "Basic" in tools.headers["Authorization"]

    def test_init_with_custom_params(self, mock_env_vars):
        """Test initialization with custom parameters."""
        tools = BitbucketTools(
            workspace="custom_workspace",
            repo_slug="custom_repo",
            server_url="custom.bitbucket.com",
            username="custom_user",
            password="custom_password",
            api_version="2.1",
        )

        assert tools.workspace == "custom_workspace"
        assert tools.repo_slug == "custom_repo"
        assert tools.username == "custom_user"
        assert tools.auth_password == "custom_password"
        assert tools.server_url == "custom.bitbucket.com"
        assert tools.api_version == "2.1"

    def test_init_with_token_priority(self, mock_env_vars):
        """Test that token takes priority over password."""
        tools = BitbucketTools(
            workspace="test_workspace", repo_slug="test_repo", token="test_token", password="test_password"
        )

        # auth_password is the value used for Basic auth; both originals are kept.
        assert tools.auth_password == "test_token"
        assert tools.token == "test_token"
        assert tools.password == "test_password"

    def test_init_missing_credentials(self):
        """Test initialization fails without credentials."""
        with patch.dict(os.environ, {}, clear=True):  # Clear all environment variables
            with pytest.raises(ValueError, match="Username and password or token are required"):
                BitbucketTools(workspace="test_workspace", repo_slug="test_repo")

    def test_init_missing_workspace(self, mock_env_vars):
        """Test initialization fails without workspace."""
        with pytest.raises(ValueError, match="Workspace is required"):
            BitbucketTools(repo_slug="test_repo")

    def test_init_missing_repo_slug(self, mock_env_vars):
        """Test initialization fails without repo_slug."""
        with pytest.raises(ValueError, match="Repo slug is required"):
            BitbucketTools(workspace="test_workspace")

    def test_generate_access_token(self, bitbucket_tools):
        """Test access token generation."""
        token = bitbucket_tools._generate_access_token()
        assert isinstance(token, str)
        assert len(token) > 0

    @patch("requests.request")
    def test_make_request_json_response(self, mock_request, bitbucket_tools):
        """Test _make_request with JSON response."""
        mock_response = MagicMock()
        mock_response.headers = {"Content-Type": "application/json"}
        mock_response.json.return_value = {"test": "data"}
        mock_response.text = '{"test": "data"}'
        mock_request.return_value = mock_response

        result = bitbucket_tools._make_request("GET", "/test")

        assert result == {"test": "data"}
        mock_request.assert_called_once()

    @patch("requests.request")
    def test_make_request_text_response(self, mock_request, bitbucket_tools):
        """Test _make_request with text response."""
        mock_response = MagicMock()
        mock_response.headers = {"Content-Type": "text/plain"}
        mock_response.text = "test data"
        mock_request.return_value = mock_response

        result = bitbucket_tools._make_request("GET", "/test")

        assert result == "test data"

    @patch("requests.request")
    def test_make_request_empty_json_response(self, mock_request, bitbucket_tools):
        """Test _make_request with empty JSON response."""
        mock_response = MagicMock()
        mock_response.headers = {"Content-Type": "application/json"}
        mock_response.text = ""
        mock_request.return_value = mock_response

        result = bitbucket_tools._make_request("GET", "/test")

        assert result == {}

    @patch("requests.request")
    def test_make_request_unsupported_content_type(self, mock_request, bitbucket_tools):
        """Test _make_request with unsupported content type."""
        mock_response = MagicMock()
        mock_response.headers = {"Content-Type": "application/xml"}
        mock_request.return_value = mock_response

        with patch("agno.tools.bitbucket.logger.warning") as mock_logger:
            result = bitbucket_tools._make_request("GET", "/test")
            # Unsupported types yield an empty dict and a warning, not an error.
            assert result == {}
            mock_logger.assert_called_once()

    @patch("requests.request")
    def test_make_request_http_error(self, mock_request, bitbucket_tools):
        """Test _make_request with HTTP error."""
        mock_request.side_effect = requests.exceptions.HTTPError("HTTP Error")

        with pytest.raises(requests.exceptions.HTTPError):
            bitbucket_tools._make_request("GET", "/test")

    @patch.object(BitbucketTools, "_make_request")
    def test_list_repositories_success(self, mock_request, bitbucket_tools):
        """Test list_repositories method success."""
        mock_response = {"values": [{"name": "repo1"}, {"name": "repo2"}], "page": 1, "size": 2}
        mock_request.return_value = mock_response

        result = bitbucket_tools.list_repositories(count=5)

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response
        mock_request.assert_called_once_with("GET", "/repositories/test_workspace", params={"page": 1, "pagelen": 5})

    @patch.object(BitbucketTools, "_make_request")
    def test_list_repositories_max_count(self, mock_request, bitbucket_tools):
        """Test list_repositories respects maximum count of 50."""
        mock_response = {"values": []}
        mock_request.return_value = mock_response

        bitbucket_tools.list_repositories(count=100)

        # Should be limited to 50
        mock_request.assert_called_once_with("GET", "/repositories/test_workspace", params={"page": 1, "pagelen": 50})

    @patch.object(BitbucketTools, "_make_request")
    def test_list_repositories_error(self, mock_request, bitbucket_tools):
        """Test list_repositories error handling."""
        mock_request.side_effect = Exception("API Error")

        with patch("agno.tools.bitbucket.logger.error") as mock_logger:
            result = bitbucket_tools.list_repositories()
            result_data = json.loads(result)
            assert "error" in result_data
            mock_logger.assert_called_once()

    @patch.object(BitbucketTools, "_make_request")
    def test_get_repository_details_success(self, mock_request, bitbucket_tools):
        """Test get_repository_details method success."""
        mock_response = {"name": "test_repo", "full_name": "test_workspace/test_repo"}
        mock_request.return_value = mock_response

        result = bitbucket_tools.get_repository_details()

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response
        mock_request.assert_called_once_with("GET", "/repositories/test_workspace/test_repo")

    @patch.object(BitbucketTools, "_make_request")
    def test_create_repository_success(self, mock_request, bitbucket_tools):
        """Test create_repository method success."""
        mock_response = {"name": "new_repo", "is_private": False}
        mock_request.return_value = mock_response

        result = bitbucket_tools.create_repository(name="new_repo", description="Test repository", is_private=False)

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response

        expected_payload = {
            "name": "new_repo",
            "scm": "git",
            "is_private": False,
            "description": "Test repository",
            "language": None,
            "has_issues": False,
            "has_wiki": False,
        }
        # Pins the current implementation's POST path (the instance's
        # repo_slug), not a slug derived from the new name.
        mock_request.assert_called_once_with("POST", "/repositories/test_workspace/test_repo", data=expected_payload)

    @patch.object(BitbucketTools, "_make_request")
    def test_create_repository_with_project(self, mock_request, bitbucket_tools):
        """Test create_repository with project parameter."""
        mock_response = {"name": "new_repo"}
        mock_request.return_value = mock_response

        bitbucket_tools.create_repository(name="new_repo", project="TEST")

        call_args = mock_request.call_args
        payload = call_args[1]["data"]
        assert payload["project"] == {"key": "TEST"}

    @patch.object(BitbucketTools, "_make_request")
    def test_list_repository_commits_success(self, mock_request, bitbucket_tools):
        """Test list_repository_commits method success."""
        mock_response = {"values": [{"hash": "abc123"}, {"hash": "def456"}], "next": None}
        mock_request.return_value = mock_response

        result = bitbucket_tools.list_repository_commits(count=10)

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response
        mock_request.assert_called_once_with(
            "GET", "/repositories/test_workspace/test_repo/commits", params={"pagelen": 10}
        )

    @patch.object(BitbucketTools, "_make_request")
    def test_list_repository_commits_with_pagination(self, mock_request, bitbucket_tools):
        """Test list_repository_commits with pagination."""
        # First response with next page
        first_response = {
            "values": [{"hash": "abc123"}],
            "next": "https://api.bitbucket.org/repositories/test_workspace/test_repo/commits?page=2",
        }
        # Second response
        second_response = {"values": [{"hash": "def456"}], "next": None}

        mock_request.side_effect = [first_response, second_response]

        result = bitbucket_tools.list_repository_commits(count=10)

        result_data = json.loads(result)
        # Both pages' commits should be merged, preserving page order.
        assert len(result_data["values"]) == 2
        assert result_data["values"][0]["hash"] == "abc123"
        assert result_data["values"][1]["hash"] == "def456"
        assert mock_request.call_count == 2

    @patch.object(BitbucketTools, "_make_request")
    def test_list_all_pull_requests_success(self, mock_request, bitbucket_tools):
        """Test list_all_pull_requests method success."""
        mock_response = {"values": [{"id": 1, "title": "PR 1"}, {"id": 2, "title": "PR 2"}]}
        mock_request.return_value = mock_response

        result = bitbucket_tools.list_all_pull_requests(state="OPEN")

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response
        mock_request.assert_called_once_with(
            "GET", "/repositories/test_workspace/test_repo/pullrequests", params={"state": "OPEN"}
        )

    @patch.object(BitbucketTools, "_make_request")
    def test_list_all_pull_requests_invalid_state(self, mock_request, bitbucket_tools):
        """Test list_all_pull_requests with invalid state defaults to OPEN."""
        mock_response = {"values": []}
        mock_request.return_value = mock_response

        with patch("agno.tools.bitbucket.logger.debug") as mock_logger:
            bitbucket_tools.list_all_pull_requests(state="INVALID")
            mock_logger.assert_called_once()
            # Should default to OPEN state
            mock_request.assert_called_once_with(
                "GET", "/repositories/test_workspace/test_repo/pullrequests", params={"state": "OPEN"}
            )

    @patch.object(BitbucketTools, "_make_request")
    def test_get_pull_request_details_success(self, mock_request, bitbucket_tools):
        """Test get_pull_request_details method success."""
        mock_response = {"id": 123, "title": "Test PR"}
        mock_request.return_value = mock_response

        result = bitbucket_tools.get_pull_request_details(pull_request_id=123)

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response
        mock_request.assert_called_once_with("GET", "/repositories/test_workspace/test_repo/pullrequests/123")

    @patch.object(BitbucketTools, "_make_request")
    def test_get_pull_request_changes_success(self, mock_request, bitbucket_tools):
        """Test get_pull_request_changes method success."""
        # A plain-text diff must be passed through untouched (no JSON encoding).
        mock_diff = "diff --git a/file.txt b/file.txt\n+added line"
        mock_request.return_value = mock_diff

        result = bitbucket_tools.get_pull_request_changes(pull_request_id=123)

        assert result == mock_diff
        mock_request.assert_called_once_with("GET", "/repositories/test_workspace/test_repo/pullrequests/123/diff")

    @patch.object(BitbucketTools, "_make_request")
    def test_list_issues_success(self, mock_request, bitbucket_tools):
        """Test list_issues method success."""
        mock_response = {"values": [{"id": 1, "title": "Issue 1"}, {"id": 2, "title": "Issue 2"}]}
        mock_request.return_value = mock_response

        result = bitbucket_tools.list_issues(count=10)

        assert isinstance(result, str)
        result_data = json.loads(result)
        assert result_data == mock_response
        mock_request.assert_called_once_with(
            "GET", "/repositories/test_workspace/test_repo/issues", params={"pagelen": 10}
        )

    @patch.object(BitbucketTools, "_make_request")
    def test_list_issues_max_count(self, mock_request, bitbucket_tools):
        """Test list_issues respects maximum count of 50."""
        mock_response = {"values": []}
        mock_request.return_value = mock_response

        bitbucket_tools.list_issues(count=100)

        # Should be limited to 50
        mock_request.assert_called_once_with(
            "GET", "/repositories/test_workspace/test_repo/issues", params={"pagelen": 50}
        )

    def test_base_url_construction_with_protocol(self, mock_env_vars):
        """Test base URL construction when server_url already has protocol."""
        tools = BitbucketTools(
            workspace="test_workspace", repo_slug="test_repo", server_url="https://custom.bitbucket.com"
        )
        assert tools.base_url == "https://custom.bitbucket.com/2.0"

    def test_base_url_construction_without_protocol(self, mock_env_vars):
        """Test base URL construction when server_url doesn't have protocol."""
        tools = BitbucketTools(workspace="test_workspace", repo_slug="test_repo", server_url="custom.bitbucket.com")
        assert tools.base_url == "https://custom.bitbucket.com/2.0"

    def test_tools_registration(self, bitbucket_tools):
        """Test that all expected tools are registered."""
        # Check that the tools list contains the expected methods
        assert hasattr(bitbucket_tools, "list_repositories")
        assert hasattr(bitbucket_tools, "get_repository_details")
        assert hasattr(bitbucket_tools, "create_repository")
        assert hasattr(bitbucket_tools, "list_repository_commits")
        assert hasattr(bitbucket_tools, "list_all_pull_requests")
        assert hasattr(bitbucket_tools, "get_pull_request_details")
        assert hasattr(bitbucket_tools, "get_pull_request_changes")
        assert hasattr(bitbucket_tools, "list_issues")

    @patch.object(BitbucketTools, "_make_request")
    def test_error_handling_returns_json_error(self, mock_request, bitbucket_tools):
        """Test that errors are properly formatted as JSON."""
        mock_request.side_effect = Exception("Test error")

        with patch("agno.tools.bitbucket.logger.error"):
            result = bitbucket_tools.list_repositories()
            result_data = json.loads(result)
            assert "error" in result_data
            assert "Test error" in result_data["error"]

    def test_env_var_fallbacks(self):
        """Test environment variable fallbacks work correctly."""
        with patch.dict(
            os.environ,
            {"BITBUCKET_USERNAME": "env_user", "BITBUCKET_PASSWORD": "env_password", "BITBUCKET_TOKEN": "env_token"},
        ):
            tools = BitbucketTools(workspace="test_workspace", repo_slug="test_repo")
            assert tools.username == "env_user"
            assert tools.password == "env_password"
            assert tools.token == "env_token"
            assert tools.auth_password == "env_token"  # Token takes priority
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_bitbucket.py",
"license": "Apache License 2.0",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/models/portkey/portkey.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, Optional, cast
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
try:
from portkey_ai import PORTKEY_GATEWAY_URL, createHeaders
except ImportError:
raise ImportError("`portkey-ai` not installed. Please install using `pip install portkey-ai`")
@dataclass
class Portkey(OpenAILike):
    """
    A class for using models through the Portkey AI Gateway.

    Attributes:
        id (str): The model id. Defaults to "gpt-4o-mini".
        name (str): The model name. Defaults to "Portkey".
        provider (str): The provider name. Defaults to "Portkey".
        portkey_api_key (Optional[str]): The Portkey API key. Falls back to the
            PORTKEY_API_KEY environment variable when not set.
        virtual_key (Optional[str]): The virtual key for model routing. Falls back
            to the PORTKEY_VIRTUAL_KEY environment variable when not set.
        config (Optional[Dict[str, Any]]): Portkey configuration for routing, retries, etc.
        base_url (str): The Portkey gateway URL.
    """

    id: str = "gpt-4o-mini"
    name: str = "Portkey"
    provider: str = "Portkey"

    portkey_api_key: Optional[str] = None
    virtual_key: Optional[str] = None
    config: Optional[Dict[str, Any]] = None
    base_url: str = PORTKEY_GATEWAY_URL

    def _get_client_params(self) -> Dict[str, Any]:
        """Build the OpenAI-client keyword arguments for the Portkey gateway.

        Portkey auth is carried entirely in request headers (created via
        createHeaders), so the client's own api_key is a placeholder.

        Returns:
            Dict[str, Any]: Non-None client parameters, with Portkey headers
                merged into default_headers.

        Raises:
            ModelAuthenticationError: If no Portkey API key was provided and
                PORTKEY_API_KEY is not set in the environment.
        """
        # Fall back to the environment, mirroring the virtual-key lookup below.
        # (Previously this raised even when PORTKEY_API_KEY was set, despite the
        # error message telling the user to set that exact variable.)
        self.portkey_api_key = self.portkey_api_key or getenv("PORTKEY_API_KEY")
        if not self.portkey_api_key:
            raise ModelAuthenticationError(
                message="PORTKEY_API_KEY not set. Please set the PORTKEY_API_KEY environment variable.",
                model_name=self.name,
            )

        self.virtual_key = self.virtual_key or getenv("PORTKEY_VIRTUAL_KEY")

        # Create headers using Portkey's createHeaders function
        header_params: Dict[str, Any] = {
            "api_key": self.portkey_api_key,
            "virtual_key": self.virtual_key,
        }
        if self.config is not None:
            header_params["config"] = self.config
        portkey_headers = cast(Dict[str, Any], createHeaders(**header_params))

        # Merge with any existing default headers; Portkey's headers win on conflict.
        default_headers: Dict[str, Any] = {}
        if self.default_headers and isinstance(self.default_headers, dict):
            default_headers.update(self.default_headers)
        default_headers.update(portkey_headers)

        # Define base client params
        base_params = {
            "api_key": "not-needed",  # We use virtual keys instead
            "organization": self.organization,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": default_headers,
            "default_query": self.default_query,
        }
        # Create client_params dict with non-None values
        client_params = {k: v for k, v in base_params.items() if v is not None}

        # Add additional client params if provided
        if self.client_params:
            client_params.update(self.client_params)
        return client_params
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/portkey/portkey.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/vectordb/surrealdb/surrealdb.py | from typing import Any, Dict, Final, List, Optional, Union
try:
from surrealdb import (
AsyncHttpSurrealConnection,
AsyncWsSurrealConnection,
BlockingHttpSurrealConnection,
BlockingWsSurrealConnection,
)
except ImportError as e:
msg = "The `surrealdb` package is not installed. Please install it via `pip install surrealdb`."
raise ImportError(msg) from e
from agno.filters import FilterExpr
from agno.knowledge.document import Document
from agno.knowledge.embedder import Embedder
from agno.utils.log import log_debug, log_error, log_warning
from agno.vectordb.base import VectorDb
from agno.vectordb.distance import Distance
class SurrealDb(VectorDb):
    """SurrealDB Vector Database implementation supporting both sync and async operations."""
    # SQL Query Constants
    # Templates are filled in with str.format() before execution; runtime values
    # ($name, $id, $query_embedding, ...) are bound as query parameters instead.
    CREATE_TABLE_QUERY: Final[str] = """
    DEFINE TABLE IF NOT EXISTS {collection} SCHEMAFUL;
    DEFINE FIELD IF NOT EXISTS content ON {collection} TYPE string;
    DEFINE FIELD IF NOT EXISTS embedding ON {collection} TYPE array<float>;
    DEFINE FIELD IF NOT EXISTS meta_data ON {collection} FLEXIBLE TYPE object;
    DEFINE INDEX IF NOT EXISTS vector_idx ON {collection} FIELDS embedding HNSW DIMENSION {dimensions} DIST {distance};
    """
    NAME_EXISTS_QUERY: Final[str] = """
    SELECT * FROM {collection}
    WHERE meta_data.name = $name
    LIMIT 1
    """
    ID_EXISTS_QUERY: Final[str] = """
    SELECT * FROM {collection}
    WHERE id = $id
    LIMIT 1
    """
    CONTENT_HASH_EXISTS_QUERY: Final[str] = """
    SELECT * FROM {collection}
    WHERE meta_data.content_hash = $content_hash
    LIMIT 1
    """
    DELETE_BY_ID_QUERY: Final[str] = """
    DELETE FROM {collection}
    WHERE id = $id
    """
    DELETE_BY_NAME_QUERY: Final[str] = """
    DELETE FROM {collection}
    WHERE meta_data.name = $name
    """
    DELETE_BY_METADATA_QUERY: Final[str] = """
    DELETE FROM {collection}
    WHERE {conditions}
    """
    DELETE_BY_CONTENT_ID_QUERY: Final[str] = """
    DELETE FROM {collection}
    WHERE content_id = $content_id
    """
    UPSERT_QUERY: Final[str] = """
    UPSERT {thing}
    SET content = $content,
        embedding = $embedding,
        meta_data = $meta_data
    """
    # KNN search via the HNSW index: <|{limit}, {search_ef}|> is SurrealQL's
    # approximate-nearest-neighbour operator (limit, search effort).
    SEARCH_QUERY: Final[str] = """
    SELECT
        content,
        meta_data,
        vector::distance::knn() as distance
    FROM {collection}
    WHERE embedding <|{limit}, {search_ef}|> $query_embedding
    {filter_condition}
    ORDER BY distance ASC
    LIMIT {limit};
    """
    INFO_DB_QUERY: Final[str] = "INFO FOR DB;"
    DROP_TABLE_QUERY: Final[str] = "REMOVE TABLE {collection}"
    DELETE_ALL_QUERY: Final[str] = "DELETE {collection}"
    def __init__(
        self,
        client: Optional[Union[BlockingWsSurrealConnection, BlockingHttpSurrealConnection]] = None,
        async_client: Optional[Union[AsyncWsSurrealConnection, AsyncHttpSurrealConnection]] = None,
        collection: str = "documents",
        distance: Distance = Distance.cosine,
        efc: int = 150,
        m: int = 12,
        search_ef: int = 40,
        embedder: Optional[Embedder] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        id: Optional[str] = None,
    ):
        """Initialize SurrealDB connection.
        Args:
            client: A blocking connection, either HTTP or WS
            async_client: An async connection, either HTTP or WS (default: None)
            collection: Collection name to store documents (default: documents)
            distance: Distance metric to use (default: cosine)
            efc: HNSW construction time/accuracy trade-off (default: 150)
            m: HNSW max number of connections per element (default: 12)
            search_ef: HNSW search time/accuracy trade-off (default: 40)
            embedder: Embedder instance for creating embeddings (default: OpenAIEmbedder)
            name: Optional human-readable name passed to the VectorDb base class
            description: Optional description passed to the VectorDb base class
            id: Optional stable identifier; derived from client + collection when omitted
        Raises:
            RuntimeError: If neither a sync nor an async client is provided.
        """
        # Dynamic ID generation based on unique identifiers
        if id is None:
            from agno.utils.string import generate_id
            # NOTE(review): str(client) likely embeds a memory address, so the
            # derived id may differ between processes for the same config — confirm.
            client_info = str(client) if client else str(async_client) if async_client else "default"
            seed = f"{client_info}#{collection}"
            id = generate_id(seed)
        # Initialize base class with name, description, and generated ID
        super().__init__(id=id, name=name, description=description)
        # Embedder for embedding the document contents
        if embedder is None:
            from agno.knowledge.embedder.openai import OpenAIEmbedder
            embedder = OpenAIEmbedder()
            log_debug("Embedder not provided, using OpenAIEmbedder as default.")
        self.embedder: Embedder = embedder
        # Vector dimensionality is taken from the embedder and baked into the index DDL.
        self.dimensions = self.embedder.dimensions
        self.collection = collection
        # Convert Distance enum to SurrealDB distance type
        self.distance = {Distance.cosine: "COSINE", Distance.l2: "EUCLIDEAN", Distance.max_inner_product: "DOT"}[
            distance
        ]
        self._client: Optional[Union[BlockingHttpSurrealConnection, BlockingWsSurrealConnection]] = client
        self._async_client: Optional[Union[AsyncWsSurrealConnection, AsyncHttpSurrealConnection]] = async_client
        # At least one transport is required; sync-only methods will still raise
        # lazily via the `client` property if only an async client was given.
        if self._client is None and self._async_client is None:
            msg = "Client and async client are not provided. Please provide one of them."
            raise RuntimeError(msg)
        # HNSW index parameters
        self.efc = efc
        self.m = m
        self.search_ef = search_ef
    @property
    def async_client(self) -> Union[AsyncWsSurrealConnection, AsyncHttpSurrealConnection]:
        """Check if the async client is initialized.
        Raises:
            RuntimeError: If the async client is not initialized.
        Returns:
            The async client.
        """
        if self._async_client is None:
            msg = "Async client is not initialized"
            raise RuntimeError(msg)
        return self._async_client
    @property
    def client(self) -> Union[BlockingHttpSurrealConnection, BlockingWsSurrealConnection]:
        """Check if the client is initialized.
        Raises:
            RuntimeError: If the blocking client is not initialized.
        Returns:
            The client.
        """
        if self._client is None:
            msg = "Client is not initialized"
            raise RuntimeError(msg)
        return self._client
    @staticmethod
    def _build_filter_condition(filters: Optional[Dict[str, Any]] = None) -> str:
        """Build filter condition for queries.
        Args:
            filters: A dictionary of filters to apply to the query.
        Returns:
            A string representing the filter condition.
        """
        if not filters:
            return ""
        # NOTE(review): filter KEYS are interpolated into the query text (values
        # are bound as $params). Safe only while keys are trusted identifiers.
        conditions = [f"meta_data.{key} = ${key}" for key in filters]
        return "AND " + " AND ".join(conditions)
    # Synchronous methods
    def create(self) -> None:
        """Create the vector collection and index."""
        if not self.exists():
            log_debug(f"Creating collection: {self.collection}")
            # NOTE(review): efc/m are passed here but CREATE_TABLE_QUERY has no
            # {efc}/{m} placeholders — str.format ignores the extra kwargs, so the
            # HNSW index is created with server-default EFC/M. Confirm intended.
            query = self.CREATE_TABLE_QUERY.format(
                collection=self.collection,
                distance=self.distance,
                dimensions=self.dimensions,
                efc=self.efc,
                m=self.m,
            )
            self.client.query(query)
    def name_exists(self, name: str) -> bool:
        """Check if a document exists by its name.
        Args:
            name: The name of the document to check.
        Returns:
            True if the document exists, False otherwise.
        """
        log_debug(f"Checking if document exists: {name}")
        result = self.client.query(self.NAME_EXISTS_QUERY.format(collection=self.collection), {"name": name})
        return bool(self._extract_result(result))
    def id_exists(self, id: str) -> bool:
        """Check if a document exists by its ID.
        Args:
            id: The ID of the document to check.
        Returns:
            True if the document exists, False otherwise.
        """
        log_debug(f"Checking if document exists by ID: {id}")
        result = self.client.query(self.ID_EXISTS_QUERY.format(collection=self.collection), {"id": id})
        return bool(self._extract_result(result))
    def content_hash_exists(self, content_hash: str) -> bool:
        """Check if a document exists by its content hash.
        Args:
            content_hash: The content hash of the document to check.
        Returns:
            True if the document exists, False otherwise.
        """
        log_debug(f"Checking if document exists by content hash: {content_hash}")
        result = self.client.query(
            self.CONTENT_HASH_EXISTS_QUERY.format(collection=self.collection), {"content_hash": content_hash}
        )
        return bool(self._extract_result(result))
    def insert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
        """Insert documents into the vector store.
        Args:
            content_hash: The content hash for the documents.
            documents: A list of documents to insert.
            filters: A dictionary of filters to apply to the query.
        """
        for doc in documents:
            # Embeds in place via the configured embedder before persisting.
            doc.embed(embedder=self.embedder)
            # NOTE(review): when doc.meta_data is a dict this ALIASES it, so the
            # content_hash (and filters below) are written back onto the caller's
            # Document object — confirm this side effect is intended.
            meta_data: Dict[str, Any] = doc.meta_data if isinstance(doc.meta_data, dict) else {}
            meta_data["content_hash"] = content_hash
            data: Dict[str, Any] = {"content": doc.content, "embedding": doc.embedding, "meta_data": meta_data}
            if filters:
                data["meta_data"].update(filters)
            self.client.create(self.collection, data)
    def upsert(self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
        """Upsert documents into the vector store.
        Args:
            content_hash: The content hash for the documents.
            documents: A list of documents to upsert.
            filters: A dictionary of filters to apply to the query.
        """
        for doc in documents:
            doc.embed(embedder=self.embedder)
            # NOTE(review): same meta_data aliasing/mutation caveat as insert().
            meta_data: Dict[str, Any] = doc.meta_data if isinstance(doc.meta_data, dict) else {}
            meta_data["content_hash"] = content_hash
            data: Dict[str, Any] = {"content": doc.content, "embedding": doc.embedding, "meta_data": meta_data}
            if filters:
                data["meta_data"].update(filters)
            # Target a specific record id when the document has one; otherwise
            # UPSERT on the bare table name.
            thing = f"{self.collection}:{doc.id}" if doc.id else self.collection
            self.client.query(self.UPSERT_QUERY.format(thing=thing), data)
    def search(
        self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
    ) -> List[Document]:
        """Search for similar documents.
        Args:
            query: The query to search for.
            limit: The maximum number of documents to return.
            filters: A dictionary of filters to apply to the query.
        Returns:
            A list of documents that are similar to the query.
        """
        # FilterExpr lists are not supported by this backend; degrade gracefully.
        if isinstance(filters, List):
            log_warning("Filters Expressions are not supported in SurrealDB. No filters will be applied.")
            filters = None
        query_embedding = self.embedder.get_embedding(query)
        if query_embedding is None:
            log_error(f"Error getting embedding for Query: {query}")
            return []
        filter_condition = self._build_filter_condition(filters)
        log_debug(f"Filter condition: {filter_condition}")
        search_query = self.SEARCH_QUERY.format(
            collection=self.collection,
            limit=limit,
            search_ef=self.search_ef,
            filter_condition=filter_condition,
            distance=self.distance,
        )
        log_debug(f"Search query: {search_query}")
        # Filter values are bound as named parameters alongside the embedding.
        response = self.client.query(
            search_query,
            {"query_embedding": query_embedding, **filters} if filters else {"query_embedding": query_embedding},
        )
        log_debug(f"Search response: {response}")
        documents = []
        # NOTE(review): iterates the response directly as a flat list of row
        # dicts, unlike _extract_result() used elsewhere — assumes the driver
        # returns rows (not [{"result": ...}]) for this call. Verify per driver.
        for item in response:
            if isinstance(item, dict):
                doc = Document(
                    content=item.get("content", ""),
                    embedding=item.get("embedding", []),
                    meta_data=item.get("meta_data", {}),
                    embedder=self.embedder,
                )
                documents.append(doc)
        log_debug(f"Found {len(documents)} documents")
        return documents
    def drop(self) -> None:
        """Drop the vector collection."""
        log_debug(f"Dropping collection: {self.collection}")
        self.client.query(self.DROP_TABLE_QUERY.format(collection=self.collection))
    def exists(self) -> bool:
        """Check if the vector collection exists.
        Returns:
            True if the collection exists, False otherwise.
        """
        log_debug(f"Checking if collection exists: {self.collection}")
        response = self.client.query(self.INFO_DB_QUERY)
        result = self._extract_result(response)
        # INFO FOR DB returns a dict whose "tables" maps table name -> definition.
        if isinstance(result, dict) and "tables" in result:
            return self.collection in result["tables"]
        return False
    def delete(self) -> bool:
        """Delete all documents from the vector store.
        Returns:
            True if the collection was deleted, False otherwise.
        """
        # Always reports success; the table itself is kept, only rows are removed.
        self.client.query(self.DELETE_ALL_QUERY.format(collection=self.collection))
        return True
    def delete_by_id(self, id: str) -> bool:
        """Delete a document by its ID.
        Args:
            id: The ID of the document to delete.
        Returns:
            True if the document was deleted, False otherwise.
        """
        log_debug(f"Deleting document by ID: {id}")
        # NOTE(review): return value is truthiness of the raw driver response,
        # not a deleted-row count — confirm it distinguishes "not found".
        result = self.client.query(self.DELETE_BY_ID_QUERY.format(collection=self.collection), {"id": id})
        return bool(result)
    def delete_by_name(self, name: str) -> bool:
        """Delete documents by their name.
        Args:
            name: The name of the documents to delete.
        Returns:
            True if documents were deleted, False otherwise.
        """
        log_debug(f"Deleting documents by name: {name}")
        result = self.client.query(self.DELETE_BY_NAME_QUERY.format(collection=self.collection), {"name": name})
        return bool(result)
    def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
        """Delete documents by their metadata.
        Args:
            metadata: The metadata to match for deletion.
        Returns:
            True if documents were deleted, False otherwise.
        """
        log_debug(f"Deleting documents by metadata: {metadata}")
        # Same key-interpolation caveat as _build_filter_condition: metadata keys
        # become part of the query text, values are bound parameters.
        conditions = [f"meta_data.{key} = ${key}" for key in metadata.keys()]
        conditions_str = " AND ".join(conditions)
        query = self.DELETE_BY_METADATA_QUERY.format(collection=self.collection, conditions=conditions_str)
        result = self.client.query(query, metadata)
        return bool(result)
    def delete_by_content_id(self, content_id: str) -> bool:
        """Delete documents by their content ID.
        Args:
            content_id: The content ID of the documents to delete.
        Returns:
            True if documents were deleted, False otherwise.
        """
        log_debug(f"Deleting documents by content ID: {content_id}")
        # NOTE(review): matches a top-level `content_id` field, while inserts only
        # write content/embedding/meta_data — verify which writer sets content_id.
        result = self.client.query(
            self.DELETE_BY_CONTENT_ID_QUERY.format(collection=self.collection), {"content_id": content_id}
        )
        return bool(result)
    @staticmethod
    def _extract_result(query_result: Union[List[Dict[str, Any]], Dict[str, Any]]) -> Union[List[Any], Dict[str, Any]]:
        """Extract the actual result from SurrealDB query response.
        Args:
            query_result: The query result from SurrealDB.
        Returns:
            The actual result from SurrealDB query response.
        """
        log_debug(f"Query result: {query_result}")
        # Two accepted shapes: a bare dict (returned as-is), or a list of
        # per-statement wrappers where the first element's "result" holds rows.
        if isinstance(query_result, dict):
            return query_result
        if isinstance(query_result, list):
            if len(query_result) > 0:
                return query_result[0].get("result", {})
            return []
        return []
    async def async_create(self) -> None:
        """Create the vector collection and index asynchronously."""
        # NOTE(review): unlike create(), there is no async_exists() pre-check;
        # relies on IF NOT EXISTS in the DDL. Same unused efc/m kwargs as create().
        log_debug(f"Creating collection: {self.collection}")
        await self.async_client.query(
            self.CREATE_TABLE_QUERY.format(
                collection=self.collection,
                distance=self.distance,
                dimensions=self.dimensions,
                efc=self.efc,
                m=self.m,
            ),
        )
    async def async_name_exists(self, name: str) -> bool:
        """Check if a document exists by its name asynchronously.
        Returns:
            True if the document exists, False otherwise.
        """
        response = await self.async_client.query(
            self.NAME_EXISTS_QUERY.format(collection=self.collection),
            {"name": name},
        )
        return bool(self._extract_result(response))
    async def async_insert(
        self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
    ) -> None:
        """Insert documents into the vector store asynchronously.
        Args:
            content_hash: The content hash for the documents.
            documents: A list of documents to insert.
            filters: A dictionary of filters to apply to the query.
        """
        for doc in documents:
            # NOTE: doc.embed() is synchronous; embedding is not awaited here.
            doc.embed(embedder=self.embedder)
            # Same meta_data aliasing/mutation caveat as the sync insert().
            meta_data: Dict[str, Any] = doc.meta_data if isinstance(doc.meta_data, dict) else {}
            meta_data["content_hash"] = content_hash
            data: Dict[str, Any] = {"content": doc.content, "embedding": doc.embedding, "meta_data": meta_data}
            if filters:
                data["meta_data"].update(filters)
            log_debug(f"Inserting document asynchronously: {doc.name} ({doc.meta_data})")
            await self.async_client.create(self.collection, data)
    async def async_upsert(
        self, content_hash: str, documents: List[Document], filters: Optional[Dict[str, Any]] = None
    ) -> None:
        """Upsert documents into the vector store asynchronously.
        Args:
            content_hash: The content hash for the documents.
            documents: A list of documents to upsert.
            filters: A dictionary of filters to apply to the query.
        """
        for doc in documents:
            doc.embed(embedder=self.embedder)
            meta_data: Dict[str, Any] = doc.meta_data if isinstance(doc.meta_data, dict) else {}
            meta_data["content_hash"] = content_hash
            data: Dict[str, Any] = {"content": doc.content, "embedding": doc.embedding, "meta_data": meta_data}
            if filters:
                data["meta_data"].update(filters)
            log_debug(f"Upserting document asynchronously: {doc.name} ({doc.meta_data})")
            thing = f"{self.collection}:{doc.id}" if doc.id else self.collection
            await self.async_client.query(self.UPSERT_QUERY.format(thing=thing), data)
    async def async_search(
        self,
        query: str,
        limit: int = 5,
        filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None,
    ) -> List[Document]:
        """Search for similar documents asynchronously.
        Args:
            query: The query to search for.
            limit: The maximum number of documents to return.
            filters: A dictionary of filters to apply to the query.
        Returns:
            A list of documents that are similar to the query.
        """
        if isinstance(filters, List):
            log_warning("Filters Expressions are not supported in SurrealDB. No filters will be applied.")
            filters = None
        # NOTE: embedding is computed synchronously, mirroring the sync search().
        query_embedding = self.embedder.get_embedding(query)
        if query_embedding is None:
            log_error(f"Error getting embedding for Query: {query}")
            return []
        filter_condition = self._build_filter_condition(filters)
        search_query = self.SEARCH_QUERY.format(
            collection=self.collection,
            limit=limit,
            search_ef=self.search_ef,
            filter_condition=filter_condition,
            distance=self.distance,
        )
        response = await self.async_client.query(
            search_query,
            {"query_embedding": query_embedding, **filters} if filters else {"query_embedding": query_embedding},
        )
        log_debug(f"Search response: {response}")
        documents = []
        for item in response:
            if isinstance(item, dict):
                doc = Document(
                    content=item.get("content", ""),
                    embedding=item.get("embedding", []),
                    meta_data=item.get("meta_data", {}),
                    embedder=self.embedder,
                )
                documents.append(doc)
        log_debug(f"Found {len(documents)} documents asynchronously")
        return documents
    async def async_drop(self) -> None:
        """Drop the vector collection asynchronously."""
        log_debug(f"Dropping collection: {self.collection}")
        await self.async_client.query(self.DROP_TABLE_QUERY.format(collection=self.collection))
    async def async_exists(self) -> bool:
        """Check if the vector collection exists asynchronously.
        Returns:
            True if the collection exists, False otherwise.
        """
        log_debug(f"Checking if collection exists: {self.collection}")
        response = await self.async_client.query(self.INFO_DB_QUERY)
        result = self._extract_result(response)
        if isinstance(result, dict) and "tables" in result:
            return self.collection in result["tables"]
        return False
    @staticmethod
    def upsert_available() -> bool:
        """Check if upsert is available.
        Returns:
            True if upsert is available, False otherwise.
        """
        return True
    def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
        """
        Update the metadata for documents with the given content_id.
        Args:
            content_id (str): The content ID to update
            metadata (Dict[str, Any]): The metadata to update
        Raises:
            Exception: Re-raises any driver error after logging it.
        """
        try:
            # Query for documents with the given content_id
            query = f"SELECT * FROM {self.collection} WHERE content_id = $content_id"
            result = self.client.query(query, {"content_id": content_id})
            # NOTE(review): assumes the [{"result": rows}] response shape, unlike
            # search() which iterates rows directly — verify against the driver.
            if not result or not result[0].get("result"):
                log_debug(f"No documents found with content_id: {content_id}")
                return
            documents = result[0]["result"]
            updated_count = 0
            # Update each matching document
            for doc in documents:
                doc_id = doc["id"]
                current_metadata = doc.get("meta_data", {})
                current_filters = doc.get("filters", {})
                # Merge existing metadata with new metadata
                if isinstance(current_metadata, dict):
                    updated_metadata = current_metadata.copy()
                    updated_metadata.update(metadata)
                else:
                    updated_metadata = metadata
                # Merge existing filters with new metadata
                if isinstance(current_filters, dict):
                    updated_filters = current_filters.copy()
                    updated_filters.update(metadata)
                else:
                    updated_filters = metadata
                # Update the document
                update_query = f"UPDATE {doc_id} SET meta_data = $metadata, filters = $filters"
                self.client.query(update_query, {"metadata": updated_metadata, "filters": updated_filters})
                updated_count += 1
            log_debug(f"Updated metadata for {updated_count} documents with content_id: {content_id}")
        except Exception as e:
            log_error(f"Error updating metadata for content_id '{content_id}': {e}")
            raise
    def get_supported_search_types(self) -> List[str]:
        """Get the supported search types for this vector database."""
        return []  # SurrealDb doesn't use SearchType enum
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/surrealdb/surrealdb.py",
"license": "Apache License 2.0",
"lines": 541,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/vectordb/test_surrealdb.py | from typing import Generator, List
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.knowledge.document import Document
from agno.vectordb.surrealdb import SurrealDb
try:
from surrealdb import Surreal
except ImportError:
raise ImportError("surrealdb is not installed")
@pytest.fixture
def mock_embedder() -> MagicMock:
    """Provide a stand-in embedder that always yields a fixed 384-dim vector."""
    fake = MagicMock()
    fake.dimensions = 384
    fake.embedding_dim = 384
    fake.get_embedding.return_value = [0.1] * 384
    fake.get_embedding_and_usage.return_value = [0.1] * 384, {}
    return fake
@pytest.fixture(scope="function")
def mock_surrealdb_client() -> Generator[MagicMock, None, None]:
    """Yield a blocking SurrealDB client double with stubbed query/create."""
    with patch("surrealdb.Surreal") as patched_cls:
        fake_client = MagicMock(spec=Surreal)
        # Stub the two driver entry points the vector db exercises.
        fake_client.query = MagicMock(return_value=[])
        fake_client.create = MagicMock(return_value=[])
        patched_cls.return_value = fake_client
        yield fake_client
@pytest.fixture(scope="function")
def mock_async_surrealdb_client() -> Generator[AsyncMock, None, None]:
    """Yield an async SurrealDB client double with awaitable query/create stubs."""
    with patch("surrealdb.AsyncSurreal") as patched_cls:
        fake_client = AsyncMock(spec=Surreal)
        # Stub the two driver entry points the vector db awaits.
        fake_client.query = AsyncMock(return_value=[])
        fake_client.create = AsyncMock(return_value=[])
        patched_cls.return_value = fake_client
        yield fake_client
@pytest.fixture
def surrealdb_vector(mock_surrealdb_client: MagicMock, mock_embedder: MagicMock) -> SurrealDb:
    """Build a SurrealDb wired to the mocked blocking client and embedder."""
    return SurrealDb(
        collection="test_collection",
        embedder=mock_embedder,
        client=mock_surrealdb_client,
    )
@pytest.fixture
def async_surrealdb_vector(
    mock_surrealdb_client: MagicMock, mock_async_surrealdb_client: MagicMock, mock_embedder: MagicMock
) -> SurrealDb:
    """Build a SurrealDb with both mocked transports for the async tests."""
    return SurrealDb(
        collection="test_collection",
        embedder=mock_embedder,
        client=mock_surrealdb_client,
        async_client=mock_async_surrealdb_client,
    )
@pytest.fixture
def sample_documents() -> List[Document]:
    """Three Thai-cuisine documents shared across the suite."""
    specs = [
        ("Tom Kha Gai is a Thai coconut soup with chicken", "soup", "tom_kha"),
        ("Pad Thai is a stir-fried rice noodle dish", "noodles", "pad_thai"),
        ("Green curry is a spicy Thai curry with coconut milk", "curry", "green_curry"),
    ]
    return [
        Document(content=content, meta_data={"cuisine": "Thai", "type": dish_type}, name=name)
        for content, dish_type, name in specs
    ]
def test_build_filter_condition(surrealdb_vector):
    """No filters yields an empty string; dict filters yield ANDed clauses."""
    assert surrealdb_vector._build_filter_condition(None) == ""
    condition = surrealdb_vector._build_filter_condition({"cuisine": "Thai", "type": "soup"})
    assert "AND meta_data.cuisine = $cuisine" in condition
    assert "AND meta_data.type = $type" in condition
def test_create(surrealdb_vector, mock_surrealdb_client):
    """create() issues the table/index DDL when the collection is absent."""
    with patch.object(surrealdb_vector, "exists", return_value=False):
        surrealdb_vector.create()
    mock_surrealdb_client.query.assert_called_once()
    ddl = mock_surrealdb_client.query.call_args[0][0]
    assert "DEFINE TABLE IF NOT EXISTS test_collection" in ddl
    assert "DEFINE INDEX IF NOT EXISTS vector_idx" in ddl
    assert f"DIMENSION {surrealdb_vector.dimensions}" in ddl
def test_exists(surrealdb_vector, mock_surrealdb_client):
    """exists() reflects whether the collection appears in INFO FOR DB."""
    cases = [
        ([{"result": {"tables": {"test_collection": {}}}}], True),
        ([{"result": {"tables": {}}}], False),
    ]
    for payload, expected in cases:
        mock_surrealdb_client.query.return_value = payload
        assert surrealdb_vector.exists() is expected
def test_name_exists(surrealdb_vector, mock_surrealdb_client):
    """name_exists() is True for a populated result and False for an empty one."""
    cases = [
        ([{"result": [{"name": "tom_kha"}]}], "tom_kha", True),
        ([{"result": []}], "nonexistent", False),
    ]
    for payload, doc_name, expected in cases:
        mock_surrealdb_client.query.return_value = payload
        assert surrealdb_vector.name_exists(doc_name) is expected
def test_insert(surrealdb_vector, mock_surrealdb_client, sample_documents):
    """insert() creates one record per document in the target collection."""
    surrealdb_vector.insert(content_hash="test_hash", documents=sample_documents)
    assert mock_surrealdb_client.create.call_count == 3
    first_args, _ = mock_surrealdb_client.create.call_args_list[0]
    assert first_args[0] == "test_collection"
    for field in ("content", "embedding", "meta_data"):
        assert field in first_args[1]
def test_upsert(surrealdb_vector, mock_surrealdb_client, sample_documents):
    """upsert() issues one UPSERT query per document with the full payload."""
    surrealdb_vector.upsert(content_hash="test_hash", documents=sample_documents)
    assert mock_surrealdb_client.query.call_count == 3
    first_args, _ = mock_surrealdb_client.query.call_args_list[0]
    assert "UPSERT test_collection" in first_args[0]
    assert "SET content = $content" in first_args[0]
    for field in ("content", "embedding", "meta_data"):
        assert field in first_args[1]
def test_search(surrealdb_vector: SurrealDb, mock_surrealdb_client: MagicMock) -> None:
    """search() maps query rows to Documents and builds the expected KNN query."""
    assert surrealdb_vector.client is mock_surrealdb_client
    mock_surrealdb_client.query.return_value = [
        {
            "content": "Tom Kha Gai is a Thai coconut soup with chicken",
            "meta_data": {"cuisine": "Thai", "type": "soup", "name": "tom_kha"},
            "distance": 0.1,
        },
        {
            "content": "Green curry is a spicy Thai curry with coconut milk",
            "meta_data": {"cuisine": "Thai", "type": "curry", "name": "green_curry"},
            "distance": 0.2,
        },
    ]
    docs = surrealdb_vector.search("Thai food", limit=2)
    assert [d.content for d in docs] == [
        "Tom Kha Gai is a Thai coconut soup with chicken",
        "Green curry is a spicy Thai curry with coconut milk",
    ]
    mock_surrealdb_client.query.assert_called_once()
    sql = mock_surrealdb_client.query.call_args[0][0]
    for fragment in ("SELECT", "FROM test_collection", "WHERE embedding <|2, 40|>", "LIMIT 2"):
        assert fragment in sql
def test_drop(surrealdb_vector, mock_surrealdb_client):
    """drop() removes the backing table."""
    surrealdb_vector.drop()
    mock_surrealdb_client.query.assert_called_once()
    assert "REMOVE TABLE test_collection" in mock_surrealdb_client.query.call_args[0][0]
def test_delete(surrealdb_vector, mock_surrealdb_client):
    """delete() wipes every record and reports success."""
    outcome = surrealdb_vector.delete()
    mock_surrealdb_client.query.assert_called_once()
    assert "DELETE test_collection" in mock_surrealdb_client.query.call_args[0][0]
    assert outcome is True
def test_extract_result(surrealdb_vector):
    """_extract_result unwraps the first statement's 'result' payload."""
    rows = [{"id": 1}, {"id": 2}]
    assert surrealdb_vector._extract_result([{"result": rows}]) == rows
def test_upsert_available(surrealdb_vector):
    """SurrealDb advertises upsert support."""
    assert surrealdb_vector.upsert_available() is True
@pytest.mark.asyncio
async def test_async_create(async_surrealdb_vector, mock_async_surrealdb_client):
    """async_create() issues the DDL through the async client."""
    await async_surrealdb_vector.async_create()
    mock_async_surrealdb_client.query.assert_awaited_once()
    ddl = mock_async_surrealdb_client.query.await_args[0][0]
    assert "DEFINE TABLE IF NOT EXISTS test_collection" in ddl
    assert "DEFINE INDEX IF NOT EXISTS vector_idx" in ddl
    assert f"DIMENSION {async_surrealdb_vector.embedder.dimensions}" in ddl
@pytest.mark.asyncio
async def test_async_name_exists(async_surrealdb_vector, mock_async_surrealdb_client):
    """async_name_exists() mirrors the sync name-lookup semantics."""
    cases = [
        ([{"result": [{"name": "tom_kha"}]}], "tom_kha", True),
        ([{"result": []}], "nonexistent", False),
    ]
    for payload, doc_name, expected in cases:
        mock_async_surrealdb_client.query.return_value = payload
        assert await async_surrealdb_vector.async_name_exists(doc_name) is expected
@pytest.mark.asyncio
async def test_async_insert(async_surrealdb_vector, mock_async_surrealdb_client, sample_documents):
    """async_insert() awaits one create per document with the full payload."""
    await async_surrealdb_vector.async_insert(content_hash="test_hash", documents=sample_documents)
    assert mock_async_surrealdb_client.create.await_count == 3
    first_args, _ = mock_async_surrealdb_client.create.await_args_list[0]
    assert first_args[0] == "test_collection"
    for field in ("content", "embedding", "meta_data"):
        assert field in first_args[1]
@pytest.mark.asyncio
async def test_async_upsert(async_surrealdb_vector, mock_async_surrealdb_client, sample_documents):
    """async_upsert() awaits one UPSERT query per document."""
    await async_surrealdb_vector.async_upsert(content_hash="test_hash", documents=sample_documents)
    assert mock_async_surrealdb_client.query.await_count == 3
    first_args, _ = mock_async_surrealdb_client.query.await_args_list[0]
    assert "UPSERT test_collection" in first_args[0]
    assert "SET content = $content" in first_args[0]
    for field in ("content", "embedding", "meta_data"):
        assert field in first_args[1]
@pytest.mark.asyncio
async def test_async_search(async_surrealdb_vector: SurrealDb, mock_async_surrealdb_client: MagicMock) -> None:
    """async_search() maps rows to Documents and builds the expected KNN query."""
    mock_async_surrealdb_client.query.return_value = [
        {
            "content": "Tom Kha Gai is a Thai coconut soup with chicken",
            "meta_data": {"cuisine": "Thai", "type": "soup", "name": "tom_kha"},
            "distance": 0.1,
        },
        {
            "content": "Green curry is a spicy Thai curry with coconut milk",
            "meta_data": {"cuisine": "Thai", "type": "curry", "name": "green_curry"},
            "distance": 0.2,
        },
    ]
    docs = await async_surrealdb_vector.async_search("Thai food", limit=2)
    assert [d.content for d in docs] == [
        "Tom Kha Gai is a Thai coconut soup with chicken",
        "Green curry is a spicy Thai curry with coconut milk",
    ]
    mock_async_surrealdb_client.query.assert_awaited_once()
    sql = mock_async_surrealdb_client.query.await_args[0][0]
    for fragment in ("SELECT", "FROM test_collection", "WHERE embedding <|2, 40|>", "LIMIT 2"):
        assert fragment in sql
@pytest.mark.asyncio
async def test_async_drop(async_surrealdb_vector, mock_async_surrealdb_client):
    """async_drop() removes the backing table via the async client."""
    await async_surrealdb_vector.async_drop()
    mock_async_surrealdb_client.query.assert_awaited_once()
    assert "REMOVE TABLE test_collection" in mock_async_surrealdb_client.query.await_args[0][0]
@pytest.mark.asyncio
async def test_async_exists(async_surrealdb_vector: SurrealDb, mock_async_surrealdb_client: MagicMock) -> None:
    """async_exists() reflects whether the collection appears in INFO FOR DB."""
    cases = [
        ([{"result": {"tables": {"test_collection": {}}}}], True),
        ([{"result": {"tables": {}}}], False),
    ]
    for payload, expected in cases:
        mock_async_surrealdb_client.query.return_value = payload
        assert await async_surrealdb_vector.async_exists() is expected
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/vectordb/test_surrealdb.py",
"license": "Apache License 2.0",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_memory_impact.py | import gc
import tracemalloc
from typing import List, Tuple
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
class MemoryMonitor:
    """Track memory usage across agent operations via tracemalloc.

    Each reading is stored as an (index, current_MB) tuple, with a
    tracemalloc snapshot captured at the same moment (index-aligned).
    """

    def __init__(self):
        self.memory_readings: List[Tuple[int, float]] = []
        self.tracemalloc_snapshots: List[tracemalloc.Snapshot] = []
        self.baseline_memory = 0.0

    def start_monitoring(self):
        """Begin tracing allocations and record an initial reading."""
        tracemalloc.start()
        self._take_reading("start")

    def stop_monitoring(self):
        """Record a final reading and stop tracing."""
        self._take_reading("stop")
        tracemalloc.stop()

    def _take_reading(self, label: str):
        """Capture current/peak traced memory plus a snapshot, then log it."""
        traced_now, traced_peak = tracemalloc.get_traced_memory()
        # Division by powers of two is exact for floats, so this matches any
        # other bytes->MB conversion order bit-for-bit.
        current_memory_mb = traced_now / 1024 / 1024
        peak_memory_mb = traced_peak / 1024 / 1024

        self.memory_readings.append((len(self.memory_readings), current_memory_mb))
        self.tracemalloc_snapshots.append(tracemalloc.take_snapshot())

        print(f"Memory reading {label}: {current_memory_mb:.2f} MB (peak: {peak_memory_mb:.2f} MB)")

    def take_reading(self, label: str = ""):
        """Public wrapper around _take_reading with an optional label."""
        self._take_reading(label)

    def force_gc(self):
        """Run a full garbage collection, then record a reading."""
        gc.collect()
        self._take_reading("after_gc")

    def get_memory_growth(self) -> List[float]:
        """Return the MB delta between each pair of consecutive readings."""
        readings = self.memory_readings
        if len(readings) < 2:
            return []
        return [readings[idx][1] - readings[idx - 1][1] for idx in range(1, len(readings))]

    def get_peak_memory(self) -> float:
        """Highest recorded reading in MB, or 0.0 when none exist."""
        return max((mb for _, mb in self.memory_readings), default=0.0)

    def get_final_memory(self) -> float:
        """Most recent reading in MB, or 0.0 when none exist."""
        return self.memory_readings[-1][1] if self.memory_readings else 0.0

    def analyze_tracemalloc(self) -> dict:
        """Diff the first and last snapshots and report the top 10 allocation deltas."""
        if len(self.tracemalloc_snapshots) < 2:
            return {}

        diff = self.tracemalloc_snapshots[-1].compare_to(self.tracemalloc_snapshots[0], "lineno")
        return {
            "top_allocations": [
                {"file": entry.traceback.format()[-1], "size_diff": entry.size_diff, "count_diff": entry.count_diff}
                for entry in diff[:10]
            ]
        }
def test_agent_memory_impact_with_gc_monitoring(shared_db):
    """
    Test that creates an agent with memory and storage, runs a series of prompts,
    and monitors memory usage to verify garbage collection is working correctly.
    """
    # Create agent with memory and storage
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        update_memory_on_run=True,
    )

    # Initialize memory monitor
    monitor = MemoryMonitor()
    monitor.start_monitoring()

    session_id = "memory_test_session"
    user_id = "test_user"

    # Series of prompts to test memory usage
    prompts = [
        "Hello, my name is Alice and I like programming.",
        "I work as a software engineer at a tech company.",
        "My favorite programming language is Python.",
        "I enjoy reading science fiction books in my free time.",
        "I have a cat named Whiskers who is very playful.",
        "I'm planning to learn machine learning this year.",
        "What do you remember about me?",
        "Can you summarize our conversation so far?",
        "Tell me something interesting about Python programming.",
        "What are the best practices for memory management in Python?",
    ]

    # Memory (MB) captured right after each forced GC. MemoryMonitor stores
    # only (index, MB) tuples -- labels are not retained -- so these readings
    # must be collected here as they happen. (The previous version tried to
    # recover them later by matching "after_gc" against str(index), which
    # never matched, leaving the leak check dead.)
    post_gc_memory: List[float] = []

    try:
        # Run each prompt and monitor memory
        for i, prompt in enumerate(prompts):
            print(f"\n--- Running prompt {i + 1}/{len(prompts)} ---")
            monitor.take_reading(f"before_prompt_{i + 1}")

            # Run the agent
            response = agent.run(prompt, session_id=session_id, user_id=user_id)
            assert response is not None
            assert response.content is not None

            monitor.take_reading(f"after_prompt_{i + 1}")

            # Force garbage collection every few prompts to test GC effectiveness
            if (i + 1) % 3 == 0:
                print(f"--- Forcing garbage collection after prompt {i + 1} ---")
                monitor.force_gc()
                # The latest reading is the one taken just after gc.collect()
                post_gc_memory.append(monitor.get_final_memory())

        # Final memory analysis
        monitor.take_reading("final")

        # Get memory statistics
        memory_growth = monitor.get_memory_growth()
        peak_memory = monitor.get_peak_memory()
        final_memory = monitor.get_final_memory()
        # Result intentionally unused; exercises the snapshot-diff path
        monitor.analyze_tracemalloc()

        print("\n=== Memory Analysis ===")
        print(f"Peak memory usage: {peak_memory:.2f} MB")
        print(f"Final memory usage: {final_memory:.2f} MB")
        print(f"Number of memory readings: {len(monitor.memory_readings)}")

        if memory_growth:
            print(f"Average memory growth per operation: {sum(memory_growth) / len(memory_growth):.2f} MB")
            print(f"Max memory growth in single operation: {max(memory_growth):.2f} MB")

        # Verify that memory usage is reasonable - the agent should not utilize excessive memory
        assert final_memory < 1000, f"Final memory usage too high: {final_memory:.2f} MB"

        # Verify that garbage collection is working: memory measured right
        # after consecutive GCs should not keep growing.
        for prev_gc_memory, curr_gc_memory in zip(post_gc_memory, post_gc_memory[1:]):
            memory_increase = curr_gc_memory - prev_gc_memory
            # Allow some memory growth but not excessive
            assert memory_increase < 1, f"Memory leak detected: {memory_increase:.2f} MB increase after GC"

        # Verify that the agent's memory and storage are working correctly
        # Check that memories were created
        user_memories = shared_db.get_user_memories(user_id=user_id)
        assert len(user_memories) > 0, "No user memories were created"

        # Check that sessions were stored
        session_from_storage = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
        assert session_from_storage is not None, "Session was not stored"

        # Check that runs are in memory. (The old failure message indexed
        # runs[session_id] -- a list indexed by str -- which would raise
        # TypeError instead of showing the message.)
        assert session_from_storage.runs[0].session_id == session_id, "Session runs not found in memory"
        assert len(session_from_storage.runs) == len(prompts), (
            f"Expected {len(prompts)} runs, got {len(session_from_storage.runs)}"
        )

        print("✅ Memory impact test completed successfully")
        print(f"✅ Created {len(user_memories)} user memories")
        print(f"✅ Stored {len(session_from_storage.runs)} runs in memory")
    finally:
        monitor.stop_monitoring()
def test_agent_memory_cleanup_after_session_switch(shared_db):
    """
    Test that verifies memory is properly cleaned up when switching between sessions.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        update_memory_on_run=True,
    )

    monitor = MemoryMonitor()
    monitor.start_monitoring()

    user_id = "test_user_cleanup"

    try:
        # Exercise several independent sessions, forcing a GC after each one
        session_ids = ["session_1", "session_2", "session_3"]

        for ordinal, session_id in enumerate(session_ids, start=1):
            print(f"\n--- Testing session {session_id} ---")
            monitor.take_reading(f"before_session_{ordinal}")

            for prompt_number in range(1, 4):
                result = agent.run(
                    f"This is prompt {prompt_number} in session {session_id}",
                    session_id=session_id,
                    user_id=user_id,
                )
                assert result is not None
                assert result.content is not None

            monitor.take_reading(f"after_session_{ordinal}")
            monitor.force_gc()

        # Returning to the first session should not blow up memory usage
        print("\n--- Switching back to first session ---")
        monitor.take_reading("before_switch_back")
        result = agent.run(
            "What do you remember from our previous conversation?", session_id=session_ids[0], user_id=user_id
        )
        assert result is not None
        monitor.take_reading("after_switch_back")
        monitor.force_gc()

        # Memory footprint must stay within a generous upper bound
        final_memory = monitor.get_final_memory()
        assert final_memory < 1000, f"Final memory usage too high: {final_memory:.2f} MB"

        # Every session must have been persisted with its own runs
        for session_id in session_ids:
            stored_session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
            assert stored_session is not None, f"Session {session_id} was not stored"
            assert stored_session.runs[0].session_id == session_id, (
                f"Session {session_id} runs not found in memory"
            )

        print("✅ Session switching memory test completed successfully")
    finally:
        monitor.stop_monitoring()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_memory_impact.py",
"license": "Apache License 2.0",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_memory_impact.py | import asyncio
import gc
import tracemalloc
from typing import List, Tuple
import pytest
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
from agno.team.team import Team
class MemoryMonitor:
    """Utility class to monitor memory usage during team operations using tracemalloc."""

    def __init__(self):
        # Ordered (reading_index, memory_in_MB) pairs, oldest first
        self.memory_readings: List[Tuple[int, float]] = []
        # One tracemalloc snapshot per reading, index-aligned with memory_readings
        self.tracemalloc_snapshots: List[tracemalloc.Snapshot] = []
        self.baseline_memory = 0.0

    def start_monitoring(self):
        """Start memory monitoring."""
        tracemalloc.start()
        self._take_reading("start")

    def stop_monitoring(self):
        """Stop memory monitoring."""
        self._take_reading("stop")
        tracemalloc.stop()

    def _take_reading(self, label: str):
        """Record current traced memory (MB) and a snapshot, then print a summary."""
        traced_now, traced_peak = tracemalloc.get_traced_memory()
        current_memory_mb = traced_now / 1024 / 1024  # bytes -> MB
        peak_memory_mb = traced_peak / 1024 / 1024  # bytes -> MB

        self.memory_readings.append((len(self.memory_readings), current_memory_mb))
        self.tracemalloc_snapshots.append(tracemalloc.take_snapshot())

        print(f"Memory reading {label}: {current_memory_mb:.2f} MB (peak: {peak_memory_mb:.2f} MB)")

    def take_reading(self, label: str = ""):
        """Take a memory reading with optional label."""
        self._take_reading(label)

    def force_gc(self):
        """Force garbage collection and take a reading."""
        gc.collect()
        self._take_reading("after_gc")

    def get_memory_growth(self) -> List[float]:
        """Calculate memory growth (MB) between consecutive readings."""
        deltas: List[float] = []
        for previous, current in zip(self.memory_readings, self.memory_readings[1:]):
            deltas.append(current[1] - previous[1])
        return deltas

    def get_peak_memory(self) -> float:
        """Get peak memory usage (0.0 when no readings exist)."""
        if self.memory_readings:
            return max(mb for _, mb in self.memory_readings)
        return 0.0

    def get_final_memory(self) -> float:
        """Get final memory usage (0.0 when no readings exist)."""
        if self.memory_readings:
            return self.memory_readings[-1][1]
        return 0.0

    def analyze_tracemalloc(self) -> dict:
        """Diff first vs. last snapshot; report the 10 largest allocation deltas."""
        if len(self.tracemalloc_snapshots) < 2:
            return {}

        comparison = self.tracemalloc_snapshots[-1].compare_to(self.tracemalloc_snapshots[0], "lineno")

        top_allocations = []
        for stat in comparison[:10]:
            top_allocations.append(
                {"file": stat.traceback.format()[-1], "size_diff": stat.size_diff, "count_diff": stat.count_diff}
            )
        return {"top_allocations": top_allocations}
def test_team_memory_impact_with_gc_monitoring(shared_db):
    """
    Test that creates a team with memory and storage, runs a series of prompts,
    and monitors memory usage to verify garbage collection is working correctly.
    """

    # Create simple agents for the team
    def simple_calculator(operation: str, a: float, b: float) -> str:
        """Simple calculator function."""
        if operation == "add":
            return f"{a} + {b} = {a + b}"
        elif operation == "subtract":
            return f"{a} - {b} = {a - b}"
        elif operation == "multiply":
            return f"{a} * {b} = {a * b}"
        elif operation == "divide":
            return f"{a} / {b} = {a / b}"
        else:
            return f"Unknown operation: {operation}"

    def text_processor(text: str, operation: str) -> str:
        """Simple text processing function."""
        if operation == "uppercase":
            return text.upper()
        elif operation == "lowercase":
            return text.lower()
        elif operation == "length":
            return f"Length: {len(text)} characters"
        else:
            return f"Unknown operation: {operation}"

    # Create team members
    calculator_agent = Agent(
        name="Calculator Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        role="Perform mathematical calculations",
        tools=[simple_calculator],
        db=shared_db,
        update_memory_on_run=True,
    )

    text_agent = Agent(
        name="Text Processor Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        role="Process and analyze text",
        tools=[text_processor],
        db=shared_db,
        update_memory_on_run=True,
    )

    # Create team with memory and storage
    team = Team(
        name="Memory Test Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[calculator_agent, text_agent],
        respond_directly=True,
        determine_input_for_members=False,
        db=shared_db,
        update_memory_on_run=True,
        instructions="Route mathematical questions to the calculator agent and text processing questions to the text processor agent.",
    )

    # Initialize memory monitor
    monitor = MemoryMonitor()
    monitor.start_monitoring()

    session_id = "team_memory_test_session"
    user_id = "test_user"

    # Series of prompts to test memory usage
    prompts = [
        "Calculate 15 + 27",
        "What is 42 - 18?",
        "Process the text 'Hello World' to uppercase",
        "Calculate 7 * 8",
        "What is the length of 'Python Programming'?",
        "Calculate 100 / 4",
        "Convert 'MEMORY TEST' to lowercase",
        "What is 3 + 5 + 7?",
        "Process 'Team Memory Impact Test' to uppercase",
        "Calculate 25 * 4",
    ]

    # Memory (MB) captured right after each forced GC. MemoryMonitor does not
    # retain reading labels, so these must be collected as they happen. (The
    # previous version matched "after_gc" against str(index), which never
    # matched, leaving the leak check dead.)
    post_gc_memory: List[float] = []

    try:
        # Run each prompt and monitor memory
        for i, prompt in enumerate(prompts):
            print(f"\n--- Running team prompt {i + 1}/{len(prompts)} ---")
            monitor.take_reading(f"before_prompt_{i + 1}")

            # Run the team
            response = team.run(prompt, session_id=session_id, user_id=user_id)
            assert response is not None
            assert response.content is not None
            assert response.run_id is not None

            monitor.take_reading(f"after_prompt_{i + 1}")

            # Force garbage collection every few prompts to test GC effectiveness
            if (i + 1) % 3 == 0:
                print(f"--- Forcing garbage collection after prompt {i + 1} ---")
                monitor.force_gc()
                # The latest reading is the one taken just after gc.collect()
                post_gc_memory.append(monitor.get_final_memory())

        # Final memory analysis
        monitor.take_reading("final")

        # Get memory statistics
        memory_growth = monitor.get_memory_growth()
        peak_memory = monitor.get_peak_memory()
        final_memory = monitor.get_final_memory()

        print("\n=== Team Memory Analysis ===")
        print(f"Peak memory usage: {peak_memory:.2f} MB")
        print(f"Final memory usage: {final_memory:.2f} MB")
        print(f"Number of memory readings: {len(monitor.memory_readings)}")

        if memory_growth:
            print(f"Average memory growth per operation: {sum(memory_growth) / len(memory_growth):.2f} MB")
            print(f"Max memory growth in single operation: {max(memory_growth):.2f} MB")

        # STRICT MEMORY LIMITS: Final memory must be under 20MB
        assert final_memory < 20, f"Final memory usage too high: {final_memory:.2f} MB (limit: 20MB)"

        # Verify that garbage collection is working: memory measured right
        # after consecutive GCs should not keep growing.
        for prev_gc_memory, curr_gc_memory in zip(post_gc_memory, post_gc_memory[1:]):
            memory_increase = curr_gc_memory - prev_gc_memory
            # Allow minimal memory growth but not excessive
            assert memory_increase < 0.5, f"Memory leak detected: {memory_increase:.2f} MB increase after GC"

        # Check that sessions were stored
        session_from_storage = shared_db.get_session(session_id=session_id, session_type=SessionType.TEAM)
        assert session_from_storage is not None, "Session was not stored"

        # Check that runs are in memory. More runs than prompts are expected
        # here -- presumably member-agent runs are recorded alongside the
        # team's own runs; confirm against Team.run's session handling.
        assert len(session_from_storage.runs) > len(prompts)
    finally:
        monitor.stop_monitoring()
def test_team_memory_cleanup_after_session_switch(shared_db):
    """
    Test that verifies team memory is properly cleaned up when switching between sessions.
    """

    # Create simple team with basic agents
    def simple_function(input_text: str) -> str:
        return f"Processed: {input_text}"

    agent = Agent(
        name="Simple Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        role="Process simple requests",
        tools=[simple_function],
        db=shared_db,
        update_memory_on_run=True,
    )

    team = Team(
        name="Session Switch Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent],
        db=shared_db,
        update_memory_on_run=True,
    )

    monitor = MemoryMonitor()
    monitor.start_monitoring()

    user_id = "test_user_cleanup"

    try:
        # Create multiple sessions and run prompts
        sessions = ["session_1", "session_2", "session_3"]

        for session_idx, session_id in enumerate(sessions):
            print(f"\n--- Testing team session {session_id} ---")
            monitor.take_reading(f"before_session_{session_idx + 1}")

            # Run a few prompts in this session
            for prompt_idx in range(3):
                prompt = f"Process this text: session {session_id} prompt {prompt_idx + 1}"
                response = team.run(prompt, session_id=session_id, user_id=user_id)
                assert response is not None
                assert response.content is not None

            monitor.take_reading(f"after_session_{session_idx + 1}")

            # Force GC after each session
            monitor.force_gc()

        # Switch back to first session and verify memory doesn't grow excessively
        print("\n--- Switching back to first session ---")
        monitor.take_reading("before_switch_back")

        response = team.run(
            "What do you remember from our previous conversation?", session_id=sessions[0], user_id=user_id
        )
        assert response is not None

        monitor.take_reading("after_switch_back")
        monitor.force_gc()

        # STRICT MEMORY LIMITS: Final memory must be under 5MB
        # (tighter than the 20MB limit used by the other team memory tests)
        final_memory = monitor.get_final_memory()
        assert final_memory < 5, f"Final memory usage too high: {final_memory:.2f} MB (limit: 5MB)"

        # Verify all sessions are properly stored
        for session_id in sessions:
            session_from_storage = shared_db.get_session(session_id=session_id, session_type=SessionType.TEAM)
            assert session_from_storage is not None, f"Session {session_id} was not stored"
    finally:
        monitor.stop_monitoring()
@pytest.mark.asyncio
async def test_team_memory_with_multiple_members(shared_db):
    """
    Test memory usage with multiple team members to ensure scalability.
    """

    # Create multiple agents with realistic functions that would generate memories
    def calculate_budget(income: float, expenses: float, savings_goal: float) -> str:
        """Calculate budget and provide financial advice."""
        disposable_income = income - expenses
        months_to_goal = savings_goal / disposable_income if disposable_income > 0 else float("inf")

        if disposable_income <= 0:
            return f"⚠️ Your expenses (${expenses:.2f}) exceed your income (${income:.2f}). Consider reducing expenses or increasing income."
        elif months_to_goal <= 12:
            return f"✅ Great! You can reach your ${savings_goal:.2f} goal in {months_to_goal:.1f} months with ${disposable_income:.2f} disposable income."
        else:
            return f"📊 You'll reach your ${savings_goal:.2f} goal in {months_to_goal:.1f} months. Consider increasing savings or adjusting your goal."

    def analyze_health_data(age: int, weight: float, height: float, activity_level: str) -> str:
        """Analyze health data and provide recommendations."""
        bmi = weight / ((height / 100) ** 2)

        if bmi < 18.5:
            category = "underweight"
            recommendation = "Consider increasing caloric intake with healthy foods."
        elif bmi < 25:
            category = "normal weight"
            recommendation = "Maintain your healthy weight with balanced nutrition and exercise."
        elif bmi < 30:
            category = "overweight"
            recommendation = "Focus on portion control and regular physical activity."
        else:
            category = "obese"
            recommendation = "Consult with a healthcare provider for a personalized weight management plan."

        return f"📊 BMI: {bmi:.1f} ({category}). {recommendation} Activity level: {activity_level}"

    def schedule_meeting(duration_minutes: int, priority: str) -> str:
        """Schedule a meeting and provide coordination details."""
        priority_emoji = {"high": "🔴", "medium": "🟡", "low": "🟢"}
        emoji = priority_emoji.get(priority, "⚪")
        return f"{emoji} Meeting scheduled: {duration_minutes} minutes ({priority} priority)"

    agent1 = Agent(
        name="Financial Advisor",
        model=OpenAIChat(id="gpt-4o-mini"),  # type: ignore
        role="Provide financial planning and budget analysis",
        tools=[calculate_budget],
        db=shared_db,
        update_memory_on_run=True,
        add_history_to_context=True,
    )

    agent2 = Agent(
        name="Health Coach",
        model=OpenAIChat(id="gpt-4o-mini"),  # type: ignore
        role="Analyze health data and provide wellness recommendations",
        tools=[analyze_health_data],
        db=shared_db,
        update_memory_on_run=True,
        add_history_to_context=True,
    )

    agent3 = Agent(
        name="Meeting Coordinator",
        model=OpenAIChat(id="gpt-4o-mini"),  # type: ignore
        role="Help schedule meetings and coordinate team activities",
        tools=[schedule_meeting],
        db=shared_db,
        update_memory_on_run=True,
        add_history_to_context=True,
    )

    team = Team(
        name="Personal Assistant Team",
        model=OpenAIChat(id="gpt-4o-mini"),  # type: ignore
        members=[agent1, agent2, agent3],
        respond_directly=True,
        determine_input_for_members=False,
        db=shared_db,
        update_memory_on_run=True,
        add_history_to_context=True,
        instructions="Route financial questions to the Financial Advisor, health-related questions to the Health Coach, and meeting/scheduling requests to the Meeting Coordinator. Remember user preferences and past interactions to provide personalized assistance.",
    )

    monitor = MemoryMonitor()
    monitor.start_monitoring()

    users = [f"test_user_{i}" for i in range(10)]

    # Memory (MB) captured right after each forced GC; collected inline
    # because MemoryMonitor does not retain reading labels. (The previous
    # version matched "after_gc" against str(index), which never matched,
    # so its leak check was dead code.)
    post_gc_memory: List[float] = []

    try:
        # Create realistic prompts that would generate meaningful user memories
        realistic_prompts = [
            "I make $5000 per month and spend $3500 on expenses. I want to save $10000 for a vacation. Can you help me plan this?",
            "I'm 28 years old, weigh 70kg, am 175cm tall, and exercise 3 times per week. How's my health looking?",
            "I need to schedule a team meeting with 4 people for 1 hour. It's a high priority project kickoff.",
            "My expenses went up to $4000 this month due to car repairs. How does this affect my vacation savings goal?",
            "I've been working out more and now exercise 5 times per week. Can you update my health assessment?",
            "I need to schedule a follow-up meeting with the same team from last time, but this time it's medium priority and only 30 minutes.",
            "I got a raise to $6000 per month! How much faster can I reach my vacation savings goal now?",
            "I've lost 3kg since our last conversation. Can you recalculate my health metrics?",
            "The team meeting went well. I need to schedule a presentation meeting with 8 stakeholders for 2 hours, high priority.",
            "I'm thinking of buying a house and need to save $50000 for a down payment. How long will this take with my current budget?",
        ]

        print("--- Running realistic multi-member team test (concurrent) ---")

        async def run_prompt(i, prompt):
            # Each prompt runs as its own user/session so runs are isolated
            response = await team.arun(prompt, session_id=f"{users[i]}_session", user_id=users[i])
            assert response is not None
            assert response.content is not None
            assert len(response.content) > 10, f"Response too short: {response.content}"
            return response

        for _ in range(10):
            tasks = []
            monitor.take_reading("before_concurrent_prompts")
            for i, prompt in enumerate(realistic_prompts):
                tasks.append(run_prompt(i, prompt))
            await asyncio.gather(*tasks)
            monitor.take_reading("after_concurrent_prompts")
            monitor.force_gc()
            # The latest reading is the one taken just after gc.collect()
            post_gc_memory.append(monitor.get_final_memory())

        # Comprehensive memory growth analysis
        memory_growth = monitor.get_memory_growth()
        peak_memory = monitor.get_peak_memory()
        final_memory = monitor.get_final_memory()
        initial_memory = monitor.memory_readings[0][1] if monitor.memory_readings else 0.0

        print("\n=== Memory Growth Analysis ===")
        print(f"Initial memory: {initial_memory:.2f} MB")
        print(f"Peak memory: {peak_memory:.2f} MB")
        print(f"Final memory: {final_memory:.2f} MB")
        print(f"Total memory growth: {final_memory - initial_memory:.2f} MB")
        print(f"Peak memory growth: {peak_memory - initial_memory:.2f} MB")

        # STRICT MEMORY LIMITS: Final memory must be under 20MB
        assert final_memory < 20, f"Memory usage too high with multiple members: {final_memory:.2f} MB (limit: 20MB)"

        # Verify memory growth patterns are reasonable. avg/max are computed
        # once here (the previous version computed avg_growth twice and used
        # max_growth from a different if-block's leaked binding).
        if memory_growth:
            avg_growth = sum(memory_growth) / len(memory_growth)
            max_growth = max(memory_growth)
            print(f"Average memory growth per operation: {avg_growth:.2f} MB")
            # Check that average growth per operation is reasonable (should be small)
            assert avg_growth < 2.0, f"Average memory growth too high: {avg_growth:.2f} MB per operation"
            # Check that no single operation causes excessive memory growth
            assert max_growth < 10.0, f"Single operation memory growth too high: {max_growth:.2f} MB"

        # Verify that garbage collection is effective: memory measured right
        # after consecutive GCs must not keep growing.
        for previous_mb, current_mb in zip(post_gc_memory, post_gc_memory[1:]):
            memory_increase = current_mb - previous_mb
            # Allow minimal memory growth but not excessive
            assert memory_increase < 0.5, f"Memory leak detected: {memory_increase:.2f} MB increase after GC"

        print("✅ Realistic multi-member team test completed successfully")
        print(f"✅ Processed {len(realistic_prompts)} realistic prompts")
        print(f"✅ Final memory: {final_memory:.2f} MB")
    finally:
        monitor.stop_monitoring()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_memory_impact.py",
"license": "Apache License 2.0",
"lines": 403,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/response.py | import json
import time
from agno.models.message import Message, Metrics
from agno.models.response import ToolExecution
from agno.run.agent import RunOutput
def test_timer_serialization():
    """A run whose assistant message carries started/stopped metrics must serialize to JSON."""
    user_msg = Message(role="user", content="Hello, world!")
    assistant_msg = Message(role="assistant", metrics=Metrics())
    assistant_msg.metrics.start_timer()
    assistant_msg.metrics.stop_timer()

    output = RunOutput(messages=[user_msg, assistant_msg])
    assert json.dumps(output.to_dict()) is not None
def test_tool_execution_created_at_round_trip():
    """Test that created_at is preserved across serialization/deserialization and unique per instance."""
    # Part 1: instances created > 1 second apart must carry distinct timestamps
    first = ToolExecution(tool_name="test_tool_1")
    time.sleep(1.1)  # Wait > 1 second since created_at uses int(time())
    second = ToolExecution(tool_name="test_tool_2")

    assert first.created_at != second.created_at, (
        f"Bug: All instances share same timestamp! instance1={first.created_at}, instance2={second.created_at}"
    )

    # Part 2: to_dict/from_dict must carry the timestamp through unchanged
    source = ToolExecution(tool_name="test_tool", tool_call_id="test_id")
    stamp = source.created_at

    payload = source.to_dict()
    assert "created_at" in payload
    assert payload["created_at"] == stamp

    time.sleep(1.1)

    rebuilt = ToolExecution.from_dict(payload)
    assert rebuilt.created_at == stamp, (
        f"Bug: Timestamp not preserved! original={stamp}, restored={rebuilt.created_at}"
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/response.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/oxylabs.py | import json
from os import getenv
from typing import Any, Callable, Dict, List, Optional
from urllib.parse import urlparse
from agno.tools import Toolkit
from agno.utils.log import log_debug, log_error, log_info
try:
from oxylabs import RealtimeClient
from oxylabs.sources.response import Response
from oxylabs.utils.types import render
except ImportError:
raise ImportError("Oxylabs SDK not found. Please install it with: pip install oxylabs")
class OxylabsTools(Toolkit):
def __init__(
self,
username: Optional[str] = None,
password: Optional[str] = None,
**kwargs,
):
self.username = username or getenv("OXYLABS_USERNAME")
self.password = password or getenv("OXYLABS_PASSWORD")
if not self.username or not self.password:
raise ValueError(
"No Oxylabs credentials provided. Please set the OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables or pass them to the OxylabsTools constructor."
)
try:
log_debug(f"Initializing Oxylabs client with username: {self.username[:5]}...")
self.client = RealtimeClient(self.username, self.password)
log_debug("Oxylabs client initialized successfully")
except Exception as e:
log_debug(f"Failed to initialize Oxylabs client: {e}")
raise
tools: List[Callable[..., str]] = [
self.search_google,
self.get_amazon_product,
self.search_amazon_products,
self.scrape_website,
]
super().__init__(name="oxylabs_web_scraping", tools=tools, **kwargs)
def search_google(self, query: str, domain_code: str = "com") -> str:
"""Search Google for a query.
Args:
query: Search query
domain_code: Google domain to search (e.g., "com", "co.uk", "de", default: "com")
Returns:
JSON of search results
"""
try:
if not query or not isinstance(query, str) or len(query.strip()) == 0:
return self._error_response("search_google", "Query cannot be empty", {"query": query})
if not isinstance(domain_code, str) or len(domain_code) > 10:
return self._error_response("search_google", "Domain must be a valid string (e.g., 'com', 'co.uk')")
query = query.strip()
log_debug(f"Google search: '{query}' on google.{domain_code}")
response: Response = self.client.google.scrape_search(query=query, domain=domain_code, parse=True)
# Extract search results
search_results = []
if response.results and len(response.results) > 0:
result = response.results[0]
# Try parsed content first
if hasattr(result, "content_parsed") and result.content_parsed:
content = result.content_parsed
if hasattr(content, "results") and content.results:
raw_results = content.results.raw if hasattr(content.results, "raw") else {}
organic_results = raw_results.get("organic", [])
for item in organic_results:
search_results.append(
{
"title": item.get("title", "").strip(),
"url": item.get("url", "").strip(),
"description": item.get("desc", "").strip(),
"position": item.get("pos", 0),
}
)
if not search_results and hasattr(result, "content"):
raw_content = result.content
if isinstance(raw_content, dict) and "results" in raw_content:
organic_results = raw_content["results"].get("organic", [])
for item in organic_results:
search_results.append(
{
"title": item.get("title", "").strip(),
"url": item.get("url", "").strip(),
"description": item.get("desc", "").strip(),
"position": item.get("pos", 0),
}
)
response_data = {
"tool": "search_google",
"query": query,
"results": search_results,
}
log_info(f"Google search completed. Found {len(search_results)} results")
return json.dumps(response_data, indent=2)
except Exception as e:
error_msg = f"Google search failed: {str(e)}"
log_error(error_msg)
return self._error_response("search_google", error_msg, {"query": query})
def get_amazon_product(self, asin: str, domain_code: str = "com") -> str:
    """Get detailed information about an Amazon product by ASIN.

    Args:
        asin: Amazon Standard Identification Number (10 alphanumeric characters, e.g., "B07FZ8S74R")
        domain_code: Amazon domain (e.g., "com", "co.uk", "de", default: "com")

    Returns:
        JSON of product details. On any validation or SDK failure a standardized
        error payload (see ``_error_response``) is returned instead of raising.
    """
    try:
        if not asin or not isinstance(asin, str):
            return self._error_response("get_amazon_product", "ASIN is required and must be a string")
        # ASINs are case-insensitive identifiers; normalize before the format check.
        asin = asin.strip().upper()
        if len(asin) != 10 or not asin.isalnum():
            return self._error_response(
                "get_amazon_product",
                f"Invalid ASIN format: {asin}. Must be 10 alphanumeric characters (e.g., 'B07FZ8S74R')",
            )
        if not isinstance(domain_code, str) or len(domain_code) > 10:
            return self._error_response(
                "get_amazon_product", "Domain must be a valid string (e.g., 'com', 'co.uk')"
            )
        log_debug(f"Amazon product lookup: ASIN {asin} on amazon.{domain_code}")
        response: Response = self.client.amazon.scrape_product(query=asin, domain=domain_code, parse=True)
        # Default payload returned when the SDK yields no usable content.
        product_info = {"found": False, "asin": asin, "domain": f"amazon.{domain_code}"}
        if response.results and len(response.results) > 0:
            # Only the first result is inspected; assumes a single-query request
            # yields a single result entry — TODO confirm against the SDK.
            result = response.results[0]
            # Prefer the raw dict content; fall back to the SDK's parsed object below.
            if hasattr(result, "content") and result.content:
                content = result.content
                if isinstance(content, dict):
                    product_info.update(
                        {
                            "found": True,
                            "title": content.get("title", "").strip(),
                            "price": content.get("price", 0),
                            "currency": content.get("currency", ""),
                            "rating": content.get("rating", 0),
                            "reviews_count": content.get("reviews_count", 0),
                            "url": content.get("url", ""),
                            "description": content.get("description", "").strip(),
                            "stock_status": content.get("stock", "").strip(),
                            "brand": content.get("brand", "").strip(),
                            # Cap list fields to keep the JSON response compact.
                            "images": content.get("images", [])[:3],
                            "bullet_points": content.get("bullet_points", [])[:5]
                            if content.get("bullet_points")
                            else [],
                        }
                    )
            elif hasattr(result, "content_parsed") and result.content_parsed:
                # Parsed-object fallback: mirrors the dict branch using getattr.
                content = result.content_parsed
                product_info.update(
                    {
                        "found": True,
                        "title": getattr(content, "title", "").strip(),
                        "price": getattr(content, "price", 0),
                        "currency": getattr(content, "currency", ""),
                        "rating": getattr(content, "rating", 0),
                        "reviews_count": getattr(content, "reviews_count", 0),
                        "url": getattr(content, "url", ""),
                        "description": getattr(content, "description", "").strip(),
                        "stock_status": getattr(content, "stock", "").strip(),
                        "brand": getattr(content, "brand", "").strip(),
                        "images": getattr(content, "images", [])[:3],
                        "bullet_points": getattr(content, "bullet_points", [])[:5]
                        if getattr(content, "bullet_points", None)
                        else [],
                    }
                )
        response_data = {
            "tool": "get_amazon_product",
            "asin": asin,
            "product_info": product_info,
        }
        log_info(f"Amazon product lookup completed for ASIN {asin}")
        return json.dumps(response_data, indent=2)
    except Exception as e:
        error_msg = f"Amazon product lookup failed: {str(e)}"
        log_error(error_msg)
        return self._error_response("get_amazon_product", error_msg, {"asin": asin})
def search_amazon_products(self, query: str, domain_code: str = "com") -> str:
    """Search Amazon for products and return search results.

    Args:
        query: Product search query
        domain_code: Amazon domain (e.g., "com", "co.uk", "de", default: "com")

    Returns:
        JSON string with search results containing:
        - success: boolean indicating if search was successful
        - query: the original search query
        - total_products: number of products found
        - products: list of product results with title, asin, price, rating, etc.
    """
    try:
        if not query or not isinstance(query, str) or len(query.strip()) == 0:
            return self._error_response("search_amazon_products", "Query cannot be empty")
        if not isinstance(domain_code, str) or len(domain_code) > 10:
            return self._error_response(
                "search_amazon_products", "Domain must be a valid string (e.g., 'com', 'co.uk')"
            )
        query = query.strip()
        log_info(f"Amazon search: '{query}' on amazon.{domain_code}")
        response: Response = self.client.amazon.scrape_search(query=query, domain=domain_code, parse=True)
        # Extract search results from the first result entry only.
        products = []
        if response.results and len(response.results) > 0:
            result = response.results[0]
            # Raw dict content takes precedence; parsed-object form is the fallback.
            if hasattr(result, "content") and result.content:
                content = result.content
                if isinstance(content, dict) and "results" in content:
                    # "organic" holds non-sponsored listings — TODO confirm sponsored
                    # results are intentionally excluded.
                    organic_results = content["results"].get("organic", [])
                    for item in organic_results:
                        products.append(
                            {
                                "title": item.get("title", "").strip(),
                                "asin": item.get("asin", "").strip(),
                                "price": item.get("price", 0),
                                "currency": item.get("currency", ""),
                                "rating": item.get("rating", 0),
                                "reviews_count": item.get("reviews_count", 0),
                                "url": item.get("url", "").strip(),
                                "position": item.get("pos", 0),
                                "image": item.get("image", "").strip(),
                            }
                        )
            elif hasattr(result, "content_parsed") and result.content_parsed:
                content = result.content_parsed
                if hasattr(content, "results") and content.results:
                    if hasattr(content.results, "organic"):
                        organic_results = content.results.organic
                        for item in organic_results:
                            products.append(
                                {
                                    "title": getattr(item, "title", "").strip(),
                                    "asin": getattr(item, "asin", "").strip(),
                                    "price": getattr(item, "price", 0),
                                    "currency": getattr(item, "currency", ""),
                                    "rating": getattr(item, "rating", 0),
                                    "reviews_count": getattr(item, "reviews_count", 0),
                                    "url": getattr(item, "url", "").strip(),
                                    "position": getattr(item, "pos", 0),
                                    "image": getattr(item, "image", "").strip(),
                                }
                            )
        response_data = {
            "tool": "search_amazon_products",
            "query": query,
            "products": products,
        }
        log_debug(f"Amazon search completed. Found {len(products)} products")
        return json.dumps(response_data, indent=2)
    except Exception as e:
        error_msg = f"Amazon search failed: {str(e)}"
        log_error(error_msg)
        return self._error_response("search_amazon_products", error_msg, {"query": query})
def scrape_website(self, url: str, render_javascript: bool = False) -> str:
    """Scrape content from any website URL.

    Args:
        url: Website URL to scrape (must start with http:// or https://)
        render_javascript: Whether to enable JavaScript rendering for dynamic content (default: False)

    Returns:
        JSON of results: status code, content length, and a preview capped at
        1000 characters. Validation and SDK failures return a standardized
        error payload instead of raising.
    """
    try:
        if not url or not isinstance(url, str):
            return self._error_response("scrape_website", "URL is required and must be a string")
        url = url.strip()
        # Cheap scheme check first, then urlparse for a structural check.
        if not url.startswith(("http://", "https://")):
            return self._error_response(
                "scrape_website", f"Invalid URL format: {url}. Must start with http:// or https://"
            )
        try:
            parsed_url = urlparse(url)
            if not parsed_url.netloc:
                return self._error_response("scrape_website", f"Invalid URL format: {url}. Missing domain name")
        except Exception:
            return self._error_response("scrape_website", f"Invalid URL format: {url}")
        if not isinstance(render_javascript, bool):
            return self._error_response("scrape_website", "render_javascript must be a boolean (True/False)")
        log_debug(f"Website scraping: {url} (JS rendering: {render_javascript})")
        response: Response = self.client.universal.scrape_url(
            url=url, render=render.HTML if render_javascript else None, parse=True
        )
        content_info = {"url": url, "javascript_rendered": render_javascript}
        if response.results and len(response.results) > 0:
            result = response.results[0]
            content = result.content
            status_code = getattr(result, "status_code", None)
            content_preview = ""
            content_length = 0
            if content:
                try:
                    # Content may be a dict or str depending on parsing; stringify uniformly.
                    content_str = str(content)
                    content_length = len(content_str)
                    # Truncate the preview so the JSON response stays small.
                    content_preview = content_str[:1000] if content_length > 1000 else content_str
                    content_info["scraped"] = True
                except Exception as e:
                    log_debug(f"Could not process content: {e}")
                    content_preview = "Content available but processing failed"
                    content_info["scraped"] = False
            content_info.update(
                {
                    "status_code": status_code,
                    "content_length": content_length,
                    "content_preview": content_preview.strip(),
                    "has_content": content_length > 0,
                }
            )
        response_data = {
            "tool": "scrape_website",
            "url": url,
            "content_info": content_info,
        }
        log_debug(f"Website scraping completed for {url}")
        return json.dumps(response_data, indent=2)
    except Exception as e:
        error_msg = f"Website scraping failed: {str(e)}"
        log_error(error_msg)
        return self._error_response("scrape_website", error_msg, {"url": url})
def _error_response(self, tool_name: str, error_message: str, context: Optional[Dict[str, Any]] = None) -> str:
"""Generate a standardized error response."""
error_data = {"tool": tool_name, "error": error_message, "context": context or {}}
return json.dumps(error_data, indent=2)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/oxylabs.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_oxylabs.py | """Unit tests for OxylabsTools class."""
import json
from unittest.mock import Mock, patch
import pytest
from agno.agent import Agent
from agno.tools.oxylabs import OxylabsTools
@pytest.fixture
def mock_agent():
    """Create a mock Agent instance."""
    # spec=Agent restricts the mock's attribute surface to Agent's real API,
    # so typos in attribute access fail loudly instead of auto-mocking.
    return Mock(spec=Agent)
@pytest.fixture
def mock_oxylabs_client():
    """Yield a stubbed Oxylabs RealtimeClient with all resource clients mocked."""
    with patch("agno.tools.oxylabs.RealtimeClient") as realtime_client_cls:
        # The object every OxylabsTools instance will receive from the SDK constructor.
        client = Mock()

        # Attach the nested resource clients the tool methods call into.
        for resource_name in ("google", "amazon", "universal"):
            setattr(client, resource_name, Mock())

        realtime_client_cls.return_value = client
        yield client
@pytest.fixture
def mock_environment_variables():
    """Mock environment variables for Oxylabs credentials."""
    # patch.dict restores the original os.environ when the fixture finishes.
    with patch.dict("os.environ", {"OXYLABS_USERNAME": "test_user", "OXYLABS_PASSWORD": "test_pass"}):
        yield
def create_mock_response(results=None, status_code=200):
    """Build a mock SDK response whose ``results`` list mirrors the Oxylabs structure.

    Args:
        results: Optional list of dicts with "content", "status_code" and
            "content_parsed" keys describing each result entry.
        status_code: Default status code when an entry does not supply one.

    Returns:
        A ``Mock`` with a ``results`` list of per-entry mocks.
    """
    response = Mock()
    response.results = []
    for entry in results or []:
        item = Mock()
        # Missing keys become None, matching an absent SDK field.
        item.content = entry.get("content")
        item.status_code = entry.get("status_code", status_code)
        item.content_parsed = entry.get("content_parsed")
        response.results.append(item)
    return response
class TestOxylabsToolsInitialization:
    """Test cases for OxylabsTools initialization."""

    def test_init_with_credentials(self, mock_oxylabs_client):
        """Test initialization with provided credentials."""
        tools = OxylabsTools(username="test_user", password="test_pass")

        assert tools.username == "test_user"
        assert tools.password == "test_pass"
        # The fixture patches RealtimeClient, so the client is the shared mock.
        assert tools.client is not None

    def test_init_with_env_variables(self, mock_oxylabs_client, mock_environment_variables):
        """Test initialization with environment variables."""
        # No explicit credentials: the tool must read OXYLABS_USERNAME/PASSWORD.
        tools = OxylabsTools()

        assert tools.username == "test_user"
        assert tools.password == "test_pass"
        assert tools.client is not None

    def test_init_without_credentials(self):
        """Test initialization failure without credentials."""
        # Ensure no environment variables are set
        with patch.dict("os.environ", {}, clear=True):
            with pytest.raises(ValueError, match="No Oxylabs credentials provided"):
                OxylabsTools()

    def test_init_partial_credentials(self):
        """Test initialization failure with partial credentials."""
        # Ensure no environment variables are set
        with patch.dict("os.environ", {}, clear=True):
            # Username alone is not enough — a password is also required.
            with pytest.raises(ValueError, match="No Oxylabs credentials provided"):
                OxylabsTools(username="test_user")
class TestSearchGoogle:
    """Test cases for search_google method."""

    def test_search_google_success(self, mock_oxylabs_client, mock_environment_variables):
        """Test successful Google search."""
        # Arrange: payload shape mirrors the raw dict content the SDK returns.
        mock_response = create_mock_response(
            results=[
                {
                    "content": {
                        "results": {
                            "organic": [
                                {
                                    "title": "Test Result",
                                    "url": "https://example.com",
                                    "desc": "Test description",
                                    "pos": 1,
                                }
                            ]
                        }
                    },
                    "status_code": 200,
                }
            ]
        )
        mock_oxylabs_client.google.scrape_search.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.search_google(query="test query", domain_code="com")

        # Assert: exact kwargs pin the SDK call contract.
        mock_oxylabs_client.google.scrape_search.assert_called_once_with(query="test query", domain="com", parse=True)
        result_data = json.loads(result)
        assert result_data["tool"] == "search_google"
        assert result_data["query"] == "test query"
        assert "results" in result_data

    def test_search_google_empty_query(self, mock_oxylabs_client, mock_environment_variables):
        """Test Google search with empty query."""
        tools = OxylabsTools()

        result = tools.search_google(query="")

        # Validation should short-circuit into the standard error payload.
        result_data = json.loads(result)
        assert "error" in result_data
        assert result_data["tool"] == "search_google"
        assert "cannot be empty" in result_data["error"]

    def test_search_google_invalid_domain(self, mock_oxylabs_client, mock_environment_variables):
        """Test Google search with invalid domain."""
        tools = OxylabsTools()

        # Domain codes longer than 10 characters are rejected.
        result = tools.search_google(query="test", domain_code="x" * 15)

        result_data = json.loads(result)
        assert "error" in result_data
        assert result_data["tool"] == "search_google"
        assert "valid string" in result_data["error"]
class TestGetAmazonProduct:
    """Test cases for get_amazon_product method."""

    def test_get_amazon_product_success(self, mock_oxylabs_client, mock_environment_variables):
        """Test successful Amazon product lookup."""
        # Arrange: dict content is the primary parsing path in the tool.
        mock_response = create_mock_response(
            results=[
                {
                    "content": {"title": "Test Product", "price": 29.99, "currency": "USD", "rating": 4.5},
                    "status_code": 200,
                }
            ]
        )
        mock_oxylabs_client.amazon.scrape_product.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.get_amazon_product(asin="B08N5WRWNW", domain_code="com")

        # Assert: the ASIN is passed through as the SDK "query" argument.
        mock_oxylabs_client.amazon.scrape_product.assert_called_once_with(query="B08N5WRWNW", domain="com", parse=True)
        result_data = json.loads(result)
        assert result_data["tool"] == "get_amazon_product"
        assert result_data["asin"] == "B08N5WRWNW"
        assert "product_info" in result_data

    def test_get_amazon_product_invalid_asin(self, mock_oxylabs_client, mock_environment_variables):
        """Test Amazon product lookup with invalid ASIN."""
        tools = OxylabsTools()

        # "INVALID" is only 7 characters — fails the 10-char alnum check.
        result = tools.get_amazon_product(asin="INVALID")

        result_data = json.loads(result)
        assert "error" in result_data
        assert result_data["tool"] == "get_amazon_product"
        assert "Invalid ASIN format" in result_data["error"]
class TestSearchAmazonProducts:
    """Test cases for search_amazon_products method."""

    def test_search_amazon_products_success(self, mock_oxylabs_client, mock_environment_variables):
        """Test successful Amazon search."""
        # Arrange: "organic" listings are what the tool extracts into products.
        mock_response = create_mock_response(
            results=[
                {
                    "content": {
                        "results": {"organic": [{"title": "Test Product", "asin": "B08N5WRWNW", "price": 29.99}]}
                    },
                    "status_code": 200,
                }
            ]
        )
        mock_oxylabs_client.amazon.scrape_search.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.search_amazon_products(query="wireless headphones", domain_code="com")

        # Assert
        mock_oxylabs_client.amazon.scrape_search.assert_called_once_with(
            query="wireless headphones", domain="com", parse=True
        )
        result_data = json.loads(result)
        assert result_data["tool"] == "search_amazon_products"
        assert result_data["query"] == "wireless headphones"
        assert "products" in result_data

    def test_search_amazon_products_empty_query(self, mock_oxylabs_client, mock_environment_variables):
        """Test Amazon search with empty query."""
        tools = OxylabsTools()

        result = tools.search_amazon_products(query="")

        # Validation error comes back as a JSON payload, not an exception.
        result_data = json.loads(result)
        assert "error" in result_data
        assert result_data["tool"] == "search_amazon_products"
        assert "cannot be empty" in result_data["error"]
class TestScrapeWebsite:
    """Test cases for scrape_website method."""

    def test_scrape_website_success(self, mock_oxylabs_client, mock_environment_variables):
        """Test successful website scraping."""
        # Arrange
        mock_response = create_mock_response(
            results=[{"content": "<html><body>Test Content</body></html>", "status_code": 200}]
        )
        mock_oxylabs_client.universal.scrape_url.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.scrape_website(url="https://example.com", render_javascript=False)

        # Assert: render=None means no JavaScript rendering was requested.
        mock_oxylabs_client.universal.scrape_url.assert_called_once_with(
            url="https://example.com", render=None, parse=True
        )
        result_data = json.loads(result)
        assert result_data["tool"] == "scrape_website"
        assert result_data["url"] == "https://example.com"
        assert "content_info" in result_data

    def test_scrape_website_invalid_url(self, mock_oxylabs_client, mock_environment_variables):
        """Test website scraping with invalid URL."""
        tools = OxylabsTools()

        # Missing http(s):// scheme must be rejected before any SDK call.
        result = tools.scrape_website(url="not-a-url")

        result_data = json.loads(result)
        assert "error" in result_data
        assert result_data["tool"] == "scrape_website"
        assert "Invalid URL format" in result_data["error"]

    def test_scrape_website_with_javascript(self, mock_oxylabs_client, mock_environment_variables):
        """Test website scraping with JavaScript rendering."""
        # Arrange
        mock_response = create_mock_response(
            results=[{"content": "<html><body>Rendered Content</body></html>", "status_code": 200}]
        )
        mock_oxylabs_client.universal.scrape_url.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.scrape_website(url="https://example.com", render_javascript=True)

        # Assert: the flag must be echoed back in the response payload.
        result_data = json.loads(result)
        assert result_data["tool"] == "scrape_website"
        assert result_data["content_info"]["javascript_rendered"] is True
class TestErrorHandling:
    """Test cases for error handling."""

    def test_api_exception_handling(self, mock_oxylabs_client, mock_environment_variables):
        """An SDK exception must surface as a structured error payload, not a crash."""
        # Arrange: make the mocked SDK call raise.
        mock_oxylabs_client.google.scrape_search.side_effect = Exception("API Error")
        tools = OxylabsTools()

        # Act
        payload = json.loads(tools.search_google(query="test"))

        # Assert: error is reported in the standard JSON envelope.
        assert "error" in payload
        assert payload["tool"] == "search_google"
        assert "API Error" in payload["error"]
class TestResponseFormatting:
    """Test cases for response formatting."""

    def test_format_response_with_parsed_content(self, mock_oxylabs_client, mock_environment_variables):
        """Test response formatting with parsed content."""
        # Arrange: only content_parsed is set (no "content" key), forcing the
        # tool down its parsed-object code path. The nested Mock mirrors
        # content_parsed.results.raw["organic"].
        mock_response = create_mock_response(
            results=[
                {
                    "content_parsed": Mock(
                        results=Mock(
                            raw={
                                "organic": [
                                    {
                                        "title": "Test",
                                        "url": "https://example.com",
                                        "desc": "Test description",
                                        "pos": 1,
                                    }
                                ]
                            }
                        )
                    ),
                    "status_code": 200,
                }
            ]
        )
        mock_oxylabs_client.google.scrape_search.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.search_google(query="test")

        # Assert
        result_data = json.loads(result)
        assert result_data["tool"] == "search_google"
        assert result_data["query"] == "test"
        assert len(result_data["results"]) == 1
        assert result_data["results"][0]["title"] == "Test"

    def test_format_response_empty_results(self, mock_oxylabs_client, mock_environment_variables):
        """Test response formatting with empty results."""
        # Arrange: an empty results list should yield an empty "results" array.
        mock_response = create_mock_response(results=[])
        mock_oxylabs_client.google.scrape_search.return_value = mock_response
        tools = OxylabsTools()

        # Act
        result = tools.search_google(query="test")

        # Assert
        result_data = json.loads(result)
        assert result_data["tool"] == "search_google"
        assert result_data["query"] == "test"
        assert len(result_data["results"]) == 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_oxylabs.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_team_with_member_with_parser_model.py | from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team import Team
# Structured output schema for the park-expert agent. NOTE: no class docstring
# on purpose — pydantic would surface it as the model's JSON-schema description.
class ParkGuide(BaseModel):
    # Official park name, e.g. "Yosemite National Park".
    park_name: str = Field(..., description="The official name of the national park.")
    # Popular activities; the Field description asks the model for at least three.
    activities: List[str] = Field(
        ..., description="A list of popular activities to do in the park. Provide at least three."
    )
    # Free-form season name (Spring/Summer/Autumn/Winter).
    best_season_to_visit: str = Field(
        ..., description="The best season to visit the park (e.g., Spring, Summer, Autumn, Winter)."
    )
def test_team_with_parser_model():
    """A team member with a parser_model should yield structured ParkGuide output."""
    # The member uses gpt-4o to answer and gpt-4o-mini only to parse into the schema.
    agent = Agent(
        name="National Park Expert",
        model=OpenAIChat(id="gpt-4o"),
        output_schema=ParkGuide,
        parser_model=OpenAIChat(id="gpt-4o-mini"),  # Use OpenAI instead of Claude
        description="You are an expert on national parks and provide concise guides.",
    )

    # respond_directly=True makes the member's (structured) answer the team's answer.
    team = Team(
        name="National Park Expert",
        members=[agent],
        respond_directly=True,
        telemetry=False,
    )

    response = team.run("Tell me about Yosemite National Park.")

    print(response.content)

    assert response.content is not None
    assert isinstance(response.content, ParkGuide)
    assert isinstance(response.content.park_name, str)
    assert len(response.content.park_name) > 0
def test_team_with_parser_model_stream(shared_db):
    """Streaming variant: the structured output must survive event streaming."""
    agent = Agent(
        name="National Park Expert",
        model=OpenAIChat(id="gpt-4o"),
        output_schema=ParkGuide,
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        description="You are an expert on national parks and provide concise guides.",
    )

    # A db is needed here so the final run output can be recovered after streaming.
    team = Team(
        name="National Park Expert",
        members=[agent],
        db=shared_db,
        respond_directly=True,
        telemetry=False,
    )

    response = team.run("Tell me about Yosemite National Park.", stream=True)

    final_content = None
    for event in response:
        # Capture content from events during streaming
        if hasattr(event, "content") and isinstance(event.content, ParkGuide):
            final_content = event.content

    # Fallback: try to get from database if events didn't capture it
    if final_content is None:
        run_response = team.get_last_run_output()
        if run_response and run_response.content:
            final_content = run_response.content

    assert final_content is not None
    assert isinstance(final_content, ParkGuide)
    assert isinstance(final_content.park_name, str)
    assert len(final_content.park_name) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_team_with_member_with_parser_model.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_duckdb.py | from unittest.mock import MagicMock, patch
import pytest
from agno.tools.duckdb import DuckDbTools
@pytest.fixture
def mock_duckdb_connection():
    """Mock DuckDB connection used by DuckDbTools."""
    # Patch the duckdb module as imported inside agno.tools.duckdb; the patch
    # stays active for the whole test because we yield inside the context.
    with patch("agno.tools.duckdb.duckdb") as mock_duckdb:
        mock_connection = MagicMock()
        mock_duckdb.connect.return_value = mock_connection

        # Mock the query result: one row, one "name" column.
        mock_result = MagicMock()
        mock_result.fetchall.return_value = [("test_table",)]
        mock_result.columns = ["name"]
        mock_connection.sql.return_value = mock_result

        yield mock_connection
@pytest.fixture
def duckdb_tools_instance(mock_duckdb_connection):
    """Fixture to instantiate DuckDbTools with mocked connection."""
    tools = DuckDbTools()
    # Override the connection property to use the mock, so no real DuckDB
    # database is opened during the tests.
    tools._connection = mock_duckdb_connection
    return tools
# --- Test Cases for Table Creation Methods ---
def test_create_table_from_path_no_quotes_around_table_name(duckdb_tools_instance, mock_duckdb_connection):
    """Test that create_table_from_path does not wrap table names in single quotes."""
    source_path = "/path/to/test-file.csv"

    table_name = duckdb_tools_instance.create_table_from_path(source_path)

    # Hyphens in the file name become underscores in the derived table name.
    assert table_name == "test_file"

    mock_duckdb_connection.sql.assert_called()
    executed_sql = mock_duckdb_connection.sql.call_args[0][0]
    assert "CREATE TABLE IF NOT EXISTS test_file AS" in executed_sql
    # A quoted identifier would be a string literal, not a table name.
    assert "'test_file'" not in executed_sql
    # CSV files should use read_csv with explicit parameters.
    assert "read_csv('/path/to/test-file.csv', ignore_errors=false, auto_detect=true)" in executed_sql
def test_create_table_from_path_with_replace(duckdb_tools_instance, mock_duckdb_connection):
    """Test create_table_from_path with replace=True."""
    path = "/path/to/data.json"
    expected_table_name = "data"

    result = duckdb_tools_instance.create_table_from_path(path, replace=True)

    assert result == expected_table_name
    call_args = mock_duckdb_connection.sql.call_args[0][0]
    # replace=True switches the DDL from IF NOT EXISTS to OR REPLACE.
    assert f"CREATE OR REPLACE TABLE {expected_table_name} AS" in call_args
    assert f"'{expected_table_name}'" not in call_args
    assert f"SELECT * FROM '{path}'" in call_args  # Non-CSV files should use the old approach

def test_create_table_from_path_custom_table_name(duckdb_tools_instance, mock_duckdb_connection):
    """Test create_table_from_path with custom table name."""
    path = "/path/to/file.csv"
    custom_table = "my_custom_table"

    result = duckdb_tools_instance.create_table_from_path(path, table=custom_table)

    # An explicit table name must win over the name derived from the path.
    assert result == custom_table
    call_args = mock_duckdb_connection.sql.call_args[0][0]
    assert f"CREATE TABLE IF NOT EXISTS {custom_table} AS" in call_args
    assert f"'{custom_table}'" not in call_args
def test_load_local_path_to_table_no_quotes(duckdb_tools_instance, mock_duckdb_connection):
    """Test that load_local_path_to_table does not wrap table names in single quotes."""
    path = "/local/path/jira-backlog.csv"
    expected_table_name = "jira_backlog"

    table_name, sql_statement = duckdb_tools_instance.load_local_path_to_table(path)

    assert table_name == expected_table_name
    assert f"CREATE OR REPLACE TABLE {expected_table_name} AS" in sql_statement
    assert f"'{expected_table_name}'" not in sql_statement
    # The run_query method removes semicolons, so check for the statement without semicolon
    expected_call = sql_statement.rstrip(";")
    mock_duckdb_connection.sql.assert_called_with(expected_call)

def test_load_local_csv_to_table_no_quotes(duckdb_tools_instance, mock_duckdb_connection):
    """Test that load_local_csv_to_table does not wrap table names in single quotes."""
    # Dots inside the file stem also become underscores.
    path = "/local/path/test.data.csv"
    expected_table_name = "test_data"

    table_name, sql_statement = duckdb_tools_instance.load_local_csv_to_table(path)

    assert table_name == expected_table_name
    assert f"CREATE OR REPLACE TABLE {expected_table_name} AS" in sql_statement
    assert f"'{expected_table_name}'" not in sql_statement
    assert "read_csv(" in sql_statement
    assert "ignore_errors=false, auto_detect=true" in sql_statement

def test_load_local_csv_to_table_with_delimiter(duckdb_tools_instance, mock_duckdb_connection):
    """Test load_local_csv_to_table with custom delimiter."""
    path = "/local/path/pipe-separated.csv"
    delimiter = "|"
    expected_table_name = "pipe_separated"

    table_name, sql_statement = duckdb_tools_instance.load_local_csv_to_table(path, delimiter=delimiter)

    assert table_name == expected_table_name
    assert f"CREATE OR REPLACE TABLE {expected_table_name} AS" in sql_statement
    # The delimiter must be forwarded to read_csv as delim='|'.
    assert f"delim='{delimiter}'" in sql_statement
    assert f"'{expected_table_name}'" not in sql_statement

def test_load_s3_path_to_table_no_quotes(duckdb_tools_instance, mock_duckdb_connection):
    """Test that load_s3_path_to_table does not wrap table names in single quotes."""
    path = "s3://bucket/path/my-data-file.parquet"
    expected_table_name = "my_data_file"

    table_name, sql_statement = duckdb_tools_instance.load_s3_path_to_table(path)

    assert table_name == expected_table_name
    assert f"CREATE OR REPLACE TABLE {expected_table_name} AS" in sql_statement
    assert f"'{expected_table_name}'" not in sql_statement

def test_load_s3_csv_to_table_no_quotes(duckdb_tools_instance, mock_duckdb_connection):
    """Test that load_s3_csv_to_table does not wrap table names in single quotes."""
    path = "s3://bucket/data/sales-report.csv"
    expected_table_name = "sales_report"

    table_name, sql_statement = duckdb_tools_instance.load_s3_csv_to_table(path)

    assert table_name == expected_table_name
    assert f"CREATE OR REPLACE TABLE {expected_table_name} AS" in sql_statement
    assert f"'{expected_table_name}'" not in sql_statement
    assert "read_csv(" in sql_statement
    assert "ignore_errors=false, auto_detect=true" in sql_statement
# --- Test Cases for Table Name Sanitization ---
def test_get_table_name_from_path_special_characters(duckdb_tools_instance):
    """Test that table names are properly sanitized from paths with special characters."""
    # Mapping of input path -> expected sanitized table name.
    cases = {
        "/path/to/my-file.csv": "my_file",
        "/path/to/data.backup.csv": "data_backup",
        "/path/to/file with spaces.json": "file_with_spaces",
        "/path/to/complex-file.name.data.csv": "complex_file_name_data",
        "s3://bucket/sub/folder/test-data.parquet": "test_data",
    }
    for path, expected_table_name in cases.items():
        assert duckdb_tools_instance.get_table_name_from_path(path) == expected_table_name, (
            f"Failed for path: {path}"
        )
# --- Test Cases for Query Execution ---
def test_run_query_success(duckdb_tools_instance, mock_duckdb_connection):
    """Test successful query execution."""
    # Setup mock result: two rows, three columns.
    mock_result = MagicMock()
    mock_result.fetchall.return_value = [(1, "issue-1", "High"), (2, "issue-2", "Medium")]
    mock_result.columns = ["id", "issue_id", "priority"]
    mock_duckdb_connection.sql.return_value = mock_result

    query = "SELECT id, issue_id, priority FROM test_table"
    result = duckdb_tools_instance.run_query(query)

    # run_query renders results as CSV: header line then one line per row.
    expected_output = "id,issue_id,priority\n1,issue-1,High\n2,issue-2,Medium"
    assert result == expected_output
    mock_duckdb_connection.sql.assert_called_with(query)

def test_run_query_removes_backticks(duckdb_tools_instance, mock_duckdb_connection):
    """Test that run_query removes backticks from queries."""
    mock_result = MagicMock()
    mock_result.fetchall.return_value = [("test",)]
    mock_result.columns = ["col"]
    mock_duckdb_connection.sql.return_value = mock_result

    # Backticks (MySQL-style quoting) are stripped before execution,
    # since DuckDB does not accept them as identifier quotes.
    query_with_backticks = "SELECT `column` FROM `table`"
    expected_cleaned_query = "SELECT column FROM table"

    duckdb_tools_instance.run_query(query_with_backticks)

    mock_duckdb_connection.sql.assert_called_with(expected_cleaned_query)

def test_describe_table_success(duckdb_tools_instance, mock_duckdb_connection):
    """Test successful table description."""
    # Setup mock result for DESCRIBE query
    mock_result = MagicMock()
    mock_result.fetchall.return_value = [
        ("issue_id", "VARCHAR", "YES", None, None, None),
        ("priority", "VARCHAR", "YES", None, None, None),
        ("status", "VARCHAR", "YES", None, None, None),
    ]
    mock_result.columns = ["column_name", "column_type", "null", "key", "default", "extra"]
    mock_duckdb_connection.sql.return_value = mock_result

    table_name = "test_table"
    result = duckdb_tools_instance.describe_table(table_name)

    # Output is the table name followed by the DESCRIBE rows in CSV form.
    expected_output = f"{table_name}\ncolumn_name,column_type,null,key,default,extra\nissue_id,VARCHAR,YES,None,None,None\npriority,VARCHAR,YES,None,None,None\nstatus,VARCHAR,YES,None,None,None"
    assert result == expected_output
    # The run_query method removes semicolons, so check for the statement without semicolon
    mock_duckdb_connection.sql.assert_called_with(f"DESCRIBE {table_name}")
# --- Integration Test Case ---
def test_integration_create_and_query_table(duckdb_tools_instance, mock_duckdb_connection):
    """Integration test: create table and then query it successfully."""
    # Test the workflow that was failing in the original issue:
    # a quoted table name in CREATE made the subsequent SELECT fail.
    path = "/path/to/jira_backlog.csv"
    expected_table_name = "jira_backlog"

    # Step 1: Create table
    table_name = duckdb_tools_instance.create_table_from_path(path)
    assert table_name == expected_table_name

    # Verify table creation SQL doesn't have quoted table name
    create_call_args = mock_duckdb_connection.sql.call_args[0][0]
    assert f"CREATE TABLE IF NOT EXISTS {expected_table_name} AS" in create_call_args
    assert f"'{expected_table_name}'" not in create_call_args
    assert f"read_csv('{path}', ignore_errors=false, auto_detect=true)" in create_call_args

    # Step 2: Setup mock for query execution
    mock_result = MagicMock()
    mock_result.fetchall.return_value = [(1, "ISSUE-1", "High"), (2, "ISSUE-2", "Medium")]
    mock_result.columns = ["rownum", "issue_id", "priority"]
    mock_duckdb_connection.sql.return_value = mock_result

    # Step 3: Query the table (this was failing before the fix)
    query = f"SELECT row_number() OVER () AS rownum, issue_id, priority FROM {expected_table_name}"
    result = duckdb_tools_instance.run_query(query)

    # Verify query executed successfully
    expected_output = "rownum,issue_id,priority\n1,ISSUE-1,High\n2,ISSUE-2,Medium"
    assert result == expected_output
# --- Error Handling Tests ---
def test_run_query_duckdb_error(duckdb_tools_instance, mock_duckdb_connection):
    """run_query should turn a duckdb.Error into an error string, not a crash."""
    with patch("agno.tools.duckdb.duckdb") as mock_duckdb_module:
        # run_query catches duckdb's error hierarchy; substitute stub
        # exception classes so we can raise without a real connection.
        class StubDuckDBError(Exception):
            pass

        class StubProgrammingError(Exception):
            pass

        mock_duckdb_module.Error = StubDuckDBError
        mock_duckdb_module.ProgrammingError = StubProgrammingError
        mock_duckdb_connection.sql.side_effect = StubDuckDBError("Test error")

        output = duckdb_tools_instance.run_query("SELECT * FROM non_existent_table")
        assert "Test error" in output
def test_run_query_programming_error(duckdb_tools_instance, mock_duckdb_connection):
    """run_query should surface SQL syntax errors as an error string."""
    with patch("agno.tools.duckdb.duckdb") as mock_duckdb_module:
        # Replace duckdb's exception classes with local stubs so the
        # error path can be exercised without a database.
        class StubDuckDBError(Exception):
            pass

        class StubProgrammingError(Exception):
            pass

        mock_duckdb_module.Error = StubDuckDBError
        mock_duckdb_module.ProgrammingError = StubProgrammingError
        mock_duckdb_connection.sql.side_effect = StubProgrammingError("Syntax error")

        output = duckdb_tools_instance.run_query("INVALID SQL SYNTAX")
        assert "Syntax error" in output
# --- Test Cases for Edge Cases ---
def test_run_query_single_column_result(duckdb_tools_instance, mock_duckdb_connection):
    """run_query should format single-column results as header plus one value per line."""
    fake_result = MagicMock()
    fake_result.fetchall.return_value = [("value1",), ("value2",), ("value3",)]
    fake_result.columns = ["single_col"]
    mock_duckdb_connection.sql.return_value = fake_result

    output = duckdb_tools_instance.run_query("SELECT single_col FROM test_table")
    assert output == "single_col\nvalue1\nvalue2\nvalue3"
def test_run_query_no_results(duckdb_tools_instance, mock_duckdb_connection):
    """An empty result set still yields the CSV header plus a trailing newline."""
    fake_result = MagicMock()
    fake_result.fetchall.return_value = []
    fake_result.columns = ["col1", "col2"]
    mock_duckdb_connection.sql.return_value = fake_result

    assert duckdb_tools_instance.run_query("SELECT * FROM empty_table") == "col1,col2\n"
def test_custom_table_name_with_special_chars(duckdb_tools_instance, mock_duckdb_connection):
    """A caller-supplied table name must be used verbatim, with no extra sanitization."""
    custom_table = "my_custom_table_123"
    returned = duckdb_tools_instance.create_table_from_path("/path/to/file.csv", table=custom_table)
    assert returned == custom_table

    issued_sql = mock_duckdb_connection.sql.call_args[0][0]
    assert f"CREATE TABLE IF NOT EXISTS {custom_table} AS" in issued_sql
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_duckdb.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/valyu.py | import json
from os import getenv
from typing import Any, List, Optional
from agno.tools import Toolkit
from agno.utils.log import log_debug, log_error, log_warning
try:
from valyu import Valyu
except ImportError:
raise ImportError("`valyu` not installed. Please install using `pip install valyu`")
class ValyuTools(Toolkit):
    """
    Valyu is a toolkit for academic and web search capabilities.

    Args:
        api_key (Optional[str]): Valyu API key. Retrieved from VALYU_API_KEY env variable if not provided.
        enable_academic_search (bool): Enable academic sources search functionality. Default is True.
        enable_web_search (bool): Enable web search functionality. Default is True.
        enable_paper_search (bool): Enable search within paper functionality. Default is True.
        all (bool): Enable all tools. Overrides individual flags when True. Default is False.
        text_length (int): Maximum length of text content per result. Default is 1000.
        max_results (int): Maximum number of results to return. Default is 10.
        relevance_threshold (float): Minimum relevance score for results. Default is 0.5.
        content_category (Optional[str]): Content category for filtering.
        search_start_date (Optional[str]): Start date for search filtering (YYYY-MM-DD).
        search_end_date (Optional[str]): End date for search filtering (YYYY-MM-DD).
        search_domains (Optional[List[str]]): List of domains to search within.
        sources (Optional[List[str]]): List of specific sources to search.
        max_price (float): Maximum price for API calls. Default is 30.0.
        tool_call_mode (bool): Enable tool call mode. Default is False.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        enable_academic_search: bool = True,
        enable_web_search: bool = True,
        enable_paper_search: bool = True,
        all: bool = False,  # noqa: A002 - shadows builtin `all`; name kept for backward-compatible API
        text_length: int = 1000,
        max_results: int = 10,
        relevance_threshold: float = 0.5,
        content_category: Optional[str] = None,
        search_start_date: Optional[str] = None,
        search_end_date: Optional[str] = None,
        search_domains: Optional[List[str]] = None,
        sources: Optional[List[str]] = None,
        max_price: float = 30.0,
        tool_call_mode: bool = False,
        **kwargs,
    ):
        self.api_key = api_key or getenv("VALYU_API_KEY")
        if not self.api_key:
            raise ValueError("VALYU_API_KEY not set. Please set the VALYU_API_KEY environment variable.")

        self.valyu = Valyu(api_key=self.api_key)
        self.text_length = text_length
        self.max_results = max_results
        self.relevance_threshold = relevance_threshold
        self.max_price = max_price
        self.content_category = content_category
        self.search_start_date = search_start_date
        self.search_end_date = search_end_date
        self.search_domains = search_domains
        self.sources = sources
        self.tool_call_mode = tool_call_mode

        # Register only the tools the caller enabled; `all` overrides the flags.
        tools: List[Any] = []
        if all or enable_academic_search:
            tools.append(self.search_academic_sources)
        if all or enable_web_search:
            tools.append(self.search_web)
        if all or enable_paper_search:
            tools.append(self.search_within_paper)

        super().__init__(name="valyu_search", tools=tools, **kwargs)

    def _parse_results(self, results: List[Any]) -> str:
        """Convert Valyu result objects into a pretty-printed JSON array string.

        Only attributes that are present (and truthy, except relevance_score)
        are copied; `content` is truncated to `self.text_length` characters
        with a trailing ellipsis.
        """
        parsed_results = []
        for result in results:
            result_dict = {}
            # Essential fields
            if hasattr(result, "url") and result.url:
                result_dict["url"] = result.url
            if hasattr(result, "title") and result.title:
                result_dict["title"] = result.title
            if hasattr(result, "source") and result.source:
                result_dict["source"] = result.source
            if hasattr(result, "relevance_score"):
                result_dict["relevance_score"] = result.relevance_score
            # Content with length limiting
            if hasattr(result, "content") and result.content:
                content = result.content
                if self.text_length and len(content) > self.text_length:
                    content = content[: self.text_length] + "..."
                result_dict["content"] = content
            # Additional metadata
            if hasattr(result, "description") and result.description:
                result_dict["description"] = result.description
            parsed_results.append(result_dict)
        return json.dumps(parsed_results, indent=2)

    def _valyu_search(
        self,
        query: str,
        search_type: str,
        content_category: Optional[str] = None,
        sources: Optional[List[str]] = None,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
    ) -> str:
        """Run a Valyu search and return parsed results, or an "Error: ..." string.

        Per-call arguments take precedence over the constructor defaults.
        Never raises: API failures and exceptions are folded into the return value.
        """
        try:
            search_params = {
                "query": query,
                "search_type": search_type,
                "max_num_results": self.max_results,
                "is_tool_call": self.tool_call_mode,
                "relevance_threshold": self.relevance_threshold,
                "max_price": self.max_price,
            }
            # Add optional parameters (method arguments override constructor defaults)
            if sources or self.sources:
                search_params["included_sources"] = sources or self.sources
            if content_category or self.content_category:
                search_params["category"] = content_category or self.content_category
            if start_date or self.search_start_date:
                search_params["start_date"] = start_date or self.search_start_date
            if end_date or self.search_end_date:
                search_params["end_date"] = end_date or self.search_end_date

            log_debug(f"Valyu search parameters: {search_params}")
            response = self.valyu.search(**search_params)

            if not response.success:
                log_error(f"Valyu search API error: {response.error}")
                return f"Error: {response.error or 'Search request failed'}"

            return self._parse_results(response.results or [])
        except Exception as e:
            error_msg = f"Valyu search failed: {str(e)}"
            log_error(error_msg)
            return f"Error: {error_msg}"

    def search_academic_sources(
        self,
        query: str,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
    ) -> str:
        """Search academic sources (ArXiv, PubMed, academic publishers).

        Args:
            query: Research question or topic
            start_date: Filter papers after this date (YYYY-MM-DD)
            end_date: Filter papers before this date (YYYY-MM-DD)

        Returns:
            JSON array of academic papers
        """
        sources = ["valyu/valyu-arxiv", "valyu/valyu-pubmed", "wiley/wiley-finance-papers", "wiley/wiley-finance-books"]
        return self._valyu_search(
            query=query,
            search_type="proprietary",
            sources=sources,
            start_date=start_date,
            end_date=end_date,
        )

    def search_web(
        self,
        query: str,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        content_category: Optional[str] = None,
    ) -> str:
        """Search web sources for real-time information.

        Args:
            query: Search query
            start_date: Filter content after this date (YYYY-MM-DD)
            end_date: Filter content before this date (YYYY-MM-DD)
            content_category: Description of the category of the query

        Returns:
            JSON array of web search results
        """
        return self._valyu_search(
            query=query,
            search_type="web",
            content_category=content_category,
            start_date=start_date,
            end_date=end_date,
        )

    def search_within_paper(
        self,
        paper_url: str,
        query: str,
    ) -> str:
        """Search within a specific ArXiv paper.

        Args:
            paper_url: ArXiv paper URL
            query: Search query

        Returns:
            JSON array of relevant sections from the paper
        """
        # Validate the URL scheme before spending API budget on the call.
        # Fix: the check previously tested for the prefix "https:/" (single
        # slash typo), which also accepted malformed URLs such as "https:/x".
        if not paper_url.startswith("https://"):
            log_warning(f"Invalid paper URL: {paper_url}")
            return "Error: Invalid paper URL format"

        return self._valyu_search(
            query=query,
            search_type="proprietary",
            sources=[paper_url],
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/valyu.py",
"license": "Apache License 2.0",
"lines": 197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_valyu.py | import json
from unittest.mock import patch
import pytest
from agno.tools.valyu import ValyuTools
class MockSearchResult:
    """Lightweight stand-in exposing the attribute surface that ValyuTools reads."""

    def __init__(
        self,
        title="Test Paper",
        url="https://example.com",
        content="Test content",
        source="test",
        relevance_score=0.8,
        description="Test description",
    ):
        # Assign every field in one place so the attribute set is easy to audit.
        for attr_name, attr_value in (
            ("title", title),
            ("url", url),
            ("content", content),
            ("source", source),
            ("relevance_score", relevance_score),
            ("description", description),
        ):
            setattr(self, attr_name, attr_value)
class MockSearchResponse:
    """Stand-in for the Valyu API response envelope (success flag, results, error)."""

    def __init__(self, success=True, results=None, error=None):
        self.success = success
        self.error = error
        # Falsy results (None, empty) normalize to an empty list,
        # matching what the real client hands back.
        self.results = results if results else []
@pytest.fixture
def mock_valyu():
    """Patch the Valyu client class so no real API client is ever constructed."""
    with patch("agno.tools.valyu.Valyu") as patched_client_cls:
        yield patched_client_cls
@pytest.fixture
def valyu_tools(mock_valyu):
    """ValyuTools instance backed by the patched (mock) Valyu client."""
    # Depending on mock_valyu guarantees the constructor never builds a real client.
    return ValyuTools(api_key="test_key")
class TestValyuTools:
    """Unit tests for ValyuTools; the Valyu client is patched, so no network I/O."""

    def test_init_with_api_key(self, mock_valyu):
        """Test initialization with API key."""
        tools = ValyuTools(api_key="test_key")
        assert tools.api_key == "test_key"
        assert tools.max_price == 30.0
        assert tools.text_length == 1000
        mock_valyu.assert_called_once_with(api_key="test_key")

    def test_init_without_api_key_raises_error(self, mock_valyu):
        """Test initialization without API key raises ValueError."""
        # clear=True removes VALYU_API_KEY so the env-var fallback cannot kick in.
        with patch.dict("os.environ", {}, clear=True):
            with pytest.raises(ValueError, match="VALYU_API_KEY not set"):
                ValyuTools()

    @patch.dict("os.environ", {"VALYU_API_KEY": "env_key"})
    def test_init_with_env_api_key(self, mock_valyu):
        """Test initialization with API key from environment."""
        tools = ValyuTools()
        assert tools.api_key == "env_key"

    def test_parse_results_basic(self, valyu_tools):
        """Test basic result parsing."""
        results = [MockSearchResult()]
        parsed = valyu_tools._parse_results(results)
        data = json.loads(parsed)
        assert len(data) == 1
        assert data[0]["title"] == "Test Paper"
        assert data[0]["url"] == "https://example.com"
        assert data[0]["content"] == "Test content"
        assert data[0]["relevance_score"] == 0.8

    def test_parse_results_with_text_truncation(self, valyu_tools):
        """Test result parsing with text length truncation."""
        valyu_tools.text_length = 10
        long_content = "A" * 20
        results = [MockSearchResult(content=long_content)]
        parsed = valyu_tools._parse_results(results)
        data = json.loads(parsed)
        # Content is clipped to text_length characters plus an ellipsis marker.
        assert data[0]["content"] == "A" * 10 + "..."

    def test_parse_results_empty(self, valyu_tools):
        """Test parsing empty results."""
        parsed = valyu_tools._parse_results([])
        data = json.loads(parsed)
        assert data == []

    def test_search_academic_sources_success(self, valyu_tools):
        """Test successful academic search."""
        mock_response = MockSearchResponse(success=True, results=[MockSearchResult(title="Academic Paper")])
        valyu_tools.valyu.search.return_value = mock_response
        result = valyu_tools.search_academic_sources("test query")
        data = json.loads(result)
        assert len(data) == 1
        assert data[0]["title"] == "Academic Paper"
        # Verify search was called with correct parameters
        valyu_tools.valyu.search.assert_called_once()
        call_args = valyu_tools.valyu.search.call_args[1]
        assert call_args["query"] == "test query"
        assert call_args["search_type"] == "proprietary"
        assert "valyu/valyu-arxiv" in call_args["included_sources"]
        assert "valyu/valyu-pubmed" in call_args["included_sources"]

    def test_search_academic_sources_with_dates(self, valyu_tools):
        """Test academic search with date filters."""
        mock_response = MockSearchResponse(success=True, results=[])
        valyu_tools.valyu.search.return_value = mock_response
        valyu_tools.search_academic_sources("test query", start_date="2023-01-01", end_date="2023-12-31")
        call_args = valyu_tools.valyu.search.call_args[1]
        assert call_args["start_date"] == "2023-01-01"
        assert call_args["end_date"] == "2023-12-31"

    def test_search_web_success(self, valyu_tools):
        """Test successful web search."""
        mock_response = MockSearchResponse(success=True, results=[MockSearchResult(title="Web Article")])
        valyu_tools.valyu.search.return_value = mock_response
        result = valyu_tools.search_web("test query")
        data = json.loads(result)
        assert len(data) == 1
        assert data[0]["title"] == "Web Article"
        call_args = valyu_tools.valyu.search.call_args[1]
        assert call_args["search_type"] == "web"

    def test_search_web_with_category(self, valyu_tools):
        """Test web search with category."""
        mock_response = MockSearchResponse(success=True, results=[])
        valyu_tools.valyu.search.return_value = mock_response
        valyu_tools.search_web("test query", content_category="technology")
        call_args = valyu_tools.valyu.search.call_args[1]
        assert call_args["category"] == "technology"

    def test_search_within_paper_success(self, valyu_tools):
        """Test successful within-paper search."""
        mock_response = MockSearchResponse(success=True, results=[MockSearchResult(title="Paper Section")])
        valyu_tools.valyu.search.return_value = mock_response
        result = valyu_tools.search_within_paper("https://arxiv.org/abs/1234.5678", "test query")
        data = json.loads(result)
        assert len(data) == 1
        assert data[0]["title"] == "Paper Section"
        # The paper URL itself becomes the single included source.
        call_args = valyu_tools.valyu.search.call_args[1]
        assert call_args["included_sources"] == ["https://arxiv.org/abs/1234.5678"]

    def test_search_within_paper_invalid_url(self, valyu_tools):
        """Test within-paper search with invalid URL."""
        result = valyu_tools.search_within_paper("invalid-url", "test query")
        assert "Error: Invalid paper URL format" in result

    def test_search_api_error(self, valyu_tools):
        """Test handling of API error."""
        mock_response = MockSearchResponse(success=False, error="API Error")
        valyu_tools.valyu.search.return_value = mock_response
        result = valyu_tools.search_academic_sources("test query")
        assert "Error: API Error" in result

    def test_search_exception_handling(self, valyu_tools):
        """Test exception handling during search."""
        valyu_tools.valyu.search.side_effect = Exception("Network error")
        result = valyu_tools.search_academic_sources("test query")
        assert "Error: Valyu search failed: Network error" in result

    def test_constructor_parameters_used_in_search(self, mock_valyu):
        """Test that constructor parameters are properly used in searches."""
        tools = ValyuTools(
            api_key="test_key",
            max_results=5,
            relevance_threshold=0.7,
            content_category="science",
            search_start_date="2023-01-01",
        )
        mock_response = MockSearchResponse(success=True, results=[])
        tools.valyu.search.return_value = mock_response
        tools.search_academic_sources("test query")
        call_args = tools.valyu.search.call_args[1]
        assert call_args["max_num_results"] == 5
        assert call_args["relevance_threshold"] == 0.7
        assert call_args["category"] == "science"
        assert call_args["start_date"] == "2023-01-01"

    def test_method_parameters_override_constructor(self, valyu_tools):
        """Test that method parameters override constructor defaults."""
        valyu_tools.content_category = "default_category"
        valyu_tools.search_start_date = "2023-01-01"
        mock_response = MockSearchResponse(success=True, results=[])
        valyu_tools.valyu.search.return_value = mock_response
        valyu_tools.search_web("test query", content_category="override_category", start_date="2024-01-01")
        call_args = valyu_tools.valyu.search.call_args[1]
        assert call_args["category"] == "override_category"
        assert call_args["start_date"] == "2024-01-01"

    def test_tools_registration(self, valyu_tools):
        """Test that all tools are properly registered."""
        tool_names = list(valyu_tools.functions.keys())
        expected_tools = ["search_academic_sources", "search_web", "search_within_paper"]
        for tool in expected_tools:
            assert tool in tool_names
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_valyu.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_serper.py | import json
from unittest.mock import Mock, patch
import pytest
import requests
from agno.tools.serper import SerperTools
@pytest.fixture(autouse=True)
def clear_env(monkeypatch):
    """Ensure SERPER_API_KEY is unset unless explicitly needed."""
    # autouse: applied to every test so ambient credentials never leak in.
    monkeypatch.delenv("SERPER_API_KEY", raising=False)
@pytest.fixture
def api_tools():
    """SerperTools with a known API key and custom settings for testing."""
    # num_results and date_range are non-default so payload assertions
    # can detect that the configured values actually reach the request.
    return SerperTools(
        api_key="test_key",
        location="us",
        language="en",
        num_results=5,
        date_range="qdr:d",  # Last day
    )
@pytest.fixture
def mock_search_response():
    """Mock a successful Serper API search response."""
    # Define the payload once; json.dumps (default separators) reproduces
    # exactly the serialized string the tests compare against.
    payload = {"organic": [{"title": "Test Result", "link": "http://example.com"}]}
    response = Mock(spec=requests.Response)
    response.text = json.dumps(payload)
    response.json.return_value = payload
    response.raise_for_status.return_value = None
    return response
@pytest.fixture
def mock_news_response():
    """Mock a successful Serper API news response."""
    # Single payload definition keeps .text and .json() trivially in sync.
    payload = {"news": [{"title": "Breaking News", "link": "http://news.example.com", "date": "2 hours ago"}]}
    response = Mock(spec=requests.Response)
    response.text = json.dumps(payload)
    response.json.return_value = payload
    response.raise_for_status.return_value = None
    return response
@pytest.fixture
def mock_scholar_response():
    """Mock a successful Serper API scholar response."""
    payload = {
        "organic": [{"title": "Research Paper", "link": "http://scholar.example.com", "authors": ["Dr. Smith"]}]
    }
    response = Mock(spec=requests.Response)
    response.text = json.dumps(payload)
    response.json.return_value = payload
    response.raise_for_status.return_value = None
    return response
@pytest.fixture
def mock_scrape_response():
    """Mock a successful Serper API scrape response."""
    payload = {"text": "Scraped content", "title": "Example Page"}
    response = Mock(spec=requests.Response)
    response.text = json.dumps(payload)
    response.json.return_value = payload
    response.raise_for_status.return_value = None
    return response
# Initialization Tests
def test_init_without_api_key_and_env(monkeypatch):
    """With no api_key argument and no SERPER_API_KEY in the env, api_key is None."""
    monkeypatch.delenv("SERPER_API_KEY", raising=False)
    assert SerperTools().api_key is None
def test_init_with_env_var(monkeypatch):
    """SERPER_API_KEY from the environment is used when no argument is given."""
    monkeypatch.setenv("SERPER_API_KEY", "env_key")
    assert SerperTools(api_key=None).api_key == "env_key"
def test_init_with_custom_params():
    """Every constructor parameter should land on the matching attribute."""
    tools = SerperTools(
        api_key="test_key",
        location="uk",
        language="fr",
        num_results=15,
        date_range="qdr:w",
    )
    expected = {
        "api_key": "test_key",
        "location": "uk",
        "language": "fr",
        "num_results": 15,
        "date_range": "qdr:w",
    }
    for attr, value in expected.items():
        assert getattr(tools, attr) == value
# Search Tests
def test_search_no_api_key():
    """Searching without any API key yields a JSON error payload."""
    payload = json.loads(SerperTools(api_key=None).search_web("anything"))
    assert "error" in payload
    assert "Please provide a Serper API key" in payload["error"]
def test_search_empty_query(api_tools):
    """An empty query yields a JSON error payload."""
    payload = json.loads(api_tools.search_web(""))
    assert "error" in payload
    assert "Please provide a query to search for" in payload["error"]
def test_search_success(api_tools, mock_search_response):
    """A successful search should return the raw response.text and call requests.request correctly."""
    with patch("requests.request", return_value=mock_search_response) as mock_req:
        result = api_tools.search_web("pytest testing")
        assert result == mock_search_response.text
        # NOTE: data is compared as a serialized string, so the payload's key
        # order must match exactly what SerperTools builds internally.
        mock_req.assert_called_once_with(
            "POST",
            "https://google.serper.dev/search",
            headers={"X-API-KEY": "test_key", "Content-Type": "application/json"},
            data=json.dumps({"q": "pytest testing", "num": 5, "tbs": "qdr:d", "gl": "us", "hl": "en"}),
        )
def test_search_with_custom_num_results(api_tools, mock_search_response):
    """Overriding the num_results parameter should be respected in the request payload."""
    with patch("requests.request", return_value=mock_search_response) as mock_req:
        result = api_tools.search_web("pytest testing", num_results=20)
        assert result == mock_search_response.text
        # Key order must mirror SerperTools' internal dict, because the
        # comparison is on the json.dumps-serialized string.
        expected_payload = {
            "q": "pytest testing",
            "num": 20,
            "tbs": "qdr:d",
            "gl": "us",
            "hl": "en",
        }
        mock_req.assert_called_once_with(
            "POST",
            "https://google.serper.dev/search",
            headers={"X-API-KEY": "test_key", "Content-Type": "application/json"},
            data=json.dumps(expected_payload),
        )
def test_search_exception(api_tools):
    """A raising requests.request is caught and reported as a JSON error payload."""
    with patch("requests.request", side_effect=Exception("Network failure")):
        raw = api_tools.search_web("failure test")
    payload = json.loads(raw)
    assert "error" in payload
    assert "Network failure" in payload["error"]
# News Search Tests
def test_search_news_no_api_key():
    """News search without any API key yields a JSON error payload."""
    payload = json.loads(SerperTools(api_key=None).search_news("tech news"))
    assert "error" in payload
    assert "Please provide a Serper API key" in payload["error"]
def test_search_news_empty_query(api_tools):
    """An empty news query yields a JSON error payload."""
    payload = json.loads(api_tools.search_news(""))
    assert "error" in payload
    assert "Please provide a query to search for news" in payload["error"]
def test_search_news_success(api_tools, mock_news_response):
    """A successful news search should return the raw response.text."""
    with patch("requests.request", return_value=mock_news_response) as mock_req:
        result = api_tools.search_news("latest tech news")
        assert result == mock_news_response.text
        # Key order matters: the payload is compared as a json.dumps string.
        expected_payload = {
            "q": "latest tech news",
            "num": 5,
            "tbs": "qdr:d",
            "gl": "us",
            "hl": "en",
        }
        mock_req.assert_called_once_with(
            "POST",
            "https://google.serper.dev/news",
            headers={"X-API-KEY": "test_key", "Content-Type": "application/json"},
            data=json.dumps(expected_payload),
        )
def test_search_news_with_custom_num_results(api_tools, mock_news_response):
    """Overriding num_results in news search should reach the request payload."""
    with patch("requests.request", return_value=mock_news_response) as mock_req:
        raw = api_tools.search_news("tech news", num_results=15)
    assert raw == mock_news_response.text
    mock_req.assert_called_once()
    # Parse the sent body so the comparison is order-insensitive.
    sent_payload = json.loads(mock_req.call_args[1]["data"])
    assert sent_payload == {"q": "tech news", "num": 15, "tbs": "qdr:d", "gl": "us", "hl": "en"}
def test_search_news_exception(api_tools):
    """A raising requests.request during news search is reported as a JSON error."""
    with patch("requests.request", side_effect=Exception("API timeout")):
        raw = api_tools.search_news("breaking news")
    payload = json.loads(raw)
    assert "error" in payload
    assert "API timeout" in payload["error"]
# Scholar Search Tests
def test_search_scholar_no_api_key():
    """Scholar search without any API key yields a JSON error payload."""
    payload = json.loads(SerperTools(api_key=None).search_scholar("machine learning"))
    assert "error" in payload
    assert "Please provide a Serper API key" in payload["error"]
def test_search_scholar_empty_query(api_tools):
    """An empty scholar query yields a JSON error payload."""
    payload = json.loads(api_tools.search_scholar(""))
    assert "error" in payload
    assert "Please provide a query to search for academic papers" in payload["error"]
def test_search_scholar_success(api_tools, mock_scholar_response):
    """A successful scholar search should return the raw response.text."""
    with patch("requests.request", return_value=mock_scholar_response) as mock_req:
        result = api_tools.search_scholar("artificial intelligence")
        assert result == mock_scholar_response.text
        # Key order matters: the payload is compared as a json.dumps string.
        expected_payload = {
            "q": "artificial intelligence",
            "num": 5,
            "tbs": "qdr:d",
            "gl": "us",
            "hl": "en",
        }
        mock_req.assert_called_once_with(
            "POST",
            "https://google.serper.dev/scholar",
            headers={"X-API-KEY": "test_key", "Content-Type": "application/json"},
            data=json.dumps(expected_payload),
        )
def test_search_scholar_exception(api_tools):
    """A raising requests.request during scholar search is reported as a JSON error."""
    with patch("requests.request", side_effect=Exception("Scholar API error")):
        raw = api_tools.search_scholar("quantum computing")
    payload = json.loads(raw)
    assert "error" in payload
    assert "Scholar API error" in payload["error"]
# Webpage Scraping Tests
def test_scrape_webpage_no_api_key():
    """Scraping without any API key yields a JSON error payload."""
    payload = json.loads(SerperTools(api_key=None).scrape_webpage("https://example.com"))
    assert "error" in payload
    assert "Please provide a Serper API key" in payload["error"]
def test_scrape_webpage_empty_url(api_tools):
    """An empty URL yields a JSON error payload."""
    payload = json.loads(api_tools.scrape_webpage(""))
    assert "error" in payload
    assert "Please provide a URL to scrape" in payload["error"]
def test_scrape_webpage_success(api_tools, mock_scrape_response):
    """A successful webpage scrape should return the raw response.text."""
    with patch("requests.request", return_value=mock_scrape_response) as mock_req:
        result = api_tools.scrape_webpage("https://example.com")
        assert result == mock_scrape_response.text
        # Key order matters: the payload is compared as a json.dumps string.
        expected_payload = {
            "url": "https://example.com",
            "includeMarkdown": False,
            "tbs": "qdr:d",
            "gl": "us",
            "hl": "en",
        }
        mock_req.assert_called_once_with(
            "POST",
            "https://scrape.serper.dev",
            headers={"X-API-KEY": "test_key", "Content-Type": "application/json"},
            data=json.dumps(expected_payload),
        )
def test_scrape_webpage_with_markdown(api_tools, mock_scrape_response):
    """markdown=True must flip includeMarkdown to True in the request body."""
    with patch("requests.request", return_value=mock_scrape_response) as mock_req:
        raw = api_tools.scrape_webpage("https://example.com", markdown=True)
    assert raw == mock_scrape_response.text
    # Parse the sent body so the comparison is order-insensitive.
    sent_payload = json.loads(mock_req.call_args[1]["data"])
    assert sent_payload == {
        "url": "https://example.com",
        "includeMarkdown": True,
        "tbs": "qdr:d",
        "gl": "us",
        "hl": "en",
    }
def test_scrape_webpage_exception(api_tools):
    """A raising requests.request during scraping is reported as a JSON error."""
    with patch("requests.request", side_effect=Exception("Scraping failed")):
        raw = api_tools.scrape_webpage("https://example.com")
    payload = json.loads(raw)
    assert "error" in payload
    assert "Scraping failed" in payload["error"]
# Edge Cases and Integration Tests
def test_tools_without_optional_params():
    """Constructing with only an API key leaves every other setting at its default."""
    tools = SerperTools(api_key="test_key")
    # Defaults documented by SerperTools.__init__.
    assert (tools.location, tools.language, tools.num_results) == ("us", "en", 10)
    assert tools.date_range is None
def test_http_error_handling(api_tools):
    """HTTP errors raised by raise_for_status are reported as a JSON error payload."""
    bad_response = Mock()
    bad_response.raise_for_status.side_effect = requests.exceptions.HTTPError("404 Not Found")
    with patch("requests.request", return_value=bad_response):
        raw = api_tools.search_web("test query")
    payload = json.loads(raw)
    assert "error" in payload
    assert "404 Not Found" in payload["error"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_serper.py",
"license": "Apache License 2.0",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_audio_utils.py | import base64
import os
import tempfile
from pathlib import Path
from unittest.mock import patch
import pytest
from agno.utils.audio import write_audio_to_file
def test_write_audio_to_file_basic():
    """Round-trip: base64-encoded audio is decoded and written verbatim."""
    raw_audio = b"test audio content"
    encoded = base64.b64encode(raw_audio).decode("utf-8")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp:
        target = tmp.name
    try:
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        assert Path(target).read_bytes() == raw_audio
    finally:
        # Remove the NamedTemporaryFile we created with delete=False.
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_with_directory_creation():
    """Missing parent directories are created before the audio file is written."""
    raw_audio = b"test audio content"
    encoded = base64.b64encode(raw_audio).decode("utf-8")
    with tempfile.TemporaryDirectory() as tmp_dir:
        nested_dir = Path(tmp_dir) / "audio" / "subdir"
        target = nested_dir / "test_audio.wav"
        # Neither "audio" nor "subdir" exists yet; the helper must create them.
        write_audio_to_file(encoded, str(target))
        assert nested_dir.is_dir()
        assert target.exists()
        assert target.read_bytes() == raw_audio
def test_write_audio_to_file_existing_directory():
    """Writing into an already-existing directory works and preserves the bytes."""
    raw_audio = b"test audio content"
    encoded = base64.b64encode(raw_audio).decode("utf-8")
    with tempfile.TemporaryDirectory() as tmp_dir:
        target = Path(tmp_dir) / "test_audio.wav"
        write_audio_to_file(encoded, str(target))
        assert target.exists()
        assert target.read_bytes() == raw_audio
def test_write_audio_to_file_empty_audio():
    """An empty base64 payload should produce an empty file."""
    encoded = base64.b64encode(b"").decode("utf-8")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        target = handle.name
    try:
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            assert saved.read() == b""
    finally:
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_large_audio():
    """A 1 MiB payload should be written completely and exactly."""
    payload = b"x" * (1024 * 1024)
    encoded = base64.b64encode(payload).decode("utf-8")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        target = handle.name
    try:
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            contents = saved.read()
        # Full size and content must survive the round trip.
        assert contents == payload
        assert len(contents) == 1024 * 1024
    finally:
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_special_characters_in_filename():
    """Hyphens, underscores, and digits in the filename should be accepted."""
    payload = b"test audio content"
    encoded = base64.b64encode(payload).decode("utf-8")
    with tempfile.TemporaryDirectory() as workdir:
        target = os.path.join(workdir, "test-audio_123.wav")
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            assert saved.read() == payload
def test_write_audio_to_file_unicode_filename():
    """Non-ASCII characters (emoji) in the filename should be accepted."""
    payload = b"test audio content"
    encoded = base64.b64encode(payload).decode("utf-8")
    with tempfile.TemporaryDirectory() as workdir:
        target = os.path.join(workdir, "test_audio_🎵.wav")
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            assert saved.read() == payload
@patch("agno.utils.audio.log_info")
def test_write_audio_to_file_logging(mock_log_info):
    """Test that logging is called correctly.

    Patches ``log_info`` in ``agno.utils.audio`` and verifies the util
    logs a save message that includes the destination path.
    """
    # Create test audio data
    test_audio_data = b"test audio content"
    base64_audio = base64.b64encode(test_audio_data).decode("utf-8")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_file:
        filename = temp_file.name
    try:
        # Call the function
        write_audio_to_file(base64_audio, filename)
        # Bug fix: the expected message must interpolate the actual filename.
        # The previous literal "(unknown)" had no placeholder and could never
        # match the real log call made by write_audio_to_file.
        mock_log_info.assert_called_once_with(f"Audio file saved to {filename}")
    finally:
        # Cleanup
        if os.path.exists(filename):
            os.unlink(filename)
def test_write_audio_to_file_overwrite_existing():
    """Writing over an existing file should replace its contents."""
    fresh = b"new audio content"
    encoded = base64.b64encode(fresh).decode("utf-8")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        target = handle.name
        handle.write(b"old content")  # seed the file with stale data
    try:
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            contents = saved.read()
        # New bytes fully replace the old ones.
        assert contents == fresh
        assert contents != b"old content"
    finally:
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_pathlib_path():
    """A pathlib.Path converted to str should work as the destination."""
    payload = b"test audio content"
    encoded = base64.b64encode(payload).decode("utf-8")
    with tempfile.TemporaryDirectory() as workdir:
        target = Path(workdir) / "test_audio.wav"
        write_audio_to_file(encoded, str(target))
        assert target.exists()
        with open(target, "rb") as saved:
            assert saved.read() == payload
def test_write_audio_to_file_relative_path():
    """A relative destination path should resolve against the CWD."""
    payload = b"test audio content"
    encoded = base64.b64encode(payload).decode("utf-8")
    target = "test_audio_relative.wav"
    try:
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            assert saved.read() == payload
    finally:
        # Remove the file created in the working directory.
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_invalid_base64():
    """Malformed base64 input should raise instead of writing garbage."""
    bad_payload = "invalid_base64_data!"
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        target = handle.name
    try:
        # base64 decoding of this string raises (binascii.Error).
        with pytest.raises(Exception):
            write_audio_to_file(bad_payload, target)
    finally:
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_none_audio():
    """Passing None as the audio payload should raise TypeError."""
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        target = handle.name
    try:
        with pytest.raises(TypeError):
            write_audio_to_file(None, target)
    finally:
        if os.path.exists(target):
            os.unlink(target)
def test_write_audio_to_file_empty_string_audio():
    """Encoding empty bytes yields an empty base64 string; result is an empty file."""
    encoded = base64.b64encode(b"").decode("utf-8")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as handle:
        target = handle.name
    try:
        write_audio_to_file(encoded, target)
        assert os.path.exists(target)
        with open(target, "rb") as saved:
            assert saved.read() == b""
    finally:
        if os.path.exists(target):
            os.unlink(target)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_audio_utils.py",
"license": "Apache License 2.0",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/opencv.py | import time
from pathlib import Path
from typing import Callable, List
from uuid import uuid4
from agno.agent import Agent
from agno.media import Image, Video
from agno.tools import Toolkit
from agno.tools.function import ToolResult
from agno.utils.log import log_debug, log_error, log_info
try:
import cv2
except ImportError:
raise ImportError("`opencv-python` package not found. Please install it with `pip install opencv-python`")
class OpenCVTools(Toolkit):
    """Tools for capturing images and videos from the webcam using OpenCV"""

    def __init__(
        self,
        show_preview=False,
        enable_capture_image: bool = True,
        enable_capture_video: bool = True,
        all: bool = False,
        **kwargs,
    ):
        # When True, an OpenCV window previews the camera before/while capturing.
        self.show_preview = show_preview
        # Register only the tools the caller enabled; `all` overrides the flags.
        tools: List[Callable] = []
        if all or enable_capture_image:
            tools.append(self.capture_image)
        if all or enable_capture_video:
            tools.append(self.capture_video)
        super().__init__(
            name="opencv_tools",
            tools=tools,
            **kwargs,
        )

    def capture_image(
        self,
        agent: Agent,
        prompt: str = "Webcam capture",
    ) -> ToolResult:
        """Capture an image from the webcam.
        Args:
            prompt (str): Description of the image capture. Defaults to "Webcam capture".
        Returns:
            ToolResult: A ToolResult containing the captured image or error message.
        """
        try:
            log_debug("Initializing webcam for image capture...")
            # Open device 0 with the default backend, then fall back through
            # platform-specific backends until one opens.
            cam = cv2.VideoCapture(0)
            if not cam.isOpened():
                cam = cv2.VideoCapture(0, cv2.CAP_AVFOUNDATION)  # macOS
            if not cam.isOpened():
                cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # Windows
            if not cam.isOpened():
                cam = cv2.VideoCapture(0, cv2.CAP_V4L2)  # Linux
            if not cam.isOpened():
                error_msg = "Could not open webcam. Please ensure your terminal has camera permissions and the camera is not being used by another application."
                log_error(error_msg)
                return ToolResult(content=error_msg)
            try:
                # Request 720p @ 30fps; drivers may silently ignore these hints.
                cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
                cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
                cam.set(cv2.CAP_PROP_FPS, 30)
                log_debug("Camera initialized successfully")
                captured_frame = None
                if self.show_preview:
                    # Interactive path: show a live window until the user
                    # presses 'c' (capture) or 'q' (cancel).
                    log_info("Live preview started. Press 'c' to capture image, 'q' to quit.")
                    while True:
                        ret, frame = cam.read()
                        if not ret:
                            error_msg = "Failed to read frame from webcam"
                            log_error(error_msg)
                            return ToolResult(content=error_msg)
                        cv2.imshow('Camera Preview - Press "c" to capture, "q" to quit', frame)
                        # Mask to 8 bits: waitKey can return platform-dependent high bits.
                        key = cv2.waitKey(1) & 0xFF
                        if key == ord("c"):
                            captured_frame = frame.copy()
                            log_info("Image captured!")
                            break
                        elif key == ord("q"):
                            log_info("Capture cancelled by user")
                            return ToolResult(content="Image capture cancelled by user")
                else:
                    # Headless path: grab a single frame immediately.
                    ret, captured_frame = cam.read()
                    if not ret:
                        error_msg = "Failed to capture image from webcam"
                        log_error(error_msg)
                        return ToolResult(content=error_msg)
                if captured_frame is None:
                    error_msg = "No frame captured"
                    log_error(error_msg)
                    return ToolResult(content=error_msg)
                # Encode the BGR frame to PNG in memory.
                success, encoded_image = cv2.imencode(".png", captured_frame)
                if not success:
                    error_msg = "Failed to encode captured image"
                    log_error(error_msg)
                    return ToolResult(content=error_msg)
                image_bytes = encoded_image.tobytes()
                media_id = str(uuid4())
                # Create ImageArtifact with raw bytes (not base64 encoded)
                image_artifact = Image(
                    id=media_id,
                    content=image_bytes,  # Store as raw bytes
                    original_prompt=prompt,
                    mime_type="image/png",
                )
                log_debug(f"Successfully captured and attached image {media_id}")
                return ToolResult(
                    content="Image captured successfully",
                    images=[image_artifact],
                )
            finally:
                # Release the camera and close windows
                cam.release()
                cv2.destroyAllWindows()
                log_debug("Camera resources released")
        except Exception as e:
            error_msg = f"Error capturing image: {str(e)}"
            log_error(error_msg)
            return ToolResult(content=error_msg)

    def capture_video(
        self,
        agent: Agent,
        duration: int = 10,
        prompt: str = "Webcam video capture",
    ) -> ToolResult:
        """Capture a video from the webcam.
        Args:
            duration (int): Duration in seconds to record video. Defaults to 10 seconds.
            prompt (str): Description of the video capture. Defaults to "Webcam video capture".
        Returns:
            ToolResult: A ToolResult containing the captured video or error message.
        """
        try:
            log_debug("Initializing webcam for video capture...")
            cap = cv2.VideoCapture(0)
            # Try different backends for better compatibility
            if not cap.isOpened():
                cap = cv2.VideoCapture(0, cv2.CAP_AVFOUNDATION)  # macOS
            if not cap.isOpened():
                cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # Windows
            if not cap.isOpened():
                cap = cv2.VideoCapture(0, cv2.CAP_V4L2)  # Linux
            if not cap.isOpened():
                error_msg = "Could not open webcam. Please ensure your terminal has camera permissions and the camera is not being used by another application."
                log_error(error_msg)
                return ToolResult(content=error_msg)
            try:
                frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                actual_fps = cap.get(cv2.CAP_PROP_FPS)
                # Use actual FPS or default to 30 if detection fails
                if actual_fps <= 0 or actual_fps > 60:
                    actual_fps = 30.0
                log_debug(f"Video properties: {frame_width}x{frame_height} at {actual_fps} FPS")
                # Try different codecs in order of preference for compatibility
                codecs_to_try = [
                    ("avc1", "H.264"),  # Most compatible
                    ("mp4v", "MPEG-4"),  # Fallback
                    ("XVID", "Xvid"),  # Another fallback
                ]
                import os
                import tempfile

                # delete=False so the writer can reopen the path after the
                # handle is closed; the file is unlinked manually below.
                with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
                    temp_filepath = temp_file.name
                out = None
                successful_codec = None
                for codec_fourcc, codec_name in codecs_to_try:
                    try:
                        # NOTE: getattr(cv2, "VideoWriter_fourcc") is intentional —
                        # the unit tests patch `agno.tools.opencv.getattr` to stub it.
                        fourcc = getattr(cv2, "VideoWriter_fourcc")(*codec_fourcc)
                        out = cv2.VideoWriter(temp_filepath, fourcc, actual_fps, (frame_width, frame_height))
                        if out.isOpened():
                            successful_codec = codec_name
                            log_debug(f"Successfully initialized video writer with {codec_name} codec")
                            break
                        else:
                            out.release()
                            out = None
                    except Exception as e:
                        log_debug(f"Failed to initialize {codec_name} codec: {e}")
                        if out:
                            out.release()
                            out = None
                if not out or not out.isOpened():
                    error_msg = "Failed to initialize video writer with any codec"
                    log_error(error_msg)
                    # NOTE(review): the temp file created above is not unlinked on
                    # this and later early-return paths — consider cleanup in finally.
                    return ToolResult(content=error_msg)
                start_time = time.time()
                frame_count = 0
                if self.show_preview:
                    log_info(f"Recording {duration}s video with live preview using {successful_codec} codec...")
                else:
                    log_info(f"Recording {duration}s video using {successful_codec} codec...")
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        error_msg = "Failed to capture video frame"
                        log_error(error_msg)
                        return ToolResult(content=error_msg)
                    # Write the frame to the output file
                    out.write(frame)
                    frame_count += 1
                    # Show live preview if enabled
                    if self.show_preview:
                        # Add recording indicator
                        elapsed = time.time() - start_time
                        remaining = max(0, duration - elapsed)
                        # Draw recording info on frame
                        display_frame = frame.copy()
                        cv2.putText(
                            display_frame,
                            f"REC {remaining:.1f}s",
                            (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            1,
                            (0, 0, 255),
                            2,
                        )
                        cv2.circle(display_frame, (30, 60), 10, (0, 0, 255), -1)  # Red dot
                        cv2.imshow(f"Recording Video - {remaining:.1f}s remaining", display_frame)
                        cv2.waitKey(1)
                    # Check if recording duration is reached
                    if time.time() - start_time >= duration:
                        break
                # Release video writer
                out.release()
                # Verify the file was created and has content
                temp_path = Path(temp_filepath)
                if not temp_path.exists() or temp_path.stat().st_size == 0:
                    error_msg = "Video file was not created or is empty"
                    log_error(error_msg)
                    return ToolResult(content=error_msg)
                # Read the video file and encode to base64
                with open(temp_filepath, "rb") as video_file:
                    video_bytes = video_file.read()
                # Clean up temporary file
                os.unlink(temp_filepath)
                media_id = str(uuid4())
                # Create VideoArtifact with base64 encoded content
                video_artifact = Video(
                    id=media_id,
                    content=video_bytes,
                    original_prompt=prompt,
                    mime_type="video/mp4",
                )
                actual_duration = time.time() - start_time
                log_debug(
                    f"Successfully captured and attached video {media_id} ({actual_duration:.1f}s, {frame_count} frames)"
                )
                return ToolResult(
                    content=f"Video captured successfully and attached as artifact {media_id} ({actual_duration:.1f}s, {frame_count} frames, {successful_codec} codec)",
                    videos=[video_artifact],
                )
            finally:
                # Guard with locals() in case the earlier code raised before
                # `cap` was bound (e.g. VideoCapture itself threw).
                if "cap" in locals():
                    cap.release()
                cv2.destroyAllWindows()
                log_debug("Video capture resources released")
        except Exception as e:
            error_msg = f"Error capturing video: {str(e)}"
            log_error(error_msg)
            return ToolResult(content=error_msg)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/opencv.py",
"license": "Apache License 2.0",
"lines": 261,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_opencv.py | """Unit tests for OpenCVTools class."""
from unittest.mock import Mock, patch
import numpy as np
import pytest
from agno.agent import Agent
from agno.media import Image, Video
from agno.tools.function import ToolResult
from agno.tools.opencv import OpenCVTools
@pytest.fixture
def mock_cv2():
    """Create a mock OpenCV module.

    Replaces `cv2` as imported by agno.tools.opencv so tests never touch a
    real camera. The constant values and the default waitKey return of 'c'
    are relied upon by the capture tests — change them with care.
    """
    with patch("agno.tools.opencv.cv2") as mock_cv2:
        # Backend/property constants mirroring the real cv2 values used by the tool
        mock_cv2.CAP_AVFOUNDATION = 1200
        mock_cv2.CAP_DSHOW = 700
        mock_cv2.CAP_V4L2 = 200
        mock_cv2.CAP_PROP_FRAME_WIDTH = 3
        mock_cv2.CAP_PROP_FRAME_HEIGHT = 4
        mock_cv2.CAP_PROP_FPS = 5
        mock_cv2.FONT_HERSHEY_SIMPLEX = 0
        # Mock VideoCapture
        mock_capture = Mock()
        mock_capture.isOpened.return_value = True
        mock_capture.set.return_value = True
        # get() keyed by the property constants above (width/height/fps)
        mock_capture.get.side_effect = lambda prop: {3: 1280, 4: 720, 5: 30.0}[prop]
        mock_capture.read.return_value = (True, np.zeros((720, 1280, 3), dtype=np.uint8))
        mock_capture.release.return_value = None
        mock_cv2.VideoCapture.return_value = mock_capture
        # Mock image encoding
        mock_cv2.imencode.return_value = (True, np.array([1, 2, 3], dtype=np.uint8))
        # Mock video writer
        mock_writer = Mock()
        mock_writer.isOpened.return_value = True
        mock_writer.write.return_value = None
        mock_writer.release.return_value = None
        mock_cv2.VideoWriter.return_value = mock_writer
        # Mock VideoWriter_fourcc
        mock_cv2.VideoWriter_fourcc.return_value = 123456
        # Mock GUI functions
        mock_cv2.imshow.return_value = None
        mock_cv2.waitKey.return_value = ord("c")  # Default to capture key
        mock_cv2.destroyAllWindows.return_value = None
        mock_cv2.putText.return_value = None
        mock_cv2.circle.return_value = None
        yield mock_cv2
@pytest.fixture
def mock_agent():
    """Provide a spec'd Agent mock for passing into tool calls."""
    return Mock(spec=Agent)
@pytest.fixture
def opencv_tools_with_preview(mock_cv2):
    """OpenCVTools configured to show the live preview window."""
    return OpenCVTools(show_preview=True)
@pytest.fixture
def opencv_tools_no_preview(mock_cv2):
    """OpenCVTools configured for headless (no preview) capture."""
    return OpenCVTools(show_preview=False)
class TestOpenCVToolsInitialization:
    """Test OpenCVTools initialization and configuration."""

    def test_init_with_preview_enabled(self, mock_cv2):
        """show_preview=True is stored and both tools are registered."""
        toolkit = OpenCVTools(show_preview=True)
        assert toolkit.show_preview is True
        assert toolkit.name == "opencv_tools"
        assert len(toolkit.tools) == 2

    def test_init_with_preview_disabled(self, mock_cv2):
        """show_preview=False is stored and both tools are registered."""
        toolkit = OpenCVTools(show_preview=False)
        assert toolkit.show_preview is False
        assert toolkit.name == "opencv_tools"
        assert len(toolkit.tools) == 2

    def test_init_default_preview(self, mock_cv2):
        """Preview defaults to disabled when not specified."""
        toolkit = OpenCVTools()
        assert toolkit.show_preview is False
class TestImageCapture:
    """Test image capture functionality.

    All tests drive OpenCVTools.capture_image against the mock_cv2 fixture;
    key presses are simulated via the mocked cv2.waitKey return value.
    """

    def test_capture_image_no_preview_success(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test successful image capture without preview."""
        result = opencv_tools_no_preview.capture_image(mock_agent, "Test capture")
        # Check that result is a ToolResult with success content
        assert isinstance(result, ToolResult)
        assert result.content == "Image captured successfully"
        assert result.images is not None
        assert len(result.images) == 1
        # Verify image artifact was created correctly
        image_artifact = result.images[0]
        assert isinstance(image_artifact, Image)
        assert image_artifact.original_prompt == "Test capture"
        assert image_artifact.mime_type == "image/png"
        # Verify OpenCV calls
        mock_cv2.VideoCapture.assert_called_with(0)
        mock_cv2.imencode.assert_called_once()

    def test_capture_image_with_preview_success(self, opencv_tools_with_preview, mock_agent, mock_cv2):
        """Test successful image capture with preview (user presses 'c')."""
        # Mock waitKey to return 'c' (capture) on first call
        mock_cv2.waitKey.return_value = ord("c")
        result = opencv_tools_with_preview.capture_image(mock_agent, "Test capture with preview")
        # Check that result is a ToolResult with success content
        assert isinstance(result, ToolResult)
        assert result.content == "Image captured successfully"
        assert result.images is not None
        assert len(result.images) == 1
        # Verify preview was shown
        mock_cv2.imshow.assert_called()

    def test_capture_image_user_cancels(self, opencv_tools_with_preview, mock_agent, mock_cv2):
        """Test image capture cancelled by user (user presses 'q')."""
        # Mock waitKey to return 'q' (quit) on first call
        mock_cv2.waitKey.return_value = ord("q")
        result = opencv_tools_with_preview.capture_image(mock_agent, "Test capture")
        # Check that result is a ToolResult with cancellation content
        assert isinstance(result, ToolResult)
        assert result.content == "Image capture cancelled by user"
        assert result.images is None

    def test_capture_image_camera_not_available(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test image capture when camera is not available."""
        # All backend fallbacks share this mock, so every open attempt fails.
        mock_cv2.VideoCapture.return_value.isOpened.return_value = False
        result = opencv_tools_no_preview.capture_image(mock_agent, "Test capture")
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert "Could not open webcam" in result.content
        assert result.images is None

    def test_capture_image_read_frame_fails(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test image capture when reading frame fails."""
        mock_cv2.VideoCapture.return_value.read.return_value = (False, None)
        result = opencv_tools_no_preview.capture_image(mock_agent, "Test capture")
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert "Failed to capture image from webcam" in result.content
        assert result.images is None

    def test_capture_image_encode_fails(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test image capture when encoding fails."""
        mock_cv2.imencode.return_value = (False, None)
        result = opencv_tools_no_preview.capture_image(mock_agent, "Test capture")
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert "Failed to encode captured image" in result.content
        assert result.images is None

    def test_capture_image_exception_handling(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test image capture exception handling."""
        mock_cv2.VideoCapture.side_effect = Exception("Test exception")
        result = opencv_tools_no_preview.capture_image(mock_agent, "Test capture")
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert "Error capturing image: Test exception" in result.content
        assert result.images is None
class TestVideoCapture:
    """Test video capture functionality.

    These tests patch `agno.tools.opencv.time` with exact `time.time()`
    side-effect sequences matching capture_video's call order
    (start_time, [elapsed if preview], duration check, actual_duration) —
    keep the sequences in sync with the implementation.
    """

    @patch("tempfile.NamedTemporaryFile")
    @patch("os.path.exists")
    @patch("os.path.getsize")
    @patch("os.unlink")
    @patch("builtins.open", create=True)
    @patch("agno.tools.opencv.time")
    def test_capture_video_no_preview_success(
        self,
        mock_time_module,
        mock_open,
        mock_unlink,
        mock_getsize,
        mock_exists,
        mock_tempfile,
        opencv_tools_no_preview,
        mock_agent,
        mock_cv2,
    ):
        """Test successful video capture without preview."""
        # Patch the time module reference in opencv so logging doesn't consume mock values
        mock_time_module.time.side_effect = [0, 6, 6]
        # Mock temporary file
        mock_temp = Mock()
        mock_temp.name = "/tmp/test_video.mp4"
        mock_tempfile.return_value.__enter__.return_value = mock_temp
        # Mock file operations
        mock_exists.return_value = True
        mock_getsize.return_value = 1000  # Non-zero size
        mock_file = Mock()
        mock_file.read.return_value = b"fake_video_data"
        mock_open.return_value.__enter__.return_value = mock_file
        # Mock getattr for VideoWriter_fourcc
        with patch("agno.tools.opencv.getattr") as mock_getattr, patch("agno.tools.opencv.Path") as mock_path_class:
            mock_getattr.return_value.return_value = 123456
            # Mock Path behavior
            mock_path = Mock()
            mock_path.exists.return_value = True
            mock_stat = Mock()
            mock_stat.st_size = 1000  # Non-zero size
            mock_path.stat.return_value = mock_stat
            mock_path_class.return_value = mock_path
            result = opencv_tools_no_preview.capture_video(mock_agent, duration=5, prompt="Test video")
            # Check that result is a ToolResult with success content
            assert isinstance(result, ToolResult)
            assert "Video captured successfully" in result.content
            assert "H.264 codec" in result.content  # Should use first codec successfully
            assert result.videos is not None
            assert len(result.videos) == 1
            # Verify video artifact was created correctly
            video_artifact = result.videos[0]
            assert isinstance(video_artifact, Video)
            assert video_artifact.original_prompt == "Test video"
            assert video_artifact.mime_type == "video/mp4"

    @patch("tempfile.NamedTemporaryFile")
    @patch("os.path.exists")
    @patch("os.path.getsize")
    @patch("os.unlink")
    @patch("builtins.open", create=True)
    @patch("agno.tools.opencv.time")
    def test_capture_video_with_preview_success(
        self,
        mock_time_module,
        mock_open,
        mock_unlink,
        mock_getsize,
        mock_exists,
        mock_tempfile,
        opencv_tools_with_preview,
        mock_agent,
        mock_cv2,
    ):
        """Test successful video capture with preview."""
        # Patch the time module reference in opencv so logging doesn't consume mock values
        # Preview path: start_time, elapsed, duration_check, actual_duration
        mock_time_module.time.side_effect = [0, 1, 4, 4]
        # Mock temporary file
        mock_temp = Mock()
        mock_temp.name = "/tmp/test_video.mp4"
        mock_tempfile.return_value.__enter__.return_value = mock_temp
        # Mock file operations
        mock_exists.return_value = True
        mock_getsize.return_value = 1000
        mock_file = Mock()
        mock_file.read.return_value = b"fake_video_data"
        mock_open.return_value.__enter__.return_value = mock_file
        # Mock getattr for VideoWriter_fourcc
        with patch("agno.tools.opencv.getattr") as mock_getattr, patch("agno.tools.opencv.Path") as mock_path_class:
            mock_getattr.return_value.return_value = 123456
            # Mock Path behavior
            mock_path = Mock()
            mock_path.exists.return_value = True
            mock_stat = Mock()
            mock_stat.st_size = 1000  # Non-zero size
            mock_path.stat.return_value = mock_stat
            mock_path_class.return_value = mock_path
            result = opencv_tools_with_preview.capture_video(mock_agent, duration=3, prompt="Test video")
            # Check that result is a ToolResult with success content
            assert isinstance(result, ToolResult)
            assert "Video captured successfully" in result.content
            assert result.videos is not None
            assert len(result.videos) == 1
            # Verify preview was shown
            mock_cv2.imshow.assert_called()  # Preview should be shown
            mock_cv2.putText.assert_called()  # Recording indicator should be drawn

    def test_capture_video_camera_not_available(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test video capture when camera is not available."""
        mock_cv2.VideoCapture.return_value.isOpened.return_value = False
        result = opencv_tools_no_preview.capture_video(mock_agent, duration=5)
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert "Could not open webcam" in result.content
        assert result.videos is None

    def test_capture_video_invalid_fps(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test video capture with invalid FPS (should default to 30)."""
        # Mock invalid FPS values
        mock_cv2.VideoCapture.return_value.get.side_effect = lambda prop: {
            3: 1280,
            4: 720,
            5: -1,  # Invalid FPS
        }[prop]
        with (
            patch("tempfile.NamedTemporaryFile") as mock_tempfile,
            patch("os.path.exists", return_value=True),
            patch("os.path.getsize", return_value=1000),
            patch("os.unlink"),
            patch("builtins.open", create=True) as mock_open,
            patch("agno.tools.opencv.time") as mock_time,
            patch("agno.tools.opencv.getattr") as mock_getattr,
        ):
            mock_time.time.side_effect = [0, 2, 2]
            # Mock temporary file
            mock_temp = Mock()
            mock_temp.name = "/tmp/test_video.mp4"
            mock_tempfile.return_value.__enter__.return_value = mock_temp
            # Mock file operations
            mock_file = Mock()
            mock_file.read.return_value = b"fake_video_data"
            mock_open.return_value.__enter__.return_value = mock_file
            mock_getattr.return_value.return_value = 123456
            # This should not fail and should use 30.0 as default FPS
            result = opencv_tools_no_preview.capture_video(mock_agent, duration=1)
            # Check that result is a ToolResult
            assert isinstance(result, ToolResult)
            # Should succeed with default FPS or fail gracefully
            assert (
                "Video captured successfully" in result.content
                or "Failed to initialize video writer" in result.content
                or "Video file was not created" in result.content
            )

    def test_capture_video_codec_fallback(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test video capture codec fallback mechanism."""
        # Mock first codec failing, second succeeding
        mock_writer_fail = Mock()
        mock_writer_fail.isOpened.return_value = False
        mock_writer_success = Mock()
        mock_writer_success.isOpened.return_value = True
        mock_cv2.VideoWriter.side_effect = [mock_writer_fail, mock_writer_success]
        with (
            patch("tempfile.NamedTemporaryFile") as mock_tempfile,
            patch("os.path.exists", return_value=True),
            patch("os.path.getsize", return_value=1000),
            patch("os.unlink"),
            patch("builtins.open", create=True) as mock_open,
            patch("agno.tools.opencv.time") as mock_time,
            patch("agno.tools.opencv.getattr") as mock_getattr,
        ):
            mock_time.time.side_effect = [0, 2, 2]
            # Mock temporary file
            mock_temp = Mock()
            mock_temp.name = "/tmp/test_video.mp4"
            mock_tempfile.return_value.__enter__.return_value = mock_temp
            # Mock file operations
            mock_file = Mock()
            mock_file.read.return_value = b"fake_video_data"
            mock_open.return_value.__enter__.return_value = mock_file
            mock_getattr.return_value.return_value = 123456
            result = opencv_tools_no_preview.capture_video(mock_agent, duration=1)
            # Check that result is a ToolResult
            assert isinstance(result, ToolResult)
            # Should succeed with fallback codec or fail gracefully
            assert (
                "Video captured successfully" in result.content
                or "MPEG-4 codec" in result.content
                or "Video file was not created" in result.content
            )

    def test_capture_video_all_codecs_fail(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test video capture when all codecs fail."""
        mock_cv2.VideoWriter.return_value.isOpened.return_value = False
        with patch("agno.tools.opencv.getattr") as mock_getattr:
            mock_getattr.return_value.return_value = 123456
            result = opencv_tools_no_preview.capture_video(mock_agent, duration=1)
            # Check that result is a ToolResult with error content
            assert isinstance(result, ToolResult)
            assert "Failed to initialize video writer with any codec" in result.content
            assert result.videos is None

    def test_capture_video_frame_read_fails(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test video capture when frame reading fails."""
        mock_cv2.VideoCapture.return_value.read.return_value = (False, None)
        with patch("tempfile.NamedTemporaryFile"), patch("agno.tools.opencv.getattr") as mock_getattr:
            mock_getattr.return_value.return_value = 123456
            result = opencv_tools_no_preview.capture_video(mock_agent, duration=1)
            # Check that result is a ToolResult with error content
            assert isinstance(result, ToolResult)
            assert "Failed to capture video frame" in result.content
            assert result.videos is None

    @patch("tempfile.NamedTemporaryFile")
    @patch("os.path.exists")
    def test_capture_video_file_not_created(
        self, mock_exists, mock_tempfile, opencv_tools_no_preview, mock_agent, mock_cv2
    ):
        """Test video capture when temporary file is not created."""
        mock_temp = Mock()
        mock_temp.name = "/tmp/test_video.mp4"
        mock_tempfile.return_value.__enter__.return_value = mock_temp
        mock_exists.return_value = False  # File doesn't exist
        with (
            patch("agno.tools.opencv.time") as mock_time,
            patch("agno.tools.opencv.getattr") as mock_getattr,
        ):
            # Only two time() calls: start and the duration check (error path returns early)
            mock_time.time.side_effect = [0, 2]
            mock_getattr.return_value.return_value = 123456
            result = opencv_tools_no_preview.capture_video(mock_agent, duration=1)
            # Check that result is a ToolResult with error content
            assert isinstance(result, ToolResult)
            assert "Video file was not created or is empty" in result.content
            assert result.videos is None

    def test_capture_video_exception_handling(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test video capture exception handling."""
        mock_cv2.VideoCapture.side_effect = Exception("Test exception")
        result = opencv_tools_no_preview.capture_video(mock_agent, duration=1)
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert "Error capturing video: Test exception" in result.content
        assert result.videos is None
class TestResourceCleanup:
    """Test proper resource cleanup in all scenarios.

    Verifies the `finally` blocks in capture_image/capture_video release the
    camera and destroy windows on both success and failure paths.
    """

    def test_image_capture_cleanup_on_success(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test that camera resources are properly released on successful image capture."""
        mock_cam = mock_cv2.VideoCapture.return_value
        opencv_tools_no_preview.capture_image(mock_agent, "Test")
        mock_cam.release.assert_called_once()
        mock_cv2.destroyAllWindows.assert_called_once()

    def test_image_capture_cleanup_on_exception(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test that camera resources are properly released on exception."""
        mock_cam = mock_cv2.VideoCapture.return_value
        # Raise after the camera is opened so the finally block must clean up.
        mock_cv2.imencode.side_effect = Exception("Test exception")
        opencv_tools_no_preview.capture_image(mock_agent, "Test")
        mock_cam.release.assert_called_once()
        mock_cv2.destroyAllWindows.assert_called_once()

    def test_video_capture_cleanup_on_success(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test that video capture resources are properly released on success."""
        mock_cap = mock_cv2.VideoCapture.return_value
        with (
            patch("tempfile.NamedTemporaryFile") as mock_tempfile,
            patch("os.path.exists", return_value=True),
            patch("os.path.getsize", return_value=1000),
            patch("os.unlink"),
            patch("builtins.open", create=True) as mock_open,
            patch("agno.tools.opencv.time") as mock_time,
            patch("agno.tools.opencv.getattr") as mock_getattr,
        ):
            mock_time.time.side_effect = [0, 2, 2]
            # Mock temporary file
            mock_temp = Mock()
            mock_temp.name = "/tmp/test_video.mp4"
            mock_tempfile.return_value.__enter__.return_value = mock_temp
            # Mock file operations
            mock_file = Mock()
            mock_file.read.return_value = b"fake_video_data"
            mock_open.return_value.__enter__.return_value = mock_file
            mock_getattr.return_value.return_value = 123456
            opencv_tools_no_preview.capture_video(mock_agent, duration=1)
            mock_cap.release.assert_called_once()
            mock_cv2.destroyAllWindows.assert_called_once()

    def test_video_capture_cleanup_on_exception(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Test that video capture resources are properly released on exception."""
        mock_cap = mock_cv2.VideoCapture.return_value
        # First VideoCapture call succeeds, a later backend retry raises.
        mock_cv2.VideoCapture.side_effect = [mock_cap, Exception("Test exception")]
        opencv_tools_no_preview.capture_video(mock_agent, duration=1)
        mock_cap.release.assert_called_once()
        mock_cv2.destroyAllWindows.assert_called_once()
class TestEdgeCases:
    """Edge cases and special scenarios for the OpenCV toolkit."""

    def test_capture_image_default_prompt(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Capturing without an explicit prompt falls back to the default one."""
        captured = opencv_tools_no_preview.capture_image(mock_agent)
        assert isinstance(captured, ToolResult)
        assert captured.images is not None
        assert len(captured.images) == 1
        assert captured.images[0].original_prompt == "Webcam capture"

    def test_capture_video_default_parameters(self, opencv_tools_no_preview, mock_agent, mock_cv2):
        """Recording with all-default parameters still yields a ToolResult."""
        with (
            patch("tempfile.NamedTemporaryFile") as fake_tempfile,
            patch("os.path.exists", return_value=True),
            patch("os.path.getsize", return_value=1000),
            patch("os.unlink"),
            patch("builtins.open", create=True) as fake_open,
            patch("agno.tools.opencv.time") as fake_time,
            patch("agno.tools.opencv.getattr") as fake_getattr,
        ):
            fake_time.time.side_effect = [0, 11, 11]
            temp_file = Mock()
            temp_file.name = "/tmp/test_video.mp4"
            fake_tempfile.return_value.__enter__.return_value = temp_file
            video_file = Mock()
            video_file.read.return_value = b"fake_video_data"
            fake_open.return_value.__enter__.return_value = video_file
            fake_getattr.return_value.return_value = 123456
            outcome = opencv_tools_no_preview.capture_video(mock_agent)
            assert isinstance(outcome, ToolResult)
            # Success is mock-dependent; only inspect the artifact when present.
            if outcome.videos and len(outcome.videos) > 0:
                assert outcome.videos[0].original_prompt == "Webcam video capture"

    def test_preview_mode_persistence(self, mock_cv2):
        """The show_preview flag keeps the value it was constructed with."""
        with_preview = OpenCVTools(show_preview=True)
        without_preview = OpenCVTools(show_preview=False)
        # Read twice: the value must not drift between accesses.
        for _ in range(2):
            assert with_preview.show_preview is True
            assert without_preview.show_preview is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_opencv.py",
"license": "Apache License 2.0",
"lines": 477,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/brightdata.py | import base64
import json
from os import getenv
from typing import Any, Dict, List, Optional
from uuid import uuid4
from agno.agent import Agent
from agno.media import Image
from agno.tools import Toolkit
from agno.tools.function import ToolResult
from agno.utils.log import log_debug, log_error, log_info
# Fail fast at import time with an explicit message when the optional
# `requests` dependency is not installed.
try:
    import requests
except ImportError:
    raise ImportError("`requests` not installed.")
class BrightDataTools(Toolkit):
    """
    BrightData is a toolkit for web scraping, screenshots, search engines, and web data feeds.

    Args:
        api_key (Optional[str]): Bright Data API key. Retrieved from BRIGHT_DATA_API_KEY env variable if not provided.
        enable_scrape_markdown (bool): Enable webpage scraping as Markdown. Default is True.
        enable_screenshot (bool): Enable website screenshot capture. Default is True.
        enable_search_engine (bool): Enable search engine functionality. Default is True.
        enable_web_data_feed (bool): Enable web data feed retrieval. Default is True.
        all (bool): Enable all tools. Overrides individual flags when True. Default is False.
        serp_zone (str): SERP zone for search operations. Default is "serp_api".
        web_unlocker_zone (str): Web unlocker zone for scraping operations. Default is "web_unlocker1".
        verbose (bool): Enable verbose logging. Default is False.
        timeout (int): Timeout in seconds for operations. Default is 600.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        enable_scrape_markdown: bool = True,
        enable_screenshot: bool = True,
        enable_search_engine: bool = True,
        enable_web_data_feed: bool = True,
        all: bool = False,
        serp_zone: str = "serp_api",
        web_unlocker_zone: str = "web_unlocker1",
        verbose: bool = False,
        timeout: int = 600,
        **kwargs,
    ):
        self.api_key = api_key or getenv("BRIGHT_DATA_API_KEY")
        if not self.api_key:
            log_error("No Bright Data API key provided")
            raise ValueError(
                "No Bright Data API key provided. Please provide an api_key or set the BRIGHT_DATA_API_KEY environment variable."
            )
        self.verbose = verbose
        self.endpoint = "https://api.brightdata.com/request"
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }
        # Environment variables override the constructor arguments for the zones.
        self.web_unlocker_zone = getenv("BRIGHT_DATA_WEB_UNLOCKER_ZONE", web_unlocker_zone)
        self.serp_zone = getenv("BRIGHT_DATA_SERP_ZONE", serp_zone)
        self.timeout = timeout
        # Register only the enabled tool functions (or everything when `all` is set).
        tools: List[Any] = []
        if all or enable_scrape_markdown:
            tools.append(self.scrape_as_markdown)
        if all or enable_screenshot:
            tools.append(self.get_screenshot)
        if all or enable_search_engine:
            tools.append(self.search_engine)
        if all or enable_web_data_feed:
            tools.append(self.web_data_feed)
        super().__init__(name="brightdata_tools", tools=tools, **kwargs)

    def _make_request(self, payload: Dict) -> str:
        """Make a request to Bright Data API.

        Returns the raw response body text; raises on any non-200 status or
        transport failure (wrapped in a generic Exception).
        """
        try:
            if self.verbose:
                log_info(f"[Bright Data] Request: {payload['url']}")
            # NOTE(review): no HTTP timeout is passed here, so this call can block
            # indefinitely on an unresponsive endpoint -- confirm this is intended.
            response = requests.post(self.endpoint, headers=self.headers, data=json.dumps(payload))
            if response.status_code != 200:
                raise Exception(f"Failed to scrape: {response.status_code} - {response.text}")
            return response.text
        except Exception as e:
            raise Exception(f"Request failed: {e}")

    def scrape_as_markdown(self, url: str) -> str:
        """
        Scrape a webpage and return content in Markdown format.

        Args:
            url (str): URL to scrape

        Returns:
            str: Scraped content as Markdown, or an error message string on failure.
        """
        try:
            if not self.api_key:
                return "Please provide a Bright Data API key"
            if not url:
                return "Please provide a URL to scrape"
            log_info(f"Scraping URL as Markdown: {url}")
            payload = {
                "url": url,
                "zone": self.web_unlocker_zone,
                "format": "raw",
                "data_format": "markdown",
            }
            content = self._make_request(payload)
            return content
        except Exception as e:
            return f"Error scraping URL {url}: {e}"

    def get_screenshot(self, agent: Agent, url: str, output_path: str = "screenshot.png") -> ToolResult:
        """
        Capture a screenshot of a webpage

        Args:
            agent (Agent): Calling agent (unused here; part of the tool signature).
            url (str): URL to screenshot
            output_path (str): Output path for the screenshot (not used, kept for compatibility)

        Returns:
            ToolResult: Contains the screenshot image or error message.
        """
        try:
            if not self.api_key:
                return ToolResult(content="Please provide a Bright Data API key")
            if not url:
                return ToolResult(content="Please provide a URL to screenshot")
            log_info(f"Taking screenshot of: {url}")
            payload = {
                "url": url,
                "zone": self.web_unlocker_zone,
                "format": "raw",
                "data_format": "screenshot",
            }
            # Raw PNG bytes come back in the response body, so this bypasses
            # _make_request (which returns text).
            response = requests.post(self.endpoint, headers=self.headers, data=json.dumps(payload))
            if response.status_code != 200:
                raise Exception(f"Error {response.status_code}: {response.text}")
            image_bytes = response.content
            base64_encoded_image = base64.b64encode(image_bytes).decode("utf-8")
            log_debug(f"Base64 encoded image: {type(base64_encoded_image)}")
            media_id = str(uuid4())
            # Create Image for the screenshot (content is the base64 text re-encoded
            # to bytes, not the raw PNG bytes).
            image_artifact = Image(
                id=media_id,
                content=base64_encoded_image.encode("utf-8"),
                mime_type="image/png",
                original_prompt=f"Screenshot of {url}",
            )
            log_debug(f"Screenshot captured and added as artifact with ID: {media_id}")
            return ToolResult(
                content=f"Screenshot captured and added as artifact with ID: {media_id}", images=[image_artifact]
            )
        except Exception as e:
            return ToolResult(content=f"Error taking screenshot of {url}: {e}")

    def search_engine(
        self,
        query: str,
        engine: str = "google",
        num_results: int = 10,
        language: Optional[str] = None,
        country_code: Optional[str] = None,
    ) -> str:
        """
        Search using Google, Bing, or Yandex and return results in Markdown.

        Args:
            query (str): Search query
            engine (str): Search engine - 'google', 'bing', or 'yandex'
            num_results (int): Number of results to return
            language (Optional[str]): Two-letter language code
            country_code (Optional[str]): Two-letter country code

        Returns:
            str: Search results as Markdown, or an error message string on failure.
        """
        try:
            if not self.api_key:
                return "Please provide a Bright Data API key"
            if not query:
                return "Please provide a query to search for"
            log_info(f"Searching {engine} for: {query}")
            from urllib.parse import quote

            encoded_query = quote(query)
            base_urls = {
                "google": f"https://www.google.com/search?q={encoded_query}",
                "bing": f"https://www.bing.com/search?q={encoded_query}",
                "yandex": f"https://yandex.com/search/?text={encoded_query}",
            }
            if engine not in base_urls:
                return f"Unsupported search engine: {engine}. Use 'google', 'bing', or 'yandex'"
            search_url = base_urls[engine]
            # NOTE(review): language/country_code/num_results are only applied for
            # Google; Bing and Yandex silently ignore them -- confirm intended.
            if engine == "google":
                params = []
                if language:
                    params.append(f"hl={language}")
                if country_code:
                    params.append(f"gl={country_code}")
                if num_results:
                    params.append(f"num={num_results}")
                if params:
                    search_url += "&" + "&".join(params)
            payload = {
                "url": search_url,
                "zone": self.serp_zone,
                "format": "raw",
                "data_format": "markdown",
            }
            content = self._make_request(payload)
            return content
        except Exception as e:
            return f"Error searching for query {query}: {e}"

    def web_data_feed(
        self,
        source_type: str,
        url: str,
        num_of_reviews: Optional[int] = None,
    ) -> str:
        """
        Retrieve structured web data from various sources like LinkedIn, Amazon, Instagram, etc.

        Args:
            source_type (str): Type of data source (e.g., 'linkedin_person_profile', 'amazon_product')
            url (str): URL of the web resource to retrieve data from
            num_of_reviews (Optional[int]): Number of reviews to retrieve

        Returns:
            str: Structured data from the requested source as JSON
        """
        try:
            if not self.api_key:
                return "Please provide a Bright Data API key"
            if not url:
                return "Please provide a URL to retrieve data from"
            log_info(f"Retrieving {source_type} data from: {url}")
            # Maps the friendly source_type names to Bright Data dataset IDs.
            datasets = {
                "amazon_product": "gd_l7q7dkf244hwjntr0",
                "amazon_product_reviews": "gd_le8e811kzy4ggddlq",
                "amazon_product_search": "gd_lwdb4vjm1ehb499uxs",
                "walmart_product": "gd_l95fol7l1ru6rlo116",
                "walmart_seller": "gd_m7ke48w81ocyu4hhz0",
                "ebay_product": "gd_ltr9mjt81n0zzdk1fb",
                "homedepot_products": "gd_lmusivh019i7g97q2n",
                "zara_products": "gd_lct4vafw1tgx27d4o0",
                "etsy_products": "gd_ltppk0jdv1jqz25mz",
                "bestbuy_products": "gd_ltre1jqe1jfr7cccf",
                "linkedin_person_profile": "gd_l1viktl72bvl7bjuj0",
                "linkedin_company_profile": "gd_l1vikfnt1wgvvqz95w",
                "linkedin_job_listings": "gd_lpfll7v5hcqtkxl6l",
                "linkedin_posts": "gd_lyy3tktm25m4avu764",
                "linkedin_people_search": "gd_m8d03he47z8nwb5xc",
                "crunchbase_company": "gd_l1vijqt9jfj7olije",
                "zoominfo_company_profile": "gd_m0ci4a4ivx3j5l6nx",
                "instagram_profiles": "gd_l1vikfch901nx3by4",
                "instagram_posts": "gd_lk5ns7kz21pck8jpis",
                "instagram_reels": "gd_lyclm20il4r5helnj",
                "instagram_comments": "gd_ltppn085pokosxh13",
                "facebook_posts": "gd_lyclm1571iy3mv57zw",
                "facebook_marketplace_listings": "gd_lvt9iwuh6fbcwmx1a",
                "facebook_company_reviews": "gd_m0dtqpiu1mbcyc2g86",
                "facebook_events": "gd_m14sd0to1jz48ppm51",
                "tiktok_profiles": "gd_l1villgoiiidt09ci",
                "tiktok_posts": "gd_lu702nij2f790tmv9h",
                "tiktok_shop": "gd_m45m1u911dsa4274pi",
                "tiktok_comments": "gd_lkf2st302ap89utw5k",
                "google_maps_reviews": "gd_luzfs1dn2oa0teb81",
                "google_shopping": "gd_ltppk50q18kdw67omz",
                "google_play_store": "gd_lsk382l8xei8vzm4u",
                "apple_app_store": "gd_lsk9ki3u2iishmwrui",
                "reuter_news": "gd_lyptx9h74wtlvpnfu",
                "github_repository_file": "gd_lyrexgxc24b3d4imjt",
                "yahoo_finance_business": "gd_lmrpz3vxmz972ghd7",
                "x_posts": "gd_lwxkxvnf1cynvib9co",
                "zillow_properties_listing": "gd_lfqkr8wm13ixtbd8f5",
                "booking_hotel_listings": "gd_m5mbdl081229ln6t4a",
                "youtube_profiles": "gd_lk538t2k2p1k3oos71",
                "youtube_comments": "gd_lk9q0ew71spt1mxywf",
                "reddit_posts": "gd_lvz8ah06191smkebj4",
                # NOTE(review): same dataset ID as "booking_hotel_listings" -- verify.
                "youtube_videos": "gd_m5mbdl081229ln6t4a",
            }
            if source_type not in datasets:
                valid_sources = ", ".join(datasets.keys())
                return f"Invalid source_type: {source_type}. Valid options are: {valid_sources}"
            dataset_id = datasets[source_type]
            request_data = {"url": url}
            if source_type == "facebook_company_reviews" and num_of_reviews is not None:
                request_data["num_of_reviews"] = str(num_of_reviews)
            # Kick off an asynchronous dataset collection job; the result is
            # fetched afterwards by polling the returned snapshot.
            trigger_response = requests.post(
                "https://api.brightdata.com/datasets/v3/trigger",
                params={"dataset_id": dataset_id, "include_errors": "true"},
                headers=self.headers,
                json=[request_data],
            )
            trigger_data = trigger_response.json()
            if not trigger_data.get("snapshot_id"):
                return "No snapshot ID returned from trigger request"
            snapshot_id = trigger_data["snapshot_id"]
            import time

            # Poll roughly once per second; self.timeout caps the number of
            # attempts, so the wall-clock wait is about that many seconds plus
            # per-request latency.
            attempts = 0
            max_attempts = self.timeout
            while attempts < max_attempts:
                try:
                    snapshot_response = requests.get(
                        f"https://api.brightdata.com/datasets/v3/snapshot/{snapshot_id}",
                        params={"format": "json"},
                        headers=self.headers,
                    )
                    snapshot_data = snapshot_response.json()
                    if isinstance(snapshot_data, dict) and snapshot_data.get("status") == "running":
                        attempts += 1
                        time.sleep(1)
                        continue
                    return json.dumps(snapshot_data)
                except Exception:
                    # Transient polling errors count as attempts; keep trying.
                    attempts += 1
                    time.sleep(1)
            return f"Timeout after {max_attempts} seconds waiting for {source_type} data"
        except Exception as e:
            return f"Error retrieving {source_type} data from {url}: {e}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/brightdata.py",
"license": "Apache License 2.0",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_brightdata.py | """Unit tests for BrightDataTools class."""
import base64
import json
from unittest.mock import Mock, patch
import pytest
import requests
from agno.agent import Agent
from agno.media import Image
from agno.tools.brightdata import BrightDataTools
from agno.tools.function import ToolResult
@pytest.fixture
def mock_agent():
    """Provide an Agent stand-in with the real Agent interface spec."""
    return Mock(spec=Agent)
@pytest.fixture
def mock_requests():
    """Patch the requests module as seen from inside the brightdata toolkit."""
    with patch("agno.tools.brightdata.requests") as patched:
        yield patched
@pytest.fixture
def brightdata_tools():
    """Provide a fully-enabled BrightDataTools instance wired to test zones."""
    config = {
        "api_key": "test_api_key",
        "serp_zone": "test_serp_zone",
        "web_unlocker_zone": "test_web_unlocker_zone",
        "enable_scrape_markdown": True,
        "enable_screenshot": True,
        "enable_search_engine": True,
        "enable_web_data_feed": True,
        "verbose": True,
    }
    return BrightDataTools(**config)
def test_init_with_api_key():
    """An explicitly passed API key and the default zones are stored."""
    tools = BrightDataTools(api_key="test_key")
    assert tools.api_key == "test_key"
    assert tools.web_unlocker_zone == "web_unlocker1"
    assert tools.serp_zone == "serp_api"


def test_init_with_env_var():
    """The API key falls back to the BRIGHT_DATA_API_KEY environment variable."""
    with patch.dict("os.environ", {"BRIGHT_DATA_API_KEY": "env_key"}):
        assert BrightDataTools().api_key == "env_key"


def test_init_without_api_key():
    """Constructing with no API key anywhere raises a descriptive ValueError."""
    with patch.dict("os.environ", {}, clear=True):
        with pytest.raises(ValueError, match="No Bright Data API key provided"):
            BrightDataTools(api_key=None)


def test_init_with_selective_tools():
    """Only the enabled tool functions get registered on the toolkit."""
    tools = BrightDataTools(
        api_key="test_key",
        enable_scrape_markdown=True,
        enable_screenshot=False,
        enable_search_engine=True,
        enable_web_data_feed=False,
    )
    registered = {func.name for func in tools.functions.values()}
    assert "scrape_as_markdown" in registered
    assert "get_screenshot" not in registered
    assert "search_engine" in registered
    assert "web_data_feed" not in registered
def test_make_request_success(brightdata_tools, mock_requests):
    """A 200 response body is returned verbatim, posted with the exact arguments."""
    ok_response = Mock()
    ok_response.status_code = 200
    ok_response.text = "Success response"
    mock_requests.post.return_value = ok_response
    payload = {"url": "https://example.com", "zone": "test_zone"}
    assert brightdata_tools._make_request(payload) == "Success response"
    mock_requests.post.assert_called_once_with(
        brightdata_tools.endpoint, headers=brightdata_tools.headers, data=json.dumps(payload)
    )


def test_make_request_failure(brightdata_tools, mock_requests):
    """A non-200 status surfaces as an exception carrying status and body."""
    bad_response = Mock()
    bad_response.status_code = 400
    bad_response.text = "Bad Request"
    mock_requests.post.return_value = bad_response
    with pytest.raises(Exception, match="Failed to scrape: 400 - Bad Request"):
        brightdata_tools._make_request({"url": "https://example.com"})


def test_make_request_exception(brightdata_tools, mock_requests):
    """Transport-level errors are wrapped in a generic request-failure exception."""
    mock_requests.post.side_effect = requests.RequestException("Network error")
    with pytest.raises(Exception, match="Request failed: Network error"):
        brightdata_tools._make_request({"url": "https://example.com"})
def test_scrape_as_markdown_success(brightdata_tools, mock_requests):
    """Scraping returns the Markdown body and posts a markdown-format payload."""
    response = Mock()
    response.status_code = 200
    response.text = "# Markdown Content\n\nThis is a test."
    mock_requests.post.return_value = response
    assert brightdata_tools.scrape_as_markdown("https://example.com") == "# Markdown Content\n\nThis is a test."
    mock_requests.post.assert_called_once()
    _, call_kwargs = mock_requests.post.call_args
    sent = json.loads(call_kwargs["data"])
    assert sent["url"] == "https://example.com"
    assert sent["data_format"] == "markdown"
    assert sent["zone"] == "test_web_unlocker_zone"


def test_scrape_as_markdown_no_api_key():
    """A missing API key yields a helpful message instead of a request."""
    with patch.dict("os.environ", {}, clear=True):
        tools = BrightDataTools(api_key="test_key")  # construct with a key first
        tools.api_key = None  # then drop it to exercise the guard clause
        assert tools.scrape_as_markdown("https://example.com") == "Please provide a Bright Data API key"


def test_scrape_as_markdown_no_url(brightdata_tools):
    """An empty URL short-circuits with a prompt for a URL."""
    assert brightdata_tools.scrape_as_markdown("") == "Please provide a URL to scrape"


def test_scrape_as_markdown_exception(brightdata_tools, mock_requests):
    """Request failures are reported inside the returned error string."""
    mock_requests.post.side_effect = Exception("Network error")
    message = brightdata_tools.scrape_as_markdown("https://example.com")
    assert "Error scraping URL https://example.com: Request failed: Network error" in message
def test_get_screenshot_success(brightdata_tools, mock_requests, mock_agent):
    """Test successful get_screenshot."""
    # Mock image bytes
    mock_image_bytes = b"fake_png_data"
    mock_response = Mock()
    mock_response.status_code = 200
    mock_response.content = mock_image_bytes
    mock_requests.post.return_value = mock_response
    with patch("agno.tools.brightdata.uuid4") as mock_uuid:
        mock_uuid.return_value = Mock()
        # Assigning __str__ installs it on the mock's per-instance type, so
        # str(uuid4()) inside the toolkit yields this deterministic ID.
        mock_uuid.return_value.__str__ = Mock(return_value="test-uuid-123")
        result = brightdata_tools.get_screenshot(mock_agent, "https://example.com")
        # Check that result is a ToolResult with success content
        assert isinstance(result, ToolResult)
        assert "Screenshot captured and added as artifact with ID: test-uuid-123" in result.content
        assert result.images is not None
        assert len(result.images) == 1
        # Verify API call
        mock_requests.post.assert_called_once()
        args, kwargs = mock_requests.post.call_args
        payload = json.loads(kwargs["data"])
        assert payload["url"] == "https://example.com"
        assert payload["data_format"] == "screenshot"
        assert payload["zone"] == "test_web_unlocker_zone"
        # Verify ImageArtifact creation
        image_artifact = result.images[0]
        assert isinstance(image_artifact, Image)
        assert image_artifact.id == "test-uuid-123"
        assert image_artifact.mime_type == "image/png"
        assert image_artifact.original_prompt == "Screenshot of https://example.com"
        # Verify base64 encoding (the artifact stores the base64 text re-encoded to bytes)
        expected_base64 = base64.b64encode(mock_image_bytes).decode("utf-8")
        assert image_artifact.content == expected_base64.encode("utf-8")


def test_get_screenshot_no_api_key(mock_agent):
    """Test get_screenshot without API key."""
    with patch.dict("os.environ", {}, clear=True):
        tools = BrightDataTools(api_key="test_key")  # Create with key first
        tools.api_key = None  # Then remove it to test the method behavior
        result = tools.get_screenshot(mock_agent, "https://example.com")
        # Check that result is a ToolResult with error content
        assert isinstance(result, ToolResult)
        assert result.content == "Please provide a Bright Data API key"
        assert result.images is None


def test_get_screenshot_no_url(brightdata_tools, mock_agent):
    """Test get_screenshot without URL."""
    result = brightdata_tools.get_screenshot(mock_agent, "")
    # Check that result is a ToolResult with error content
    assert isinstance(result, ToolResult)
    assert result.content == "Please provide a URL to screenshot"
    assert result.images is None


def test_get_screenshot_http_error(brightdata_tools, mock_requests, mock_agent):
    """Test get_screenshot with HTTP error."""
    mock_response = Mock()
    mock_response.status_code = 500
    mock_response.text = "Internal Server Error"
    mock_requests.post.return_value = mock_response
    result = brightdata_tools.get_screenshot(mock_agent, "https://example.com")
    # Check that result is a ToolResult with error content
    assert isinstance(result, ToolResult)
    assert "Error taking screenshot of https://example.com: Error 500: Internal Server Error" in result.content
    assert result.images is None
def test_search_engine_success(brightdata_tools, mock_requests):
    """A Google search posts a SERP-zone markdown payload and returns the body."""
    response = Mock()
    response.status_code = 200
    response.text = "Search results markdown content"
    mock_requests.post.return_value = response
    outcome = brightdata_tools.search_engine("python web scraping", engine="google", num_results=5)
    assert outcome == "Search results markdown content"
    mock_requests.post.assert_called_once()
    _, call_kwargs = mock_requests.post.call_args
    sent = json.loads(call_kwargs["data"])
    assert "python%20web%20scraping" in sent["url"]
    assert sent["data_format"] == "markdown"
    assert sent["zone"] == "test_serp_zone"


def test_search_engine_with_params(brightdata_tools, mock_requests):
    """Language, country and result-count options become Google URL parameters."""
    response = Mock()
    response.status_code = 200
    response.text = "Localized search results"
    mock_requests.post.return_value = response
    brightdata_tools.search_engine("test query", engine="google", num_results=10, language="en", country_code="US")
    mock_requests.post.assert_called_once()
    _, call_kwargs = mock_requests.post.call_args
    target_url = json.loads(call_kwargs["data"])["url"]
    for fragment in ("hl=en", "gl=US", "num=10"):
        assert fragment in target_url


def test_search_engine_bing(brightdata_tools, mock_requests):
    """The Bing engine builds a bing.com search URL."""
    response = Mock()
    response.status_code = 200
    response.text = "Bing search results"
    mock_requests.post.return_value = response
    brightdata_tools.search_engine("test query", engine="bing")
    mock_requests.post.assert_called_once()
    _, call_kwargs = mock_requests.post.call_args
    assert "bing.com/search" in json.loads(call_kwargs["data"])["url"]


def test_search_engine_invalid_engine(brightdata_tools):
    """An unknown engine name yields an explanatory error string."""
    assert "Unsupported search engine: invalid" in brightdata_tools.search_engine("test query", engine="invalid")


def test_search_engine_no_api_key():
    """A missing API key short-circuits before any request is made."""
    with patch.dict("os.environ", {}, clear=True):
        tools = BrightDataTools(api_key="test_key")  # construct with a key first
        tools.api_key = None  # then drop it to exercise the guard clause
        assert tools.search_engine("test query") == "Please provide a Bright Data API key"


def test_search_engine_no_query(brightdata_tools):
    """An empty query short-circuits with a prompt for a query."""
    assert brightdata_tools.search_engine("") == "Please provide a query to search for"
def test_web_data_feed_success(brightdata_tools, mock_requests):
    """Test successful web_data_feed."""
    # Mock trigger response
    mock_trigger_response = Mock()
    mock_trigger_response.json.return_value = {"snapshot_id": "test_snapshot_123"}
    # Mock snapshot response
    mock_snapshot_response = Mock()
    mock_snapshot_response.json.return_value = {
        "product_title": "Test Product",
        "price": "$29.99",
        "description": "Test product description",
    }
    # POST triggers the dataset job; GET then polls the snapshot endpoint.
    mock_requests.post.side_effect = [mock_trigger_response]
    mock_requests.get.return_value = mock_snapshot_response
    result = brightdata_tools.web_data_feed("amazon_product", "https://amazon.com/dp/B123")
    # Should return JSON string
    result_data = json.loads(result)
    assert result_data["product_title"] == "Test Product"
    assert result_data["price"] == "$29.99"
    # Verify trigger call
    assert mock_requests.post.called
    trigger_args = mock_requests.post.call_args
    assert "datasets/v3/trigger" in trigger_args[0][0]
    assert trigger_args[1]["json"] == [{"url": "https://amazon.com/dp/B123"}]
    # Verify snapshot call
    assert mock_requests.get.called
    snapshot_args = mock_requests.get.call_args
    assert "snapshot/test_snapshot_123" in snapshot_args[0][0]


def test_web_data_feed_invalid_source(brightdata_tools):
    """Test web_data_feed with invalid source type."""
    result = brightdata_tools.web_data_feed("invalid_source", "https://example.com")
    assert "Invalid source_type: invalid_source" in result


def test_web_data_feed_no_api_key():
    """Test web_data_feed without API key."""
    with patch.dict("os.environ", {}, clear=True):
        tools = BrightDataTools(api_key="test_key")  # Create with key first
        tools.api_key = None  # Then remove it to test the method behavior
        result = tools.web_data_feed("amazon_product", "https://example.com")
        assert result == "Please provide a Bright Data API key"


def test_web_data_feed_no_url(brightdata_tools):
    """Test web_data_feed without URL."""
    result = brightdata_tools.web_data_feed("amazon_product", "")
    assert result == "Please provide a URL to retrieve data from"


def test_web_data_feed_no_snapshot_id(brightdata_tools, mock_requests):
    """Test web_data_feed when no snapshot ID is returned."""
    mock_trigger_response = Mock()
    mock_trigger_response.json.return_value = {}  # No snapshot_id
    mock_requests.post.return_value = mock_trigger_response
    result = brightdata_tools.web_data_feed("amazon_product", "https://amazon.com/dp/B123")
    assert result == "No snapshot ID returned from trigger request"


def test_web_data_feed_with_reviews_param(brightdata_tools, mock_requests):
    """Test web_data_feed with num_of_reviews parameter."""
    mock_trigger_response = Mock()
    mock_trigger_response.json.return_value = {"snapshot_id": "test_snapshot_123"}
    mock_snapshot_response = Mock()
    mock_snapshot_response.json.return_value = {"reviews": ["review1", "review2"]}
    mock_requests.post.side_effect = [mock_trigger_response]
    mock_requests.get.return_value = mock_snapshot_response
    brightdata_tools.web_data_feed("facebook_company_reviews", "https://facebook.com/company", num_of_reviews=50)
    # Verify the request included num_of_reviews (stringified by the toolkit)
    trigger_args = mock_requests.post.call_args
    assert trigger_args[1]["json"] == [{"url": "https://facebook.com/company", "num_of_reviews": "50"}]


def test_web_data_feed_exception(brightdata_tools, mock_requests):
    """Test web_data_feed with exception."""
    mock_requests.post.side_effect = Exception("Network error")
    result = brightdata_tools.web_data_feed("amazon_product", "https://amazon.com/dp/B123")
    assert "Error retrieving amazon_product data from https://amazon.com/dp/B123: Network error" in result
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_brightdata.py",
"license": "Apache License 2.0",
"lines": 301,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/run/base.py | from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.filters import FilterExpr
from agno.media import Audio, Image, Video
from agno.models.message import Citations, Message, MessageReferences
from agno.models.metrics import RunMetrics
from agno.reasoning.step import ReasoningStep
from agno.utils.log import log_error
@dataclass
class RunContext:
    """Per-run context: identifiers, configuration, and runtime-resolved objects."""

    # Required identifiers for the current run.
    run_id: str
    session_id: str
    user_id: Optional[str] = None
    workflow_id: Optional[str] = None
    workflow_name: Optional[str] = None
    # Optional run configuration carried through the run.
    dependencies: Optional[Dict[str, Any]] = None
    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
    metadata: Optional[Dict[str, Any]] = None
    session_state: Optional[Dict[str, Any]] = None
    output_schema: Optional[Union[Type[BaseModel], Dict[str, Any]]] = None
    # Runtime-resolved callable factory results
    tools: Optional[List[Any]] = None
    knowledge: Optional[Any] = None
    members: Optional[List[Any]] = None
@dataclass
class BaseRunOutputEvent:
    def to_dict(self) -> Dict[str, Any]:
        """Serialize this event to a plain dict.

        Fields needing custom serialization (media, tools, metrics, ...) are
        excluded from the generic dataclass dump and re-added explicitly below.
        """
        _dict = {
            k: v
            for k, v in asdict(self).items()
            if v is not None
            and k
            not in [
                "tools",
                "tool",
                "metadata",
                "image",
                "images",
                "videos",
                "audio",
                "response_audio",
                "citations",
                "member_responses",
                "reasoning_messages",
                "reasoning_steps",
                "references",
                "additional_input",
                "session_summary",
                "metrics",
                "run_input",
                "requirements",
                "tasks",
                "memories",
            ]
        }
        # hasattr() guards: subclasses declare different subsets of these fields.
        if hasattr(self, "metadata") and self.metadata is not None:
            _dict["metadata"] = self.metadata
        if hasattr(self, "additional_input") and self.additional_input is not None:
            _dict["additional_input"] = [m.to_dict() for m in self.additional_input]
        if hasattr(self, "reasoning_messages") and self.reasoning_messages is not None:
            _dict["reasoning_messages"] = [m.to_dict() for m in self.reasoning_messages]
        if hasattr(self, "reasoning_steps") and self.reasoning_steps is not None:
            _dict["reasoning_steps"] = [rs.model_dump() for rs in self.reasoning_steps]
        if hasattr(self, "references") and self.references is not None:
            _dict["references"] = [r.model_dump() for r in self.references]
        # Note: truthiness check (not `is not None`) -- empty lists are skipped.
        if hasattr(self, "member_responses") and self.member_responses:
            _dict["member_responses"] = [response.to_dict() for response in self.member_responses]
        # Media lists may mix artifact objects and already-serialized dicts.
        if hasattr(self, "images") and self.images is not None:
            _dict["images"] = []
            for img in self.images:
                if isinstance(img, Image):
                    _dict["images"].append(img.to_dict())
                else:
                    _dict["images"].append(img)
        if hasattr(self, "videos") and self.videos is not None:
            _dict["videos"] = []
            for vid in self.videos:
                if isinstance(vid, Video):
                    _dict["videos"].append(vid.to_dict())
                else:
                    _dict["videos"].append(vid)
        if hasattr(self, "audio") and self.audio is not None:
            _dict["audio"] = []
            for aud in self.audio:
                if isinstance(aud, Audio):
                    _dict["audio"].append(aud.to_dict())
                else:
                    _dict["audio"].append(aud)
        if hasattr(self, "response_audio") and self.response_audio is not None:
            if isinstance(self.response_audio, Audio):
                _dict["response_audio"] = self.response_audio.to_dict()
            else:
                _dict["response_audio"] = self.response_audio
        if hasattr(self, "citations") and self.citations is not None:
            if isinstance(self.citations, Citations):
                _dict["citations"] = self.citations.model_dump(exclude_none=True)
            else:
                _dict["citations"] = self.citations
        # Pydantic content models are dumped; other content passes through unchanged.
        if hasattr(self, "content") and self.content and isinstance(self.content, BaseModel):
            _dict["content"] = self.content.model_dump(exclude_none=True)
        if hasattr(self, "tools") and self.tools is not None:
            from agno.models.response import ToolExecution

            _dict["tools"] = []
            for tool in self.tools:
                if isinstance(tool, ToolExecution):
                    _dict["tools"].append(tool.to_dict())
                else:
                    _dict["tools"].append(tool)
        if hasattr(self, "tool") and self.tool is not None:
            from agno.models.response import ToolExecution

            if isinstance(self.tool, ToolExecution):
                _dict["tool"] = self.tool.to_dict()
            else:
                _dict["tool"] = self.tool
        if hasattr(self, "metrics") and self.metrics is not None:
            _dict["metrics"] = self.metrics.to_dict()
        if hasattr(self, "session_summary") and self.session_summary is not None:
            _dict["session_summary"] = self.session_summary.to_dict()
        if hasattr(self, "run_input") and self.run_input is not None:
            _dict["run_input"] = self.run_input.to_dict()
        if hasattr(self, "requirements") and self.requirements is not None:
            _dict["requirements"] = [req.to_dict() if hasattr(req, "to_dict") else req for req in self.requirements]
        if hasattr(self, "memories") and self.memories is not None:
            _dict["memories"] = [mem.to_dict() if hasattr(mem, "to_dict") else mem for mem in self.memories]
        if hasattr(self, "tasks") and self.tasks is not None:
            _dict["tasks"] = [t.to_dict() for t in self.tasks]
        return _dict
def to_json(self, separators=(", ", ": "), indent: Optional[int] = 2) -> str:
    """Serialize this event to a JSON string.

    Args:
        separators: (item, key) separator pair forwarded to ``json.dumps``.
        indent: Pretty-print indent width; ``None`` emits compact JSON.

    Raises:
        Whatever ``to_dict`` raises; the failure is logged before re-raising.
    """
    import json

    from agno.utils.serialize import json_serializer

    try:
        payload = self.to_dict()
    except Exception:
        log_error("Failed to convert response event to json", exc_info=True)
        raise

    # Build the dumps arguments once; omit `indent` entirely when None,
    # which is equivalent to passing indent=None.
    dump_kwargs: Dict[str, Any] = {
        "separators": separators,
        "default": json_serializer,
        "ensure_ascii": False,
    }
    if indent is not None:
        dump_kwargs["indent"] = indent
    return json.dumps(payload, **dump_kwargs)
@classmethod
def from_dict(cls, data: Dict[str, Any]):
    """Reconstruct an event instance of ``cls`` from its ``to_dict`` payload.

    Nested payloads (tool executions, media, messages, reasoning steps,
    metrics, requirements, ...) are popped out of ``data`` and rehydrated
    into their rich types before the remaining keys are passed to the
    dataclass constructor. Local imports avoid circular module imports.
    """
    # Single tool execution attached to the event.
    tool = data.pop("tool", None)
    if tool:
        from agno.models.response import ToolExecution
        data["tool"] = ToolExecution.from_dict(tool)
    # List of tool executions.
    tools = data.pop("tools", None)
    if tools:
        from agno.models.response import ToolExecution
        data["tools"] = [ToolExecution.from_dict(t) for t in tools]
    # Media payloads are validated back into their pydantic models.
    images = data.pop("images", None)
    if images:
        data["images"] = [Image.model_validate(image) for image in images]
    videos = data.pop("videos", None)
    if videos:
        data["videos"] = [Video.model_validate(video) for video in videos]
    audio = data.pop("audio", None)
    if audio:
        data["audio"] = [Audio.model_validate(audio) for audio in audio]
    response_audio = data.pop("response_audio", None)
    if response_audio:
        data["response_audio"] = Audio.model_validate(response_audio)
    # NOTE: message-like lists use `is not None` so an empty list round-trips
    # as an empty list rather than being dropped.
    additional_input = data.pop("additional_input", None)
    if additional_input is not None:
        data["additional_input"] = [Message.model_validate(message) for message in additional_input]
    reasoning_steps = data.pop("reasoning_steps", None)
    if reasoning_steps is not None:
        data["reasoning_steps"] = [ReasoningStep.model_validate(step) for step in reasoning_steps]
    reasoning_messages = data.pop("reasoning_messages", None)
    if reasoning_messages is not None:
        data["reasoning_messages"] = [Message.model_validate(message) for message in reasoning_messages]
    references = data.pop("references", None)
    if references is not None:
        data["references"] = [MessageReferences.model_validate(reference) for reference in references]
    metrics = data.pop("metrics", None)
    if metrics:
        data["metrics"] = RunMetrics.from_dict(metrics)
    session_summary = data.pop("session_summary", None)
    if session_summary:
        from agno.session.summary import SessionSummary
        data["session_summary"] = SessionSummary.from_dict(session_summary)
    # run_input's concrete type depends on whether this is a team event.
    run_input = data.pop("run_input", None)
    if run_input:
        from agno.run.team import BaseTeamRunEvent
        if issubclass(cls, BaseTeamRunEvent):
            from agno.run.team import TeamRunInput
            data["run_input"] = TeamRunInput.from_dict(run_input)
        else:
            from agno.run.agent import RunInput
            data["run_input"] = RunInput.from_dict(run_input)
    # Handle requirements
    requirements_data = data.pop("requirements", None)
    if requirements_data is not None:
        from agno.run.requirement import RunRequirement
        requirements_list: List[RunRequirement] = []
        for item in requirements_data:
            if isinstance(item, RunRequirement):
                requirements_list.append(item)
            elif isinstance(item, dict):
                requirements_list.append(RunRequirement.from_dict(item))
        # An all-invalid (or empty) list collapses to None.
        data["requirements"] = requirements_list if requirements_list else None
    # Handle tasks (TaskData objects in TaskStateUpdatedEvent)
    tasks_data = data.pop("tasks", None)
    if tasks_data is not None:
        from agno.run.team import TaskData
        data["tasks"] = [TaskData.from_dict(t) if isinstance(t, dict) else t for t in tasks_data]
    # Filter data to only include fields that are actually defined in the target class
    # CustomEvent accepts arbitrary fields, so skip filtering for it
    if cls.__name__ == "CustomEvent":
        return cls(**data)
    from dataclasses import fields
    supported_fields = {f.name for f in fields(cls)}
    filtered_data = {k: v for k, v in data.items() if k in supported_fields}
    return cls(**filtered_data)
@property
def is_paused(self):
    # Base events are never paused; paused-capable event subclasses override this.
    return False
@property
def is_cancelled(self):
    # Base events are never cancelled; cancellation events override this.
    return False
class RunStatus(str, Enum):
    """State of the main run response.

    The ``str`` mixin makes members compare equal to (and serialize as)
    their uppercase string values.
    """

    pending = "PENDING"
    running = "RUNNING"
    completed = "COMPLETED"
    paused = "PAUSED"
    cancelled = "CANCELLED"
    error = "ERROR"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/base.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/run/workflow.py | from dataclasses import asdict, dataclass, field
from enum import Enum
from time import time
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.run.agent import RunEvent, RunOutput, run_output_event_from_dict
from agno.run.base import BaseRunOutputEvent, RunStatus
from agno.run.team import TeamRunEvent, TeamRunOutput, team_run_output_event_from_dict
from agno.utils.media import (
reconstruct_audio_list,
reconstruct_files,
reconstruct_images,
reconstruct_response_audio,
reconstruct_videos,
)
if TYPE_CHECKING:
    # Imported for static type checkers only; importing agno.workflow.types at
    # runtime would create a circular import with this module.
    from agno.workflow.types import (
        ErrorRequirement,
        StepOutput,
        StepRequirement,
        WorkflowMetrics,
    )
else:
    # Runtime fallbacks: alias the workflow types to Any so annotations in this
    # module still evaluate without importing agno.workflow.types.
    StepOutput = Any
    StepRequirement = Any
    ErrorRequirement = Any
    WorkflowMetrics = Any
class WorkflowRunEvent(str, Enum):
    """Events that can be sent by workflow execution.

    Member values are the string discriminators stored in each event's
    ``event`` field and used as keys in WORKFLOW_RUN_EVENT_TYPE_REGISTRY.
    """

    # Whole-workflow lifecycle
    workflow_started = "WorkflowStarted"
    workflow_completed = "WorkflowCompleted"
    workflow_cancelled = "WorkflowCancelled"
    workflow_error = "WorkflowError"
    # Workflow-agent lifecycle (agent deciding to run the workflow or answer directly)
    workflow_agent_started = "WorkflowAgentStarted"
    workflow_agent_completed = "WorkflowAgentCompleted"
    # Individual step lifecycle
    step_started = "StepStarted"
    step_completed = "StepCompleted"
    step_paused = "StepPaused"
    step_error = "StepError"
    # Loop primitive
    loop_execution_started = "LoopExecutionStarted"
    loop_iteration_started = "LoopIterationStarted"
    loop_iteration_completed = "LoopIterationCompleted"
    loop_execution_completed = "LoopExecutionCompleted"
    # Parallel primitive
    parallel_execution_started = "ParallelExecutionStarted"
    parallel_execution_completed = "ParallelExecutionCompleted"
    # Condition primitive (NOTE: condition_paused has no dedicated event class below)
    condition_execution_started = "ConditionExecutionStarted"
    condition_execution_completed = "ConditionExecutionCompleted"
    condition_paused = "ConditionPaused"
    # Router primitive
    router_execution_started = "RouterExecutionStarted"
    router_execution_completed = "RouterExecutionCompleted"
    router_paused = "RouterPaused"
    # Steps grouping primitive
    steps_execution_started = "StepsExecutionStarted"
    steps_execution_completed = "StepsExecutionCompleted"
    # Step output and user-defined events
    step_output = "StepOutput"
    custom_event = "CustomEvent"
@dataclass
class BaseWorkflowRunOutputEvent(BaseRunOutputEvent):
    """Base class for all workflow run response events"""

    # Unix timestamp (seconds) captured when the event object is created.
    created_at: int = field(default_factory=lambda: int(time()))
    # Event discriminator string; each subclass overrides the default.
    event: str = ""

    # Workflow-specific fields
    workflow_id: Optional[str] = None
    workflow_name: Optional[str] = None
    session_id: Optional[str] = None
    run_id: Optional[str] = None
    step_id: Optional[str] = None
    parent_step_id: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the event, dropping None values and converting nested
        StepOutput-like objects that ``asdict`` cannot handle on its own."""
        _dict = {k: v for k, v in asdict(self).items() if v is not None}
        # Pydantic content must be dumped explicitly; asdict leaves it opaque.
        if hasattr(self, "content") and self.content and isinstance(self.content, BaseModel):
            _dict["content"] = self.content.model_dump(exclude_none=True)
        # Handle StepOutput fields that contain Message objects
        if hasattr(self, "step_results") and self.step_results is not None:
            _dict["step_results"] = [step.to_dict() if hasattr(step, "to_dict") else step for step in self.step_results]
        if hasattr(self, "step_response") and self.step_response is not None:
            _dict["step_response"] = (
                self.step_response.to_dict() if hasattr(self.step_response, "to_dict") else self.step_response
            )
        if hasattr(self, "iteration_results") and self.iteration_results is not None:
            _dict["iteration_results"] = [
                step.to_dict() if hasattr(step, "to_dict") else step for step in self.iteration_results
            ]
        # all_results is a list of iterations, each itself a list of step outputs.
        if hasattr(self, "all_results") and self.all_results is not None:
            _dict["all_results"] = [
                [step.to_dict() if hasattr(step, "to_dict") else step for step in iteration]
                for iteration in self.all_results
            ]
        return _dict

    @property
    def is_cancelled(self):
        # Overridden by cancellation events.
        return False

    @property
    def is_error(self):
        # Overridden by error events.
        return False

    @property
    def status(self):
        # Display status derived from the flags above; error overrides
        # completed, cancelled overrides both.
        status = "Completed"
        if self.is_error:
            status = "Error"
        if self.is_cancelled:
            status = "Cancelled"
        return status
@dataclass
class WorkflowStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when workflow execution starts"""

    # Discriminator consumed by the event registry / serializers.
    event: str = WorkflowRunEvent.workflow_started.value
@dataclass
class WorkflowAgentStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when workflow agent starts (before deciding to run workflow or answer directly)"""

    event: str = WorkflowRunEvent.workflow_agent_started.value
@dataclass
class WorkflowAgentCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when workflow agent completes (after running workflow or answering directly)"""

    event: str = WorkflowRunEvent.workflow_agent_completed.value
    # Final content produced by the workflow agent (free-form).
    content: Optional[Any] = None
@dataclass
class WorkflowCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when workflow execution completes"""

    event: str = WorkflowRunEvent.workflow_completed.value
    # Final workflow output and a hint describing its type.
    content: Optional[Any] = None
    content_type: str = "str"
    # Store actual step execution results as StepOutput objects
    step_results: List[StepOutput] = field(default_factory=list)
    metadata: Optional[Dict[str, Any]] = None
@dataclass
class WorkflowErrorEvent(BaseWorkflowRunOutputEvent):
    """Event sent when workflow execution fails"""

    event: str = WorkflowRunEvent.workflow_error.value
    # Human-readable error message.
    error: Optional[str] = None
    # From exceptions: classification and identifier carried over from the
    # raising exception, plus any extra context it supplied.
    error_type: Optional[str] = None
    error_id: Optional[str] = None
    additional_data: Optional[Dict[str, Any]] = None
@dataclass
class WorkflowCancelledEvent(BaseWorkflowRunOutputEvent):
    """Event sent when workflow execution is cancelled"""

    event: str = WorkflowRunEvent.workflow_cancelled.value
    # Optional explanation of why the run was cancelled.
    reason: Optional[str] = None

    @property
    def is_cancelled(self):
        # Overrides the base False so status resolves to "Cancelled".
        return True
@dataclass
class StepStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when step execution starts"""

    event: str = WorkflowRunEvent.step_started.value
    step_name: Optional[str] = None
    # Position of the step; typed int or tuple (tuple presumably addresses
    # nested/parallel steps — confirm with the workflow executor).
    step_index: Optional[Union[int, tuple]] = None
@dataclass
class StepCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when step execution completes"""

    event: str = WorkflowRunEvent.step_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Step output and a hint describing its type.
    content: Optional[Any] = None
    content_type: str = "str"
    # Media content fields
    images: Optional[List[Image]] = None
    videos: Optional[List[Video]] = None
    audio: Optional[List[Audio]] = None
    response_audio: Optional[Audio] = None
    # Store actual step execution results as StepOutput objects
    step_response: Optional[StepOutput] = None
@dataclass
class StepPausedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when step execution is paused (e.g., requires user confirmation or user input)"""

    event: str = WorkflowRunEvent.step_paused.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Re-declared from the base event (same name/type/default).
    step_id: Optional[str] = None
    # Confirmation fields
    requires_confirmation: bool = False
    confirmation_message: Optional[str] = None
    # User input fields
    requires_user_input: bool = False
    user_input_message: Optional[str] = None
@dataclass
class StepErrorEvent(BaseWorkflowRunOutputEvent):
    """Event sent when step execution fails"""

    event: str = WorkflowRunEvent.step_error.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Human-readable error message for the failing step.
    error: Optional[str] = None
@dataclass
class LoopExecutionStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when loop execution starts"""

    event: str = WorkflowRunEvent.loop_execution_started.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Upper bound on iterations configured for this loop, if any.
    max_iterations: Optional[int] = None
@dataclass
class LoopIterationStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when loop iteration starts"""

    event: str = WorkflowRunEvent.loop_iteration_started.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Current iteration counter (defaults to 0) and the configured cap.
    iteration: int = 0
    max_iterations: Optional[int] = None
@dataclass
class LoopIterationCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when loop iteration completes"""

    event: str = WorkflowRunEvent.loop_iteration_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    iteration: int = 0
    max_iterations: Optional[int] = None
    # Outputs produced by the steps executed within this iteration.
    iteration_results: List[StepOutput] = field(default_factory=list)
    # Whether the loop will run another iteration after this one.
    should_continue: bool = True
@dataclass
class LoopExecutionCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when loop execution completes"""

    event: str = WorkflowRunEvent.loop_execution_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # How many iterations actually ran vs. the configured cap.
    total_iterations: int = 0
    max_iterations: Optional[int] = None
    # One inner list of step outputs per completed iteration.
    all_results: List[List[StepOutput]] = field(default_factory=list)
@dataclass
class ParallelExecutionStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when parallel step execution starts"""

    event: str = WorkflowRunEvent.parallel_execution_started.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Number of steps being executed in parallel.
    parallel_step_count: Optional[int] = None
@dataclass
class ParallelExecutionCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when parallel step execution completes"""

    event: str = WorkflowRunEvent.parallel_execution_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    parallel_step_count: Optional[int] = None
    # Results from all parallel steps
    step_results: List[StepOutput] = field(default_factory=list)
@dataclass
class ConditionExecutionStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when condition step execution starts"""

    event: str = WorkflowRunEvent.condition_execution_started.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Outcome of evaluating the condition predicate, when known.
    condition_result: Optional[bool] = None
@dataclass
class ConditionExecutionCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when condition step execution completes"""

    event: str = WorkflowRunEvent.condition_execution_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    condition_result: Optional[bool] = None
    # Number of steps that were run in the chosen branch.
    executed_steps: Optional[int] = None
    # Which branch was executed: "if", "else", or None (condition false with no else_steps)
    branch: Optional[str] = None
    # Results from executed steps
    step_results: List[StepOutput] = field(default_factory=list)
@dataclass
class RouterExecutionStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when router step execution starts"""

    event: str = WorkflowRunEvent.router_execution_started.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Names of steps selected by router
    selected_steps: List[str] = field(default_factory=list)
@dataclass
class RouterExecutionCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when router step execution completes"""

    event: str = WorkflowRunEvent.router_execution_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Names of steps that were selected
    selected_steps: List[str] = field(default_factory=list)
    # Number of selected steps that actually ran.
    executed_steps: Optional[int] = None
    # Results from executed steps
    step_results: List[StepOutput] = field(default_factory=list)
@dataclass
class RouterPausedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when router pauses for user input (HITL)"""

    event: str = WorkflowRunEvent.router_paused.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Available choices for user to select from
    available_choices: List[str] = field(default_factory=list)
    # Message to display to user
    user_input_message: Optional[str] = None
    # Whether multiple selections are allowed
    allow_multiple_selections: bool = False
@dataclass
class StepsExecutionStartedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when steps execution starts"""

    event: str = WorkflowRunEvent.steps_execution_started.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    # Number of child steps contained in this Steps group.
    steps_count: Optional[int] = None
@dataclass
class StepsExecutionCompletedEvent(BaseWorkflowRunOutputEvent):
    """Event sent when steps execution completes"""

    event: str = WorkflowRunEvent.steps_execution_completed.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None
    steps_count: Optional[int] = None
    # Number of child steps that actually ran.
    executed_steps: Optional[int] = None
    # Results from executed steps
    step_results: List[StepOutput] = field(default_factory=list)
@dataclass
class StepOutputEvent(BaseWorkflowRunOutputEvent):
    """Event sent when a step produces output - replaces direct StepOutput yielding"""

    # Consistency fix: use the enum member like every sibling event class,
    # instead of the duplicated string literal "StepOutput". The registry
    # keys on WorkflowRunEvent.step_output.value, so the value is unchanged.
    event: str = WorkflowRunEvent.step_output.value
    step_name: Optional[str] = None
    step_index: Optional[Union[int, tuple]] = None

    # Store actual step execution result as StepOutput object
    step_output: Optional[StepOutput] = None

    # Properties for backward compatibility: each delegates to the wrapped
    # StepOutput, falling back to a safe default when it is absent/falsy.
    @property
    def content(self) -> Optional[Union[str, Dict[str, Any], List[Any], BaseModel, Any]]:
        return self.step_output.content if self.step_output else None

    @property
    def images(self) -> Optional[List[Image]]:
        return self.step_output.images if self.step_output else None

    @property
    def videos(self) -> Optional[List[Video]]:
        return self.step_output.videos if self.step_output else None

    @property
    def audio(self) -> Optional[List[Audio]]:
        return self.step_output.audio if self.step_output else None

    @property
    def success(self) -> bool:
        # A missing step_output is treated as success.
        return self.step_output.success if self.step_output else True

    @property
    def error(self) -> Optional[str]:
        return self.step_output.error if self.step_output else None

    @property
    def stop(self) -> bool:
        return self.step_output.stop if self.step_output else False
@dataclass
class CustomEvent(BaseWorkflowRunOutputEvent):
    """Event sent when a custom event is produced"""

    event: str = WorkflowRunEvent.custom_event.value

    def __init__(self, **kwargs):
        # Replaces the dataclass-generated __init__ so callers may attach
        # arbitrary payload fields. Note: inherited field defaults (e.g.
        # created_at) are only set when explicitly passed in kwargs;
        # from_dict() deliberately skips field-filtering for this class.
        # Store arbitrary attributes directly on the instance
        for key, value in kwargs.items():
            setattr(self, key, value)
# Union type for all workflow run response events. Used as the element type
# of WorkflowRunOutput.events and for annotating streamed event handlers.
WorkflowRunOutputEvent = Union[
    WorkflowStartedEvent,
    WorkflowAgentStartedEvent,
    WorkflowAgentCompletedEvent,
    WorkflowCompletedEvent,
    WorkflowErrorEvent,
    WorkflowCancelledEvent,
    StepStartedEvent,
    StepCompletedEvent,
    StepPausedEvent,
    StepErrorEvent,
    LoopExecutionStartedEvent,
    LoopIterationStartedEvent,
    LoopIterationCompletedEvent,
    LoopExecutionCompletedEvent,
    ParallelExecutionStartedEvent,
    ParallelExecutionCompletedEvent,
    ConditionExecutionStartedEvent,
    ConditionExecutionCompletedEvent,
    RouterExecutionStartedEvent,
    RouterExecutionCompletedEvent,
    RouterPausedEvent,
    StepsExecutionStartedEvent,
    StepsExecutionCompletedEvent,
    StepOutputEvent,
    CustomEvent,
]
# Map event string to dataclass for workflow events; used by
# workflow_run_output_event_from_dict to rehydrate serialized events.
# NOTE(review): WorkflowRunEvent.condition_paused has no entry here, so a
# serialized "ConditionPaused" event would raise ValueError — confirm intended.
WORKFLOW_RUN_EVENT_TYPE_REGISTRY = {
    WorkflowRunEvent.workflow_started.value: WorkflowStartedEvent,
    WorkflowRunEvent.workflow_agent_started.value: WorkflowAgentStartedEvent,
    WorkflowRunEvent.workflow_agent_completed.value: WorkflowAgentCompletedEvent,
    WorkflowRunEvent.workflow_completed.value: WorkflowCompletedEvent,
    WorkflowRunEvent.workflow_cancelled.value: WorkflowCancelledEvent,
    WorkflowRunEvent.workflow_error.value: WorkflowErrorEvent,
    WorkflowRunEvent.step_started.value: StepStartedEvent,
    WorkflowRunEvent.step_completed.value: StepCompletedEvent,
    WorkflowRunEvent.step_paused.value: StepPausedEvent,
    WorkflowRunEvent.step_error.value: StepErrorEvent,
    WorkflowRunEvent.loop_execution_started.value: LoopExecutionStartedEvent,
    WorkflowRunEvent.loop_iteration_started.value: LoopIterationStartedEvent,
    WorkflowRunEvent.loop_iteration_completed.value: LoopIterationCompletedEvent,
    WorkflowRunEvent.loop_execution_completed.value: LoopExecutionCompletedEvent,
    WorkflowRunEvent.parallel_execution_started.value: ParallelExecutionStartedEvent,
    WorkflowRunEvent.parallel_execution_completed.value: ParallelExecutionCompletedEvent,
    WorkflowRunEvent.condition_execution_started.value: ConditionExecutionStartedEvent,
    WorkflowRunEvent.condition_execution_completed.value: ConditionExecutionCompletedEvent,
    WorkflowRunEvent.router_execution_started.value: RouterExecutionStartedEvent,
    WorkflowRunEvent.router_execution_completed.value: RouterExecutionCompletedEvent,
    WorkflowRunEvent.router_paused.value: RouterPausedEvent,
    WorkflowRunEvent.steps_execution_started.value: StepsExecutionStartedEvent,
    WorkflowRunEvent.steps_execution_completed.value: StepsExecutionCompletedEvent,
    WorkflowRunEvent.step_output.value: StepOutputEvent,
    WorkflowRunEvent.custom_event.value: CustomEvent,
}
def workflow_run_output_event_from_dict(data: dict) -> BaseWorkflowRunOutputEvent:
    """Rehydrate a run event from its dict form.

    Agent and team events embedded in a workflow stream are delegated to
    their own deserializers; everything else must be a registered workflow
    event, otherwise a ValueError is raised.
    """
    event_type = data.get("event", "")

    agent_event_values = {e.value for e in RunEvent}
    if event_type in agent_event_values:
        return run_output_event_from_dict(data)  # type: ignore

    team_event_values = {e.value for e in TeamRunEvent}
    if event_type in team_event_values:
        return team_run_output_event_from_dict(data)  # type: ignore

    event_class = WORKFLOW_RUN_EVENT_TYPE_REGISTRY.get(event_type)
    if not event_class:
        raise ValueError(f"Unknown workflow event type: {event_type}")
    return event_class.from_dict(data)  # type: ignore
@dataclass
class WorkflowRunOutput:
    """Response returned by Workflow.run() functions - kept for backwards compatibility"""

    # Original input to the run and the final content it produced.
    input: Optional[Union[str, Dict[str, Any], List[Any], BaseModel]] = None
    content: Optional[Union[str, Dict[str, Any], List[Any], BaseModel, Any]] = None
    content_type: str = "str"
    # Workflow-specific fields
    workflow_id: Optional[str] = None
    workflow_name: Optional[str] = None
    run_id: Optional[str] = None
    session_id: Optional[str] = None
    user_id: Optional[str] = None
    # Media content fields
    images: Optional[List[Image]] = None
    videos: Optional[List[Video]] = None
    audio: Optional[List[Audio]] = None
    files: Optional[List[File]] = None
    response_audio: Optional[Audio] = None
    # Store actual step execution results as StepOutput objects
    step_results: List[Union[StepOutput, List[StepOutput]]] = field(default_factory=list)
    # Store agent/team responses separately with parent_run_id references
    step_executor_runs: Optional[List[Union[RunOutput, TeamRunOutput]]] = None
    # Workflow agent run - stores the full agent RunOutput when workflow agent is used
    # The agent's parent_run_id will point to this workflow run's run_id to establish the relationship
    workflow_agent_run: Optional[RunOutput] = None
    # Store events from workflow execution
    events: Optional[List[WorkflowRunOutputEvent]] = None
    # Workflow metrics aggregated from all steps
    metrics: Optional[WorkflowMetrics] = None
    metadata: Optional[Dict[str, Any]] = None
    # Creation timestamp (Unix seconds) and lifecycle status.
    created_at: int = field(default_factory=lambda: int(time()))
    status: RunStatus = RunStatus.pending
    # Unified HITL requirements to continue a paused workflow
    # Handles all HITL types: confirmation, user input, and route selection
    step_requirements: Optional[List["StepRequirement"]] = None
    # Error-level HITL requirements for handling step failures
    error_requirements: Optional[List["ErrorRequirement"]] = None
    # Track the paused step for resumption and debugging
    paused_step_index: Optional[int] = None
    paused_step_name: Optional[str] = None

    @property
    def is_paused(self) -> bool:
        """Check if the workflow is paused waiting for step confirmation or router selection"""
        return self.status == RunStatus.paused

    @property
    def is_cancelled(self):
        # True when the run was cancelled before completing.
        return self.status == RunStatus.cancelled

    @property
    def active_step_requirements(self) -> List["StepRequirement"]:
        """Get step requirements that still need to be resolved"""
        if not self.step_requirements:
            return []
        return [req for req in self.step_requirements if not req.is_resolved]

    @property
    def steps_requiring_confirmation(self) -> List["StepRequirement"]:
        """Get step requirements that need user confirmation"""
        if not self.step_requirements:
            return []
        return [req for req in self.step_requirements if req.needs_confirmation]

    @property
    def steps_requiring_user_input(self) -> List["StepRequirement"]:
        """Get step requirements that need user input (custom fields, not route selection)"""
        if not self.step_requirements:
            return []
        return [req for req in self.step_requirements if req.needs_user_input]

    @property
    def steps_requiring_route(self) -> List["StepRequirement"]:
        """Get step requirements that need route selection (Router HITL)"""
        if not self.step_requirements:
            return []
        return [req for req in self.step_requirements if req.needs_route_selection]

    @property
    def active_error_requirements(self) -> List["ErrorRequirement"]:
        """Get error requirements that still need user decision"""
        if not self.error_requirements:
            return []
        return [req for req in self.error_requirements if not req.is_resolved]

    @property
    def steps_with_errors(self) -> List["ErrorRequirement"]:
        """Get error requirements that need user decision (retry or skip)"""
        if not self.error_requirements:
            return []
        return [req for req in self.error_requirements if req.needs_decision]

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the run output to plain dict form.

        Complex fields are excluded from the asdict() pass and re-added
        below via their own to_dict()/model_dump() conversions.
        """
        _dict = {
            k: v
            for k, v in asdict(self).items()
            if v is not None
            and k
            not in [
                "metadata",
                "images",
                "videos",
                "audio",
                "files",
                "response_audio",
                "step_results",
                "step_executor_runs",
                "events",
                "metrics",
                "workflow_agent_run",
                "step_requirements",
                "error_requirements",
            ]
        }
        # Status is stored as its string value.
        if self.status is not None:
            _dict["status"] = self.status.value if isinstance(self.status, RunStatus) else self.status
        if self.metadata is not None:
            _dict["metadata"] = self.metadata
        if self.images is not None:
            _dict["images"] = [img.to_dict() for img in self.images]
        if self.videos is not None:
            _dict["videos"] = [vid.to_dict() for vid in self.videos]
        if self.audio is not None:
            _dict["audio"] = [aud.to_dict() for aud in self.audio]
        if self.files is not None:
            _dict["files"] = [f.to_dict() for f in self.files]
        if self.response_audio is not None:
            _dict["response_audio"] = self.response_audio.to_dict()
        if self.step_results:
            flattened_responses = []
            for step_response in self.step_results:
                if isinstance(step_response, list):
                    # Handle List[StepOutput] from workflow components like Steps
                    flattened_responses.extend([s.to_dict() for s in step_response])
                else:
                    # Handle single StepOutput
                    flattened_responses.append(step_response.to_dict())
            _dict["step_results"] = flattened_responses
        if self.step_executor_runs:
            _dict["step_executor_runs"] = [run.to_dict() for run in self.step_executor_runs]
        if self.workflow_agent_run is not None:
            _dict["workflow_agent_run"] = self.workflow_agent_run.to_dict()
        if self.metrics is not None:
            _dict["metrics"] = self.metrics.to_dict()
        if self.input is not None:
            if isinstance(self.input, BaseModel):
                _dict["input"] = self.input.model_dump(exclude_none=True)
            else:
                _dict["input"] = self.input
        if self.content and isinstance(self.content, BaseModel):
            _dict["content"] = self.content.model_dump(exclude_none=True, mode="json")
        if self.events is not None:
            _dict["events"] = [e.to_dict() for e in self.events]
        if self.step_requirements is not None:
            _dict["step_requirements"] = [req.to_dict() for req in self.step_requirements]
        if self.error_requirements is not None:
            _dict["error_requirements"] = [req.to_dict() for req in self.error_requirements]
        return _dict

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "WorkflowRunOutput":
        """Reconstruct a WorkflowRunOutput from its to_dict() payload.

        Pops complex fields out of ``data``, rehydrates each into its rich
        type, then passes the remaining (field-filtered) keys to the
        constructor. Local imports avoid circular imports.
        """
        # Import here to avoid circular import
        from agno.workflow.step import StepOutput

        workflow_metrics_dict = data.pop("metrics", {})
        workflow_metrics = None
        if workflow_metrics_dict:
            from agno.workflow.workflow import WorkflowMetrics

            workflow_metrics = WorkflowMetrics.from_dict(workflow_metrics_dict)
        step_results = data.pop("step_results", [])
        parsed_step_results: List[Union[StepOutput, List[StepOutput]]] = []
        if step_results:
            for step_output_dict in step_results:
                # Reconstruct StepOutput from dict
                parsed_step_results.append(StepOutput.from_dict(step_output_dict))
        # Parse step_executor_runs
        step_executor_runs_data = data.pop("step_executor_runs", [])
        step_executor_runs: List[Union[RunOutput, TeamRunOutput]] = []
        if step_executor_runs_data:
            step_executor_runs = []
            for run_data in step_executor_runs_data:
                # Team runs are detected by their team-specific keys.
                if "team_id" in run_data or "team_name" in run_data:
                    step_executor_runs.append(TeamRunOutput.from_dict(run_data))
                else:
                    step_executor_runs.append(RunOutput.from_dict(run_data))
        workflow_agent_run_data = data.pop("workflow_agent_run", None)
        workflow_agent_run = None
        if workflow_agent_run_data:
            if isinstance(workflow_agent_run_data, dict):
                workflow_agent_run = RunOutput.from_dict(workflow_agent_run_data)
            elif isinstance(workflow_agent_run_data, RunOutput):
                workflow_agent_run = workflow_agent_run_data
        metadata = data.pop("metadata", None)
        images = reconstruct_images(data.pop("images", []))
        videos = reconstruct_videos(data.pop("videos", []))
        audio = reconstruct_audio_list(data.pop("audio", []))
        files = reconstruct_files(data.pop("files", []))
        response_audio = reconstruct_response_audio(data.pop("response_audio", None))
        events_data = data.pop("events", [])
        final_events = []
        for event in events_data or []:
            # Dispatch on marker keys: agent events carry agent_id, team
            # events carry team_id; everything else is a workflow event.
            if "agent_id" in event:
                # Agent event from agent step
                from agno.run.agent import run_output_event_from_dict

                event = run_output_event_from_dict(event)
            elif "team_id" in event:
                # Team event from team step
                from agno.run.team import team_run_output_event_from_dict

                event = team_run_output_event_from_dict(event)
            else:
                # Pure workflow event
                event = workflow_run_output_event_from_dict(event)
            final_events.append(event)
        events = final_events
        # Parse step_requirements
        step_requirements_data = data.pop("step_requirements", None)
        step_requirements = None
        if step_requirements_data:
            from agno.workflow.types import StepRequirement

            step_requirements = [StepRequirement.from_dict(req) for req in step_requirements_data]
        # Handle legacy router_requirements by converting to step_requirements
        router_requirements_data = data.pop("router_requirements", None)
        if router_requirements_data:
            from agno.workflow.types import StepRequirement as StepReq

            # Convert legacy router_requirements to step_requirements with requires_route_selection=True
            router_as_step_reqs = [StepReq.from_dict(req) for req in router_requirements_data]
            if step_requirements is None:
                step_requirements = router_as_step_reqs
            else:
                step_requirements.extend(router_as_step_reqs)
        # Parse error_requirements
        error_requirements_data = data.pop("error_requirements", None)
        error_requirements = None
        if error_requirements_data:
            from agno.workflow.types import ErrorRequirement

            error_requirements = [ErrorRequirement.from_dict(req) for req in error_requirements_data]
        input_data = data.pop("input", None)
        # Filter data to only include fields that are actually defined in the WorkflowRunOutput dataclass
        from dataclasses import fields

        supported_fields = {f.name for f in fields(cls)}
        filtered_data = {k: v for k, v in data.items() if k in supported_fields}
        result = cls(
            step_results=parsed_step_results,
            workflow_agent_run=workflow_agent_run,
            metadata=metadata,
            images=images,
            videos=videos,
            audio=audio,
            files=files,
            response_audio=response_audio,
            events=events,
            metrics=workflow_metrics,
            step_executor_runs=step_executor_runs,
            step_requirements=step_requirements,
            error_requirements=error_requirements,
            input=input_data,
            **filtered_data,
        )
        return result

    def get_content_as_string(self, **kwargs) -> str:
        """Return content as a string: verbatim, pydantic JSON dump, or json.dumps."""
        import json

        from pydantic import BaseModel

        if isinstance(self.content, str):
            return self.content
        elif isinstance(self.content, BaseModel):
            return self.content.model_dump_json(exclude_none=True, **kwargs)
        else:
            return json.dumps(self.content, **kwargs)

    def has_completed(self) -> bool:
        """Check if the workflow run is completed (either successfully or with error)"""
        return self.status in [RunStatus.completed, RunStatus.error]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/workflow.py",
"license": "Apache License 2.0",
"lines": 665,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/events.py | from typing import Any, Dict, List, Optional, Union
from agno.media import Audio, Image
from agno.models.message import Citations
from agno.models.response import ToolExecution
from agno.reasoning.step import ReasoningStep
from agno.run.agent import (
CompressionCompletedEvent,
CompressionStartedEvent,
MemoryUpdateCompletedEvent,
MemoryUpdateStartedEvent,
ModelRequestCompletedEvent,
ModelRequestStartedEvent,
OutputModelResponseCompletedEvent,
OutputModelResponseStartedEvent,
ParserModelResponseCompletedEvent,
ParserModelResponseStartedEvent,
PostHookCompletedEvent,
PostHookStartedEvent,
PreHookCompletedEvent,
PreHookStartedEvent,
ReasoningCompletedEvent,
ReasoningContentDeltaEvent,
ReasoningStartedEvent,
ReasoningStepEvent,
RunCancelledEvent,
RunCompletedEvent,
RunContentCompletedEvent,
RunContentEvent,
RunContinuedEvent,
RunErrorEvent,
RunEvent,
RunInput,
RunOutput,
RunOutputEvent,
RunPausedEvent,
RunStartedEvent,
SessionSummaryCompletedEvent,
SessionSummaryStartedEvent,
ToolCallCompletedEvent,
ToolCallErrorEvent,
ToolCallStartedEvent,
)
from agno.run.requirement import RunRequirement
from agno.run.team import CompressionCompletedEvent as TeamCompressionCompletedEvent
from agno.run.team import CompressionStartedEvent as TeamCompressionStartedEvent
from agno.run.team import MemoryUpdateCompletedEvent as TeamMemoryUpdateCompletedEvent
from agno.run.team import MemoryUpdateStartedEvent as TeamMemoryUpdateStartedEvent
from agno.run.team import ModelRequestCompletedEvent as TeamModelRequestCompletedEvent
from agno.run.team import ModelRequestStartedEvent as TeamModelRequestStartedEvent
from agno.run.team import OutputModelResponseCompletedEvent as TeamOutputModelResponseCompletedEvent
from agno.run.team import OutputModelResponseStartedEvent as TeamOutputModelResponseStartedEvent
from agno.run.team import ParserModelResponseCompletedEvent as TeamParserModelResponseCompletedEvent
from agno.run.team import ParserModelResponseStartedEvent as TeamParserModelResponseStartedEvent
from agno.run.team import PostHookCompletedEvent as TeamPostHookCompletedEvent
from agno.run.team import PostHookStartedEvent as TeamPostHookStartedEvent
from agno.run.team import PreHookCompletedEvent as TeamPreHookCompletedEvent
from agno.run.team import PreHookStartedEvent as TeamPreHookStartedEvent
from agno.run.team import ReasoningCompletedEvent as TeamReasoningCompletedEvent
from agno.run.team import ReasoningContentDeltaEvent as TeamReasoningContentDeltaEvent
from agno.run.team import ReasoningStartedEvent as TeamReasoningStartedEvent
from agno.run.team import ReasoningStepEvent as TeamReasoningStepEvent
from agno.run.team import RunCancelledEvent as TeamRunCancelledEvent
from agno.run.team import RunCompletedEvent as TeamRunCompletedEvent
from agno.run.team import RunContentCompletedEvent as TeamRunContentCompletedEvent
from agno.run.team import RunContentEvent as TeamRunContentEvent
from agno.run.team import RunContinuedEvent as TeamRunContinuedEvent
from agno.run.team import RunErrorEvent as TeamRunErrorEvent
from agno.run.team import RunPausedEvent as TeamRunPausedEvent
from agno.run.team import RunStartedEvent as TeamRunStartedEvent
from agno.run.team import SessionSummaryCompletedEvent as TeamSessionSummaryCompletedEvent
from agno.run.team import SessionSummaryStartedEvent as TeamSessionSummaryStartedEvent
from agno.run.team import TaskCreatedEvent as TeamTaskCreatedEvent
from agno.run.team import TaskData as TeamTaskData
from agno.run.team import TaskIterationCompletedEvent as TeamTaskIterationCompletedEvent
from agno.run.team import TaskIterationStartedEvent as TeamTaskIterationStartedEvent
from agno.run.team import TaskStateUpdatedEvent as TeamTaskStateUpdatedEvent
from agno.run.team import TaskUpdatedEvent as TeamTaskUpdatedEvent
from agno.run.team import TeamRunEvent, TeamRunInput, TeamRunOutput, TeamRunOutputEvent
from agno.run.team import ToolCallCompletedEvent as TeamToolCallCompletedEvent
from agno.run.team import ToolCallErrorEvent as TeamToolCallErrorEvent
from agno.run.team import ToolCallStartedEvent as TeamToolCallStartedEvent
from agno.session.summary import SessionSummary
def create_team_run_started_event(from_run_response: TeamRunOutput) -> TeamRunStartedEvent:
    """Build the event announcing that a team run has started."""
    src = from_run_response
    return TeamRunStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        model=src.model,
        model_provider=src.model_provider,
    )


def create_run_started_event(from_run_response: RunOutput) -> RunStartedEvent:
    """Build the event announcing that an agent run has started."""
    src = from_run_response
    return RunStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        model=src.model,
        model_provider=src.model_provider,
    )
def create_team_run_completed_event(from_run_response: TeamRunOutput) -> TeamRunCompletedEvent:
    """Build the event carrying the final state of a completed team run."""
    src = from_run_response
    return TeamRunCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        content=src.content,
        content_type=src.content_type,
        reasoning_content=src.reasoning_content,
        citations=src.citations,
        model_provider_data=src.model_provider_data,
        images=src.images,
        videos=src.videos,
        audio=src.audio,
        response_audio=src.response_audio,
        references=src.references,
        additional_input=src.additional_input,
        reasoning_steps=src.reasoning_steps,
        reasoning_messages=src.reasoning_messages,
        member_responses=src.member_responses,
        metadata=src.metadata,
        metrics=src.metrics,
        session_state=src.session_state,
    )


def create_run_completed_event(from_run_response: RunOutput) -> RunCompletedEvent:
    """Build the event carrying the final state of a completed agent run."""
    src = from_run_response
    return RunCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        content=src.content,
        content_type=src.content_type,
        reasoning_content=src.reasoning_content,
        citations=src.citations,
        model_provider_data=src.model_provider_data,
        images=src.images,
        videos=src.videos,
        audio=src.audio,
        response_audio=src.response_audio,
        references=src.references,
        additional_input=src.additional_input,
        reasoning_steps=src.reasoning_steps,
        reasoning_messages=src.reasoning_messages,
        metadata=src.metadata,
        metrics=src.metrics,
        session_state=src.session_state,
    )
def create_run_paused_event(
    from_run_response: RunOutput,
    tools: Optional[List[ToolExecution]] = None,
    requirements: Optional[List[RunRequirement]] = None,
) -> RunPausedEvent:
    """Build the event emitted when an agent run pauses."""
    src = from_run_response
    return RunPausedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        tools=tools,
        requirements=requirements,
        content=src.content,
    )


def create_run_continued_event(from_run_response: RunOutput) -> RunContinuedEvent:
    """Build the event emitted when a paused agent run is resumed."""
    src = from_run_response
    return RunContinuedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )
def create_team_run_error_event(
    from_run_response: TeamRunOutput,
    error: str,
    error_type: Optional[str] = None,
    error_id: Optional[str] = None,
    additional_data: Optional[Dict[str, Any]] = None,
) -> TeamRunErrorEvent:
    """Build the event reporting an error during a team run; the error text becomes the content."""
    src = from_run_response
    return TeamRunErrorEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        content=error,
        error_type=error_type,
        error_id=error_id,
        additional_data=additional_data,
    )


def create_run_error_event(
    from_run_response: RunOutput,
    error: str,
    error_type: Optional[str] = None,
    error_id: Optional[str] = None,
    additional_data: Optional[Dict[str, Any]] = None,
) -> RunErrorEvent:
    """Build the event reporting an error during an agent run; the error text becomes the content."""
    src = from_run_response
    return RunErrorEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        content=error,
        error_type=error_type,
        error_id=error_id,
        additional_data=additional_data,
    )
def create_team_run_cancelled_event(from_run_response: TeamRunOutput, reason: str) -> TeamRunCancelledEvent:
    """Build the event emitted when a team run is cancelled."""
    src = from_run_response
    return TeamRunCancelledEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        reason=reason,
    )


def create_team_run_paused_event(
    from_run_response: TeamRunOutput,
    tools: Optional[List[ToolExecution]] = None,
    requirements: Optional[List[RunRequirement]] = None,
) -> TeamRunPausedEvent:
    """Build the event emitted when a team run pauses."""
    src = from_run_response
    return TeamRunPausedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        tools=tools,
        requirements=requirements,
        content=src.content,
    )


def create_team_run_continued_event(from_run_response: TeamRunOutput) -> TeamRunContinuedEvent:
    """Build the event emitted when a paused team run is resumed."""
    src = from_run_response
    return TeamRunContinuedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_run_cancelled_event(from_run_response: RunOutput, reason: str) -> RunCancelledEvent:
    """Build the event emitted when an agent run is cancelled."""
    src = from_run_response
    return RunCancelledEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        reason=reason,
    )
def create_pre_hook_started_event(
    from_run_response: RunOutput, pre_hook_name: Optional[str] = None, run_input: Optional[RunInput] = None
) -> PreHookStartedEvent:
    """Build the event marking the start of an agent pre-hook.

    The run input is deep-copied so later mutations by the hook do not leak
    into the event.
    """
    import copy

    src = from_run_response
    return PreHookStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        pre_hook_name=pre_hook_name,
        run_input=copy.deepcopy(run_input),
    )


def create_team_pre_hook_started_event(
    from_run_response: TeamRunOutput, pre_hook_name: Optional[str] = None, run_input: Optional[TeamRunInput] = None
) -> TeamPreHookStartedEvent:
    """Build the event marking the start of a team pre-hook (run input deep-copied)."""
    import copy

    src = from_run_response
    return TeamPreHookStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        pre_hook_name=pre_hook_name,
        run_input=copy.deepcopy(run_input),
    )


def create_pre_hook_completed_event(
    from_run_response: RunOutput, pre_hook_name: Optional[str] = None, run_input: Optional[RunInput] = None
) -> PreHookCompletedEvent:
    """Build the event marking the completion of an agent pre-hook (run input deep-copied)."""
    import copy

    src = from_run_response
    return PreHookCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        pre_hook_name=pre_hook_name,
        run_input=copy.deepcopy(run_input),
    )


def create_team_pre_hook_completed_event(
    from_run_response: TeamRunOutput, pre_hook_name: Optional[str] = None, run_input: Optional[TeamRunInput] = None
) -> TeamPreHookCompletedEvent:
    """Build the event marking the completion of a team pre-hook (run input deep-copied)."""
    import copy

    src = from_run_response
    return TeamPreHookCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        pre_hook_name=pre_hook_name,
        run_input=copy.deepcopy(run_input),
    )
def create_post_hook_started_event(
    from_run_response: RunOutput, post_hook_name: Optional[str] = None
) -> PostHookStartedEvent:
    """Build the event marking the start of an agent post-hook."""
    src = from_run_response
    return PostHookStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        post_hook_name=post_hook_name,
    )


def create_team_post_hook_started_event(
    from_run_response: TeamRunOutput, post_hook_name: Optional[str] = None
) -> TeamPostHookStartedEvent:
    """Build the event marking the start of a team post-hook."""
    src = from_run_response
    return TeamPostHookStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        post_hook_name=post_hook_name,
    )


def create_post_hook_completed_event(
    from_run_response: RunOutput, post_hook_name: Optional[str] = None
) -> PostHookCompletedEvent:
    """Build the event marking the completion of an agent post-hook."""
    src = from_run_response
    return PostHookCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        post_hook_name=post_hook_name,
    )


def create_team_post_hook_completed_event(
    from_run_response: TeamRunOutput, post_hook_name: Optional[str] = None
) -> TeamPostHookCompletedEvent:
    """Build the event marking the completion of a team post-hook."""
    src = from_run_response
    return TeamPostHookCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        post_hook_name=post_hook_name,
    )
def create_memory_update_started_event(from_run_response: RunOutput) -> MemoryUpdateStartedEvent:
    """Build the event marking the start of a memory update for an agent run."""
    src = from_run_response
    return MemoryUpdateStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_team_memory_update_started_event(from_run_response: TeamRunOutput) -> TeamMemoryUpdateStartedEvent:
    """Build the event marking the start of a memory update for a team run."""
    src = from_run_response
    return TeamMemoryUpdateStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_memory_update_completed_event(
    from_run_response: RunOutput, memories: Optional[List[Any]] = None
) -> MemoryUpdateCompletedEvent:
    """Build the event carrying the memories produced by an agent memory update."""
    src = from_run_response
    return MemoryUpdateCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        memories=memories,
    )


def create_team_memory_update_completed_event(
    from_run_response: TeamRunOutput, memories: Optional[List[Any]] = None
) -> TeamMemoryUpdateCompletedEvent:
    """Build the event carrying the memories produced by a team memory update."""
    src = from_run_response
    return TeamMemoryUpdateCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        memories=memories,
    )
def create_team_session_summary_started_event(
    from_run_response: TeamRunOutput,
) -> TeamSessionSummaryStartedEvent:
    """Build the event marking the start of session summarization for a team run."""
    src = from_run_response
    return TeamSessionSummaryStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_team_session_summary_completed_event(
    from_run_response: TeamRunOutput, session_summary: Optional[SessionSummary] = None
) -> TeamSessionSummaryCompletedEvent:
    """Build the event carrying the generated session summary for a team run."""
    src = from_run_response
    return TeamSessionSummaryCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        session_summary=session_summary,
    )


def create_session_summary_started_event(from_run_response: RunOutput) -> SessionSummaryStartedEvent:
    """Build the event marking the start of session summarization for an agent run."""
    src = from_run_response
    return SessionSummaryStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_session_summary_completed_event(
    from_run_response: RunOutput, session_summary: Optional[SessionSummary] = None
) -> SessionSummaryCompletedEvent:
    """Build the event carrying the generated session summary for an agent run."""
    src = from_run_response
    return SessionSummaryCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        session_summary=session_summary,
    )
def create_reasoning_started_event(from_run_response: RunOutput) -> ReasoningStartedEvent:
    """Build the event marking the start of reasoning for an agent run."""
    src = from_run_response
    return ReasoningStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_team_reasoning_started_event(from_run_response: TeamRunOutput) -> TeamReasoningStartedEvent:
    """Build the event marking the start of reasoning for a team run."""
    src = from_run_response
    return TeamReasoningStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_reasoning_step_event(
    from_run_response: RunOutput, reasoning_step: ReasoningStep, reasoning_content: str
) -> ReasoningStepEvent:
    """Build the event carrying a single reasoning step of an agent run."""
    src = from_run_response
    return ReasoningStepEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        content=reasoning_step,
        # content_type records the concrete step class name
        content_type=type(reasoning_step).__name__,
        reasoning_content=reasoning_content,
    )


def create_reasoning_content_delta_event(
    from_run_response: RunOutput, reasoning_content: str
) -> ReasoningContentDeltaEvent:
    """Create an event for streaming reasoning content chunks."""
    src = from_run_response
    return ReasoningContentDeltaEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        reasoning_content=reasoning_content,
    )


def create_team_reasoning_step_event(
    from_run_response: TeamRunOutput, reasoning_step: ReasoningStep, reasoning_content: str
) -> TeamReasoningStepEvent:
    """Build the event carrying a single reasoning step of a team run."""
    src = from_run_response
    return TeamReasoningStepEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        content=reasoning_step,
        # content_type records the concrete step class name
        content_type=type(reasoning_step).__name__,
        reasoning_content=reasoning_content,
    )


def create_team_reasoning_content_delta_event(
    from_run_response: TeamRunOutput, reasoning_content: str
) -> TeamReasoningContentDeltaEvent:
    """Create an event for streaming reasoning content chunks for Team."""
    src = from_run_response
    return TeamReasoningContentDeltaEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        reasoning_content=reasoning_content,
    )


def create_reasoning_completed_event(
    from_run_response: RunOutput, content: Optional[Any] = None, content_type: Optional[str] = None
) -> ReasoningCompletedEvent:
    """Build the event marking the completion of reasoning for an agent run."""
    src = from_run_response
    return ReasoningCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        content=content,
        content_type=content_type if content_type else "str",
    )


def create_team_reasoning_completed_event(
    from_run_response: TeamRunOutput, content: Optional[Any] = None, content_type: Optional[str] = None
) -> TeamReasoningCompletedEvent:
    """Build the event marking the completion of reasoning for a team run."""
    src = from_run_response
    return TeamReasoningCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        content=content,
        content_type=content_type if content_type else "str",
    )
def create_tool_call_started_event(from_run_response: RunOutput, tool: ToolExecution) -> ToolCallStartedEvent:
    """Build the event marking the start of a tool call in an agent run."""
    src = from_run_response
    return ToolCallStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        tool=tool,
    )


def create_team_tool_call_started_event(
    from_run_response: TeamRunOutput, tool: ToolExecution
) -> TeamToolCallStartedEvent:
    """Build the event marking the start of a tool call in a team run."""
    src = from_run_response
    return TeamToolCallStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        tool=tool,
    )


def create_tool_call_completed_event(
    from_run_response: RunOutput, tool: ToolExecution, content: Optional[Any] = None
) -> ToolCallCompletedEvent:
    """Build the event carrying the result and media of a completed agent tool call."""
    src = from_run_response
    return ToolCallCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        tool=tool,
        content=content,
        images=src.images,
        videos=src.videos,
        audio=src.audio,
    )


def create_team_tool_call_completed_event(
    from_run_response: TeamRunOutput, tool: ToolExecution, content: Optional[Any] = None
) -> TeamToolCallCompletedEvent:
    """Build the event carrying the result and media of a completed team tool call."""
    src = from_run_response
    return TeamToolCallCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        tool=tool,
        content=content,
        images=src.images,
        videos=src.videos,
        audio=src.audio,
    )


def create_tool_call_error_event(
    from_run_response: RunOutput, tool: ToolExecution, error: Optional[str] = None
) -> ToolCallErrorEvent:
    """Build the event reporting a failed tool call in an agent run."""
    src = from_run_response
    return ToolCallErrorEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        tool=tool,
        error=error,
    )


def create_team_tool_call_error_event(
    from_run_response: TeamRunOutput, tool: ToolExecution, error: Optional[str] = None
) -> TeamToolCallErrorEvent:
    """Build the event reporting a failed tool call in a team run."""
    src = from_run_response
    return TeamToolCallErrorEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        tool=tool,
        error=error,
    )
def create_run_output_content_event(
    from_run_response: RunOutput,
    content: Optional[Any] = None,
    content_type: Optional[str] = None,
    reasoning_content: Optional[str] = None,
    redacted_reasoning_content: Optional[str] = None,
    model_provider_data: Optional[Dict[str, Any]] = None,
    citations: Optional[Citations] = None,
    response_audio: Optional[Audio] = None,
    image: Optional[Image] = None,
) -> RunContentEvent:
    """Build a streaming content event for an agent run.

    Visible and redacted reasoning content are concatenated (None treated
    as empty) into a single reasoning_content field.
    """
    src = from_run_response
    combined_reasoning = "".join(
        part for part in (reasoning_content, redacted_reasoning_content) if part
    )
    return RunContentEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        content=content,
        content_type=content_type if content_type else "str",
        reasoning_content=combined_reasoning,
        citations=citations,
        response_audio=response_audio,
        image=image,
        references=src.references,
        additional_input=src.additional_input,
        reasoning_steps=src.reasoning_steps,
        reasoning_messages=src.reasoning_messages,
        model_provider_data=model_provider_data,
    )


def create_team_run_output_content_event(
    from_run_response: TeamRunOutput,
    content: Optional[Any] = None,
    content_type: Optional[str] = None,
    reasoning_content: Optional[str] = None,
    redacted_reasoning_content: Optional[str] = None,
    citations: Optional[Citations] = None,
    model_provider_data: Optional[Dict[str, Any]] = None,
    response_audio: Optional[Audio] = None,
    image: Optional[Image] = None,
) -> TeamRunContentEvent:
    """Build a streaming content event for a team run.

    Visible and redacted reasoning content are concatenated (None treated
    as empty) into a single reasoning_content field.
    """
    src = from_run_response
    combined_reasoning = "".join(
        part for part in (reasoning_content, redacted_reasoning_content) if part
    )
    return TeamRunContentEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        content=content,
        content_type=content_type if content_type else "str",
        reasoning_content=combined_reasoning,
        citations=citations,
        model_provider_data=model_provider_data,
        response_audio=response_audio,
        image=image,
        references=src.references,
        additional_input=src.additional_input,
        reasoning_steps=src.reasoning_steps,
        reasoning_messages=src.reasoning_messages,
    )
def create_run_content_completed_event(
    from_run_response: RunOutput,
) -> RunContentCompletedEvent:
    """Build the event marking the end of streamed content for an agent run."""
    src = from_run_response
    return RunContentCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_team_run_content_completed_event(
    from_run_response: TeamRunOutput,
) -> TeamRunContentCompletedEvent:
    """Build the event marking the end of streamed content for a team run."""
    src = from_run_response
    return TeamRunContentCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_parser_model_response_started_event(
    from_run_response: RunOutput,
) -> ParserModelResponseStartedEvent:
    """Build the event marking the start of the parser model response for an agent run."""
    src = from_run_response
    return ParserModelResponseStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_parser_model_response_completed_event(
    from_run_response: RunOutput,
) -> ParserModelResponseCompletedEvent:
    """Build the event marking the completion of the parser model response for an agent run."""
    src = from_run_response
    return ParserModelResponseCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_team_parser_model_response_started_event(
    from_run_response: TeamRunOutput,
) -> TeamParserModelResponseStartedEvent:
    """Build the event marking the start of the parser model response for a team run."""
    src = from_run_response
    return TeamParserModelResponseStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_team_parser_model_response_completed_event(
    from_run_response: TeamRunOutput,
) -> TeamParserModelResponseCompletedEvent:
    """Build the event marking the completion of the parser model response for a team run."""
    src = from_run_response
    return TeamParserModelResponseCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )
def create_output_model_response_started_event(from_run_response: RunOutput) -> OutputModelResponseStartedEvent:
    """Build the event marking the start of the output model response for an agent run."""
    src = from_run_response
    return OutputModelResponseStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_output_model_response_completed_event(from_run_response: RunOutput) -> OutputModelResponseCompletedEvent:
    """Build the event marking the completion of the output model response for an agent run."""
    src = from_run_response
    return OutputModelResponseCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
    )


def create_team_output_model_response_started_event(
    from_run_response: TeamRunOutput,
) -> TeamOutputModelResponseStartedEvent:
    """Build the event marking the start of the output model response for a team run."""
    src = from_run_response
    return TeamOutputModelResponseStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )


def create_team_output_model_response_completed_event(
    from_run_response: TeamRunOutput,
) -> TeamOutputModelResponseCompletedEvent:
    """Build the event marking the completion of the output model response for a team run."""
    src = from_run_response
    return TeamOutputModelResponseCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
    )
def create_model_request_started_event(
    from_run_response: RunOutput,
    model: Optional[str] = None,
    model_provider: Optional[str] = None,
) -> ModelRequestStartedEvent:
    """Build the event marking the start of a model request in an agent run."""
    src = from_run_response
    return ModelRequestStartedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        model=model,
        model_provider=model_provider,
    )


def create_model_request_completed_event(
    from_run_response: RunOutput,
    model: Optional[str] = None,
    model_provider: Optional[str] = None,
    input_tokens: Optional[int] = None,
    output_tokens: Optional[int] = None,
    total_tokens: Optional[int] = None,
    time_to_first_token: Optional[float] = None,
    reasoning_tokens: Optional[int] = None,
    cache_read_tokens: Optional[int] = None,
    cache_write_tokens: Optional[int] = None,
) -> ModelRequestCompletedEvent:
    """Build the event carrying token/timing metrics for a completed agent model request."""
    src = from_run_response
    return ModelRequestCompletedEvent(
        session_id=src.session_id,
        agent_id=src.agent_id,
        agent_name=src.agent_name,
        run_id=src.run_id,
        model=model,
        model_provider=model_provider,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        time_to_first_token=time_to_first_token,
        reasoning_tokens=reasoning_tokens,
        cache_read_tokens=cache_read_tokens,
        cache_write_tokens=cache_write_tokens,
    )


def create_team_model_request_started_event(
    from_run_response: TeamRunOutput,
    model: Optional[str] = None,
    model_provider: Optional[str] = None,
) -> TeamModelRequestStartedEvent:
    """Build the event marking the start of a model request in a team run."""
    src = from_run_response
    return TeamModelRequestStartedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        model=model,
        model_provider=model_provider,
    )


def create_team_model_request_completed_event(
    from_run_response: TeamRunOutput,
    model: Optional[str] = None,
    model_provider: Optional[str] = None,
    input_tokens: Optional[int] = None,
    output_tokens: Optional[int] = None,
    total_tokens: Optional[int] = None,
    time_to_first_token: Optional[float] = None,
    reasoning_tokens: Optional[int] = None,
    cache_read_tokens: Optional[int] = None,
    cache_write_tokens: Optional[int] = None,
) -> TeamModelRequestCompletedEvent:
    """Build the event carrying token/timing metrics for a completed team model request."""
    src = from_run_response
    return TeamModelRequestCompletedEvent(
        session_id=src.session_id,
        team_id=src.team_id,
        team_name=src.team_name,
        run_id=src.run_id,
        model=model,
        model_provider=model_provider,
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=total_tokens,
        time_to_first_token=time_to_first_token,
        reasoning_tokens=reasoning_tokens,
        cache_read_tokens=cache_read_tokens,
        cache_write_tokens=cache_write_tokens,
    )
def create_compression_started_event(
    from_run_response: RunOutput,
) -> CompressionStartedEvent:
    """Build a CompressionStartedEvent scoped to the given agent run."""
    # Only run/agent identity is needed for the "started" notification.
    return CompressionStartedEvent(
        run_id=from_run_response.run_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        session_id=from_run_response.session_id,
    )
def create_compression_completed_event(
    from_run_response: RunOutput,
    tool_results_compressed: Optional[int] = None,
    original_size: Optional[int] = None,
    compressed_size: Optional[int] = None,
) -> CompressionCompletedEvent:
    """Build a CompressionCompletedEvent with optional compression statistics."""
    stats = {
        "tool_results_compressed": tool_results_compressed,
        "original_size": original_size,
        "compressed_size": compressed_size,
    }
    return CompressionCompletedEvent(
        session_id=from_run_response.session_id,
        agent_id=from_run_response.agent_id,  # type: ignore
        agent_name=from_run_response.agent_name,  # type: ignore
        run_id=from_run_response.run_id,
        **stats,
    )
def create_team_compression_started_event(
    from_run_response: TeamRunOutput,
) -> TeamCompressionStartedEvent:
    """Build a TeamCompressionStartedEvent scoped to the given team run."""
    return TeamCompressionStartedEvent(
        run_id=from_run_response.run_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        session_id=from_run_response.session_id,
    )
def create_team_compression_completed_event(
    from_run_response: TeamRunOutput,
    tool_results_compressed: Optional[int] = None,
    original_size: Optional[int] = None,
    compressed_size: Optional[int] = None,
) -> TeamCompressionCompletedEvent:
    """Build a TeamCompressionCompletedEvent with optional compression statistics."""
    stats = {
        "tool_results_compressed": tool_results_compressed,
        "original_size": original_size,
        "compressed_size": compressed_size,
    }
    return TeamCompressionCompletedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        **stats,
    )
# ---------------------------------------------------------------------------
# Task Mode Events
# ---------------------------------------------------------------------------
def create_team_task_iteration_started_event(
    from_run_response: TeamRunOutput,
    iteration: int,
    max_iterations: int,
) -> TeamTaskIterationStartedEvent:
    """Build a TeamTaskIterationStartedEvent for iteration *iteration* of *max_iterations*."""
    identifiers = {
        "session_id": from_run_response.session_id,
        "team_id": from_run_response.team_id,
        "team_name": from_run_response.team_name,
        "run_id": from_run_response.run_id,
    }
    return TeamTaskIterationStartedEvent(iteration=iteration, max_iterations=max_iterations, **identifiers)
def create_team_task_iteration_completed_event(
    from_run_response: TeamRunOutput,
    iteration: int,
    max_iterations: int,
    task_summary: Optional[str] = None,
) -> TeamTaskIterationCompletedEvent:
    """Build a TeamTaskIterationCompletedEvent, optionally carrying a task summary."""
    identifiers = {
        "session_id": from_run_response.session_id,
        "team_id": from_run_response.team_id,
        "team_name": from_run_response.team_name,
        "run_id": from_run_response.run_id,
    }
    return TeamTaskIterationCompletedEvent(
        iteration=iteration,
        max_iterations=max_iterations,
        task_summary=task_summary,
        **identifiers,
    )
def create_team_task_state_updated_event(
    from_run_response: TeamRunOutput,
    task_summary: Optional[str] = None,
    goal_complete: bool = False,
    tasks: Optional[List[TeamTaskData]] = None,
    completion_summary: Optional[str] = None,
) -> TeamTaskStateUpdatedEvent:
    """Build a TeamTaskStateUpdatedEvent describing the current task-board state."""
    # Normalize a missing task list to an empty one so consumers never see None.
    task_list = tasks or []
    return TeamTaskStateUpdatedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        task_summary=task_summary,
        goal_complete=goal_complete,
        tasks=task_list,
        completion_summary=completion_summary,
    )
def create_team_task_created_event(
    from_run_response: TeamRunOutput,
    task_id: str,
    title: str,
    description: str = "",
    assignee: Optional[str] = None,
    status: str = "pending",
    dependencies: Optional[List[str]] = None,
) -> TeamTaskCreatedEvent:
    """Build a TeamTaskCreatedEvent announcing a newly created task."""
    # Missing dependency lists are normalized to empty so consumers never see None.
    dependency_list = dependencies or []
    return TeamTaskCreatedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        task_id=task_id,
        title=title,
        description=description,
        assignee=assignee,
        status=status,
        dependencies=dependency_list,
    )
def create_team_task_updated_event(
    from_run_response: TeamRunOutput,
    task_id: str,
    title: str,
    status: str,
    previous_status: Optional[str] = None,
    result: Optional[str] = None,
    assignee: Optional[str] = None,
) -> TeamTaskUpdatedEvent:
    """Build a TeamTaskUpdatedEvent describing a task status transition."""
    # Task-specific payload grouped separately from the run identifiers.
    payload = {
        "task_id": task_id,
        "title": title,
        "status": status,
        "previous_status": previous_status,
        "result": result,
        "assignee": assignee,
    }
    return TeamTaskUpdatedEvent(
        session_id=from_run_response.session_id,
        team_id=from_run_response.team_id,  # type: ignore
        team_name=from_run_response.team_name,  # type: ignore
        run_id=from_run_response.run_id,
        **payload,
    )
def handle_event(
    event: Union[RunOutputEvent, TeamRunOutputEvent],
    run_response: Union[RunOutput, TeamRunOutput],
    events_to_skip: Optional[List[Union[RunEvent, TeamRunEvent]]] = None,
    store_events: bool = False,
) -> Union[RunOutputEvent, TeamRunOutputEvent]:
    """Optionally persist *event* on *run_response* and return it unchanged.

    Args:
        event: The streamed event being handled.
        run_response: The run output whose ``events`` list accumulates stored events.
        events_to_skip: Event types that must never be persisted.
        store_events: When False nothing is stored; the event is simply passed through.

    Returns:
        The same event instance, so callers can yield the result directly.
    """
    # Compare against the enum *values*, since ``event.event`` holds the plain string.
    # The comprehension variable is named distinctly so it does not shadow the
    # ``event`` parameter (the previous code reused the name, which was confusing).
    skipped_values: List[str] = [skipped.value for skipped in events_to_skip] if events_to_skip else []
    if store_events and event.event not in skipped_values:
        if run_response.events is None:
            run_response.events = []
        run_response.events.append(event)  # type: ignore
    return event
def add_error_event(
    error: RunErrorEvent,
    events: Optional[List[RunOutputEvent]],
):
    """Append *error* to *events*, creating a fresh list when *events* is None.

    The (possibly new) list is returned so callers can rebind it.
    """
    collected = [] if events is None else events
    collected.append(error)
    return collected
def add_team_error_event(
    error: TeamRunErrorEvent,
    events: Optional[List[Union[RunOutputEvent, TeamRunOutputEvent]]],
):
    """Append *error* to *events*, creating a fresh list when *events* is None.

    The (possibly new) list is returned so callers can rebind it.
    """
    collected = [] if events is None else events
    collected.append(error)
    return collected
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/events.py",
"license": "Apache License 2.0",
"lines": 938,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/integration/agent/test_event_streaming.py | from dataclasses import dataclass
from textwrap import dedent
import pytest
from pydantic import BaseModel
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
from agno.run.agent import CustomEvent, RunEvent, RunInput, RunOutput
from agno.tools.decorator import tool
from agno.tools.reasoning import ReasoningTools
from agno.tools.yfinance import YFinanceTools
def test_basic_events():
    """With stream_events=False, only run_content deltas are streamed."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        telemetry=False,
    )
    event_counts = {}
    for chunk in agent.run("Hello, how are you?", stream=True, stream_events=False):
        event_counts.setdefault(chunk.event, 0)
        event_counts[chunk.event] += 1
    # Content arrives in multiple deltas and nothing else should stream.
    assert event_counts.keys() == {RunEvent.run_content}
    assert event_counts[RunEvent.run_content] > 1
@pytest.mark.asyncio
async def test_async_basic_events():
    """Async variant: stream_events=False yields only run_content deltas."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        telemetry=False,
    )
    counts = {}
    async for chunk in agent.arun("Hello, how are you?", stream=True, stream_events=False):
        counts[chunk.event] = counts.get(chunk.event, 0) + 1
    # Content arrives in multiple deltas; no lifecycle events are streamed.
    assert counts.keys() == {RunEvent.run_content}
    assert counts[RunEvent.run_content] > 1
def test_basic_intermediate_steps_events(shared_db):
    """Streaming with stream_events=True emits lifecycle events; with
    store_events=True they are persisted to the session DB (content deltas
    are not persisted, as the stored-event count below demonstrates)."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        store_events=True,
        telemetry=False,
    )
    response_generator = agent.run("Hello, how are you?", stream=True, stream_events=True)
    # Bucket every streamed delta by its event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    # run_started carries model identity and run/session identifiers.
    assert len(events[RunEvent.run_started]) == 1
    assert events[RunEvent.run_started][0].model == "gpt-4o-mini"
    assert events[RunEvent.run_started][0].model_provider == "OpenAI"
    assert events[RunEvent.run_started][0].session_id is not None
    assert events[RunEvent.run_started][0].agent_id is not None
    assert events[RunEvent.run_started][0].run_id is not None
    assert events[RunEvent.run_started][0].created_at is not None
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    # The completed event exposes metadata and usage metrics.
    completed_event = events[RunEvent.run_completed][0]
    assert hasattr(completed_event, "metadata")
    assert hasattr(completed_event, "metrics")
    assert completed_event.metrics is not None
    assert completed_event.metrics.total_tokens > 0
    # Check the stored events
    run_response_from_storage = shared_db.get_sessions(session_type=SessionType.AGENT)[0].runs[0]
    assert run_response_from_storage.events is not None
    assert len(run_response_from_storage.events) == 5, (
        "We should have run_started, llm events, and run completed events"
    )
    assert run_response_from_storage.events[0].event == RunEvent.run_started
    assert run_response_from_storage.events[1].event == RunEvent.model_request_started
    assert run_response_from_storage.events[2].event == RunEvent.model_request_completed
    assert run_response_from_storage.events[3].event == RunEvent.run_content_completed
    assert run_response_from_storage.events[4].event == RunEvent.run_completed
    # Metrics must survive the persistence round-trip.
    persisted_completed_event = run_response_from_storage.events[4]
    assert hasattr(persisted_completed_event, "metadata")
    assert hasattr(persisted_completed_event, "metrics")
    assert persisted_completed_event.metrics is not None
    assert persisted_completed_event.metrics.total_tokens > 0
def test_intermediate_steps_with_tools(shared_db):
    """A tool-using run additionally emits tool_call_started/completed events,
    and those events are persisted alongside the lifecycle events."""
    agent = Agent(
        db=shared_db,
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[YFinanceTools(cache_results=True)],
        telemetry=False,
        store_events=True,
    )
    response_generator = agent.run("What is the stock price of Apple?", stream=True, stream_events=True)
    # Bucket every streamed delta by its event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.tool_call_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    # A tool run involves at least two model requests (tool call + final answer).
    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) >= 1
    assert len(events[RunEvent.model_request_completed]) >= 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    assert len(events[RunEvent.tool_call_started]) == 1
    assert events[RunEvent.tool_call_started][0].tool.tool_name == "get_current_stock_price"  # type: ignore
    assert len(events[RunEvent.tool_call_completed]) == 1
    assert events[RunEvent.tool_call_completed][0].content is not None  # type: ignore
    assert events[RunEvent.tool_call_completed][0].tool.result is not None  # type: ignore
    completed_event = events[RunEvent.run_completed][0]
    assert completed_event.metrics is not None
    assert completed_event.metrics.total_tokens > 0
    # Check the stored events
    run_response_from_storage = shared_db.get_sessions(session_type=SessionType.AGENT)[0].runs[0]
    assert run_response_from_storage.events is not None
    assert len(run_response_from_storage.events) >= 7
    assert run_response_from_storage.events[0].event == RunEvent.run_started
    assert run_response_from_storage.events[1].event == RunEvent.model_request_started
def test_intermediate_steps_with_custom_events():
    """A CustomEvent yielded from a generator tool is streamed as its own
    custom_event, with the owning tool_call_id injected automatically."""
    # Custom event subclass carrying extra payload fields.
    @dataclass
    class WeatherRequestEvent(CustomEvent):
        city: str = ""
        temperature: int = 0
    def get_weather(city: str):
        yield WeatherRequestEvent(city=city, temperature=70)
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_weather],
        telemetry=False,
    )
    response_generator = agent.run("What is the weather in Tokyo?", stream=True, stream_events=True)
    # Bucket every streamed delta by its event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.custom_event,
        RunEvent.tool_call_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.custom_event]) == 1
    # Payload fields survive both attribute access and to_dict serialization.
    assert events[RunEvent.custom_event][0].city == "Tokyo"
    assert events[RunEvent.custom_event][0].temperature == 70
    assert events[RunEvent.custom_event][0].to_dict()["city"] == "Tokyo"
    assert events[RunEvent.custom_event][0].to_dict()["temperature"] == 70
    # Verify tool_call_id is injected and matches the tool call
    custom_event = events[RunEvent.custom_event][0]
    tool_started_event = events[RunEvent.tool_call_started][0]
    assert custom_event.tool_call_id is not None, "tool_call_id should not be None"
    assert custom_event.tool_call_id == tool_started_event.tool.tool_call_id
@pytest.mark.asyncio
async def test_async_intermediate_steps_with_custom_events():
    """Test that the agent streams custom events asynchronously with tool_call_id."""
    # Custom event subclass carrying extra payload fields.
    @dataclass
    class WeatherRequestEvent(CustomEvent):
        city: str = ""
        temperature: int = 0
    def get_weather(city: str):
        yield WeatherRequestEvent(city=city, temperature=70)
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_weather],
        telemetry=False,
    )
    # Bucket every streamed delta by its event type.
    events = {}
    async for run_response_delta in agent.arun("What is the weather in Tokyo?", stream=True, stream_events=True):
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.custom_event,
        RunEvent.tool_call_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.custom_event]) == 1
    # Payload fields survive both attribute access and to_dict serialization.
    assert events[RunEvent.custom_event][0].city == "Tokyo"
    assert events[RunEvent.custom_event][0].temperature == 70
    assert events[RunEvent.custom_event][0].to_dict()["city"] == "Tokyo"
    assert events[RunEvent.custom_event][0].to_dict()["temperature"] == 70
    # Verify tool_call_id is injected and matches the tool call
    custom_event = events[RunEvent.custom_event][0]
    tool_started_event = events[RunEvent.tool_call_started][0]
    assert custom_event.tool_call_id is not None, "tool_call_id should not be None"
    assert custom_event.tool_call_id == tool_started_event.tool.tool_call_id
def test_intermediate_steps_with_reasoning():
    """A run with ReasoningTools additionally emits reasoning_started,
    reasoning_step and reasoning_completed events."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[ReasoningTools(add_instructions=True)],
        instructions=dedent("""\
            You are an expert problem-solving assistant with strong analytical skills! 🧠
            Use step-by-step reasoning to solve the problem.
            \
        """),
        telemetry=False,
    )
    response_generator = agent.run("What is the sum of the first 10 natural numbers?", stream=True, stream_events=True)
    # Bucket every streamed delta by its event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.tool_call_completed,
        RunEvent.reasoning_started,
        RunEvent.reasoning_completed,
        RunEvent.reasoning_step,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) >= 1
    assert len(events[RunEvent.model_request_completed]) >= 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    # Reasoning involves multiple tool calls and multiple reasoning steps.
    assert len(events[RunEvent.tool_call_started]) > 1
    assert len(events[RunEvent.tool_call_completed]) > 1
    assert len(events[RunEvent.reasoning_started]) == 1
    assert len(events[RunEvent.reasoning_completed]) == 1
    assert events[RunEvent.reasoning_completed][0].content is not None  # type: ignore
    assert events[RunEvent.reasoning_completed][0].content_type == "ReasoningSteps"  # type: ignore
    assert len(events[RunEvent.reasoning_step]) > 1
    assert events[RunEvent.reasoning_step][0].content is not None  # type: ignore
    assert events[RunEvent.reasoning_step][0].content_type == "ReasoningStep"  # type: ignore
    assert events[RunEvent.reasoning_step][0].reasoning_content is not None  # type: ignore
def test_intermediate_steps_with_user_confirmation(shared_db):
    """A requires_confirmation tool pauses the run (run_paused), and
    continue_run resumes it (run_continued) after the tool is confirmed;
    events from both phases are persisted in order."""
    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        store_events=True,
        add_history_to_context=True,
        num_history_runs=2,
        telemetry=False,
    )
    response_generator = agent.run("What is the weather in Tokyo?", stream=True, stream_events=True)
    # First until we hit a pause
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    run_response = agent.get_last_run_output()
    # Phase 1 stops at run_paused: no content or completion events yet.
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_paused,
    }
    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.run_paused]) == 1
    assert events[RunEvent.run_paused][0].tools[0].requires_confirmation is True  # type: ignore
    assert run_response.is_paused
    assert run_response.tools[0].requires_confirmation
    # Mark the tool as confirmed
    updated_tools = run_response.tools
    run_id = run_response.run_id
    updated_tools[0].confirmed = True
    # Check stored events
    stored_session = shared_db.get_sessions(session_type=SessionType.AGENT)[0]
    assert stored_session.runs[0].events is not None
    assert len(stored_session.runs[0].events) == 4
    assert stored_session.runs[0].events[0].event == RunEvent.run_started
    assert stored_session.runs[0].events[1].event == RunEvent.model_request_started
    assert stored_session.runs[0].events[2].event == RunEvent.model_request_completed
    assert stored_session.runs[0].events[3].event == RunEvent.run_paused
    # Then we continue the run
    response_generator = agent.continue_run(run_id=run_id, updated_tools=updated_tools, stream=True, stream_events=True)
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    run_response = agent.get_last_run_output()
    assert run_response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
    # Phase 2 starts with run_continued and runs through to completion.
    assert events.keys() == {
        RunEvent.run_continued,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.tool_call_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    assert len(events[RunEvent.run_continued]) == 1
    assert len(events[RunEvent.tool_call_started]) == 1
    assert events[RunEvent.tool_call_started][0].tool.tool_name == "get_the_weather"  # type: ignore
    assert len(events[RunEvent.tool_call_completed]) == 1
    assert events[RunEvent.tool_call_completed][0].content is not None
    assert events[RunEvent.tool_call_completed][0].tool.result is not None
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    assert run_response.is_paused is False
    # Check stored events
    stored_session = shared_db.get_sessions(session_type=SessionType.AGENT)[0]
    assert stored_session.runs[0].events is not None
    # Both phases are persisted to the same run, in stream order.
    assert len(stored_session.runs[0].events) == 11
    assert stored_session.runs[0].events[0].event == RunEvent.run_started
    assert stored_session.runs[0].events[1].event == RunEvent.model_request_started
    assert stored_session.runs[0].events[2].event == RunEvent.model_request_completed
    assert stored_session.runs[0].events[3].event == RunEvent.run_paused
    assert stored_session.runs[0].events[4].event == RunEvent.run_continued
    assert stored_session.runs[0].events[5].event == RunEvent.tool_call_started
    assert stored_session.runs[0].events[6].event == RunEvent.tool_call_completed
    assert stored_session.runs[0].events[7].event == RunEvent.model_request_started
    assert stored_session.runs[0].events[8].event == RunEvent.model_request_completed
    assert stored_session.runs[0].events[9].event == RunEvent.run_content_completed
    assert stored_session.runs[0].events[10].event == RunEvent.run_completed
@pytest.mark.asyncio
async def test_custom_event_in_acontinue_run_with_async_tool(shared_db):
    """Test that CustomEvent from async generator tools is properly yielded in acontinue_run.
    This tests the fix for GitHub issue #6069 where CustomEvents from confirmed tools
    were not being yielded as separate events during acontinue_run.
    """
    # Custom event subclass carrying extra payload fields.
    @dataclass
    class WeatherRequestEvent(CustomEvent):
        city: str = ""
        temperature: int = 0
    @tool(requires_confirmation=True)
    async def get_the_weather(city: str):
        """Get weather for a city, yielding a custom event first."""
        yield WeatherRequestEvent(city=city, temperature=70)
        yield f"It is currently 70 degrees and cloudy in {city}"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    session_id = "test_custom_event_acontinue"
    # Initial run that requires confirmation
    response = await agent.arun("What is the weather in Tokyo?", session_id=session_id)
    assert response.is_paused
    assert response.tools is not None
    assert response.tools[0].requires_confirmation
    assert response.tools[0].tool_name == "get_the_weather"
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    # Continue the run with streaming and stream_events
    events = {}
    async for run_response_delta in agent.acontinue_run(
        run_id=response.run_id,
        updated_tools=response.tools,
        session_id=session_id,
        stream=True,
        stream_events=True,
    ):
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    # Verify that CustomEvent was yielded as a separate event
    assert RunEvent.custom_event in events.keys(), (
        f"CustomEvent should be yielded as a separate event. Got events: {events.keys()}"
    )
    assert len(events[RunEvent.custom_event]) == 1
    custom_event = events[RunEvent.custom_event][0]
    assert custom_event.city == "Tokyo"
    assert custom_event.temperature == 70
    # Verify tool_call_id is injected and matches the tool call
    tool_started_event = events[RunEvent.tool_call_started][0]
    assert custom_event.tool_call_id is not None, "tool_call_id should not be None"
    assert custom_event.tool_call_id == tool_started_event.tool.tool_call_id
    # Verify tool result contains the actual weather data
    tool_completed_event = events[RunEvent.tool_call_completed][0]
    assert "70 degrees" in str(tool_completed_event.tool.result)
    # Verify all expected events are present
    assert RunEvent.run_continued in events.keys()
    assert RunEvent.tool_call_started in events.keys()
    assert RunEvent.tool_call_completed in events.keys()
    assert RunEvent.run_completed in events.keys()
def test_custom_event_in_continue_run_with_sync_generator_tool(shared_db):
    """Test that CustomEvent from sync generator tools is properly yielded in continue_run.
    This tests the sync version of the fix for GitHub issue #6069.
    """
    # Custom event subclass carrying extra payload fields.
    @dataclass
    class WeatherRequestEvent(CustomEvent):
        city: str = ""
        temperature: int = 0
    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        """Get weather for a city, yielding a custom event first."""
        yield WeatherRequestEvent(city=city, temperature=70)
        yield f"It is currently 70 degrees and cloudy in {city}"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    session_id = "test_custom_event_continue"
    # Initial run that requires confirmation
    response = agent.run("What is the weather in Tokyo?", session_id=session_id)
    assert response.is_paused
    assert response.tools is not None
    assert response.tools[0].requires_confirmation
    assert response.tools[0].tool_name == "get_the_weather"
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    # Continue the run with streaming and stream_events
    events = {}
    for run_response_delta in agent.continue_run(
        run_id=response.run_id,
        updated_tools=response.tools,
        session_id=session_id,
        stream=True,
        stream_events=True,
    ):
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    # Verify that CustomEvent was yielded as a separate event
    assert RunEvent.custom_event in events.keys(), (
        f"CustomEvent should be yielded as a separate event. Got events: {events.keys()}"
    )
    assert len(events[RunEvent.custom_event]) == 1
    custom_event = events[RunEvent.custom_event][0]
    assert custom_event.city == "Tokyo"
    assert custom_event.temperature == 70
    # Verify tool_call_id is injected and matches the tool call
    tool_started_event = events[RunEvent.tool_call_started][0]
    assert custom_event.tool_call_id is not None, "tool_call_id should not be None"
    assert custom_event.tool_call_id == tool_started_event.tool.tool_call_id
    # Verify tool result contains the actual weather data
    tool_completed_event = events[RunEvent.tool_call_completed][0]
    assert "70 degrees" in str(tool_completed_event.tool.result)
def test_intermediate_steps_with_memory(shared_db):
    """With update_memory_on_run=True the stream also carries
    memory_update_started/completed events."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        update_memory_on_run=True,
        telemetry=False,
    )
    # Bucket every streamed delta by its event type.
    collected = {}
    for delta in agent.run("Hello, how are you?", stream=True, stream_events=True):
        collected.setdefault(delta.event, []).append(delta)
    expected_types = {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
        RunEvent.memory_update_started,
        RunEvent.memory_update_completed,
    }
    assert collected.keys() == expected_types
    # Exactly one of each lifecycle event; content arrives in multiple deltas.
    for event_type in expected_types - {RunEvent.run_content}:
        assert len(collected[event_type]) == 1
    assert len(collected[RunEvent.run_content]) > 1
def test_intermediate_steps_with_session_summary(shared_db):
    """With enable_session_summaries=True the stream also carries
    session_summary_started/completed events."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        enable_session_summaries=True,
        telemetry=False,
    )
    # Bucket every streamed delta by its event type.
    collected = {}
    for delta in agent.run("Hello, how are you?", stream=True, stream_events=True):
        collected.setdefault(delta.event, []).append(delta)
    expected_types = {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
        RunEvent.session_summary_started,
        RunEvent.session_summary_completed,
    }
    assert collected.keys() == expected_types
    # Exactly one of each lifecycle event; content arrives in multiple deltas.
    for event_type in expected_types - {RunEvent.run_content}:
        assert len(collected[event_type]) == 1
    assert len(collected[RunEvent.run_content]) > 1
def test_pre_hook_events_are_emitted(shared_db):
    """Each pre-hook emits a started/completed pair, and hooks see the input
    as modified by the hooks that ran before them."""
    def pre_hook_1(run_input: RunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 1)"
    def pre_hook_2(run_input: RunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 2)"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        pre_hooks=[pre_hook_1, pre_hook_2],
        telemetry=False,
    )
    response_generator = agent.run("Hello, how are you?", stream=True, stream_events=True)
    # Bucket every streamed delta by its event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.pre_hook_started,
        RunEvent.pre_hook_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    # One started/completed pair per hook.
    assert len(events[RunEvent.pre_hook_started]) == 2
    assert len(events[RunEvent.pre_hook_completed]) == 2
    # Hook 1 sees the raw input; its completed event shows the mutation.
    assert events[RunEvent.pre_hook_started][0].pre_hook_name == "pre_hook_1"
    assert events[RunEvent.pre_hook_started][0].run_input.input_content == "Hello, how are you?"
    assert events[RunEvent.pre_hook_completed][0].pre_hook_name == "pre_hook_1"
    assert (
        events[RunEvent.pre_hook_completed][0].run_input.input_content == "Hello, how are you? (Modified by pre-hook 1)"
    )
    # Hook 2 starts from the input already modified by hook 1.
    assert (
        events[RunEvent.pre_hook_started][1].run_input.input_content == "Hello, how are you? (Modified by pre-hook 1)"
    )
    assert events[RunEvent.pre_hook_started][1].pre_hook_name == "pre_hook_2"
    assert events[RunEvent.pre_hook_completed][1].pre_hook_name == "pre_hook_2"
    assert (
        events[RunEvent.pre_hook_completed][1].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1) (Modified by pre-hook 2)"
    )
@pytest.mark.asyncio
async def test_async_pre_hook_events_are_emitted(shared_db):
    """Async variant: each async pre-hook emits a started/completed pair and
    hooks see the input as modified by earlier hooks."""
    async def pre_hook_1(run_input: RunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 1)"
    async def pre_hook_2(run_input: RunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 2)"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        pre_hooks=[pre_hook_1, pre_hook_2],
        telemetry=False,
    )
    response_generator = agent.arun("Hello, how are you?", stream=True, stream_events=True)
    # Bucket every streamed delta by its event type.
    events = {}
    async for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.pre_hook_started,
        RunEvent.pre_hook_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }
    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    # One started/completed pair per hook.
    assert len(events[RunEvent.pre_hook_started]) == 2
    assert len(events[RunEvent.pre_hook_completed]) == 2
    # Hook 1 sees the raw input; its completed event shows the mutation.
    assert events[RunEvent.pre_hook_started][0].pre_hook_name == "pre_hook_1"
    assert events[RunEvent.pre_hook_started][0].run_input.input_content == "Hello, how are you?"
    assert events[RunEvent.pre_hook_completed][0].pre_hook_name == "pre_hook_1"
    assert (
        events[RunEvent.pre_hook_completed][0].run_input.input_content == "Hello, how are you? (Modified by pre-hook 1)"
    )
    # Hook 2 starts from the input already modified by hook 1.
    assert (
        events[RunEvent.pre_hook_started][1].run_input.input_content == "Hello, how are you? (Modified by pre-hook 1)"
    )
    assert events[RunEvent.pre_hook_started][1].pre_hook_name == "pre_hook_2"
    assert events[RunEvent.pre_hook_completed][1].pre_hook_name == "pre_hook_2"
    assert (
        events[RunEvent.pre_hook_completed][1].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1) (Modified by pre-hook 2)"
    )
def test_post_hook_events_are_emitted(shared_db):
    """Test that post hook events are emitted correctly during streaming.

    Two post hooks are registered; the run must emit one started/completed
    event pair per hook, in registration order, and the final content must
    carry both hooks' modifications.
    """

    def post_hook_1(run_output: RunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by post-hook 1)"

    def post_hook_2(run_output: RunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by post-hook 2)"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        post_hooks=[post_hook_1, post_hook_2],
        telemetry=False,
    )

    response_generator = agent.run("Hello, how are you?", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.post_hook_started,
        RunEvent.post_hook_completed,
        RunEvent.run_completed,
    }

    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    assert len(events[RunEvent.post_hook_started]) == 2
    assert len(events[RunEvent.post_hook_completed]) == 2

    # Verify first post hook
    assert events[RunEvent.post_hook_started][0].post_hook_name == "post_hook_1"
    assert events[RunEvent.post_hook_completed][0].post_hook_name == "post_hook_1"

    # Verify second post hook
    assert events[RunEvent.post_hook_started][1].post_hook_name == "post_hook_2"
    assert events[RunEvent.post_hook_completed][1].post_hook_name == "post_hook_2"

    # Verify final output includes modifications from both hooks
    final_event = events[RunEvent.run_completed][0]
    assert "(Modified by post-hook 1)" in str(final_event.content)
    assert "(Modified by post-hook 2)" in str(final_event.content)
@pytest.mark.asyncio
async def test_async_post_hook_events_are_emitted(shared_db):
    """Test that async post hook events are emitted correctly during streaming.

    Async variant: both coroutine post hooks must produce ordered
    started/completed event pairs and their modifications must appear in the
    final content.
    """

    async def post_hook_1(run_output: RunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by async post-hook 1)"

    async def post_hook_2(run_output: RunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by async post-hook 2)"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        post_hooks=[post_hook_1, post_hook_2],
        telemetry=False,
    )

    response_generator = agent.arun("Hello, how are you?", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    async for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.post_hook_started,
        RunEvent.post_hook_completed,
        RunEvent.run_completed,
    }

    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    assert len(events[RunEvent.post_hook_started]) == 2
    assert len(events[RunEvent.post_hook_completed]) == 2

    # Verify first post hook
    assert events[RunEvent.post_hook_started][0].post_hook_name == "post_hook_1"
    assert events[RunEvent.post_hook_completed][0].post_hook_name == "post_hook_1"

    # Verify second post hook
    assert events[RunEvent.post_hook_started][1].post_hook_name == "post_hook_2"
    assert events[RunEvent.post_hook_completed][1].post_hook_name == "post_hook_2"

    # Verify final output includes modifications from both hooks
    final_event = events[RunEvent.run_completed][0]
    assert "(Modified by async post-hook 1)" in str(final_event.content)
    assert "(Modified by async post-hook 2)" in str(final_event.content)
def test_pre_and_post_hook_events_are_emitted(shared_db):
    """Test that both pre and post hook events are emitted correctly during streaming.

    One pre hook and one post hook are registered; the event stream must
    contain a started/completed pair for each, and the final content must
    reflect the post hook's modification.
    """

    def pre_hook(run_input: RunInput) -> None:
        run_input.input_content += " (Modified by pre-hook)"

    def post_hook(run_output: RunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by post-hook)"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        pre_hooks=[pre_hook],
        post_hooks=[post_hook],
        telemetry=False,
    )

    response_generator = agent.run("Hello", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.pre_hook_started,
        RunEvent.pre_hook_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.post_hook_started,
        RunEvent.post_hook_completed,
        RunEvent.run_completed,
    }

    # Verify pre hook events
    assert len(events[RunEvent.pre_hook_started]) == 1
    assert len(events[RunEvent.pre_hook_completed]) == 1
    assert events[RunEvent.pre_hook_started][0].pre_hook_name == "pre_hook"
    assert events[RunEvent.pre_hook_completed][0].pre_hook_name == "pre_hook"

    # Verify post hook events
    assert len(events[RunEvent.post_hook_started]) == 1
    assert len(events[RunEvent.post_hook_completed]) == 1
    assert events[RunEvent.post_hook_started][0].post_hook_name == "post_hook"
    assert events[RunEvent.post_hook_completed][0].post_hook_name == "post_hook"

    # Verify final output includes modifications
    final_event = events[RunEvent.run_completed][0]
    assert "(Modified by post-hook)" in str(final_event.content)
def test_intermediate_steps_with_structured_output(shared_db):
    """Test event streaming when an output schema forces structured output.

    With a Pydantic output schema the content arrives as a single structured
    run_content event (not token deltas), and both the streamed events and the
    stored run output must carry the parsed Person payload.
    """

    class Person(BaseModel):
        name: str
        description: str
        age: int

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        output_schema=Person,
        telemetry=False,
    )

    response_generator = agent.run("Describe Elon Musk", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    run_response = agent.get_last_run_output()

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }

    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    # Structured output is emitted as one complete content event, not token deltas.
    assert len(events[RunEvent.run_content]) == 1
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1

    assert events[RunEvent.run_content][0].content is not None
    assert events[RunEvent.run_content][0].content_type == "Person"
    assert events[RunEvent.run_content][0].content.name == "Elon Musk"
    assert len(events[RunEvent.run_content][0].content.description) > 1

    assert events[RunEvent.run_completed][0].content is not None  # type: ignore
    assert events[RunEvent.run_completed][0].content_type == "Person"  # type: ignore
    assert events[RunEvent.run_completed][0].content.name == "Elon Musk"  # type: ignore
    assert len(events[RunEvent.run_completed][0].content.description) > 1  # type: ignore

    completed_event_structured = events[RunEvent.run_completed][0]
    assert completed_event_structured.metrics is not None
    assert completed_event_structured.metrics.total_tokens > 0

    assert run_response.content is not None
    assert run_response.content_type == "Person"
    assert run_response.content["name"] == "Elon Musk"
def test_intermediate_steps_with_parser_model(shared_db):
    """Test event streaming when a separate parser model produces the structured output.

    The primary model streams token deltas, then the parser model emits its
    own started/completed events plus a final structured content event.
    """

    class Person(BaseModel):
        name: str
        description: str
        age: int

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        output_schema=Person,
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        telemetry=False,
    )

    response_generator = agent.run("Describe Elon Musk", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    run_response = agent.get_last_run_output()

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.parser_model_response_started,
        RunEvent.parser_model_response_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }

    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.model_request_started]) == 1
    assert len(events[RunEvent.model_request_completed]) == 1
    assert len(events[RunEvent.parser_model_response_started]) == 1
    assert len(events[RunEvent.parser_model_response_completed]) == 1
    assert (
        len(events[RunEvent.run_content]) >= 2
    )  # The first model streams, then the parser model has a single content event
    assert len(events[RunEvent.run_content_completed]) == 1
    assert len(events[RunEvent.run_completed]) == 1

    # The last content event is the parser model's structured payload.
    assert events[RunEvent.run_content][-1].content is not None
    assert events[RunEvent.run_content][-1].content_type == "Person"
    assert events[RunEvent.run_content][-1].content.name == "Elon Musk"
    assert len(events[RunEvent.run_content][-1].content.description) > 1

    assert events[RunEvent.run_completed][0].content is not None  # type: ignore
    assert events[RunEvent.run_completed][0].content_type == "Person"  # type: ignore
    assert events[RunEvent.run_completed][0].content.name == "Elon Musk"  # type: ignore
    assert len(events[RunEvent.run_completed][0].content.description) > 1  # type: ignore

    assert run_response is not None
    assert run_response.content is not None
    assert run_response.content_type == "Person"
    assert run_response.content["name"] == "Elon Musk"
def test_run_completed_event_metrics_validation(shared_db):
    """Test that RunCompletedEvent properly includes populated metrics on completion.

    Also checks that the run persisted to the database carries the same
    populated metrics.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        store_events=True,
        telemetry=False,
    )

    response_generator = agent.run(
        "Get the current stock price of AAPL",
        session_id="test_session",
        stream=True,
        stream_events=True,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert RunEvent.run_completed in events
    completed_event = events[RunEvent.run_completed][0]

    # The original assertion here ("is not None or is None") was a tautology;
    # assert the field exists instead — its value may legitimately be None or a dict.
    assert hasattr(completed_event, "metadata")

    assert completed_event.metrics is not None, "Metrics should be populated on completion"
    metrics = completed_event.metrics
    assert metrics.total_tokens > 0, "Total tokens should be greater than 0"
    assert metrics.input_tokens >= 0, "Input tokens should be non-negative"
    assert metrics.output_tokens >= 0, "Output tokens should be non-negative"
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens, "Total should equal input + output"
    assert metrics.duration is not None, "Duration should be populated on completion"
    assert metrics.duration > 0, "Duration should be greater than 0"

    # The stored run must carry the same populated metrics.
    stored_session = agent.get_session(session_id="test_session")
    assert stored_session is not None and stored_session.runs is not None
    stored_run = stored_session.runs[0]
    assert stored_run.metrics is not None
    assert stored_run.metrics.total_tokens > 0
def test_model_request_events(shared_db):
    """Test that model request started and completed events are emitted.

    A single model round-trip should yield exactly one started/completed
    pair, with model identity and consistent token accounting on the
    completed event.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        store_events=True,
        telemetry=False,
    )

    response_generator = agent.run("Hello, how are you?", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }

    # Verify model request started event
    assert len(events[RunEvent.model_request_started]) == 1
    model_started = events[RunEvent.model_request_started][0]
    assert model_started.model == "gpt-4o-mini"
    assert model_started.model_provider == "OpenAI"

    # Verify model request completed event
    assert len(events[RunEvent.model_request_completed]) == 1
    model_completed = events[RunEvent.model_request_completed][0]
    assert model_completed.model == "gpt-4o-mini"
    assert model_completed.model_provider == "OpenAI"
    assert model_completed.input_tokens is not None
    assert model_completed.input_tokens > 0
    assert model_completed.output_tokens is not None
    assert model_completed.output_tokens > 0
    assert model_completed.total_tokens is not None
    assert model_completed.total_tokens == model_completed.input_tokens + model_completed.output_tokens

    # Verify new metrics fields exist (may be None)
    assert hasattr(model_completed, "time_to_first_token")
    assert hasattr(model_completed, "reasoning_tokens")
    assert hasattr(model_completed, "cache_read_tokens")
    assert hasattr(model_completed, "cache_write_tokens")
@pytest.mark.asyncio
async def test_async_model_request_events(shared_db):
    """Test that async model request started and completed events are emitted.

    Async variant: one started/completed pair with model identity and
    consistent token accounting.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        store_events=True,
        telemetry=False,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    async for run_response_delta in agent.arun("Hello, how are you?", stream=True, stream_events=True):
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
    }

    # Verify model request started event
    assert len(events[RunEvent.model_request_started]) == 1
    model_started = events[RunEvent.model_request_started][0]
    assert model_started.model == "gpt-4o-mini"
    assert model_started.model_provider == "OpenAI"

    # Verify model request completed event
    assert len(events[RunEvent.model_request_completed]) == 1
    model_completed = events[RunEvent.model_request_completed][0]
    assert model_completed.model == "gpt-4o-mini"
    assert model_completed.model_provider == "OpenAI"
    assert model_completed.input_tokens is not None
    assert model_completed.input_tokens > 0
    assert model_completed.output_tokens is not None
    assert model_completed.output_tokens > 0
    assert model_completed.total_tokens is not None
    assert model_completed.total_tokens == model_completed.input_tokens + model_completed.output_tokens

    # Verify new metrics fields exist (may be None)
    assert hasattr(model_completed, "time_to_first_token")
    assert hasattr(model_completed, "reasoning_tokens")
    assert hasattr(model_completed, "cache_read_tokens")
    assert hasattr(model_completed, "cache_write_tokens")
def test_model_request_events_with_tools(shared_db):
    """Test that multiple model request events are emitted when tools are used.

    A tool call forces at least two model round-trips (one to request the
    tool call, one to produce the final answer), so at least two
    started/completed event pairs must be observed.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[YFinanceTools(cache_results=True)],
        store_events=True,
        telemetry=False,
    )

    response_generator = agent.run("What is the stock price of Apple?", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert RunEvent.model_request_started in events.keys()
    assert RunEvent.model_request_completed in events.keys()

    # With tools, there should be at least 2 model requests (one for tool call, one for response)
    assert len(events[RunEvent.model_request_started]) >= 2, (
        f"Expected at least 2 model request started events, got {len(events[RunEvent.model_request_started])}"
    )
    assert len(events[RunEvent.model_request_completed]) >= 2, (
        f"Expected at least 2 model request completed events, got {len(events[RunEvent.model_request_completed])}"
    )

    # Verify all LLM completed events have model info and token counts
    for model_completed in events[RunEvent.model_request_completed]:
        assert model_completed.model == "gpt-4o-mini"
        assert model_completed.model_provider == "OpenAI"
        assert model_completed.input_tokens is not None
        assert model_completed.output_tokens is not None
        assert model_completed.total_tokens is not None
def test_memory_update_completed_contains_memories(shared_db):
    """Test that MemoryUpdateCompletedEvent contains the updated memories.

    With user memories enabled, one memory update started/completed pair is
    expected, and the completed event should expose a ``memories`` field.
    """
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        user_id="test_memory_user",
        enable_user_memories=True,
        telemetry=False,
    )

    # First run to create a memory
    response_generator = agent.run("My name is Alice and I live in Paris", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert RunEvent.memory_update_started in events.keys()
    assert RunEvent.memory_update_completed in events.keys()
    assert len(events[RunEvent.memory_update_started]) == 1
    assert len(events[RunEvent.memory_update_completed]) == 1

    # Verify memory_update_completed event has memories field
    memory_completed = events[RunEvent.memory_update_completed][0]
    assert hasattr(memory_completed, "memories")

    # The memories field should contain the user's memories (may be None if no memories created)
    if memory_completed.memories is not None:
        assert isinstance(memory_completed.memories, list)
        # If memories were created, verify structure
        if len(memory_completed.memories) > 0:
            assert hasattr(memory_completed.memories[0], "memory")
@pytest.mark.asyncio
async def test_async_memory_update_completed_contains_memories(shared_db):
    """Test that async MemoryUpdateCompletedEvent contains the updated memories."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        user_id="test_async_memory_user",
        enable_user_memories=True,
        telemetry=False,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    async for run_response_delta in agent.arun("My favorite color is blue", stream=True, stream_events=True):
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert RunEvent.memory_update_started in events.keys()
    assert RunEvent.memory_update_completed in events.keys()
    assert len(events[RunEvent.memory_update_started]) == 1
    assert len(events[RunEvent.memory_update_completed]) == 1

    # Verify memory_update_completed event has memories field
    memory_completed = events[RunEvent.memory_update_completed][0]
    assert hasattr(memory_completed, "memories")

    # The memories field should contain the user's memories (may be None if no memories created)
    if memory_completed.memories is not None:
        assert isinstance(memory_completed.memories, list)
def test_compression_events(shared_db):
    """Test that compression events are emitted when tool result compression is enabled.

    Compression is model-dependent, so the events are asserted only if
    compression actually occurred during the run.
    """

    @tool
    def get_large_data(query: str) -> str:
        """Returns a large amount of data for testing compression."""
        return f"Large data response for {query}: " + "x" * 500

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_large_data],
        compress_tool_results=True,
        store_events=True,
        telemetry=False,
    )

    response_generator = agent.run(
        "Get large data for 'test1' and 'test2'",
        stream=True,
        stream_events=True,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    # Compression events should be present when compression occurs
    if RunEvent.compression_started in events.keys():
        assert RunEvent.compression_completed in events.keys()

        # Verify compression started event
        assert len(events[RunEvent.compression_started]) >= 1

        # Verify compression completed event has stats
        assert len(events[RunEvent.compression_completed]) >= 1
        compression_completed = events[RunEvent.compression_completed][0]
        assert hasattr(compression_completed, "tool_results_compressed")
        assert hasattr(compression_completed, "original_size")
        assert hasattr(compression_completed, "compressed_size")
@pytest.mark.asyncio
async def test_async_compression_events(shared_db):
    """Test that async compression events are emitted when tool result compression is enabled."""

    @tool
    def get_large_data(query: str) -> str:
        """Returns a large amount of data for testing compression."""
        return f"Large data response for {query}: " + "x" * 500

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_large_data],
        compress_tool_results=True,
        store_events=True,
        telemetry=False,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    async for run_response_delta in agent.arun(
        "Get large data for 'test1' and 'test2'",
        stream=True,
        stream_events=True,
    ):
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    # Compression events should be present when compression occurs
    if RunEvent.compression_started in events.keys():
        assert RunEvent.compression_completed in events.keys()

        # Verify compression started event
        assert len(events[RunEvent.compression_started]) >= 1

        # Verify compression completed event has stats
        assert len(events[RunEvent.compression_completed]) >= 1
        compression_completed = events[RunEvent.compression_completed][0]
        assert hasattr(compression_completed, "tool_results_compressed")
        assert hasattr(compression_completed, "original_size")
        assert hasattr(compression_completed, "compressed_size")
def test_custom_event_properties_persist_after_db_reload(shared_db):
    """Test that custom event subclass properties persist after loading from database.

    A CustomEvent subclass with extra fields is yielded from a tool; the extra
    fields must survive both the live stream and a round-trip through the DB.
    """
    from dataclasses import field
    from typing import Any, Dict

    @dataclass
    class MimeEvent(CustomEvent):
        mime_type: str = ""
        data: Dict[str, Any] = field(default_factory=dict)

    def get_chart(city: str):
        """Get a chart for the given city."""
        yield MimeEvent(
            mime_type="application/echart+json",
            data={"title": "Test Chart", "series": [{"type": "pie"}]},
        )

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_chart],
        store_events=True,
        telemetry=False,
    )

    response_generator = agent.run("Get a chart for Tokyo", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert RunEvent.custom_event in events
    assert events[RunEvent.custom_event][0].mime_type == "application/echart+json"
    assert events[RunEvent.custom_event][0].data["title"] == "Test Chart"

    # Check stored events from DB
    stored_session = shared_db.get_sessions(session_type=SessionType.AGENT)[0]
    stored_run = stored_session.runs[0]
    custom_events = [e for e in stored_run.events if e.event == RunEvent.custom_event]
    assert len(custom_events) >= 1
    assert hasattr(custom_events[0], "mime_type")
    assert hasattr(custom_events[0], "data")
    assert custom_events[0].mime_type == "application/echart+json"
    assert custom_events[0].data["title"] == "Test Chart"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_event_streaming.py",
"license": "Apache License 2.0",
"lines": 1188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_event_streaming.py | from textwrap import dedent
import pytest
from pydantic import BaseModel
from agno.agent import RunEvent
from agno.agent.agent import Agent
from agno.db.in_memory.in_memory_db import InMemoryDb
from agno.models.openai.chat import OpenAIChat
from agno.run.team import TeamRunInput, TeamRunOutput
from agno.team import Team, TeamRunEvent
from agno.tools.calculator import CalculatorTools
from agno.tools.decorator import tool
from agno.tools.reasoning import ReasoningTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_basic_events():
    """Streaming without stream_events should yield only content deltas."""
    plain_team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        telemetry=False,
    )

    # Tally each event type seen while streaming.
    tally = {}
    for chunk in plain_team.run("Hello, how are you?", stream=True, stream_events=False):
        tally[chunk.event] = tally.get(chunk.event, 0) + 1

    # Only content deltas are expected, and more than one of them.
    assert set(tally) == {TeamRunEvent.run_content}
    assert tally[TeamRunEvent.run_content] > 1
@pytest.mark.asyncio
async def test_async_basic_events():
    """Async streaming without stream_events should yield only content deltas."""
    plain_team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        telemetry=False,
    )

    # Tally each event type seen while streaming.
    tally = {}
    async for chunk in plain_team.arun("Hello, how are you?", stream=True, stream_events=False):
        tally[chunk.event] = tally.get(chunk.event, 0) + 1

    # Only content deltas are expected, and more than one of them.
    assert set(tally) == {TeamRunEvent.run_content}
    assert tally[TeamRunEvent.run_content] > 1
def test_basic_intermediate_steps_events(shared_db):
    """Test the full intermediate-event sequence for a basic team run.

    With stream_events enabled the stream must contain the lifecycle events,
    and the persisted run must store the non-delta events in order with
    populated metrics on the completed event.
    """
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        db=shared_db,
        store_events=True,
        telemetry=False,
    )

    response_generator = team.run("Hello, how are you?", stream=True, stream_events=True)

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }

    assert len(events[TeamRunEvent.run_started]) == 1
    assert events[TeamRunEvent.run_started][0].model == "gpt-4o-mini"
    assert events[TeamRunEvent.run_started][0].model_provider == "OpenAI"
    assert events[TeamRunEvent.run_started][0].session_id is not None
    assert events[TeamRunEvent.run_started][0].team_id is not None
    assert events[TeamRunEvent.run_started][0].run_id is not None
    assert events[TeamRunEvent.run_started][0].created_at is not None
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1

    team_completed_event = events[TeamRunEvent.run_completed][0]
    assert hasattr(team_completed_event, "metadata")
    assert hasattr(team_completed_event, "metrics")
    assert team_completed_event.metrics is not None
    assert team_completed_event.metrics.total_tokens > 0

    # Content deltas are not persisted; only the lifecycle events are stored.
    run_response_from_storage = team.get_last_run_output()
    assert run_response_from_storage is not None
    assert run_response_from_storage.events is not None
    assert len(run_response_from_storage.events) == 5, (
        "We should have run_started, model_request_started, model_request_completed, run_content_completed, and run_completed events"
    )
    assert run_response_from_storage.events[0].event == TeamRunEvent.run_started
    assert run_response_from_storage.events[1].event == TeamRunEvent.model_request_started
    assert run_response_from_storage.events[2].event == TeamRunEvent.model_request_completed
    assert run_response_from_storage.events[3].event == TeamRunEvent.run_content_completed
    assert run_response_from_storage.events[4].event == TeamRunEvent.run_completed

    persisted_team_completed_event = run_response_from_storage.events[4]
    assert hasattr(persisted_team_completed_event, "metadata")
    assert hasattr(persisted_team_completed_event, "metrics")
    assert persisted_team_completed_event.metrics is not None
    assert persisted_team_completed_event.metrics.total_tokens > 0
def test_intermediate_steps_with_tools(shared_db):
    """Test tool-call events during a streaming team run.

    The team may call the finance tool directly or delegate to a member, so
    the assertions accept either tool name and only require at least one
    successful tool call.
    """
    team = Team(
        model=OpenAIChat(id="o3-mini"),
        members=[],
        tools=[YFinanceTools(cache_results=True)],
        db=shared_db,
        store_events=True,
        telemetry=False,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in team.run("What is the stock price of Apple?", stream=True, stream_events=True):
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.tool_call_started,
        TeamRunEvent.tool_call_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }

    assert len(events[TeamRunEvent.tool_call_started]) >= 1
    # The team may first try to delegate the task to a member, then call the tool directly
    tool_names = [event.tool.tool_name for event in events[TeamRunEvent.tool_call_started]]
    assert "get_current_stock_price" in tool_names or "delegate_task_to_member" in tool_names

    assert len(events[TeamRunEvent.tool_call_completed]) >= 1
    # Check that at least one tool call completed successfully
    completed_tools = [event for event in events[TeamRunEvent.tool_call_completed] if event.content is not None]
    assert len(completed_tools) >= 1
    assert any(event.tool.result is not None for event in events[TeamRunEvent.tool_call_completed])

    run_response_from_storage = team.get_last_run_output()
    assert run_response_from_storage is not None
    assert run_response_from_storage.events is not None
    assert len(run_response_from_storage.events) >= 4

    # Check that we have the essential events (may have more due to member delegation)
    event_types = [event.event for event in run_response_from_storage.events]
    assert TeamRunEvent.run_started in event_types
    assert TeamRunEvent.tool_call_started in event_types
    assert TeamRunEvent.tool_call_completed in event_types
    assert TeamRunEvent.run_content_completed in event_types
    assert TeamRunEvent.run_completed in event_types
def test_intermediate_steps_with_reasoning():
    """Test reasoning events during a streaming team run.

    With ReasoningTools the stream must include reasoning started/step/completed
    events carrying ReasoningStep(s) content.
    """
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        tools=[ReasoningTools(add_instructions=True)],
        instructions=dedent("""\
            You are an expert problem-solving assistant with strong analytical skills! 🧠
            Use step-by-step reasoning to solve the problem.
            \
            """),
        telemetry=False,
    )

    response_generator = team.run(
        "What is the sum of the first 10 natural numbers?",
        stream=True,
        stream_events=True,
    )

    # Group streamed deltas by event type; setdefault replaces the manual membership check.
    events = {}
    for run_response_delta in response_generator:
        events.setdefault(run_response_delta.event, []).append(run_response_delta)

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.tool_call_started,
        TeamRunEvent.tool_call_completed,
        TeamRunEvent.reasoning_started,
        TeamRunEvent.reasoning_completed,
        TeamRunEvent.reasoning_step,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }

    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    assert len(events[TeamRunEvent.tool_call_started]) > 1
    assert len(events[TeamRunEvent.tool_call_completed]) > 1
    assert len(events[TeamRunEvent.reasoning_started]) == 1
    assert len(events[TeamRunEvent.reasoning_completed]) == 1
    assert events[TeamRunEvent.reasoning_completed][0].content is not None
    assert events[TeamRunEvent.reasoning_completed][0].content_type == "ReasoningSteps"
    assert len(events[TeamRunEvent.reasoning_step]) > 1
    assert events[TeamRunEvent.reasoning_step][0].content is not None
    assert events[TeamRunEvent.reasoning_step][0].content_type == "ReasoningStep"
    assert events[TeamRunEvent.reasoning_step][0].reasoning_content is not None
@pytest.mark.skip(reason="Not yet implemented")
def test_intermediate_steps_with_user_confirmation():
    """A tool flagged requires_confirmation should pause the streamed run; after the
    tool call is confirmed, continue_run() resumes and executes it."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        tools=[get_the_weather],
        telemetry=False,
    )

    response_generator = team.run("What is the weather in Tokyo?", stream=True, stream_events=True)

    # First until we hit a pause
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    # The run starts and then pauses on the unconfirmed tool call — nothing else runs.
    assert events.keys() == {TeamRunEvent.run_started, TeamRunEvent.run_paused}
    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.run_paused]) == 1
    assert events[TeamRunEvent.run_paused][0].tools[0].requires_confirmation is True

    assert team.is_paused
    assert team.run_response.tools[0].requires_confirmation

    # Mark the tool as confirmed
    updated_tools = team.run_response.tools
    run_id = team.run_response.run_id
    updated_tools[0].confirmed = True

    # Then we continue the run
    response_generator = team.continue_run(run_id=run_id, updated_tools=updated_tools, stream=True, stream_events=True)
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    # The confirmed tool actually executed.
    assert team.run_response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
    # A resumed run emits run_continued instead of a fresh run_started.
    assert events.keys() == {
        TeamRunEvent.run_continued,
        TeamRunEvent.tool_call_started,
        TeamRunEvent.tool_call_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_continued]) == 1
    assert len(events[TeamRunEvent.tool_call_started]) == 1
    assert events[TeamRunEvent.tool_call_started][0].tool.tool_name == "get_the_weather"
    assert len(events[TeamRunEvent.tool_call_completed]) == 1
    assert events[TeamRunEvent.tool_call_completed][0].content is not None
    assert events[TeamRunEvent.tool_call_completed][0].tool.result is not None
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    assert team.run_response.is_paused is False
def test_intermediate_steps_with_memory(shared_db):
    """Streaming with update_memory_on_run=True should emit a memory-update event pair."""
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        db=shared_db,
        update_memory_on_run=True,
        telemetry=False,
    )

    # Collect every streamed chunk, grouped by event type.
    grouped = {}
    for chunk in team.run("Hello, how are you?", stream=True, stream_events=True):
        grouped.setdefault(chunk.event, []).append(chunk)

    assert set(grouped) == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
        TeamRunEvent.memory_update_started,
        TeamRunEvent.memory_update_completed,
    }
    # One of each lifecycle event, many content chunks, one memory-update cycle.
    assert len(grouped[TeamRunEvent.run_started]) == 1
    assert len(grouped[TeamRunEvent.run_content]) > 1
    assert len(grouped[TeamRunEvent.run_content_completed]) == 1
    assert len(grouped[TeamRunEvent.run_completed]) == 1
    assert len(grouped[TeamRunEvent.memory_update_started]) == 1
    assert len(grouped[TeamRunEvent.memory_update_completed]) == 1
def test_intermediate_steps_with_session_summary(shared_db):
    """Streaming with enable_session_summaries=True should emit a session-summary event pair."""
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        db=shared_db,
        enable_session_summaries=True,
        telemetry=False,
    )

    # Collect every streamed chunk, grouped by event type.
    grouped = {}
    for chunk in team.run("Hello, how are you?", stream=True, stream_events=True):
        grouped.setdefault(chunk.event, []).append(chunk)

    assert set(grouped) == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
        TeamRunEvent.session_summary_started,
        TeamRunEvent.session_summary_completed,
    }
    # One of each lifecycle event, many content chunks, one summary cycle.
    assert len(grouped[TeamRunEvent.run_started]) == 1
    assert len(grouped[TeamRunEvent.run_content]) > 1
    assert len(grouped[TeamRunEvent.run_content_completed]) == 1
    assert len(grouped[TeamRunEvent.run_completed]) == 1
    assert len(grouped[TeamRunEvent.session_summary_started]) == 1
    assert len(grouped[TeamRunEvent.session_summary_completed]) == 1
def test_pre_hook_events_are_emitted(shared_db):
    """Pre-hook started/completed events are emitted in registration order during
    streaming, and each hook observes the input as modified by the hooks before it."""

    def pre_hook_1(run_input: TeamRunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 1)"

    def pre_hook_2(run_input: TeamRunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 2)"

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        pre_hooks=[pre_hook_1, pre_hook_2],
        db=shared_db,
        telemetry=False,
    )

    response_generator = team.run("Hello, how are you?", stream=True, stream_events=True)

    # Group all streamed chunks by their event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.pre_hook_started,
        TeamRunEvent.pre_hook_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    # One started/completed pair per registered hook.
    assert len(events[TeamRunEvent.pre_hook_started]) == 2
    assert len(events[TeamRunEvent.pre_hook_completed]) == 2

    # First hook sees the original input and appends its marker.
    assert events[TeamRunEvent.pre_hook_started][0].pre_hook_name == "pre_hook_1"
    assert events[TeamRunEvent.pre_hook_started][0].run_input.input_content == "Hello, how are you?"
    assert events[TeamRunEvent.pre_hook_completed][0].pre_hook_name == "pre_hook_1"
    assert (
        events[TeamRunEvent.pre_hook_completed][0].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1)"
    )
    # Second hook starts from the first hook's output, so modifications accumulate.
    assert (
        events[TeamRunEvent.pre_hook_started][1].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1)"
    )
    assert events[TeamRunEvent.pre_hook_started][1].pre_hook_name == "pre_hook_2"
    assert events[TeamRunEvent.pre_hook_completed][1].pre_hook_name == "pre_hook_2"
    assert (
        events[TeamRunEvent.pre_hook_completed][1].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1) (Modified by pre-hook 2)"
    )
@pytest.mark.asyncio
async def test_async_pre_hook_events_are_emitted(shared_db):
    """Async variant: pre-hook events are emitted in registration order during an
    arun() stream, and each hook sees the input as modified by the previous hook."""

    async def pre_hook_1(run_input: TeamRunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 1)"

    async def pre_hook_2(run_input: TeamRunInput) -> None:
        run_input.input_content += " (Modified by pre-hook 2)"

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        pre_hooks=[pre_hook_1, pre_hook_2],
        db=shared_db,
        telemetry=False,
    )

    response_generator = team.arun("Hello, how are you?", stream=True, stream_events=True)

    # Group all streamed chunks by their event type.
    events = {}
    async for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.pre_hook_started,
        TeamRunEvent.pre_hook_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    # One started/completed pair per registered hook.
    assert len(events[TeamRunEvent.pre_hook_started]) == 2
    assert len(events[TeamRunEvent.pre_hook_completed]) == 2

    # First hook sees the original input and appends its marker.
    assert events[TeamRunEvent.pre_hook_started][0].pre_hook_name == "pre_hook_1"
    assert events[TeamRunEvent.pre_hook_started][0].run_input.input_content == "Hello, how are you?"
    assert events[TeamRunEvent.pre_hook_completed][0].pre_hook_name == "pre_hook_1"
    assert (
        events[TeamRunEvent.pre_hook_completed][0].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1)"
    )
    # Second hook starts from the first hook's output, so modifications accumulate.
    assert (
        events[TeamRunEvent.pre_hook_started][1].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1)"
    )
    assert events[TeamRunEvent.pre_hook_started][1].pre_hook_name == "pre_hook_2"
    assert events[TeamRunEvent.pre_hook_completed][1].pre_hook_name == "pre_hook_2"
    assert (
        events[TeamRunEvent.pre_hook_completed][1].run_input.input_content
        == "Hello, how are you? (Modified by pre-hook 1) (Modified by pre-hook 2)"
    )
def test_post_hook_events_are_emitted(shared_db):
    """Post-hook started/completed events are emitted per registered hook, and the
    hooks' modifications are visible in the final run output."""

    def post_hook_1(run_output: TeamRunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by post-hook 1)"

    def post_hook_2(run_output: TeamRunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by post-hook 2)"

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        post_hooks=[post_hook_1, post_hook_2],
        db=shared_db,
        telemetry=False,
    )

    # Collect every streamed chunk, grouped by event type.
    grouped = {}
    for chunk in team.run("Hello, how are you?", stream=True, stream_events=True):
        grouped.setdefault(chunk.event, []).append(chunk)

    assert set(grouped) == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.post_hook_started,
        TeamRunEvent.post_hook_completed,
        TeamRunEvent.run_completed,
    }
    assert len(grouped[TeamRunEvent.run_started]) == 1
    assert len(grouped[TeamRunEvent.run_content]) > 1
    assert len(grouped[TeamRunEvent.run_content_completed]) == 1
    assert len(grouped[TeamRunEvent.run_completed]) == 1
    # One started/completed pair per registered post hook.
    assert len(grouped[TeamRunEvent.post_hook_started]) == 2
    assert len(grouped[TeamRunEvent.post_hook_completed]) == 2

    # Hooks run in registration order.
    assert grouped[TeamRunEvent.post_hook_started][0].post_hook_name == "post_hook_1"
    assert grouped[TeamRunEvent.post_hook_completed][0].post_hook_name == "post_hook_1"
    assert grouped[TeamRunEvent.post_hook_started][1].post_hook_name == "post_hook_2"
    assert grouped[TeamRunEvent.post_hook_completed][1].post_hook_name == "post_hook_2"

    # Both hooks' markers appear in the final content.
    final_event = grouped[TeamRunEvent.run_completed][0]
    assert "(Modified by post-hook 1)" in str(final_event.content)
    assert "(Modified by post-hook 2)" in str(final_event.content)
@pytest.mark.asyncio
async def test_async_post_hook_events_are_emitted(shared_db):
    """Async variant: post-hook events are emitted per registered hook during an
    arun() stream, and the hooks' modifications appear in the final output."""

    async def post_hook_1(run_output: TeamRunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by async post-hook 1)"

    async def post_hook_2(run_output: TeamRunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by async post-hook 2)"

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        post_hooks=[post_hook_1, post_hook_2],
        db=shared_db,
        telemetry=False,
    )

    response_generator = team.arun("Hello, how are you?", stream=True, stream_events=True)

    # Group all streamed chunks by their event type.
    events = {}
    async for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.post_hook_started,
        TeamRunEvent.post_hook_completed,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    # One started/completed pair per registered post hook.
    assert len(events[TeamRunEvent.post_hook_started]) == 2
    assert len(events[TeamRunEvent.post_hook_completed]) == 2

    # Verify first post hook
    assert events[TeamRunEvent.post_hook_started][0].post_hook_name == "post_hook_1"
    assert events[TeamRunEvent.post_hook_completed][0].post_hook_name == "post_hook_1"

    # Verify second post hook
    assert events[TeamRunEvent.post_hook_started][1].post_hook_name == "post_hook_2"
    assert events[TeamRunEvent.post_hook_completed][1].post_hook_name == "post_hook_2"

    # Verify final output includes modifications from both hooks
    final_event = events[TeamRunEvent.run_completed][0]
    assert "(Modified by async post-hook 1)" in str(final_event.content)
    assert "(Modified by async post-hook 2)" in str(final_event.content)
def test_pre_and_post_hook_events_are_emitted(shared_db):
    """Both pre- and post-hook lifecycle events should appear in the same stream, and
    the post hook's modification should be visible in the final output."""

    def pre_hook(run_input: TeamRunInput) -> None:
        run_input.input_content += " (Modified by pre-hook)"

    def post_hook(run_output: TeamRunOutput) -> None:
        run_output.content = str(run_output.content) + " (Modified by post-hook)"

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        pre_hooks=[pre_hook],
        post_hooks=[post_hook],
        db=shared_db,
        telemetry=False,
    )

    # Collect every streamed chunk, grouped by event type.
    grouped = {}
    for chunk in team.run("Hello", stream=True, stream_events=True):
        grouped.setdefault(chunk.event, []).append(chunk)

    assert set(grouped) == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.pre_hook_started,
        TeamRunEvent.pre_hook_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.post_hook_started,
        TeamRunEvent.post_hook_completed,
        TeamRunEvent.run_completed,
    }

    # Exactly one pre-hook cycle, attributed to the right hook.
    assert len(grouped[TeamRunEvent.pre_hook_started]) == 1
    assert len(grouped[TeamRunEvent.pre_hook_completed]) == 1
    assert grouped[TeamRunEvent.pre_hook_started][0].pre_hook_name == "pre_hook"
    assert grouped[TeamRunEvent.pre_hook_completed][0].pre_hook_name == "pre_hook"

    # Exactly one post-hook cycle, attributed to the right hook.
    assert len(grouped[TeamRunEvent.post_hook_started]) == 1
    assert len(grouped[TeamRunEvent.post_hook_completed]) == 1
    assert grouped[TeamRunEvent.post_hook_started][0].post_hook_name == "post_hook"
    assert grouped[TeamRunEvent.post_hook_completed][0].post_hook_name == "post_hook"

    # The post hook's marker made it into the final content.
    final_event = grouped[TeamRunEvent.run_completed][0]
    assert "(Modified by post-hook)" in str(final_event.content)
def test_intermediate_steps_with_structured_output(shared_db):
    """With an output schema, structured content arrives as a single content event
    (not token deltas) and the completed event carries the parsed model plus metrics."""

    class Person(BaseModel):
        name: str
        description: str
        age: int

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        db=shared_db,
        output_schema=Person,
        instructions="You have no members, answer directly",
        telemetry=False,
    )

    # Collect every streamed chunk, grouped by event type.
    grouped = {}
    for chunk in team.run("Describe Elon Musk", stream=True, stream_events=True):
        grouped.setdefault(chunk.event, []).append(chunk)

    assert set(grouped) == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }
    assert len(grouped[TeamRunEvent.run_started]) == 1
    # Structured output is emitted as one content event rather than many deltas.
    assert len(grouped[TeamRunEvent.run_content]) == 1
    assert len(grouped[TeamRunEvent.run_content_completed]) == 1
    assert len(grouped[TeamRunEvent.run_completed]) == 1

    # The content event carries the parsed Person.
    content_event = grouped[TeamRunEvent.run_content][0]
    assert content_event.content is not None
    assert content_event.content_type == "Person"
    assert content_event.content.name == "Elon Musk"
    assert len(content_event.content.description) > 1

    # So does the completed event, which also reports usage metrics.
    completed_event = grouped[TeamRunEvent.run_completed][0]
    assert completed_event.content is not None
    assert completed_event.content_type == "Person"
    assert completed_event.content.name == "Elon Musk"
    assert len(completed_event.content.description) > 1
    assert completed_event.metrics is not None
    assert completed_event.metrics.total_tokens > 0
def test_intermediate_steps_with_parser_model(shared_db):
    """With a parser_model configured, the primary model's response streams first and is
    then re-parsed into the output schema, emitting parser-model lifecycle events."""

    class Person(BaseModel):
        name: str
        description: str
        age: int

    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[],
        db=shared_db,
        output_schema=Person,
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You have no members, answer directly",
        telemetry=False,
    )

    response_generator = team.run("Describe Elon Musk", stream=True, stream_events=True)

    # Group all streamed chunks by their event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    run_response = team.get_last_run_output()

    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.parser_model_response_started,
        TeamRunEvent.parser_model_response_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.parser_model_response_started]) == 1
    assert len(events[TeamRunEvent.parser_model_response_completed]) == 1
    assert (
        len(events[TeamRunEvent.run_content]) >= 2
    )  # The first model streams, then the parser model has a single content event

    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    # The last content event carries the parsed structured output.
    assert events[TeamRunEvent.run_content][-1].content is not None
    assert events[TeamRunEvent.run_content][-1].content_type == "Person"

    # Handle both dict and Pydantic model cases
    content = events[TeamRunEvent.run_content][-1].content
    if isinstance(content, dict):
        assert content["name"] == "Elon Musk"
        assert len(content["description"]) > 1
    else:
        assert content.name == "Elon Musk"
        assert len(content.description) > 1

    assert events[TeamRunEvent.run_completed][0].content is not None
    assert events[TeamRunEvent.run_completed][0].content_type == "Person"

    # Handle both dict and Pydantic model cases
    completed_content = events[TeamRunEvent.run_completed][0].content
    if isinstance(completed_content, dict):
        assert completed_content["name"] == "Elon Musk"
        assert len(completed_content["description"]) > 1
    else:
        assert completed_content.name == "Elon Musk"
        assert len(completed_content.description) > 1

    # The persisted run output matches what was streamed.
    assert run_response.content is not None
    assert run_response.content_type == "Person"

    # Handle both dict and Pydantic model cases
    response_content = run_response.content
    if isinstance(response_content, dict):
        assert response_content["name"] == "Elon Musk"
        assert len(response_content["description"]) > 1
    else:
        assert response_content.name == "Elon Musk"
        assert len(response_content.description) > 1
def test_intermediate_steps_with_member_agents():
    """Member-agent (RunEvent) events are interleaved with team (TeamRunEvent) events
    when streaming a team that delegates to two member agents."""
    agent_1 = Agent(
        name="Analyst",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You are an expert problem-solving assistant with strong analytical skills! 🧠",
        tools=[ReasoningTools(add_instructions=True)],
    )
    agent_2 = Agent(
        name="Math Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You can do Math!",
        tools=[CalculatorTools()],
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent_1, agent_2],
        telemetry=False,
    )

    response_generator = team.run(
        "Analyse and then solve the problem: 'solve 10 factorial'", stream=True, stream_events=True
    )

    # Group all streamed chunks by their event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    # Both team-level and member-level event types appear in one stream.
    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.tool_call_started,
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.tool_call_completed,
        RunEvent.reasoning_started,
        RunEvent.reasoning_step,
        RunEvent.reasoning_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
        TeamRunEvent.tool_call_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_started]) == 1

    # Transfer twice, from team to member agents
    assert len(events[TeamRunEvent.tool_call_started]) == 2
    assert events[TeamRunEvent.tool_call_started][0].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_started][0].tool.tool_args["member_id"] == "analyst"
    assert events[TeamRunEvent.tool_call_started][1].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_started][1].tool.tool_args["member_id"] == "math-agent"
    assert len(events[TeamRunEvent.tool_call_completed]) == 2
    assert events[TeamRunEvent.tool_call_completed][0].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_completed][0].tool.result is not None
    assert events[TeamRunEvent.tool_call_completed][1].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_completed][1].tool.result is not None
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_content_completed]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1

    # Two member agents
    assert len(events[RunEvent.run_started]) == 2
    # Each member run is parented to the team run.
    assert events[RunEvent.run_started][0].parent_run_id == events[TeamRunEvent.run_started][0].run_id
    assert events[RunEvent.run_started][1].parent_run_id == events[TeamRunEvent.run_started][0].run_id
    assert len(events[RunEvent.run_completed]) == 2
    assert events[RunEvent.run_completed][0].parent_run_id == events[TeamRunEvent.run_completed][0].run_id
    assert events[RunEvent.run_completed][1].parent_run_id == events[TeamRunEvent.run_completed][0].run_id

    # Lots of member tool calls
    assert len(events[RunEvent.tool_call_started]) > 1
    assert len(events[RunEvent.tool_call_completed]) > 1
    # Only the Analyst carries ReasoningTools, so exactly one reasoning cycle with many steps.
    assert len(events[RunEvent.reasoning_started]) == 1
    assert len(events[RunEvent.reasoning_completed]) == 1
    assert len(events[RunEvent.reasoning_step]) > 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) >= 1
def test_intermediate_steps_with_member_agents_only_member_events():
    """With stream_events=False but stream_member_events=True, only member (RunEvent)
    events are streamed, plus the member's content restreamed as team content.

    Fixes in this revision: the original repeated the same length assertions twice
    (a copy-paste of the earlier block) and carried misleading comments copied from
    the two-member test ("Two member agents", "Lots of member tool calls") even
    though this team has a single member making exactly one tool call.
    """
    agent_math = Agent(
        name="Math Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You can do Math!",
        tools=[CalculatorTools()],
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent_math],
        telemetry=False,
        stream_member_events=True,
    )

    response_generator = team.run(
        "Analyse and then solve the problem: 'solve 10 factorial'",
        stream=True,
        stream_events=False,  # stream_events=False to only stream member events
    )

    # Group all streamed chunks by their event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    # Only member-level events plus the restreamed team content should appear.
    assert events.keys() == {
        RunEvent.run_started,
        RunEvent.model_request_started,
        RunEvent.model_request_completed,
        RunEvent.tool_call_started,
        RunEvent.tool_call_completed,
        RunEvent.run_content,
        RunEvent.run_content_completed,
        RunEvent.run_completed,
        TeamRunEvent.run_content,
    }

    # Agent content restreamed as team content
    assert len(events[TeamRunEvent.run_content]) > 1

    # Single member agent: one run lifecycle, one calculator tool call, streamed content.
    assert len(events[RunEvent.run_started]) == 1
    assert len(events[RunEvent.run_completed]) == 1
    assert len(events[RunEvent.tool_call_started]) == 1
    assert len(events[RunEvent.tool_call_completed]) == 1
    assert len(events[RunEvent.run_content]) > 1
    assert len(events[RunEvent.run_content_completed]) == 1

    # The member run is parented to the team run.
    assert events[RunEvent.run_started][0].parent_run_id == events[TeamRunEvent.run_content][0].run_id
    assert events[RunEvent.run_completed][0].parent_run_id == events[TeamRunEvent.run_content][0].run_id
def test_intermediate_steps_with_member_agents_nested_team():
    """A team containing both an agent and a nested sub-team must emit the required
    lifecycle events; reasoning and member-tool events are model-dependent, so they
    are allowed but not required."""
    agent_1 = Agent(
        name="Finance Analyst",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You are an expert finance analyst with strong analytical skills! 🧠",
        tools=[YFinanceTools(cache_results=True)],
    )
    sub_team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        name="News Team",
        members=[],
        tools=[WebSearchTools(cache_results=True)],
        telemetry=False,
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent_1, sub_team],
        tools=[ReasoningTools(add_instructions=True)],
        telemetry=False,
    )

    response_generator = team.run("Do a stock market analysis for Apple.", stream=True, stream_events=True)

    # Group all streamed chunks by their event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    # Core events that must always be present
    required_events = {
        TeamRunEvent.run_started.value,
        TeamRunEvent.model_request_started.value,
        TeamRunEvent.model_request_completed.value,
        TeamRunEvent.tool_call_started.value,
        TeamRunEvent.tool_call_completed.value,
        RunEvent.run_started.value,
        RunEvent.model_request_started.value,
        RunEvent.model_request_completed.value,
        RunEvent.run_content.value,
        RunEvent.run_content_completed.value,
        RunEvent.run_completed.value,
        TeamRunEvent.run_content.value,
        TeamRunEvent.run_content_completed.value,
        TeamRunEvent.run_completed.value,
    }

    # Reasoning events are optional - model may or may not use the reasoning tool
    optional_reasoning_events = {
        TeamRunEvent.reasoning_started.value,
        TeamRunEvent.reasoning_step.value,
        TeamRunEvent.reasoning_completed.value,
    }

    # Member agent tool events are optional - depends on delegation
    optional_member_tool_events = {
        RunEvent.tool_call_started.value,
        RunEvent.tool_call_completed.value,
    }

    actual_events = set(events.keys())

    # Check that all required events are present
    assert required_events.issubset(actual_events), f"Missing required events: {required_events - actual_events}"

    # Check that actual events are within the expected set (required + optional)
    all_expected = required_events | optional_reasoning_events | optional_member_tool_events
    unexpected_events = actual_events - all_expected
    assert not unexpected_events, f"Unexpected events: {unexpected_events}"
def test_intermediate_steps_with_member_agents_streaming_off():
    """With stream_member_events=False, only team-level (TeamRunEvent) events are
    streamed; the member agents' RunEvents are suppressed entirely."""
    agent_1 = Agent(
        name="Analyst",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You are an expert problem-solving assistant with strong analytical skills! 🧠",
        tools=[ReasoningTools(add_instructions=True)],
    )
    agent_2 = Agent(
        name="Math Agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You can do Math!",
        tools=[CalculatorTools()],
    )
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[agent_1, agent_2],
        telemetry=False,
        stream_member_events=False,
    )

    response_generator = team.run(
        "Analyse and then solve the problem: 'solve 10 factorial'", stream=True, stream_events=True
    )

    # Group all streamed chunks by their event type.
    events = {}
    for run_response_delta in response_generator:
        if run_response_delta.event not in events:
            events[run_response_delta.event] = []
        events[run_response_delta.event].append(run_response_delta)

    # Only team-level events: no RunEvent entries despite member delegation.
    assert events.keys() == {
        TeamRunEvent.run_started,
        TeamRunEvent.model_request_started,
        TeamRunEvent.model_request_completed,
        TeamRunEvent.tool_call_started,
        TeamRunEvent.tool_call_completed,
        TeamRunEvent.run_content,
        TeamRunEvent.run_content_completed,
        TeamRunEvent.run_completed,
    }
    assert len(events[TeamRunEvent.run_started]) == 1

    # Transfer twice, from team to member agents
    assert len(events[TeamRunEvent.tool_call_started]) == 2
    assert events[TeamRunEvent.tool_call_started][0].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_started][0].tool.tool_args["member_id"] == "analyst"
    assert events[TeamRunEvent.tool_call_started][1].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_started][1].tool.tool_args["member_id"] == "math-agent"
    assert len(events[TeamRunEvent.tool_call_completed]) == 2
    assert events[TeamRunEvent.tool_call_completed][0].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_completed][0].tool.result is not None
    assert events[TeamRunEvent.tool_call_completed][1].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_completed][1].tool.result is not None
    assert len(events[TeamRunEvent.run_content]) > 1
    assert len(events[TeamRunEvent.run_completed]) == 1
def test_intermediate_steps_with_member_agents_delegate_to_all_members():
    """With delegate_to_all_members=True, the team forwards the task to every member
    through a single delegate_task_to_members call, and each member runs once."""

    def get_news_from_hackernews(query: str):
        return "The best way to learn to code is to use the Hackernews API."

    def get_news_from_duckduckgo(query: str):
        return "The best way to learn to code is to use the DuckDuckGo API."

    agent_1 = Agent(
        name="Web Researcher",
        model=OpenAIChat(id="o3-mini"),
        instructions="You are an expert web researcher with strong analytical skills! Use your tools to find answers to questions.",
        tools=[get_news_from_duckduckgo],
        stream_events=True,
    )
    agent_2 = Agent(
        name="Hackernews Researcher",
        model=OpenAIChat(id="o3-mini"),
        instructions="You are an expert hackernews researcher with strong analytical skills! Use your tools to find answers to questions.",
        tools=[get_news_from_hackernews],
        stream_events=True,
    )
    team = Team(
        model=OpenAIChat(id="o3-mini"),
        members=[agent_1, agent_2],
        telemetry=False,
        delegate_to_all_members=True,
        instructions="You are a discussion master. Forward the task to the member agents.",
    )

    # Collect every streamed chunk, grouped by event type.
    grouped = {}
    for chunk in team.run(
        input="Start the discussion on the topic: 'What is the best way to learn to code?'",
        stream=True,
        stream_events=True,
    ):
        grouped.setdefault(chunk.event, []).append(chunk)

    assert len(grouped[TeamRunEvent.run_started]) == 1

    # The team makes exactly one broadcast delegation call and streams its own content.
    assert len(grouped[TeamRunEvent.tool_call_started]) == 1
    assert len(grouped[TeamRunEvent.run_content]) > 1
    assert len(grouped[TeamRunEvent.run_completed]) == 1

    # The broadcast tool is delegate_task_to_members, and it returns a result.
    assert grouped[TeamRunEvent.tool_call_started][0].tool.tool_name == "delegate_task_to_members"
    assert len(grouped[TeamRunEvent.tool_call_completed]) == 1
    assert grouped[TeamRunEvent.tool_call_completed][0].tool.tool_name == "delegate_task_to_members"
    assert grouped[TeamRunEvent.tool_call_completed][0].tool.result is not None

    # Both member agents ran and streamed content.
    assert len(grouped[RunEvent.run_started]) == 2
    assert len(grouped[RunEvent.run_completed]) == 2
    assert len(grouped[RunEvent.run_content]) > 1
def test_tool_parent_run_id():
    """Verify a member agent's run records the delegating team run as its parent."""
    big_questions_agent = Agent(
        name="Big questions agent",
        model=OpenAIChat(id="gpt-5-mini"),
        instructions="You answer big questions.",
    )
    team = Team(
        model=OpenAIChat(id="gpt-5-mini"),
        members=[big_questions_agent],
        db=InMemoryDb(),
        instructions="Delegate to your member agents to answer the question.",
    )
    stream = team.run(
        input="What is the meaning of life?",
        session_id="test_session",
        stream=True,
        stream_events=True,
    )
    # Bucket every streamed chunk by its event type.
    events = {}
    for chunk in stream:
        events.setdefault(chunk.event, []).append(chunk)
    assert len(events[TeamRunEvent.run_started]) == 1
    assert len(events[TeamRunEvent.run_completed]) == 1
    # Model may delegate multiple times depending on its behavior
    assert len(events[TeamRunEvent.tool_call_started]) >= 1
    assert len(events[TeamRunEvent.tool_call_completed]) >= 1
    team_session = team.get_session(session_id="test_session")
    assert team_session is not None
    team_run = team_session.get_run(run_id=events[TeamRunEvent.run_started][0].run_id)
    assert team_run is not None
    member_run = team_session.get_run(run_id=events[RunEvent.run_started][0].run_id)
    assert member_run is not None
    assert member_run.parent_run_id == team_run.run_id
    # Assert expected tool call events - check the first delegate call
    assert events[TeamRunEvent.tool_call_started][0].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_started][0].run_id == member_run.parent_run_id
    assert events[TeamRunEvent.tool_call_completed][0].tool.tool_name == "delegate_task_to_member"
    assert events[TeamRunEvent.tool_call_completed][0].run_id == member_run.parent_run_id
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_event_streaming.py",
"license": "Apache License 2.0",
"lines": 981,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/utils/location.py | from typing import Any, Dict
import requests
from agno.utils.log import log_warning
def get_location() -> Dict[str, Any]:
    """Best-effort lookup of the machine's approximate location via IP geolocation.

    Returns:
        A dict with "city", "region" and "country" keys (values may be None),
        or an empty dict when either lookup fails.
    """
    try:
        # Resolve the public IP first, then geolocate it. Both calls use a
        # short timeout so a flaky network cannot stall the caller.
        ip_response = requests.get("https://api.ipify.org?format=json", timeout=5)
        # Fail fast with a clear HTTP error instead of trying to parse an
        # error page as JSON (the original parsed unconditionally).
        ip_response.raise_for_status()
        ip = ip_response.json()["ip"]
        geo_response = requests.get(f"http://ip-api.com/json/{ip}", timeout=5)
        if geo_response.status_code == 200:
            data = geo_response.json()
            return {"city": data.get("city"), "region": data.get("region"), "country": data.get("country")}
    except Exception as e:
        log_warning(f"Failed to get location: {e}")
    return {}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/location.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/tools/aws_ses.py | from typing import Optional
from agno.tools import Toolkit
from agno.utils.log import log_debug
try:
import boto3
except ImportError:
raise ImportError("boto3 is required for AWSSESTool. Please install it using `pip install boto3`.")
class AWSSESTool(Toolkit):
    """Toolkit for sending plain-text emails through AWS Simple Email Service (SES)."""

    def __init__(
        self,
        sender_email: Optional[str] = None,
        sender_name: Optional[str] = None,
        region_name: str = "us-east-1",
        enable_send_email: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """
        Args:
            sender_email: SES sender address used as the message source.
            sender_name: Display name shown alongside the sender address.
            region_name: AWS region the SES client is created in.
            enable_send_email: Register the send_email tool.
            all: Register every available tool regardless of individual flags.
        """
        tools = []
        if all or enable_send_email:
            tools.append(self.send_email)
        super().__init__(name="aws_ses_tool", tools=tools, **kwargs)
        # Credentials come from boto3's standard resolution chain
        # (env vars, shared config, instance role).
        self.client = boto3.client("ses", region_name=region_name)
        self.sender_email = sender_email
        self.sender_name = sender_name

    def send_email(self, subject: str, body: str, receiver_email: str) -> str:
        """
        Use this tool to send an email using AWS SES.

        Args:
            subject: The subject of the email
            body: The body of the email
            receiver_email: The email address of the receiver

        Returns:
            A human-readable status message.

        Raises:
            Exception: If the SES client is not initialized or the send fails.
        """
        if not self.client:
            raise Exception("AWS SES client not initialized. Please check the configuration.")
        if not subject:
            return "Email subject cannot be empty."
        if not body:
            return "Email body cannot be empty."
        if not receiver_email:
            # Consistent with the subject/body guards: report the problem
            # instead of letting SES reject the malformed request.
            return "Receiver email cannot be empty."
        try:
            response = self.client.send_email(
                Source=f"{self.sender_name} <{self.sender_email}>",
                Destination={
                    "ToAddresses": [receiver_email],
                },
                Message={
                    "Body": {
                        "Text": {
                            "Charset": "UTF-8",
                            "Data": body,
                        },
                    },
                    "Subject": {
                        "Charset": "UTF-8",
                        "Data": subject,
                    },
                },
            )
            log_debug(f"Email sent with message ID: {response['MessageId']}")
            return "Email sent successfully!"
        except Exception as e:
            raise Exception(f"Failed to send email: {e}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/aws_ses.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_aws_ses.py | """Unit tests for AWS SES Tool"""
from unittest.mock import Mock, patch
import pytest
from botocore.exceptions import ClientError
from agno.tools.aws_ses import AWSSESTool
class TestAWSSESTool:
    """Test cases for AWSSESTool.

    Every test patches ``boto3.client`` so no real AWS call is ever made; the
    patched client records the calls that the assertions inspect.
    """

    @patch("boto3.client")
    def test_initialization_default_region(self, mock_boto_client):
        """Test tool initialization with default region"""
        # Arrange
        mock_client = Mock()
        mock_boto_client.return_value = mock_client
        # Act
        tool = AWSSESTool(sender_email="test@example.com", sender_name="Test Sender")
        # Assert: default region is us-east-1 and attributes are stored as-is.
        mock_boto_client.assert_called_once_with("ses", region_name="us-east-1")
        assert tool.sender_email == "test@example.com"
        assert tool.sender_name == "Test Sender"
        assert tool.client == mock_client
        assert tool.name == "aws_ses_tool"

    @patch("boto3.client")
    def test_initialization_custom_region(self, mock_boto_client):
        """Test tool initialization with custom region"""
        # Arrange
        mock_client = Mock()
        mock_boto_client.return_value = mock_client
        # Act
        AWSSESTool(sender_email="test@example.com", sender_name="Test Sender", region_name="us-west-2")
        # Assert: the explicit region is forwarded to boto3.
        mock_boto_client.assert_called_once_with("ses", region_name="us-west-2")

    @patch("boto3.client")
    def test_send_email_success(self, mock_boto_client):
        """Test successful email sending"""
        # Arrange
        mock_client = Mock()
        mock_response = {
            "MessageId": "0101019740cf4f5e-8e090a0f-9edf-4a3d-b5bf-78667b95c2c7-000000",
            "ResponseMetadata": {"RequestId": "test-request-id", "HTTPStatusCode": 200},
        }
        mock_client.send_email.return_value = mock_response
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender", region_name="us-west-2")
        # Act
        result = tool.send_email(subject="Test Subject", body="Test Body", receiver_email="receiver@example.com")
        # Assert: the exact SES payload shape (Source/Destination/Message) is pinned.
        assert result == "Email sent successfully!"
        mock_client.send_email.assert_called_once_with(
            Source="Test Sender <sender@example.com>",
            Destination={
                "ToAddresses": ["receiver@example.com"],
            },
            Message={
                "Body": {
                    "Text": {
                        "Charset": "UTF-8",
                        "Data": "Test Body",
                    },
                },
                "Subject": {
                    "Charset": "UTF-8",
                    "Data": "Test Subject",
                },
            },
        )

    @patch("boto3.client")
    def test_send_email_empty_subject(self, mock_boto_client):
        """Test email sending with empty subject"""
        # Arrange
        mock_client = Mock()
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act
        result = tool.send_email(subject="", body="Test Body", receiver_email="receiver@example.com")
        # Assert: validation short-circuits before any AWS call.
        assert result == "Email subject cannot be empty."
        mock_client.send_email.assert_not_called()

    @patch("boto3.client")
    def test_send_email_empty_body(self, mock_boto_client):
        """Test email sending with empty body"""
        # Arrange
        mock_client = Mock()
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act
        result = tool.send_email(subject="Test Subject", body="", receiver_email="receiver@example.com")
        # Assert: validation short-circuits before any AWS call.
        assert result == "Email body cannot be empty."
        mock_client.send_email.assert_not_called()

    @patch("boto3.client")
    def test_send_email_invalid_email_format(self, mock_boto_client):
        """Test email sending with invalid email format"""
        # Arrange: SES itself rejects the malformed address via ClientError.
        mock_client = Mock()
        mock_error_response = {"Error": {"Code": "InvalidParameterValue", "Message": "Missing final '@domain'"}}
        mock_client.send_email.side_effect = ClientError(mock_error_response, "SendEmail")
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act & Assert: the ClientError is wrapped in a generic Exception.
        with pytest.raises(Exception) as exc_info:
            tool.send_email(subject="Test Subject", body="Test Body", receiver_email="invalidemailformat")
        assert "Failed to send email" in str(exc_info.value)
        assert "Missing final '@domain'" in str(exc_info.value)

    @patch("boto3.client")
    def test_send_email_aws_error(self, mock_boto_client):
        """Test email sending with AWS error"""
        # Arrange: simulate SES refusing an unverified recipient.
        mock_client = Mock()
        mock_error_response = {"Error": {"Code": "MessageRejected", "Message": "Email address is not verified."}}
        mock_client.send_email.side_effect = ClientError(mock_error_response, "SendEmail")
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act & Assert
        with pytest.raises(Exception) as exc_info:
            tool.send_email(subject="Test Subject", body="Test Body", receiver_email="unverified@example.com")
        assert "Failed to send email" in str(exc_info.value)
        assert "Email address is not verified" in str(exc_info.value)

    @patch("boto3.client")
    def test_send_email_no_client(self, mock_boto_client):
        """Test email sending when client is not initialized"""
        # Arrange
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        tool.client = None  # Simulate client not initialized
        # Act & Assert
        with pytest.raises(Exception) as exc_info:
            tool.send_email(subject="Test Subject", body="Test Body", receiver_email="receiver@example.com")
        assert "AWS SES client not initialized" in str(exc_info.value)

    @patch("boto3.client")
    def test_send_email_with_special_characters(self, mock_boto_client):
        """Test email sending with special characters in content"""
        # Arrange
        mock_client = Mock()
        mock_response = {"MessageId": "test-message-id", "ResponseMetadata": {"HTTPStatusCode": 200}}
        mock_client.send_email.return_value = mock_response
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act: non-ASCII content must pass through untouched (UTF-8 charset).
        result = tool.send_email(
            subject="Test Subject with émojis 🎉",
            body="Body with special chars: ñ, ü, é, 中文, 日本語",
            receiver_email="receiver@example.com",
        )
        # Assert
        assert result == "Email sent successfully!"
        call_args = mock_client.send_email.call_args[1]
        assert call_args["Message"]["Subject"]["Data"] == "Test Subject with émojis 🎉"
        assert "中文" in call_args["Message"]["Body"]["Text"]["Data"]

    @patch("boto3.client")
    def test_send_email_multiple_calls(self, mock_boto_client):
        """Test multiple email sends"""
        # Arrange
        mock_client = Mock()
        mock_response = {"MessageId": "test-message-id", "ResponseMetadata": {"HTTPStatusCode": 200}}
        mock_client.send_email.return_value = mock_response
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act
        result1 = tool.send_email(subject="First Email", body="First Body", receiver_email="receiver1@example.com")
        result2 = tool.send_email(subject="Second Email", body="Second Body", receiver_email="receiver2@example.com")
        # Assert: one SES call per invocation, no hidden caching.
        assert result1 == "Email sent successfully!"
        assert result2 == "Email sent successfully!"
        assert mock_client.send_email.call_count == 2

    def test_import_error_handling(self):
        """Test that import error is handled properly"""
        # This test verifies that the module imports correctly
        # The actual ImportError is raised at module level if boto3 is missing
        from agno.tools.aws_ses import AWSSESTool

        assert AWSSESTool is not None

    @patch("boto3.client")
    def test_send_email_return_message_id(self, mock_boto_client):
        """Test that send_email returns success message with message ID"""
        # Arrange
        mock_client = Mock()
        message_id = "0101019740cf4f5e-8e090a0f-9edf-4a3d-b5bf-78667b95c2c7-000000"
        mock_response = {"MessageId": message_id, "ResponseMetadata": {"HTTPStatusCode": 200}}
        mock_client.send_email.return_value = mock_response
        mock_boto_client.return_value = mock_client
        tool = AWSSESTool(sender_email="sender@example.com", sender_name="Test Sender")
        # Act: also patch the logger so the debug message itself can be asserted.
        with patch("agno.tools.aws_ses.log_debug") as mock_log:
            result = tool.send_email(subject="Test", body="Test", receiver_email="test@example.com")
        # Assert
        assert result == "Email sent successfully!"
        mock_log.assert_called_once_with(f"Email sent with message ID: {message_id}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_aws_ses.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/daytona.py | import json
from os import getenv
from pathlib import Path
from textwrap import dedent
from typing import Any, Dict, List, Optional, Union
from agno.agent import Agent
from agno.team import Team
from agno.tools import Toolkit
from agno.utils.code_execution import prepare_python_code
from agno.utils.log import log_debug, log_error, log_info, log_warning
try:
from daytona import (
CodeLanguage,
CreateSandboxFromSnapshotParams,
Daytona,
DaytonaConfig,
Sandbox,
)
except ImportError:
raise ImportError("`daytona` not installed. Please install using `pip install daytona`")
DEFAULT_INSTRUCTIONS = dedent(
"""\
You have access to a persistent Daytona sandbox for code execution. The sandbox maintains state across interactions.
Available tools:
- `run_code`: Execute code in the sandbox
- `run_shell_command`: Execute shell commands (bash)
- `create_file`: Create or update files
- `read_file`: Read file contents
- `list_files`: List directory contents
- `delete_file`: Delete files or directories
- `change_directory`: Change the working directory
MANDATORY: When users ask for code (Python, JavaScript, TypeScript, etc.), you MUST:
1. Write the code
2. Execute it using run_code tool
3. Show the actual output/results
4. Never just provide code without executing it
CRITICAL WORKFLOW:
1. Before running Python scripts, check if required packages are installed
2. Install missing packages with: run_shell_command("pip install package1 package2")
3. When running scripts, capture both output AND errors
4. If a script produces no output, check for errors or add print statements
IMPORTANT: Always use single quotes for the content parameter when creating files
Remember: Your job is to provide working, executed code examples, not just code snippets!
"""
)
class DaytonaTools(Toolkit):
    """Toolkit that gives agents a Daytona cloud sandbox for executing code,
    running shell commands and managing files.

    Sandboxes may be persistent (their id is tracked in the agent's session
    state and reused across runs) or created on demand per configuration.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        api_url: Optional[str] = None,
        sandbox_id: Optional[str] = None,
        sandbox_language: Optional[CodeLanguage] = None,
        sandbox_target: Optional[str] = None,
        sandbox_os: Optional[str] = None,
        auto_stop_interval: Optional[int] = 60,  # Stop after 1 hour
        sandbox_os_user: Optional[str] = None,
        sandbox_env_vars: Optional[Dict[str, str]] = None,
        sandbox_labels: Optional[Dict[str, str]] = None,
        sandbox_public: Optional[bool] = None,
        organization_id: Optional[str] = None,
        timeout: int = 300,
        auto_create_sandbox: bool = True,
        verify_ssl: Optional[bool] = False,
        persistent: bool = True,
        instructions: Optional[str] = None,
        add_instructions: bool = False,
        **kwargs,
    ):
        self.api_key = api_key or getenv("DAYTONA_API_KEY")
        if not self.api_key:
            raise ValueError("DAYTONA_API_KEY not set. Please set the DAYTONA_API_KEY environment variable.")
        self.api_url = api_url or getenv("DAYTONA_API_URL")
        self.sandbox_id = sandbox_id
        self.sandbox_target = sandbox_target
        self.organization_id = organization_id
        self.sandbox_language = sandbox_language or CodeLanguage.PYTHON
        self.sandbox_os = sandbox_os
        self.auto_stop_interval = auto_stop_interval
        self.sandbox_os_user = sandbox_os_user
        self.sandbox_env_vars = sandbox_env_vars
        self.sandbox_labels = sandbox_labels or {}
        self.sandbox_public = sandbox_public
        self.timeout = timeout
        self.auto_create_sandbox = auto_create_sandbox
        self.persistent = persistent
        self.verify_ssl = verify_ssl
        # Set instructions - use default if none provided
        self.instructions = instructions or DEFAULT_INSTRUCTIONS
        if not self.verify_ssl:
            self._disable_ssl_verification()
        self.config = DaytonaConfig(
            api_key=self.api_key,
            api_url=self.api_url,
            target=self.sandbox_target,
            organization_id=self.organization_id,
        )
        self.daytona = Daytona(self.config)
        tools: List[Any] = [
            self.run_code,
            self.run_shell_command,
            self.create_file,
            self.read_file,
            self.list_files,
            self.delete_file,
            self.change_directory,
        ]
        super().__init__(
            name="daytona_tools",
            tools=tools,
            instructions=self.instructions,
            add_instructions=add_instructions,
            **kwargs,
        )

    def _disable_ssl_verification(self) -> None:
        """Monkey-patch the Daytona API client so every Configuration it builds
        has SSL verification turned off, and silence the resulting warnings."""
        try:
            from daytona_api_client import Configuration

            original_init = Configuration.__init__

            # Create a wrapper that sets verify_ssl = False
            def patched_init(self, *args, **kwargs):
                original_init(self, *args, **kwargs)
                self.verify_ssl = False

            setattr(Configuration, "__init__", patched_init)
            import urllib3

            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            log_debug(
                "SSL certificate verification is disabled",
            )
        except ImportError:
            log_warning("Could not import daytona_api_client.Configuration for SSL patching")

    def _get_working_directory(self, agent: Union[Agent, Team]) -> str:
        """Get the current working directory from agent session state."""
        if agent and hasattr(agent, "session_state"):
            if agent.session_state is None:
                agent.session_state = {}
            # Default to the sandbox user's home directory.
            return agent.session_state.get("working_directory", "/home/daytona")
        return "/home/daytona"

    def _set_working_directory(self, agent: Union[Agent, Team], directory: str) -> None:
        """Set the working directory in agent session state."""
        if agent and hasattr(agent, "session_state"):
            if agent.session_state is None:
                agent.session_state = {}
            agent.session_state["working_directory"] = directory
            log_info(f"Updated working directory to: {directory}")

    def _get_or_create_sandbox(self, agent: Union[Agent, Team]) -> Sandbox:
        """Get existing sandbox or create new one.

        Resolution order: explicit sandbox_id, then the persistent id stored in
        the agent's session state, then a freshly created sandbox.
        """
        try:
            sandbox = None
            # Use explicit sandbox
            if self.sandbox_id:
                try:
                    sandbox = self.daytona.get(self.sandbox_id)
                    log_debug(f"Using explicit sandbox: {self.sandbox_id}")
                except Exception as e:
                    log_debug(f"Failed to get sandbox {self.sandbox_id}: {e}")
                    sandbox = None
            # Use persistent sandbox
            elif self.persistent and hasattr(agent, "session_state"):
                if agent.session_state is None:
                    agent.session_state = {}
                sandbox_id = agent.session_state.get("sandbox_id")
                if sandbox_id:
                    try:
                        sandbox = self.daytona.get(sandbox_id)
                        log_debug(f"Using persistent sandbox: {sandbox_id}")
                    except Exception as e:
                        log_debug(f"Failed to get sandbox {sandbox_id}: {e}")
                        sandbox = None
            # Create new sandbox if none found
            if sandbox is None:
                sandbox = self._create_new_sandbox(agent)
            # Store sandbox ID for persistent sandboxes
            if self.persistent and hasattr(agent, "session_state"):
                if agent.session_state is None:
                    agent.session_state = {}
                agent.session_state["sandbox_id"] = sandbox.id
            # Ensure sandbox is started
            if sandbox.state != "started":
                log_info(f"Starting sandbox {sandbox.id}")
                self.daytona.start(sandbox, timeout=self.timeout)
            return sandbox
        except Exception as e:
            if self.auto_create_sandbox:
                log_warning(f"Error in sandbox management: {e}. Creating new sandbox.")
                return self._create_new_sandbox(agent)
            else:
                raise e

    def _create_new_sandbox(self, agent: Optional[Union[Agent, Team]] = None) -> Sandbox:
        """Create a new sandbox with the configured parameters."""
        try:
            labels = self.sandbox_labels.copy()
            labels.setdefault("created_by", "agno_daytona_toolkit")
            labels.setdefault("language", str(self.sandbox_language))
            if self.persistent:
                labels.setdefault("persistent", "true")
            params = CreateSandboxFromSnapshotParams(
                language=self.sandbox_language,
                os_user=self.sandbox_os_user,
                env_vars=self.sandbox_env_vars,
                auto_stop_interval=self.auto_stop_interval,
                labels=labels,
                public=self.sandbox_public,
            )
            sandbox = self.daytona.create(params, timeout=self.timeout)
            # Add the sandbox_id to the Agent state
            if self.persistent and agent and hasattr(agent, "session_state"):
                if agent.session_state is None:
                    agent.session_state = {}
                agent.session_state["sandbox_id"] = sandbox.id
            log_info(f"Created new Daytona sandbox: {sandbox.id}")
            return sandbox
        except Exception as e:
            log_error(f"Error creating Daytona sandbox: {e}")
            raise e

    # Tools
    def run_code(self, agent: Union[Agent, Team], code: str) -> str:
        """Execute Python code in the Daytona sandbox.
        Args:
            code: Code to execute
        Returns:
            Execution output as a string
        """
        try:
            current_sandbox = self._get_or_create_sandbox(agent)
            if self.sandbox_language == CodeLanguage.PYTHON:
                # Normalize common LLM mistakes (true/false/none casing).
                code = prepare_python_code(code)
            response = current_sandbox.process.code_run(code)
            self.result = response.result
            return self.result
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error executing code: {str(e)}"})

    def run_shell_command(self, agent: Union[Agent, Team], command: str) -> str:
        """Execute a shell command in the sandbox.
        Args:
            command: Shell command to execute
        Returns:
            Command output as a string
        """
        try:
            current_sandbox = self._get_or_create_sandbox(agent)
            # Use persistent working directory if not specified
            cwd = self._get_working_directory(agent)
            # Handle cd commands specially to update working directory
            if command.strip().startswith("cd "):
                new_dir = command.strip()[3:].strip()
                # Convert to Path
                new_path = Path(new_dir)
                # Resolve relative paths
                if not new_path.is_absolute():
                    # Get current absolute path first
                    result = current_sandbox.process.exec(f"cd {cwd} && pwd", cwd="/")
                    current_abs_path = Path(result.result.strip())
                    new_path = current_abs_path / new_path
                # Normalize the path
                # NOTE(review): resolve() normalizes on the local host, not in
                # the sandbox, so sandbox-side symlinks are not followed.
                new_path_str = str(new_path.resolve())
                # Test if directory exists
                test_result = current_sandbox.process.exec(
                    f"test -d {new_path_str} && echo 'exists' || echo 'not found'", cwd="/"
                )
                if "exists" in test_result.result:
                    self._set_working_directory(agent, new_path_str)
                    return f"Changed directory to: {new_path_str}"
                else:
                    return f"Error: Directory {new_path_str} not found"
            # Execute the command
            response = current_sandbox.process.exec(command, cwd=cwd)
            return response.result
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error executing command: {str(e)}"})

    def create_file(self, agent: Union[Agent, Team], file_path: str, content: str) -> str:
        """Create or update a file in the sandbox.
        Args:
            file_path: Path to the file (relative to current directory or absolute)
            content: Content to write to the file
        Returns:
            Success message or error
        """
        try:
            current_sandbox = self._get_or_create_sandbox(agent)
            # Convert to Path object
            path = Path(file_path)
            # Handle relative paths
            if not path.is_absolute():
                path = Path(self._get_working_directory(agent)) / path
            # Ensure the path is normalized
            path_str = str(path)
            # Create directory if needed
            parent_dir = str(path.parent)
            if parent_dir and parent_dir != "/":
                result = current_sandbox.process.exec(f"mkdir -p {parent_dir}")
                if result.exit_code != 0:
                    return json.dumps({"status": "error", "message": f"Failed to create directory: {result.result}"})
            # Write the file via a quoted heredoc. A quoted delimiter means the
            # shell performs NO expansion and writes the content verbatim, so
            # the content must not be shell-escaped (the previous single-quote
            # escaping was written literally, corrupting any content containing
            # quotes). An unlikely delimiter also prevents premature
            # termination if the content itself contains a line reading "EOF".
            delimiter = "AGNO_HEREDOC_EOF"
            command = f"cat > '{path_str}' << '{delimiter}'\n{content}\n{delimiter}"
            result = current_sandbox.process.exec(command)
            if result.exit_code != 0:
                return json.dumps({"status": "error", "message": f"Failed to create file: {result.result}"})
            return f"File created/updated: {path_str}"
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error creating file: {str(e)}"})

    def read_file(self, agent: Union[Agent, Team], file_path: str) -> str:
        """Read a file from the sandbox.
        Args:
            file_path: Path to the file (relative to current directory or absolute)
        Returns:
            File content or error message
        """
        try:
            current_sandbox = self._get_or_create_sandbox(agent)
            # Convert to Path object
            path = Path(file_path)
            # Handle relative paths
            if not path.is_absolute():
                path = Path(self._get_working_directory(agent)) / path
            path_str = str(path)
            # Read file using cat
            result = current_sandbox.process.exec(f"cat '{path_str}'")
            if result.exit_code != 0:
                return json.dumps({"status": "error", "message": f"Error reading file: {result.result}"})
            return result.result
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error reading file: {str(e)}"})

    def list_files(self, agent: Union[Agent, Team], directory: Optional[str] = None) -> str:
        """List files in a directory.
        Args:
            directory: Directory to list (defaults to current working directory)
        Returns:
            List of files and directories as formatted string
        """
        try:
            current_sandbox = self._get_or_create_sandbox(agent)
            # Use current directory if not specified
            if directory is None:
                dir_path = Path(self._get_working_directory(agent))
            else:
                dir_path = Path(directory)
                # Handle relative paths
                if not dir_path.is_absolute():
                    dir_path = Path(self._get_working_directory(agent)) / dir_path
            path_str = str(dir_path)
            # List files using ls -la for detailed info
            result = current_sandbox.process.exec(f"ls -la '{path_str}'")
            if result.exit_code != 0:
                return json.dumps({"status": "error", "message": f"Error listing directory: {result.result}"})
            return f"Contents of {path_str}:\n{result.result}"
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error listing files: {str(e)}"})

    def delete_file(self, agent: Union[Agent, Team], file_path: str) -> str:
        """Delete a file or directory from the sandbox.
        Args:
            file_path: Path to the file or directory (relative to current directory or absolute)
        Returns:
            Success message or error
        """
        try:
            current_sandbox = self._get_or_create_sandbox(agent)
            # Convert to Path object
            path = Path(file_path)
            # Handle relative paths
            if not path.is_absolute():
                path = Path(self._get_working_directory(agent)) / path
            path_str = str(path)
            # Check if it's a directory or file
            check_result = current_sandbox.process.exec(f"test -d '{path_str}' && echo 'directory' || echo 'file'")
            if "directory" in check_result.result:
                # Remove directory recursively
                result = current_sandbox.process.exec(f"rm -rf '{path_str}'")
            else:
                # Remove file
                result = current_sandbox.process.exec(f"rm -f '{path_str}'")
            if result.exit_code != 0:
                return json.dumps({"status": "error", "message": f"Failed to delete: {result.result}"})
            return f"Deleted: {path_str}"
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error deleting file: {str(e)}"})

    def change_directory(self, agent: Union[Agent, Team], directory: str) -> str:
        """Change the current working directory.
        Args:
            directory: Directory to change to (relative to current directory or absolute)
        Returns:
            Success message or error
        """
        try:
            # run_shell_command's "cd" handling resolves the path, verifies it
            # exists in the sandbox and updates the session working directory
            # on success. Re-setting it here would clobber the resolved
            # absolute path with the raw argument - and would even record
            # directories that failed validation - so just return its result.
            return self.run_shell_command(agent, f"cd {directory}")
        except Exception as e:
            return json.dumps({"status": "error", "message": f"Error changing directory: {str(e)}"})
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/daytona.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/utils/code_execution.py | """Utils for our multiple integrations with external code execution environments."""
import re
def prepare_python_code(code: str) -> str:
"""Fix common problems with LLM-generated Python code."""
python_keywords = {"true": "True", "false": "False", "none": "None"}
for lowercase, capitalized in python_keywords.items():
code = re.sub(rf"\b({lowercase})\b", capitalized, code)
return code
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/code_execution.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/tools/test_crawl4ai.py | """Unit tests for Crawl4aiTools class."""
from typing import Optional
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from agno.tools.crawl4ai import Crawl4aiTools
@pytest.fixture
def mock_async_crawler():
    """Yield the AsyncMock instance returned when the tool enters AsyncWebCrawler."""
    with patch("agno.tools.crawl4ai.AsyncWebCrawler") as crawler_cls:
        crawler_instance = AsyncMock()
        crawler_cls.return_value.__aenter__.return_value = crawler_instance
        yield crawler_instance
@pytest.fixture
def mock_browser_config():
    """Patch BrowserConfig and yield the patched class for call assertions."""
    with patch("agno.tools.crawl4ai.BrowserConfig") as config_cls:
        config_cls.return_value = MagicMock()
        yield config_cls
@pytest.fixture
def mock_crawler_run_config():
    """Patch CrawlerRunConfig and yield the patched class."""
    with patch("agno.tools.crawl4ai.CrawlerRunConfig") as run_config_cls:
        yield run_config_cls
@pytest.fixture
def crawl4ai_tools():
    """Create a Crawl4aiTools instance with default settings.

    The crawl4ai entry points are patched only while the constructor runs;
    the returned instance is used after the patches have been exited.
    """
    with (
        patch("agno.tools.crawl4ai.AsyncWebCrawler"),
        patch("agno.tools.crawl4ai.BrowserConfig"),
        patch("agno.tools.crawl4ai.CrawlerRunConfig"),
    ):
        return Crawl4aiTools()
@pytest.fixture
def custom_crawl4ai_tools():
    """Create a Crawl4aiTools instance with custom settings.

    Non-default values for every tunable, so tests can verify the constructor
    stores them verbatim. Patches only cover construction (see crawl4ai_tools).
    """
    with (
        patch("agno.tools.crawl4ai.AsyncWebCrawler"),
        patch("agno.tools.crawl4ai.BrowserConfig"),
        patch("agno.tools.crawl4ai.CrawlerRunConfig"),
    ):
        return Crawl4aiTools(
            max_length=2000,
            timeout=30,
            use_pruning=True,
            pruning_threshold=0.6,
            bm25_threshold=2.0,
            wait_until="networkidle",
            headless=False,
        )
def create_mock_crawler_result(
    raw_markdown: Optional[str] = "This is the extracted content from the webpage.",
    fit_markdown: Optional[str] = None,
    html: str = "<html><body>Test content</body></html>",
    text: str = "Test text content",
    success: bool = True,
):
    """Build a MagicMock mimicking a crawl4ai crawler result.

    Args:
        raw_markdown: Raw markdown text; a falsy value leaves ``result.markdown``
            as None to exercise fallback paths. (Was mis-annotated as plain str.)
        fit_markdown: Optional filtered markdown attached directly to the result.
        html: Raw HTML payload.
        text: Plain-text payload.
        success: Whether the simulated crawl succeeded.

    Returns:
        A MagicMock carrying the attributes the crawl code reads.
    """
    result = MagicMock()
    if raw_markdown:
        result.markdown = MagicMock()
        result.markdown.raw_markdown = raw_markdown
    else:
        result.markdown = None
    result.fit_markdown = fit_markdown
    result.html = html
    result.text = text
    result.success = success
    return result
def test_initialization_default(crawl4ai_tools):
    """Test initialization with default values."""
    assert crawl4ai_tools.name == "crawl4ai_tools"
    assert crawl4ai_tools.max_length == 5000
    assert crawl4ai_tools.timeout == 60
    assert crawl4ai_tools.use_pruning is False
    assert crawl4ai_tools.pruning_threshold == 0.48
    assert crawl4ai_tools.bm25_threshold == 1.0
    assert crawl4ai_tools.wait_until == "domcontentloaded"
    assert crawl4ai_tools.headless is True
    # Check registered functions: the toolkit exposes exactly one tool, "crawl".
    function_names = [func.name for func in crawl4ai_tools.functions.values()]
    assert "crawl" in function_names
    assert len(crawl4ai_tools.functions) == 1
def test_initialization_custom(custom_crawl4ai_tools):
    """Test initialization with custom values (mirrors the fixture's arguments)."""
    assert custom_crawl4ai_tools.max_length == 2000
    assert custom_crawl4ai_tools.timeout == 30
    assert custom_crawl4ai_tools.use_pruning is True
    assert custom_crawl4ai_tools.pruning_threshold == 0.6
    assert custom_crawl4ai_tools.bm25_threshold == 2.0
    assert custom_crawl4ai_tools.wait_until == "networkidle"
    assert custom_crawl4ai_tools.headless is False
def test_crawl_no_url(crawl4ai_tools):
    """Test crawl with no URL provided."""
    # Both an empty string and an empty list must be rejected up front.
    for empty_input in ("", []):
        assert crawl4ai_tools.crawl(empty_input) == "Error: No URL provided"
def test_crawl_single_url_success(crawl4ai_tools, mock_async_crawler, mock_browser_config, mock_crawler_run_config):
    """Test successful crawling of a single URL."""
    # Setup mock result
    mock_result = create_mock_crawler_result()
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Execute
    result = crawl4ai_tools.crawl("https://example.com")
    # Assert: raw markdown passes straight through.
    assert result == "This is the extracted content from the webpage."
    mock_browser_config.assert_called_once_with(headless=True, verbose=False)
    mock_crawler_run_config.assert_called_once()
    # Check config parameters forwarded from the toolkit defaults.
    config_call_args = mock_crawler_run_config.call_args[1]
    assert config_call_args["page_timeout"] == 60000  # 60 seconds in milliseconds
    assert config_call_args["wait_until"] == "domcontentloaded"
    assert config_call_args["cache_mode"] == "bypass"
    assert config_call_args["verbose"] is False
def test_crawl_with_search_query(crawl4ai_tools, mock_async_crawler, mock_browser_config, mock_crawler_run_config):
    """Test crawling with search query for content filtering."""
    # Setup mock result
    mock_result = create_mock_crawler_result()
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Mock the imports that happen inside _build_config
    with (
        patch("crawl4ai.content_filter_strategy.BM25ContentFilter") as mock_bm25,
        patch("crawl4ai.markdown_generation_strategy.DefaultMarkdownGenerator") as mock_markdown_gen,
    ):
        # Execute with search query
        result = crawl4ai_tools.crawl("https://example.com", search_query="machine learning")
        # Assert
        assert result == "This is the extracted content from the webpage."
        # Verify BM25 content filter is used; the toolkit's default bm25_threshold (1.0) is forwarded
        mock_bm25.assert_called_once_with(user_query="machine learning", bm25_threshold=1.0)
        mock_markdown_gen.assert_called_once()
def test_crawl_with_fit_markdown(crawl4ai_tools, mock_async_crawler):
    """Test crawling when fit_markdown is available."""
    # Setup mock result with fit_markdown
    mock_result = create_mock_crawler_result(
        raw_markdown="This is the raw content.", fit_markdown="This is the filtered content."
    )
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Execute
    result = crawl4ai_tools.crawl("https://example.com")
    # Should return fit_markdown when available: filtered markdown takes precedence over raw
    assert result == "This is the filtered content."
def test_crawl_length_truncation(crawl4ai_tools, mock_async_crawler):
    """Test content truncation when exceeding max_length."""
    # Setup mock with long content (double the default max_length of 5000)
    mock_result = create_mock_crawler_result(raw_markdown="A" * 10000)
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Execute
    result = crawl4ai_tools.crawl("https://example.com")
    # Assert truncation: max_length characters plus a literal "..." suffix
    assert len(result) == 5003  # 5000 + "..."
    assert result.endswith("...")
    assert result[:5000] == "A" * 5000
def test_crawl_multiple_urls(crawl4ai_tools, mock_async_crawler):
    """Test crawling multiple URLs."""
    # Setup different results for each URL
    results = [
        create_mock_crawler_result(raw_markdown="Content from site 1"),
        create_mock_crawler_result(raw_markdown="Content from site 2"),
    ]
    # Configure arun to return different results
    # NOTE(review): this relies on URLs being crawled sequentially in input order — confirm
    call_count = 0
    async def mock_arun(url, config):
        nonlocal call_count
        result = results[call_count]
        call_count += 1
        return result
    mock_async_crawler.arun = mock_arun
    # Execute
    urls = ["https://site1.com", "https://site2.com"]
    result = crawl4ai_tools.crawl(urls)
    # Assert: a list input produces a url -> content mapping
    assert isinstance(result, dict)
    assert len(result) == 2
    assert result["https://site1.com"] == "Content from site 1"
    assert result["https://site2.com"] == "Content from site 2"
def test_crawl_error_handling(crawl4ai_tools, mock_async_crawler):
"""Test error handling during crawl."""
mock_async_crawler.arun = AsyncMock(side_effect=Exception("Network error"))
# Execute
result = crawl4ai_tools.crawl("https://example.com")
# Assert error message
assert "Error crawling https://example.com: Network error" in result
def test_crawl_no_content(crawl4ai_tools, mock_async_crawler):
"""Test handling of empty results."""
mock_async_crawler.arun = AsyncMock(return_value=None)
# Execute
result = crawl4ai_tools.crawl("https://example.com")
# Assert
assert result == "Error: No content found"
def test_crawl_text_fallback(crawl4ai_tools, mock_async_crawler):
    """Test fallback to text when markdown is not available."""
    # Create result with only text (no markdown at all)
    mock_result = create_mock_crawler_result(raw_markdown=None, text="Plain text content")
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Execute
    result = crawl4ai_tools.crawl("https://example.com")
    # Should fall back to the result's plain-text attribute
    assert result == "Plain text content"
def test_crawl_no_readable_content(crawl4ai_tools, mock_async_crawler):
    """Test error when no readable content is available."""
    # Create result with only HTML — neither markdown nor plain text is extractable
    mock_result = create_mock_crawler_result(raw_markdown=None, text=None, html="<html><body>Test</body></html>")
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Execute
    result = crawl4ai_tools.crawl("https://example.com")
    # Should return error rather than raw HTML
    assert result == "Error: No readable content extracted"
def test_pruning_configuration(mock_async_crawler, mock_browser_config, mock_crawler_run_config):
    """Test pruning filter configuration."""
    # Mock the imports that happen inside _build_config
    with (
        patch("crawl4ai.content_filter_strategy.PruningContentFilter") as mock_pruning,
        patch("crawl4ai.markdown_generation_strategy.DefaultMarkdownGenerator") as mock_markdown_gen,
    ):
        # Create toolkit with pruning enabled and a custom threshold
        toolkit = Crawl4aiTools(use_pruning=True, pruning_threshold=0.6)
        # Setup mock result
        mock_result = create_mock_crawler_result()
        mock_async_crawler.arun = AsyncMock(return_value=mock_result)
        # Execute
        result = toolkit.crawl("https://example.com")
        # Assert
        assert result == "This is the extracted content from the webpage."
        # Verify pruning filter is used with the configured threshold
        mock_pruning.assert_called_once_with(threshold=0.6, threshold_type="fixed", min_word_threshold=2)
        mock_markdown_gen.assert_called_once()
def test_crawl_with_multiple_urls_and_errors(crawl4ai_tools, mock_async_crawler):
    """Test crawling multiple URLs with some failures."""
    # Configure arun to fail for second URL; one failure must not abort the batch
    call_count = 0
    async def mock_arun(url, config):
        nonlocal call_count
        call_count += 1
        if call_count == 2:
            raise Exception("Connection failed")
        return create_mock_crawler_result(raw_markdown=f"Content from {url}")
    mock_async_crawler.arun = mock_arun
    # Execute
    urls = ["https://success.com", "https://fail.com"]
    result = crawl4ai_tools.crawl(urls)
    # Assert: success and failure entries coexist in the returned mapping
    assert "Content from https://success.com" in result["https://success.com"]
    assert "Error crawling https://fail.com: Connection failed" in result["https://fail.com"]
def test_browser_config_proxy_forwarding(mock_async_crawler, mock_browser_config):
    """The inner proxy mapping is unwrapped and forwarded to BrowserConfig."""
    tools = Crawl4aiTools(proxy_config={"proxy_config": {"server": "http://proxy:8080"}})
    mock_result = create_mock_crawler_result()
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    tools.crawl("https://example.com")
    # The nested {"proxy_config": {...}} wrapper is stripped before the call
    mock_browser_config.assert_called_once_with(
        headless=True,
        verbose=False,
        proxy_config={"server": "http://proxy:8080"},
    )
@patch("agno.tools.crawl4ai.log_warning")
def test_crawl_logging(mock_log_warning, crawl4ai_tools, mock_async_crawler):
    """Test logging during crawl operations."""
    # Setup result with only HTML (no markdown, no text)
    mock_result = create_mock_crawler_result(raw_markdown=None, text=None, html="<html><body>Test</body></html>")
    # Make sure result has html attribute but not text attribute
    mock_result.text = None
    delattr(mock_result, "text")  # Remove the text attribute entirely
    mock_result.html = "<html><body>Test</body></html>"
    mock_async_crawler.arun = AsyncMock(return_value=mock_result)
    # Execute
    result = crawl4ai_tools.crawl("https://example.com")
    # Check that error is returned and log was called
    assert result == "Error: Could not extract markdown from page"
    # Check warning was logged with the exact message
    mock_log_warning.assert_called_once_with("Only HTML available, no markdown extracted")
@patch("agno.tools.crawl4ai.asyncio.run")
def test_asyncio_run_error(mock_asyncio_run, crawl4ai_tools):
    """Test handling of asyncio.run errors."""
    # Event-loop failures are not swallowed by the toolkit; they propagate to the caller
    mock_asyncio_run.side_effect = RuntimeError("Event loop error")
    with pytest.raises(RuntimeError) as excinfo:
        crawl4ai_tools.crawl("https://example.com")
    assert "Event loop error" in str(excinfo.value)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_crawl4ai.py",
"license": "Apache License 2.0",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_parser_model.py | from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.models.google import Gemini
from agno.models.openai import OpenAIChat
class ParkGuide(BaseModel):
    """Structured description of a national park; the output schema used by the parser-model tests."""

    park_name: str = Field(..., description="The official name of the national park.")
    activities: List[str] = Field(
        ..., description="A list of popular activities to do in the park. Provide at least three."
    )
    best_season_to_visit: str = Field(
        ..., description="The best season to visit the park (e.g., Spring, Summer, Autumn, Winter)."
    )
def test_claude_with_openai_parser_model():
    """Claude writes the guide; an OpenAI parser model structures it into ParkGuide."""
    park_agent = Agent(
        model=Claude(id="claude-sonnet-4-20250514"),  # Main model to generate the content
        description="You are an expert on national parks and provide concise guides.",
        output_schema=ParkGuide,
        parser_model=OpenAIChat(id="gpt-4o"),  # Model to parse the output
    )
    guide = park_agent.run("Tell me about Yosemite National Park.").content
    assert guide is not None
    assert isinstance(guide, ParkGuide)
    assert isinstance(guide.park_name, str)
    assert len(guide.park_name) > 0
    assert isinstance(guide.activities, list)
    assert len(guide.activities) >= 2
    for entry in guide.activities:
        assert isinstance(entry, str)
        assert len(entry) > 0
    assert isinstance(guide.best_season_to_visit, str)
    assert len(guide.best_season_to_visit) > 0
def test_openai_with_claude_parser_model():
    """OpenAI writes the guide; a Claude parser model structures it into ParkGuide."""
    park_agent = Agent(
        model=OpenAIChat(id="gpt-4o"),  # Main model to generate the content
        description="You are an expert on national parks and provide concise guides.",
        output_schema=ParkGuide,
        parser_model=Claude(id="claude-sonnet-4-20250514"),  # Model to parse the output
    )
    guide = park_agent.run("Tell me about Yosemite National Park.").content
    assert guide is not None
    assert isinstance(guide, ParkGuide)
    assert isinstance(guide.park_name, str)
    assert len(guide.park_name) > 0
    assert isinstance(guide.activities, list)
    assert len(guide.activities) >= 2
    for entry in guide.activities:
        assert isinstance(entry, str)
        assert len(entry) > 0
    assert isinstance(guide.best_season_to_visit, str)
    assert len(guide.best_season_to_visit) > 0
def test_gemini_with_openai_parser_model():
    """Gemini writes the guide; an OpenAI parser model structures it into ParkGuide."""
    park_agent = Agent(
        model=Gemini(id="gemini-2.0-flash-001"),  # Main model to generate the content
        description="You are an expert on national parks and provide concise guides.",
        output_schema=ParkGuide,
        parser_model=OpenAIChat(id="gpt-4o"),  # Model to parse the output
    )
    guide = park_agent.run("Tell me about Yosemite National Park.").content
    assert guide is not None
    assert isinstance(guide, ParkGuide)
    assert isinstance(guide.park_name, str)
    assert len(guide.park_name) > 0
    assert isinstance(guide.activities, list)
    assert len(guide.activities) >= 2
    for entry in guide.activities:
        assert isinstance(entry, str)
        assert len(entry) > 0
    assert isinstance(guide.best_season_to_visit, str)
    assert len(guide.best_season_to_visit) > 0
def test_parser_model_stream(shared_db):
    """Streaming with a parser model still persists a parsed dict as the last run output."""
    park_agent = Agent(
        model=OpenAIChat(id="gpt-4o"),  # Main model to generate the content
        description="You are an expert on national parks and provide concise guides.",
        output_schema=ParkGuide,
        db=shared_db,
        parser_model=Claude(id="claude-sonnet-4-20250514"),  # Model to parse the output
    )
    # Drain the stream; only the persisted run output is inspected below.
    for _ in park_agent.run("Tell me about Yosemite National Park.", stream=True):
        pass
    run_response = park_agent.get_last_run_output()
    content = run_response.content
    assert content is not None
    park_name = content.get("park_name")
    assert isinstance(park_name, str)
    assert len(park_name) > 0
    activities = content.get("activities")
    assert isinstance(activities, list)
    assert len(activities) >= 2
    for activity in activities:
        assert isinstance(activity, str)
        assert len(activity) > 0
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_parser_model.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/webtools.py | import httpx
from agno.tools import Toolkit
from agno.utils.log import logger
class WebTools(Toolkit):
    """
    A toolkit for working with web-related tools.
    """

    def __init__(
        self,
        retries: int = 3,
        enable_expand_url: bool = True,
        all: bool = False,
        **kwargs,
    ):
        """Create the toolkit.

        Args:
            retries: Number of attempts ``expand_url`` makes before giving up.
            enable_expand_url: Register the ``expand_url`` tool.
            all: Register every available tool regardless of individual flags.
        """
        self.retries = retries
        tools = []
        if all or enable_expand_url:
            tools.append(self.expand_url)
        super().__init__(name="web_tools", tools=tools, **kwargs)

    def expand_url(self, url: str) -> str:
        """
        Expands a shortened URL to its final destination using HTTP HEAD requests with retries.

        :param url: The URL to expand.
        :return: The final destination URL if successful; otherwise, returns the original URL.
        """
        timeout = 5
        for attempt in range(1, self.retries + 1):
            try:
                response = httpx.head(url, follow_redirects=True, timeout=timeout)
                final_url = response.url
                logger.info(f"expand_url: {url} expanded to {final_url} on attempt {attempt}")
                return str(final_url)
            except Exception as e:
                # Bug fix: previously this returned immediately on the first
                # failure, so the configured retry count was never honored.
                # Log and fall through to the next attempt instead.
                logger.error(f"Error expanding URL {url} on attempt {attempt}: {e}")
        # All attempts exhausted — fall back to the original URL.
        return url
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/webtools.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/tools/test_webtools.py | """Unit tests for WebTools class."""
from unittest.mock import Mock, patch
import pytest
from agno.tools.webtools import WebTools
@pytest.fixture
def web_tools():
    """Fixture to create a WebTools instance (retries=3 matches the WebTools default)."""
    return WebTools(retries=3)
def test_expand_url_success(web_tools):
    """Test successful expansion of a URL."""
    # NOTE(review): the short URL ends with a trailing '.' — presumably deliberate test data; confirm
    mock_url = "https://tinyurl.com/k2fkfxra."
    final_url = "https://github.com/agno-agi/agno"
    mock_response = Mock()
    mock_response.url = final_url
    with patch("httpx.head", return_value=mock_response) as mock_head:
        result = web_tools.expand_url(mock_url)
        assert result == final_url
        # timeout=5 matches the hard-coded timeout inside WebTools.expand_url
        mock_head.assert_called_once_with(mock_url, follow_redirects=True, timeout=5)
def test_toolkit_registration(web_tools):
"""Test that the expand_url method is registered correctly."""
assert "expand_url" in [func.name for func in web_tools.functions.values()]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_webtools.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/models/langdb/langdb.py | from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class LangDB(OpenAILike):
    """
    A class for using models hosted on LangDB.

    Attributes:
        id (str): The model id. Defaults to "gpt-4o".
        name (str): The model name. Defaults to "LangDB".
        provider (str): The provider name. Defaults to "LangDB".
        api_key (Optional[str]): The API key. Defaults to getenv("LANGDB_API_KEY").
        project_id (Optional[str]): The project id. Defaults to getenv("LANGDB_PROJECT_ID").
        base_host_url (str): Host part of the API URL. Defaults to
            getenv("LANGDB_API_BASE_URL", "https://api.us-east-1.langdb.ai").
        base_url (Optional[str]): Full base URL; derived from base_host_url and project_id when unset.
        label (Optional[str]): Optional label sent as the "x-label" request header.
        default_headers (Optional[dict]): Extra headers passed to the client; populated from label if unset.
    """

    id: str = "gpt-4o"
    name: str = "LangDB"
    provider: str = "LangDB"
    api_key: Optional[str] = field(default_factory=lambda: getenv("LANGDB_API_KEY"))
    project_id: Optional[str] = field(default_factory=lambda: getenv("LANGDB_PROJECT_ID"))
    base_host_url: str = field(default_factory=lambda: getenv("LANGDB_API_BASE_URL", "https://api.us-east-1.langdb.ai"))
    base_url: Optional[str] = None
    label: Optional[str] = None
    default_headers: Optional[dict] = None

    def _get_client_params(self) -> Dict[str, Any]:
        """Validate credentials, derive the project-scoped base URL, and build client params."""
        # Re-read the env var so a key exported after construction is still picked up.
        if not self.api_key:
            self.api_key = getenv("LANGDB_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="LANGDB_API_KEY not set. Please set the LANGDB_API_KEY environment variable.",
                model_name=self.name,
            )
        if not self.project_id:
            raise ModelAuthenticationError(
                message="LANGDB_PROJECT_ID not set. Please set the LANGDB_PROJECT_ID environment variable.",
                model_name=self.name,
            )
        # Requests are scoped per project: <host>/<project_id>/v1
        if not self.base_url:
            self.base_url = f"{self.base_host_url}/{self.project_id}/v1"
        # Initialize headers with label if present; caller-supplied headers are not overwritten
        if self.label and not self.default_headers:
            self.default_headers = {
                "x-label": self.label,
            }
        client_params = super()._get_client_params()
        return client_params
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/langdb/langdb.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/integration/models/langdb/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput # noqa
from agno.db.sqlite import SqliteDb
from agno.models.langdb import LangDB
def _assert_metrics(response: RunOutput):
    """Sanity-check a run's token accounting: all counts positive and additive."""
    metrics = response.metrics
    assert metrics is not None
    prompt_tokens = metrics.input_tokens
    completion_tokens = metrics.output_tokens
    combined_tokens = metrics.total_tokens
    assert prompt_tokens > 0
    assert completion_tokens > 0
    assert combined_tokens > 0
    assert combined_tokens == prompt_tokens + completion_tokens
def test_basic():
    """A plain run returns content, a system/user/assistant message triple, and sane metrics."""
    agent = Agent(model=LangDB(id="gemini-1.5-pro-latest"), markdown=True, telemetry=False)
    # Run a short prompt and validate the response shape
    response: RunOutput = agent.run("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
def test_basic_stream():
    """Streaming returns an iterator of chunks, each carrying content."""
    agent = Agent(model=LangDB(id="gemini-1.5-pro-latest"), markdown=True, telemetry=False)
    response_stream = agent.run("Share a 2 sentence horror story", stream=True)
    # Verify it's an iterator
    assert hasattr(response_stream, "__iter__")
    responses = list(response_stream)
    assert len(responses) > 0
    for response in responses:
        assert response.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """Async run mirrors the sync run: content plus a system/user/assistant triple."""
    agent = Agent(model=LangDB(id="gemini-1.5-pro-latest"), markdown=True, telemetry=False)
    response = await agent.arun("Share a 2 sentence horror story")
    assert response.content is not None
    assert response.messages is not None
    assert len(response.messages) == 3
    assert [m.role for m in response.messages] == ["system", "user", "assistant"]
    _assert_metrics(response)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Async streaming yields chunks with non-empty content."""
    agent = Agent(model=LangDB(id="gemini-1.5-pro-latest"), markdown=True, telemetry=False)
    async for response in agent.arun("Share a 2 sentence horror story", stream=True):
        assert response.content is not None
def test_with_memory():
    """With history in context, a second run can recall facts from the first."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=LangDB(id="gemini-1.5-pro-latest"),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None
    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content
    # Verify memories were created: system + two user/assistant pairs
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]
    # Test metrics structure and types
    _assert_metrics(response2)
def test_structured_output():
    """An output_schema makes the agent return a populated Pydantic model."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")
    agent = Agent(model=LangDB(id="gemini-1.5-pro-latest"), output_schema=MovieScript, telemetry=False)
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_json_response_mode():
    """use_json_mode still yields a populated Pydantic model for the output_schema."""
    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )
    response = agent.run("Create a movie about time travel")
    # Verify structured output
    assert isinstance(response.content, MovieScript)
    assert response.content.title is not None
    assert response.content.genre is not None
    assert response.content.plot is not None
def test_history():
    """Each run grows the stored message history by one user/assistant pair."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        db=SqliteDb(db_file="tmp/langdb/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    run_output = agent.run("Hello")
    assert run_output.messages is not None
    assert len(run_output.messages) == 2
    run_output = agent.run("Hello 2")
    assert run_output.messages is not None
    assert len(run_output.messages) == 4
    run_output = agent.run("Hello 3")
    assert run_output.messages is not None
    assert len(run_output.messages) == 6
    run_output = agent.run("Hello 4")
    assert run_output.messages is not None
    assert len(run_output.messages) == 8
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/langdb/test_basic.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/langdb/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.models.langdb import LangDB
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
def test_tool_use():
    """The agent should call a YFinance tool and mention the ticker in its answer."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_use_stream():
    """Streaming run should surface tool-call events and the expected ticker."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response_stream = agent.run("What is the current price of TSLA?", stream=True, stream_events=True)
    # Bug fix: initialize the flags so a stream with no tool events fails the
    # assertions below with AssertionError instead of raising NameError.
    tool_call_seen = False
    keyword_seen_in_response = False
    for chunk in response_stream:
        if chunk.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(chunk, "tool") and chunk.tool:  # type: ignore
            if chunk.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if chunk.content is not None and "TSLA" in chunk.content:
            keyword_seen_in_response = True
    assert tool_call_seen, "No tool calls observed in stream"
    assert keyword_seen_in_response, "Keyword not found in response"
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async run should call a YFinance tool and mention the ticker in its answer."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = await agent.arun("What is the current price of TSLA?")
    # Verify tool usage (only assistant messages carry tool calls here)
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.role == "assistant" and msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming run should surface tool-call events and the expected ticker."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    # Bug fix: initialize the flags so a stream with no tool events fails the
    # assertions below with AssertionError instead of raising NameError.
    tool_call_seen = False
    keyword_seen_in_response = False
    async for response in agent.arun("What is the current price of TSLA?", stream=True, stream_events=True):
        if response.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(response, "tool") and response.tool:  # type: ignore
            if response.tool.tool_name:  # type: ignore
                tool_call_seen = True
        if response.content is not None and "TSLA" in response.content:
            keyword_seen_in_response = True
    # Asserting we found tool responses in the response stream
    assert tool_call_seen, "No tool calls observed in stream"
    # Asserting we found the expected keyword in the response stream -> proving the correct tool was called
    assert keyword_seen_in_response, "Keyword not found in response"
def test_tool_use_tool_call_limit():
    """With tool_call_limit=1 only the first requested tool call should run."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        tool_call_limit=1,
        markdown=True,
        telemetry=False,
    )
    response = agent.run("Find me the current price of TSLA, then after that find me the latest news about Tesla.")
    # Verify tool usage, should only call the first tool
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    # NOTE(review): tool_calls is collected but never asserted against the limit —
    # consider asserting the number of function calls here.
    assert response.content is not None
def test_tool_use_with_content():
    """A mixed question should trigger a tool call and still answer the non-tool part."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA? What does the ticker stand for?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "TSLA" in response.content or "Tesla" in response.content
def test_parallel_tool_calls():
    """Asking for two tickers should produce at least two function tool calls."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and AAPL?")
    # Verify tool usage
    assert response.messages is not None
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content and "AAPL" in response.content
def test_multiple_tool_calls():
    """A question spanning two toolkits should produce at least two function tool calls."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the current price of TSLA and what is the latest news about it?")
    # Verify tool usage
    assert response.messages is not None
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls is not None:
            tool_calls.extend(msg.tool_calls)
    assert len([call for call in tool_calls if call.get("type", "") == "function"]) >= 2  # Total of 2 tool calls made
    assert response.content is not None
    assert "TSLA" in response.content
def test_tool_call_custom_tool_no_parameters():
    """A plain function with no parameters can be used directly as a tool."""
    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"
    agent = Agent(
        model=LangDB(id="gpt-4o"),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    assert "Tokyo" in response.content
def test_tool_call_custom_tool_optional_parameters():
    """A tool function with an Optional parameter is callable with or without it."""
    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city

        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        else:
            return f"It is currently 70 degrees and cloudy in {city}"
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Paris?")
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    assert response.content is not None
    # The canned tool reply always contains "70"
    assert "70" in response.content
def test_tool_call_list_parameters():
    """List-valued tool parameters (multiple URLs) are handled by the Exa tools."""
    agent = Agent(
        model=LangDB(id="gemini-1.5-pro-latest"),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )
    response = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages if msg.tool_calls is not None)
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    # Only Exa toolkit functions should have been invoked
    for call in tool_calls:
        if call.get("type", "") == "function":
            assert call["function"]["name"] in ["get_contents", "exa_answer", "search_exa"]
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/langdb/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/anthropic/test_prompt_caching.py | """
Integration tests for Claude model prompt caching functionality.
Tests the basic caching features including:
- System message caching with real API calls
- Cache performance tracking
- Usage metrics with standard field names
"""
from pathlib import Path
from unittest.mock import Mock
import pytest
from agno.agent import Agent, RunOutput
from agno.models.anthropic import Claude
from agno.utils.media import download_file
def _get_large_system_prompt() -> str:
    """Load an example large system message from S3"""
    txt_path = Path(__file__).parent.joinpath("system_prompt.txt")
    # NOTE(review): "system_promt.txt" (sic) appears to be the actual S3 key — confirm before "fixing" the spelling.
    download_file(
        "https://agno-public.s3.amazonaws.com/prompts/system_promt.txt",
        str(txt_path),
    )
    return txt_path.read_text()
def _assert_cache_metrics(response: RunOutput, expect_cache_write: bool = False, expect_cache_read: bool = False):
    """Check that the run's metrics show cache write/read tokens where expected."""
    metrics = response.metrics
    if metrics is None:
        pytest.fail("Response metrics is None")
    expectations = (
        (expect_cache_write, metrics.cache_write_tokens, "Expected cache write tokens but found none"),
        (expect_cache_read, metrics.cache_read_tokens, "Expected cache read tokens but found none"),
    )
    for expected, token_count, failure_message in expectations:
        if expected:
            assert token_count > 0, failure_message
def test_system_message_caching_basic():
    """Test basic system message caching functionality."""
    claude = Claude(cache_system_prompt=True)
    system_message = "You are a helpful assistant."
    kwargs = claude._prepare_request_kwargs(system_message)
    # The system prompt is wrapped in a content block carrying ephemeral cache_control
    expected_system = [{"text": system_message, "type": "text", "cache_control": {"type": "ephemeral"}}]
    assert kwargs["system"] == expected_system
def test_extended_cache_time():
    """Test extended cache time configuration."""
    claude = Claude(cache_system_prompt=True, extended_cache_time=True)
    system_message = "You are a helpful assistant."
    kwargs = claude._prepare_request_kwargs(system_message)
    # extended_cache_time adds a 1-hour TTL to the ephemeral cache_control block
    expected_system = [{"text": system_message, "type": "text", "cache_control": {"type": "ephemeral", "ttl": "1h"}}]
    assert kwargs["system"] == expected_system
def test_usage_metrics_parsing():
    """Test parsing enhanced usage metrics with standard field names."""
    claude = Claude()
    mock_response = Mock()
    mock_response.role = "assistant"
    mock_response.content = [Mock(type="text", text="Test response", citations=None)]
    mock_response.stop_reason = None
    mock_usage = Mock()
    mock_usage.input_tokens = 100
    mock_usage.output_tokens = 50
    mock_usage.cache_creation_input_tokens = 80
    mock_usage.cache_read_input_tokens = 20
    # Strip Mock's auto-created attributes so the parser must read the
    # *_input_tokens fields above rather than these alternates.
    if hasattr(mock_usage, "cache_creation"):
        del mock_usage.cache_creation
    if hasattr(mock_usage, "cache_read"):
        del mock_usage.cache_read
    mock_response.usage = mock_usage
    model_response = claude._parse_provider_response(mock_response)
    # Provider-specific usage fields are mapped onto the standard metric names
    assert model_response.response_usage is not None
    assert model_response.response_usage.input_tokens == 100
    assert model_response.response_usage.output_tokens == 50
    assert model_response.response_usage.cache_write_tokens == 80
    assert model_response.response_usage.cache_read_tokens == 20
def test_prompt_caching_with_agent():
    """Test prompt caching using Agent with a large system prompt.

    Runs against the live Anthropic API, so cache behaviour depends on
    server-side state: the test tolerates both a cache write (first ever run)
    and a cache hit (prompt already cached by an earlier run), and returns
    early without asserting when no cache activity is observed at all.
    """
    large_system_prompt = _get_large_system_prompt()
    print(f"System prompt length: {len(large_system_prompt)} characters")
    agent = Agent(
        model=Claude(id="claude-sonnet-4-5-20250929", cache_system_prompt=True),
        system_message=large_system_prompt,
        telemetry=False,
    )
    response = agent.run("Explain the key principles of microservices architecture")
    print(f"First response metrics: {response.metrics}")
    if response.metrics is None:
        pytest.fail("Response metrics is None")
    cache_creation_tokens = response.metrics.cache_write_tokens
    cache_hit_tokens = response.metrics.cache_read_tokens
    print(f"Cache creation tokens: {cache_creation_tokens}")
    print(f"Cache hit tokens: {cache_hit_tokens}")
    # Either a write or a read counts as evidence that caching is active.
    cache_activity = cache_creation_tokens > 0 or cache_hit_tokens > 0
    if not cache_activity:
        print("No cache activity detected. This might be due to:")
        print("1. System prompt being below Anthropic's minimum caching threshold")
        print("2. Cache already existing from previous runs")
        print("Skipping cache assertions...")
        return
    assert response.content is not None
    if cache_creation_tokens > 0:
        print(f"✅ Cache was created with {cache_creation_tokens} tokens")
        # A second run immediately after the write should hit the fresh cache.
        response2 = agent.run("How would you implement monitoring for this architecture?")
        if response2.metrics is None:
            pytest.fail("Response2 metrics is None")
        cache_read_tokens = response2.metrics.cache_read_tokens
        assert cache_read_tokens > 0, f"Expected cache read tokens but found {cache_read_tokens}"
    else:
        print(f"✅ Cache was used with {cache_hit_tokens} tokens from previous run")
@pytest.mark.asyncio
async def test_async_prompt_caching():
    """Async path: an agent configured with cache_system_prompt=True should
    complete a run and produce the usual system/user/assistant message triple."""
    agent = Agent(
        model=Claude(id="claude-sonnet-4-20250514", cache_system_prompt=True),
        system_message=_get_large_system_prompt(),
        telemetry=False,
    )
    result = await agent.arun("Explain REST API design patterns")
    assert result.content is not None
    assert result.messages is not None
    roles = [message.role for message in result.messages]
    assert roles == ["system", "user", "assistant"]
    assert len(result.messages) == 3
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/anthropic/test_prompt_caching.py",
"license": "Apache License 2.0",
"lines": 116,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/models/vllm/vllm.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
from agno.utils.log import log_debug
@dataclass
class VLLM(OpenAILike):
    """
    Class for interacting with vLLM models via OpenAI-compatible API.

    Attributes:
        id: Model identifier
        name: API name
        provider: API provider
        api_key: API key (falls back to the VLLM_API_KEY env var)
        base_url: vLLM server URL (falls back to VLLM_BASE_URL, then localhost)
        temperature: Sampling temperature
        top_p: Nucleus sampling probability
        presence_penalty: Repetition penalty
        top_k: Top-k sampling
        enable_thinking: Special mode flag
    """

    # "not-set" is a placeholder; callers are expected to pass a real model id.
    id: str = "not-set"
    name: str = "VLLM"
    provider: str = "VLLM"
    api_key: Optional[str] = None
    base_url: Optional[str] = None
    temperature: float = 0.7
    top_p: float = 0.8
    presence_penalty: float = 1.5
    top_k: Optional[int] = None
    enable_thinking: Optional[bool] = None

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Returns client parameters for API requests, checking for VLLM_API_KEY.

        Fills in ``api_key`` and ``base_url`` from the environment when unset,
        then delegates to the parent implementation.

        Returns:
            Dict[str, Any]: A dictionary of client parameters for API requests.

        Raises:
            ModelAuthenticationError: If no API key is configured and
                VLLM_API_KEY is not set in the environment.
        """
        if not self.api_key:
            self.api_key = getenv("VLLM_API_KEY")
            if not self.api_key:
                raise ModelAuthenticationError(
                    message="VLLM_API_KEY not set. Please set the VLLM_API_KEY environment variable.",
                    model_name=self.name,
                )
        if not self.base_url:
            # Default to a local vLLM server when no URL is provided.
            self.base_url = getenv("VLLM_BASE_URL", "http://localhost:8000/v1/")
        return super()._get_client_params()

    def get_request_params(
        self,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
        **kwargs: Any,
    ) -> Dict[str, Any]:
        """
        Build the request parameters, folding vLLM-only options into extra_body.

        ``top_k`` and ``enable_thinking`` are not part of the OpenAI request
        schema, so they are tunneled through ``extra_body`` for the vLLM
        server to consume.

        NOTE(review): ``**kwargs`` is accepted but never forwarded to
        ``super().get_request_params`` — confirm whether that is intentional.
        """
        request_kwargs = super().get_request_params(
            response_format=response_format, tools=tools, tool_choice=tool_choice
        )
        vllm_body: Dict[str, Any] = {}
        if self.top_k is not None:
            vllm_body["top_k"] = self.top_k
        if self.enable_thinking is not None:
            vllm_body.setdefault("chat_template_kwargs", {})["enable_thinking"] = self.enable_thinking
        if vllm_body:
            # Merge with any extra_body already present; vLLM options win on conflict.
            existing_body = request_kwargs.get("extra_body") or {}
            request_kwargs["extra_body"] = {**existing_body, **vllm_body}
        if request_kwargs:
            log_debug(f"Calling {self.provider} with request parameters: {request_kwargs}", log_level=2)
        return request_kwargs
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/vllm/vllm.py",
"license": "Apache License 2.0",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/models/vllm/test_basic.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.db.sqlite.sqlite import SqliteDb
from agno.exceptions import ModelProviderError
from agno.models.vllm import VLLM
import os

# Use default model id or override via the VLLM_MODEL_ID environment variable.
# (The comment previously promised env-var override but the code never read it.)
VLLM_MODEL_ID = os.getenv("VLLM_MODEL_ID", "Qwen/Qwen2.5-7B-Instruct")
def _assert_metrics(response: RunOutput):
    """Sanity-check token accounting on a run: every count is positive and
    the total equals input plus output."""
    assert response.metrics is not None
    metrics = response.metrics
    for token_count in (metrics.input_tokens, metrics.output_tokens, metrics.total_tokens):
        assert token_count > 0
    assert metrics.total_tokens == metrics.input_tokens + metrics.output_tokens
def test_basic():
    """A plain, non-streaming run returns content plus the expected message triple."""
    agent = Agent(model=VLLM(id=VLLM_MODEL_ID), markdown=True, telemetry=False)
    result: RunOutput = agent.run("Share a 2 sentence comedy story")
    assert result.content is not None
    assert result.messages is not None
    assert [message.role for message in result.messages] == ["system", "user", "assistant"]
    assert len(result.messages) == 3
    _assert_metrics(result)
def test_basic_stream():
    """Every streamed chunk must carry non-None content."""
    agent = Agent(model=VLLM(id=VLLM_MODEL_ID), markdown=True, telemetry=False)
    chunk_stream = agent.run("Share a 2 sentence horror story", stream=True)
    for chunk in chunk_stream:
        assert chunk.content is not None
@pytest.mark.asyncio
async def test_async_basic():
    """Async run mirrors the sync case: content, message triple, sane metrics."""
    agent = Agent(model=VLLM(id=VLLM_MODEL_ID), markdown=True, telemetry=False)
    result = await agent.arun("Share a 2 sentence horror story")
    assert result.content is not None
    assert result.messages is not None
    assert [message.role for message in result.messages] == ["system", "user", "assistant"]
    assert len(result.messages) == 3
    _assert_metrics(result)
@pytest.mark.asyncio
async def test_async_basic_stream():
    """Async streaming: every yielded chunk has content."""
    agent = Agent(model=VLLM(id=VLLM_MODEL_ID), markdown=True, telemetry=False)
    chunk_stream = agent.arun("Share a 2 sentence horror story", stream=True)
    async for chunk in chunk_stream:
        assert chunk.content is not None
def test_with_memory():
    """Two sequential runs with history enabled: the second turn must see the
    first (the agent recalls the user's name) and the stored session transcript
    must hold system + two user/assistant exchanges (5 messages total)."""
    agent = Agent(
        db=SqliteDb(db_file="tmp/test_with_memory.db"),
        model=VLLM(id=VLLM_MODEL_ID),
        add_history_to_context=True,
        markdown=True,
        telemetry=False,
    )
    # First interaction
    response1 = agent.run("My name is John Smith")
    assert response1.content is not None
    # Second interaction should remember the name
    response2 = agent.run("What's my name?")
    assert response2.content is not None
    assert "John Smith" in response2.content
    # Verify memories were created
    messages = agent.get_session_messages()
    assert len(messages) == 5
    assert [m.role for m in messages] == ["system", "user", "assistant", "user", "assistant"]
    # Test metrics structure and types
    _assert_metrics(response2)
def test_output_schema():
    """output_schema coerces the model's answer into the given Pydantic type."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        output_schema=MovieScript,
        telemetry=False,
    )
    result = agent.run("Create a movie about time travel")
    # Structured output: a populated MovieScript instance, not raw text.
    assert isinstance(result.content, MovieScript)
    for field_value in (result.content.title, result.content.genre, result.content.plot):
        assert field_value is not None
def test_json_response_mode():
    """Same structured-output contract as test_output_schema, but forcing
    JSON mode instead of native structured output."""

    class MovieScript(BaseModel):
        title: str = Field(..., description="Movie title")
        genre: str = Field(..., description="Movie genre")
        plot: str = Field(..., description="Brief plot summary")

    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        output_schema=MovieScript,
        use_json_mode=True,
        telemetry=False,
    )
    result = agent.run("Create a movie about time travel")
    # Structured output: a populated MovieScript instance, not raw text.
    assert isinstance(result.content, MovieScript)
    for field_value in (result.content.title, result.content.genre, result.content.plot):
        assert field_value is not None
def test_history():
    """With history stored and re-injected, each turn adds one user and one
    assistant message, so the run's message count grows by two per turn."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        db=SqliteDb(db_file="tmp/VLLM/test_basic.db"),
        add_history_to_context=True,
        store_history_messages=True,
        telemetry=False,
    )
    for turn, prompt in enumerate(["Hello", "Hello 2", "Hello 3", "Hello 4"]):
        run_output = agent.run(prompt)
        assert run_output.messages is not None
        assert len(run_output.messages) == 2 * (turn + 1)
def test_exception():
    """An unknown model id must surface as ModelProviderError."""
    broken_agent = Agent(model=VLLM(id="invalid-model-id"), markdown=True, telemetry=False)
    with pytest.raises(ModelProviderError):
        broken_agent.run("Test VLLM exception")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vllm/test_basic.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/vllm/test_tool_use.py | from typing import Optional
import pytest
from agno.agent import Agent
from agno.models.vllm import VLLM
from agno.tools.exa import ExaTools
from agno.tools.websearch import WebSearchTools
from agno.tools.yfinance import YFinanceTools
import os

# Default model id; override via the VLLM_MODEL_ID environment variable
# (keeps this file consistent with the sibling test_basic.py constant).
VLLM_MODEL_ID = os.getenv("VLLM_MODEL_ID", "Qwen/Qwen2.5-7B-Instruct")
def test_tool_use():
    """The agent should invoke the finance tool and mention the ticker."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is the current price of TSLA?")
    assert result.messages is not None
    tool_was_called = any(message.tool_calls for message in result.messages)
    assert tool_was_called
    assert result.content is not None
    assert "TSLA" in result.content
def test_tool_use_stream():
    """Streaming run with tools: the event stream must include at least one
    tool-call event, and the streamed content must mention the ticker."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    responses = []
    tool_call_seen = False
    for response in agent.run("What is the current price of TSLA?", stream=True, stream_events=True):
        responses.append(response)
        # Tool events carry a `.tool` payload; a populated tool_name confirms a real call.
        if response.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(response, "tool") and response.tool:
            if response.tool.tool_name:  # type: ignore
                tool_call_seen = True
    assert len(responses) > 0
    assert tool_call_seen, "No tool calls observed in stream"
    assert any("TSLA" in r.content for r in responses if r.content)
@pytest.mark.asyncio
async def test_async_tool_use():
    """Async run with the finance tool: an assistant message must carry a
    tool call and the answer must mention the ticker."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    result = await agent.arun("What is the current price of TSLA?")
    assert result.messages is not None
    assistant_calls = [
        message.tool_calls
        for message in result.messages
        if message.role == "assistant" and message.tool_calls
    ]
    assert assistant_calls
    assert result.content is not None
    assert "TSLA" in result.content
@pytest.mark.asyncio
async def test_async_tool_use_stream():
    """Async streaming run with tools: at least one tool-call event must be
    observed and the streamed content must mention the ticker."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    responses = []
    tool_call_seen = False
    async for response in agent.arun("What is the current price of TSLA?", stream=True, stream_events=True):
        responses.append(response)
        # Tool events carry a `.tool` payload; a populated tool_name confirms a real call.
        if response.event in ["ToolCallStarted", "ToolCallCompleted"] and hasattr(response, "tool") and response.tool:
            if response.tool.tool_name:  # type: ignore
                tool_call_seen = True
    assert len(responses) > 0
    assert tool_call_seen, "No tool calls observed in stream"
    assert any("TSLA" in r.content for r in responses if r.content)
def test_parallel_tool_calls():
    """Asking for two tickers should trigger at least two function calls."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[YFinanceTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is the current price of TSLA and AAPL?")
    assert result.messages is not None
    all_calls = [
        call
        for message in result.messages
        if message.tool_calls
        for call in message.tool_calls
    ]
    function_calls = [call for call in all_calls if call.get("type", "") == "function"]
    assert len(function_calls) >= 2  # Total of 2 tool calls made
    assert result.content is not None
    assert "TSLA" in result.content and "AAPL" in result.content
def test_multiple_tool_calls():
    """A query needing both finance data and news should use both tools."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[YFinanceTools(cache_results=True), WebSearchTools(cache_results=True)],
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is the current price of TSLA and what is the latest news about it?")
    assert result.messages is not None
    all_calls = [
        call
        for message in result.messages
        if message.tool_calls
        for call in message.tool_calls
    ]
    function_calls = [call for call in all_calls if call.get("type", "") == "function"]
    assert len(function_calls) >= 2  # Total of 2 tool calls made
    assert result.content is not None
    assert "TSLA" in result.content and "latest news" in result.content.lower()
def test_tool_call_custom_tool_no_parameters():
    """A zero-argument plain function can be registered and invoked as a tool."""

    def get_the_weather_in_tokyo():
        """
        Get the weather in Tokyo
        """
        return "It is currently 70 degrees and cloudy in Tokyo"

    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[get_the_weather_in_tokyo],
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is the weather in Tokyo?")
    assert result.messages is not None
    assert any(message.tool_calls for message in result.messages)
    assert result.content is not None
    assert "70" in result.content
def test_tool_call_custom_tool_optional_parameters():
    """A tool with an Optional parameter is callable with or without the arg."""

    def get_the_weather(city: Optional[str] = None):
        """
        Get the weather in a city

        Args:
            city: The city to get the weather for
        """
        if city is None:
            return "It is currently 70 degrees and cloudy in Tokyo"
        else:
            return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[get_the_weather],
        markdown=True,
        telemetry=False,
    )
    result = agent.run("What is the weather in Paris?")
    assert result.messages is not None
    assert any(message.tool_calls for message in result.messages)
    assert result.content is not None
    assert "70" in result.content
def test_tool_call_list_parameters():
    """Tool taking a list argument (ExaTools URL fetch): the model must call
    one of Exa's known entry points to answer about both arXiv papers."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        tools=[ExaTools()],
        instructions="Use a single tool call if possible",
        markdown=True,
        telemetry=False,
    )
    response = agent.run(
        "What are the papers at https://arxiv.org/pdf/2307.06435 and https://arxiv.org/pdf/2502.09601 about?"
    )
    # Verify tool usage
    assert response.messages is not None
    assert any(msg.tool_calls for msg in response.messages)
    tool_calls = []
    for msg in response.messages:
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)
    # Whatever the model chose, every function call must target an Exa tool.
    for call in tool_calls:
        if call.get("type", "") == "function":
            assert call["function"]["name"] in ["get_contents", "exa_answer", "search_exa"]
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vllm/test_tool_use.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.