sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:libs/agno/tests/unit/tools/test_shopify.py | """Unit tests for ShopifyTools class."""
import json
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.shopify import ShopifyTools
@pytest.fixture
def mock_httpx_client():
    """Yield a MagicMock standing in for the httpx.Client used by ShopifyTools.

    The patched client is wired up as a context manager so code doing
    ``with httpx.Client() as client:`` receives the mock instance.
    """
    with patch("agno.tools.shopify.httpx.Client") as mock_client:
        mock_instance = MagicMock()
        # Make the patched class usable as a context manager returning our mock.
        mock_client.return_value.__enter__ = MagicMock(return_value=mock_instance)
        # __exit__ returning False means exceptions propagate normally.
        mock_client.return_value.__exit__ = MagicMock(return_value=False)
        yield mock_instance
@pytest.fixture
def shopify_tools():
    """Create a ShopifyTools instance with dummy test credentials."""
    return ShopifyTools(
        shop_name="test-store",
        access_token="test_access_token",
        api_version="2025-10",
    )
@pytest.fixture
def mock_shop_response():
    """Mock GraphQL payload for a shop-info query."""
    return {
        "data": {
            "shop": {
                "name": "Test Store",
                "email": "test@example.com",
                "currencyCode": "USD",
                "primaryDomain": {"url": "https://test-store.myshopify.com"},
                "billingAddress": {"country": "United States", "city": "New York"},
                "plan": {"displayName": "Basic"},
            }
        }
    }
@pytest.fixture
def mock_products_response():
    """Mock GraphQL payload for a products query (one product, one variant)."""
    return {
        "data": {
            "products": {
                "edges": [
                    {
                        "node": {
                            "id": "gid://shopify/Product/123",
                            "title": "Test Product",
                            "status": "ACTIVE",
                            "totalInventory": 100,
                            "createdAt": "2024-01-01T00:00:00Z",
                            "priceRangeV2": {
                                "minVariantPrice": {"amount": "10.00", "currencyCode": "USD"},
                                "maxVariantPrice": {"amount": "20.00", "currencyCode": "USD"},
                            },
                            "variants": {
                                "edges": [
                                    {
                                        "node": {
                                            "id": "gid://shopify/ProductVariant/456",
                                            "title": "Default",
                                            "sku": "TEST-SKU",
                                            "price": "15.00",
                                            "inventoryQuantity": 100,
                                        }
                                    }
                                ]
                            },
                        }
                    }
                ]
            }
        }
    }
@pytest.fixture
def mock_orders_response():
    """Mock GraphQL payload for an orders query (one paid, fulfilled order)."""
    return {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "id": "gid://shopify/Order/789",
                            "name": "#1001",
                            "createdAt": "2024-01-15T10:00:00Z",
                            "displayFinancialStatus": "PAID",
                            "displayFulfillmentStatus": "FULFILLED",
                            "totalPriceSet": {"shopMoney": {"amount": "50.00", "currencyCode": "USD"}},
                            "subtotalPriceSet": {"shopMoney": {"amount": "45.00"}},
                            "customer": {
                                "id": "gid://shopify/Customer/111",
                                "email": "customer@example.com",
                                "firstName": "John",
                                "lastName": "Doe",
                            },
                            "lineItems": {
                                "edges": [
                                    {
                                        "node": {
                                            "id": "gid://shopify/LineItem/222",
                                            "title": "Test Product",
                                            "quantity": 2,
                                            "variant": {"id": "gid://shopify/ProductVariant/456", "sku": "TEST-SKU"},
                                            "originalUnitPriceSet": {"shopMoney": {"amount": "15.00"}},
                                        }
                                    }
                                ]
                            },
                        }
                    }
                ]
            }
        }
    }
def test_init_with_credentials():
    """Explicit credentials are stored; unspecified settings use defaults."""
    tools = ShopifyTools(shop_name="my-store", access_token="my_token")
    assert tools.shop_name == "my-store"
    assert tools.access_token == "my_token"
    # Defaults documented by ShopifyTools.
    assert tools.api_version == "2025-10"
    assert tools.timeout == 30
def test_init_with_env_variables():
    """Credentials fall back to SHOPIFY_* environment variables."""
    with patch.dict(
        "os.environ",
        {
            "SHOPIFY_SHOP_NAME": "env-store",
            "SHOPIFY_ACCESS_TOKEN": "env_token",
        },
    ):
        tools = ShopifyTools()
        assert tools.shop_name == "env-store"
        assert tools.access_token == "env_token"
def test_init_with_custom_api_version():
    """An explicitly provided API version overrides the default."""
    tools = ShopifyTools(
        shop_name="test-store",
        access_token="test_token",
        api_version="2024-10",
    )
    assert tools.api_version == "2024-10"
def test_init_with_custom_timeout():
    """An explicitly provided timeout overrides the default of 30s."""
    tools = ShopifyTools(
        shop_name="test-store",
        access_token="test_token",
        timeout=60,
    )
    assert tools.timeout == 60
def test_base_url_construction():
    """base_url combines shop name and API version into the GraphQL endpoint."""
    tools = ShopifyTools(
        shop_name="my-store",
        access_token="token",
        api_version="2025-10",
    )
    expected_url = "https://my-store.myshopify.com/admin/api/2025-10/graphql.json"
    assert tools.base_url == expected_url
def test_tools_registration():
    """Every public Shopify tool must be registered on the toolkit."""
    tools = ShopifyTools(shop_name="test-store", access_token="test_token")
    function_names = [func.name for func in tools.functions.values()]
    expected_tools = [
        "get_shop_info",
        "get_products",
        "get_orders",
        "get_top_selling_products",
        "get_products_bought_together",
        "get_sales_by_date_range",
        "get_order_analytics",
        "get_product_sales_breakdown",
        "get_customer_order_history",
        "get_inventory_levels",
        "get_low_stock_products",
        "get_sales_trends",
        "get_average_order_value",
        "get_repeat_customers",
    ]
    for tool_name in expected_tools:
        assert tool_name in function_names
def test_successful_request(shopify_tools, mock_httpx_client):
    """A successful GraphQL request returns the unwrapped `data` payload."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"shop": {"name": "Test Store"}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools._make_graphql_request("query { shop { name } }")
    assert result == {"shop": {"name": "Test Store"}}
    mock_httpx_client.post.assert_called_once()
def test_request_with_variables(shopify_tools, mock_httpx_client):
    """GraphQL variables are forwarded and the `data` payload is returned."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"product": {"title": "Test"}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools._make_graphql_request(
        "query($id: ID!) { product(id: $id) { title } }",
        variables={"id": "123"},
    )
    assert result == {"product": {"title": "Test"}}
def test_request_with_errors(shopify_tools, mock_httpx_client):
    """A GraphQL-level `errors` payload is converted to an error result."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"errors": [{"message": "Access denied"}]}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools._make_graphql_request("query { shop { name } }")
    assert "error" in result
def test_request_json_decode_error(shopify_tools, mock_httpx_client):
    """A non-JSON response body propagates the decode exception to the caller."""
    mock_response = MagicMock()
    mock_response.json.side_effect = Exception("Invalid JSON")
    mock_response.text = "Invalid response"
    mock_httpx_client.post.return_value = mock_response
    with pytest.raises(Exception, match="Invalid JSON"):
        shopify_tools._make_graphql_request("query { shop { name } }")
def test_get_shop_info_success(shopify_tools, mock_httpx_client, mock_shop_response):
    """get_shop_info returns the shop fields as a JSON string."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_shop_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_shop_info()
    result_data = json.loads(result)
    assert result_data["name"] == "Test Store"
    assert result_data["email"] == "test@example.com"
    assert result_data["currencyCode"] == "USD"
def test_get_shop_info_error(shopify_tools, mock_httpx_client):
    """get_shop_info surfaces GraphQL errors as an `error` key in its JSON output."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"errors": [{"message": "Unauthorized"}]}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_shop_info()
    result_data = json.loads(result)
    assert "error" in result_data
def test_get_products_success(shopify_tools, mock_httpx_client, mock_products_response):
    """get_products returns a JSON list of product summaries."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_products_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_products(max_results=10)
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["title"] == "Test Product"
    assert result_data[0]["status"] == "ACTIVE"
def test_get_products_with_status_filter(shopify_tools, mock_httpx_client, mock_products_response):
    """get_products accepts a status filter and still returns results."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_products_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_products(status="ACTIVE")
    result_data = json.loads(result)
    assert len(result_data) == 1
def test_get_products_max_results_limit(shopify_tools, mock_httpx_client, mock_products_response):
    """Requesting more than Shopify's 250-item cap must not fail."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_products_response
    mock_httpx_client.post.return_value = mock_response
    shopify_tools.get_products(max_results=500)
    # Verify the query was called (indirectly checking limit)
    mock_httpx_client.post.assert_called_once()
def test_get_orders_success(shopify_tools, mock_httpx_client, mock_orders_response):
    """get_orders returns a JSON list with name and financial status."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_orders_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_orders(max_results=10, created_after="2024-01-01")
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["name"] == "#1001"
    assert result_data[0]["financial_status"] == "PAID"
def test_get_orders_with_status_filter(shopify_tools, mock_httpx_client, mock_orders_response):
    """get_orders accepts a status filter and still returns results."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_orders_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_orders(status="paid")
    result_data = json.loads(result)
    assert len(result_data) == 1
def test_get_orders_customer_info(shopify_tools, mock_httpx_client, mock_orders_response):
    """Customer email and full name are extracted onto each order."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_orders_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_orders()
    result_data = json.loads(result)
    assert result_data[0]["customer"]["email"] == "customer@example.com"
    # firstName + lastName are combined into a single display name.
    assert result_data[0]["customer"]["name"] == "John Doe"
def test_get_top_selling_products_success(shopify_tools, mock_httpx_client):
    """Top sellers are aggregated from order line items and ranked."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "lineItems": {
                                "edges": [
                                    {
                                        "node": {
                                            "title": "Best Seller",
                                            "quantity": 10,
                                            "variant": {
                                                "id": "gid://shopify/ProductVariant/123",
                                                "product": {
                                                    "id": "gid://shopify/Product/456",
                                                    "title": "Best Seller Product",
                                                },
                                            },
                                            "originalUnitPriceSet": {"shopMoney": {"amount": "25.00"}},
                                        }
                                    }
                                ]
                            }
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_top_selling_products(limit=5, created_after="2024-01-01")
    result_data = json.loads(result)
    assert len(result_data) == 1
    # The product title (not the line-item title) is reported.
    assert result_data[0]["title"] == "Best Seller Product"
    assert result_data[0]["total_quantity"] == 10
    assert result_data[0]["rank"] == 1
def test_get_products_bought_together_success(shopify_tools, mock_httpx_client):
    """Two orders containing the same product pair count as one co-occurring pair."""

    # Helper data: a line-item edge for a given product id/title.
    def line_item(product_id, title):
        return {
            "node": {
                "variant": {
                    "product": {
                        "id": product_id,
                        "title": title,
                    }
                }
            }
        }

    # Two orders, each containing Product A and Product B together.
    order = {
        "node": {
            "lineItems": {
                "edges": [
                    line_item("gid://shopify/Product/1", "Product A"),
                    line_item("gid://shopify/Product/2", "Product B"),
                ]
            }
        }
    }
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"orders": {"edges": [order, order]}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_products_bought_together(min_occurrences=2, created_after="2024-01-01")
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["times_bought_together"] == 2
def test_get_sales_by_date_range_success(shopify_tools, mock_httpx_client):
    """Revenue, order count, and items sold are totalled over the range."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "createdAt": "2024-01-15T10:00:00Z",
                            "totalPriceSet": {"shopMoney": {"amount": "100.00", "currencyCode": "USD"}},
                            "lineItems": {"edges": [{"node": {"quantity": 3}}]},
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_sales_by_date_range("2024-01-01", "2024-01-31")
    result_data = json.loads(result)
    assert result_data["total_revenue"] == 100.00
    assert result_data["total_orders"] == 1
    assert result_data["total_items_sold"] == 3
def test_get_order_analytics_success(shopify_tools, mock_httpx_client):
    """Order analytics reports totals and average order value."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "displayFinancialStatus": "PAID",
                            "displayFulfillmentStatus": "FULFILLED",
                            "totalPriceSet": {"shopMoney": {"amount": "50.00", "currencyCode": "USD"}},
                            "subtotalPriceSet": {"shopMoney": {"amount": "45.00"}},
                            "totalShippingPriceSet": {"shopMoney": {"amount": "5.00"}},
                            "totalTaxSet": {"shopMoney": {"amount": "0.00"}},
                            "lineItems": {"edges": [{"node": {"quantity": 2}}]},
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_order_analytics(created_after="2024-01-01")
    result_data = json.loads(result)
    assert result_data["total_orders"] == 1
    assert result_data["total_revenue"] == 50.00
    # With a single order, AOV equals that order's total.
    assert result_data["average_order_value"] == 50.00
def test_get_order_analytics_no_orders(shopify_tools, mock_httpx_client):
    """With no orders, analytics returns an informational message, not an error."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"orders": {"edges": []}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_order_analytics()
    result_data = json.loads(result)
    assert "message" in result_data
def test_get_inventory_levels_success(shopify_tools, mock_httpx_client, mock_products_response):
    """get_inventory_levels reports each product's total inventory."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_products_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_inventory_levels(max_results=50)
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["total_inventory"] == 100
def test_get_low_stock_products_success(shopify_tools, mock_httpx_client):
    """Products whose inventory is at or below the threshold are returned."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "products": {
                "edges": [
                    {
                        "node": {
                            "id": "gid://shopify/Product/123",
                            "title": "Low Stock Product",
                            "totalInventory": 5,
                            "variants": {
                                "edges": [
                                    {
                                        "node": {
                                            "id": "gid://shopify/ProductVariant/456",
                                            "title": "Default",
                                            "sku": "LOW-SKU",
                                            "inventoryQuantity": 5,
                                        }
                                    }
                                ]
                            },
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_low_stock_products(threshold=10)
    result_data = json.loads(result)
    assert len(result_data) == 1
    assert result_data[0]["total_inventory"] == 5
def test_get_sales_trends_success(shopify_tools, mock_httpx_client):
    """Sales trends include a current_period summary with total revenue."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "totalPriceSet": {"shopMoney": {"amount": "100.00", "currencyCode": "USD"}},
                            "lineItems": {"edges": [{"node": {"quantity": 2}}]},
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_sales_trends(created_after="2024-01-01", compare_previous_period=True)
    result_data = json.loads(result)
    assert "current_period" in result_data
    assert result_data["current_period"]["total_revenue"] == 100.00
def test_get_average_order_value_success(shopify_tools, mock_httpx_client):
    """AOV over two orders of 50 and 100 is 75."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "createdAt": "2024-01-15T10:00:00Z",
                            "totalPriceSet": {"shopMoney": {"amount": "50.00", "currencyCode": "USD"}},
                        }
                    },
                    {
                        "node": {
                            "createdAt": "2024-01-15T11:00:00Z",
                            "totalPriceSet": {"shopMoney": {"amount": "100.00", "currencyCode": "USD"}},
                        }
                    },
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_average_order_value(group_by="day", created_after="2024-01-01")
    result_data = json.loads(result)
    assert result_data["overall_average_order_value"] == 75.00
def test_get_average_order_value_no_orders(shopify_tools, mock_httpx_client):
    """With no orders, AOV returns an informational message (no division by zero)."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"orders": {"edges": []}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_average_order_value()
    result_data = json.loads(result)
    assert "message" in result_data
def test_get_repeat_customers_success(shopify_tools, mock_httpx_client):
    """Two orders from the same customer count as one repeat customer."""
    # The same customer appears on two distinct orders in the period.
    repeat_customer = {
        "id": "gid://shopify/Customer/123",
        "email": "repeat@example.com",
        "firstName": "Repeat",
        "lastName": "Customer",
        "numberOfOrders": 5,
        "amountSpent": {"amount": "500.00", "currencyCode": "USD"},
    }
    order_node = {
        "node": {
            "customer": repeat_customer,
            "totalPriceSet": {"shopMoney": {"amount": "100.00"}},
        }
    }
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"orders": {"edges": [order_node, order_node]}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_repeat_customers(min_orders=2, created_after="2024-01-01")
    result_data = json.loads(result)
    assert result_data["repeat_customer_count"] == 1
    assert result_data["customers"][0]["orders_in_period"] == 2
def test_get_customer_order_history_success(shopify_tools, mock_httpx_client, mock_orders_response):
    """Looking up a customer by email returns their profile and orders."""
    mock_response = MagicMock()
    mock_response.json.return_value = mock_orders_response
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_customer_order_history("customer@example.com")
    result_data = json.loads(result)
    assert "customer" in result_data
    assert "orders" in result_data
    assert len(result_data["orders"]) == 1
def test_get_customer_order_history_no_orders(shopify_tools, mock_httpx_client):
    """An unknown customer yields an informational message, not an exception."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"orders": {"edges": []}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_customer_order_history("nonexistent@example.com")
    result_data = json.loads(result)
    assert "message" in result_data
def test_get_product_sales_breakdown_success(shopify_tools, mock_httpx_client):
    """Sales for a given product GID are totalled (2 units x 25.00 = 50.00)."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "createdAt": "2024-01-15T10:00:00Z",
                            "lineItems": {
                                "edges": [
                                    {
                                        "node": {
                                            "title": "Test Product",
                                            "quantity": 2,
                                            "variant": {
                                                "id": "gid://shopify/ProductVariant/456",
                                                "title": "Default",
                                                "sku": "TEST-SKU",
                                                "product": {
                                                    "id": "gid://shopify/Product/123",
                                                    "title": "Test Product",
                                                },
                                            },
                                            "originalUnitPriceSet": {
                                                "shopMoney": {"amount": "25.00", "currencyCode": "USD"}
                                            },
                                        }
                                    }
                                ]
                            },
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_product_sales_breakdown("gid://shopify/Product/123", created_after="2024-01-01")
    result_data = json.loads(result)
    assert result_data["product_title"] == "Test Product"
    assert result_data["total_quantity_sold"] == 2
    assert result_data["total_revenue"] == 50.00
def test_get_product_sales_breakdown_with_numeric_id(shopify_tools, mock_httpx_client):
    """A bare numeric ID is normalized to the full gid:// form before matching."""
    mock_response = MagicMock()
    mock_response.json.return_value = {
        "data": {
            "orders": {
                "edges": [
                    {
                        "node": {
                            "createdAt": "2024-01-15T10:00:00Z",
                            "lineItems": {
                                "edges": [
                                    {
                                        "node": {
                                            "title": "Test Product",
                                            "quantity": 1,
                                            "variant": {
                                                "id": "gid://shopify/ProductVariant/456",
                                                "title": "Default",
                                                "sku": "TEST-SKU",
                                                "product": {
                                                    "id": "gid://shopify/Product/123",
                                                    "title": "Test Product",
                                                },
                                            },
                                            "originalUnitPriceSet": {
                                                "shopMoney": {"amount": "25.00", "currencyCode": "USD"}
                                            },
                                        }
                                    }
                                ]
                            },
                        }
                    }
                ]
            }
        }
    }
    mock_httpx_client.post.return_value = mock_response
    # Test with numeric ID (should be normalized to full GID format)
    result = shopify_tools.get_product_sales_breakdown("123", created_after="2024-01-01")
    result_data = json.loads(result)
    assert result_data["product_title"] == "Test Product"
def test_get_product_sales_breakdown_not_found(shopify_tools, mock_httpx_client):
    """A product with no matching sales yields an error payload."""
    mock_response = MagicMock()
    mock_response.json.return_value = {"data": {"orders": {"edges": []}}}
    mock_httpx_client.post.return_value = mock_response
    result = shopify_tools.get_product_sales_breakdown("gid://shopify/Product/999")
    result_data = json.loads(result)
    assert "error" in result_data
def test_toolkit_name():
    """The toolkit registers under the name 'shopify'."""
    tools = ShopifyTools(shop_name="test-store", access_token="test_token")
    assert tools.name == "shopify"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_shopify.py",
"license": "Apache License 2.0",
"lines": 716,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/test_generator_session_state.py | """
Tests for session_state persistence in generator-based tools.
This test suite verifies that session_state modifications made during
generator iteration are properly captured and not overwritten by stale state.
The bug: When a tool is a generator function, updated_session_state was captured
before the generator body executed. Any session_state modifications made during
yield iterations would be lost when merge_dictionaries ran later, overwriting
the changes with the stale pre-execution snapshot.
Fix: For generators, we don't capture updated_session_state in execute()/aexecute().
Instead, we re-capture it after the generator is fully consumed in base.py.
"""
from typing import Iterator
import pytest
from agno.tools.function import Function, FunctionCall, FunctionExecutionResult
def test_sync_generator_session_state_not_captured_early():
    """Verify that sync generators don't capture session_state before consumption."""
    from agno.run import RunContext

    session_state = {"initial": "value"}

    def generator_tool_with_context(run_context: RunContext) -> Iterator[str]:
        """A generator tool that modifies run_context.session_state during iteration."""
        run_context.session_state["modified_during_yield"] = True
        yield "first"
        run_context.session_state["second_modification"] = "done"
        yield "second"

    # Create the function with run_context
    func = Function.from_callable(generator_tool_with_context)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    # Execute - this returns a FunctionExecutionResult
    result = fc.execute()
    # For generators, updated_session_state should be None
    # (since the generator hasn't been consumed yet)
    assert result.status == "success"
    assert result.updated_session_state is None
    # The result should be a generator
    assert hasattr(result.result, "__iter__")
    # Consume the generator
    output = list(result.result)
    assert output == ["first", "second"]
    # After consumption, session_state should have the modifications
    assert session_state["modified_during_yield"] is True
    assert session_state["second_modification"] == "done"
def test_non_generator_session_state_captured():
    """Verify that non-generator functions capture session_state normally."""
    from agno.run import RunContext

    session_state = {"initial": "value"}

    def regular_tool_with_context(run_context: RunContext) -> str:
        """A regular tool that modifies run_context.session_state."""
        run_context.session_state["modified"] = True
        return "done"

    # Create the function with run_context
    func = Function.from_callable(regular_tool_with_context)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    # Execute
    result = fc.execute()
    # For non-generators, updated_session_state should be captured
    assert result.status == "success"
    assert result.updated_session_state == session_state
    assert session_state["modified"] is True
@pytest.mark.asyncio
async def test_async_generator_session_state_not_captured_early():
    """Verify that async generators don't capture session_state before consumption."""
    from typing import AsyncIterator

    from agno.run import RunContext

    session_state = {"initial": "value"}

    async def async_generator_tool_with_context(run_context: RunContext) -> AsyncIterator[str]:
        """An async generator tool that modifies run_context.session_state during iteration."""
        run_context.session_state["async_modified"] = True
        yield "async_first"
        run_context.session_state["async_second"] = "done"
        yield "async_second"

    # Create the function with run_context
    func = Function.from_callable(async_generator_tool_with_context)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    # Execute asynchronously
    result = await fc.aexecute()
    # For async generators, updated_session_state should be None
    assert result.status == "success"
    assert result.updated_session_state is None
    # The result should be an async generator
    assert hasattr(result.result, "__anext__")
    # Consume the async generator
    output = []
    async for item in result.result:
        output.append(item)
    assert output == ["async_first", "async_second"]
    # After consumption, session_state should have the modifications
    assert session_state["async_modified"] is True
    assert session_state["async_second"] == "done"
@pytest.mark.asyncio
async def test_async_non_generator_session_state_captured():
    """Verify that async non-generator functions capture session_state normally."""
    from agno.run import RunContext

    session_state = {"initial": "value"}

    async def async_regular_tool_with_context(run_context: RunContext) -> str:
        """An async regular tool that modifies run_context.session_state."""
        run_context.session_state["async_regular"] = True
        return "async_done"

    # Create the function with run_context
    func = Function.from_callable(async_regular_tool_with_context)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    # Execute asynchronously
    result = await fc.aexecute()
    # For non-generators, session_state modifications should be in place
    assert result.status == "success"
    assert session_state["async_regular"] is True
def test_execution_result_with_none_session_state():
    """Verify FunctionExecutionResult can have None updated_session_state."""
    result = FunctionExecutionResult(
        status="success",
        result="test",
        updated_session_state=None,
    )
    assert result.updated_session_state is None
def test_execution_result_with_session_state():
    """Verify FunctionExecutionResult can have dict updated_session_state."""
    session_state = {"key": "value"}
    result = FunctionExecutionResult(
        status="success",
        result="test",
        updated_session_state=session_state,
    )
    assert result.updated_session_state == session_state
    assert result.updated_session_state["key"] == "value"
def test_base_model_recaptures_session_state_after_sync_generator():
    """
    Test that base.py run_function_call re-captures session_state after generator consumption.

    This tests the full flow: function.py returns None for generators,
    then base.py re-captures after the generator is consumed.
    """
    from types import GeneratorType

    from agno.run import RunContext

    session_state = {"initial": "value"}

    def generator_tool_with_context(run_context: RunContext) -> Iterator[str]:
        """A generator tool that modifies run_context.session_state."""
        run_context.session_state["modified_in_generator"] = True
        yield "output"

    # Create the function and function call with run_context
    func = Function.from_callable(generator_tool_with_context)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    # Execute - returns FunctionExecutionResult with updated_session_state=None for generators
    execution_result = fc.execute()
    assert execution_result.updated_session_state is None
    assert isinstance(execution_result.result, GeneratorType)
    # Simulate what base.py does: consume the generator
    output = list(execution_result.result)
    assert output == ["output"]
    # Verify session_state was modified during iteration
    assert session_state["modified_in_generator"] is True
    # Simulate the re-capture logic from base.py run_function_call
    # This is what happens after generator consumption in base.py
    if execution_result.updated_session_state is None:
        if fc.function._run_context is not None and fc.function._run_context.session_state is not None:
            execution_result.updated_session_state = fc.function._run_context.session_state
    # Now updated_session_state should be captured with the modifications
    assert execution_result.updated_session_state is not None
    assert execution_result.updated_session_state["modified_in_generator"] is True
@pytest.mark.asyncio
async def test_base_model_recaptures_session_state_after_async_generator():
    """
    Test that base.py arun_function_calls re-captures session_state after async generator consumption.

    This tests the full flow: function.py returns None for async generators,
    then base.py re-captures after the generator is consumed.
    """
    from typing import AsyncIterator

    from agno.run import RunContext

    session_state = {"initial": "value"}

    async def async_generator_tool_with_context(run_context: RunContext) -> AsyncIterator[str]:
        """An async generator tool that modifies run_context.session_state."""
        run_context.session_state["async_modified_in_generator"] = True
        yield "async_output"

    # Create the function and function call with run_context
    func = Function.from_callable(async_generator_tool_with_context)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    # Execute - returns FunctionExecutionResult with updated_session_state=None for generators
    execution_result = await fc.aexecute()
    assert execution_result.updated_session_state is None
    # Consume the async generator
    output = []
    async for item in execution_result.result:
        output.append(item)
    assert output == ["async_output"]
    # Verify session_state was modified during iteration
    assert session_state["async_modified_in_generator"] is True
    # Simulate the re-capture logic from base.py arun_function_calls
    updated_session_state = execution_result.updated_session_state
    if updated_session_state is None:
        if fc.function._run_context is not None and fc.function._run_context.session_state is not None:
            updated_session_state = fc.function._run_context.session_state
    # Now updated_session_state should be captured with the modifications
    assert updated_session_state is not None
    assert updated_session_state["async_modified_in_generator"] is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/test_generator_session_state.py",
"license": "Apache License 2.0",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/os/routers/agents/router.py | import json
from typing import TYPE_CHECKING, Any, AsyncGenerator, List, Optional, Union, cast
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Depends,
File,
Form,
HTTPException,
Query,
Request,
UploadFile,
)
from fastapi.responses import JSONResponse, StreamingResponse
from agno.agent.agent import Agent
from agno.agent.remote import RemoteAgent
from agno.db.base import BaseDb
from agno.exceptions import InputCheckError, OutputCheckError
from agno.media import Audio, Image, Video
from agno.media import File as FileMedia
from agno.os.auth import (
get_auth_token_from_request,
get_authentication_dependency,
require_approval_resolved,
require_resource_access,
)
from agno.os.routers.agents.schema import AgentResponse
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import (
format_sse_event,
get_agent_by_id,
get_request_kwargs,
process_audio,
process_document,
process_image,
process_video,
)
from agno.registry import Registry
from agno.run.agent import RunErrorEvent, RunOutput
from agno.run.base import RunStatus
from agno.utils.log import log_debug, log_error, log_warning
if TYPE_CHECKING:
from agno.os.app import AgentOS
async def agent_response_streamer(
    agent: Union[Agent, RemoteAgent],
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[List[Image]] = None,
    audio: Optional[List[Audio]] = None,
    videos: Optional[List[Video]] = None,
    files: Optional[List[FileMedia]] = None,
    background_tasks: Optional[BackgroundTasks] = None,
    auth_token: Optional[str] = None,
    **kwargs: Any,
) -> AsyncGenerator:
    """Run the given agent asynchronously and yield its response as SSE events.

    Check/guardrail failures (InputCheckError / OutputCheckError) are surfaced
    as structured RunErrorEvents; any other exception is printed and surfaced
    as a RunErrorEvent so the stream always ends with an event rather than a
    broken connection.
    """
    try:
        # Pass background_tasks if provided
        if background_tasks is not None:
            kwargs["background_tasks"] = background_tasks

        # Stream granular events by default unless the caller opted out
        stream_events = kwargs.pop("stream_events", True)

        # Pass auth_token for remote agents
        if auth_token and isinstance(agent, RemoteAgent):
            kwargs["auth_token"] = auth_token

        run_response = agent.arun(
            input=message,
            session_id=session_id,
            user_id=user_id,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            stream=True,
            stream_events=stream_events,
            **kwargs,
        )
        async for run_response_chunk in run_response:
            yield format_sse_event(run_response_chunk)  # type: ignore
    except (InputCheckError, OutputCheckError) as e:
        # Guardrail violations carry structured error metadata
        error_response = RunErrorEvent(
            content=str(e),
            error_type=e.type,
            error_id=e.error_id,
            additional_data=e.additional_data,
        )
        yield format_sse_event(error_response)
    except Exception as e:
        import traceback

        traceback.print_exc(limit=3)
        # Propagate any structured metadata the exception may carry, mirroring
        # agent_continue_response_streamer's generic handler
        error_response = RunErrorEvent(
            content=str(e),
            error_type=getattr(e, "type", None),
            error_id=getattr(e, "error_id", None),
        )
        yield format_sse_event(error_response)
async def agent_continue_response_streamer(
    agent: Union[Agent, RemoteAgent],
    run_id: str,
    updated_tools: Optional[List] = None,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    background_tasks: Optional[BackgroundTasks] = None,
    auth_token: Optional[str] = None,
) -> AsyncGenerator:
    """Continue a paused agent run asynchronously and yield its response as SSE events.

    Check/guardrail failures are surfaced as structured RunErrorEvents; any
    other exception is printed and surfaced as a RunErrorEvent so the stream
    always terminates with an event.
    """
    try:
        # Build kwargs for remote agent auth
        extra_kwargs: dict = {}
        if auth_token and isinstance(agent, RemoteAgent):
            extra_kwargs["auth_token"] = auth_token

        continue_response = agent.acontinue_run(
            run_id=run_id,
            updated_tools=updated_tools,
            session_id=session_id,
            user_id=user_id,
            stream=True,
            stream_events=True,
            background_tasks=background_tasks,
            **extra_kwargs,
        )
        async for run_response_chunk in continue_response:
            yield format_sse_event(run_response_chunk)  # type: ignore
    except (InputCheckError, OutputCheckError) as e:
        error_response = RunErrorEvent(
            content=str(e),
            error_type=e.type,
            error_id=e.error_id,
            additional_data=e.additional_data,
        )
        yield format_sse_event(error_response)
    except Exception as e:
        import traceback

        traceback.print_exc(limit=3)
        # getattr tolerates plain exceptions that carry no structured metadata
        error_response = RunErrorEvent(
            content=str(e),
            error_type=getattr(e, "type", None),
            error_id=getattr(e, "error_id", None),
        )
        yield format_sse_event(error_response)
def get_agent_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
    registry: Optional[Registry] = None,
) -> APIRouter:
    """
    Create the agent router with comprehensive OpenAPI documentation.
    """
    # Every route shares the auth dependency and this common error-response map.
    router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        responses={
            400: {"description": "Bad Request", "model": BadRequestResponse},
            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
            404: {"description": "Not Found", "model": NotFoundResponse},
            422: {"description": "Validation Error", "model": ValidationErrorResponse},
            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
        },
    )

    @router.post(
        "/agents/{agent_id}/runs",
        tags=["Agents"],
        operation_id="create_agent_run",
        response_model_exclude_none=True,
        summary="Create Agent Run",
        description=(
            "Execute an agent with a message and optional media files. Supports both streaming and non-streaming responses.\n\n"
            "**Features:**\n"
            "- Text message input with optional session management\n"
            "- Multi-media support: images (PNG, JPEG, WebP), audio (WAV, MP3), video (MP4, WebM, etc.)\n"
            "- Document processing: PDF, CSV, DOCX, TXT, JSON\n"
            "- Real-time streaming responses with Server-Sent Events (SSE)\n"
            "- User and session context preservation\n\n"
            "**Streaming Response:**\n"
            "When `stream=true`, returns SSE events with `event` and `data` fields."
        ),
        responses={
            200: {
                "description": "Agent run executed successfully",
                "content": {
                    "text/event-stream": {
                        "examples": {
                            "event_stream": {
                                "summary": "Example event stream response",
                                "value": 'event: RunStarted\ndata: {"content": "Hello!", "run_id": "123..."}\n\n',
                            }
                        }
                    },
                },
            },
            400: {"description": "Invalid request or unsupported file type", "model": BadRequestResponse},
            404: {"description": "Agent not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("agents", "run", "agent_id"))],
    )
    async def create_agent_run(
        agent_id: str,
        request: Request,
        background_tasks: BackgroundTasks,
        message: str = Form(...),
        stream: bool = Form(True),
        session_id: Optional[str] = Form(None),
        user_id: Optional[str] = Form(None),
        files: Optional[List[UploadFile]] = File(None),
        version: Optional[str] = Form(None),
        background: bool = Form(False),
    ):
        # Collect extra request parameters not declared in this signature
        kwargs = await get_request_kwargs(request, create_agent_run)
        # Values injected into request.state (e.g. by middleware) take precedence
        # over the corresponding form parameters; a conflict is logged, not an error.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            if user_id and user_id != request.state.user_id:
                log_warning("User ID parameter passed in both request state and kwargs, using request state")
            user_id = request.state.user_id
        if hasattr(request.state, "session_id") and request.state.session_id is not None:
            if session_id and session_id != request.state.session_id:
                log_warning("Session ID parameter passed in both request state and kwargs, using request state")
            session_id = request.state.session_id
        if hasattr(request.state, "session_state") and request.state.session_state is not None:
            session_state = request.state.session_state
            if "session_state" in kwargs:
                log_warning("Session state parameter passed in both request state and kwargs, using request state")
            kwargs["session_state"] = session_state
        if hasattr(request.state, "dependencies") and request.state.dependencies is not None:
            dependencies = request.state.dependencies
            if "dependencies" in kwargs:
                log_warning("Dependencies parameter passed in both request state and kwargs, using request state")
            kwargs["dependencies"] = dependencies
        if hasattr(request.state, "metadata") and request.state.metadata is not None:
            metadata = request.state.metadata
            if "metadata" in kwargs:
                log_warning("Metadata parameter passed in both request state and kwargs, using request state")
            kwargs["metadata"] = metadata
        agent = get_agent_by_id(
            agent_id, os.agents, os.db, registry, version=int(version) if version else None, create_fresh=True
        )
        if agent is None:
            raise HTTPException(status_code=404, detail="Agent not found")
        # A missing/empty session id starts a brand-new session
        if session_id is None or session_id == "":
            log_debug("Creating new session")
            session_id = str(uuid4())
        # Sort uploads into media/document buckets by content type; a failed
        # conversion is logged and the file skipped, an unknown type is a 400.
        base64_images: List[Image] = []
        base64_audios: List[Audio] = []
        base64_videos: List[Video] = []
        input_files: List[FileMedia] = []
        if files:
            for file in files:
                if file.content_type in [
                    "image/png",
                    "image/jpeg",
                    "image/jpg",
                    "image/gif",
                    "image/webp",
                    "image/bmp",
                    "image/tiff",
                    "image/tif",
                    "image/avif",
                    "image/heic",
                    "image/heif",
                ]:
                    try:
                        base64_image = process_image(file)
                        base64_images.append(base64_image)
                    except Exception as e:
                        log_error(f"Error processing image {file.filename}: {e}")
                        continue
                elif file.content_type in [
                    "audio/wav",
                    "audio/wave",
                    "audio/mp3",
                    "audio/mpeg",
                    "audio/ogg",
                    "audio/mp4",
                    "audio/m4a",
                    "audio/aac",
                    "audio/flac",
                ]:
                    try:
                        audio = process_audio(file)
                        base64_audios.append(audio)
                    except Exception as e:
                        log_error(f"Error processing audio {file.filename} with content type {file.content_type}: {e}")
                        continue
                elif file.content_type in [
                    "video/x-flv",
                    "video/quicktime",
                    "video/mpeg",
                    "video/mpegs",
                    "video/mpgs",
                    "video/mpg",
                    # NOTE(review): "video/mpg" is listed twice — presumably one
                    # entry was meant to be a different MIME type; confirm.
                    "video/mpg",
                    "video/mp4",
                    "video/webm",
                    "video/wmv",
                    "video/3gpp",
                ]:
                    try:
                        base64_video = process_video(file)
                        base64_videos.append(base64_video)
                    except Exception as e:
                        log_error(f"Error processing video {file.filename}: {e}")
                        continue
                elif file.content_type in [
                    "application/pdf",
                    "application/json",
                    "application/x-javascript",
                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                    "text/javascript",
                    "application/x-python",
                    "text/x-python",
                    "text/plain",
                    "text/html",
                    "text/css",
                    "text/md",
                    "text/csv",
                    "text/xml",
                    "text/rtf",
                ]:
                    # Process document files
                    try:
                        input_file = process_document(file)
                        if input_file is not None:
                            input_files.append(input_file)
                    except Exception as e:
                        log_error(f"Error processing file {file.filename}: {e}")
                        continue
                else:
                    raise HTTPException(status_code=400, detail="Unsupported file type")
        # Extract auth token for remote agents
        auth_token = get_auth_token_from_request(request)
        # Background execution: return 202 immediately with run metadata
        if background:
            if isinstance(agent, RemoteAgent):
                raise HTTPException(status_code=400, detail="Background execution is not supported for remote agents")
            if not agent.db:
                raise HTTPException(
                    status_code=400, detail="Background execution requires a database to be configured on the agent"
                )
            run_response = cast(
                RunOutput,
                await agent.arun(  # type: ignore[misc]
                    input=message,
                    session_id=session_id,
                    user_id=user_id,
                    images=base64_images if base64_images else None,
                    audio=base64_audios if base64_audios else None,
                    videos=base64_videos if base64_videos else None,
                    files=input_files if input_files else None,
                    stream=False,
                    background=True,
                    **kwargs,
                ),
            )
            return JSONResponse(
                status_code=202,
                content={
                    "run_id": run_response.run_id,
                    "session_id": run_response.session_id,
                    "status": run_response.status.value if run_response.status else "PENDING",
                },
            )
        if stream:
            # Streaming: hand off to the SSE generator defined above
            return StreamingResponse(
                agent_response_streamer(
                    agent,
                    message,
                    session_id=session_id,
                    user_id=user_id,
                    images=base64_images if base64_images else None,
                    audio=base64_audios if base64_audios else None,
                    videos=base64_videos if base64_videos else None,
                    files=input_files if input_files else None,
                    background_tasks=background_tasks,
                    auth_token=auth_token,
                    **kwargs,
                ),
                media_type="text/event-stream",
            )
        else:
            # Pass auth_token for remote agents
            if auth_token and isinstance(agent, RemoteAgent):
                kwargs["auth_token"] = auth_token
            try:
                run_response = cast(
                    RunOutput,
                    await agent.arun(  # type: ignore[misc]
                        input=message,
                        session_id=session_id,
                        user_id=user_id,
                        images=base64_images if base64_images else None,
                        audio=base64_audios if base64_audios else None,
                        videos=base64_videos if base64_videos else None,
                        files=input_files if input_files else None,
                        stream=False,
                        background_tasks=background_tasks,
                        **kwargs,
                    ),
                )
                return run_response.to_dict()
            except InputCheckError as e:
                raise HTTPException(status_code=400, detail=str(e))

    @router.post(
        "/agents/{agent_id}/runs/{run_id}/cancel",
        tags=["Agents"],
        operation_id="cancel_agent_run",
        response_model_exclude_none=True,
        summary="Cancel Agent Run",
        description=(
            "Cancel a currently executing agent run. This will attempt to stop the agent's execution gracefully.\n\n"
            "**Note:** Cancellation may not be immediate for all operations."
        ),
        responses={
            200: {},
            404: {"description": "Agent not found", "model": NotFoundResponse},
            500: {"description": "Failed to cancel run", "model": InternalServerErrorResponse},
        },
        dependencies=[Depends(require_resource_access("agents", "run", "agent_id"))],
    )
    async def cancel_agent_run(
        agent_id: str,
        run_id: str,
    ):
        agent = get_agent_by_id(agent_id=agent_id, agents=os.agents, db=os.db, registry=os.registry, create_fresh=True)
        if agent is None:
            raise HTTPException(status_code=404, detail="Agent not found")
        # cancel_run always stores cancellation intent (even for not-yet-registered runs
        # in cancel-before-start scenarios), so we always return success.
        await agent.acancel_run(run_id=run_id)
        return JSONResponse(content={}, status_code=200)

    @router.post(
        "/agents/{agent_id}/runs/{run_id}/continue",
        tags=["Agents"],
        operation_id="continue_agent_run",
        response_model_exclude_none=True,
        summary="Continue Agent Run",
        description=(
            "Continue a paused or incomplete agent run with updated tool results.\n\n"
            "**Use Cases:**\n"
            "- Resume execution after tool approval/rejection\n"
            "- Provide manual tool execution results\n"
            "- Resume after admin approval (tools can be empty; resolution fetched from DB)\n\n"
            "**Tools Parameter:**\n"
            "JSON string containing array of tool execution objects with results.\n"
            "Can be empty when an admin-required approval has been resolved."
        ),
        responses={
            200: {
                "description": "Agent run continued successfully",
                "content": {
                    "text/event-stream": {
                        "example": 'event: RunContent\ndata: {"created_at": 1757348314, "run_id": "123..."}\n\n'
                    },
                },
            },
            400: {"description": "Invalid JSON in tools field or invalid tool structure", "model": BadRequestResponse},
            403: {"description": "Run has a pending admin approval and cannot be continued by the user yet."},
            404: {"description": "Agent not found", "model": NotFoundResponse},
            409: {
                "description": "Run is not paused (e.g. run is already running, continued, or errored). Only PAUSED runs can be continued.",
            },
        },
        dependencies=[
            Depends(require_resource_access("agents", "run", "agent_id")),
            Depends(require_approval_resolved(os.db)),
        ],
    )
    async def continue_agent_run(
        agent_id: str,
        run_id: str,
        request: Request,
        background_tasks: BackgroundTasks,
        tools: str = Form(""),  # JSON string of tools (optional when admin approval resolved)
        session_id: Optional[str] = Form(None),
        user_id: Optional[str] = Form(None),
        stream: bool = Form(True),
    ):
        # request.state values (set by middleware) override form parameters
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            user_id = request.state.user_id
        if hasattr(request.state, "session_id") and request.state.session_id is not None:
            session_id = request.state.session_id
        # Parse the JSON string manually
        try:
            tools_data = json.loads(tools) if tools else None
        except json.JSONDecodeError:
            raise HTTPException(status_code=400, detail="Invalid JSON in tools field")
        agent = get_agent_by_id(agent_id=agent_id, agents=os.agents, db=os.db, registry=os.registry, create_fresh=True)
        if agent is None:
            raise HTTPException(status_code=404, detail="Agent not found")
        if session_id is None or session_id == "":
            log_warning(
                "Continuing run without session_id. This might lead to unexpected behavior if session context is important."
            )
        # Fetch existing run once for validation and potential approval resolution
        existing_run = None
        if session_id and not isinstance(agent, RemoteAgent):
            existing_run = await agent.aget_run_output(run_id=run_id, session_id=session_id)
        # Only allow /continue when the run is in a paused state. If running, continued, or errored, return 409.
        if existing_run is not None:
            is_paused = getattr(existing_run, "is_paused", False)
            if not is_paused:
                status = getattr(existing_run, "status", None)
                _status_to_detail = {
                    RunStatus.running: "run is already running",
                    RunStatus.completed: "run is already continued",
                    RunStatus.error: "run is already errored",
                    RunStatus.cancelled: "run is already cancelled",
                    RunStatus.pending: "run is already pending",
                }
                detail = _status_to_detail.get(
                    status,  # type: ignore[arg-type]
                    f"run is not paused (status={getattr(status, 'value', status)})",
                )
                raise HTTPException(
                    status_code=409,
                    detail=detail,
                )
        # Convert tools dict to ToolExecution objects if provided
        updated_tools = None
        if tools_data:
            try:
                from agno.models.response import ToolExecution

                updated_tools = [ToolExecution.from_dict(tool) for tool in tools_data]
            except Exception as e:
                raise HTTPException(status_code=400, detail=f"Invalid structure or content for tools: {str(e)}")
        # Extract auth token for remote agents
        auth_token = get_auth_token_from_request(request)
        if stream:
            return StreamingResponse(
                agent_continue_response_streamer(
                    agent,
                    run_id=run_id,  # run_id from path
                    updated_tools=updated_tools,
                    session_id=session_id,
                    user_id=user_id,
                    background_tasks=background_tasks,
                    auth_token=auth_token,
                ),
                media_type="text/event-stream",
            )
        else:
            # Build extra kwargs for remote agent auth
            extra_kwargs: dict = {}
            if auth_token and isinstance(agent, RemoteAgent):
                extra_kwargs["auth_token"] = auth_token
            try:
                run_response_obj = cast(
                    RunOutput,
                    await agent.acontinue_run(  # type: ignore
                        run_id=run_id,  # run_id from path
                        updated_tools=updated_tools,
                        session_id=session_id,
                        user_id=user_id,
                        stream=False,
                        background_tasks=background_tasks,
                        **extra_kwargs,
                    ),
                )
                return run_response_obj.to_dict()
            except InputCheckError as e:
                raise HTTPException(status_code=400, detail=str(e))

    @router.get(
        "/agents",
        response_model=List[AgentResponse],
        response_model_exclude_none=True,
        tags=["Agents"],
        operation_id="get_agents",
        summary="List All Agents",
        description=(
            "Retrieve a comprehensive list of all agents configured in this OS instance.\n\n"
            "**Returns:**\n"
            "- Agent metadata (ID, name, description)\n"
            "- Model configuration and capabilities\n"
            "- Available tools and their configurations\n"
            "- Session, knowledge, memory, and reasoning settings\n"
            "- Only meaningful (non-default) configurations are included"
        ),
        responses={
            200: {
                "description": "List of agents retrieved successfully",
                "content": {
                    "application/json": {
                        "example": [
                            {
                                "id": "main-agent",
                                "name": "Main Agent",
                                "db_id": "c6bf0644-feb8-4930-a305-380dae5ad6aa",
                                "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                                "tools": None,
                                "sessions": {"session_table": "agno_sessions"},
                                "knowledge": {"knowledge_table": "main_knowledge"},
                                "system_message": {"markdown": True, "add_datetime_to_context": True},
                            }
                        ]
                    }
                },
            }
        },
    )
    async def get_agents(request: Request) -> List[AgentResponse]:
        """Return the list of all Agents present in the contextual OS"""
        # Filter agents based on user's scopes (only if authorization is enabled)
        if getattr(request.state, "authorization_enabled", False):
            from agno.os.auth import filter_resources_by_access, get_accessible_resources

            # Check if user has any agent scopes at all
            accessible_ids = get_accessible_resources(request, "agents")
            if not accessible_ids:
                raise HTTPException(status_code=403, detail="Insufficient permissions")
            # Limit results based on the user's access/scopes
            accessible_agents = filter_resources_by_access(request, os.agents or [], "agents")
        else:
            accessible_agents = os.agents or []
        agents: List[AgentResponse] = []
        if accessible_agents:
            for agent in accessible_agents:
                if isinstance(agent, RemoteAgent):
                    agents.append(await agent.get_agent_config())
                else:
                    agent_response = await AgentResponse.from_agent(agent=agent, is_component=False)
                    agents.append(agent_response)
        # Also surface DB-persisted agents (components) not owned by the registry
        if os.db and isinstance(os.db, BaseDb):
            from agno.agent.agent import get_agents

            # Exclude agents whose IDs are owned by the registry
            exclude_ids = registry.get_agent_ids() if registry else None
            db_agents = get_agents(db=os.db, registry=registry, exclude_component_ids=exclude_ids or None)
            if db_agents:
                for db_agent in db_agents:
                    agent_response = await AgentResponse.from_agent(agent=db_agent, is_component=True)
                    agents.append(agent_response)
        return agents

    @router.get(
        "/agents/{agent_id}",
        response_model=AgentResponse,
        response_model_exclude_none=True,
        tags=["Agents"],
        operation_id="get_agent",
        summary="Get Agent Details",
        description=(
            "Retrieve detailed configuration and capabilities of a specific agent.\n\n"
            "**Returns comprehensive agent information including:**\n"
            "- Model configuration and provider details\n"
            "- Complete tool inventory and configurations\n"
            "- Session management settings\n"
            "- Knowledge base and memory configurations\n"
            "- Reasoning capabilities and settings\n"
            "- System prompts and response formatting options"
        ),
        responses={
            200: {
                "description": "Agent details retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "main-agent",
                            "name": "Main Agent",
                            "db_id": "9e064c70-6821-4840-a333-ce6230908a70",
                            "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                            "tools": None,
                            "sessions": {"session_table": "agno_sessions"},
                            "knowledge": {"knowledge_table": "main_knowledge"},
                            "system_message": {"markdown": True, "add_datetime_to_context": True},
                        }
                    }
                },
            },
            404: {"description": "Agent not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("agents", "read", "agent_id"))],
    )
    async def get_agent(agent_id: str, request: Request) -> AgentResponse:
        # Remote agents report their own config; local agents are serialized here
        agent = get_agent_by_id(agent_id=agent_id, agents=os.agents, db=os.db, registry=os.registry, create_fresh=True)
        if agent is None:
            raise HTTPException(status_code=404, detail="Agent not found")
        if isinstance(agent, RemoteAgent):
            return await agent.get_agent_config()
        else:
            return await AgentResponse.from_agent(agent=agent)

    @router.get(
        "/agents/{agent_id}/runs/{run_id}",
        tags=["Agents"],
        operation_id="get_agent_run",
        summary="Get Agent Run",
        description=(
            "Retrieve the status and output of an agent run. Use this to poll for background run completion.\n\n"
            "Requires the `session_id` that was returned when the run was created."
        ),
        responses={
            200: {"description": "Run output retrieved successfully"},
            404: {"description": "Agent or run not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("agents", "run", "agent_id"))],
    )
    async def get_agent_run(
        agent_id: str,
        run_id: str,
        session_id: str = Query(..., description="Session ID for the run"),
    ):
        agent = get_agent_by_id(agent_id=agent_id, agents=os.agents, db=os.db, registry=os.registry, create_fresh=True)
        if agent is None:
            raise HTTPException(status_code=404, detail="Agent not found")
        if isinstance(agent, RemoteAgent):
            raise HTTPException(status_code=400, detail="Run polling is not supported for remote agents")
        run_output = await agent.aget_run_output(run_id=run_id, session_id=session_id)
        if run_output is None:
            raise HTTPException(status_code=404, detail="Run not found")
        return run_output.to_dict()

    @router.get(
        "/agents/{agent_id}/runs",
        tags=["Agents"],
        operation_id="list_agent_runs",
        summary="List Agent Runs",
        description=(
            "List runs for an agent within a session, optionally filtered by status.\n\n"
            "Useful for monitoring background runs and viewing run history."
        ),
        responses={
            200: {"description": "List of runs retrieved successfully"},
            404: {"description": "Agent not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("agents", "run", "agent_id"))],
    )
    async def list_agent_runs(
        agent_id: str,
        session_id: str = Query(..., description="Session ID to list runs for"),
        status: Optional[str] = Query(None, description="Filter by run status (PENDING, RUNNING, COMPLETED, ERROR)"),
    ):
        from agno.os.schema import RunSchema

        agent = get_agent_by_id(agent_id=agent_id, agents=os.agents, db=os.db, registry=os.registry, create_fresh=True)
        if agent is None:
            raise HTTPException(status_code=404, detail="Agent not found")
        if isinstance(agent, RemoteAgent):
            raise HTTPException(status_code=400, detail="Run listing is not supported for remote agents")
        # Load the session to get its runs
        from agno.agent._storage import aread_or_create_session

        session = await aread_or_create_session(agent, session_id=session_id)
        runs = session.runs or []
        # Convert to dicts and optionally filter by status
        result = []
        for run in runs:
            run_dict = run.to_dict()
            if status and run_dict.get("status") != status:
                continue
            result.append(RunSchema.from_dict(run_dict))
        return result

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/agents/router.py",
"license": "Apache License 2.0",
"lines": 749,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/agents/schema.py | from typing import Any, Dict, Optional
from uuid import uuid4
from pydantic import BaseModel
from agno.agent import Agent
from agno.models.message import Message
from agno.os.schema import ModelResponse
from agno.os.utils import (
format_tools,
)
from agno.run import RunContext
from agno.run.agent import RunOutput
from agno.session import AgentSession
from agno.utils.agent import aexecute_instructions, aexecute_system_message
class AgentResponse(BaseModel):
    # Serializable view of an Agent's configuration, returned by the agent
    # routes. All sections are optional; only meaningful (non-default) values
    # are populated by from_agent.
    id: Optional[str] = None
    name: Optional[str] = None
    db_id: Optional[str] = None
    description: Optional[str] = None
    role: Optional[str] = None
    model: Optional[ModelResponse] = None
    tools: Optional[Dict[str, Any]] = None
    sessions: Optional[Dict[str, Any]] = None
    knowledge: Optional[Dict[str, Any]] = None
    memory: Optional[Dict[str, Any]] = None
    reasoning: Optional[Dict[str, Any]] = None
    default_tools: Optional[Dict[str, Any]] = None
    system_message: Optional[Dict[str, Any]] = None
    extra_messages: Optional[Dict[str, Any]] = None
    response_settings: Optional[Dict[str, Any]] = None
    introduction: Optional[str] = None
    streaming: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None
    input_schema: Optional[Dict[str, Any]] = None
    # True when the agent is a DB-persisted component rather than a code-defined one
    is_component: bool = False
    current_version: Optional[int] = None
    stage: Optional[str] = None

    @classmethod
    async def from_agent(
        cls,
        agent: Agent,
        is_component: bool = False,
    ) -> "AgentResponse":
        """Build an AgentResponse from an Agent, dropping None and default-valued settings."""

        def filter_meaningful_config(d: Dict[str, Any], defaults: Dict[str, Any]) -> Optional[Dict[str, Any]]:
            """Filter out fields that match their default values, keeping only meaningful user configurations"""
            filtered = {}
            for key, value in d.items():
                if value is None:
                    continue
                # Skip if value matches the default exactly
                if key in defaults and value == defaults[key]:
                    continue
                # Keep non-default values
                filtered[key] = value
            return filtered if filtered else None

        # Define default values for filtering
        agent_defaults = {
            # Sessions defaults
            "add_history_to_context": False,
            "num_history_runs": 3,
            "enable_session_summaries": False,
            "search_session_history": False,
            "cache_session": False,
            # Knowledge defaults
            "add_references": False,
            "references_format": "json",
            "enable_agentic_knowledge_filters": False,
            # Memory defaults
            "enable_agentic_memory": False,
            "update_memory_on_run": False,
            # Reasoning defaults
            "reasoning": False,
            "reasoning_min_steps": 1,
            "reasoning_max_steps": 10,
            # Default tools defaults
            "read_chat_history": False,
            "search_knowledge": True,
            "update_knowledge": False,
            "read_tool_call_history": False,
            # System message defaults
            "system_message_role": "system",
            "build_context": True,
            "markdown": False,
            "add_name_to_context": False,
            "add_datetime_to_context": False,
            "add_location_to_context": False,
            "resolve_in_context": True,
            # Extra messages defaults
            "user_message_role": "user",
            "build_user_context": True,
            # Response settings defaults
            "retries": 0,
            "delay_between_retries": 1,
            "exponential_backoff": False,
            "parse_response": True,
            "use_json_mode": False,
            # Streaming defaults
            "stream_events": False,
        }

        # Throwaway session/run context so tools can be resolved without a real run
        session_id = str(uuid4())
        run_id = str(uuid4())
        agent_tools = await agent.aget_tools(
            session=AgentSession(session_id=session_id, session_data={}),
            run_response=RunOutput(run_id=run_id, session_id=session_id),
            run_context=RunContext(run_id=run_id, session_id=session_id, user_id=agent.user_id),
            check_mcp_tools=False,
        )
        formatted_tools = format_tools(agent_tools) if agent_tools else None

        # Message objects in additional_input are flattened to dicts
        additional_input = agent.additional_input
        if additional_input and isinstance(additional_input[0], Message):
            additional_input = [message.to_dict() for message in additional_input]  # type: ignore

        # input_schema may be a plain dict or a pydantic model class
        input_schema_dict = None
        if agent.input_schema is not None:
            if isinstance(agent.input_schema, dict):
                input_schema_dict = agent.input_schema
            else:
                try:
                    input_schema_dict = agent.input_schema.model_json_schema()
                except Exception:
                    pass

        # Build model only if it has at least one non-null field
        model_name = agent.model.name if (agent.model and agent.model.name) else None
        model_provider = agent.model.provider if (agent.model and agent.model.provider) else None
        model_id = agent.model.id if (agent.model and agent.model.id) else None
        _agent_model_data: Dict[str, Any] = {}
        if model_name is not None:
            _agent_model_data["name"] = model_name
        if model_id is not None:
            _agent_model_data["model"] = model_id
        if model_provider is not None:
            _agent_model_data["provider"] = model_provider

        session_table = agent.db.session_table_name if agent.db else None
        knowledge_table = agent.db.knowledge_table_name if agent.db and agent.knowledge else None

        tools_info = {
            "tools": formatted_tools,
            "tool_call_limit": agent.tool_call_limit,
            "tool_choice": agent.tool_choice,
        }
        sessions_info = {
            "session_table": session_table,
            "add_history_to_context": agent.add_history_to_context,
            "enable_session_summaries": agent.enable_session_summaries,
            "num_history_runs": agent.num_history_runs,
            "search_session_history": agent.search_session_history,
            "num_history_sessions": agent.num_history_sessions,
            "cache_session": agent.cache_session,
        }
        contents_db = getattr(agent.knowledge, "contents_db", None) if agent.knowledge else None
        knowledge_info = {
            "db_id": contents_db.id if contents_db else None,
            "knowledge_table": knowledge_table,
            "enable_agentic_knowledge_filters": agent.enable_agentic_knowledge_filters,
            "knowledge_filters": (
                [f.to_dict() if hasattr(f, "to_dict") else f for f in agent.knowledge_filters]
                if isinstance(agent.knowledge_filters, list)
                else agent.knowledge_filters
            ),
            "references_format": agent.references_format,
        }
        # Memory section only exists when a memory manager is configured
        memory_info: Optional[Dict[str, Any]] = None
        if agent.memory_manager is not None:
            memory_info = {
                "enable_agentic_memory": agent.enable_agentic_memory,
                "update_memory_on_run": agent.update_memory_on_run,
                "enable_user_memories": agent.enable_user_memories,  # Soon to be deprecated. Use update_memory_on_run
                "metadata": agent.metadata,
                "memory_table": agent.db.memory_table_name if agent.db and agent.update_memory_on_run else None,
            }
            if agent.memory_manager.model is not None:
                memory_info["model"] = ModelResponse(
                    name=agent.memory_manager.model.name,
                    model=agent.memory_manager.model.id,
                    provider=agent.memory_manager.model.provider,
                ).model_dump()
        reasoning_info: Dict[str, Any] = {
            "reasoning": agent.reasoning,
            "reasoning_agent_id": agent.reasoning_agent.id if agent.reasoning_agent else None,
            "reasoning_min_steps": agent.reasoning_min_steps,
            "reasoning_max_steps": agent.reasoning_max_steps,
        }
        if agent.reasoning_model:
            reasoning_info["reasoning_model"] = ModelResponse(
                name=agent.reasoning_model.name,
                model=agent.reasoning_model.id,
                provider=agent.reasoning_model.provider,
            ).model_dump()
        default_tools_info = {
            "read_chat_history": agent.read_chat_history,
            "search_knowledge": agent.search_knowledge,
            "update_knowledge": agent.update_knowledge,
            "read_tool_call_history": agent.read_tool_call_history,
        }
        # Callable instructions / system_message are resolved before serialization
        instructions = agent.instructions if agent.instructions else None
        if instructions and callable(instructions):
            instructions = await aexecute_instructions(instructions=instructions, agent=agent)
        system_message = agent.system_message if agent.system_message else None
        if system_message and callable(system_message):
            system_message = await aexecute_system_message(system_message=system_message, agent=agent)
        system_message_info = {
            "system_message": str(system_message) if system_message else None,
            "system_message_role": agent.system_message_role,
            "build_context": agent.build_context,
            "description": agent.description,
            "instructions": instructions,
            "expected_output": agent.expected_output,
            "additional_context": agent.additional_context,
            "markdown": agent.markdown,
            "add_name_to_context": agent.add_name_to_context,
            "add_datetime_to_context": agent.add_datetime_to_context,
            "add_location_to_context": agent.add_location_to_context,
            "timezone_identifier": agent.timezone_identifier,
            "resolve_in_context": agent.resolve_in_context,
        }
        extra_messages_info = {
            "additional_input": additional_input,  # type: ignore
            "user_message_role": agent.user_message_role,
            "build_user_context": agent.build_user_context,
        }
        # Handle output_schema name for both Pydantic models and JSON schemas
        output_schema_name = None
        if agent.output_schema is not None:
            if isinstance(agent.output_schema, dict):
                if "json_schema" in agent.output_schema:
                    output_schema_name = agent.output_schema["json_schema"].get("name", "JSONSchema")
                elif "schema" in agent.output_schema and isinstance(agent.output_schema["schema"], dict):
                    output_schema_name = agent.output_schema["schema"].get("title", "JSONSchema")
                else:
                    output_schema_name = agent.output_schema.get("title", "JSONSchema")
            elif hasattr(agent.output_schema, "__name__"):
                output_schema_name = agent.output_schema.__name__
        response_settings_info: Dict[str, Any] = {
            "retries": agent.retries,
            "delay_between_retries": agent.delay_between_retries,
            "exponential_backoff": agent.exponential_backoff,
            "output_schema_name": output_schema_name,
            "parser_model_prompt": agent.parser_model_prompt,
            "parse_response": agent.parse_response,
            "structured_outputs": agent.structured_outputs,
            "use_json_mode": agent.use_json_mode,
            "save_response_to_file": agent.save_response_to_file,
        }
        if agent.parser_model:
            response_settings_info["parser_model"] = ModelResponse(
                name=agent.parser_model.name,
                model=agent.parser_model.id,
                provider=agent.parser_model.provider,
            ).model_dump()
        streaming_info = {
            "stream": agent.stream,
            "stream_events": agent.stream_events,
        }
        return AgentResponse(
            id=agent.id,
            name=agent.name,
            db_id=agent.db.id if agent.db else None,
            description=agent.description,
            role=agent.role,
            model=ModelResponse(**_agent_model_data) if _agent_model_data else None,
            tools=filter_meaningful_config(tools_info, {}),
            sessions=filter_meaningful_config(sessions_info, agent_defaults),
            knowledge=filter_meaningful_config(knowledge_info, agent_defaults),
            memory=filter_meaningful_config(memory_info, agent_defaults) if memory_info else None,
            reasoning=filter_meaningful_config(reasoning_info, agent_defaults),
            default_tools=filter_meaningful_config(default_tools_info, agent_defaults),
            system_message=filter_meaningful_config(system_message_info, agent_defaults),
            extra_messages=filter_meaningful_config(extra_messages_info, agent_defaults),
            response_settings=filter_meaningful_config(response_settings_info, agent_defaults),
            streaming=filter_meaningful_config(streaming_info, agent_defaults),
            introduction=agent.introduction,
            metadata=agent.metadata,
            input_schema=input_schema_dict,
            is_component=is_component,
            current_version=getattr(agent, "_version", None),
            stage=getattr(agent, "_stage", None),
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/agents/schema.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/teams/router.py | from typing import TYPE_CHECKING, Any, AsyncGenerator, List, Optional, Union
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Depends,
File,
Form,
HTTPException,
Query,
Request,
UploadFile,
)
from fastapi.responses import JSONResponse, StreamingResponse
from agno.db.base import BaseDb
from agno.exceptions import InputCheckError, OutputCheckError
from agno.media import Audio, Image, Video
from agno.media import File as FileMedia
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency, require_resource_access
from agno.os.routers.teams.schema import TeamResponse
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import (
format_sse_event,
get_request_kwargs,
get_team_by_id,
process_audio,
process_document,
process_image,
process_video,
)
from agno.registry import Registry
from agno.run.team import RunErrorEvent as TeamRunErrorEvent
from agno.team.remote import RemoteTeam
from agno.team.team import Team
from agno.utils.log import log_warning, logger
if TYPE_CHECKING:
from agno.os.app import AgentOS
async def team_response_streamer(
    team: Union[Team, RemoteTeam],
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[List[Image]] = None,
    audio: Optional[List[Audio]] = None,
    videos: Optional[List[Video]] = None,
    files: Optional[List[FileMedia]] = None,
    background_tasks: Optional[BackgroundTasks] = None,
    auth_token: Optional[str] = None,
    **kwargs: Any,
) -> AsyncGenerator:
    """Run the given team asynchronously and yield its response as SSE-formatted events.

    Args:
        team: Local or remote team to execute.
        message: The user input forwarded to the team run.
        session_id: Optional session to continue; created upstream when absent.
        user_id: Optional user identifier for the run.
        images/audio/videos/files: Optional media already converted to agno types.
        background_tasks: FastAPI background tasks, forwarded to the run when set.
        auth_token: Caller's auth token, forwarded only for remote teams.
        **kwargs: Extra run kwargs extracted from the request.

    Yields:
        SSE event strings; on failure a single TeamRunErrorEvent is emitted
        instead of raising, so the stream terminates cleanly for the client.
    """
    try:
        # Pass background_tasks if provided
        if background_tasks is not None:
            kwargs["background_tasks"] = background_tasks

        # Default to emitting intermediate events unless the caller opted out
        stream_events = kwargs.pop("stream_events", True)

        # Pass auth_token for remote teams
        if auth_token and isinstance(team, RemoteTeam):
            kwargs["auth_token"] = auth_token

        run_response = team.arun(
            input=message,
            session_id=session_id,
            user_id=user_id,
            images=images,
            audio=audio,
            videos=videos,
            files=files,
            stream=True,
            stream_events=stream_events,
            **kwargs,
        )
        async for run_response_chunk in run_response:
            yield format_sse_event(run_response_chunk)  # type: ignore

    except (InputCheckError, OutputCheckError) as e:
        # Guardrail violations carry structured error details — surface them to the client
        error_response = TeamRunErrorEvent(
            content=str(e),
            error_type=e.type,
            error_id=e.error_id,
            additional_data=e.additional_data,
        )
        yield format_sse_event(error_response)

    except BaseException as e:
        # Log through the app logger (with traceback) instead of printing to stderr
        logger.exception(f"Error during team run: {e}")
        error_response = TeamRunErrorEvent(
            content=str(e),
            error_type=getattr(e, "type", None),
            error_id=getattr(e, "error_id", None),
        )
        yield format_sse_event(error_response)
        return
def get_team_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
    registry: Optional[Registry] = None,
) -> APIRouter:
    """Create the team router with comprehensive OpenAPI documentation.

    Registers run creation (streaming, blocking, and background), run
    cancellation, run polling/listing, and team listing/detail endpoints.
    All routes share the authentication dependency derived from `settings`.
    """
    router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        responses={
            400: {"description": "Bad Request", "model": BadRequestResponse},
            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
            404: {"description": "Not Found", "model": NotFoundResponse},
            422: {"description": "Validation Error", "model": ValidationErrorResponse},
            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
        },
    )

    @router.post(
        "/teams/{team_id}/runs",
        tags=["Teams"],
        operation_id="create_team_run",
        response_model_exclude_none=True,
        summary="Create Team Run",
        description=(
            "Execute a team collaboration with multiple agents working together on a task.\n\n"
            "**Features:**\n"
            "- Text message input with optional session management\n"
            "- Multi-media support: images (PNG, JPEG, WebP), audio (WAV, MP3), video (MP4, WebM, etc.)\n"
            "- Document processing: PDF, CSV, DOCX, TXT, JSON\n"
            "- Real-time streaming responses with Server-Sent Events (SSE)\n"
            "- User and session context preservation\n\n"
            "**Streaming Response:**\n"
            "When `stream=true`, returns SSE events with `event` and `data` fields."
        ),
        responses={
            200: {
                "description": "Team run executed successfully",
                "content": {
                    "text/event-stream": {
                        "example": 'event: RunStarted\ndata: {"content": "Hello!", "run_id": "123..."}\n\n'
                    },
                },
            },
            400: {"description": "Invalid request or unsupported file type", "model": BadRequestResponse},
            404: {"description": "Team not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("teams", "run", "team_id"))],
    )
    async def create_team_run(
        team_id: str,
        request: Request,
        background_tasks: BackgroundTasks,
        message: str = Form(...),
        stream: bool = Form(True),
        monitor: bool = Form(True),
        session_id: Optional[str] = Form(None),
        user_id: Optional[str] = Form(None),
        files: Optional[List[UploadFile]] = File(None),
        version: Optional[int] = Form(None),
        background: bool = Form(False),
    ):
        """Execute a team run: streaming SSE, blocking JSON, or background (202)."""
        kwargs = await get_request_kwargs(request, create_team_run)

        # Values injected into request.state (e.g. by middleware) take precedence
        # over the equivalent form parameters; warn when both are present.
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            if user_id and user_id != request.state.user_id:
                log_warning("User ID parameter passed in both request state and kwargs, using request state")
            user_id = request.state.user_id

        if hasattr(request.state, "session_id") and request.state.session_id is not None:
            if session_id and session_id != request.state.session_id:
                log_warning("Session ID parameter passed in both request state and kwargs, using request state")
            session_id = request.state.session_id

        if hasattr(request.state, "session_state") and request.state.session_state is not None:
            session_state = request.state.session_state
            if "session_state" in kwargs:
                log_warning("Session state parameter passed in both request state and kwargs, using request state")
            kwargs["session_state"] = session_state

        if hasattr(request.state, "dependencies") and request.state.dependencies is not None:
            dependencies = request.state.dependencies
            if "dependencies" in kwargs:
                log_warning("Dependencies parameter passed in both request state and kwargs, using request state")
            kwargs["dependencies"] = dependencies

        if hasattr(request.state, "metadata") and request.state.metadata is not None:
            metadata = request.state.metadata
            if "metadata" in kwargs:
                log_warning("Metadata parameter passed in both request state and kwargs, using request state")
            kwargs["metadata"] = metadata

        logger.debug(f"Creating team run: {message=} {session_id=} {monitor=} {user_id=} {team_id=} {files=} {kwargs=}")
        team = get_team_by_id(
            team_id=team_id, teams=os.teams, db=os.db, version=version, registry=registry, create_fresh=True
        )
        if team is None:
            raise HTTPException(status_code=404, detail="Team not found")

        if session_id is not None and session_id != "":
            logger.debug(f"Continuing session: {session_id}")
        else:
            logger.debug("Creating new session")
            session_id = str(uuid4())

        # Sort uploaded files into media buckets by content type; individual
        # failures are logged and skipped so one bad file doesn't fail the run.
        base64_images: List[Image] = []
        base64_audios: List[Audio] = []
        base64_videos: List[Video] = []
        document_files: List[FileMedia] = []
        if files:
            for file in files:
                if file.content_type in [
                    "image/png",
                    "image/jpeg",
                    "image/jpg",
                    "image/webp",
                    "image/heic",
                    "image/heif",
                ]:
                    try:
                        base64_image = process_image(file)
                        base64_images.append(base64_image)
                    except Exception as e:
                        logger.error(f"Error processing image {file.filename}: {e}")
                        continue
                elif file.content_type in ["audio/wav", "audio/mp3", "audio/mpeg"]:
                    try:
                        base64_audio = process_audio(file)
                        base64_audios.append(base64_audio)
                    except Exception as e:
                        logger.error(f"Error processing audio {file.filename}: {e}")
                        continue
                elif file.content_type in [
                    "video/x-flv",
                    "video/quicktime",
                    "video/mpeg",
                    "video/mpegs",
                    "video/mpgs",
                    "video/mpg",
                    "video/mp4",
                    "video/webm",
                    "video/wmv",
                    "video/3gpp",
                ]:
                    try:
                        base64_video = process_video(file)
                        base64_videos.append(base64_video)
                    except Exception as e:
                        logger.error(f"Error processing video {file.filename}: {e}")
                        continue
                elif file.content_type in [
                    "application/pdf",
                    "text/csv",
                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
                    "text/plain",
                    "application/json",
                ]:
                    try:
                        document_file = process_document(file)
                        if document_file is not None:
                            document_files.append(document_file)
                    except Exception as e:
                        logger.error(f"Error processing document {file.filename}: {e}")
                        continue
                else:
                    raise HTTPException(status_code=400, detail="Unsupported file type")

        # Extract auth token for remote teams
        auth_token = get_auth_token_from_request(request)

        # Background execution: return 202 immediately with run metadata
        if background:
            if isinstance(team, RemoteTeam):
                raise HTTPException(status_code=400, detail="Background execution is not supported for remote teams")
            if not team.db:
                raise HTTPException(
                    status_code=400, detail="Background execution requires a database to be configured on the team"
                )
            run_response = await team.arun(  # type: ignore[misc]
                input=message,
                session_id=session_id,
                user_id=user_id,
                images=base64_images if base64_images else None,
                audio=base64_audios if base64_audios else None,
                videos=base64_videos if base64_videos else None,
                files=document_files if document_files else None,
                stream=False,
                background=True,
                **kwargs,
            )
            return JSONResponse(
                status_code=202,
                content={
                    "run_id": run_response.run_id,
                    "session_id": run_response.session_id,
                    "status": run_response.status.value if run_response.status else "PENDING",
                },
            )

        if stream:
            return StreamingResponse(
                team_response_streamer(
                    team,
                    message,
                    session_id=session_id,
                    user_id=user_id,
                    images=base64_images if base64_images else None,
                    audio=base64_audios if base64_audios else None,
                    videos=base64_videos if base64_videos else None,
                    files=document_files if document_files else None,
                    background_tasks=background_tasks,
                    auth_token=auth_token,
                    **kwargs,
                ),
                media_type="text/event-stream",
            )
        else:
            # Pass auth_token for remote teams
            if auth_token and isinstance(team, RemoteTeam):
                kwargs["auth_token"] = auth_token
            try:
                run_response = await team.arun(  # type: ignore[misc]
                    input=message,
                    session_id=session_id,
                    user_id=user_id,
                    images=base64_images if base64_images else None,
                    audio=base64_audios if base64_audios else None,
                    videos=base64_videos if base64_videos else None,
                    files=document_files if document_files else None,
                    stream=False,
                    background_tasks=background_tasks,
                    **kwargs,
                )
                return run_response.to_dict()
            except InputCheckError as e:
                raise HTTPException(status_code=400, detail=str(e))

    @router.post(
        "/teams/{team_id}/runs/{run_id}/cancel",
        tags=["Teams"],
        operation_id="cancel_team_run",
        response_model_exclude_none=True,
        summary="Cancel Team Run",
        description=(
            "Cancel a currently executing team run. This will attempt to stop the team's execution gracefully.\n\n"
            "**Note:** Cancellation may not be immediate for all operations."
        ),
        responses={
            200: {},
            404: {"description": "Team not found", "model": NotFoundResponse},
            500: {"description": "Failed to cancel team run", "model": InternalServerErrorResponse},
        },
        dependencies=[Depends(require_resource_access("teams", "run", "team_id"))],
    )
    async def cancel_team_run(
        team_id: str,
        run_id: str,
    ):
        """Request cancellation of a running team run."""
        team = get_team_by_id(team_id=team_id, teams=os.teams, db=os.db, registry=registry, create_fresh=True)
        if team is None:
            raise HTTPException(status_code=404, detail="Team not found")

        # cancel_run always stores cancellation intent (even for not-yet-registered runs
        # in cancel-before-start scenarios), so we always return success.
        await team.acancel_run(run_id=run_id)
        return JSONResponse(content={}, status_code=200)

    @router.get(
        "/teams",
        response_model=List[TeamResponse],
        response_model_exclude_none=True,
        tags=["Teams"],
        operation_id="get_teams",
        summary="List All Teams",
        description=(
            "Retrieve a comprehensive list of all teams configured in this OS instance.\n\n"
            "**Returns team information including:**\n"
            "- Team metadata (ID, name, description, execution mode)\n"
            "- Model configuration for team coordination\n"
            "- Team member roster with roles and capabilities\n"
            "- Knowledge sharing and memory configurations"
        ),
        responses={
            200: {
                "description": "List of teams retrieved successfully",
                "content": {
                    "application/json": {
                        "example": [
                            {
                                "team_id": "basic-team",
                                "name": "Basic Team",
                                "mode": "coordinate",
                                "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                                "tools": [
                                    {
                                        "name": "transfer_task_to_member",
                                        "description": "Use this function to transfer a task to the selected team member.\nYou must provide a clear and concise description of the task the member should achieve AND the expected output.",
                                        "parameters": {
                                            "type": "object",
                                            "properties": {
                                                "member_id": {
                                                    "type": "string",
                                                    "description": "(str) The ID of the member to transfer the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.",
                                                },
                                                "task_description": {
                                                    "type": "string",
                                                    "description": "(str) A clear and concise description of the task the member should achieve.",
                                                },
                                                "expected_output": {
                                                    "type": "string",
                                                    "description": "(str) The expected output from the member (optional).",
                                                },
                                            },
                                            "additionalProperties": False,
                                            "required": ["member_id", "task_description"],
                                        },
                                    }
                                ],
                                "members": [
                                    {
                                        "agent_id": "basic-agent",
                                        "name": "Basic Agent",
                                        "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI gpt-4o"},
                                        "memory": {
                                            "app_name": "Memory",
                                            "app_url": None,
                                            "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                                        },
                                        "session_table": "agno_sessions",
                                        "memory_table": "agno_memories",
                                    }
                                ],
                                "enable_agentic_context": False,
                                "memory": {
                                    "app_name": "agno_memories",
                                    "app_url": "/memory/1",
                                    "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                                },
                                "async_mode": False,
                                "session_table": "agno_sessions",
                                "memory_table": "agno_memories",
                            }
                        ]
                    }
                },
            }
        },
    )
    async def get_teams(request: Request) -> List[TeamResponse]:
        """Return the list of all Teams present in the contextual OS"""
        # Filter teams based on user's scopes (only if authorization is enabled)
        if getattr(request.state, "authorization_enabled", False):
            from agno.os.auth import filter_resources_by_access, get_accessible_resources

            # Check if user has any team scopes at all
            accessible_ids = get_accessible_resources(request, "teams")
            if not accessible_ids:
                raise HTTPException(status_code=403, detail="Insufficient permissions")
            accessible_teams = filter_resources_by_access(request, os.teams or [], "teams")
        else:
            accessible_teams = os.teams or []

        teams = []
        for team in accessible_teams:
            if isinstance(team, RemoteTeam):
                teams.append(await team.get_team_config())
            else:
                team_response = await TeamResponse.from_team(team=team, is_component=False)
                teams.append(team_response)

        # Also load teams from database
        if os.db and isinstance(os.db, BaseDb):
            from agno.team.team import get_teams

            # Exclude teams whose IDs are owned by the registry
            exclude_ids = registry.get_team_ids() if registry else None
            db_teams = get_teams(db=os.db, registry=registry, exclude_component_ids=exclude_ids or None)
            for db_team in db_teams:
                team_response = await TeamResponse.from_team(team=db_team, is_component=True)
                teams.append(team_response)

        return teams

    @router.get(
        "/teams/{team_id}",
        response_model=TeamResponse,
        response_model_exclude_none=True,
        tags=["Teams"],
        operation_id="get_team",
        summary="Get Team Details",
        description=("Retrieve detailed configuration and member information for a specific team."),
        responses={
            200: {
                "description": "Team details retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "team_id": "basic-team",
                            "name": "Basic Team",
                            "description": None,
                            "mode": "coordinate",
                            "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                            "tools": [
                                {
                                    "name": "transfer_task_to_member",
                                    "description": "Use this function to transfer a task to the selected team member.\nYou must provide a clear and concise description of the task the member should achieve AND the expected output.",
                                    "parameters": {
                                        "type": "object",
                                        "properties": {
                                            "member_id": {
                                                "type": "string",
                                                "description": "(str) The ID of the member to transfer the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.",
                                            },
                                            "task_description": {
                                                "type": "string",
                                                "description": "(str) A clear and concise description of the task the member should achieve.",
                                            },
                                            "expected_output": {
                                                "type": "string",
                                                "description": "(str) The expected output from the member (optional).",
                                            },
                                        },
                                        "additionalProperties": False,
                                        "required": ["member_id", "task_description"],
                                    },
                                }
                            ],
                            "instructions": None,
                            "members": [
                                {
                                    "agent_id": "basic-agent",
                                    "name": "Basic Agent",
                                    "description": None,
                                    "instructions": None,
                                    "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI gpt-4o"},
                                    "tools": None,
                                    "memory": {
                                        "app_name": "Memory",
                                        "app_url": None,
                                        "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                                    },
                                    "knowledge": None,
                                    "session_table": "agno_sessions",
                                    "memory_table": "agno_memories",
                                    "knowledge_table": None,
                                }
                            ],
                            "expected_output": None,
                            "dependencies": None,
                            "enable_agentic_context": False,
                            "memory": {
                                "app_name": "Memory",
                                "app_url": None,
                                "model": {"name": "OpenAIChat", "model": "gpt-4o", "provider": "OpenAI"},
                            },
                            "knowledge": None,
                            "async_mode": False,
                            "session_table": "agno_sessions",
                            "memory_table": "agno_memories",
                            "knowledge_table": None,
                        }
                    }
                },
            },
            404: {"description": "Team not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("teams", "read", "team_id"))],
    )
    async def get_team(team_id: str, request: Request) -> TeamResponse:
        """Return the full configuration for a single team."""
        team = get_team_by_id(team_id=team_id, teams=os.teams, db=os.db, registry=registry, create_fresh=True)
        if team is None:
            raise HTTPException(status_code=404, detail="Team not found")
        if isinstance(team, RemoteTeam):
            return await team.get_team_config()
        else:
            return await TeamResponse.from_team(team=team)

    @router.get(
        "/teams/{team_id}/runs/{run_id}",
        tags=["Teams"],
        operation_id="get_team_run",
        summary="Get Team Run",
        description=(
            "Retrieve the status and output of a team run. Use this to poll for background run completion.\n\n"
            "Requires the `session_id` that was returned when the run was created."
        ),
        responses={
            200: {"description": "Run output retrieved successfully"},
            404: {"description": "Team or run not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("teams", "run", "team_id"))],
    )
    async def get_team_run(
        team_id: str,
        run_id: str,
        session_id: str = Query(..., description="Session ID for the run"),
    ):
        """Poll the status/output of a single run (used for background runs)."""
        team = get_team_by_id(team_id=team_id, teams=os.teams, db=os.db, registry=registry, create_fresh=True)
        if team is None:
            raise HTTPException(status_code=404, detail="Team not found")
        if isinstance(team, RemoteTeam):
            raise HTTPException(status_code=400, detail="Run polling is not supported for remote teams")

        run_output = await team.aget_run_output(run_id=run_id, session_id=session_id)
        if run_output is None:
            raise HTTPException(status_code=404, detail="Run not found")

        return run_output.to_dict()

    @router.get(
        "/teams/{team_id}/runs",
        tags=["Teams"],
        operation_id="list_team_runs",
        summary="List Team Runs",
        description=(
            "List runs for a team within a session, optionally filtered by status.\n\n"
            "Useful for monitoring background runs and viewing run history."
        ),
        responses={
            200: {"description": "List of runs retrieved successfully"},
            404: {"description": "Team not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("teams", "run", "team_id"))],
    )
    async def list_team_runs(
        team_id: str,
        session_id: str = Query(..., description="Session ID to list runs for"),
        status: Optional[str] = Query(None, description="Filter by run status (PENDING, RUNNING, COMPLETED, ERROR)"),
    ):
        """List the runs stored in a session, optionally filtered by status."""
        from agno.os.schema import TeamRunSchema
        from agno.team._storage import _aread_or_create_session

        team = get_team_by_id(team_id=team_id, teams=os.teams, db=os.db, registry=registry, create_fresh=True)
        if team is None:
            raise HTTPException(status_code=404, detail="Team not found")
        if isinstance(team, RemoteTeam):
            raise HTTPException(status_code=400, detail="Run listing is not supported for remote teams")

        session = await _aread_or_create_session(team, session_id=session_id)
        runs = session.runs or []
        result = []
        for run in runs:
            run_dict = run.to_dict()
            if status and run_dict.get("status") != status:
                continue
            result.append(TeamRunSchema.from_dict(run_dict))
        return result

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/teams/router.py",
"license": "Apache License 2.0",
"lines": 618,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/teams/schema.py | from typing import Any, Dict, List, Optional, Union
from uuid import uuid4
from pydantic import BaseModel
from agno.agent import Agent
from agno.os.routers.agents.schema import AgentResponse
from agno.os.schema import ModelResponse
from agno.os.utils import (
format_team_tools,
)
from agno.run import RunContext
from agno.run.team import TeamRunOutput
from agno.session import TeamSession
from agno.team.team import Team
from agno.utils.agent import aexecute_instructions, aexecute_system_message
class TeamResponse(BaseModel):
    """API-facing representation of a Team's configuration.

    Built via `from_team`, which collapses a Team's settings into grouped
    config dicts (sessions, knowledge, memory, ...) with default-valued
    fields filtered out, so the response only surfaces meaningful config.
    """

    # Identity / description
    id: Optional[str] = None
    name: Optional[str] = None
    db_id: Optional[str] = None
    description: Optional[str] = None
    role: Optional[str] = None
    mode: Optional[str] = None
    # Model and grouped, default-filtered configuration sections
    model: Optional[ModelResponse] = None
    tools: Optional[Dict[str, Any]] = None
    sessions: Optional[Dict[str, Any]] = None
    knowledge: Optional[Dict[str, Any]] = None
    memory: Optional[Dict[str, Any]] = None
    reasoning: Optional[Dict[str, Any]] = None
    default_tools: Optional[Dict[str, Any]] = None
    system_message: Optional[Dict[str, Any]] = None
    response_settings: Optional[Dict[str, Any]] = None
    introduction: Optional[str] = None
    streaming: Optional[Dict[str, Any]] = None
    # Nested member responses (agents or sub-teams)
    members: Optional[List[Union[AgentResponse, "TeamResponse"]]] = None
    metadata: Optional[Dict[str, Any]] = None
    input_schema: Optional[Dict[str, Any]] = None
    # Versioning info for teams loaded from the database/registry
    is_component: bool = False
    current_version: Optional[int] = None
    stage: Optional[str] = None

    @classmethod
    async def from_team(
        cls,
        team: Team,
        is_component: bool = False,
    ) -> "TeamResponse":
        """Build a TeamResponse from a Team, recursing into its members.

        Args:
            team: The team to serialize.
            is_component: True when the team was loaded from the database
                (a versioned component) rather than defined in code.
        """

        def filter_meaningful_config(d: Dict[str, Any], defaults: Dict[str, Any]) -> Optional[Dict[str, Any]]:
            """Filter out fields that match their default values, keeping only meaningful user configurations"""
            filtered = {}
            for key, value in d.items():
                if value is None:
                    continue
                # Skip if value matches the default exactly
                if key in defaults and value == defaults[key]:
                    continue
                # Keep non-default values
                filtered[key] = value
            return filtered if filtered else None

        # Define default values for filtering (similar to agent defaults)
        team_defaults = {
            # Sessions defaults
            "add_history_to_context": False,
            "num_history_runs": 3,
            "enable_session_summaries": False,
            "cache_session": False,
            # Knowledge defaults
            "add_references": False,
            "references_format": "json",
            "enable_agentic_knowledge_filters": False,
            # Memory defaults
            "enable_agentic_memory": False,
            "update_memory_on_run": False,
            # Reasoning defaults
            "reasoning": False,
            "reasoning_min_steps": 1,
            "reasoning_max_steps": 10,
            # Default tools defaults
            "search_knowledge": True,
            "read_chat_history": False,
            "get_member_information_tool": False,
            # System message defaults
            "system_message_role": "system",
            "markdown": False,
            "add_datetime_to_context": False,
            "add_location_to_context": False,
            "resolve_in_context": True,
            # Response settings defaults
            "parse_response": True,
            "use_json_mode": False,
            # Streaming defaults
            "stream_events": False,
            "stream_member_events": False,
        }

        # Resolve the team's effective tool set using throwaway run/session IDs;
        # MCP tool checks are skipped since no live connection is available here.
        run_id = str(uuid4())
        session_id = str(uuid4())
        _tools = team._determine_tools_for_model(
            model=team.model,  # type: ignore
            session=TeamSession(session_id=session_id, session_data={}),
            run_response=TeamRunOutput(run_id=run_id),
            run_context=RunContext(run_id=run_id, session_id=session_id, session_state={}),
            async_mode=True,
            team_run_context={},
            check_mcp_tools=False,
        )
        team_tools = _tools
        formatted_tools = format_team_tools(team_tools) if team_tools else None

        # Serialize the input schema if one is set; ignore schema-generation errors
        input_schema_dict = None
        if team.input_schema is not None:
            try:
                input_schema_dict = team.input_schema.model_json_schema()
            except Exception:
                pass

        # Build a human-readable provider string, appending the model id when known
        model_name = team.model.name or team.model.__class__.__name__ if team.model else None
        model_provider = team.model.provider or team.model.__class__.__name__ if team.model else ""
        model_id = team.model.id if team.model else None

        if model_provider and model_id:
            model_provider = f"{model_provider} {model_id}"
        elif model_name and model_id:
            model_provider = f"{model_name} {model_id}"
        elif model_id:
            model_provider = model_id

        # Database table names, only when the relevant feature is configured
        session_table = team.db.session_table_name if team.db else None
        knowledge_table = team.db.knowledge_table_name if team.db and team.knowledge else None

        tools_info = {
            "tools": formatted_tools,
            "tool_call_limit": team.tool_call_limit,
            "tool_choice": team.tool_choice,
        }

        sessions_info = {
            "session_table": session_table,
            "add_history_to_context": team.add_history_to_context,
            "enable_session_summaries": team.enable_session_summaries,
            "num_history_runs": team.num_history_runs,
            "cache_session": team.cache_session,
        }

        contents_db = getattr(team.knowledge, "contents_db", None) if team.knowledge else None
        knowledge_info = {
            "db_id": contents_db.id if contents_db else None,
            "knowledge_table": knowledge_table,
            "enable_agentic_knowledge_filters": team.enable_agentic_knowledge_filters,
            "knowledge_filters": (
                [f.to_dict() if hasattr(f, "to_dict") else f for f in team.knowledge_filters]
                if isinstance(team.knowledge_filters, list)
                else team.knowledge_filters
            ),
            "references_format": team.references_format,
        }

        # Memory section is only populated when a memory manager is configured
        memory_info: Optional[Dict[str, Any]] = None
        if team.memory_manager is not None:
            memory_info = {
                "enable_agentic_memory": team.enable_agentic_memory,
                "update_memory_on_run": team.update_memory_on_run,
                "enable_user_memories": team.enable_user_memories,  # Soon to be deprecated. Use update_memory_on_run
                "metadata": team.metadata,
                "memory_table": team.db.memory_table_name if team.db and team.update_memory_on_run else None,
            }
            if team.memory_manager.model is not None:
                memory_info["model"] = ModelResponse(
                    name=team.memory_manager.model.name,
                    model=team.memory_manager.model.id,
                    provider=team.memory_manager.model.provider,
                ).model_dump()

        reasoning_info: Dict[str, Any] = {
            "reasoning": team.reasoning,
            "reasoning_agent_id": team.reasoning_agent.id if team.reasoning_agent else None,
            "reasoning_min_steps": team.reasoning_min_steps,
            "reasoning_max_steps": team.reasoning_max_steps,
        }
        if team.reasoning_model:
            reasoning_info["reasoning_model"] = ModelResponse(
                name=team.reasoning_model.name,
                model=team.reasoning_model.id,
                provider=team.reasoning_model.provider,
            ).model_dump()

        default_tools_info = {
            "search_knowledge": team.search_knowledge,
            "read_chat_history": team.read_chat_history,
            "get_member_information_tool": team.get_member_information_tool,
        }

        # Callable instructions / system messages are executed to get their
        # concrete string form for the response
        team_instructions = team.instructions if team.instructions else None
        if team_instructions and callable(team_instructions):
            team_instructions = await aexecute_instructions(instructions=team_instructions, agent=team, team=team)

        team_system_message = team.system_message if team.system_message else None
        if team_system_message and callable(team_system_message):
            team_system_message = await aexecute_system_message(
                system_message=team_system_message, agent=team, team=team
            )

        system_message_info = {
            "system_message": team_system_message,
            "system_message_role": team.system_message_role,
            "description": team.description,
            "instructions": team_instructions,
            "expected_output": team.expected_output,
            "additional_context": team.additional_context,
            "markdown": team.markdown,
            "add_datetime_to_context": team.add_datetime_to_context,
            "add_location_to_context": team.add_location_to_context,
            "resolve_in_context": team.resolve_in_context,
        }

        # Handle output_schema name for both Pydantic models and JSON schemas
        output_schema_name = None
        if team.output_schema is not None:
            if isinstance(team.output_schema, dict):
                if "json_schema" in team.output_schema:
                    output_schema_name = team.output_schema["json_schema"].get("name", "JSONSchema")
                elif "schema" in team.output_schema and isinstance(team.output_schema["schema"], dict):
                    output_schema_name = team.output_schema["schema"].get("title", "JSONSchema")
                else:
                    output_schema_name = team.output_schema.get("title", "JSONSchema")
            elif hasattr(team.output_schema, "__name__"):
                output_schema_name = team.output_schema.__name__

        response_settings_info: Dict[str, Any] = {
            "output_schema_name": output_schema_name,
            "parser_model_prompt": team.parser_model_prompt,
            "parse_response": team.parse_response,
            "use_json_mode": team.use_json_mode,
        }
        if team.parser_model:
            response_settings_info["parser_model"] = ModelResponse(
                name=team.parser_model.name,
                model=team.parser_model.id,
                provider=team.parser_model.provider,
            ).model_dump()

        streaming_info = {
            "stream": team.stream,
            "stream_events": team.stream_events,
            "stream_member_events": team.stream_member_events,
        }

        # Build team model only if it has at least one non-null field
        _team_model_data: Dict[str, Any] = {}
        if team.model and team.model.name is not None:
            _team_model_data["name"] = team.model.name
        if team.model and team.model.id is not None:
            _team_model_data["model"] = team.model.id
        if team.model and team.model.provider is not None:
            _team_model_data["provider"] = team.model.provider

        # Recursively serialize members: agents and nested sub-teams
        members: List[Union[AgentResponse, TeamResponse]] = []
        for member in team.members if isinstance(team.members, list) else []:
            if isinstance(member, Agent):
                agent_response = await AgentResponse.from_agent(member)
                members.append(agent_response)
            if isinstance(member, Team):
                team_response = await TeamResponse.from_team(member)
                members.append(team_response)

        return TeamResponse(
            id=team.id,
            name=team.name,
            db_id=team.db.id if team.db else None,
            description=team.description,
            role=team.role,
            mode=team.mode.value if team.mode else None,
            model=ModelResponse(**_team_model_data) if _team_model_data else None,
            tools=filter_meaningful_config(tools_info, {}),
            sessions=filter_meaningful_config(sessions_info, team_defaults),
            knowledge=filter_meaningful_config(knowledge_info, team_defaults),
            memory=filter_meaningful_config(memory_info, team_defaults) if memory_info else None,
            reasoning=filter_meaningful_config(reasoning_info, team_defaults),
            default_tools=filter_meaningful_config(default_tools_info, team_defaults),
            system_message=filter_meaningful_config(system_message_info, team_defaults),
            response_settings=filter_meaningful_config(response_settings_info, team_defaults),
            introduction=team.introduction,
            streaming=filter_meaningful_config(streaming_info, team_defaults),
            members=members if members else None,
            metadata=team.metadata,
            input_schema=input_schema_dict,
            is_component=is_component,
            # _version/_stage are set on teams loaded from the database
            current_version=getattr(team, "_version", None),
            stage=getattr(team, "_stage", None),
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/teams/schema.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/workflows/router.py | import json
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, List, Optional, Union
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Depends,
Form,
HTTPException,
Query,
Request,
WebSocket,
)
from fastapi.responses import JSONResponse, StreamingResponse
from pydantic import BaseModel
from agno.db.base import BaseDb
from agno.exceptions import InputCheckError, OutputCheckError
from agno.os.auth import (
get_auth_token_from_request,
get_authentication_dependency,
require_resource_access,
validate_websocket_token,
)
from agno.os.managers import event_buffer, websocket_manager
from agno.os.routers.workflows.schema import WorkflowResponse
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
UnauthenticatedResponse,
ValidationErrorResponse,
WorkflowSummaryResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import (
format_sse_event,
get_request_kwargs,
get_workflow_by_id,
)
from agno.run.base import RunStatus
from agno.run.workflow import WorkflowErrorEvent
from agno.utils.log import log_debug, log_warning, logger
from agno.utils.serialize import json_serializer
from agno.workflow.remote import RemoteWorkflow
from agno.workflow.workflow import Workflow
if TYPE_CHECKING:
from agno.os.app import AgentOS
async def handle_workflow_via_websocket(websocket: WebSocket, message: dict, os: "AgentOS"):
    """Execute a workflow directly over an established WebSocket connection.

    Expects ``message`` to carry at least ``workflow_id``; ``session_id``,
    ``message`` (the user input) and ``user_id`` are optional. All failures
    are reported back to the client as JSON ``error`` events rather than
    raised to the caller.
    """
    try:
        workflow_id = message.get("workflow_id")
        session_id = message.get("session_id")
        input_text = message.get("message", "")
        user_id = message.get("user_id")

        if not workflow_id:
            await websocket.send_text(json.dumps({"event": "error", "error": "workflow_id is required"}))
            return

        # Resolve the workflow from the OS (config, registry, or database)
        workflow = get_workflow_by_id(
            workflow_id=workflow_id, workflows=os.workflows, db=os.db, registry=os.registry, create_fresh=True
        )
        if not workflow:
            await websocket.send_text(json.dumps({"event": "error", "error": f"Workflow {workflow_id} not found"}))
            return
        if isinstance(workflow, RemoteWorkflow):
            await websocket.send_text(
                json.dumps({"event": "error", "error": "Remote workflows are not supported via WebSocket"})
            )
            return

        # Prefer the workflow's own default session; otherwise mint a fresh one
        if not session_id:
            session_id = workflow.session_id or str(uuid4())

        # Start a streaming background run; events flow out through the websocket
        await workflow.arun(  # type: ignore
            input=input_text,
            session_id=session_id,
            user_id=user_id,
            stream=True,
            stream_events=True,
            background=True,
            websocket=websocket,
        )
        # NOTE: Don't register the original websocket in the manager
        # It's already handled by the WebSocketHandler passed to the workflow
        # The manager is ONLY for reconnected clients (see handle_workflow_subscription)
    except (InputCheckError, OutputCheckError) as e:
        # Guardrail failures carry structured details the client can inspect
        guardrail_payload = {
            "event": "error",
            "error": str(e),
            "error_type": e.type,
            "error_id": e.error_id,
            "additional_data": e.additional_data,
        }
        await websocket.send_text(json.dumps(guardrail_payload))
    except Exception as e:
        logger.error(f"Error executing workflow via WebSocket: {e}")
        payload: Dict[str, Any] = {"event": "error", "error": str(e)}
        # Attach optional error metadata only when it carries a value
        error_type = getattr(e, "type", None)
        if error_type is not None:
            payload["error_type"] = error_type
        error_id = getattr(e, "error_id", None)
        if error_id is not None:
            payload["error_id"] = error_id
        await websocket.send_text(json.dumps(payload))
async def handle_workflow_subscription(websocket: WebSocket, message: dict, os: "AgentOS"):
    """
    Handle subscription/reconnection to an existing workflow run.
    Allows clients to reconnect after page refresh or disconnection and catch up on missed events.

    Three scenarios are handled, in order:
      1. Run not in the in-memory event buffer -> attempt a full replay from the database.
      2. Run in the buffer but already finished -> replay all buffered events.
      3. Run still active -> send any missed events, then register the websocket for live updates.

    Every event sent to the client is annotated with a 0-based ``event_index`` so the
    client can resume later via ``last_event_index``.
    """
    try:
        run_id = message.get("run_id")
        workflow_id = message.get("workflow_id")
        session_id = message.get("session_id")
        last_event_index = message.get("last_event_index")  # 0-based index of last received event
        if not run_id:
            await websocket.send_text(json.dumps({"event": "error", "error": "run_id is required for subscription"}))
            return
        # Check if run exists in event buffer
        buffer_status = event_buffer.get_run_status(run_id)
        if buffer_status is None:
            # Run not in buffer - check database
            # DB lookup needs both workflow_id and session_id to locate the run
            if workflow_id and session_id:
                workflow = get_workflow_by_id(
                    workflow_id=workflow_id, workflows=os.workflows, db=os.db, registry=os.registry, create_fresh=True
                )
                # DB replay is only supported for local workflows, not RemoteWorkflow
                if workflow and isinstance(workflow, Workflow):
                    workflow_run = await workflow.aget_run_output(run_id, session_id)
                    if workflow_run:
                        # Run exists in DB - send all events from DB
                        if workflow_run.events:
                            await websocket.send_text(
                                json.dumps(
                                    {
                                        "event": "replay",
                                        "run_id": run_id,
                                        "status": workflow_run.status.value if workflow_run.status else "unknown",
                                        "total_events": len(workflow_run.events),
                                        "message": "Run completed. Replaying all events from database.",
                                    }
                                )
                            )
                            # Send events one by one
                            for idx, event in enumerate(workflow_run.events):
                                # Convert event to dict and add event_index
                                event_dict = event.model_dump() if hasattr(event, "model_dump") else event.to_dict()
                                event_dict["event_index"] = idx
                                if "run_id" not in event_dict:
                                    event_dict["run_id"] = run_id
                                # NOTE: DB events go through json_serializer; buffered events
                                # below are presumably already JSON-safe — confirm if changing
                                await websocket.send_text(json.dumps(event_dict, default=json_serializer))
                        else:
                            await websocket.send_text(
                                json.dumps(
                                    {
                                        "event": "replay",
                                        "run_id": run_id,
                                        "status": workflow_run.status.value if workflow_run.status else "unknown",
                                        "total_events": 0,
                                        "message": "Run completed but no events stored.",
                                    }
                                )
                            )
                        return
            # Run not found anywhere
            await websocket.send_text(
                json.dumps({"event": "error", "error": f"Run {run_id} not found in buffer or database"})
            )
            return
        # Run is in buffer (still active or recently completed)
        if buffer_status in [RunStatus.completed, RunStatus.error, RunStatus.cancelled]:
            # Run finished - send all events from buffer
            all_events = event_buffer.get_events(run_id, last_event_index=None)
            await websocket.send_text(
                json.dumps(
                    {
                        "event": "replay",
                        "run_id": run_id,
                        "status": buffer_status.value,
                        "total_events": len(all_events),
                        "message": f"Run {buffer_status.value}. Replaying all events.",
                    }
                )
            )
            # Send all events
            for idx, buffered_event in enumerate(all_events):
                # Convert event to dict and add event_index
                event_dict = (
                    buffered_event.model_dump() if hasattr(buffered_event, "model_dump") else buffered_event.to_dict()
                )
                event_dict["event_index"] = idx
                if "run_id" not in event_dict:
                    event_dict["run_id"] = run_id
                await websocket.send_text(json.dumps(event_dict))
            return
        # Run is still active - send missed events and subscribe to new ones
        missed_events = event_buffer.get_events(run_id, last_event_index)
        current_event_count = event_buffer.get_event_count(run_id)
        if missed_events:
            # Send catch-up notification
            await websocket.send_text(
                json.dumps(
                    {
                        "event": "catch_up",
                        "run_id": run_id,
                        "status": "running",
                        "missed_events": len(missed_events),
                        "current_event_count": current_event_count,
                        "message": f"Catching up on {len(missed_events)} missed events.",
                    }
                )
            )
            # Send missed events
            # Resume numbering right after the client's last seen index
            start_index = (last_event_index + 1) if last_event_index is not None else 0
            for idx, buffered_event in enumerate(missed_events):
                # Convert event to dict and add event_index
                event_dict = (
                    buffered_event.model_dump() if hasattr(buffered_event, "model_dump") else buffered_event.to_dict()
                )
                event_dict["event_index"] = start_index + idx
                if "run_id" not in event_dict:
                    event_dict["run_id"] = run_id
                await websocket.send_text(json.dumps(event_dict))
        # Register websocket for future events
        await websocket_manager.register_websocket(run_id, websocket)
        # Send subscription confirmation
        await websocket.send_text(
            json.dumps(
                {
                    "event": "subscribed",
                    "run_id": run_id,
                    "status": "running",
                    "current_event_count": current_event_count,
                    "message": "Subscribed to workflow run. You will receive new events as they occur.",
                }
            )
        )
        log_debug(f"Client subscribed to workflow run {run_id} (last_event_index: {last_event_index})")
    except Exception as e:
        logger.error(f"Error handling workflow subscription: {e}")
        await websocket.send_text(
            json.dumps(
                {
                    "event": "error",
                    "error": f"Subscription failed: {str(e)}",
                }
            )
        )
async def workflow_response_streamer(
    workflow: Union[Workflow, RemoteWorkflow],
    input: Union[str, Dict[str, Any], List[Any], BaseModel],
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    background_tasks: Optional[BackgroundTasks] = None,
    auth_token: Optional[str] = None,
    **kwargs: Any,
) -> AsyncGenerator:
    """Stream a workflow run as SSE-formatted chunks.

    Guardrail failures and unexpected exceptions are converted into
    WorkflowErrorEvent payloads and yielded, so the HTTP stream always
    terminates cleanly instead of raising.
    """
    try:
        # Forward FastAPI background tasks into the run when the caller provided them
        if background_tasks is not None:
            kwargs["background_tasks"] = background_tasks
        # Callers may override event streaming; default to streaming all events
        stream_events = kwargs.pop("stream_events", True)
        # Remote workflows need the caller's auth token to authenticate downstream
        if auth_token and isinstance(workflow, RemoteWorkflow):
            kwargs["auth_token"] = auth_token
        async for chunk in workflow.arun(  # type: ignore
            input=input,
            session_id=session_id,
            user_id=user_id,
            stream=True,
            stream_events=stream_events,
            **kwargs,
        ):
            yield format_sse_event(chunk)  # type: ignore
    except (InputCheckError, OutputCheckError) as e:
        # Guardrail errors carry structured details for the client
        yield format_sse_event(
            WorkflowErrorEvent(
                error=str(e),
                error_type=e.type,
                error_id=e.error_id,
                additional_data=e.additional_data,
            )
        )
    except Exception as e:
        import traceback

        traceback.print_exc()
        yield format_sse_event(
            WorkflowErrorEvent(
                error=str(e),
                error_type=getattr(e, "type", None),
                error_id=getattr(e, "error_id", None),
            )
        )
        return
def get_websocket_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
) -> APIRouter:
    """
    Create WebSocket router with support for both legacy (os_security_key) and JWT authentication.
    WebSocket endpoints handle authentication internally via message-based auth.
    Authentication methods (in order of precedence):
    1. JWT tokens - if JWTMiddleware is configured (via app.state.jwt_middleware)
    2. Legacy bearer token - if settings.os_security_key is set
    3. No authentication - if neither is configured
    The JWT middleware instance is accessed from app.state.jwt_middleware, which is set
    by AgentOS when authorization is enabled. This allows reusing the same validation
    logic and loaded keys as the HTTP middleware.
    Args:
        os: The AgentOS instance
        settings: API settings (includes os_security_key for legacy auth)
    """
    ws_router = APIRouter()

    @ws_router.websocket(
        "/workflows/ws",
        name="workflow_websocket",
    )
    async def workflow_websocket_endpoint(websocket: WebSocket):
        """WebSocket endpoint for receiving real-time workflow events"""
        # Check if JWT validator is configured (set by AgentOS when authorization=True)
        jwt_validator = getattr(websocket.app.state, "jwt_validator", None)
        jwt_auth_enabled = jwt_validator is not None
        # Determine auth requirements - JWT takes precedence over legacy
        requires_auth = jwt_auth_enabled or bool(settings.os_security_key)
        await websocket_manager.connect(websocket, requires_auth=requires_auth)
        # Store user context from JWT auth
        websocket_user_context: Dict[str, Any] = {}
        try:
            # Message loop: each client message is a JSON object with an "action" field
            while True:
                data = await websocket.receive_text()
                message = json.loads(data)
                action = message.get("action")
                # Handle authentication first
                if action == "authenticate":
                    token = message.get("token")
                    if not token:
                        await websocket.send_text(json.dumps({"event": "auth_error", "error": "Token is required"}))
                        continue
                    if jwt_auth_enabled and jwt_validator:
                        # Use JWT validator for token validation
                        try:
                            payload = jwt_validator.validate_token(token)
                            claims = jwt_validator.extract_claims(payload)
                            await websocket_manager.authenticate_websocket(websocket)
                            # Store user context from JWT
                            websocket_user_context["user_id"] = claims["user_id"]
                            websocket_user_context["scopes"] = claims["scopes"]
                            websocket_user_context["payload"] = payload
                            # Include user info in auth success message
                            await websocket.send_text(
                                json.dumps(
                                    {
                                        "event": "authenticated",
                                        "message": "JWT authentication successful.",
                                        "user_id": claims["user_id"],
                                    }
                                )
                            )
                        except Exception as e:
                            # Surface whether the token expired vs. was otherwise invalid
                            error_msg = str(e) if str(e) else "Invalid token"
                            error_type = "expired" if "expired" in error_msg.lower() else "invalid_token"
                            await websocket.send_text(
                                json.dumps(
                                    {
                                        "event": "auth_error",
                                        "error": error_msg,
                                        "error_type": error_type,
                                    }
                                )
                            )
                        continue
                    elif validate_websocket_token(token, settings):
                        # Legacy os_security_key authentication
                        await websocket_manager.authenticate_websocket(websocket)
                    else:
                        await websocket.send_text(json.dumps({"event": "auth_error", "error": "Invalid token"}))
                        continue
                # Check authentication for all other actions (only when required)
                elif requires_auth and not websocket_manager.is_authenticated(websocket):
                    auth_type = "JWT" if jwt_auth_enabled else "bearer token"
                    await websocket.send_text(
                        json.dumps(
                            {
                                "event": "auth_required",
                                "error": f"Authentication required. Send authenticate action with valid {auth_type}.",
                            }
                        )
                    )
                    continue
                # Handle authenticated actions
                elif action == "ping":
                    await websocket.send_text(json.dumps({"event": "pong"}))
                elif action == "start-workflow":
                    # Add user context to message if available from JWT auth
                    if websocket_user_context:
                        if "user_id" not in message and websocket_user_context.get("user_id"):
                            message["user_id"] = websocket_user_context["user_id"]
                    # Handle workflow execution directly via WebSocket
                    await handle_workflow_via_websocket(websocket, message, os)
                elif action == "reconnect":
                    # Subscribe/reconnect to an existing workflow run
                    await handle_workflow_subscription(websocket, message, os)
                else:
                    await websocket.send_text(json.dumps({"event": "error", "error": f"Unknown action: {action}"}))
        except Exception as e:
            # 1001 (going away) and 1012 (service restart) look like normal
            # disconnects, so they are not logged as errors
            if "1012" not in str(e) and "1001" not in str(e):
                logger.error(f"WebSocket error: {e}")
        finally:
            # Clean up the websocket connection
            await websocket_manager.disconnect_websocket(websocket)

    return ws_router
def get_workflow_router(
    os: "AgentOS",
    settings: AgnoAPISettings = AgnoAPISettings(),
) -> APIRouter:
    """Create the workflow router with comprehensive OpenAPI documentation.

    Endpoints: list workflows, get workflow details, create (execute) a run,
    cancel a run, and poll a run's output. All endpoints share the HTTP
    authentication dependency and a common set of error response models.
    """
    router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        responses={
            400: {"description": "Bad Request", "model": BadRequestResponse},
            401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
            404: {"description": "Not Found", "model": NotFoundResponse},
            422: {"description": "Validation Error", "model": ValidationErrorResponse},
            500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
        },
    )

    @router.get(
        "/workflows",
        response_model=List[WorkflowSummaryResponse],
        response_model_exclude_none=True,
        tags=["Workflows"],
        operation_id="get_workflows",
        summary="List All Workflows",
        description=(
            "Retrieve a comprehensive list of all workflows configured in this OS instance.\n\n"
            "**Return Information:**\n"
            "- Workflow metadata (ID, name, description)\n"
            "- Input schema requirements\n"
            "- Step sequence and execution flow\n"
            "- Associated agents and teams"
        ),
        responses={
            200: {
                "description": "List of workflows retrieved successfully",
                "content": {
                    "application/json": {
                        "example": [
                            {
                                "id": "content-creation-workflow",
                                "name": "Content Creation Workflow",
                                "description": "Automated content creation from blog posts to social media",
                                "db_id": "123",
                            }
                        ]
                    }
                },
            }
        },
    )
    async def get_workflows(request: Request) -> List[WorkflowSummaryResponse]:
        # Filter workflows based on user's scopes (only if authorization is enabled)
        if getattr(request.state, "authorization_enabled", False):
            from agno.os.auth import filter_resources_by_access, get_accessible_resources

            # Check if user has any workflow scopes at all
            accessible_ids = get_accessible_resources(request, "workflows")
            if not accessible_ids:
                raise HTTPException(status_code=403, detail="Insufficient permissions")
            accessible_workflows = filter_resources_by_access(request, os.workflows or [], "workflows")
        else:
            accessible_workflows = os.workflows or []
        # Code-configured workflows are reported as non-components
        workflows: List[WorkflowSummaryResponse] = []
        if accessible_workflows:
            for workflow in accessible_workflows:
                workflows.append(WorkflowSummaryResponse.from_workflow(workflow=workflow, is_component=False))
        # DB-stored (Builder-created) workflows are appended as components
        if os.db and isinstance(os.db, BaseDb):
            # NOTE: local import shadows this endpoint's name inside the function body
            from agno.workflow.workflow import get_workflows

            for db_workflow in get_workflows(db=os.db, registry=os.registry):
                try:
                    workflows.append(WorkflowSummaryResponse.from_workflow(workflow=db_workflow, is_component=True))
                except Exception as e:
                    # A single malformed DB workflow should not break the whole listing
                    workflow_id = getattr(db_workflow, "id", "unknown")
                    logger.error(f"Error converting workflow {workflow_id} to response: {e}")
                    continue
        return workflows

    @router.get(
        "/workflows/{workflow_id}",
        response_model=WorkflowResponse,
        response_model_exclude_none=True,
        tags=["Workflows"],
        operation_id="get_workflow",
        summary="Get Workflow Details",
        description=("Retrieve detailed configuration and step information for a specific workflow."),
        responses={
            200: {
                "description": "Workflow details retrieved successfully",
                "content": {
                    "application/json": {
                        "example": {
                            "id": "content-creation-workflow",
                            "name": "Content Creation Workflow",
                            "description": "Automated content creation from blog posts to social media",
                            "db_id": "123",
                        }
                    }
                },
            },
            404: {"description": "Workflow not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("workflows", "read", "workflow_id"))],
    )
    async def get_workflow(workflow_id: str, request: Request) -> WorkflowResponse:
        workflow = get_workflow_by_id(
            workflow_id=workflow_id, workflows=os.workflows, db=os.db, registry=os.registry, create_fresh=True
        )
        if workflow is None:
            raise HTTPException(status_code=404, detail="Workflow not found")
        # Remote workflows proxy the config request to the remote instance
        if isinstance(workflow, RemoteWorkflow):
            return await workflow.get_workflow_config()
        else:
            return await WorkflowResponse.from_workflow(workflow=workflow)

    @router.post(
        "/workflows/{workflow_id}/runs",
        tags=["Workflows"],
        operation_id="create_workflow_run",
        response_model_exclude_none=True,
        summary="Execute Workflow",
        description=(
            "Execute a workflow with the provided input data. Workflows can run in streaming or batch mode.\n\n"
            "**Execution Modes:**\n"
            "- **Streaming (`stream=true`)**: Real-time step-by-step execution updates via SSE\n"
            "- **Non-Streaming (`stream=false`)**: Complete workflow execution with final result\n\n"
            "**Workflow Execution Process:**\n"
            "1. Input validation against workflow schema\n"
            "2. Sequential or parallel step execution based on workflow design\n"
            "3. Data flow between steps with transformation\n"
            "4. Error handling and automatic retries where configured\n"
            "5. Final result compilation and response\n\n"
            "**Session Management:**\n"
            "Workflows support session continuity for stateful execution across multiple runs."
        ),
        responses={
            200: {
                "description": "Workflow executed successfully",
                "content": {
                    "text/event-stream": {
                        "example": 'event: RunStarted\ndata: {"content": "Hello!", "run_id": "123..."}\n\n'
                    },
                },
            },
            400: {"description": "Invalid input data or workflow configuration", "model": BadRequestResponse},
            404: {"description": "Workflow not found", "model": NotFoundResponse},
            500: {"description": "Workflow execution error", "model": InternalServerErrorResponse},
        },
        dependencies=[Depends(require_resource_access("workflows", "run", "workflow_id"))],
    )
    async def create_workflow_run(
        workflow_id: str,
        request: Request,
        background_tasks: BackgroundTasks,
        message: str = Form(...),
        stream: bool = Form(True),
        session_id: Optional[str] = Form(None),
        user_id: Optional[str] = Form(None),
        version: Optional[int] = Form(None),
    ):
        # Collect any extra form fields beyond the declared parameters
        kwargs = await get_request_kwargs(request, create_workflow_run)
        # Values injected into request.state (e.g. by auth middleware) take
        # precedence over form parameters; conflicts are logged, not rejected
        if hasattr(request.state, "user_id") and request.state.user_id is not None:
            if user_id and user_id != request.state.user_id:
                log_warning("User ID parameter passed in both request state and kwargs, using request state")
            user_id = request.state.user_id
        if hasattr(request.state, "session_id") and request.state.session_id is not None:
            if session_id and session_id != request.state.session_id:
                log_warning("Session ID parameter passed in both request state and kwargs, using request state")
            session_id = request.state.session_id
        if hasattr(request.state, "session_state") and request.state.session_state is not None:
            session_state = request.state.session_state
            if "session_state" in kwargs:
                log_warning("Session state parameter passed in both request state and kwargs, using request state")
            kwargs["session_state"] = session_state
        if hasattr(request.state, "dependencies") and request.state.dependencies is not None:
            dependencies = request.state.dependencies
            if "dependencies" in kwargs:
                log_warning("Dependencies parameter passed in both request state and kwargs, using request state")
            kwargs["dependencies"] = dependencies
        if hasattr(request.state, "metadata") and request.state.metadata is not None:
            metadata = request.state.metadata
            if "metadata" in kwargs:
                log_warning("Metadata parameter passed in both request state and kwargs, using request state")
            kwargs["metadata"] = metadata
        # Retrieve the workflow by ID
        workflow = get_workflow_by_id(
            workflow_id=workflow_id,
            workflows=os.workflows,
            db=os.db,
            version=version,
            registry=os.registry,
            create_fresh=True,
        )
        if workflow is None:
            raise HTTPException(status_code=404, detail="Workflow not found")
        # Reuse the provided session or start a new one
        if session_id:
            logger.debug(f"Continuing session: {session_id}")
        else:
            logger.debug("Creating new session")
            session_id = str(uuid4())
        # Extract auth token for remote workflows
        auth_token = get_auth_token_from_request(request)
        # Return based on stream parameter
        try:
            if stream:
                # SSE stream; errors inside the stream are emitted as error events
                return StreamingResponse(
                    workflow_response_streamer(
                        workflow,
                        input=message,
                        session_id=session_id,
                        user_id=user_id,
                        background_tasks=background_tasks,
                        auth_token=auth_token,
                        **kwargs,
                    ),
                    media_type="text/event-stream",
                )
            else:
                # Pass auth_token for remote workflows
                if auth_token and isinstance(workflow, RemoteWorkflow):
                    kwargs["auth_token"] = auth_token
                run_response = await workflow.arun(
                    input=message,
                    session_id=session_id,
                    user_id=user_id,
                    stream=False,
                    background_tasks=background_tasks,
                    **kwargs,
                )
                return run_response.to_dict()
        except InputCheckError as e:
            # Input guardrail failures are client errors
            raise HTTPException(status_code=400, detail=str(e))
        except Exception as e:
            # Handle unexpected runtime errors
            # NOTE(review): OutputCheckError falls through to 500 here, unlike
            # InputCheckError's 400 — confirm whether that is intentional
            raise HTTPException(status_code=500, detail=f"Error running workflow: {str(e)}")

    @router.post(
        "/workflows/{workflow_id}/runs/{run_id}/cancel",
        tags=["Workflows"],
        operation_id="cancel_workflow_run",
        summary="Cancel Workflow Run",
        description=(
            "Cancel a currently executing workflow run, stopping all active steps and cleanup.\n"
            "**Note:** Complex workflows with multiple parallel steps may take time to fully cancel."
        ),
        responses={
            200: {},
            404: {"description": "Workflow or run not found", "model": NotFoundResponse},
            500: {"description": "Failed to cancel workflow run", "model": InternalServerErrorResponse},
        },
        dependencies=[Depends(require_resource_access("workflows", "run", "workflow_id"))],
    )
    async def cancel_workflow_run(workflow_id: str, run_id: str):
        workflow = get_workflow_by_id(
            workflow_id=workflow_id, workflows=os.workflows, db=os.db, registry=os.registry, create_fresh=True
        )
        if workflow is None:
            raise HTTPException(status_code=404, detail="Workflow not found")
        # cancel_run always stores cancellation intent (even for not-yet-registered runs
        # in cancel-before-start scenarios), so we always return success.
        await workflow.acancel_run(run_id=run_id)
        return JSONResponse(content={}, status_code=200)

    @router.get(
        "/workflows/{workflow_id}/runs/{run_id}",
        tags=["Workflows"],
        operation_id="get_workflow_run",
        summary="Get Workflow Run",
        description=(
            "Retrieve the status and output of a workflow run. Use this to poll for run completion.\n\n"
            "Requires the `session_id` that was returned when the run was created."
        ),
        responses={
            200: {"description": "Run output retrieved successfully"},
            404: {"description": "Workflow or run not found", "model": NotFoundResponse},
        },
        dependencies=[Depends(require_resource_access("workflows", "run", "workflow_id"))],
    )
    async def get_workflow_run(
        workflow_id: str,
        run_id: str,
        session_id: str = Query(..., description="Session ID for the run"),
    ):
        workflow = get_workflow_by_id(
            workflow_id=workflow_id, workflows=os.workflows, db=os.db, registry=os.registry, create_fresh=True
        )
        if workflow is None:
            raise HTTPException(status_code=404, detail="Workflow not found")
        # Polling is local-only; remote runs cannot be looked up from here
        if isinstance(workflow, RemoteWorkflow):
            raise HTTPException(status_code=400, detail="Run polling is not supported for remote workflows")
        run_output = await workflow.aget_run_output(run_id=run_id, session_id=session_id)
        if run_output is None:
            raise HTTPException(status_code=404, detail="Run not found")
        return run_output.to_dict()

    return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/workflows/router.py",
"license": "Apache License 2.0",
"lines": 705,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/workflows/schema.py | from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from agno.os.routers.agents.schema import AgentResponse
from agno.os.routers.teams.schema import TeamResponse
from agno.workflow.agent import WorkflowAgent
from agno.workflow.workflow import Workflow
def _generate_schema_from_params(params: Dict[str, Any]) -> Dict[str, Any]:
"""Convert function parameters to JSON schema"""
properties: Dict[str, Any] = {}
required: List[str] = []
for param_name, param_info in params.items():
# Skip the default 'message' parameter for custom kwargs workflows
if param_name == "message":
continue
# Map Python types to JSON schema types
param_type = param_info.get("annotation", "str")
default_value = param_info.get("default")
is_required = param_info.get("required", False)
# Convert Python type annotations to JSON schema types
if param_type == "str":
properties[param_name] = {"type": "string"}
elif param_type == "bool":
properties[param_name] = {"type": "boolean"}
elif param_type == "int":
properties[param_name] = {"type": "integer"}
elif param_type == "float":
properties[param_name] = {"type": "number"}
elif "List" in str(param_type):
properties[param_name] = {"type": "array", "items": {"type": "string"}}
else:
properties[param_name] = {"type": "string"} # fallback
# Add default value if present
if default_value is not None:
properties[param_name]["default"] = default_value
# Add to required if no default value
if is_required and default_value is None:
required.append(param_name)
schema = {"type": "object", "properties": properties}
if required:
schema["required"] = required
return schema
def get_workflow_input_schema_dict(workflow: Workflow) -> Optional[Dict[str, Any]]:
    """Resolve a workflow's input schema as a plain dict for API responses.

    Resolution order:
      1. An explicit Pydantic ``input_schema`` set on the workflow.
      2. A schema auto-generated from a callable step's custom kwargs.
      3. ``None`` — the workflow simply expects a string message.
    """
    # 1) Explicit Pydantic model takes precedence
    if workflow.input_schema is not None:
        try:
            schema = workflow.input_schema.model_json_schema()
        except Exception:
            # Schema generation failed — report "no schema" instead of erroring
            schema = None
        return schema
    # 2) Callable steps: derive a schema from their extra parameters
    if workflow.steps and callable(workflow.steps):
        custom_params = workflow.run_parameters
        # Only meaningful when there is more than just the implicit 'message'
        if custom_params and len(custom_params) > 1:
            return _generate_schema_from_params(custom_params)
    # 3) Plain string input — no schema to publish
    return None
class WorkflowResponse(BaseModel):
    """API representation of a workflow, including resolved steps and input schema."""

    id: Optional[str] = Field(None, description="Unique identifier for the workflow")
    name: Optional[str] = Field(None, description="Name of the workflow")
    db_id: Optional[str] = Field(None, description="Database identifier")
    description: Optional[str] = Field(None, description="Description of the workflow")
    input_schema: Optional[Dict[str, Any]] = Field(None, description="Input schema for the workflow")
    steps: Optional[List[Dict[str, Any]]] = Field(None, description="List of workflow steps")
    agent: Optional[AgentResponse] = Field(None, description="Agent configuration if used")
    team: Optional[TeamResponse] = Field(None, description="Team configuration if used")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")
    workflow_agent: bool = Field(False, description="Whether this workflow uses a WorkflowAgent")
    is_component: bool = Field(False, description="Whether this workflow was created via Builder")
    current_version: Optional[int] = Field(None, description="Current published version number")
    stage: Optional[str] = Field(None, description="Stage of the loaded config (draft/published)")

    @classmethod
    async def _resolve_agents_and_teams_recursively(cls, steps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Parse Agents and Teams into AgentResponse and TeamResponse objects.
        If the given steps have nested steps, recursively work on those.

        NOTE: mutates the given ``steps`` list in place and also returns it.
        """
        if not steps:
            return steps

        def _prune_none(value: Any) -> Any:
            # Recursively remove None values from dicts and lists
            if isinstance(value, dict):
                return {k: _prune_none(v) for k, v in value.items() if v is not None}
            if isinstance(value, list):
                return [_prune_none(v) for v in value]
            return value

        for idx, step in enumerate(steps):
            if step.get("agent"):
                # Convert to dict and exclude fields that are None
                agent_response = await AgentResponse.from_agent(step.get("agent"))  # type: ignore
                step["agent"] = agent_response.model_dump(exclude_none=True)
            if step.get("team"):
                team_response = await TeamResponse.from_team(step.get("team"))  # type: ignore
                step["team"] = team_response.model_dump(exclude_none=True)
            if step.get("steps"):
                # Nested steps are resolved with the same logic, depth-first
                step["steps"] = await cls._resolve_agents_and_teams_recursively(step["steps"])
            # Prune None values in the entire step
            steps[idx] = _prune_none(step)
        return steps

    @classmethod
    async def from_workflow(
        cls,
        workflow: Workflow,
        is_component: bool = False,
    ) -> "WorkflowResponse":
        """Build a WorkflowResponse from a Workflow instance.

        Args:
            workflow: The workflow to serialize.
            is_component: Whether the workflow was created via Builder.
        """
        workflow_dict = workflow.to_dict_for_steps()
        steps = workflow_dict.get("steps")
        if steps:
            steps = await cls._resolve_agents_and_teams_recursively(steps)
        return cls(
            id=workflow.id,
            name=workflow.name,
            db_id=workflow.db.id if workflow.db else None,
            description=workflow.description,
            steps=steps,
            input_schema=get_workflow_input_schema_dict(workflow),
            metadata=workflow.metadata,
            workflow_agent=isinstance(workflow.agent, WorkflowAgent) if workflow.agent else False,
            is_component=is_component,
            # _version/_stage are private attributes that may not be set
            current_version=getattr(workflow, "_version", None),
            stage=getattr(workflow, "_stage", None),
        )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/workflows/schema.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/eval/agent_as_judge.py | from dataclasses import asdict, dataclass, field
from inspect import iscoroutinefunction
from os import getenv
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Union
from uuid import uuid4
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.db.base import AsyncBaseDb, BaseDb
from agno.db.schemas.evals import EvalType
from agno.eval.base import BaseEval
from agno.eval.utils import async_log_eval, log_eval_run, store_result_in_file
from agno.exceptions import EvalError
from agno.models.base import Model
from agno.run.agent import RunInput, RunOutput
from agno.run.team import TeamRunInput, TeamRunOutput
from agno.utils.log import log_warning, logger, set_log_level_to_debug, set_log_level_to_info
if TYPE_CHECKING:
from rich.console import Console
from agno.metrics import RunMetrics
class NumericJudgeResponse(BaseModel):
    """Response schema for numeric scoring mode.

    Constrains the judge's structured answer to an integer score in [1, 10]
    plus the reasoning behind it.
    """

    # ge/le bounds make Pydantic reject out-of-range scores at parse time
    score: int = Field(..., ge=1, le=10, description="Score between 1 and 10.")
    reason: str = Field(..., description="Detailed reasoning for the evaluation.")
class BinaryJudgeResponse(BaseModel):
    """Response schema for binary scoring mode.

    Constrains the judge's structured answer to a boolean pass/fail plus
    the reasoning behind it.
    """

    passed: bool = Field(..., description="Pass/fail result.")
    reason: str = Field(..., description="Detailed reasoning for the evaluation.")
@dataclass
class AgentAsJudgeEvaluation:
    """Result of a single agent-as-judge evaluation."""

    input: str            # the input that was evaluated
    output: str           # the output the judge assessed
    criteria: str         # evaluation criteria given to the judge
    score: Optional[int]  # 1-10 score in numeric mode; None in binary mode
    reason: str           # the judge's reasoning
    passed: bool          # overall pass/fail outcome

    def print_eval(self, console: Optional["Console"] = None):
        """Render this evaluation as a rich table on the given (or a new) console."""
        from rich.box import ROUNDED
        from rich.console import Console
        from rich.markdown import Markdown
        from rich.table import Table

        if console is None:
            console = Console()

        def clip(text: str) -> str:
            # Keep very long inputs/outputs readable in the table
            return text[:200] + "..." if len(text) > 200 else text

        table = Table(
            box=ROUNDED,
            border_style="blue",
            show_header=False,
            title="[ Agent As Judge Evaluation ]",
            title_style="bold sky_blue1",
            title_justify="center",
        )
        table.add_row("Input", clip(self.input))
        table.add_row("Output", clip(self.output))
        # Score row only exists in numeric mode
        if self.score is not None:
            table.add_row("Score", f"{self.score}/10")
        if self.passed:
            table.add_row("Status", "[green]PASSED[/green]")
        else:
            table.add_row("Status", "[red]FAILED[/red]")
        table.add_row("Reason", Markdown(self.reason))
        console.print(table)
@dataclass
class AgentAsJudgeResult:
    """Aggregated results from agent-as-judge evaluations."""

    run_id: str
    results: List[AgentAsJudgeEvaluation] = field(default_factory=list)
    # Derived statistics; populated by compute_stats(), never by the constructor.
    avg_score: Optional[float] = field(init=False)
    min_score: Optional[float] = field(init=False)
    max_score: Optional[float] = field(init=False)
    std_dev_score: Optional[float] = field(init=False)
    pass_rate: float = field(init=False)

    def __post_init__(self):
        # Fill the derived statistics immediately so the object is never half-built.
        self.compute_stats()

    def compute_stats(self):
        """Recompute pass rate and score statistics from `self.results`."""
        import statistics

        if not self.results:
            # No evaluations yet: neutral statistics.
            self.avg_score = None
            self.min_score = None
            self.max_score = None
            self.std_dev_score = None
            self.pass_rate = 0.0
            return

        outcomes = [r.passed for r in self.results]
        self.pass_rate = sum(outcomes) / len(outcomes) * 100
        # Scores exist only in numeric mode; binary-mode results carry score=None.
        numeric_scores = [r.score for r in self.results if r.score is not None]
        if numeric_scores:
            self.avg_score = statistics.mean(numeric_scores)
            self.min_score = min(numeric_scores)
            self.max_score = max(numeric_scores)
            self.std_dev_score = statistics.stdev(numeric_scores) if len(numeric_scores) > 1 else 0.0
        else:
            # Binary mode - no scores
            self.avg_score = None
            self.min_score = None
            self.max_score = None
            self.std_dev_score = None

    def print_summary(self, console: Optional["Console"] = None):
        """Render aggregate statistics as a rich table on the given (or a new) console."""
        from rich.box import ROUNDED
        from rich.console import Console
        from rich.table import Table

        target_console = Console() if console is None else console
        table = Table(
            box=ROUNDED,
            border_style="blue",
            show_header=False,
            title="[ Agent As Judge Evaluation Summary ]",
            title_style="bold sky_blue1",
            title_justify="center",
            padding=(0, 2),  # Add horizontal padding to make table wider
            min_width=45,  # Ensure table is wide enough for title
        )
        total = len(self.results)
        table.add_row("Number of Evaluations", f"{total}")
        table.add_row("Pass Rate", f"{self.pass_rate:.1f}%")
        # Score statistics apply only to numeric mode (binary runs have no scores).
        if self.avg_score is not None:
            if total == 1:
                # A single evaluation: show the one score rather than statistics.
                table.add_row("Score", f"{self.avg_score:.2f}/10")
            elif total > 1:
                table.add_row("Average Score", f"{self.avg_score:.2f}/10")
                table.add_row("Min Score", f"{self.min_score:.2f}/10")
                table.add_row("Max Score", f"{self.max_score:.2f}/10")
                if self.std_dev_score and self.std_dev_score > 0:
                    table.add_row("Std Deviation", f"{self.std_dev_score:.2f}")
        target_console.print(table)

    def print_results(self, console: Optional["Console"] = None):
        """Print every individual evaluation in order."""
        for evaluation in self.results:
            evaluation.print_eval(console)
@dataclass
class AgentAsJudgeEval(BaseEval):
    """Evaluate agent outputs using custom criteria with an LLM judge.

    Two scoring strategies are supported:
    - "numeric": the judge returns a 1-10 score; the evaluation passes when
      ``score >= threshold``.
    - "binary": the judge returns a pass/fail verdict directly (no score).
    """

    # Core evaluation fields
    criteria: str = ""
    scoring_strategy: Literal["numeric", "binary"] = "binary"
    threshold: int = 7  # Only used for numeric strategy
    # Called with the failing AgentAsJudgeEvaluation whenever an evaluation fails.
    on_fail: Optional[Callable[["AgentAsJudgeEvaluation"], None]] = None
    additional_guidelines: Optional[Union[str, List[str]]] = None
    # Evaluation metadata
    name: Optional[str] = None
    # Model configuration
    model: Optional[Model] = None
    evaluator_agent: Optional[Agent] = None
    # Output options
    print_summary: bool = False
    print_results: bool = False
    file_path_to_save_results: Optional[str] = None
    # NOTE: resolved once at class-definition time from the AGNO_DEBUG env var.
    debug_mode: bool = getenv("AGNO_DEBUG", "false").lower() == "true"
    db: Optional[Union[BaseDb, AsyncBaseDb]] = None
    telemetry: bool = True
    run_in_background: bool = False

    def __post_init__(self):
        """Validate scoring_strategy and threshold."""
        if self.scoring_strategy == "numeric" and not 1 <= self.threshold <= 10:
            raise ValueError(f"threshold must be between 1 and 10, got {self.threshold}")

    def get_evaluator_agent(self) -> Agent:
        """Return the evaluator agent. If not provided, build it based on the model and criteria."""
        # Select response schema based on scoring strategy
        response_schema = NumericJudgeResponse if self.scoring_strategy == "numeric" else BinaryJudgeResponse
        if self.evaluator_agent is not None:
            # Ensure custom evaluator has the required output_schema for structured responses
            self.evaluator_agent.output_schema = response_schema
            return self.evaluator_agent
        model = self.model
        if model is None:
            try:
                from agno.models.openai import OpenAIChat

                model = OpenAIChat(id="gpt-5-mini")
            except (ModuleNotFoundError, ImportError) as e:
                logger.exception(e)
                raise EvalError(
                    "Agno uses `openai` as the default model provider. Please run `pip install openai` to use the default evaluator."
                )
        # Build instructions based on scoring strategy
        instructions_parts = ["## Criteria", self.criteria, ""]
        if self.scoring_strategy == "numeric":
            instructions_parts.extend(
                [
                    "## Scoring (1-10)",
                    "- 1-2: Completely fails the criteria",
                    "- 3-4: Major issues",
                    "- 5-6: Partial success with significant issues",
                    "- 7-8: Mostly meets criteria with minor issues",
                    "- 9-10: Fully meets or exceeds criteria",
                    "",
                    "## Instructions",
                    "1. Carefully evaluate the output against the criteria above",
                    "2. Provide a score from 1-10",
                    "3. Provide detailed reasoning that references specific parts of the output",
                ]
            )
        else:  # binary
            instructions_parts.extend(
                [
                    "## Evaluation",
                    "Determine if the output PASSES or FAILS the criteria above.",
                    "",
                    "## Instructions",
                    "1. Carefully evaluate the output against the criteria above",
                    "2. Decide if it passes (true) or fails (false)",
                    "3. Provide detailed reasoning that references specific parts of the output",
                ]
            )
        # Add additional guidelines if provided
        if self.additional_guidelines:
            instructions_parts.append("")
            instructions_parts.append("## Additional Guidelines")
            if isinstance(self.additional_guidelines, str):
                instructions_parts.append(self.additional_guidelines)
            else:
                for guideline in self.additional_guidelines:
                    instructions_parts.append(f"- {guideline}")
        # Add closing instruction
        instructions_parts.append("")
        instructions_parts.append("Be objective and thorough in your evaluation.")
        return Agent(
            model=model,
            description="You are an expert evaluator. Score outputs objectively based on the provided criteria.",
            instructions="\n".join(instructions_parts),
            output_schema=response_schema,
        )

    def _evaluate(
        self,
        input: str,
        output: str,
        evaluator_agent: Agent,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[AgentAsJudgeEvaluation]:
        """Evaluate a single input/output pair. Returns None if the judge call fails."""
        try:
            prompt = dedent(f"""\
                <input>
                {input}
                </input>
                <output>
                {output}
                </output>
            """)
            response = evaluator_agent.run(prompt, stream=False)
            # Accumulate eval model metrics into the parent run_metrics
            if run_metrics is not None and response.metrics is not None:
                from agno.metrics import accumulate_eval_metrics

                accumulate_eval_metrics(response.metrics, run_metrics)
            judge_response = response.content
            if not isinstance(judge_response, (NumericJudgeResponse, BinaryJudgeResponse)):
                raise EvalError(f"Invalid response: {judge_response}")
            # Determine pass/fail based on scoring strategy and response type
            if isinstance(judge_response, NumericJudgeResponse):
                score = judge_response.score
                passed = score >= self.threshold
            else:  # BinaryJudgeResponse
                score = None
                passed = judge_response.passed
            evaluation = AgentAsJudgeEvaluation(
                input=input,
                output=output,
                criteria=self.criteria,
                score=score,
                reason=judge_response.reason,
                passed=passed,
            )
            # Trigger on_fail callback if evaluation failed
            if not passed and self.on_fail:
                try:
                    if iscoroutinefunction(self.on_fail):
                        log_warning(
                            f"Cannot use async on_fail callback with sync evaluation. Use arun() instead. Skipping callback: {self.on_fail.__name__}"
                        )
                    else:
                        self.on_fail(evaluation)
                except Exception as e:
                    logger.warning(f"on_fail callback error: {e}")
            return evaluation
        except Exception as e:
            logger.exception(f"Evaluation failed: {e}")
            return None

    async def _aevaluate(
        self,
        input: str,
        output: str,
        evaluator_agent: Agent,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[AgentAsJudgeEvaluation]:
        """Evaluate a single input/output pair asynchronously. Returns None on failure."""
        try:
            prompt = dedent(f"""\
                <input>
                {input}
                </input>
                <output>
                {output}
                </output>
            """)
            response = await evaluator_agent.arun(prompt, stream=False)  # type: ignore[misc]
            # Accumulate eval model metrics into the parent run_metrics
            if run_metrics is not None and response.metrics is not None:
                from agno.metrics import accumulate_eval_metrics

                accumulate_eval_metrics(response.metrics, run_metrics)
            judge_response = response.content
            if not isinstance(judge_response, (NumericJudgeResponse, BinaryJudgeResponse)):
                raise EvalError(f"Invalid response: {judge_response}")
            # Determine pass/fail based on response type
            if isinstance(judge_response, NumericJudgeResponse):
                score = judge_response.score
                passed = score >= self.threshold
            else:  # BinaryJudgeResponse
                score = None
                passed = judge_response.passed
            evaluation = AgentAsJudgeEvaluation(
                input=input,
                output=output,
                criteria=self.criteria,
                score=score,
                reason=judge_response.reason,
                passed=passed,
            )
            # Trigger on_fail callback if evaluation failed
            if not passed and self.on_fail:
                try:
                    if iscoroutinefunction(self.on_fail):
                        await self.on_fail(evaluation)
                    else:
                        self.on_fail(evaluation)
                except Exception as e:
                    logger.warning(f"on_fail callback error: {e}")
            return evaluation
        except Exception as e:
            logger.exception(f"Async evaluation failed: {e}")
            return None

    def _log_eval_to_db(
        self,
        run_id: str,
        result: AgentAsJudgeResult,
        agent_id: Optional[str] = None,
        model_id: Optional[str] = None,
        model_provider: Optional[str] = None,
        team_id: Optional[str] = None,
        evaluated_component_name: Optional[str] = None,
    ) -> None:
        """Helper to log evaluation to database. No-op when no DB is configured."""
        if not self.db:
            return
        log_eval_run(
            db=self.db,  # type: ignore
            run_id=run_id,
            run_data=asdict(result),
            eval_type=EvalType.AGENT_AS_JUDGE,
            agent_id=agent_id,
            model_id=model_id,
            model_provider=model_provider,
            name=self.name,
            team_id=team_id,
            evaluated_component_name=evaluated_component_name,
            eval_input={
                "criteria": self.criteria,
                "scoring_strategy": self.scoring_strategy,
                # threshold is meaningful only for the numeric strategy
                "threshold": self.threshold if self.scoring_strategy == "numeric" else None,
                "additional_guidelines": self.additional_guidelines,
            },
        )

    async def _async_log_eval_to_db(
        self,
        run_id: str,
        result: AgentAsJudgeResult,
        agent_id: Optional[str] = None,
        model_id: Optional[str] = None,
        model_provider: Optional[str] = None,
        team_id: Optional[str] = None,
        evaluated_component_name: Optional[str] = None,
    ) -> None:
        """Helper to log evaluation to database asynchronously. No-op when no DB is configured."""
        if not self.db:
            return
        await async_log_eval(
            db=self.db,
            run_id=run_id,
            run_data=asdict(result),
            eval_type=EvalType.AGENT_AS_JUDGE,
            agent_id=agent_id,
            model_id=model_id,
            model_provider=model_provider,
            name=self.name,
            team_id=team_id,
            evaluated_component_name=evaluated_component_name,
            eval_input={
                "criteria": self.criteria,
                "scoring_strategy": self.scoring_strategy,
                # threshold is meaningful only for the numeric strategy
                "threshold": self.threshold if self.scoring_strategy == "numeric" else None,
                "additional_guidelines": self.additional_guidelines,
            },
        )

    def run(
        self,
        *,
        input: Optional[str] = None,
        output: Optional[str] = None,
        cases: Optional[List[Dict[str, str]]] = None,
        print_summary: bool = False,
        print_results: bool = False,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[AgentAsJudgeResult]:
        """Evaluate input/output against the criteria.

        Supports both single evaluation and batch evaluation:

        Args:
            input: Input text for single evaluation
            output: Output text for single evaluation
            cases: List of input/output pairs for batch evaluation
            print_summary: Whether to print summary
            print_results: Whether to print detailed results
            run_metrics: Parent run metrics to accumulate judge-model usage into
        """
        # Generate unique run_id for this execution
        run_id = str(uuid4())
        # Validate parameters: exactly one of single mode or batch mode
        single_mode = input is not None or output is not None
        batch_mode = cases is not None
        if single_mode and batch_mode:
            raise ValueError("Provide either (input, output) OR cases, not both")
        if not single_mode and not batch_mode:
            raise ValueError("Must provide either (input, output) OR cases")
        # Batch mode if cases provided
        if batch_mode and cases is not None:
            return self._run_batch(
                cases=cases,
                run_id=run_id,
                print_summary=print_summary,
                print_results=print_results,
                run_metrics=run_metrics,
            )
        # Validate single mode has both input and output
        if input is None or output is None:
            raise ValueError("Both input and output are required for single evaluation")
        # Single evaluation logic
        from rich.console import Console
        from rich.live import Live
        from rich.status import Status

        if isinstance(self.db, AsyncBaseDb):
            raise ValueError("Use arun() with async DB.")
        set_log_level_to_debug() if self.debug_mode else set_log_level_to_info()
        result = AgentAsJudgeResult(run_id=run_id)
        console = Console()
        with Live(console=console, transient=True) as live_log:
            evaluator = self.get_evaluator_agent()
            status = Status("Running evaluation...", spinner="dots", speed=1.0, refresh_per_second=10)
            live_log.update(status)
            evaluation = self._evaluate(input=input, output=output, evaluator_agent=evaluator, run_metrics=run_metrics)
            if evaluation:
                result.results.append(evaluation)
                result.compute_stats()
            status.stop()
        # Save result to file
        if self.file_path_to_save_results:
            store_result_in_file(
                file_path=self.file_path_to_save_results,
                result=result,
                eval_id=run_id,
                name=self.name,
            )
        # Print results
        if self.print_results or print_results:
            result.print_results(console)
        if self.print_summary or print_summary:
            result.print_summary(console)
        # evaluator model info
        model_id = self.model.id if self.model is not None else None
        model_provider = self.model.provider if self.model is not None else None
        # Log to DB
        self._log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)
        if self.telemetry:
            from agno.api.evals import EvalRunCreate, create_eval_run_telemetry

            create_eval_run_telemetry(
                eval_run=EvalRunCreate(
                    run_id=run_id, eval_type=EvalType.AGENT_AS_JUDGE, data=self._get_telemetry_data(result)
                )
            )
        return result

    async def arun(
        self,
        *,
        input: Optional[str] = None,
        output: Optional[str] = None,
        cases: Optional[List[Dict[str, str]]] = None,
        print_summary: bool = False,
        print_results: bool = False,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[AgentAsJudgeResult]:
        """Evaluate input/output against the criteria asynchronously.

        Supports both single evaluation and batch evaluation:

        Args:
            input: Input text for single evaluation
            output: Output text for single evaluation
            cases: List of input/output pairs for batch evaluation
            print_summary: Whether to print summary
            print_results: Whether to print detailed results
            run_metrics: Parent run metrics to accumulate judge-model usage into
        """
        # Generate unique run_id for this execution
        run_id = str(uuid4())
        # Validate parameters: exactly one of single mode or batch mode
        single_mode = input is not None or output is not None
        batch_mode = cases is not None
        if single_mode and batch_mode:
            raise ValueError("Provide either (input, output) OR cases, not both")
        if not single_mode and not batch_mode:
            raise ValueError("Must provide either (input, output) OR cases")
        # Batch mode if cases provided
        if batch_mode and cases is not None:
            return await self._arun_batch(
                cases=cases,
                run_id=run_id,
                print_summary=print_summary,
                print_results=print_results,
                run_metrics=run_metrics,
            )
        # Validate single mode has both input and output
        if input is None or output is None:
            raise ValueError("Both input and output are required for single evaluation")
        # Single evaluation logic
        from rich.console import Console
        from rich.live import Live
        from rich.status import Status

        set_log_level_to_debug() if self.debug_mode else set_log_level_to_info()
        result = AgentAsJudgeResult(run_id=run_id)
        console = Console()
        with Live(console=console, transient=True) as live_log:
            evaluator = self.get_evaluator_agent()
            status = Status("Running evaluation...", spinner="dots", speed=1.0, refresh_per_second=10)
            live_log.update(status)
            evaluation = await self._aevaluate(
                input=input, output=output, evaluator_agent=evaluator, run_metrics=run_metrics
            )
            if evaluation:
                result.results.append(evaluation)
                result.compute_stats()
            status.stop()
        # Save result to file
        if self.file_path_to_save_results:
            store_result_in_file(
                file_path=self.file_path_to_save_results,
                result=result,
                eval_id=run_id,
                name=self.name,
            )
        # Print results
        if self.print_results or print_results:
            result.print_results(console)
        if self.print_summary or print_summary:
            result.print_summary(console)
        # evaluator model info
        model_id = self.model.id if self.model is not None else None
        model_provider = self.model.provider if self.model is not None else None
        # Log to DB
        await self._async_log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)
        if self.telemetry:
            from agno.api.evals import EvalRunCreate, async_create_eval_run_telemetry

            await async_create_eval_run_telemetry(
                eval_run=EvalRunCreate(
                    run_id=run_id, eval_type=EvalType.AGENT_AS_JUDGE, data=self._get_telemetry_data(result)
                )
            )
        return result

    def _run_batch(
        self,
        cases: List[Dict[str, str]],
        run_id: str,
        *,
        print_summary: bool = True,
        print_results: bool = False,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[AgentAsJudgeResult]:
        """Private helper: Evaluate multiple input/output pairs.

        Args:
            cases: List of dicts with 'input' and 'output' keys
            run_id: Unique ID for this evaluation run
        """
        from rich.console import Console
        from rich.live import Live
        from rich.status import Status

        if isinstance(self.db, AsyncBaseDb):
            raise ValueError("Use arun() with async DB.")
        set_log_level_to_debug() if self.debug_mode else set_log_level_to_info()
        result = AgentAsJudgeResult(run_id=run_id)
        console = Console()
        with Live(console=console, transient=True) as live_log:
            evaluator = self.get_evaluator_agent()
            for i, case in enumerate(cases):
                status = Status(f"Evaluating {i + 1}/{len(cases)}...", spinner="dots")
                live_log.update(status)
                evaluation = self._evaluate(
                    input=case["input"], output=case["output"], evaluator_agent=evaluator, run_metrics=run_metrics
                )
                if evaluation:
                    result.results.append(evaluation)
            result.compute_stats()
            status.stop()
        # Save result to file
        if self.file_path_to_save_results:
            store_result_in_file(
                file_path=self.file_path_to_save_results,
                result=result,
                eval_id=run_id,
                name=self.name,
            )
        # Print results
        if self.print_results or print_results:
            result.print_results(console)
        if self.print_summary or print_summary:
            result.print_summary(console)
        # evaluator model info
        model_id = self.model.id if self.model is not None else None
        model_provider = self.model.provider if self.model is not None else None
        # Log to DB
        self._log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)
        if self.telemetry:
            from agno.api.evals import EvalRunCreate, create_eval_run_telemetry

            create_eval_run_telemetry(
                eval_run=EvalRunCreate(
                    run_id=run_id, eval_type=EvalType.AGENT_AS_JUDGE, data=self._get_telemetry_data(result)
                )
            )
        return result

    async def _arun_batch(
        self,
        cases: List[Dict[str, str]],
        run_id: str,
        *,
        print_summary: bool = True,
        print_results: bool = False,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[AgentAsJudgeResult]:
        """Private helper: Evaluate multiple input/output pairs asynchronously.

        Args:
            cases: List of dicts with 'input' and 'output' keys
            run_id: Unique ID for this evaluation run
        """
        from rich.console import Console
        from rich.live import Live
        from rich.status import Status

        set_log_level_to_debug() if self.debug_mode else set_log_level_to_info()
        result = AgentAsJudgeResult(run_id=run_id)
        console = Console()
        with Live(console=console, transient=True) as live_log:
            evaluator = self.get_evaluator_agent()
            for i, case in enumerate(cases):
                status = Status(f"Evaluating {i + 1}/{len(cases)}...", spinner="dots")
                live_log.update(status)
                evaluation = await self._aevaluate(
                    input=case["input"],
                    output=case["output"],
                    evaluator_agent=evaluator,
                    run_metrics=run_metrics,
                )
                if evaluation:
                    result.results.append(evaluation)
            result.compute_stats()
            status.stop()
        # Save result to file
        if self.file_path_to_save_results:
            store_result_in_file(
                file_path=self.file_path_to_save_results,
                result=result,
                eval_id=run_id,
                name=self.name,
            )
        # Print results
        if self.print_results or print_results:
            result.print_results(console)
        if self.print_summary or print_summary:
            result.print_summary(console)
        # evaluator model info
        model_id = self.model.id if self.model is not None else None
        model_provider = self.model.provider if self.model is not None else None
        # Log to DB
        await self._async_log_eval_to_db(run_id=run_id, result=result, model_id=model_id, model_provider=model_provider)
        if self.telemetry:
            from agno.api.evals import EvalRunCreate, async_create_eval_run_telemetry

            await async_create_eval_run_telemetry(
                eval_run=EvalRunCreate(
                    run_id=run_id, eval_type=EvalType.AGENT_AS_JUDGE, data=self._get_telemetry_data(result)
                )
            )
        return result

    def _get_telemetry_data(self, result: Optional[AgentAsJudgeResult] = None) -> Dict[str, Any]:
        """Build the anonymous telemetry payload for this eval run."""
        return {
            "criteria_length": len(self.criteria) if self.criteria else 0,
            "scoring_strategy": self.scoring_strategy,
            "threshold": self.threshold if self.scoring_strategy == "numeric" else None,
            "num_results": len(result.results) if result else 0,
        }

    # BaseEval hook methods
    def pre_check(self, run_input: Union[RunInput, TeamRunInput]) -> None:
        raise ValueError("Pre-hooks are not supported")

    async def async_pre_check(self, run_input: Union[RunInput, TeamRunInput]) -> None:
        raise ValueError("Pre-hooks are not supported")

    def post_check(self, run_output: Union[RunOutput, TeamRunOutput]) -> None:
        """Perform sync post-check to evaluate agent output."""
        # Fail fast before spending an LLM call if the DB cannot be used synchronously.
        if isinstance(self.db, AsyncBaseDb):
            raise ValueError("post_check() requires sync DB. Use async_post_check() with async DB.")
        input_str = run_output.input.input_content_string() if run_output.input else ""
        output_str = str(run_output.content) if run_output.content else ""
        # Temporarily disable DB logging: run() would otherwise log without run_output context.
        original_db = self.db
        self.db = None
        try:
            # Run evaluation and capture result (pass run_output.metrics for eval metrics accumulation)
            result = self.run(
                input=input_str,
                output=output_str,
                print_results=self.print_results,
                print_summary=self.print_summary,
                run_metrics=run_output.metrics,
            )
        finally:
            # Always restore the DB, even if the evaluation raises.
            self.db = original_db
        # Extract metadata from run_output
        agent_id = None
        team_id = None
        if isinstance(run_output, RunOutput):
            agent_id = run_output.agent_id
        elif isinstance(run_output, TeamRunOutput):
            team_id = run_output.team_id
        # evaluator model info
        model_id = self.model.id if self.model is not None else None
        model_provider = self.model.provider if self.model is not None else None
        # Log to DB if we have a valid result
        if result:
            self._log_eval_to_db(
                run_id=result.run_id,
                result=result,
                agent_id=agent_id,
                model_id=model_id,
                model_provider=model_provider,
                team_id=team_id,
            )

    async def async_post_check(self, run_output: Union[RunOutput, TeamRunOutput]) -> None:
        """Perform async post-check to evaluate agent output."""
        input_str = run_output.input.input_content_string() if run_output.input else ""
        output_str = str(run_output.content) if run_output.content else ""
        # Temporarily disable DB logging: arun() would otherwise log without run_output context.
        original_db = self.db
        self.db = None
        try:
            # Run evaluation and capture result (pass run_output.metrics for eval metrics accumulation)
            result = await self.arun(
                input=input_str,
                output=output_str,
                print_results=self.print_results,
                print_summary=self.print_summary,
                run_metrics=run_output.metrics,
            )
        finally:
            # Always restore the DB, even if the evaluation raises.
            self.db = original_db
        # Extract metadata from run_output
        agent_id = None
        team_id = None
        if isinstance(run_output, RunOutput):
            agent_id = run_output.agent_id
        elif isinstance(run_output, TeamRunOutput):
            team_id = run_output.team_id
        # evaluator model info
        model_id = self.model.id if self.model is not None else None
        model_provider = self.model.provider if self.model is not None else None
        # Log to DB if we have a valid result
        if result:
            await self._async_log_eval_to_db(
                run_id=result.run_id,
                result=result,
                agent_id=agent_id,
                model_id=model_id,
                model_provider=model_provider,
                team_id=team_id,
            )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/eval/agent_as_judge.py",
"license": "Apache License 2.0",
"lines": 771,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/eval/base.py | from abc import ABC, abstractmethod
from typing import Union
from agno.run.agent import RunInput, RunOutput
from agno.run.team import TeamRunInput, TeamRunOutput
class BaseEval(ABC):
    """Abstract base class for all evaluations.

    Concrete evaluations implement hooks that run before the agent/team run
    (`pre_check` / `async_pre_check`) and after it (`post_check` /
    `async_post_check`), in both sync and async flavors.
    """

    @abstractmethod
    def pre_check(self, run_input: Union[RunInput, TeamRunInput]) -> None:
        """Perform sync pre-evals."""
        ...

    @abstractmethod
    async def async_pre_check(self, run_input: Union[RunInput, TeamRunInput]) -> None:
        """Perform async pre-evals."""
        ...

    @abstractmethod
    def post_check(self, run_output: Union[RunOutput, TeamRunOutput]) -> None:
        """Perform sync post-evals."""
        ...

    @abstractmethod
    async def async_post_check(self, run_output: Union[RunOutput, TeamRunOutput]) -> None:
        """Perform async post-evals."""
        ...
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/eval/base.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/eval/test_agent_as_judge_eval.py | """Unit tests for AgentAsJudgeEval"""
from unittest.mock import AsyncMock, MagicMock
from agno.agent import Agent
from agno.db.in_memory import InMemoryDb
from agno.eval.agent_as_judge import AgentAsJudgeEval, BinaryJudgeResponse, NumericJudgeResponse
from agno.models.openai import OpenAIChat
from agno.run.agent import RunOutput
def _mock_evaluator_numeric(eval_instance: AgentAsJudgeEval, score: int = 8):
    """Helper to mock evaluator agent for numeric mode."""
    judge = eval_instance.get_evaluator_agent()
    judge.model = MagicMock()
    canned_output = RunOutput(content=NumericJudgeResponse(score=score, reason="Mocked evaluation response."))
    judge.run = MagicMock(return_value=canned_output)
    eval_instance.evaluator_agent = judge
    return judge
def _mock_evaluator_binary(eval_instance: AgentAsJudgeEval, passed: bool = True):
    """Helper to mock evaluator agent for binary mode."""
    judge = eval_instance.get_evaluator_agent()
    judge.model = MagicMock()
    canned_output = RunOutput(content=BinaryJudgeResponse(passed=passed, reason="Mocked evaluation response."))
    judge.run = MagicMock(return_value=canned_output)
    eval_instance.evaluator_agent = judge
    return judge
def test_numeric_mode_basic():
    """Test basic numeric mode evaluation."""
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be helpful",
        scoring_strategy="numeric",
        threshold=7,
    )
    _mock_evaluator_numeric(judge_eval, score=8)

    outcome = judge_eval.run(
        input="What is Python?",
        output="Python is a programming language.",
        print_results=False,
    )

    assert outcome is not None
    assert len(outcome.results) == 1
    evaluation = outcome.results[0]
    assert evaluation.score is not None
    assert 1 <= evaluation.score <= 10
    assert isinstance(evaluation.passed, bool)
def test_binary_mode_basic():
    """Test basic binary mode evaluation."""
    judge_eval = AgentAsJudgeEval(
        criteria="Response must not contain personal info",
        scoring_strategy="binary",
    )
    _mock_evaluator_binary(judge_eval, passed=True)

    outcome = judge_eval.run(
        input="Tell me about privacy",
        output="Privacy is important.",
        print_results=False,
    )

    assert outcome is not None
    assert len(outcome.results) == 1
    evaluation = outcome.results[0]
    assert evaluation.score is None  # Binary mode doesn't have scores
    assert isinstance(evaluation.passed, bool)
def test_default_values():
    """Test that default values are correct."""
    judge_eval = AgentAsJudgeEval(criteria="Be helpful")

    # Binary scoring, threshold 7, and telemetry on are the documented defaults.
    assert judge_eval.telemetry is True
    assert judge_eval.threshold == 7
    assert judge_eval.scoring_strategy == "binary"
def test_batch_mode():
    """Test batch evaluation with multiple cases."""
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be helpful",
        scoring_strategy="numeric",
        threshold=7,
    )
    # Mock the evaluator
    _mock_evaluator_numeric(judge_eval, score=8)

    batch = [{"input": f"Test {i}", "output": f"Response {i}"} for i in (1, 2, 3)]
    outcome = judge_eval.run(cases=batch, print_results=False)

    assert outcome is not None
    assert len(outcome.results) == 3
    for evaluation in outcome.results:
        assert evaluation.score is not None
        assert isinstance(evaluation.passed, bool)
def test_additional_guidelines():
    """Test evaluation with additional guidelines given as a single string."""
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be educational",
        scoring_strategy="numeric",
        threshold=6,
        additional_guidelines="Focus on beginner-friendly explanations",
    )
    _mock_evaluator_numeric(judge_eval, score=7)

    outcome = judge_eval.run(
        input="What is ML?",
        output="Machine learning is when computers learn from data.",
        print_results=False,
    )

    assert outcome is not None
    assert len(outcome.results) == 1
def test_additional_guidelines_list():
    """Test evaluation with additional guidelines as a list."""
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be clear",
        scoring_strategy="numeric",
        threshold=7,
        additional_guidelines=["Be concise", "Use simple language"],
    )
    _mock_evaluator_numeric(judge_eval, score=8)

    outcome = judge_eval.run(
        input="What is AI?",
        output="AI is artificial intelligence.",
        print_results=False,
    )

    assert outcome is not None
    assert len(outcome.results) == 1
def test_custom_evaluator():
    """Test evaluation with a custom evaluator agent."""
    strict_agent = Agent(
        id="strict-evaluator",
        name="Strict Evaluator",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You are VERY strict. Only give high scores for exceptional quality.",
    )
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be excellent",
        scoring_strategy="numeric",
        threshold=8,
        evaluator_agent=strict_agent,
    )
    # Mock the custom evaluator so no real model call happens
    _mock_evaluator_numeric(judge_eval, score=9)

    outcome = judge_eval.run(
        input="What is Python?",
        output="Python is a programming language.",
        print_results=False,
    )

    assert outcome is not None
    assert len(outcome.results) == 1
def test_threshold_ranges():
    """Test evaluation with different threshold values."""
    for cutoff in (1, 5, 8, 10):
        judge_eval = AgentAsJudgeEval(
            criteria="Response must be helpful",
            scoring_strategy="numeric",
            threshold=cutoff,
        )
        _mock_evaluator_numeric(judge_eval, score=7)

        outcome = judge_eval.run(
            input="Test",
            output="Response",
            print_results=False,
        )

        assert outcome is not None
        assert len(outcome.results) == 1
def test_name_assignment():
    """Test that evaluation name is stored."""
    named_eval = AgentAsJudgeEval(
        name="My Custom Eval",
        criteria="Be helpful",
    )

    assert named_eval.name == "My Custom Eval"
def test_telemetry_disabled():
    """Test that telemetry can be disabled."""
    quiet_eval = AgentAsJudgeEval(
        criteria="Be helpful",
        telemetry=False,
    )

    assert quiet_eval.telemetry is False
def test_db_logging_numeric():
    """Test that numeric eval results are logged to database."""
    memory_db = InMemoryDb()
    judge_eval = AgentAsJudgeEval(
        criteria="Response must be accurate",
        scoring_strategy="numeric",
        threshold=7,
        db=memory_db,
        telemetry=False,  # Disable telemetry for unit test
    )
    _mock_evaluator_numeric(judge_eval, score=8)

    outcome = judge_eval.run(
        input="What is 2+2?",
        output="4",
        print_results=False,
    )
    assert outcome is not None

    # Exactly one eval run should have been persisted, with the full eval_input.
    stored_runs = memory_db.get_eval_runs()
    assert len(stored_runs) == 1
    stored = stored_runs[0]
    assert stored.eval_type.value == "agent_as_judge"
    assert stored.eval_input["criteria"] == "Response must be accurate"
    assert stored.eval_input["scoring_strategy"] == "numeric"
    assert stored.eval_input["threshold"] == 7
def test_db_logging_binary():
    """Test that binary eval results are logged to database with threshold=None."""
    memory_db = InMemoryDb()
    judge_eval = AgentAsJudgeEval(
        criteria="Response must not contain offensive content",
        scoring_strategy="binary",
        db=memory_db,
        telemetry=False,
    )
    _mock_evaluator_binary(judge_eval, passed=True)

    outcome = judge_eval.run(
        input="Tell me a fact",
        output="The sky is blue.",
        print_results=False,
    )
    assert outcome is not None
    assert outcome.results[0].score is None  # Binary mode has no score

    # Exactly one eval run should have been persisted; binary mode nulls the threshold.
    stored_runs = memory_db.get_eval_runs()
    assert len(stored_runs) == 1
    stored = stored_runs[0]
    assert stored.eval_type.value == "agent_as_judge"
    assert stored.eval_input["criteria"] == "Response must not contain offensive content"
    assert stored.eval_input["scoring_strategy"] == "binary"
    assert stored.eval_input["threshold"] is None  # Binary mode sets threshold to None
async def test_async_run():
    """Async evaluation should produce a single scored result."""
    judge = AgentAsJudgeEval(
        criteria="Response must be concise",
        scoring_strategy="numeric",
        threshold=6,
    )
    # Wire a mocked evaluator agent so no real model call is made.
    agent = judge.get_evaluator_agent()
    agent.model = MagicMock()
    agent.arun = AsyncMock(
        return_value=RunOutput(content=NumericJudgeResponse(score=7, reason="Mocked async evaluation."))
    )
    judge.evaluator_agent = agent

    result = await judge.arun(
        input="What is AI?",
        output="Artificial intelligence.",
        print_results=False,
    )

    assert result is not None
    assert len(result.results) == 1
    assert result.results[0].score is not None
def test_invalid_threshold():
    """Thresholds outside the 1-10 range are rejected at construction time."""
    import pytest

    # Both a too-low and a too-high threshold should raise the same ValueError.
    for bad_threshold in (0, 11):
        with pytest.raises(ValueError, match="threshold must be between 1 and 10"):
            AgentAsJudgeEval(
                criteria="Test criteria",
                scoring_strategy="numeric",
                threshold=bad_threshold,
            )
def test_on_fail_callback():
    """A failing evaluation should be handed to the on_fail callback."""
    captured = []

    judge = AgentAsJudgeEval(
        criteria="Response must score above 8",
        scoring_strategy="numeric",
        threshold=8,
        on_fail=captured.append,
    )
    _mock_evaluator_numeric(judge, score=5)  # below threshold -> fails

    result = judge.run(input="Test input", output="Test output", print_results=False)

    # The run itself reports the failure...
    assert result is not None
    assert result.results[0].passed is False
    assert result.results[0].score == 5
    # ...and the callback received that same failed evaluation.
    assert captured
    assert captured[-1].passed is False
    assert captured[-1].score == 5
def test_on_fail_callback_batch_mode():
    """In batch mode, on_fail fires once per failing case, in order."""
    failures = []

    judge = AgentAsJudgeEval(
        criteria="Response must be excellent",
        scoring_strategy="numeric",
        threshold=8,
        on_fail=failures.append,
    )

    # Script one response per case so pass/fail outcomes are deterministic.
    agent = judge.get_evaluator_agent()
    agent.model = MagicMock()
    scripted = [
        NumericJudgeResponse(score=9, reason="Excellent response."),  # pass
        NumericJudgeResponse(score=5, reason="Poor response."),  # fail
        NumericJudgeResponse(score=7, reason="Below threshold."),  # fail
        NumericJudgeResponse(score=10, reason="Perfect response."),  # pass
    ]
    agent.run = MagicMock(side_effect=[RunOutput(content=r) for r in scripted])
    judge.evaluator_agent = agent

    result = judge.run(
        cases=[
            {"input": "Test 1", "output": "Response 1"},
            {"input": "Test 2", "output": "Response 2"},
            {"input": "Test 3", "output": "Response 3"},
            {"input": "Test 4", "output": "Response 4"},
        ],
        print_results=False,
    )

    assert result is not None
    assert len(result.results) == 4
    assert result.results[0].passed is True  # score 9
    assert result.results[1].passed is False  # score 5
    assert result.results[2].passed is False  # score 7
    assert result.results[3].passed is True  # score 10

    # Only the two failing cases reached the callback, in order.
    assert [f.score for f in failures] == [5, 7]
    assert all(f.passed is False for f in failures)
    # Two of four cases passed.
    assert result.pass_rate == 50.0
async def test_on_fail_callback_batch_mode_async():
    """Async batch mode also fires on_fail once per failing case."""
    failures = []

    judge = AgentAsJudgeEval(
        criteria="Response must be excellent",
        scoring_strategy="numeric",
        threshold=8,
        on_fail=failures.append,
    )

    # Script one response per case so outcomes are deterministic.
    agent = judge.get_evaluator_agent()
    agent.model = MagicMock()
    scripted = [
        NumericJudgeResponse(score=6, reason="Below threshold."),  # fail
        NumericJudgeResponse(score=9, reason="Excellent response."),  # pass
        NumericJudgeResponse(score=4, reason="Poor response."),  # fail
    ]
    agent.arun = AsyncMock(side_effect=[RunOutput(content=r) for r in scripted])
    judge.evaluator_agent = agent

    result = await judge.arun(
        cases=[
            {"input": "Test 1", "output": "Response 1"},
            {"input": "Test 2", "output": "Response 2"},
            {"input": "Test 3", "output": "Response 3"},
        ],
        print_results=False,
    )

    assert result is not None
    assert len(result.results) == 3
    assert result.results[0].passed is False  # score 6
    assert result.results[1].passed is True  # score 9
    assert result.results[2].passed is False  # score 4

    # Only the two failing cases reached the callback, in order.
    assert [f.score for f in failures] == [6, 4]
    assert all(f.passed is False for f in failures)
    # One of three passed, so the pass rate is roughly a third.
    assert 33.0 <= result.pass_rate <= 34.0
async def test_numeric_mode_basic_async():
    """Async numeric evaluation returns a bounded score and a boolean verdict."""
    judge = AgentAsJudgeEval(
        criteria="Response must be helpful",
        scoring_strategy="numeric",
        threshold=7,
    )
    agent = judge.get_evaluator_agent()
    agent.model = MagicMock()  # no real model traffic in unit tests
    agent.arun = AsyncMock(
        return_value=RunOutput(content=NumericJudgeResponse(score=8, reason="Mocked async evaluation."))
    )
    judge.evaluator_agent = agent

    result = await judge.arun(
        input="What is Python?",
        output="Python is a programming language.",
        print_results=False,
    )

    assert result is not None
    assert len(result.results) == 1
    verdict = result.results[0]
    assert verdict.score is not None
    assert 1 <= verdict.score <= 10
    assert isinstance(verdict.passed, bool)
async def test_binary_mode_basic_async():
    """Async binary evaluation yields a boolean verdict and no score."""
    judge = AgentAsJudgeEval(
        criteria="Response must not contain personal info",
        scoring_strategy="binary",
    )
    agent = judge.get_evaluator_agent()
    agent.model = MagicMock()
    agent.arun = AsyncMock(
        return_value=RunOutput(content=BinaryJudgeResponse(passed=True, reason="Mocked async binary evaluation."))
    )
    judge.evaluator_agent = agent

    result = await judge.arun(
        input="Tell me about privacy",
        output="Privacy is important.",
        print_results=False,
    )

    assert result is not None
    assert len(result.results) == 1
    verdict = result.results[0]
    assert verdict.score is None  # binary mode carries no numeric score
    assert isinstance(verdict.passed, bool)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/eval/test_agent_as_judge_eval.py",
"license": "Apache License 2.0",
"lines": 436,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/models/google/utils.py | from enum import Enum
class GeminiFinishReason(Enum):
    """Gemini API finish reasons.

    Each member's value mirrors the string constant the Gemini API reports as
    a candidate's finish reason.
    """

    STOP = "STOP"
    MAX_TOKENS = "MAX_TOKENS"
    SAFETY = "SAFETY"
    RECITATION = "RECITATION"
    # This reason is treated as retryable by callers, paired with
    # MALFORMED_FUNCTION_CALL_GUIDANCE below.
    MALFORMED_FUNCTION_CALL = "MALFORMED_FUNCTION_CALL"
    OTHER = "OTHER"
# Guidance message appended to the conversation when retrying a Gemini
# invocation that finished with a MALFORMED_FUNCTION_CALL reason (see
# GeminiFinishReason). The text is sent to the model verbatim.
MALFORMED_FUNCTION_CALL_GUIDANCE = """The previous function call was malformed. Please try again with a valid function call.
Guidelines:
- Generate the function call JSON directly, do not generate code
- Use the function name exactly as defined (no namespace prefixes like 'default_api.')
- Ensure all required parameters are provided with correct types
"""
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/google/utils.py",
"license": "Apache License 2.0",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/integration/models/google/test_retryable_exceptions.py | """Test the regeneration mechanism for known Gemini errors."""
from unittest.mock import Mock, patch
import pytest
from agno.agent import Agent
from agno.models.google import Gemini
from agno.models.google.gemini import RetryableModelProviderError
from agno.models.google.utils import MALFORMED_FUNCTION_CALL_GUIDANCE
from agno.run.agent import RunEvent
from agno.run.base import RunStatus
@pytest.fixture
def model():
    """Fixture returning a fresh Gemini model instance for each test."""
    return Gemini(id="gemini-2.0-flash-001")
def create_mock_response(finish_reason: str = "STOP", content: str = "Test response"):
    """Build a mock GenerateContentResponse with one candidate and usage metadata."""
    from google.genai.types import Content, GenerateContentResponse, Part

    # Use real Part/Content objects so downstream parsing code can consume them.
    text_part = Part.from_text(text=content) if content else Part.from_text(text="")
    model_content = Content(role="model", parts=[text_part] if content else [])

    candidate = Mock()
    candidate.finish_reason = finish_reason
    candidate.content = model_content
    # These must be None (not Mock) so response parsing doesn't try to iterate them.
    candidate.grounding_metadata = None
    candidate.url_context_metadata = None

    usage = Mock()
    usage.prompt_token_count = 10
    usage.candidates_token_count = 20
    usage.thoughts_token_count = None
    usage.cached_content_token_count = 0
    usage.traffic_type = None

    response = Mock(spec=GenerateContentResponse)
    response.candidates = [candidate]
    response.usage_metadata = usage
    return response
def test_malformed_function_call_error_triggers_regeneration_attempt(model):
    """A MALFORMED_FUNCTION_CALL finish reason triggers exactly one regeneration."""
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def fake_generate(*args, **kwargs):
        """Fail with MALFORMED_FUNCTION_CALL on the first call, then succeed."""
        calls["n"] += 1
        if calls["n"] == 1:
            return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")
        # The retry must carry the guidance message as the second content part.
        assert MALFORMED_FUNCTION_CALL_GUIDANCE == kwargs["contents"][1].parts[0].text
        return create_mock_response(content="Successfully regenerated response")

    with patch.object(model.get_client().models, "generate_content", side_effect=fake_generate):
        response = agent.run("Test message")

    assert calls["n"] == 2, "Expected exactly 2 calls (1 initial + 1 regeneration)"
    assert response is not None
    assert response.content is not None
    assert "Successfully regenerated" in response.content
@pytest.mark.asyncio
async def test_malformed_function_call_error_triggers_regeneration_attempt_async(model):
    """Test async regeneration after MALFORMED_FUNCTION_CALL error."""
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    async def fake_generate(*args, **kwargs):
        """Fail with MALFORMED_FUNCTION_CALL on the first call, then succeed."""
        calls["n"] += 1
        if calls["n"] == 1:
            return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")
        return create_mock_response(finish_reason="STOP", content="Successfully regenerated async response")

    with patch.object(model.get_client().aio.models, "generate_content", side_effect=fake_generate):
        response = await agent.arun("Test message")

    assert calls["n"] == 2, "Expected exactly 2 calls (1 initial + 1 regeneration)"
    assert response is not None
    assert response.content is not None
    assert "Successfully regenerated" in response.content
def test_malformed_function_call_error_triggers_regeneration_attempt_stream(model):
    """Streaming runs are also regenerated after a MALFORMED_FUNCTION_CALL error."""
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def fake_stream(*args, **kwargs):
        """First stream ends with the malformed-call error; retry streams cleanly."""
        calls["n"] += 1
        if calls["n"] == 1:
            yield create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")
            return
        # A healthy three-chunk stream: STOP only on the final chunk.
        for i in range(3):
            yield create_mock_response(
                finish_reason="STOP" if i == 2 else None,  # type: ignore
                content=f"Chunk {i}",
            )

    with patch.object(model.get_client().models, "generate_content_stream", side_effect=fake_stream):
        chunks = list(agent.run("Test message", stream=True))

    assert calls["n"] == 2, "Expected exactly 2 calls (1 initial + 1 regeneration)"
    assert len(chunks) > 0
    # The regenerated stream's content made it into the emitted chunks.
    streamed_text = "".join(c.content for c in chunks if c.content)
    assert "Chunk" in streamed_text
@pytest.mark.asyncio
async def test_malformed_function_call_error_triggers_regeneration_attempt_async_stream(model):
    """Test async regeneration after MALFORMED_FUNCTION_CALL error.

    NOTE: the async client is mocked at the ``generate_content`` level rather
    than the streaming generator — the earlier in-file attempts to mock the
    async stream directly were dead code (a no-op ``with patch: pass`` and
    never-called helpers) and have been removed; this still exercises the same
    retry path for the async client.
    """
    agent = Agent(
        name="Test Agent",
        model=model,
        tools=[lambda x: f"Result: {x}"],
    )
    call_count = {"count": 0}

    async def mock_aio_generate_content(*args, **kwargs):
        """Return MALFORMED_FUNCTION_CALL on the first call, then success."""
        call_count["count"] += 1
        if call_count["count"] == 1:
            return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")
        return create_mock_response(finish_reason="STOP", content="Async stream regenerated")

    with patch.object(model.get_client().aio.models, "generate_content", side_effect=mock_aio_generate_content):
        response = await agent.arun("Test message")

    # One initial call plus one regeneration attempt.
    assert call_count["count"] == 2
    assert response.content is not None
    assert "regenerated" in response.content.lower()
def test_guidance_message_is_added_to_messages(model):
    """Test that the guidance message is added to the messages list."""
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def fake_generate(*args, **kwargs):
        """Fail once, then verify the retry request carries the guidance text."""
        calls["n"] += 1
        if calls["n"] == 1:
            return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")
        # The second request must include the guidance message verbatim.
        assert MALFORMED_FUNCTION_CALL_GUIDANCE == kwargs["contents"][1].parts[0].text
        return create_mock_response(content="Successfully regenerated response")

    with patch.object(model.get_client().models, "generate_content", side_effect=fake_generate):
        _ = agent.run("Test message")
def test_guidance_message_is_not_in_final_response(model):
    """Test that the guidance message is not in the final response."""
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def fake_generate(*args, **kwargs):
        """Fail once; confirm guidance is present on the retry request."""
        calls["n"] += 1
        if calls["n"] == 1:
            return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")
        # Guidance must be in the retry request...
        assert MALFORMED_FUNCTION_CALL_GUIDANCE == kwargs["contents"][1].parts[0].text
        return create_mock_response(content="Successfully regenerated response")

    with patch.object(model.get_client().models, "generate_content", side_effect=fake_generate):
        response = agent.run("Test message")

    # ...but it must never leak into the user-visible response.
    assert response.content is not None
    assert MALFORMED_FUNCTION_CALL_GUIDANCE not in response.content
# Tests for retry_with_guidance_limit
def test_retry_with_guidance_limit_zero_raises_immediately(model):
    """With a limit of 0, the provider error surfaces with no retry at all."""
    model.retry_with_guidance_limit = 0
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def always_malformed(*args, **kwargs):
        """Every call ends with the MALFORMED_FUNCTION_CALL finish reason."""
        calls["n"] += 1
        return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")

    with patch.object(model.get_client().models, "generate_content", side_effect=always_malformed):
        response = agent.run("Test message")

    assert response.status == RunStatus.error
    # The surfaced error names both the retry exhaustion and the reason code.
    assert "Max retries with guidance reached" in response.content
    assert "MALFORMED_FUNCTION_CALL" in response.content
    assert calls["n"] == 1  # single attempt, no retries
def test_retry_with_guidance_limit_one_retries_once_then_raises(model):
    """With a limit of 1, exactly one retry happens before the error surfaces."""
    model.retry_with_guidance_limit = 1
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def always_malformed(*args, **kwargs):
        """Every call ends with the MALFORMED_FUNCTION_CALL finish reason."""
        calls["n"] += 1
        return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")

    with patch.object(model.get_client().models, "generate_content", side_effect=always_malformed):
        response = agent.run("Test message")

    assert response.status == RunStatus.error
    assert "Max retries with guidance reached" in response.content
    assert "MALFORMED_FUNCTION_CALL" in response.content
    assert calls["n"] == 2  # initial attempt + 1 retry
def test_retry_with_guidance_limit_two_retries_twice_then_raises(model):
    """With a limit of 2, two retries happen before the error surfaces."""
    model.retry_with_guidance_limit = 2
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def always_malformed(*args, **kwargs):
        """Every call ends with the MALFORMED_FUNCTION_CALL finish reason."""
        calls["n"] += 1
        return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")

    with patch.object(model.get_client().models, "generate_content", side_effect=always_malformed):
        response = agent.run("Test message")

    assert response.status == RunStatus.error
    assert calls["n"] == 3  # initial attempt + 2 retries
    assert "Max retries with guidance reached" in response.content
    assert "MALFORMED_FUNCTION_CALL" in response.content
@pytest.mark.asyncio
async def test_retry_with_guidance_limit_async_raises_after_limit(model):
    """Test async: retry_with_guidance_limit enforces the limit."""
    model.retry_with_guidance_limit = 2
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    async def always_malformed(*args, **kwargs):
        """Every call ends with the MALFORMED_FUNCTION_CALL finish reason."""
        calls["n"] += 1
        return create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")

    with patch.object(model.get_client().aio.models, "generate_content", side_effect=always_malformed):
        response = await agent.arun("Test message")

    assert response.status == RunStatus.error
    assert calls["n"] == 3  # initial attempt + 2 retries
    assert "Max retries with guidance reached" in response.content
    assert "MALFORMED_FUNCTION_CALL" in response.content
def test_retry_with_guidance_limit_stream_raises_after_limit(model):
    """Test stream: retry_with_guidance_limit enforces the limit."""
    model.retry_with_guidance_limit = 2
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    def always_malformed_stream(*args, **kwargs):
        """Every stream ends with the MALFORMED_FUNCTION_CALL finish reason."""
        calls["n"] += 1
        yield create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")

    with patch.object(model.get_client().models, "generate_content_stream", side_effect=always_malformed_stream):
        saw_error = False
        for event in agent.run("Test message", stream=True):
            if event.event == RunEvent.run_error:
                saw_error = True
                break

    assert saw_error
    assert calls["n"] == 3  # initial attempt + 2 retries
@pytest.mark.asyncio
async def test_retry_with_guidance_limit_async_stream_raises_after_limit(model):
    """Test async stream: retry_with_guidance_limit enforces the limit."""
    model.retry_with_guidance_limit = 2
    agent = Agent(name="Test Agent", model=model, tools=[lambda x: f"Result: {x}"])
    calls = {"n": 0}

    async def always_malformed_stream(*args, **kwargs):
        """Every async stream ends with the MALFORMED_FUNCTION_CALL finish reason."""
        calls["n"] += 1
        yield create_mock_response(finish_reason="MALFORMED_FUNCTION_CALL", content="")

    with patch.object(
        model.get_client().aio.models, "generate_content_stream", side_effect=always_malformed_stream
    ):
        saw_error = False
        async for event in agent.arun("Test message", stream=True):
            if event.event == RunEvent.run_error:
                saw_error = True
                break

    assert saw_error
    assert calls["n"] == 3  # initial attempt + 2 retries
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/google/test_retryable_exceptions.py",
"license": "Apache License 2.0",
"lines": 342,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_introduction.py | import uuid
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
def test_agent_with_introduction(shared_db):
    """Test that introduction is added to the session as the first assistant message."""
    session_id = str(uuid.uuid4())
    intro = "Hello! I'm your helpful assistant. I can help you with various tasks."
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, session_id=session_id, introduction=intro)

    response = agent.run("What can you help me with?")
    assert response is not None
    assert response.content is not None

    # The stored session holds the introduction run followed by the real run.
    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 2

    first_run = session.runs[0]
    assert first_run.content == intro
    assert len(first_run.messages) == 1
    message = first_run.messages[0]
    assert message.role == "assistant"
    assert message.content == intro
def test_agent_introduction_only_added_once(shared_db):
    """Test that introduction is only added once, not on subsequent runs."""
    session_id = str(uuid.uuid4())
    intro = "Welcome! I'm here to help."
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, session_id=session_id, introduction=intro)

    agent.run("Hello")
    after_first = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert len(after_first.runs) == 2  # introduction + first run

    agent.run("How are you?")
    after_second = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert len(after_second.runs) == 3  # introduction + two actual runs

    # The introduction is still first and was never duplicated.
    assert after_second.runs[0].content == intro
    assert after_second.runs[0].messages[0].role == "assistant"
def test_agent_introduction_with_chat_history(shared_db):
    """Test that introduction works correctly with add_history_to_context."""
    session_id = str(uuid.uuid4())
    intro = "I'm a specialized assistant for Python programming."
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=session_id,
        introduction=intro,
        add_history_to_context=True,
        num_history_runs=5,
    )

    assert agent.run("Tell me about Python lists") is not None
    # The second turn should see both the introduction and the first exchange.
    assert agent.run("What did you introduce yourself as?") is not None

    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 3  # introduction + two runs
    assert session.runs[0].content == intro
def test_agent_introduction_streaming(shared_db):
    """Test that introduction works with streaming mode."""
    session_id = str(uuid.uuid4())
    intro = "Hello! Streaming assistant here."
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, session_id=session_id, introduction=intro)

    chunks = list(agent.run("Hi there!", stream=True))
    assert len(chunks) > 0

    # The introduction run precedes the streamed run in storage.
    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 2
    assert session.runs[0].content == intro
    assert session.runs[0].messages[0].role == "assistant"
async def test_agent_introduction_async(shared_db):
    """Test that introduction works with async mode."""
    session_id = str(uuid.uuid4())
    intro = "Async assistant at your service!"
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, session_id=session_id, introduction=intro)

    response = await agent.arun("Hello!")
    assert response is not None
    assert response.content is not None

    # The introduction run precedes the async run in storage.
    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 2
    first_run = session.runs[0]
    assert first_run.content == intro
    assert first_run.messages[0].role == "assistant"
    assert first_run.messages[0].content == intro
async def test_agent_introduction_async_streaming(shared_db):
    """Test that introduction works with async streaming mode."""
    session_id = str(uuid.uuid4())
    intro = "Async streaming assistant ready!"
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, session_id=session_id, introduction=intro)

    chunks = []
    async for chunk in agent.arun("Hello there!", stream=True):
        chunks.append(chunk)
    assert len(chunks) > 0

    # The introduction run precedes the async streamed run in storage.
    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 2
    assert session.runs[0].content == intro
    assert session.runs[0].messages[0].role == "assistant"
def test_agent_without_introduction(shared_db):
    """Test that agent works normally without introduction."""
    session_id = str(uuid.uuid4())
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=session_id,
        introduction=None,  # explicitly opt out of an introduction
    )

    assert agent.run("Hello!") is not None

    # Only the actual run is recorded; no introduction run exists.
    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 1
def test_agent_introduction_with_different_sessions(shared_db):
    """Test that introduction is added to each new session."""
    intro = "I'm your multi-session assistant."
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, introduction=intro)

    # Each fresh session should get its own introduction run.
    for greeting in ("Hello from session 1", "Hello from session 2"):
        session_id = str(uuid.uuid4())
        agent.run(greeting, session_id=session_id)
        session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
        assert session is not None
        assert len(session.runs) == 2  # introduction + run
        assert session.runs[0].content == intro
def test_agent_introduction_multiline(shared_db):
    """Test that multiline introduction text is handled correctly."""
    session_id = str(uuid.uuid4())
    intro = """Hello! I'm your personal assistant.
I can help you with:
- Programming questions
- General knowledge
- Task planning
How can I assist you today?"""
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"), db=shared_db, session_id=session_id, introduction=intro)

    assert agent.run("What can you do?") is not None

    # The stored introduction must round-trip the full multiline text.
    session = shared_db.get_session(session_id=session_id, session_type=SessionType.AGENT)
    assert session is not None
    assert len(session.runs) == 2
    stored = session.runs[0].content
    assert stored == intro
    assert "Programming questions" in stored
    assert "General knowledge" in stored
def test_agent_get_chat_history_with_introduction(shared_db):
    """The introduction must surface as the first assistant message in chat history."""
    sid = str(uuid.uuid4())
    intro = "I'm your chat history assistant."
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        introduction=intro,
    )

    for prompt in ("First message", "Second message"):
        agent.run(prompt)

    history = agent.get_chat_history(session_id=sid)
    assert history is not None
    # At minimum: introduction + 2 user messages + 2 assistant responses.
    assert len(history) >= 5
    head = history[0]
    assert head.role == "assistant"
    assert head.content == intro
def test_agent_introduction_with_system_message(shared_db):
    """A custom system_message must not interfere with introduction storage."""
    sid = str(uuid.uuid4())
    intro = "Hello! I'm a specialized Python assistant."
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        introduction=intro,
        system_message="You are a Python programming expert. Always provide code examples.",
    )

    result = agent.run("What do you do?")
    assert result is not None
    assert result.content is not None

    # The introduction still occupies the first run slot.
    stored = shared_db.get_session(session_id=sid, session_type=SessionType.AGENT)
    assert stored is not None
    assert len(stored.runs) == 2  # introduction run + real run
    intro_run = stored.runs[0]
    assert intro_run.content == intro
    assert intro_run.messages[0].role == "assistant"
    assert intro_run.messages[0].content == intro
def test_agent_introduction_with_system_message_callable(shared_db):
    """A callable system_message must coexist with a static introduction."""
    sid = str(uuid.uuid4())
    intro = "Welcome! I'm your dynamic assistant."

    def dynamic_system_message(agent):
        return f"You are {agent.name}. Provide helpful responses."

    agent = Agent(
        name="DynamicBot",
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        introduction=intro,
        system_message=dynamic_system_message,
    )

    assert agent.run("Tell me about yourself") is not None

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.AGENT)
    assert stored is not None
    assert len(stored.runs) == 2
    assert stored.runs[0].content == intro
    assert stored.runs[0].messages[0].role == "assistant"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_introduction.py",
"license": "Apache License 2.0",
"lines": 265,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_introduction.py | import uuid
from agno.agent.agent import Agent
from agno.db.base import SessionType
from agno.models.openai.chat import OpenAIChat
from agno.team.team import Team
def test_team_with_introduction(shared_db):
    """The introduction must be persisted as the session's first assistant run."""
    intro = "Hello! I'm your helpful assistant. I can help you with various tasks."
    sid = str(uuid.uuid4())
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    result = team.run("What can you help me with?")
    assert result is not None
    assert result.content is not None

    # The session holds the introduction run followed by the real run.
    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2
    intro_run = stored.runs[0]
    assert intro_run.content == intro
    assert len(intro_run.messages) == 1
    assert intro_run.messages[0].role == "assistant"
    assert intro_run.messages[0].content == intro
def test_team_introduction_only_added_once(shared_db):
    """Later runs in the same session must not duplicate the introduction."""
    sid = str(uuid.uuid4())
    intro = "Welcome! I'm here to help."
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    team.run("Hello")
    after_first = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert len(after_first.runs) == 2  # introduction + first run

    team.run("How are you?")
    after_second = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert len(after_second.runs) == 3  # introduction + two real runs

    # The introduction is still the head run and was not re-inserted.
    head = after_second.runs[0]
    assert head.content == intro
    assert head.messages[0].role == "assistant"
def test_team_introduction_with_chat_history(shared_db):
    """Introduction must work together with add_history_to_context."""
    sid = str(uuid.uuid4())
    intro = "I'm a specialized assistant for Python programming."
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
        add_history_to_context=True,
        num_history_runs=5,
    )

    # Second call runs with the introduction and first exchange in context.
    assert team.run("Tell me about Python lists") is not None
    assert team.run("What did you introduce yourself as?") is not None

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 3  # introduction + two runs
    assert stored.runs[0].content == intro
def test_team_introduction_streaming(shared_db):
    """Streaming mode must still persist the introduction run first."""
    sid = str(uuid.uuid4())
    intro = "Hello! Streaming assistant here."
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    chunks = [chunk for chunk in team.run("Hi there!", stream=True)]
    assert len(chunks) > 0

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2  # introduction + streamed run
    assert stored.runs[0].content == intro
    assert stored.runs[0].messages[0].role == "assistant"
async def test_team_introduction_async(shared_db):
    """The async entry point must persist the introduction identically to sync."""
    sid = str(uuid.uuid4())
    intro = "Async assistant at your service!"
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    result = await team.arun("Hello!")
    assert result is not None
    assert result.content is not None

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2  # introduction + async run
    intro_run = stored.runs[0]
    assert intro_run.content == intro
    assert intro_run.messages[0].role == "assistant"
    assert intro_run.messages[0].content == intro
async def test_team_introduction_async_streaming(shared_db):
    """Async streaming must also store the introduction as the head run."""
    sid = str(uuid.uuid4())
    intro = "Async streaming assistant ready!"
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    chunks = [chunk async for chunk in team.arun("Hello there!", stream=True)]
    assert len(chunks) > 0

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2  # introduction + async streamed run
    assert stored.runs[0].content == intro
    assert stored.runs[0].messages[0].role == "assistant"
def test_team_without_introduction(shared_db):
    """With introduction=None, no extra run may be created."""
    sid = str(uuid.uuid4())
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=None,  # explicitly disabled
    )

    assert team.run("Hello!") is not None

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 1  # only the real run
def test_team_introduction_with_different_sessions(shared_db):
    """Every new session must receive its own introduction run."""
    intro = "I'm your multi-session assistant."
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    # Exercise two independent sessions and check each in turn.
    for prompt in ("Hello from session 1", "Hello from session 2"):
        sid = str(uuid.uuid4())
        team.run(prompt, session_id=sid)

        stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
        assert stored is not None
        assert len(stored.runs) == 2  # introduction + run
        assert stored.runs[0].content == intro
def test_team_introduction_multiline(shared_db):
    """A multiline introduction must be stored verbatim, newlines included."""
    sid = str(uuid.uuid4())
    intro = """Hello! I'm your personal assistant.
I can help you with:
- Programming questions
- General knowledge
- Task planning
How can I assist you today?"""
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    assert team.run("What can you do?") is not None

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2
    first_run = stored.runs[0]
    assert first_run.content == intro
    for fragment in ("Programming questions", "General knowledge"):
        assert fragment in first_run.content
def test_team_get_chat_history_with_introduction(shared_db):
    """The introduction must be the first assistant message in chat history."""
    sid = str(uuid.uuid4())
    intro = "I'm your chat history assistant."
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
    )

    for prompt in ("First message", "Second message"):
        team.run(prompt)

    history = team.get_chat_history(session_id=sid)
    assert history is not None
    # At minimum: introduction + 2 user messages + 2 assistant responses
    # (team runs may also include system messages).
    assert len(history) >= 5
    head = history[0]
    assert head.role == "assistant"
    assert head.content == intro
def test_team_introduction_with_system_message(shared_db):
    """A custom system_message must not interfere with introduction storage."""
    sid = str(uuid.uuid4())
    intro = "Hello! I'm a specialized Python assistant."
    team = Team(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
        system_message="You are a Python programming expert. Always provide code examples.",
    )

    result = team.run("What do you do?")
    assert result is not None
    assert result.content is not None

    # The introduction still occupies the first run slot.
    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2  # introduction + real run
    intro_run = stored.runs[0]
    assert intro_run.content == intro
    assert intro_run.messages[0].role == "assistant"
    assert intro_run.messages[0].content == intro
def test_team_introduction_with_system_message_callable(shared_db):
    """A callable system_message must coexist with a static introduction."""
    sid = str(uuid.uuid4())
    intro = "Welcome! I'm your dynamic assistant."

    def dynamic_system_message(team):
        return f"You are {team.name}. Provide helpful responses."

    team = Team(
        name="DynamicBot",
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        session_id=sid,
        members=[Agent(model=OpenAIChat(id="gpt-4o-mini"))],
        introduction=intro,
        system_message=dynamic_system_message,
    )

    assert team.run("Tell me about yourself") is not None

    stored = shared_db.get_session(session_id=sid, session_type=SessionType.TEAM)
    assert stored is not None
    assert len(stored.runs) == 2
    assert stored.runs[0].content == intro
    assert stored.runs[0].messages[0].role == "assistant"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_introduction.py",
"license": "Apache License 2.0",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/tools/test_hackernews.py | """Unit tests for HackerNewsTools class."""
import json
from unittest.mock import MagicMock, patch
import pytest
from agno.tools.hackernews import HackerNewsTools
@pytest.fixture
def hackernews_tools():
    """HackerNewsTools with every tool enabled (constructor defaults)."""
    toolkit = HackerNewsTools()
    return toolkit
@pytest.fixture
def stories_only_tools():
    """HackerNewsTools exposing only the top-stories tool."""
    toolkit = HackerNewsTools(enable_get_user_details=False, enable_get_top_stories=True)
    return toolkit
@pytest.fixture
def user_details_only_tools():
    """HackerNewsTools exposing only the user-details tool."""
    toolkit = HackerNewsTools(enable_get_user_details=True, enable_get_top_stories=False)
    return toolkit
class TestHackerNewsToolsInitialization:
    """Constructor flag combinations and toolkit metadata."""

    @staticmethod
    def _registered(tools):
        """Names of the functions currently registered on the toolkit."""
        return {func.name for func in tools.functions.values()}

    def test_default_initialization(self):
        """Defaults register both tools and the canonical toolkit name."""
        tools = HackerNewsTools()
        names = self._registered(tools)
        assert "get_top_hackernews_stories" in names
        assert "get_user_details" in names
        assert tools.name == "hackers_news"

    def test_initialization_with_all_flag(self):
        """all=True overrides the individual disable flags."""
        tools = HackerNewsTools(enable_get_top_stories=False, enable_get_user_details=False, all=True)
        names = self._registered(tools)
        assert "get_top_hackernews_stories" in names
        assert "get_user_details" in names

    def test_initialization_stories_only(self):
        """Only the stories tool is registered when user details is off."""
        names = self._registered(HackerNewsTools(enable_get_top_stories=True, enable_get_user_details=False))
        assert "get_top_hackernews_stories" in names
        assert "get_user_details" not in names

    def test_initialization_user_details_only(self):
        """Only the user-details tool is registered when stories is off."""
        names = self._registered(HackerNewsTools(enable_get_top_stories=False, enable_get_user_details=True))
        assert "get_top_hackernews_stories" not in names
        assert "get_user_details" in names

    def test_initialization_no_tools_enabled(self):
        """Disabling both flags registers nothing."""
        names = self._registered(HackerNewsTools(enable_get_top_stories=False, enable_get_user_details=False))
        assert "get_top_hackernews_stories" not in names
        assert "get_user_details" not in names

    def test_toolkit_name(self):
        """Toolkit name is fixed regardless of flags."""
        assert HackerNewsTools().name == "hackers_news"
class TestGetTopHackerNewsStories:
    """Tests for get_top_hackernews_stories method.

    All HTTP traffic is intercepted by patching ``httpx.get`` inside the
    ``agno.tools.hackernews`` module: one URL shape returns the top-story
    id list, any other URL returns the matching story payload.
    """

    def test_get_top_stories_default_count(self, hackernews_tools):
        """Test getting top stories with default count."""
        mock_story_ids = [12345, 67890, 11111]
        mock_stories = [
            {"id": 12345, "title": "Story 1", "by": "user1", "url": "https://example1.com"},
            {"id": 67890, "title": "Story 2", "by": "user2", "url": "https://example2.com"},
            {"id": 11111, "title": "Story 3", "by": "user3", "url": "https://example3.com"},
        ]

        def mock_get(url):
            # Dispatch on the URL: the id-list endpoint vs. a per-story endpoint.
            response = MagicMock()
            if "topstories" in url:
                response.json.return_value = mock_story_ids
            else:
                # Extract story ID from URL
                story_id = int(url.split("/")[-1].replace(".json", ""))
                story = next((s for s in mock_stories if s["id"] == story_id), None)
                response.json.return_value = story
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=3)
            stories = json.loads(result)
            assert len(stories) == 3
            assert stories[0]["title"] == "Story 1"
            # The tool mirrors the HN "by" field into a "username" field.
            assert stories[0]["username"] == "user1"
            assert stories[1]["title"] == "Story 2"
            assert stories[2]["title"] == "Story 3"

    def test_get_top_stories_custom_count(self, hackernews_tools):
        """Test getting top stories with custom count."""
        mock_story_ids = [1, 2, 3, 4, 5]
        mock_stories = {
            1: {"id": 1, "title": "Story 1", "by": "user1"},
            2: {"id": 2, "title": "Story 2", "by": "user2"},
        }

        def mock_get(url):
            response = MagicMock()
            if "topstories" in url:
                response.json.return_value = mock_story_ids
            else:
                story_id = int(url.split("/")[-1].replace(".json", ""))
                response.json.return_value = mock_stories.get(story_id, {})
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=2)
            stories = json.loads(result)
            # Only num_stories ids are fetched even though 5 are listed.
            assert len(stories) == 2

    def test_get_top_stories_adds_username_field(self, hackernews_tools):
        """Test that username field is added from 'by' field."""
        mock_story_ids = [12345]
        mock_story = {"id": 12345, "title": "Test Story", "by": "testuser", "score": 100}

        def mock_get(url):
            response = MagicMock()
            if "topstories" in url:
                response.json.return_value = mock_story_ids
            else:
                response.json.return_value = mock_story
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=1)
            stories = json.loads(result)
            # "username" is added alongside the original "by" key, not replacing it.
            assert stories[0]["username"] == "testuser"
            assert stories[0]["by"] == "testuser"

    def test_get_top_stories_empty_response(self, hackernews_tools):
        """Test handling of empty story list."""

        def mock_get(url):
            # Every endpoint returns an empty list, so no stories are fetched.
            response = MagicMock()
            response.json.return_value = []
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=10)
            stories = json.loads(result)
            assert len(stories) == 0

    def test_get_top_stories_with_story_metadata(self, hackernews_tools):
        """Test that all story metadata is preserved."""
        mock_story_ids = [12345]
        mock_story = {
            "id": 12345,
            "title": "Test Story",
            "by": "testuser",
            "score": 100,
            "time": 1234567890,
            "descendants": 50,
            "url": "https://example.com/story",
            "type": "story",
        }

        def mock_get(url):
            response = MagicMock()
            if "topstories" in url:
                response.json.return_value = mock_story_ids
            else:
                response.json.return_value = mock_story
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=1)
            stories = json.loads(result)
            # Every field of the upstream payload survives serialization.
            assert stories[0]["id"] == 12345
            assert stories[0]["score"] == 100
            assert stories[0]["time"] == 1234567890
            assert stories[0]["descendants"] == 50
            assert stories[0]["url"] == "https://example.com/story"
class TestGetUserDetails:
    """Tests for get_user_details method.

    The HN user endpoint is mocked via ``patch(..., return_value=...)`` —
    a single canned response per test, no URL dispatch needed.
    """

    def test_get_user_details_success(self, hackernews_tools):
        """Test successful user details retrieval."""
        mock_user = {
            "id": "testuser",
            "karma": 5000,
            "about": "A test user",
            "submitted": [1, 2, 3, 4, 5],
        }
        mock_response = MagicMock()
        mock_response.json.return_value = mock_user

        with patch("agno.tools.hackernews.httpx.get", return_value=mock_response):
            result = hackernews_tools.get_user_details("testuser")
            user_details = json.loads(result)
            assert user_details["karma"] == 5000
            assert user_details["about"] == "A test user"
            # The tool summarizes the "submitted" list as a count.
            assert user_details["total_items_submitted"] == 5

    def test_get_user_details_user_id_field(self, hackernews_tools):
        """Test that user_id is extracted correctly."""
        # NOTE(review): this payload has "user_id" but no "id" key, yet the
        # result carries "id" — presumably the tool sets "id" from the
        # requested username rather than the payload; verify against the tool.
        mock_user = {
            "user_id": "testuser123",
            "karma": 1000,
            "about": None,
            "submitted": [],
        }
        mock_response = MagicMock()
        mock_response.json.return_value = mock_user

        with patch("agno.tools.hackernews.httpx.get", return_value=mock_response):
            result = hackernews_tools.get_user_details("testuser123")
            user_details = json.loads(result)
            assert user_details["id"] == "testuser123"

    def test_get_user_details_no_submitted_items(self, hackernews_tools):
        """Test user with no submitted items."""
        mock_user = {
            "id": "newuser",
            "karma": 1,
            "about": "New to HN",
            # No 'submitted' key
        }
        mock_response = MagicMock()
        mock_response.json.return_value = mock_user

        with patch("agno.tools.hackernews.httpx.get", return_value=mock_response):
            result = hackernews_tools.get_user_details("newuser")
            user_details = json.loads(result)
            # Missing "submitted" defaults to a count of zero.
            assert user_details["total_items_submitted"] == 0

    def test_get_user_details_empty_about(self, hackernews_tools):
        """Test user with empty about field."""
        mock_user = {
            "id": "quietuser",
            "karma": 500,
            "submitted": [1, 2],
        }
        mock_response = MagicMock()
        mock_response.json.return_value = mock_user

        with patch("agno.tools.hackernews.httpx.get", return_value=mock_response):
            result = hackernews_tools.get_user_details("quietuser")
            user_details = json.loads(result)
            # A missing "about" key surfaces as JSON null.
            assert user_details["about"] is None

    def test_get_user_details_error_handling(self, hackernews_tools):
        """Test error handling when API call fails."""
        # Network failures are caught and reported in the returned string,
        # not raised to the caller.
        with patch("agno.tools.hackernews.httpx.get", side_effect=Exception("Network error")):
            result = hackernews_tools.get_user_details("testuser")
            assert "Error getting user details" in result
            assert "Network error" in result

    def test_get_user_details_null_user(self, hackernews_tools):
        """Test handling of non-existent user (returns null)."""
        mock_response = MagicMock()
        mock_response.json.return_value = None

        with patch("agno.tools.hackernews.httpx.get", return_value=mock_response):
            result = hackernews_tools.get_user_details("nonexistentuser12345")
            # Should handle None gracefully by catching the exception
            assert "Error getting user details" in result
class TestToolkitIntegration:
    """Toolkit-level registration checks for HackerNewsTools."""

    def test_tools_list_populated(self, hackernews_tools):
        """Both tool callables are present by default."""
        assert len(hackernews_tools.tools) == 2

    def test_tools_list_stories_only(self, stories_only_tools):
        """Only the stories callable remains when user details is disabled."""
        registered = stories_only_tools.tools
        assert len(registered) == 1
        assert registered[0].__name__ == "get_top_hackernews_stories"

    def test_tools_list_user_details_only(self, user_details_only_tools):
        """Only the user-details callable remains when stories is disabled."""
        registered = user_details_only_tools.tools
        assert len(registered) == 1
        assert registered[0].__name__ == "get_user_details"

    def test_functions_dict_populated(self, hackernews_tools):
        """The functions mapping is keyed by tool name."""
        registered = hackernews_tools.functions
        assert "get_top_hackernews_stories" in registered
        assert "get_user_details" in registered
class TestEdgeCases:
    """Edge case tests for HackerNewsTools."""

    def test_get_top_stories_zero_count(self, hackernews_tools):
        """Test requesting zero stories."""
        mock_story_ids = [1, 2, 3]

        def mock_get(url):
            # num_stories=0 means no per-story fetches happen at all,
            # so a single canned id-list response suffices.
            response = MagicMock()
            response.json.return_value = mock_story_ids
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=0)
            stories = json.loads(result)
            assert len(stories) == 0

    def test_get_top_stories_more_than_available(self, hackernews_tools):
        """Test requesting more stories than available."""
        mock_story_ids = [1, 2]
        mock_stories = {
            1: {"id": 1, "title": "Story 1", "by": "user1"},
            2: {"id": 2, "title": "Story 2", "by": "user2"},
        }

        def mock_get(url):
            response = MagicMock()
            if "topstories" in url:
                response.json.return_value = mock_story_ids
            else:
                story_id = int(url.split("/")[-1].replace(".json", ""))
                response.json.return_value = mock_stories.get(story_id, {})
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=100)
            stories = json.loads(result)
            assert len(stories) == 2  # Only 2 available

    def test_get_user_details_special_characters_username(self, hackernews_tools):
        """Test user details with special characters in username."""
        mock_user = {
            "id": "user_with-special.chars",
            "karma": 100,
            "about": "Test",
            "submitted": [],
        }
        mock_response = MagicMock()
        mock_response.json.return_value = mock_user

        with patch("agno.tools.hackernews.httpx.get", return_value=mock_response):
            result = hackernews_tools.get_user_details("user_with-special.chars")
            user_details = json.loads(result)
            assert user_details["karma"] == 100

    def test_get_top_stories_high_karma_user(self, hackernews_tools):
        """Test story from high karma user."""
        mock_story_ids = [12345]
        mock_story = {
            "id": 12345,
            "title": "Story from popular user",
            "by": "pg",  # Paul Graham's username
            "score": 5000,
        }

        def mock_get(url):
            response = MagicMock()
            if "topstories" in url:
                response.json.return_value = mock_story_ids
            else:
                response.json.return_value = mock_story
            return response

        with patch("agno.tools.hackernews.httpx.get", side_effect=mock_get):
            result = hackernews_tools.get_top_hackernews_stories(num_stories=1)
            stories = json.loads(result)
            assert stories[0]["by"] == "pg"
            assert stories[0]["score"] == 5000
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_hackernews.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/db/mysql/async_mysql.py | import time
from datetime import date, datetime, timedelta, timezone
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
if TYPE_CHECKING:
from agno.tracing.schemas import Span, Trace
from agno.db.base import AsyncBaseDb, SessionType
from agno.db.migrations.manager import MigrationManager
from agno.db.mysql.schemas import get_table_schema_definition
from agno.db.mysql.utils import (
abulk_upsert_metrics,
acreate_schema,
ais_table_available,
ais_valid_table,
apply_sorting,
calculate_date_metrics,
deserialize_cultural_knowledge_from_db,
fetch_all_sessions_data,
get_dates_to_calculate_metrics_for,
serialize_cultural_knowledge_for_db,
)
from agno.db.schemas.culture import CulturalKnowledge
from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
from agno.db.schemas.knowledge import KnowledgeRow
from agno.db.schemas.memory import UserMemory
from agno.session import AgentSession, Session, TeamSession, WorkflowSession
from agno.utils.log import log_debug, log_error, log_info, log_warning
from agno.utils.string import generate_id
try:
from sqlalchemy import TEXT, ForeignKey, Index, UniqueConstraint, and_, cast, func, update
from sqlalchemy.dialects import mysql
from sqlalchemy.ext.asyncio import AsyncEngine, async_sessionmaker, create_async_engine
from sqlalchemy.schema import Column, MetaData, Table
from sqlalchemy.sql.expression import select, text
except ImportError:
raise ImportError("`sqlalchemy` not installed. Please install it using `pip install sqlalchemy`")
class AsyncMySQLDb(AsyncBaseDb):
def __init__(
    self,
    id: Optional[str] = None,
    db_url: Optional[str] = None,
    db_engine: Optional[AsyncEngine] = None,
    db_schema: Optional[str] = None,
    session_table: Optional[str] = None,
    memory_table: Optional[str] = None,
    metrics_table: Optional[str] = None,
    eval_table: Optional[str] = None,
    knowledge_table: Optional[str] = None,
    culture_table: Optional[str] = None,
    traces_table: Optional[str] = None,
    spans_table: Optional[str] = None,
    versions_table: Optional[str] = None,
    create_schema: bool = True,
):
    """
    Async interface for interacting with a MySQL database.

    The following order is used to determine the database connection:
        1. Use the db_engine if provided
        2. Use the db_url
        3. Raise an error if neither is provided

    Args:
        id (Optional[str]): The ID of the database. Derived from the connection
            seed and schema when not provided.
        db_url (Optional[str]): The database URL to connect to. Should use asyncmy driver (e.g. mysql+asyncmy://...)
        db_engine (Optional[AsyncEngine]): The SQLAlchemy async database engine to use.
        db_schema (Optional[str]): The database schema to use. Defaults to "ai".
        session_table (Optional[str]): Name of the table to store Agent, Team and Workflow sessions.
        memory_table (Optional[str]): Name of the table to store memories.
        metrics_table (Optional[str]): Name of the table to store metrics.
        eval_table (Optional[str]): Name of the table to store evaluation runs data.
        knowledge_table (Optional[str]): Name of the table to store knowledge content.
        culture_table (Optional[str]): Name of the table to store cultural knowledge.
        traces_table (Optional[str]): Name of the table to store run traces.
        spans_table (Optional[str]): Name of the table to store span events.
        versions_table (Optional[str]): Name of the table to store schema versions.
        create_schema (bool): Whether to automatically create the database schema if it doesn't exist.
            Set to False if schema is managed externally (e.g., via migrations). Defaults to True.

    Raises:
        ValueError: If neither db_url nor db_engine is provided.
        ValueError: If none of the tables are provided.
    """
    if id is None:
        # BUGFIX: the previous expression
        #   `db_url or str(db_engine.url) if db_engine else ""`
        # parsed as `(db_url or str(db_engine.url)) if db_engine else ""`,
        # so a db_url-only configuration produced an empty seed and a
        # non-unique generated id. Prefer db_url, then the engine's URL.
        if db_url is not None:
            base_seed = db_url
        elif db_engine is not None:
            base_seed = str(db_engine.url)
        else:
            base_seed = ""
        schema_suffix = db_schema if db_schema is not None else "ai"
        seed = f"{base_seed}#{schema_suffix}"
        id = generate_id(seed)

    super().__init__(
        id=id,
        session_table=session_table,
        memory_table=memory_table,
        metrics_table=metrics_table,
        eval_table=eval_table,
        knowledge_table=knowledge_table,
        culture_table=culture_table,
        traces_table=traces_table,
        spans_table=spans_table,
        versions_table=versions_table,
    )

    # Resolve the engine: explicit engine wins, otherwise build one from the URL.
    _engine: Optional[AsyncEngine] = db_engine
    if _engine is None and db_url is not None:
        _engine = create_async_engine(db_url)
    if _engine is None:
        raise ValueError("One of db_url or db_engine must be provided")

    self.db_url: Optional[str] = db_url
    self.db_engine: AsyncEngine = _engine
    self.db_schema: str = db_schema if db_schema is not None else "ai"
    self.metadata: MetaData = MetaData(schema=self.db_schema)
    self.create_schema: bool = create_schema

    # Initialize database session factory
    self.async_session_factory = async_sessionmaker(
        bind=self.db_engine,
        expire_on_commit=False,
    )
async def close(self) -> None:
    """Dispose of the async engine's connection pool.

    Call during application shutdown so all pooled database
    connections are properly released.
    """
    engine = self.db_engine
    if engine is None:
        return
    await engine.dispose()
# -- DB methods --
async def table_exists(self, table_name: str) -> bool:
    """Check whether a table exists in this database's schema.

    Args:
        table_name: Name of the table to check

    Returns:
        bool: True if the table exists in the database, False otherwise
    """
    async with self.async_session_factory() as session:
        found = await ais_table_available(session=session, table_name=table_name, db_schema=self.db_schema)
    return found
async def _create_table(self, table_name: str, table_type: str) -> Table:
    """
    Create a table with the appropriate schema based on the table type.

    Also creates the database schema (when self.create_schema is True), creates
    any declared single-column indexes, and records the latest schema version for
    the new table in the versions table.

    Args:
        table_name (str): Name of the table to create
        table_type (str): Type of table (used to get schema definition)

    Returns:
        Table: SQLAlchemy Table object

    Raises:
        Exception: Re-raised if table creation fails (index-creation errors are
            only logged, not raised).
    """
    try:
        # Pass traces_table_name and db_schema for spans table foreign key resolution
        table_schema = get_table_schema_definition(
            table_type, traces_table_name=self.trace_table_name, db_schema=self.db_schema
        ).copy()
        log_debug(f"Creating table {self.db_schema}.{table_name} with schema: {table_schema}")
        columns: List[Column] = []
        indexes: List[str] = []
        unique_constraints: List[str] = []
        # Multi-column unique constraints live under a special key, not a column entry
        schema_unique_constraints = table_schema.pop("_unique_constraints", [])
        # Get the columns, indexes, and unique constraints from the table schema
        for col_name, col_config in table_schema.items():
            # col_config["type"] is a type class; instantiate it for the Column
            column_args = [col_name, col_config["type"]()]
            column_kwargs = {}
            if col_config.get("primary_key", False):
                column_kwargs["primary_key"] = True
            if "nullable" in col_config:
                column_kwargs["nullable"] = col_config["nullable"]
            if col_config.get("index", False):
                indexes.append(col_name)
            if col_config.get("unique", False):
                column_kwargs["unique"] = True
                unique_constraints.append(col_name)
            # Handle foreign key constraint
            if "foreign_key" in col_config:
                column_args.append(ForeignKey(col_config["foreign_key"]))
            columns.append(Column(*column_args, **column_kwargs))  # type: ignore
        # Create the table object - use self.metadata to maintain FK references
        table = Table(table_name, self.metadata, *columns, schema=self.db_schema)
        # Add multi-column unique constraints with table-specific names
        for constraint in schema_unique_constraints:
            constraint_name = f"{table_name}_{constraint['name']}"
            constraint_columns = constraint["columns"]
            table.append_constraint(UniqueConstraint(*constraint_columns, name=constraint_name))
        # Add indexes to the table definition
        for idx_col in indexes:
            idx_name = f"idx_{table_name}_{idx_col}"
            table.append_constraint(Index(idx_name, idx_col))
        # Create schema if not exists
        if self.create_schema:
            async with self.async_session_factory() as sess, sess.begin():
                await acreate_schema(session=sess, db_schema=self.db_schema)
        # Create table
        table_created = False
        if not await self.table_exists(table_name):
            async with self.db_engine.begin() as conn:
                await conn.run_sync(table.create, checkfirst=True)
            log_debug(f"Successfully created table '{table_name}'")
            table_created = True
        else:
            log_debug(f"Table {self.db_schema}.{table_name} already exists, skipping creation")
        # Create indexes
        for idx in table.indexes:
            try:
                # Check if index already exists (probe information_schema to avoid
                # MySQL erroring on duplicate index creation)
                async with self.async_session_factory() as sess:
                    exists_query = text(
                        "SELECT 1 FROM information_schema.statistics WHERE table_schema = :schema "
                        "AND table_name = :table_name AND index_name = :index_name"
                    )
                    result = await sess.execute(
                        exists_query, {"schema": self.db_schema, "table_name": table_name, "index_name": idx.name}
                    )
                    exists = result.scalar() is not None
                if exists:
                    log_debug(
                        f"Index {idx.name} already exists in {self.db_schema}.{table_name}, skipping creation"
                    )
                    continue
                async with self.db_engine.begin() as conn:
                    await conn.run_sync(idx.create)
                log_debug(f"Created index: {idx.name} for table {self.db_schema}.{table_name}")
            except Exception as e:
                # Index failures are non-fatal: the table itself is still usable
                log_error(f"Error creating index {idx.name}: {e}")
        log_debug(f"Successfully created table {table_name} in schema {self.db_schema}")
        # Store the schema version for the created table (skip the versions table
        # itself to avoid recursion, and skip if the table already existed)
        if table_name != self.versions_table_name and table_created:
            latest_schema_version = MigrationManager(self).latest_schema_version
            await self.upsert_schema_version(table_name=table_name, version=latest_schema_version.public)
            log_info(
                f"Successfully stored version {latest_schema_version.public} in database for table {table_name}"
            )
        return table
    except Exception as e:
        log_error(f"Could not create table {self.db_schema}.{table_name}: {e}")
        raise
async def _create_all_tables(self):
    """Create every table managed by this database (sessions, memories,
    metrics, evals, knowledge, culture, traces, spans, versions)."""
    for name, kind in (
        (self.session_table_name, "sessions"),
        (self.memory_table_name, "memories"),
        (self.metrics_table_name, "metrics"),
        (self.eval_table_name, "evals"),
        (self.knowledge_table_name, "knowledge"),
        (self.culture_table_name, "culture"),
        (self.trace_table_name, "traces"),
        (self.span_table_name, "spans"),
        (self.versions_table_name, "versions"),
    ):
        await self._get_or_create_table(table_name=name, table_type=kind, create_table_if_not_found=True)
async def _get_table(self, table_type: str, create_table_if_not_found: Optional[bool] = False) -> Table:
    """Resolve the SQLAlchemy Table for a logical table type, caching it on self.

    Args:
        table_type (str): One of "sessions", "memories", "metrics", "evals",
            "knowledge", "culture", "versions", "traces", "spans".
        create_table_if_not_found (Optional[bool]): Whether to create the table
            if it does not exist yet.

    Returns:
        Table: The resolved SQLAlchemy Table object.

    Raises:
        ValueError: If table_type is not a known type.
    """
    # Registry: table_type -> (cache attribute on self, configured table name)
    registry = {
        "sessions": ("session_table", self.session_table_name),
        "memories": ("memory_table", self.memory_table_name),
        "metrics": ("metrics_table", self.metrics_table_name),
        "evals": ("eval_table", self.eval_table_name),
        "knowledge": ("knowledge_table", self.knowledge_table_name),
        "culture": ("culture_table", self.culture_table_name),
        "versions": ("versions_table", self.versions_table_name),
        "traces": ("traces_table", self.trace_table_name),
        "spans": ("spans_table", self.span_table_name),
    }
    if table_type not in registry:
        raise ValueError(f"Unknown table type: {table_type}")
    # Spans carries a foreign key to traces, so ensure traces exists first
    if table_type == "spans" and create_table_if_not_found:
        await self._get_table(table_type="traces", create_table_if_not_found=True)
    cache_attr, table_name = registry[table_type]
    table = await self._get_or_create_table(
        table_name=table_name,
        table_type=table_type,
        create_table_if_not_found=create_table_if_not_found,
    )
    setattr(self, cache_attr, table)
    return table
async def _get_or_create_table(
    self, table_name: str, table_type: str, create_table_if_not_found: Optional[bool] = False
) -> Table:
    """
    Check if the table exists and is valid, else create it.

    Args:
        table_name (str): Name of the table to get or create
        table_type (str): Type of table (used to get schema definition)
        create_table_if_not_found (Optional[bool]): Whether to create the table
            when it does not exist. Defaults to False.

    Returns:
        Table: SQLAlchemy Table object representing the schema.

    Raises:
        ValueError: If the table exists but its schema does not match the
            expected definition for this table type.
        Exception: Re-raised if reflecting the existing table fails.
    """
    async with self.async_session_factory() as sess, sess.begin():
        table_is_available = await ais_table_available(
            session=sess, table_name=table_name, db_schema=self.db_schema
        )
    if (not table_is_available) and create_table_if_not_found:
        return await self._create_table(table_name=table_name, table_type=table_type)
    if not await ais_valid_table(
        db_engine=self.db_engine,
        table_name=table_name,
        table_type=table_type,
        db_schema=self.db_schema,
    ):
        raise ValueError(f"Table {self.db_schema}.{table_name} has an invalid schema")
    try:
        async with self.db_engine.connect() as conn:
            # Reflect the live table definition from the database (autoload)
            # via run_sync, since reflection is a sync-only SQLAlchemy API.
            def create_table(connection):
                return Table(table_name, self.metadata, schema=self.db_schema, autoload_with=connection)

            table = await conn.run_sync(create_table)
            return table
    except Exception as e:
        log_error(f"Error loading existing table {self.db_schema}.{table_name}: {e}")
        raise
async def get_latest_schema_version(self, table_name: str) -> str:
    """Return the most recent schema version recorded for *table_name*.

    Falls back to "2.0.0" when no version row exists for the table.
    """
    table = await self._get_table(table_type="versions", create_table_if_not_found=True)
    stmt = (
        select(table)
        .where(table.c.table_name == table_name)
        .order_by(table.c.version.desc())
        .limit(1)
    )  # type: ignore
    async with self.async_session_factory() as sess:
        row = (await sess.execute(stmt)).fetchone()
    if row is None:
        return "2.0.0"
    return dict(row._mapping).get("version") or "2.0.0"
async def upsert_schema_version(self, table_name: str, version: str) -> None:
    """Insert or update the schema-version row for *table_name* (MySQL upsert)."""
    table = await self._get_table(table_type="versions", create_table_if_not_found=True)
    # Timestamps are stored as ISO-format strings
    now_iso = datetime.now().isoformat()
    insert_stmt = mysql.insert(table).values(  # type: ignore
        table_name=table_name,
        version=version,
        created_at=now_iso,
        updated_at=now_iso,
    )
    # On duplicate table_name, overwrite the version and timestamps
    upsert_stmt = insert_stmt.on_duplicate_key_update(
        version=version,
        created_at=now_iso,
        updated_at=now_iso,
    )
    async with self.async_session_factory() as sess, sess.begin():
        await sess.execute(upsert_stmt)
# -- Session methods --
async def delete_session(self, session_id: str, user_id: Optional[str] = None) -> bool:
    """
    Delete a session from the database.

    Args:
        session_id (str): ID of the session to delete
        user_id (Optional[str]): User ID to filter by. Defaults to None.

    Returns:
        bool: True if the session was deleted, False otherwise (including on error).
    """
    try:
        table = await self._get_table(table_type="sessions")
        stmt = table.delete().where(table.c.session_id == session_id)
        if user_id is not None:
            stmt = stmt.where(table.c.user_id == user_id)
        async with self.async_session_factory() as sess, sess.begin():
            result = await sess.execute(stmt)
        deleted = result.rowcount > 0  # type: ignore
        if deleted:
            log_debug(f"Successfully deleted session with session_id: {session_id} in table {table.name}")
        else:
            log_debug(f"No session found to delete with session_id: {session_id} in table {table.name}")
        return deleted
    except Exception as e:
        log_error(f"Error deleting session: {e}")
        return False
async def delete_sessions(self, session_ids: List[str], user_id: Optional[str] = None) -> None:
    """Delete all given sessions from the database in one statement.

    Can handle multiple session types in the same run.

    Args:
        session_ids (List[str]): The IDs of the sessions to delete.
        user_id (Optional[str]): User ID to filter by. Defaults to None.
    """
    try:
        table = await self._get_table(table_type="sessions")
        stmt = table.delete().where(table.c.session_id.in_(session_ids))
        if user_id is not None:
            stmt = stmt.where(table.c.user_id == user_id)
        async with self.async_session_factory() as sess, sess.begin():
            result = await sess.execute(stmt)
            log_debug(f"Successfully deleted {result.rowcount} sessions")  # type: ignore
    except Exception as e:
        log_error(f"Error deleting sessions: {e}")
async def get_session(
    self,
    session_id: str,
    session_type: SessionType,
    user_id: Optional[str] = None,
    deserialize: Optional[bool] = True,
) -> Optional[Union[Session, Dict[str, Any]]]:
    """
    Read a single session row from the database.

    Args:
        session_id (str): ID of the session to read.
        session_type (SessionType): Which session class to deserialize into.
        user_id (Optional[str]): User ID to filter by. Defaults to None.
        deserialize (Optional[bool]): Whether to deserialize the session. Defaults to True.

    Returns:
        Union[Session, Dict[str, Any], None]:
            - When deserialize=True: Session object
            - When deserialize=False: Session dictionary
            None when no row matches or an error occurs.
    """
    try:
        table = await self._get_table(table_type="sessions")
        stmt = select(table).where(table.c.session_id == session_id)
        if user_id is not None:
            stmt = stmt.where(table.c.user_id == user_id)
        async with self.async_session_factory() as sess:
            row = (await sess.execute(stmt)).fetchone()
        if row is None:
            return None
        raw_session = dict(row._mapping)
        if not deserialize:
            return raw_session
        # Dispatch to the concrete session class for deserialization
        session_classes = {
            SessionType.AGENT: AgentSession,
            SessionType.TEAM: TeamSession,
            SessionType.WORKFLOW: WorkflowSession,
        }
        session_cls = session_classes.get(session_type)
        if session_cls is None:
            raise ValueError(f"Invalid session type: {session_type}")
        return session_cls.from_dict(raw_session)
    except Exception as e:
        log_error(f"Exception reading from session table: {e}")
        return None
async def get_sessions(
    self,
    session_type: Optional[SessionType] = None,
    user_id: Optional[str] = None,
    component_id: Optional[str] = None,
    session_name: Optional[str] = None,
    start_timestamp: Optional[int] = None,
    end_timestamp: Optional[int] = None,
    limit: Optional[int] = None,
    page: Optional[int] = None,
    sort_by: Optional[str] = None,
    sort_order: Optional[str] = None,
    deserialize: Optional[bool] = True,
) -> Union[List[Session], Tuple[List[Dict[str, Any]], int]]:
    """
    Get all sessions in the given table. Can filter by user_id and entity_id.

    Args:
        session_type (Optional[SessionType]): The type of sessions to return.
        user_id (Optional[str]): The ID of the user to filter by.
        component_id (Optional[str]): The ID of the agent / team / workflow to filter by.
        session_name (Optional[str]): The name of the session to filter by (substring match).
        start_timestamp (Optional[int]): The start timestamp to filter by.
        end_timestamp (Optional[int]): The end timestamp to filter by.
        limit (Optional[int]): The maximum number of sessions to return. Defaults to None.
        page (Optional[int]): The 1-indexed page number to return. Only applied
            together with limit. Defaults to None.
        sort_by (Optional[str]): The field to sort by. Defaults to None.
        sort_order (Optional[str]): The sort order. Defaults to None.
        deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.

    Returns:
        Union[List[Session], Tuple[List[Dict], int]]:
            - When deserialize=True: List of Session objects
            - When deserialize=False: Tuple of (session dictionaries, total count)

    Raises:
        Exception: Errors are caught, logged, and an empty result is returned.
    """
    try:
        table = await self._get_table(table_type="sessions")
        async with self.async_session_factory() as sess, sess.begin():
            stmt = select(table)
            # Filtering
            if user_id is not None:
                stmt = stmt.where(table.c.user_id == user_id)
            if component_id is not None:
                # Which column holds the component ID depends on the session type
                if session_type == SessionType.AGENT:
                    stmt = stmt.where(table.c.agent_id == component_id)
                elif session_type == SessionType.TEAM:
                    stmt = stmt.where(table.c.team_id == component_id)
                elif session_type == SessionType.WORKFLOW:
                    stmt = stmt.where(table.c.workflow_id == component_id)
            if start_timestamp is not None:
                stmt = stmt.where(table.c.created_at >= start_timestamp)
            if end_timestamp is not None:
                stmt = stmt.where(table.c.created_at <= end_timestamp)
            if session_name is not None:
                # MySQL JSON extraction syntax: match session_name inside the
                # session_data JSON blob, case-insensitively
                stmt = stmt.where(
                    func.coalesce(
                        func.json_unquote(func.json_extract(table.c.session_data, "$.session_name")), ""
                    ).ilike(f"%{session_name}%")
                )
            if session_type is not None:
                session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
                stmt = stmt.where(table.c.session_type == session_type_value)
            # Total count of matching rows, before sorting/pagination
            count_stmt = select(func.count()).select_from(stmt.alias())
            total_count = await sess.scalar(count_stmt) or 0
            # Sorting
            stmt = apply_sorting(stmt, table, sort_by, sort_order)
            # Paginating. Offset is only meaningful together with a limit:
            # (page - 1) * None would raise a TypeError.
            if limit is not None:
                stmt = stmt.limit(limit)
                if page is not None:
                    stmt = stmt.offset((page - 1) * limit)
            result = await sess.execute(stmt)
            records = result.fetchall()
            sessions_raw = [dict(record._mapping) for record in records]
            if not deserialize:
                return sessions_raw, total_count
            if session_type == SessionType.AGENT:
                return [AgentSession.from_dict(record) for record in sessions_raw]  # type: ignore
            elif session_type == SessionType.TEAM:
                return [TeamSession.from_dict(record) for record in sessions_raw]  # type: ignore
            elif session_type == SessionType.WORKFLOW:
                return [WorkflowSession.from_dict(record) for record in sessions_raw]  # type: ignore
            else:
                raise ValueError(f"Invalid session type: {session_type}")
    except Exception as e:
        log_error(f"Exception reading from session table: {e}")
        return [] if deserialize else ([], 0)
async def rename_session(
    self,
    session_id: str,
    session_type: SessionType,
    session_name: str,
    user_id: Optional[str] = None,
    deserialize: Optional[bool] = True,
) -> Optional[Union[Session, Dict[str, Any]]]:
    """
    Rename a session in the database.

    Args:
        session_id (str): The ID of the session to rename.
        session_type (SessionType): The type of session to rename.
        session_name (str): The new name for the session.
        user_id (Optional[str]): User ID to filter by. Defaults to None.
        deserialize (Optional[bool]): Whether to deserialize the session. Defaults to True.

    Returns:
        Optional[Union[Session, Dict[str, Any]]]:
            - When deserialize=True: Session object
            - When deserialize=False: Session dictionary
            None when the session is not found or an error occurs.
    """
    try:
        table = await self._get_table(table_type="sessions")
        # MySQL JSON_SET syntax: write the name into the session_data JSON blob
        update_stmt = (
            update(table)
            .where(table.c.session_id == session_id)
            .where(table.c.session_type == session_type.value)
            .values(session_data=func.json_set(table.c.session_data, "$.session_name", session_name))
        )
        select_stmt = select(table).where(table.c.session_id == session_id)
        if user_id is not None:
            update_stmt = update_stmt.where(table.c.user_id == user_id)
            select_stmt = select_stmt.where(table.c.user_id == user_id)
        async with self.async_session_factory() as sess, sess.begin():
            await sess.execute(update_stmt)
            # Fetch the updated row so callers see the persisted state
            row = (await sess.execute(select_stmt)).fetchone()
            if not row:
                return None
            log_debug(f"Renamed session with id '{session_id}' to '{session_name}'")
            renamed = dict(row._mapping)
            if not deserialize:
                return renamed
            # Dispatch to the appropriate session class
            session_classes = {
                SessionType.AGENT: AgentSession,
                SessionType.TEAM: TeamSession,
                SessionType.WORKFLOW: WorkflowSession,
            }
            session_cls = session_classes.get(session_type)
            if session_cls is None:
                raise ValueError(f"Invalid session type: {session_type}")
            return session_cls.from_dict(renamed)
    except Exception as e:
        log_error(f"Exception renaming session: {e}")
        return None
async def upsert_session(
    self, session: Session, deserialize: Optional[bool] = True
) -> Optional[Union[Session, Dict[str, Any]]]:
    """
    Insert or update a session in the database.

    Args:
        session (Session): The session data to upsert.
        deserialize (Optional[bool]): Whether to deserialize the session. Defaults to True.

    Returns:
        Optional[Union[Session, Dict[str, Any]]]:
            - When deserialize=True: Session object
            - When deserialize=False: Session dictionary
            None on error, when the existing row belongs to a different user,
            or when the row cannot be read back after the upsert.
    """
    try:
        table = await self._get_table(table_type="sessions", create_table_if_not_found=True)
        # The three session types differ only in their type value, log label,
        # and the names of their id/data columns; everything else is shared.
        if isinstance(session, AgentSession):
            spec = (SessionType.AGENT.value, "agent", "agent_id", "agent_data", AgentSession)
        elif isinstance(session, TeamSession):
            spec = (SessionType.TEAM.value, "team", "team_id", "team_data", TeamSession)
        elif isinstance(session, WorkflowSession):
            spec = (SessionType.WORKFLOW.value, "workflow", "workflow_id", "workflow_data", WorkflowSession)
        else:
            raise ValueError(f"Invalid session type: {session.session_type}")
        session_type_value, label, id_column, data_column, session_cls = spec
        return await self._upsert_one_session(
            table=table,
            session_dict=session.to_dict(),
            session_type_value=session_type_value,
            label=label,
            id_column=id_column,
            data_column=data_column,
            session_cls=session_cls,
            deserialize=deserialize,
        )
    except Exception as e:
        log_error(f"Exception upserting into sessions table: {e}")
        return None

async def _upsert_one_session(
    self,
    table: Table,
    session_dict: Dict[str, Any],
    session_type_value: str,
    label: str,
    id_column: str,
    data_column: str,
    session_cls: Any,
    deserialize: Optional[bool],
) -> Optional[Union[Session, Dict[str, Any]]]:
    """Upsert a single session row; shared implementation for all session types.

    Locks the existing row (SELECT ... FOR UPDATE), refuses to overwrite a row
    owned by a different user (returns None), performs a MySQL
    INSERT ... ON DUPLICATE KEY UPDATE, and reads the row back.
    """
    async with self.async_session_factory() as sess, sess.begin():
        existing_result = await sess.execute(
            select(table.c.user_id)
            .where(table.c.session_id == session_dict.get("session_id"))
            .with_for_update()
        )
        existing_row = existing_result.fetchone()
        if existing_row is not None:
            existing_uid = existing_row[0]
            # Never let one user's upsert clobber another user's session
            if existing_uid is not None and existing_uid != session_dict.get("user_id"):
                return None
        current_time = int(time.time())
        stmt = mysql.insert(table).values(
            {
                "session_id": session_dict.get("session_id"),
                "session_type": session_type_value,
                id_column: session_dict.get(id_column),
                "user_id": session_dict.get("user_id"),
                "runs": session_dict.get("runs"),
                data_column: session_dict.get(data_column),
                "session_data": session_dict.get("session_data"),
                "summary": session_dict.get("summary"),
                "metadata": session_dict.get("metadata"),
                "created_at": session_dict.get("created_at") or current_time,
                "updated_at": session_dict.get("updated_at") or current_time,
            }
        )
        # On duplicate key, refresh all mutable columns; updated_at is always "now"
        stmt = stmt.on_duplicate_key_update(
            **{
                id_column: session_dict.get(id_column),
                "user_id": session_dict.get("user_id"),
                data_column: session_dict.get(data_column),
                "session_data": session_dict.get("session_data"),
                "summary": session_dict.get("summary"),
                "metadata": session_dict.get("metadata"),
                "runs": session_dict.get("runs"),
                "updated_at": int(time.time()),
            }
        )
        await sess.execute(stmt)
        # Fetch the row so callers get the persisted state
        select_stmt = select(table).where(table.c.session_id == session_dict.get("session_id"))
        result = await sess.execute(select_stmt)
        row = result.fetchone()
        if row is None:
            return None
        upserted_dict = dict(row._mapping)
        log_debug(f"Upserted {label} session with id '{upserted_dict.get('session_id')}'")
        if not deserialize:
            return upserted_dict
        return session_cls.from_dict(upserted_dict)
async def upsert_sessions(
    self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
) -> List[Union[Session, Dict[str, Any]]]:
    """
    Bulk upsert multiple sessions for improved performance on large datasets.

    Sessions are grouped by concrete type (agent/team/workflow) and each group
    is upserted with a single executemany INSERT ... ON DUPLICATE KEY UPDATE,
    then read back. If the bulk path fails, falls back to upserting each
    session individually via upsert_session.

    Args:
        sessions (List[Session]): List of sessions to upsert.
        deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.
        preserve_updated_at (bool): If True, preserve the updated_at from the session object.

    Returns:
        List[Union[Session, Dict[str, Any]]]: List of upserted sessions.
            NOTE: results are ordered by type group (agents, teams, workflows),
            not by the input order.
    """
    if not sessions:
        return []
    try:
        table = await self._get_table(table_type="sessions")
        # Group sessions by type for batch processing
        agent_sessions = []
        team_sessions = []
        workflow_sessions = []
        for session in sessions:
            if isinstance(session, AgentSession):
                agent_sessions.append(session)
            elif isinstance(session, TeamSession):
                team_sessions.append(session)
            elif isinstance(session, WorkflowSession):
                workflow_sessions.append(session)
        results: List[Union[Session, Dict[str, Any]]] = []
        # Process each session type in bulk
        async with self.async_session_factory() as sess, sess.begin():
            # Bulk upsert agent sessions
            if agent_sessions:
                agent_data = []
                for session in agent_sessions:
                    session_dict = session.to_dict()
                    # Use preserved updated_at if flag is set and value exists, otherwise use current time
                    updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
                    agent_data.append(
                        {
                            "session_id": session_dict.get("session_id"),
                            "session_type": SessionType.AGENT.value,
                            "agent_id": session_dict.get("agent_id"),
                            "user_id": session_dict.get("user_id"),
                            "runs": session_dict.get("runs"),
                            "agent_data": session_dict.get("agent_data"),
                            "session_data": session_dict.get("session_data"),
                            "summary": session_dict.get("summary"),
                            "metadata": session_dict.get("metadata"),
                            # NOTE(review): unlike upsert_session, created_at is not
                            # defaulted to "now" when missing — confirm this is intended
                            "created_at": session_dict.get("created_at"),
                            "updated_at": updated_at,
                        }
                    )
                if agent_data:
                    # executemany-style upsert: stmt.inserted refers to each row's values
                    stmt = mysql.insert(table)
                    stmt = stmt.on_duplicate_key_update(
                        agent_id=stmt.inserted.agent_id,
                        user_id=stmt.inserted.user_id,
                        agent_data=stmt.inserted.agent_data,
                        session_data=stmt.inserted.session_data,
                        summary=stmt.inserted.summary,
                        metadata=stmt.inserted.metadata,
                        runs=stmt.inserted.runs,
                        updated_at=stmt.inserted.updated_at,
                    )
                    await sess.execute(stmt, agent_data)
                # Fetch the results for agent sessions
                agent_ids = [session.session_id for session in agent_sessions]
                select_stmt = select(table).where(table.c.session_id.in_(agent_ids))
                result = await sess.execute(select_stmt)
                fetched_rows = result.fetchall()
                for row in fetched_rows:
                    session_dict = dict(row._mapping)
                    if deserialize:
                        deserialized_agent_session = AgentSession.from_dict(session_dict)
                        if deserialized_agent_session is None:
                            continue
                        results.append(deserialized_agent_session)
                    else:
                        results.append(session_dict)
            # Bulk upsert team sessions
            if team_sessions:
                team_data = []
                for session in team_sessions:
                    session_dict = session.to_dict()
                    # Use preserved updated_at if flag is set and value exists, otherwise use current time
                    updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
                    team_data.append(
                        {
                            "session_id": session_dict.get("session_id"),
                            "session_type": SessionType.TEAM.value,
                            "team_id": session_dict.get("team_id"),
                            "user_id": session_dict.get("user_id"),
                            "runs": session_dict.get("runs"),
                            "team_data": session_dict.get("team_data"),
                            "session_data": session_dict.get("session_data"),
                            "summary": session_dict.get("summary"),
                            "metadata": session_dict.get("metadata"),
                            "created_at": session_dict.get("created_at"),
                            "updated_at": updated_at,
                        }
                    )
                if team_data:
                    stmt = mysql.insert(table)
                    stmt = stmt.on_duplicate_key_update(
                        team_id=stmt.inserted.team_id,
                        user_id=stmt.inserted.user_id,
                        team_data=stmt.inserted.team_data,
                        session_data=stmt.inserted.session_data,
                        summary=stmt.inserted.summary,
                        metadata=stmt.inserted.metadata,
                        runs=stmt.inserted.runs,
                        updated_at=stmt.inserted.updated_at,
                    )
                    await sess.execute(stmt, team_data)
                # Fetch the results for team sessions
                team_ids = [session.session_id for session in team_sessions]
                select_stmt = select(table).where(table.c.session_id.in_(team_ids))
                result = await sess.execute(select_stmt)
                fetched_rows = result.fetchall()
                for row in fetched_rows:
                    session_dict = dict(row._mapping)
                    if deserialize:
                        deserialized_team_session = TeamSession.from_dict(session_dict)
                        if deserialized_team_session is None:
                            continue
                        results.append(deserialized_team_session)
                    else:
                        results.append(session_dict)
            # Bulk upsert workflow sessions
            if workflow_sessions:
                workflow_data = []
                for session in workflow_sessions:
                    session_dict = session.to_dict()
                    # Use preserved updated_at if flag is set and value exists, otherwise use current time
                    updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
                    workflow_data.append(
                        {
                            "session_id": session_dict.get("session_id"),
                            "session_type": SessionType.WORKFLOW.value,
                            "workflow_id": session_dict.get("workflow_id"),
                            "user_id": session_dict.get("user_id"),
                            "runs": session_dict.get("runs"),
                            "workflow_data": session_dict.get("workflow_data"),
                            "session_data": session_dict.get("session_data"),
                            "summary": session_dict.get("summary"),
                            "metadata": session_dict.get("metadata"),
                            "created_at": session_dict.get("created_at"),
                            "updated_at": updated_at,
                        }
                    )
                if workflow_data:
                    stmt = mysql.insert(table)
                    stmt = stmt.on_duplicate_key_update(
                        workflow_id=stmt.inserted.workflow_id,
                        user_id=stmt.inserted.user_id,
                        workflow_data=stmt.inserted.workflow_data,
                        session_data=stmt.inserted.session_data,
                        summary=stmt.inserted.summary,
                        metadata=stmt.inserted.metadata,
                        runs=stmt.inserted.runs,
                        updated_at=stmt.inserted.updated_at,
                    )
                    await sess.execute(stmt, workflow_data)
                # Fetch the results for workflow sessions
                workflow_ids = [session.session_id for session in workflow_sessions]
                select_stmt = select(table).where(table.c.session_id.in_(workflow_ids))
                result = await sess.execute(select_stmt)
                fetched_rows = result.fetchall()
                for row in fetched_rows:
                    session_dict = dict(row._mapping)
                    if deserialize:
                        deserialized_workflow_session = WorkflowSession.from_dict(session_dict)
                        if deserialized_workflow_session is None:
                            continue
                        results.append(deserialized_workflow_session)
                    else:
                        results.append(session_dict)
        return results
    except Exception as e:
        log_error(f"Exception during bulk session upsert, falling back to individual upserts: {e}")
        # Fallback to individual upserts; drops sessions whose upsert fails
        return [
            result
            for session in sessions
            if session is not None
            for result in [await self.upsert_session(session, deserialize=deserialize)]
            if result is not None
        ]
# -- Memory methods --
async def delete_user_memory(self, memory_id: str, user_id: Optional[str] = None) -> None:
    """Delete a user memory from the database.

    Args:
        memory_id (str): The ID of the memory to delete.
        user_id (Optional[str]): Optional user ID the memory must belong to.

    Note:
        Always returns None. Errors are caught and logged rather than raised;
        the outcome (deleted or not found) is only reported via debug logs.
    """
    try:
        table = await self._get_table(table_type="memories")
        async with self.async_session_factory() as sess, sess.begin():
            delete_stmt = table.delete().where(table.c.memory_id == memory_id)
            if user_id is not None:
                delete_stmt = delete_stmt.where(table.c.user_id == user_id)
            result = await sess.execute(delete_stmt)
            success = result.rowcount > 0  # type: ignore
            if success:
                log_debug(f"Successfully deleted user memory id: {memory_id}")
            else:
                log_debug(f"No user memory found with id: {memory_id}")
    except Exception as e:
        log_error(f"Error deleting user memory: {e}")
async def delete_user_memories(self, memory_ids: List[str], user_id: Optional[str] = None) -> None:
"""Delete user memories from the database.
Args:
memory_ids (List[str]): The IDs of the memories to delete.
user_id (Optional[str]): Optional user ID to filter deletions.
Raises:
Exception: If an error occurs during deletion.
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
delete_stmt = table.delete().where(table.c.memory_id.in_(memory_ids))
if user_id is not None:
delete_stmt = delete_stmt.where(table.c.user_id == user_id)
result = await sess.execute(delete_stmt)
if result.rowcount == 0: # type: ignore
log_debug(f"No user memories found with ids: {memory_ids}")
else:
log_debug(f"Successfully deleted {result.rowcount} user memories") # type: ignore
except Exception as e:
log_error(f"Error deleting user memories: {e}")
async def get_all_memory_topics(self, user_id: Optional[str] = None) -> List[str]:
"""Get all memory topics from the database.
Args:
user_id (Optional[str]): Optional user ID to filter topics.
Returns:
List[str]: List of memory topics.
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
# MySQL approach: extract JSON array elements differently
stmt = select(table.c.topics)
result = await sess.execute(stmt)
records = result.fetchall()
topics_set = set()
for row in records:
if row[0]:
# Parse JSON array and add topics to set
import json
try:
topics = json.loads(row[0]) if isinstance(row[0], str) else row[0]
if isinstance(topics, list):
topics_set.update(topics)
except Exception:
pass
return list(topics_set)
except Exception as e:
log_error(f"Exception reading from memory table: {e}")
return []
async def get_user_memory(
self, memory_id: str, deserialize: Optional[bool] = True, user_id: Optional[str] = None
) -> Optional[Union[UserMemory, Dict[str, Any]]]:
"""Get a memory from the database.
Args:
memory_id (str): The ID of the memory to get.
deserialize (Optional[bool]): Whether to serialize the memory. Defaults to True.
Returns:
Union[UserMemory, Dict[str, Any], None]:
- When deserialize=True: UserMemory object
- When deserialize=False: UserMemory dictionary
Raises:
Exception: If an error occurs during retrieval.
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
stmt = select(table).where(table.c.memory_id == memory_id)
if user_id is not None:
stmt = stmt.where(table.c.user_id == user_id)
result = await sess.execute(stmt)
row = result.fetchone()
if not row:
return None
memory_raw = dict(row._mapping)
if not deserialize:
return memory_raw
return UserMemory.from_dict(memory_raw)
except Exception as e:
log_error(f"Exception reading from memory table: {e}")
return None
async def get_user_memories(
self,
user_id: Optional[str] = None,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
topics: Optional[List[str]] = None,
search_content: Optional[str] = None,
limit: Optional[int] = None,
page: Optional[int] = None,
sort_by: Optional[str] = None,
sort_order: Optional[str] = None,
deserialize: Optional[bool] = True,
) -> Union[List[UserMemory], Tuple[List[Dict[str, Any]], int]]:
"""Get all memories from the database as UserMemory objects.
Args:
user_id (Optional[str]): The ID of the user to filter by.
agent_id (Optional[str]): The ID of the agent to filter by.
team_id (Optional[str]): The ID of the team to filter by.
topics (Optional[List[str]]): The topics to filter by.
search_content (Optional[str]): The content to search for.
limit (Optional[int]): The maximum number of memories to return.
page (Optional[int]): The page number.
sort_by (Optional[str]): The column to sort by.
sort_order (Optional[str]): The order to sort by.
deserialize (Optional[bool]): Whether to serialize the memories. Defaults to True.
Returns:
Union[List[UserMemory], Tuple[List[Dict[str, Any]], int]]:
- When deserialize=True: List of UserMemory objects
- When deserialize=False: Tuple of (memory dictionaries, total count)
Raises:
Exception: If an error occurs during retrieval.
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
stmt = select(table)
# Filtering
if user_id is not None:
stmt = stmt.where(table.c.user_id == user_id)
if agent_id is not None:
stmt = stmt.where(table.c.agent_id == agent_id)
if team_id is not None:
stmt = stmt.where(table.c.team_id == team_id)
if topics is not None:
# MySQL JSON contains syntax
topic_conditions = []
for topic in topics:
topic_conditions.append(func.json_contains(table.c.topics, f'"{topic}"'))
stmt = stmt.where(and_(*topic_conditions))
if search_content is not None:
stmt = stmt.where(cast(table.c.memory, TEXT).ilike(f"%{search_content}%"))
# Get total count after applying filtering
count_stmt = select(func.count()).select_from(stmt.alias())
total_count = await sess.scalar(count_stmt) or 0
# Sorting
stmt = apply_sorting(stmt, table, sort_by, sort_order)
# Paginating
if limit is not None:
stmt = stmt.limit(limit)
if page is not None:
stmt = stmt.offset((page - 1) * limit)
result = await sess.execute(stmt)
records = result.fetchall()
if not records:
return [] if deserialize else ([], 0)
memories_raw = [dict(record._mapping) for record in records]
if not deserialize:
return memories_raw, total_count
return [UserMemory.from_dict(record) for record in memories_raw]
except Exception as e:
log_error(f"Exception reading from memory table: {e}")
return [] if deserialize else ([], 0)
async def clear_memories(self) -> None:
"""Delete all memories from the database.
Raises:
Exception: If an error occurs during deletion.
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
await sess.execute(table.delete())
except Exception as e:
log_warning(f"Exception deleting all memories: {e}")
# -- Cultural Knowledge methods --
async def clear_cultural_knowledge(self) -> None:
"""Delete all cultural knowledge from the database.
Raises:
Exception: If an error occurs during deletion.
"""
try:
table = await self._get_table(table_type="culture")
async with self.async_session_factory() as sess, sess.begin():
await sess.execute(table.delete())
except Exception as e:
log_warning(f"Exception deleting all cultural knowledge: {e}")
async def delete_cultural_knowledge(self, id: str) -> None:
"""Delete cultural knowledge by ID.
Args:
id (str): The ID of the cultural knowledge to delete.
Raises:
Exception: If an error occurs during deletion.
"""
try:
table = await self._get_table(table_type="culture")
async with self.async_session_factory() as sess, sess.begin():
stmt = table.delete().where(table.c.id == id)
await sess.execute(stmt)
except Exception as e:
log_warning(f"Exception deleting cultural knowledge: {e}")
raise e
async def get_cultural_knowledge(
self, id: str, deserialize: Optional[bool] = True
) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
"""Get cultural knowledge by ID.
Args:
id (str): The ID of the cultural knowledge to retrieve.
deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge object. Defaults to True.
Returns:
Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The cultural knowledge if found, None otherwise.
Raises:
Exception: If an error occurs during retrieval.
"""
try:
table = await self._get_table(table_type="culture")
async with self.async_session_factory() as sess:
stmt = select(table).where(table.c.id == id)
result = await sess.execute(stmt)
row = result.fetchone()
if row is None:
return None
db_row = dict(row._mapping)
if not deserialize:
return db_row
return deserialize_cultural_knowledge_from_db(db_row)
except Exception as e:
log_warning(f"Exception reading cultural knowledge: {e}")
raise e
async def get_all_cultural_knowledge(
self,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
name: Optional[str] = None,
limit: Optional[int] = None,
page: Optional[int] = None,
sort_by: Optional[str] = None,
sort_order: Optional[str] = None,
deserialize: Optional[bool] = True,
) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
"""Get all cultural knowledge with filtering and pagination.
Args:
agent_id (Optional[str]): Filter by agent ID.
team_id (Optional[str]): Filter by team ID.
name (Optional[str]): Filter by name (case-insensitive partial match).
limit (Optional[int]): Maximum number of results to return.
page (Optional[int]): Page number for pagination.
sort_by (Optional[str]): Field to sort by.
sort_order (Optional[str]): Sort order ('asc' or 'desc').
deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge objects. Defaults to True.
Returns:
Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
- When deserialize=True: List of CulturalKnowledge objects
- When deserialize=False: Tuple with list of dictionaries and total count
Raises:
Exception: If an error occurs during retrieval.
"""
try:
table = await self._get_table(table_type="culture")
async with self.async_session_factory() as sess:
# Build query with filters
stmt = select(table)
if agent_id is not None:
stmt = stmt.where(table.c.agent_id == agent_id)
if team_id is not None:
stmt = stmt.where(table.c.team_id == team_id)
if name is not None:
stmt = stmt.where(table.c.name.ilike(f"%{name}%"))
# Get total count
count_stmt = select(func.count()).select_from(stmt.alias())
total_count_result = await sess.execute(count_stmt)
total_count = total_count_result.scalar() or 0
# Apply sorting
stmt = apply_sorting(stmt, table, sort_by, sort_order)
# Apply pagination
if limit is not None:
stmt = stmt.limit(limit)
if page is not None:
stmt = stmt.offset((page - 1) * limit)
# Execute query
result = await sess.execute(stmt)
rows = result.fetchall()
db_rows = [dict(row._mapping) for row in rows]
if not deserialize:
return db_rows, total_count
return [deserialize_cultural_knowledge_from_db(row) for row in db_rows]
except Exception as e:
log_warning(f"Exception reading all cultural knowledge: {e}")
raise e
async def upsert_cultural_knowledge(
self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
"""Upsert cultural knowledge in the database.
Args:
cultural_knowledge (CulturalKnowledge): The cultural knowledge to upsert.
deserialize (Optional[bool]): Whether to deserialize the result. Defaults to True.
Returns:
Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The upserted cultural knowledge.
Raises:
Exception: If an error occurs during upsert.
"""
try:
table = await self._get_table(table_type="culture")
# Generate ID if not present
if cultural_knowledge.id is None:
cultural_knowledge.id = str(uuid4())
# Serialize content, categories, and notes into a JSON dict for DB storage
content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)
async with self.async_session_factory() as sess, sess.begin():
# Use MySQL-specific insert with on_duplicate_key_update
insert_stmt = mysql.insert(table).values(
id=cultural_knowledge.id,
name=cultural_knowledge.name,
summary=cultural_knowledge.summary,
content=content_dict if content_dict else None,
metadata=cultural_knowledge.metadata,
input=cultural_knowledge.input,
created_at=cultural_knowledge.created_at,
updated_at=int(time.time()),
agent_id=cultural_knowledge.agent_id,
team_id=cultural_knowledge.team_id,
)
# Update all fields except id on conflict
upsert_stmt = insert_stmt.on_duplicate_key_update(
name=cultural_knowledge.name,
summary=cultural_knowledge.summary,
content=content_dict if content_dict else None,
metadata=cultural_knowledge.metadata,
input=cultural_knowledge.input,
updated_at=int(time.time()),
agent_id=cultural_knowledge.agent_id,
team_id=cultural_knowledge.team_id,
)
await sess.execute(upsert_stmt)
# Fetch the inserted/updated row
select_stmt = select(table).where(table.c.id == cultural_knowledge.id)
result = await sess.execute(select_stmt)
row = result.fetchone()
if row is None:
return None
db_row = dict(row._mapping)
if not deserialize:
return db_row
# Deserialize from DB format to model format
return deserialize_cultural_knowledge_from_db(db_row)
except Exception as e:
log_warning(f"Exception upserting cultural knowledge: {e}")
raise e
async def get_user_memory_stats(
self, limit: Optional[int] = None, page: Optional[int] = None, user_id: Optional[str] = None
) -> Tuple[List[Dict[str, Any]], int]:
"""Get user memories stats.
Args:
limit (Optional[int]): The maximum number of user stats to return.
page (Optional[int]): The page number.
Returns:
Tuple[List[Dict[str, Any]], int]: A list of dictionaries containing user stats and total count.
Example:
(
[
{
"user_id": "123",
"total_memories": 10,
"last_memory_updated_at": 1714560000,
},
],
total_count: 1,
)
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
stmt = select(
table.c.user_id,
func.count(table.c.memory_id).label("total_memories"),
func.max(table.c.updated_at).label("last_memory_updated_at"),
)
if user_id is not None:
stmt = stmt.where(table.c.user_id == user_id)
else:
stmt = stmt.where(table.c.user_id.is_not(None))
stmt = stmt.group_by(table.c.user_id)
stmt = stmt.order_by(func.max(table.c.updated_at).desc())
count_stmt = select(func.count()).select_from(stmt.alias())
total_count = await sess.scalar(count_stmt) or 0
# Pagination
if limit is not None:
stmt = stmt.limit(limit)
if page is not None:
stmt = stmt.offset((page - 1) * limit)
result = await sess.execute(stmt)
records = result.fetchall()
if not records:
return [], 0
return [
{
"user_id": record.user_id, # type: ignore
"total_memories": record.total_memories,
"last_memory_updated_at": record.last_memory_updated_at,
}
for record in records
], total_count
except Exception as e:
log_error(f"Exception getting user memory stats: {e}")
return [], 0
async def upsert_user_memory(
self, memory: UserMemory, deserialize: Optional[bool] = True
) -> Optional[Union[UserMemory, Dict[str, Any]]]:
"""Upsert a user memory in the database.
Args:
memory (UserMemory): The user memory to upsert.
deserialize (Optional[bool]): Whether to serialize the memory. Defaults to True.
Returns:
Optional[Union[UserMemory, Dict[str, Any]]]:
- When deserialize=True: UserMemory object
- When deserialize=False: UserMemory dictionary
Raises:
Exception: If an error occurs during upsert.
"""
try:
table = await self._get_table(table_type="memories")
async with self.async_session_factory() as sess, sess.begin():
if memory.memory_id is None:
memory.memory_id = str(uuid4())
current_time = int(time.time())
stmt = mysql.insert(table).values(
memory_id=memory.memory_id,
memory=memory.memory,
input=memory.input,
user_id=memory.user_id,
agent_id=memory.agent_id,
team_id=memory.team_id,
topics=memory.topics,
feedback=memory.feedback,
created_at=memory.created_at,
updated_at=memory.created_at,
)
stmt = stmt.on_duplicate_key_update(
memory=memory.memory,
topics=memory.topics,
input=memory.input,
agent_id=memory.agent_id,
team_id=memory.team_id,
feedback=memory.feedback,
updated_at=current_time,
# Preserve created_at on update - don't overwrite existing value
created_at=table.c.created_at,
)
await sess.execute(stmt)
# Fetch the row
select_stmt = select(table).where(table.c.memory_id == memory.memory_id)
result = await sess.execute(select_stmt)
row = result.fetchone()
if row is None:
return None
memory_raw = dict(row._mapping)
log_debug(f"Upserted user memory with id '{memory.memory_id}'")
if not memory_raw or not deserialize:
return memory_raw
return UserMemory.from_dict(memory_raw)
except Exception as e:
log_error(f"Exception upserting user memory: {e}")
return None
    async def upsert_memories(
        self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
    ) -> List[Union[UserMemory, Dict[str, Any]]]:
        """
        Bulk upsert multiple user memories for improved performance on large datasets.

        Executes one multi-row MySQL INSERT ... ON DUPLICATE KEY UPDATE, then
        re-selects the affected rows. If the bulk statement fails for any reason,
        falls back to upserting each memory individually via upsert_user_memory.

        Args:
            memories (List[UserMemory]): List of memories to upsert. Memories without
                a memory_id get a new UUID assigned in place.
            deserialize (Optional[bool]): Whether to deserialize the memories. Defaults to True.
            preserve_updated_at (bool): If True, preserve the updated_at from the memory object.

        Returns:
            List[Union[UserMemory, Dict[str, Any]]]: List of upserted memories as
            re-read from the database (order follows the fetched rows, not the input).
        """
        if not memories:
            return []
        try:
            table = await self._get_table(table_type="memories")
            # Prepare bulk data
            bulk_data = []
            current_time = int(time.time())
            for memory in memories:
                if memory.memory_id is None:
                    memory.memory_id = str(uuid4())
                # Use preserved updated_at if flag is set and value exists, otherwise use current time
                updated_at = memory.updated_at if preserve_updated_at else current_time
                bulk_data.append(
                    {
                        "memory_id": memory.memory_id,
                        "memory": memory.memory,
                        "input": memory.input,
                        "user_id": memory.user_id,
                        "agent_id": memory.agent_id,
                        "team_id": memory.team_id,
                        "topics": memory.topics,
                        "feedback": memory.feedback,
                        "created_at": memory.created_at,
                        "updated_at": updated_at,
                    }
                )
            results: List[Union[UserMemory, Dict[str, Any]]] = []
            async with self.async_session_factory() as sess, sess.begin():
                # Bulk upsert memories using MySQL ON DUPLICATE KEY UPDATE
                stmt = mysql.insert(table)
                # stmt.inserted.<col> references the value of the row being inserted
                # (MySQL VALUES()-style), so each conflicting row takes its own new values.
                stmt = stmt.on_duplicate_key_update(
                    memory=stmt.inserted.memory,
                    topics=stmt.inserted.topics,
                    input=stmt.inserted.input,
                    agent_id=stmt.inserted.agent_id,
                    team_id=stmt.inserted.team_id,
                    feedback=stmt.inserted.feedback,
                    updated_at=stmt.inserted.updated_at,
                    # Preserve created_at on update
                    created_at=table.c.created_at,
                )
                # Passing bulk_data as executemany parameters performs the multi-row insert
                await sess.execute(stmt, bulk_data)
                # Fetch results
                memory_ids = [memory.memory_id for memory in memories if memory.memory_id]
                select_stmt = select(table).where(table.c.memory_id.in_(memory_ids))
                result = await sess.execute(select_stmt)
                fetched_rows = result.fetchall()
                for row in fetched_rows:
                    memory_dict = dict(row._mapping)
                    if deserialize:
                        results.append(UserMemory.from_dict(memory_dict))
                    else:
                        results.append(memory_dict)
                return results
        except Exception as e:
            log_error(f"Exception during bulk memory upsert, falling back to individual upserts: {e}")
            # Fallback to individual upserts
            return [
                result
                for memory in memories
                if memory is not None
                for result in [await self.upsert_user_memory(memory, deserialize=deserialize)]
                if result is not None
            ]
# -- Metrics methods --
async def _get_all_sessions_for_metrics_calculation(
self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
) -> List[Dict[str, Any]]:
"""
Get all sessions of all types (agent, team, workflow) as raw dictionaries.
Args:
start_timestamp (Optional[int]): The start timestamp to filter by. Defaults to None.
end_timestamp (Optional[int]): The end timestamp to filter by. Defaults to None.
Returns:
List[Dict[str, Any]]: List of session dictionaries with session_type field.
Raises:
Exception: If an error occurs during retrieval.
"""
try:
table = await self._get_table(table_type="sessions")
stmt = select(
table.c.user_id,
table.c.session_data,
table.c.runs,
table.c.created_at,
table.c.session_type,
)
if start_timestamp is not None:
stmt = stmt.where(table.c.created_at >= start_timestamp)
if end_timestamp is not None:
stmt = stmt.where(table.c.created_at <= end_timestamp)
async with self.async_session_factory() as sess:
result = await sess.execute(stmt)
records = result.fetchall()
return [dict(record._mapping) for record in records]
except Exception as e:
log_error(f"Exception reading from sessions table: {e}")
return []
    async def _get_metrics_calculation_starting_date(self, table: Table) -> Optional[date]:
        """Get the first date for which metrics calculation is needed:

        1. If there are metrics records, return the date of the first day without a complete metrics record.
        2. If there are no metrics records, return the date of the first recorded session.
        3. If there are no metrics records and no sessions records, return None.

        Args:
            table (Table): The metrics table to inspect.

        Returns:
            Optional[date]: The starting date for which metrics calculation is needed.
        """
        async with self.async_session_factory() as sess:
            # Latest metrics row determines where the previous run stopped
            stmt = select(table).order_by(table.c.date.desc()).limit(1)
            result = await sess.execute(stmt)
            row = result.fetchone()

        # 1. Return the date of the first day without a complete metrics record.
        if row is not None:
            if row.completed:
                # Last day was fully calculated -> resume from the following day
                return row._mapping["date"] + timedelta(days=1)
            else:
                # Last day is incomplete -> recalculate that day
                return row._mapping["date"]

        # 2. No metrics records. Return the date of the first recorded session.
        first_session, _ = await self.get_sessions(sort_by="created_at", sort_order="asc", limit=1, deserialize=False)
        first_session_date = first_session[0]["created_at"] if first_session else None  # type: ignore[index]

        # 3. No metrics records and no sessions records. Return None.
        if first_session_date is None:
            return None

        # created_at is an epoch timestamp; interpret it in UTC for the date boundary
        return datetime.fromtimestamp(first_session_date, tz=timezone.utc).date()
    async def calculate_metrics(self) -> Optional[list[dict]]:
        """Calculate metrics for all dates without complete metrics.

        Pipeline: determine the resume date -> enumerate pending dates -> load all
        sessions in that window -> aggregate per-day metrics -> bulk upsert the records.

        Returns:
            Optional[list[dict]]: The calculated metrics records, or None when there
            is nothing to calculate or an error occurred.
        """
        try:
            table = await self._get_table(table_type="metrics")
            starting_date = await self._get_metrics_calculation_starting_date(table)
            if starting_date is None:
                log_info("No session data found. Won't calculate metrics.")
                return None
            dates_to_process = get_dates_to_calculate_metrics_for(starting_date)
            if not dates_to_process:
                log_info("Metrics already calculated for all relevant dates.")
                return None
            # Window covering all pending dates: midnight UTC of the first date ...
            start_timestamp = int(
                datetime.combine(dates_to_process[0], datetime.min.time()).replace(tzinfo=timezone.utc).timestamp()
            )
            # ... through midnight UTC of the day after the last date (exclusive end).
            end_timestamp = int(
                datetime.combine(dates_to_process[-1] + timedelta(days=1), datetime.min.time())
                .replace(tzinfo=timezone.utc)
                .timestamp()
            )
            sessions = await self._get_all_sessions_for_metrics_calculation(
                start_timestamp=start_timestamp, end_timestamp=end_timestamp
            )
            # Bucket the raw sessions by ISO date for per-day aggregation
            all_sessions_data = fetch_all_sessions_data(
                sessions=sessions, dates_to_process=dates_to_process, start_timestamp=start_timestamp
            )
            if not all_sessions_data:
                log_info("No new session data found. Won't calculate metrics.")
                return None
            results = []
            metrics_records = []
            for date_to_process in dates_to_process:
                date_key = date_to_process.isoformat()
                sessions_for_date = all_sessions_data.get(date_key, {})
                # Skip dates with no sessions
                if not any(len(sessions) > 0 for sessions in sessions_for_date.values()):
                    continue
                metrics_record = calculate_date_metrics(date_to_process, sessions_for_date)
                metrics_records.append(metrics_record)
            if metrics_records:
                # Persist all per-day records in a single transaction
                async with self.async_session_factory() as sess, sess.begin():
                    results = await abulk_upsert_metrics(session=sess, table=table, metrics_records=metrics_records)
            log_debug("Updated metrics calculations")
            return results
        except Exception as e:
            log_error(f"Exception refreshing metrics: {e}")
            return None
async def get_metrics(
self, starting_date: Optional[date] = None, ending_date: Optional[date] = None
) -> Tuple[List[dict], Optional[int]]:
"""Get all metrics matching the given date range.
Args:
starting_date (Optional[date]): The starting date to filter metrics by.
ending_date (Optional[date]): The ending date to filter metrics by.
Returns:
Tuple[List[dict], Optional[int]]: A tuple containing the metrics and the timestamp of the latest update.
Raises:
Exception: If an error occurs during retrieval.
"""
try:
table = await self._get_table(table_type="metrics")
async with self.async_session_factory() as sess, sess.begin():
stmt = select(table)
if starting_date:
stmt = stmt.where(table.c.date >= starting_date)
if ending_date:
stmt = stmt.where(table.c.date <= ending_date)
result = await sess.execute(stmt)
records = result.fetchall()
if not records:
return [], None
# Get the latest updated_at
latest_stmt = select(func.max(table.c.updated_at))
latest_result = await sess.execute(latest_stmt)
latest_updated_at = latest_result.scalar()
return [dict(row._mapping) for row in records], latest_updated_at
except Exception as e:
log_warning(f"Exception getting metrics: {e}")
return [], None
# -- Knowledge methods --
async def delete_knowledge_content(self, id: str):
"""Delete a knowledge row from the database.
Args:
id (str): The ID of the knowledge row to delete.
"""
table = await self._get_table(table_type="knowledge")
try:
async with self.async_session_factory() as sess, sess.begin():
stmt = table.delete().where(table.c.id == id)
await sess.execute(stmt)
except Exception as e:
log_error(f"Exception deleting knowledge content: {e}")
async def get_knowledge_content(self, id: str) -> Optional[KnowledgeRow]:
"""Get a knowledge row from the database.
Args:
id (str): The ID of the knowledge row to get.
Returns:
Optional[KnowledgeRow]: The knowledge row, or None if it doesn't exist.
"""
table = await self._get_table(table_type="knowledge")
try:
async with self.async_session_factory() as sess, sess.begin():
stmt = select(table).where(table.c.id == id)
result = await sess.execute(stmt)
row = result.fetchone()
if row is None:
return None
return KnowledgeRow.model_validate(row._mapping)
except Exception as e:
log_error(f"Exception getting knowledge content: {e}")
return None
async def get_knowledge_contents(
self,
limit: Optional[int] = None,
page: Optional[int] = None,
sort_by: Optional[str] = None,
sort_order: Optional[str] = None,
linked_to: Optional[str] = None,
) -> Tuple[List[KnowledgeRow], int]:
"""Get all knowledge contents from the database.
Args:
limit (Optional[int]): The maximum number of knowledge contents to return.
page (Optional[int]): The page number.
sort_by (Optional[str]): The column to sort by.
sort_order (Optional[str]): The order to sort by.
linked_to (Optional[str]): Filter by linked_to value (knowledge instance name).
Returns:
List[KnowledgeRow]: The knowledge contents.
Raises:
Exception: If an error occurs during retrieval.
"""
table = await self._get_table(table_type="knowledge")
try:
async with self.async_session_factory() as sess, sess.begin():
stmt = select(table)
# Apply linked_to filter if provided
if linked_to is not None:
stmt = stmt.where(table.c.linked_to == linked_to)
# Apply sorting
if sort_by is not None:
stmt = stmt.order_by(getattr(table.c, sort_by) * (1 if sort_order == "asc" else -1))
# Get total count before applying limit and pagination
count_stmt = select(func.count()).select_from(stmt.alias())
total_count = await sess.scalar(count_stmt) or 0
# Apply pagination after count
if limit is not None:
stmt = stmt.limit(limit)
if page is not None:
stmt = stmt.offset((page - 1) * limit)
result = await sess.execute(stmt)
records = result.fetchall()
return [KnowledgeRow.model_validate(record._mapping) for record in records], total_count
except Exception as e:
log_error(f"Exception getting knowledge contents: {e}")
return [], 0
    async def upsert_knowledge_content(self, knowledge_row: KnowledgeRow):
        """Upsert knowledge content in the database.

        Only fields that both exist as columns in the knowledge table and are
        non-None on the model are written; None values therefore never overwrite
        existing column values on update.

        Args:
            knowledge_row (KnowledgeRow): The knowledge row to upsert.

        Returns:
            Optional[KnowledgeRow]: The same knowledge row on success, or None if
            the operation fails or no usable fields were found.
        """
        try:
            table = await self._get_table(table_type="knowledge")
            async with self.async_session_factory() as sess, sess.begin():
                # Get the actual table columns to avoid "unconsumed column names" error
                table_columns = set(table.columns.keys())
                # Only include fields that exist in the table and are not None
                insert_data = {}
                update_fields = {}
                # Map of KnowledgeRow fields to table columns
                field_mapping = {
                    "id": "id",
                    "name": "name",
                    "description": "description",
                    "metadata": "metadata",
                    "type": "type",
                    "size": "size",
                    "linked_to": "linked_to",
                    "access_count": "access_count",
                    "status": "status",
                    "status_message": "status_message",
                    "created_at": "created_at",
                    "updated_at": "updated_at",
                    "external_id": "external_id",
                }
                # Build insert and update data only for fields that exist in the table
                for model_field, table_column in field_mapping.items():
                    if table_column in table_columns:
                        value = getattr(knowledge_row, model_field, None)
                        if value is not None:
                            insert_data[table_column] = value
                            # Don't include ID in update_fields since it's the primary key
                            if table_column != "id":
                                update_fields[table_column] = value
                # Ensure id is always included for the insert
                if "id" in table_columns and knowledge_row.id:
                    insert_data["id"] = knowledge_row.id
                # Handle case where update_fields is empty (all fields are None or don't exist in table)
                if not update_fields:
                    # If we have insert_data, just do an insert without conflict resolution
                    if insert_data:
                        stmt = mysql.insert(table).values(insert_data)
                        await sess.execute(stmt)
                    else:
                        # If we have no data at all, this is an error
                        log_error("No valid fields found for knowledge row upsert")
                        return None
                else:
                    # Normal upsert with conflict resolution
                    stmt = mysql.insert(table).values(insert_data).on_duplicate_key_update(**update_fields)
                    await sess.execute(stmt)
                log_debug(f"Upserted knowledge row with id '{knowledge_row.id}'")
                return knowledge_row
        except Exception as e:
            log_error(f"Error upserting knowledge row: {e}")
            return None
# -- Eval methods --
async def create_eval_run(self, eval_run: EvalRunRecord) -> Optional[EvalRunRecord]:
"""Create an EvalRunRecord in the database.
Args:
eval_run (EvalRunRecord): The eval run to create.
Returns:
Optional[EvalRunRecord]: The created eval run, or None if the operation fails.
Raises:
Exception: If an error occurs during creation.
"""
try:
table = await self._get_table(table_type="evals")
async with self.async_session_factory() as sess, sess.begin():
current_time = int(time.time())
stmt = mysql.insert(table).values(
{"created_at": current_time, "updated_at": current_time, **eval_run.model_dump()}
)
await sess.execute(stmt)
log_debug(f"Created eval run with id '{eval_run.run_id}'")
return eval_run
except Exception as e:
log_error(f"Error creating eval run: {e}")
return None
async def delete_eval_run(self, eval_run_id: str) -> None:
"""Delete an eval run from the database.
Args:
eval_run_id (str): The ID of the eval run to delete.
"""
try:
table = await self._get_table(table_type="evals")
async with self.async_session_factory() as sess, sess.begin():
stmt = table.delete().where(table.c.run_id == eval_run_id)
result = await sess.execute(stmt)
if result.rowcount == 0: # type: ignore
log_warning(f"No eval run found with ID: {eval_run_id}")
else:
log_debug(f"Deleted eval run with ID: {eval_run_id}")
except Exception as e:
log_error(f"Error deleting eval run {eval_run_id}: {e}")
async def delete_eval_runs(self, eval_run_ids: List[str]) -> None:
"""Delete multiple eval runs from the database.
Args:
eval_run_ids (List[str]): List of eval run IDs to delete.
"""
try:
table = await self._get_table(table_type="evals")
async with self.async_session_factory() as sess, sess.begin():
stmt = table.delete().where(table.c.run_id.in_(eval_run_ids))
result = await sess.execute(stmt)
if result.rowcount == 0: # type: ignore
log_warning(f"No eval runs found with IDs: {eval_run_ids}")
else:
log_debug(f"Deleted {result.rowcount} eval runs") # type: ignore
except Exception as e:
log_error(f"Error deleting eval runs {eval_run_ids}: {e}")
async def get_eval_run(
    self, eval_run_id: str, deserialize: Optional[bool] = True
) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
    """Fetch a single eval run by its run ID.

    Args:
        eval_run_id (str): The ID of the eval run to get.
        deserialize (Optional[bool]): Whether to serialize the eval run. Defaults to True.

    Returns:
        Optional[Union[EvalRunRecord, Dict[str, Any]]]:
            - When deserialize=True: EvalRunRecord object
            - When deserialize=False: raw row dictionary
            - None when no row matches or an error occurs
    """
    try:
        table = await self._get_table(table_type="evals")
        async with self.async_session_factory() as sess, sess.begin():
            query = select(table).where(table.c.run_id == eval_run_id)
            row = (await sess.execute(query)).fetchone()
        if row is None:
            return None
        raw_record = dict(row._mapping)
        return EvalRunRecord.model_validate(raw_record) if deserialize else raw_record
    except Exception as e:
        log_error(f"Exception getting eval run {eval_run_id}: {e}")
        return None
async def get_eval_runs(
    self,
    limit: Optional[int] = None,
    page: Optional[int] = None,
    sort_by: Optional[str] = None,
    sort_order: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    model_id: Optional[str] = None,
    filter_type: Optional[EvalFilterType] = None,
    eval_type: Optional[List[EvalType]] = None,
    deserialize: Optional[bool] = True,
) -> Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
    """Get all eval runs from the database.

    Args:
        limit (Optional[int]): The maximum number of eval runs to return.
        page (Optional[int]): The page number (1-indexed). Only applied together with `limit`.
        sort_by (Optional[str]): The column to sort by.
        sort_order (Optional[str]): The order to sort by.
        agent_id (Optional[str]): The ID of the agent to filter by.
        team_id (Optional[str]): The ID of the team to filter by.
        workflow_id (Optional[str]): The ID of the workflow to filter by.
        model_id (Optional[str]): The ID of the model to filter by.
        filter_type (Optional[EvalFilterType]): Filter by component type (agent, team, workflow).
        eval_type (Optional[List[EvalType]]): The type(s) of eval to filter by.
        deserialize (Optional[bool]): Whether to serialize the eval runs. Defaults to True.

    Returns:
        Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
            - When deserialize=True: List of EvalRunRecord objects
            - When deserialize=False: Tuple of (list of dictionaries, total count)

    Raises:
        Exception: If an error occurs during retrieval (caught and logged; an empty result is returned).
    """
    try:
        table = await self._get_table(table_type="evals")
        async with self.async_session_factory() as sess, sess.begin():
            stmt = select(table)

            # Filtering
            if agent_id is not None:
                stmt = stmt.where(table.c.agent_id == agent_id)
            if team_id is not None:
                stmt = stmt.where(table.c.team_id == team_id)
            if workflow_id is not None:
                stmt = stmt.where(table.c.workflow_id == workflow_id)
            if model_id is not None:
                stmt = stmt.where(table.c.model_id == model_id)
            if eval_type is not None and len(eval_type) > 0:
                stmt = stmt.where(table.c.eval_type.in_(eval_type))
            if filter_type is not None:
                if filter_type == EvalFilterType.AGENT:
                    stmt = stmt.where(table.c.agent_id.is_not(None))
                elif filter_type == EvalFilterType.TEAM:
                    stmt = stmt.where(table.c.team_id.is_not(None))
                elif filter_type == EvalFilterType.WORKFLOW:
                    stmt = stmt.where(table.c.workflow_id.is_not(None))

            # Get total count after applying filtering
            count_stmt = select(func.count()).select_from(stmt.alias())
            total_count = await sess.scalar(count_stmt) or 0

            # Sorting
            if sort_by is None:
                stmt = stmt.order_by(table.c.created_at.desc())
            else:
                stmt = apply_sorting(stmt, table, sort_by, sort_order)

            # Paginating. The offset is only meaningful together with a page size:
            # previously a non-None `page` with `limit=None` raised a TypeError
            # ((page - 1) * None) that was silently swallowed by the except below,
            # returning an empty result instead of the unpaginated rows.
            if limit is not None:
                stmt = stmt.limit(limit)
                if page is not None:
                    stmt = stmt.offset((page - 1) * limit)

            result = await sess.execute(stmt)
            records = result.fetchall()
            if not records:
                return [] if deserialize else ([], 0)

            eval_runs_raw = [dict(row._mapping) for row in records]
            if not deserialize:
                return eval_runs_raw, total_count
            return [EvalRunRecord.model_validate(row) for row in eval_runs_raw]
    except Exception as e:
        log_error(f"Exception getting eval runs: {e}")
        return [] if deserialize else ([], 0)
async def rename_eval_run(
    self, eval_run_id: str, name: str, deserialize: Optional[bool] = True
) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
    """Update the name of an eval run in the database.

    Args:
        eval_run_id (str): The ID of the eval run to update.
        name (str): The new name of the eval run.
        deserialize (Optional[bool]): Whether to return an EvalRunRecord (True)
            or the raw row dictionary (False). Defaults to True.

    Returns:
        Optional[Union[EvalRunRecord, Dict[str, Any]]]: The updated eval run,
        or None if the operation fails or the run does not exist.

    Raises:
        Exception: If an error occurs during update (caught and logged; None is returned).
    """
    try:
        table = await self._get_table(table_type="evals")
        async with self.async_session_factory() as sess, sess.begin():
            stmt = (
                table.update().where(table.c.run_id == eval_run_id).values(name=name, updated_at=int(time.time()))
            )
            await sess.execute(stmt)
        # Re-fetch only after the transaction above has committed, so the read
        # (which uses its own session) is guaranteed to observe the new name.
        # get_eval_run already honors `deserialize`, so its result is returned
        # as-is: the previous extra model_validate pass on an already-validated
        # EvalRunRecord was redundant.
        return await self.get_eval_run(eval_run_id=eval_run_id, deserialize=deserialize)
    except Exception as e:
        log_error(f"Error upserting eval run name {eval_run_id}: {e}")
        return None
# -- Migrations --
async def migrate_table_from_v1_to_v2(self, v1_db_schema: str, v1_table_name: str, v1_table_type: str):
    """Migrate all content in the given table to the right v2 table"""
    from typing import Sequence

    from agno.db.migrations.v1_to_v2 import (
        get_all_table_content,
        parse_agent_sessions,
        parse_memories,
        parse_team_sessions,
        parse_workflow_sessions,
    )

    # Pull every row from the legacy table
    old_content: list[dict[str, Any]] = get_all_table_content(
        db=self,
        db_schema=v1_db_schema,
        table_name=v1_table_name,
    )
    if not old_content:
        log_info(f"No content to migrate from table {v1_table_name}")
        return

    # Map each session table type to its parser and its human-readable label
    session_parsers = {
        "agent_sessions": (parse_agent_sessions, "Agent"),
        "team_sessions": (parse_team_sessions, "Team"),
        "workflow_sessions": (parse_workflow_sessions, "Workflow"),
    }

    if v1_table_type in session_parsers:
        parser, label = session_parsers[v1_table_type]
        sessions: Sequence[Union[AgentSession, TeamSession, WorkflowSession]] = parser(old_content)
        for session in sessions:
            await self.upsert_session(session)
        log_info(f"Migrated {len(sessions)} {label} sessions to table: {self.session_table_name}")
    elif v1_table_type == "memories":
        memories: List[UserMemory] = parse_memories(old_content)
        for memory in memories:
            await self.upsert_user_memory(memory)
        log_info(f"Migrated {len(memories)} memories to table: {self.memory_table}")
    else:
        raise ValueError(f"Invalid table type: {v1_table_type}")
# --- Traces ---
def _get_traces_base_query(self, table: Table, spans_table: Optional[Table] = None):
    """Build the shared SELECT used by trace lookups, attaching span aggregates.

    Args:
        table: The traces table.
        spans_table: The spans table (optional).

    Returns:
        SQLAlchemy select statement with total_spans and error_count calculated
        dynamically (constant zeros when the spans table is unavailable).
    """
    from sqlalchemy import case, literal

    if spans_table is None:
        # No spans table: expose constant zero counts so callers always see the same columns
        return select(table, literal(0).label("total_spans"), literal(0).label("error_count"))

    # Aggregate span rows per trace: total count plus how many ended in ERROR
    span_total = func.coalesce(func.count(spans_table.c.span_id), 0).label("total_spans")
    error_total = func.coalesce(
        func.sum(case((spans_table.c.status_code == "ERROR", 1), else_=0)), 0
    ).label("error_count")
    joined = table.outerjoin(spans_table, table.c.trace_id == spans_table.c.trace_id)
    return select(table, span_total, error_total).select_from(joined).group_by(table.c.trace_id)
def _get_trace_component_level_expr(self, workflow_id_col, team_id_col, agent_id_col, name_col):
    """Build a SQL CASE expression yielding the component level of a trace row.

    Component levels (higher = more important):
    - 3: Workflow root (.run or .arun with workflow_id)
    - 2: Team root (.run or .arun with team_id)
    - 1: Agent root (.run or .arun with agent_id)
    - 0: Child span (not a root)

    Args:
        workflow_id_col: SQL column/expression for workflow_id
        team_id_col: SQL column/expression for team_id
        agent_id_col: SQL column/expression for agent_id
        name_col: SQL column/expression for name

    Returns:
        SQLAlchemy CASE expression returning the component level as an integer.
    """
    from sqlalchemy import and_, case, or_

    # A root span's name contains ".run" or ".arun"
    looks_like_root = or_(name_col.like("%.run%"), name_col.like("%.arun%"))

    # Pair each component-id column with its priority, highest first; the CASE
    # takes the first matching branch, so ordering encodes the precedence.
    ranked = ((workflow_id_col, 3), (team_id_col, 2), (agent_id_col, 1))
    branches = [(and_(col.isnot(None), looks_like_root), level) for col, level in ranked]
    return case(*branches, else_=0)
async def upsert_trace(self, trace: "Trace") -> None:
    """Create or update a single trace record in the database.

    Uses INSERT ... ON DUPLICATE KEY UPDATE (upsert) to handle concurrent inserts
    atomically and avoid race conditions.

    Args:
        trace: The Trace object to store (one per trace_id).
    """
    from sqlalchemy import case

    try:
        table = await self._get_table(table_type="traces", create_table_if_not_found=True)
        if table is None:
            # Tracing is best-effort: if the table cannot be resolved, skip silently.
            return
        trace_dict = trace.to_dict()
        # total_spans / error_count are computed at read time from the spans table
        # (see _get_traces_base_query), so they are never persisted on the row.
        trace_dict.pop("total_spans", None)
        trace_dict.pop("error_count", None)
        async with self.async_session_factory() as sess, sess.begin():
            # Use upsert to handle concurrent inserts atomically
            # On conflict, update fields while preserving existing non-null context values
            # and keeping the earliest start_time
            insert_stmt = mysql.insert(table).values(trace_dict)
            # Build component level expressions for comparing trace priority
            # (workflow root > team root > agent root > child span).
            new_level = self._get_trace_component_level_expr(
                insert_stmt.inserted.workflow_id,
                insert_stmt.inserted.team_id,
                insert_stmt.inserted.agent_id,
                insert_stmt.inserted.name,
            )
            existing_level = self._get_trace_component_level_expr(
                table.c.workflow_id,
                table.c.team_id,
                table.c.agent_id,
                table.c.name,
            )
            # Build the ON DUPLICATE KEY UPDATE clause
            # Use LEAST for start_time, GREATEST for end_time to capture full trace duration
            # MySQL stores timestamps as ISO strings, so string comparison works for ISO format
            # Duration is calculated using TIMESTAMPDIFF in microseconds then converted to ms
            upsert_stmt = insert_stmt.on_duplicate_key_update(
                end_time=func.greatest(table.c.end_time, insert_stmt.inserted.end_time),
                start_time=func.least(table.c.start_time, insert_stmt.inserted.start_time),
                # Calculate duration in milliseconds using TIMESTAMPDIFF
                # TIMESTAMPDIFF(MICROSECOND, start, end) / 1000 gives milliseconds
                duration_ms=func.timestampdiff(
                    text("MICROSECOND"),
                    func.least(table.c.start_time, insert_stmt.inserted.start_time),
                    func.greatest(table.c.end_time, insert_stmt.inserted.end_time),
                )
                / 1000,
                status=insert_stmt.inserted.status,
                # Update name only if new trace is from a higher-level component
                # Priority: workflow (3) > team (2) > agent (1) > child spans (0)
                name=case(
                    (new_level > existing_level, insert_stmt.inserted.name),
                    else_=table.c.name,
                ),
                # Preserve existing non-null context values using COALESCE
                run_id=func.coalesce(insert_stmt.inserted.run_id, table.c.run_id),
                session_id=func.coalesce(insert_stmt.inserted.session_id, table.c.session_id),
                user_id=func.coalesce(insert_stmt.inserted.user_id, table.c.user_id),
                agent_id=func.coalesce(insert_stmt.inserted.agent_id, table.c.agent_id),
                team_id=func.coalesce(insert_stmt.inserted.team_id, table.c.team_id),
                workflow_id=func.coalesce(insert_stmt.inserted.workflow_id, table.c.workflow_id),
            )
            await sess.execute(upsert_stmt)
    except Exception as e:
        log_error(f"Error creating trace: {e}")
        # Don't raise - tracing should not break the main application flow
async def get_trace(
    self,
    trace_id: Optional[str] = None,
    run_id: Optional[str] = None,
):
    """Look up a single trace by trace_id or, failing that, by run_id.

    Args:
        trace_id: The unique trace identifier (takes precedence).
        run_id: Filter by run ID (returns first match).

    Returns:
        Optional[Trace]: The trace if found, None otherwise.

    Note:
        If multiple filters are provided, trace_id takes precedence.
        For other filters, the most recent trace is returned.
    """
    try:
        from agno.tracing.schemas import Trace

        table = await self._get_table(table_type="traces")
        if table is None:
            return None
        # Spans are joined in so the result carries total_spans / error_count
        spans_table = await self._get_table(table_type="spans")

        async with self.async_session_factory() as sess:
            stmt = self._get_traces_base_query(table, spans_table)
            if trace_id:
                stmt = stmt.where(table.c.trace_id == trace_id)
            elif run_id:
                stmt = stmt.where(table.c.run_id == run_id)
            else:
                log_debug("get_trace called without any filter parameters")
                return None

            # Newest first, single row
            stmt = stmt.order_by(table.c.start_time.desc()).limit(1)
            matched = (await sess.execute(stmt)).fetchone()

        return Trace.from_dict(dict(matched._mapping)) if matched else None
    except Exception as e:
        log_error(f"Error getting trace: {e}")
        return None
async def get_traces(
    self,
    run_id: Optional[str] = None,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    status: Optional[str] = None,
    start_time: Optional[datetime] = None,
    end_time: Optional[datetime] = None,
    limit: Optional[int] = 20,
    page: Optional[int] = 1,
) -> tuple[List, int]:
    """Get traces matching the provided filters with pagination.

    Args:
        run_id: Filter by run ID.
        session_id: Filter by session ID.
        user_id: Filter by user ID.
        agent_id: Filter by agent ID.
        team_id: Filter by team ID.
        workflow_id: Filter by workflow ID.
        status: Filter by status (OK, ERROR, UNSET).
        start_time: Filter traces starting after this datetime.
        end_time: Filter traces ending before this datetime.
        limit: Maximum number of traces to return per page.
        page: Page number (1-indexed).

    Returns:
        tuple[List[Trace], int]: Tuple of (list of matching traces, total count).
        Returns ([], 0) when the table is missing or on any error.
    """
    try:
        from agno.tracing.schemas import Trace

        log_debug(
            f"get_traces called with filters: run_id={run_id}, session_id={session_id}, user_id={user_id}, agent_id={agent_id}, page={page}, limit={limit}"
        )
        table = await self._get_table(table_type="traces")
        if table is None:
            log_debug("Traces table not found")
            return [], 0
        # Get spans table for JOIN (adds total_spans / error_count columns)
        spans_table = await self._get_table(table_type="spans")
        async with self.async_session_factory() as sess:
            # Build base query with aggregated span counts
            base_stmt = self._get_traces_base_query(table, spans_table)
            # Apply filters
            # NOTE(review): user_id uses `is not None` while the other string
            # filters are truthy checks, so filtering on an empty-string
            # user_id works but an empty-string agent_id is ignored — confirm
            # this asymmetry is intended.
            if run_id:
                base_stmt = base_stmt.where(table.c.run_id == run_id)
            if session_id:
                base_stmt = base_stmt.where(table.c.session_id == session_id)
            if user_id is not None:
                base_stmt = base_stmt.where(table.c.user_id == user_id)
            if agent_id:
                base_stmt = base_stmt.where(table.c.agent_id == agent_id)
            if team_id:
                base_stmt = base_stmt.where(table.c.team_id == team_id)
            if workflow_id:
                base_stmt = base_stmt.where(table.c.workflow_id == workflow_id)
            if status:
                base_stmt = base_stmt.where(table.c.status == status)
            if start_time:
                # Convert datetime to ISO string for comparison
                # (assumes start_time column stores ISO-8601 strings — see upsert_trace)
                base_stmt = base_stmt.where(table.c.start_time >= start_time.isoformat())
            if end_time:
                # Convert datetime to ISO string for comparison
                base_stmt = base_stmt.where(table.c.end_time <= end_time.isoformat())
            # Get total count (wraps the filtered query, so the count matches the filters)
            count_stmt = select(func.count()).select_from(base_stmt.alias())
            count_result = await sess.execute(count_stmt)
            total_count = count_result.scalar() or 0
            # Apply pagination (page is 1-indexed; no offset if page/limit unset)
            offset = (page - 1) * limit if page and limit else 0
            paginated_stmt = base_stmt.order_by(table.c.start_time.desc()).limit(limit).offset(offset)
            result = await sess.execute(paginated_stmt)
            results = result.fetchall()
            traces = [Trace.from_dict(dict(row._mapping)) for row in results]
            return traces, total_count
    except Exception as e:
        log_error(f"Error getting traces: {e}")
        return [], 0
async def get_trace_stats(
    self,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    start_time: Optional[datetime] = None,
    end_time: Optional[datetime] = None,
    limit: Optional[int] = 20,
    page: Optional[int] = 1,
) -> tuple[List[Dict[str, Any]], int]:
    """Get trace statistics grouped by session.

    Args:
        user_id: Filter by user ID.
        agent_id: Filter by agent ID.
        team_id: Filter by team ID.
        workflow_id: Filter by workflow ID.
        start_time: Filter sessions with traces created after this datetime.
        end_time: Filter sessions with traces created before this datetime.
        limit: Maximum number of sessions to return per page.
        page: Page number (1-indexed).

    Returns:
        tuple[List[Dict], int]: Tuple of (list of session stats dicts, total count).
        Each dict contains: session_id, user_id, agent_id, team_id, total_traces,
        workflow_id, first_trace_at, last_trace_at.
    """
    try:
        table = await self._get_table(table_type="traces")
        if table is None:
            log_debug("Traces table not found")
            return [], 0
        async with self.async_session_factory() as sess:
            # Build base query grouped by session_id; rows without a session_id
            # are excluded since they cannot be attributed to a session.
            base_stmt = (
                select(
                    table.c.session_id,
                    table.c.user_id,
                    table.c.agent_id,
                    table.c.team_id,
                    table.c.workflow_id,
                    func.count(table.c.trace_id).label("total_traces"),
                    func.min(table.c.created_at).label("first_trace_at"),
                    func.max(table.c.created_at).label("last_trace_at"),
                )
                .where(table.c.session_id.isnot(None))  # Only sessions with session_id
                .group_by(
                    table.c.session_id, table.c.user_id, table.c.agent_id, table.c.team_id, table.c.workflow_id
                )
            )
            # Apply filters
            # NOTE(review): user_id uses `is not None` while the other filters
            # are truthy checks — empty strings behave differently; confirm intended.
            if user_id is not None:
                base_stmt = base_stmt.where(table.c.user_id == user_id)
            if workflow_id:
                base_stmt = base_stmt.where(table.c.workflow_id == workflow_id)
            if team_id:
                base_stmt = base_stmt.where(table.c.team_id == team_id)
            if agent_id:
                base_stmt = base_stmt.where(table.c.agent_id == agent_id)
            if start_time:
                # Convert datetime to ISO string for comparison
                base_stmt = base_stmt.where(table.c.created_at >= start_time.isoformat())
            if end_time:
                # Convert datetime to ISO string for comparison
                base_stmt = base_stmt.where(table.c.created_at <= end_time.isoformat())
            # Get total count of sessions (count of grouped rows, not traces)
            count_stmt = select(func.count()).select_from(base_stmt.alias())
            count_result = await sess.execute(count_stmt)
            total_count = count_result.scalar() or 0
            # Apply pagination and ordering (most recently active sessions first)
            offset = (page - 1) * limit if page and limit else 0
            paginated_stmt = base_stmt.order_by(func.max(table.c.created_at).desc()).limit(limit).offset(offset)
            result = await sess.execute(paginated_stmt)
            results = result.fetchall()
            # Convert to list of dicts with datetime objects
            stats_list = []
            for row in results:
                # Convert ISO strings to datetime objects
                # NOTE(review): assumes created_at holds ISO-8601 strings
                # (possibly with a trailing "Z"); an integer epoch column
                # would make fromisoformat fail — confirm the column type.
                first_trace_at_str = row.first_trace_at
                last_trace_at_str = row.last_trace_at
                # Parse ISO format strings to datetime objects
                first_trace_at = datetime.fromisoformat(first_trace_at_str.replace("Z", "+00:00"))
                last_trace_at = datetime.fromisoformat(last_trace_at_str.replace("Z", "+00:00"))
                stats_list.append(
                    {
                        "session_id": row.session_id,
                        "user_id": row.user_id,
                        "agent_id": row.agent_id,
                        "team_id": row.team_id,
                        "workflow_id": row.workflow_id,
                        "total_traces": row.total_traces,
                        "first_trace_at": first_trace_at,
                        "last_trace_at": last_trace_at,
                    }
                )
            return stats_list, total_count
    except Exception as e:
        log_error(f"Error getting trace stats: {e}")
        return [], 0
# --- Spans ---
async def create_span(self, span: "Span") -> None:
    """Persist a single span row; errors are logged, never raised.

    Args:
        span: The Span object to store.
    """
    try:
        table = await self._get_table(table_type="spans", create_table_if_not_found=True)
        if table is None:
            return
        insert_stmt = mysql.insert(table).values(span.to_dict())
        async with self.async_session_factory() as sess, sess.begin():
            await sess.execute(insert_stmt)
    except Exception as e:
        log_error(f"Error creating span: {e}")
async def create_spans(self, spans: List) -> None:
    """Create multiple spans in the database as a batch.

    Args:
        spans: List of Span objects to store.
    """
    if not spans:
        return
    try:
        table = await self._get_table(table_type="spans", create_table_if_not_found=True)
        if table is None:
            return
        async with self.async_session_factory() as sess, sess.begin():
            # Single multi-row INSERT instead of one round-trip per span.
            # All rows come from Span.to_dict(), so they share the same keys,
            # which the multi-values form requires.
            stmt = mysql.insert(table).values([span.to_dict() for span in spans])
            await sess.execute(stmt)
    except Exception as e:
        log_error(f"Error creating spans batch: {e}")
async def get_span(self, span_id: str):
    """Fetch one span by its unique identifier.

    Args:
        span_id: The unique span identifier.

    Returns:
        Optional[Span]: The span if found, None otherwise.
    """
    try:
        from agno.tracing.schemas import Span

        table = await self._get_table(table_type="spans")
        if table is None:
            return None
        query = select(table).where(table.c.span_id == span_id)
        async with self.async_session_factory() as sess:
            matched = (await sess.execute(query)).fetchone()
        return None if matched is None else Span.from_dict(dict(matched._mapping))
    except Exception as e:
        log_error(f"Error getting span: {e}")
        return None
async def get_spans(
    self,
    trace_id: Optional[str] = None,
    parent_span_id: Optional[str] = None,
    limit: Optional[int] = 1000,
) -> List:
    """Fetch spans matching the provided filters.

    Args:
        trace_id: Filter by trace ID.
        parent_span_id: Filter by parent span ID.
        limit: Maximum number of spans to return.

    Returns:
        List[Span]: List of matching spans (empty on error or missing table).
    """
    try:
        from agno.tracing.schemas import Span

        table = await self._get_table(table_type="spans")
        if table is None:
            return []

        # Assemble the query before touching the session
        query = select(table)
        if trace_id:
            query = query.where(table.c.trace_id == trace_id)
        if parent_span_id:
            query = query.where(table.c.parent_span_id == parent_span_id)
        if limit:
            query = query.limit(limit)

        async with self.async_session_factory() as sess:
            rows = (await sess.execute(query)).fetchall()
        return [Span.from_dict(dict(r._mapping)) for r in rows]
    except Exception as e:
        log_error(f"Error getting spans: {e}")
        return []
# -- Learning methods (stubs) --
async def get_learning(
    self,
    learning_type: str,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
) -> Optional[Dict[str, Any]]:
    """Fetch a single learning entry. Not yet supported by this backend.

    Raises:
        NotImplementedError: Always; learning storage is a stub here.
    """
    raise NotImplementedError("Learning methods not yet implemented for AsyncMySQLDb")
async def upsert_learning(
    self,
    id: str,
    learning_type: str,
    content: Dict[str, Any],
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
) -> None:
    """Create or update a learning entry. Not yet supported by this backend.

    Raises:
        NotImplementedError: Always; learning storage is a stub here.
    """
    raise NotImplementedError("Learning methods not yet implemented for AsyncMySQLDb")
async def delete_learning(self, id: str) -> bool:
    """Delete a learning entry by ID. Not yet supported by this backend.

    Raises:
        NotImplementedError: Always; learning storage is a stub here.
    """
    raise NotImplementedError("Learning methods not yet implemented for AsyncMySQLDb")
async def get_learnings(
    self,
    learning_type: Optional[str] = None,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    session_id: Optional[str] = None,
    namespace: Optional[str] = None,
    entity_id: Optional[str] = None,
    entity_type: Optional[str] = None,
    limit: Optional[int] = None,
) -> List[Dict[str, Any]]:
    """List learning entries matching the filters. Not yet supported by this backend.

    Raises:
        NotImplementedError: Always; learning storage is a stub here.
    """
    raise NotImplementedError("Learning methods not yet implemented for AsyncMySQLDb")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/db/mysql/async_mysql.py",
"license": "Apache License 2.0",
"lines": 2516,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_db.py | """Integration tests for the setup and main methods of the AsyncMySQLDb class"""
from datetime import datetime, timezone
from unittest.mock import AsyncMock, patch
import pytest
from sqlalchemy import text
from agno.db.mysql import AsyncMySQLDb
@pytest.mark.asyncio
async def test_init_with_db_url():
    """Test initialization with actual database URL format"""
    url = "mysql+asyncmy://ai:ai@localhost:3306/ai"
    database = AsyncMySQLDb(db_url=url, session_table="test_async_mysql_sessions")

    assert database.db_url == url
    assert database.session_table_name == "test_async_mysql_sessions"
    assert database.db_schema == "ai"

    # Smoke-test that the engine can actually reach the server
    async with database.async_session_factory() as sess:
        assert (await sess.execute(text("SELECT 1"))).scalar() == 1

    await database.db_engine.dispose()
@pytest.mark.asyncio
async def test_create_session_table_integration(async_mysql_db_real):
    """Test actual session table creation with MySQL"""
    await async_mysql_db_real._create_table("test_async_mysql_sessions", "sessions", "test_schema")

    params = {"schema": "test_schema", "table": "test_async_mysql_sessions"}

    # The table should now be registered in information_schema
    async with async_mysql_db_real.async_session_factory() as sess:
        found = await sess.execute(
            text(
                "SELECT table_name FROM information_schema.tables WHERE table_schema = :schema AND table_name = :table"
            ),
            params,
        )
        assert found.fetchone() is not None

    # Inspect the column definitions
    async with async_mysql_db_real.async_session_factory() as sess:
        result = await sess.execute(
            text(
                "SELECT column_name, data_type, is_nullable "
                "FROM information_schema.columns "
                "WHERE table_schema = :schema AND table_name = :table "
                "ORDER BY ordinal_position"
            ),
            params,
        )
        columns = {name: {"type": dtype, "nullable": nullable} for name, dtype, nullable in result.fetchall()}

    # Verify key columns have the expected shape
    assert "session_id" in columns
    assert columns["session_id"]["nullable"] == "NO"
    assert "created_at" in columns
    assert columns["created_at"]["type"] == "bigint"
    assert "session_data" in columns
    assert columns["session_data"]["type"] in ["json", "longtext"]
@pytest.mark.asyncio
async def test_create_metrics_table_with_constraints(async_mysql_db_real):
    """Test creating metrics table with unique constraints"""
    await async_mysql_db_real._create_table("test_metrics", "metrics", "test_schema")

    # The metrics table must carry its date/period uniqueness constraint
    async with async_mysql_db_real.async_session_factory() as sess:
        result = await sess.execute(
            text(
                "SELECT constraint_name FROM information_schema.table_constraints "
                "WHERE table_schema = :schema AND table_name = :table "
                "AND constraint_type = 'UNIQUE'"
            ),
            {"schema": "test_schema", "table": "test_metrics"},
        )
        unique_names = [name for (name,) in result.fetchall()]

    assert any("uq_metrics_date_period" in name for name in unique_names)
@pytest.mark.asyncio
async def test_create_table_with_indexes(async_mysql_db_real):
    """Test that indexes are created correctly"""
    await async_mysql_db_real._create_table("test_memories", "memories", "test_schema")

    async with async_mysql_db_real.async_session_factory() as sess:
        result = await sess.execute(
            text(
                "SELECT index_name FROM information_schema.statistics "
                "WHERE table_schema = :schema AND table_name = :table"
            ),
            {"schema": "test_schema", "table": "test_memories"},
        )
        index_names = [name for (name,) in result.fetchall()]

    # The memories table is expected to be indexed on user_id and updated_at
    assert any("user_id" in name for name in index_names)
    assert any("updated_at" in name for name in index_names)
@pytest.mark.asyncio
async def test_get_or_create_existing_table(async_mysql_db_real):
    """Test getting an existing table"""
    # Ensure the table exists up front
    await async_mysql_db_real._create_table("test_async_mysql_sessions", "sessions", "test_schema")

    # Drop the cached Table object so the lookup path is exercised
    if hasattr(async_mysql_db_real, "session_table"):
        delattr(async_mysql_db_real, "session_table")

    # Fetching again must reflect the existing table, not re-create it
    with patch.object(async_mysql_db_real, "_create_table", new=AsyncMock()) as mock_create:
        table = await async_mysql_db_real._get_or_create_table("test_async_mysql_sessions", "sessions", "test_schema")
        mock_create.assert_not_called()

    assert table.name == "test_async_mysql_sessions"
@pytest.mark.asyncio
async def test_full_workflow(async_mysql_db_real):
    """Test a complete workflow of creating and using tables"""
    # Fetching the tables creates them on demand
    session_table = await async_mysql_db_real._get_table("sessions")
    await async_mysql_db_real._get_table("memories")

    # Both Table objects should now be cached on the db instance
    assert hasattr(async_mysql_db_real, "session_table")
    assert hasattr(async_mysql_db_real, "memory_table")

    # Basic smoke test: write a session row and read it back
    async with async_mysql_db_real.async_session_factory() as sess:
        await sess.execute(
            session_table.insert().values(
                session_id="test-session-123",
                session_type="agent",
                created_at=int(datetime.now(timezone.utc).timestamp() * 1000),
                session_data={"test": "data"},
            )
        )
        await sess.commit()

        fetched = await sess.execute(session_table.select().where(session_table.c.session_id == "test-session-123"))
        row = fetched.fetchone()
        assert row is not None
        assert row.session_type == "agent"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_db.py",
"license": "Apache License 2.0",
"lines": 128,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_evals.py | """Integration tests for AsyncMySQLDb eval methods"""
import time
import pytest
from agno.db.schemas.evals import EvalRunRecord, EvalType
@pytest.mark.asyncio
async def test_create_and_get_eval_run(async_mysql_db_real):
    """Test creating and retrieving an eval run"""
    now = int(time.time())
    record = EvalRunRecord(
        run_id="test-eval-1",
        name="Test Eval",
        eval_type=EvalType.ACCURACY,
        agent_id="test-agent",
        model_id="gpt-4",
        eval_data={"score": 0.9, "accuracy": "high"},
        eval_input={"prompt": "test prompt"},
        created_at=now,
        updated_at=now,
    )

    # Persist the run
    created = await async_mysql_db_real.create_eval_run(record)
    assert created is not None
    assert created.run_id == "test-eval-1"

    # Read it back and check round-trip fidelity
    fetched = await async_mysql_db_real.get_eval_run("test-eval-1")
    assert fetched is not None
    assert fetched.name == "Test Eval"
    assert fetched.eval_type == EvalType.ACCURACY
@pytest.mark.asyncio
async def test_get_eval_runs_with_filters(async_mysql_db_real):
    """Test getting eval runs with various filters"""
    now = int(time.time())
    for idx in range(3):
        await async_mysql_db_real.create_eval_run(
            EvalRunRecord(
                run_id=f"test-filter-eval-{idx}",
                name=f"Eval {idx}",
                eval_type=EvalType.ACCURACY if idx % 2 == 0 else EvalType.RELIABILITY,
                agent_id=f"agent-{idx % 2}",
                eval_data={"score": 0.8 + idx * 0.05},
                eval_input={"test": f"input-{idx}"},
                created_at=now,
                updated_at=now,
            )
        )

    # Unfiltered fetch sees everything we wrote
    assert len(await async_mysql_db_real.get_eval_runs()) >= 3

    # agent_id narrows the result set
    assert len(await async_mysql_db_real.get_eval_runs(agent_id="agent-0")) >= 1

    # eval_type narrows to accuracy runs (indexes 0 and 2)
    assert len(await async_mysql_db_real.get_eval_runs(eval_type=[EvalType.ACCURACY])) >= 2
@pytest.mark.asyncio
async def test_delete_eval_run(async_mysql_db_real):
    """Test deleting an eval run"""
    now = int(time.time())
    record = EvalRunRecord(
        run_id="test-delete-eval",
        name="To be deleted",
        eval_type=EvalType.ACCURACY,
        eval_data={"score": 0.75},
        eval_input={"test": "delete"},
        created_at=now,
        updated_at=now,
    )

    # Create, delete, then confirm the run is gone
    await async_mysql_db_real.create_eval_run(record)
    await async_mysql_db_real.delete_eval_run("test-delete-eval")

    assert await async_mysql_db_real.get_eval_run("test-delete-eval") is None
@pytest.mark.asyncio
async def test_delete_multiple_eval_runs(async_mysql_db_real):
    """Test deleting multiple eval runs"""
    now = int(time.time())
    created_ids = [f"test-bulk-delete-eval-{idx}" for idx in range(3)]
    for idx, run_id in enumerate(created_ids):
        await async_mysql_db_real.create_eval_run(
            EvalRunRecord(
                run_id=run_id,
                name=f"Eval {idx}",
                eval_type=EvalType.ACCURACY,
                eval_data={"score": 0.8},
                eval_input={"test": f"bulk-{idx}"},
                created_at=now,
                updated_at=now,
            )
        )

    # One call removes them all
    await async_mysql_db_real.delete_eval_runs(created_ids)

    for run_id in created_ids:
        assert await async_mysql_db_real.get_eval_run(run_id) is None
@pytest.mark.asyncio
async def test_rename_eval_run(async_mysql_db_real):
    """Renaming an eval run changes the name on the returned record."""
    now = int(time.time())
    record = EvalRunRecord(
        run_id="test-rename-eval",
        name="Original Name",
        eval_type=EvalType.ACCURACY,
        eval_data={"score": 0.85},
        eval_input={"test": "rename"},
        created_at=now,
        updated_at=now,
    )
    await async_mysql_db_real.create_eval_run(record)
    # Apply the rename and check the result carries the new name.
    result = await async_mysql_db_real.rename_eval_run(eval_run_id="test-rename-eval", name="New Name")
    assert result is not None
    assert result.name == "New Name"
@pytest.mark.asyncio
async def test_get_eval_runs_pagination(async_mysql_db_real):
    """get_eval_runs honors limit/page and reports the total row count."""
    for idx in range(5):
        await async_mysql_db_real.create_eval_run(
            EvalRunRecord(
                run_id=f"test-pagination-eval-{idx}",
                name=f"Eval {idx}",
                eval_type=EvalType.ACCURACY,
                eval_data={"score": 0.7 + idx * 0.05},
                eval_input={"test": f"page-{idx}"},
                created_at=int(time.time()),
                updated_at=int(time.time()),
            )
        )
    # Request the first page of two raw (non-deserialized) rows.
    first_page, total = await async_mysql_db_real.get_eval_runs(limit=2, page=1, deserialize=False)
    assert len(first_page) <= 2
    assert total >= 5
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_evals.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_knowledge.py | """Integration tests for AsyncMySQLDb knowledge methods"""
import time
import pytest
from agno.db.schemas.knowledge import KnowledgeRow
@pytest.mark.asyncio
async def test_upsert_and_get_knowledge_content(async_mysql_db_real):
    """An upserted knowledge row can be read back intact."""
    now = int(time.time())
    row = KnowledgeRow(
        id="test-knowledge-1",
        name="Test Document",
        description="A test document",
        type="document",
        size=1024,
        created_at=now,
        updated_at=now,
    )
    # The upsert echoes the stored row back.
    stored = await async_mysql_db_real.upsert_knowledge_content(row)
    assert stored is not None
    assert stored.id == "test-knowledge-1"
    # A fresh read by id returns the same name and type.
    fetched = await async_mysql_db_real.get_knowledge_content("test-knowledge-1")
    assert fetched is not None
    assert fetched.name == "Test Document"
    assert fetched.type == "document"
@pytest.mark.asyncio
async def test_get_knowledge_contents_with_pagination(async_mysql_db_real):
    """get_knowledge_contents honors the page size and returns a total."""
    for idx in range(5):
        await async_mysql_db_real.upsert_knowledge_content(
            KnowledgeRow(
                id=f"test-pagination-knowledge-{idx}",
                name=f"Document {idx}",
                description=f"Test document {idx}",
                type="document",
                size=1024 + idx * 100,
                created_at=int(time.time()),
                updated_at=int(time.time()),
            )
        )
    # First page capped at two items; total covers everything stored.
    page_items, total = await async_mysql_db_real.get_knowledge_contents(limit=2, page=1)
    assert len(page_items) <= 2
    assert total >= 5
@pytest.mark.asyncio
async def test_delete_knowledge_content(async_mysql_db_real):
    """Deleted knowledge content is no longer retrievable."""
    content_id = "test-delete-knowledge"
    now = int(time.time())
    row = KnowledgeRow(
        id=content_id,
        name="To be deleted",
        description="Document to be deleted",
        type="document",
        size=512,
        created_at=now,
        updated_at=now,
    )
    # Store the row, then remove it again.
    await async_mysql_db_real.upsert_knowledge_content(row)
    await async_mysql_db_real.delete_knowledge_content(content_id)
    # The lookup must now come back empty.
    assert await async_mysql_db_real.get_knowledge_content(content_id) is None
@pytest.mark.asyncio
async def test_upsert_knowledge_updates_existing(async_mysql_db_real):
    """A second upsert with the same id overwrites the stored fields."""
    now = int(time.time())
    row = KnowledgeRow(
        id="test-update-knowledge",
        name="Original Name",
        description="Original description",
        type="document",
        size=2048,
        created_at=now,
        updated_at=now,
    )
    await async_mysql_db_real.upsert_knowledge_content(row)
    # Mutate the name and upsert again: same id, new data.
    row.name = "Updated Name"
    await async_mysql_db_real.upsert_knowledge_content(row)
    fetched = await async_mysql_db_real.get_knowledge_content("test-update-knowledge")
    assert fetched is not None
    assert fetched.name == "Updated Name"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_knowledge.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_memory.py | """Integration tests for AsyncMySQLDb memory methods"""
import pytest
from agno.db.schemas.memory import UserMemory
@pytest.mark.asyncio
async def test_upsert_and_get_user_memory(async_mysql_db_real):
    """An upserted user memory can be read back with its topics."""
    stored = await async_mysql_db_real.upsert_user_memory(
        UserMemory(
            memory_id="test-memory-1",
            memory="User likes Python programming",
            user_id="test-user",
            topics=["programming", "python"],
        )
    )
    assert stored is not None
    assert stored.memory_id == "test-memory-1"
    # Reading by id returns the same content and topics.
    fetched = await async_mysql_db_real.get_user_memory("test-memory-1")
    assert fetched is not None
    assert fetched.memory == "User likes Python programming"
    assert "python" in fetched.topics
@pytest.mark.asyncio
async def test_get_user_memories_with_filters(async_mysql_db_real):
    """get_user_memories supports filtering by user and by topics."""
    for idx in range(3):
        await async_mysql_db_real.upsert_user_memory(
            UserMemory(
                memory_id=f"test-filter-memory-{idx}",
                memory=f"Memory content {idx}",
                user_id=f"user-{idx % 2}",
                topics=["topic1"] if idx % 2 == 0 else ["topic2"],
            )
        )
    # user-0 owns the even-indexed memories (indices 0 and 2).
    by_user = await async_mysql_db_real.get_user_memories(user_id="user-0")
    assert len(by_user) >= 2
    # topic1 is attached to those same even-indexed memories.
    by_topic = await async_mysql_db_real.get_user_memories(topics=["topic1"])
    assert len(by_topic) >= 2
@pytest.mark.asyncio
async def test_delete_user_memory(async_mysql_db_real):
    """A deleted user memory can no longer be fetched."""
    target_id = "test-delete-memory"
    await async_mysql_db_real.upsert_user_memory(
        UserMemory(
            memory_id=target_id,
            memory="This will be deleted",
            user_id="test-user",
        )
    )
    # Remove it and confirm the lookup comes back empty.
    await async_mysql_db_real.delete_user_memory(target_id)
    assert await async_mysql_db_real.get_user_memory(target_id) is None
@pytest.mark.asyncio
async def test_delete_multiple_user_memories(async_mysql_db_real):
    """Bulk deletion removes every listed memory."""
    stored_ids = []
    for idx in range(3):
        record = UserMemory(
            memory_id=f"test-bulk-delete-{idx}",
            memory=f"Memory {idx}",
            user_id="test-user",
        )
        await async_mysql_db_real.upsert_user_memory(record)
        stored_ids.append(record.memory_id)
    # A single call removes the whole batch.
    await async_mysql_db_real.delete_user_memories(stored_ids)
    for removed_id in stored_ids:
        assert await async_mysql_db_real.get_user_memory(removed_id) is None
@pytest.mark.asyncio
async def test_get_all_memory_topics(async_mysql_db_real):
    """get_all_memory_topics returns the union of topics across memories."""
    seed = [
        UserMemory(memory_id="m1", memory="Memory 1", topics=["ai", "ml"]),
        UserMemory(memory_id="m2", memory="Memory 2", topics=["python", "ai"]),
        UserMemory(memory_id="m3", memory="Memory 3", topics=["ml", "data"]),
    ]
    for record in seed:
        await async_mysql_db_real.upsert_user_memory(record)
    topics = await async_mysql_db_real.get_all_memory_topics()
    # Every distinct topic seeded above must appear in the result.
    for expected in ("ai", "ml", "python", "data"):
        assert expected in topics
@pytest.mark.asyncio
async def test_get_user_memory_stats(async_mysql_db_real):
    """Memory stats are aggregated per user."""
    # Five memories spread across two users (user-0 and user-1).
    for idx in range(5):
        await async_mysql_db_real.upsert_user_memory(
            UserMemory(
                memory_id=f"test-stats-memory-{idx}",
                memory=f"Memory {idx}",
                user_id=f"user-{idx % 2}",
            )
        )
    stats, total = await async_mysql_db_real.get_user_memory_stats()
    assert total >= 2  # at least the two seeded users
    assert len(stats) >= 2
@pytest.mark.asyncio
async def test_upsert_memories(async_mysql_db_real):
    """upsert_memories inserts a batch and echoes the stored records."""
    batch = [
        UserMemory(
            memory_id=f"bulk_memory_{idx}",
            memory=f"Bulk memory content {idx}",
            user_id="bulk_user",
            topics=["bulk", f"topic_{idx}"],
        )
        for idx in range(5)
    ]
    stored = await async_mysql_db_real.upsert_memories(batch)
    assert len(stored) == 5
    # Each returned record mirrors what was sent in, in order.
    for idx, record in enumerate(stored):
        assert isinstance(record, UserMemory)
        assert record.memory_id == f"bulk_memory_{idx}"
        assert record.user_id == "bulk_user"
        assert "bulk" in record.topics
@pytest.mark.asyncio
async def test_upsert_memories_update(async_mysql_db_real):
    """upsert_memories replaces the content and topics of existing rows."""
    # Seed three rows with the original content.
    await async_mysql_db_real.upsert_memories(
        [
            UserMemory(
                memory_id=f"update_memory_{idx}",
                memory=f"Original content {idx}",
                user_id="update_user",
                topics=["original"],
            )
            for idx in range(3)
        ]
    )
    # Upsert the same ids again with new content and topics.
    stored = await async_mysql_db_real.upsert_memories(
        [
            UserMemory(
                memory_id=f"update_memory_{idx}",
                memory=f"Updated content {idx}",
                user_id="update_user",
                topics=["updated", f"topic_{idx}"],
            )
            for idx in range(3)
        ]
    )
    assert len(stored) == 3
    # Old content/topics must be fully replaced, not merged.
    for idx, record in enumerate(stored):
        assert isinstance(record, UserMemory)
        assert record.memory == f"Updated content {idx}"
        assert "updated" in record.topics
        assert "original" not in record.topics
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_memory.py",
"license": "Apache License 2.0",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_metrics.py | """Integration tests for AsyncMySQLDb metrics methods"""
from datetime import date
import pytest
@pytest.mark.asyncio
async def test_calculate_and_get_metrics(async_mysql_db_real):
    """calculate_metrics runs without error once a session exists."""
    from agno.session import AgentSession

    # Seed one session so the metrics job has data to look at.
    await async_mysql_db_real.upsert_session(
        AgentSession(
            session_id="test-metrics-session",
            agent_id="test-agent",
            user_id="test-user",
        )
    )
    # Depending on the date range the calculation may yield nothing,
    # so accept either None or a list.
    outcome = await async_mysql_db_real.calculate_metrics()
    assert outcome is None or isinstance(outcome, list)
@pytest.mark.asyncio
async def test_get_metrics_with_date_range(async_mysql_db_real):
    """get_metrics accepts a start/end date window."""
    metrics, latest_update = await async_mysql_db_real.get_metrics(
        starting_date=date(2024, 1, 1), ending_date=date(2024, 12, 31)
    )
    assert isinstance(metrics, list)
    # No metrics rows means no latest-update timestamp.
    assert latest_update is None or isinstance(latest_update, int)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_metrics.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mysql/test_session.py | """Integration tests for AsyncMySQLDb session methods"""
import pytest
from agno.db.base import SessionType
from agno.session import AgentSession, TeamSession, WorkflowSession
@pytest.mark.asyncio
async def test_upsert_and_get_agent_session(async_mysql_db_real):
    """An upserted agent session can be read back by id and type."""
    stored = await async_mysql_db_real.upsert_session(
        AgentSession(
            session_id="test-agent-session",
            agent_id="test-agent",
            user_id="test-user",
            session_data={"key": "value"},
        )
    )
    assert stored is not None
    assert stored.session_id == "test-agent-session"
    # Read the same session back using id + session type.
    fetched = await async_mysql_db_real.get_session(session_id="test-agent-session", session_type=SessionType.AGENT)
    assert fetched is not None
    assert fetched.agent_id == "test-agent"
    assert fetched.user_id == "test-user"
@pytest.mark.asyncio
async def test_upsert_and_get_team_session(async_mysql_db_real):
    """An upserted team session can be read back by id and type."""
    stored = await async_mysql_db_real.upsert_session(
        TeamSession(
            session_id="test-team-session",
            team_id="test-team",
            user_id="test-user",
            session_data={"key": "value"},
        )
    )
    assert stored is not None
    assert stored.session_id == "test-team-session"
    # Read the same session back using id + session type.
    fetched = await async_mysql_db_real.get_session(session_id="test-team-session", session_type=SessionType.TEAM)
    assert fetched is not None
    assert fetched.team_id == "test-team"
@pytest.mark.asyncio
async def test_upsert_and_get_workflow_session(async_mysql_db_real):
    """An upserted workflow session can be read back by id and type."""
    stored = await async_mysql_db_real.upsert_session(
        WorkflowSession(
            session_id="test-workflow-session",
            workflow_id="test-workflow",
            user_id="test-user",
            session_data={"key": "value"},
        )
    )
    assert stored is not None
    assert stored.session_id == "test-workflow-session"
    # Read the same session back using id + session type.
    fetched = await async_mysql_db_real.get_session(
        session_id="test-workflow-session", session_type=SessionType.WORKFLOW
    )
    assert fetched is not None
    assert fetched.workflow_id == "test-workflow"
@pytest.mark.asyncio
async def test_delete_session(async_mysql_db_real):
    """delete_session reports success and removes the row."""
    await async_mysql_db_real.upsert_session(
        AgentSession(
            session_id="test-delete-session",
            agent_id="test-agent",
        )
    )
    # Deletion must report success...
    assert await async_mysql_db_real.delete_session("test-delete-session") is True
    # ...and the session must no longer be retrievable.
    gone = await async_mysql_db_real.get_session(session_id="test-delete-session", session_type=SessionType.AGENT)
    assert gone is None
@pytest.mark.asyncio
async def test_get_sessions_with_filters(async_mysql_db_real):
    """get_sessions can list all agent sessions or filter by user."""
    for idx in range(3):
        await async_mysql_db_real.upsert_session(
            AgentSession(
                session_id=f"test-filter-session-{idx}",
                agent_id="test-agent",
                user_id=f"user-{idx % 2}",
            )
        )
    everything = await async_mysql_db_real.get_sessions(session_type=SessionType.AGENT)
    assert len(everything) >= 3
    # user-0 owns the even-indexed sessions.
    filtered = await async_mysql_db_real.get_sessions(session_type=SessionType.AGENT, user_id="user-0")
    assert len(filtered) >= 1
@pytest.mark.asyncio
async def test_rename_session(async_mysql_db_real):
    """rename_session rewrites the session_name inside session_data."""
    await async_mysql_db_real.upsert_session(
        AgentSession(
            session_id="test-rename-session",
            agent_id="test-agent",
            session_data={"session_name": "Old Name"},
        )
    )
    # Apply the rename and check the stored session_data reflects it.
    result = await async_mysql_db_real.rename_session(
        session_id="test-rename-session", session_type=SessionType.AGENT, session_name="New Name"
    )
    assert result is not None
    assert result.session_data.get("session_name") == "New Name"
@pytest.mark.asyncio
async def test_upsert_sessions(async_mysql_db_real):
    """Test upsert_sessions with mixed session types (Agent, Team, Workflow)"""
    import time
    # Create agent session
    agent_session = AgentSession(
        session_id="bulk_agent_session_1",
        agent_id="bulk_agent_1",
        user_id="bulk_user_1",
        agent_data={"name": "Bulk Agent 1"},
        session_data={"type": "bulk_test"},
        created_at=int(time.time()),
    )
    # Create team session
    team_session = TeamSession(
        session_id="bulk_team_session_1",
        team_id="bulk_team_1",
        user_id="bulk_user_1",
        team_data={"name": "Bulk Team 1"},
        session_data={"type": "bulk_test"},
        created_at=int(time.time()),
    )
    # Create workflow session
    workflow_session = WorkflowSession(
        session_id="bulk_workflow_session_1",
        workflow_id="bulk_workflow_1",
        user_id="bulk_user_1",
        workflow_data={"name": "Bulk Workflow 1"},
        session_data={"type": "bulk_test"},
        created_at=int(time.time()),
    )
    # Bulk upsert all sessions in a single call
    sessions = [agent_session, team_session, workflow_session]
    results = await async_mysql_db_real.upsert_sessions(sessions)
    # Verify results: one stored session per input session
    assert len(results) == 3
    # Find and verify per session type (result ordering is not assumed)
    agent_result = next(r for r in results if isinstance(r, AgentSession))
    team_result = next(r for r in results if isinstance(r, TeamSession))
    workflow_result = next(r for r in results if isinstance(r, WorkflowSession))
    # Verify agent session round-tripped intact
    assert agent_result.session_id == agent_session.session_id
    assert agent_result.agent_id == agent_session.agent_id
    assert agent_result.agent_data == agent_session.agent_data
    # Verify team session round-tripped intact
    assert team_result.session_id == team_session.session_id
    assert team_result.team_id == team_session.team_id
    assert team_result.team_data == team_session.team_data
    # Verify workflow session round-tripped intact
    assert workflow_result.session_id == workflow_session.session_id
    assert workflow_result.workflow_id == workflow_session.workflow_id
    assert workflow_result.workflow_data == workflow_session.workflow_data
@pytest.mark.asyncio
async def test_upsert_sessions_update(async_mysql_db_real):
    """Test upsert_sessions correctly updates existing sessions"""
    import time
    # Insert two sessions to be updated below
    session1 = AgentSession(
        session_id="bulk_update_1",
        agent_id="agent_1",
        user_id="user_1",
        agent_data={"name": "Original Agent 1"},
        session_data={"version": 1},
        created_at=int(time.time()),
    )
    session2 = AgentSession(
        session_id="bulk_update_2",
        agent_id="agent_2",
        user_id="user_1",
        agent_data={"name": "Original Agent 2"},
        session_data={"version": 1},
        created_at=int(time.time()),
    )
    await async_mysql_db_real.upsert_sessions([session1, session2])
    # Upsert the same session_ids again with new agent_data/session_data
    updated_session1 = AgentSession(
        session_id="bulk_update_1",
        agent_id="agent_1",
        user_id="user_1",
        agent_data={"name": "Updated Agent 1", "updated": True},
        session_data={"version": 2, "updated": True},
        created_at=session1.created_at,  # Keep original created_at
    )
    updated_session2 = AgentSession(
        session_id="bulk_update_2",
        agent_id="agent_2",
        user_id="user_1",
        agent_data={"name": "Updated Agent 2", "updated": True},
        session_data={"version": 2, "updated": True},
        created_at=session2.created_at,  # Keep original created_at
    )
    results = await async_mysql_db_real.upsert_sessions([updated_session1, updated_session2])
    assert len(results) == 2
    # Verify both sessions were updated, not duplicated
    for result in results:
        assert isinstance(result, AgentSession)
        assert result.agent_data is not None and result.agent_data["updated"] is True
        assert result.session_data is not None and result.session_data["version"] == 2
        assert result.session_data is not None and result.session_data["updated"] is True
        # created_at should be preserved across the update
        if result.session_id == "bulk_update_1":
            assert result.created_at == session1.created_at
        else:
            assert result.created_at == session2.created_at
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mysql/test_session.py",
"license": "Apache License 2.0",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_memory.py | import pytest
from agno.agent.agent import Agent
from agno.db.base import UserMemory
def test_get_user_memories(shared_db):
    """Agent.get_user_memories returns the single memory stored for a user."""
    # Start from a clean slate, then store exactly one memory.
    shared_db.clear_memories()
    shared_db.upsert_user_memory(memory=UserMemory(user_id="test_user", memory="test_memory"))
    fetched = Agent(db=shared_db).get_user_memories(user_id="test_user")
    assert len(fetched) == 1
    assert fetched[0].user_id == "test_user"
    assert fetched[0].memory == "test_memory"
@pytest.mark.asyncio
async def test_get_user_memories_async(shared_db):
    """Agent.aget_user_memories mirrors the sync lookup."""
    # Start from a clean slate, then store exactly one memory.
    shared_db.clear_memories()
    shared_db.upsert_user_memory(memory=UserMemory(user_id="test_user", memory="test_memory"))
    agent = Agent(db=shared_db)
    fetched = await agent.aget_user_memories(user_id="test_user")
    assert len(fetched) == 1
    assert fetched[0].user_id == "test_user"
    assert fetched[0].memory == "test_memory"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_memory.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_retries.py | """Integration tests for agent retry functionality."""
from unittest.mock import patch
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.base import RunStatus
def test_agent_retry():
    """An agent configured with retries recovers from a single model failure."""
    model = OpenAIChat(id="gpt-4o-mini")
    agent = Agent(
        name="Retry Agent",
        model=model,
        retries=2,
        delay_between_retries=0,
    )
    calls = {"count": 0}
    real_response = model.response

    def flaky_response(*args, **kwargs):
        # First call blows up; subsequent calls delegate to the real model.
        calls["count"] += 1
        if calls["count"] < 2:
            raise Exception(f"Simulated failure on attempt {calls['count']}")
        return real_response(*args, **kwargs)

    # Patch model.response so the agent's _run retry loop is exercised.
    with patch.object(model, "response", side_effect=flaky_response):
        run_output = agent.run("Test message")
    assert calls["count"] == 2  # one failure + one success
    assert run_output is not None
    assert run_output.status == RunStatus.completed
def test_agent_exponential_backoff():
    """With exponential backoff enabled, retry delays double each attempt."""
    model = OpenAIChat(id="gpt-4o-mini")
    agent = Agent(
        name="Retry Agent",
        model=model,
        retries=2,
        delay_between_retries=1,
        exponential_backoff=True,
    )
    calls = {"count": 0}
    real_response = model.response

    def flaky_response(*args, **kwargs):
        calls["count"] += 1
        # Fail attempts 1 and 2; succeed on the third.
        if calls["count"] < 3:
            raise Exception("Simulated failure")
        return real_response(*args, **kwargs)

    # Patch model.response so the agent's _run retry loop is exercised,
    # and spy on the sleep used between retries.
    with patch.object(model, "response", side_effect=flaky_response):
        with patch("agno.agent._run.time.sleep") as sleep_spy:
            _ = agent.run("Test message")
    # Two waits: 1s (2^0 * 1), then 2s (2^1 * 1).
    assert sleep_spy.call_count == 2
    assert sleep_spy.call_args_list[0][0][0] == 1
    assert sleep_spy.call_args_list[1][0][0] == 2
def test_agent_keyboard_interrupt_stops_retries():
    """A KeyboardInterrupt cancels the run instead of triggering retries."""
    model = OpenAIChat(id="gpt-4o-mini")
    agent = Agent(
        name="Retry Agent",
        model=model,
        retries=5,
        delay_between_retries=0,
    )
    calls = {"count": 0}

    def interrupting_response(*args, **kwargs):
        calls["count"] += 1
        raise KeyboardInterrupt()

    # The agent's _run should catch the interrupt and stop immediately.
    with patch.object(model, "response", side_effect=interrupting_response):
        run_output = agent.run("Test message")
    assert calls["count"] == 1  # no retry after the interrupt
    assert run_output.status == RunStatus.cancelled
    assert run_output.content == "Operation cancelled by user"
@pytest.mark.asyncio
async def test_agent_async_retry():
    """The async run path retries failures just like the sync one."""
    model = OpenAIChat(id="gpt-4o-mini")
    agent = Agent(
        name="Async Retry Agent",
        model=model,
        retries=2,
        delay_between_retries=0,
    )
    calls = {"count": 0}
    real_aresponse = model.aresponse

    async def flaky_aresponse(*args, **kwargs):
        # First call blows up; subsequent calls delegate to the real model.
        calls["count"] += 1
        if calls["count"] < 2:
            raise Exception(f"Simulated failure on attempt {calls['count']}")
        return await real_aresponse(*args, **kwargs)

    # Patch model.aresponse so _arun's retry loop is exercised.
    with patch.object(model, "aresponse", side_effect=flaky_aresponse):
        run_output = await agent.arun("Test message")
    assert calls["count"] == 2  # one failure + one success
    assert run_output is not None
    assert run_output.status == RunStatus.completed
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_retries.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/test_retries.py | """Integration tests for model retry functionality."""
from unittest.mock import patch
import pytest
from agno.agent import Agent
from agno.exceptions import ModelProviderError
from agno.models.openai import OpenAIChat
from agno.run.base import RunStatus
def test_model_retry():
    """A model configured with retries recovers from one ModelProviderError."""
    model = OpenAIChat(
        id="gpt-4o-mini",
        retries=2,
    )
    agent = Agent(
        name="Model Retry Agent",
        model=model,
    )
    calls = {"count": 0}
    real_invoke = model.invoke

    def flaky_invoke(*args, **kwargs):
        # Fail once, then delegate to the real invoke.
        calls["count"] += 1
        if calls["count"] < 2:
            raise ModelProviderError(f"Simulated failure on attempt {calls['count']}")
        return real_invoke(*args, **kwargs)

    with patch.object(model, "invoke", side_effect=flaky_invoke):
        run_output = agent.run("Say hello")
    assert calls["count"] == 2  # one failure + one success
    assert run_output is not None
    assert run_output.status == RunStatus.completed
def test_model_retry_delay():
    """Without exponential backoff, every retry waits the same flat delay."""
    model = OpenAIChat(
        id="gpt-4o-mini",
        retries=2,
        delay_between_retries=2,
    )
    agent = Agent(
        name="Retry Delay Agent",
        model=model,
    )
    calls = {"count": 0}
    real_invoke = model.invoke

    def flaky_invoke(*args, **kwargs):
        calls["count"] += 1
        # Fail the first two attempts; the third succeeds.
        if calls["count"] < 3:
            raise ModelProviderError("Simulated failure")
        return real_invoke(*args, **kwargs)

    with patch.object(model, "invoke", side_effect=flaky_invoke):
        with patch("agno.models.base.sleep") as sleep_spy:
            _ = agent.run("Say hello")
    # Both waits use the flat 2-second delay.
    assert sleep_spy.call_count == 2
    assert sleep_spy.call_args_list[0][0][0] == 2
    assert sleep_spy.call_args_list[1][0][0] == 2
def test_model_exponential_backoff():
    """With exponential backoff, model retry delays double between attempts."""
    model = OpenAIChat(
        id="gpt-4o-mini",
        retries=2,
        delay_between_retries=1,
        exponential_backoff=True,
    )
    agent = Agent(
        name="Exponential Backoff Agent",
        model=model,
    )
    calls = {"count": 0}
    real_invoke = model.invoke

    def flaky_invoke(*args, **kwargs):
        calls["count"] += 1
        # Fail the first two attempts; the third succeeds.
        if calls["count"] < 3:
            raise ModelProviderError("Simulated failure")
        return real_invoke(*args, **kwargs)

    with patch.object(model, "invoke", side_effect=flaky_invoke):
        with patch("agno.models.base.sleep") as sleep_spy:
            _ = agent.run("Say hello")
    # Delays grow as 2^0 * 1 = 1s, then 2^1 * 1 = 2s.
    assert sleep_spy.call_count == 2
    assert sleep_spy.call_args_list[0][0][0] == 1
    assert sleep_spy.call_args_list[1][0][0] == 2
@pytest.mark.asyncio
async def test_model_async_retry():
    """The async invoke path retries on ModelProviderError."""
    import types

    model = OpenAIChat(
        id="gpt-4o-mini",
        retries=2,
        delay_between_retries=0,
    )
    agent = Agent(
        name="Async Model Retry Agent",
        model=model,
    )
    calls = {"count": 0}
    real_ainvoke = model.ainvoke

    async def flaky_ainvoke(self, *args, **kwargs):
        calls["count"] += 1
        if calls["count"] < 2:
            raise ModelProviderError(f"Simulated failure on attempt {calls['count']}")
        return await real_ainvoke(*args, **kwargs)

    # Bind as a real method so the coroutine is awaited correctly.
    model.ainvoke = types.MethodType(flaky_ainvoke, model)
    run_output = await agent.arun("Say hello")
    assert calls["count"] == 2  # one failure + one success
    assert run_output is not None
    assert run_output.status == RunStatus.completed
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/test_retries.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_retries.py | """Integration tests for team retry functionality."""
from unittest.mock import patch
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run.base import RunStatus
from agno.team import Team
def test_team_retry():
    """A team configured with retries recovers from a single model failure."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    model = OpenAIChat(id="gpt-4o-mini")
    team = Team(
        members=[member],
        name="Retry Team",
        model=model,
        retries=2,
        delay_between_retries=0,
    )
    calls = {"count": 0}
    real_response = model.response

    def flaky_response(*args, **kwargs):
        # First call blows up; subsequent calls delegate to the real model.
        calls["count"] += 1
        if calls["count"] < 2:
            raise Exception(f"Simulated failure on attempt {calls['count']}")
        return real_response(*args, **kwargs)

    # Patch the leader model's response so the team retry loop runs.
    with patch.object(model, "response", side_effect=flaky_response):
        run_output = team.run("Test message")
    assert calls["count"] == 2  # one failure + one success
    assert run_output is not None
    assert run_output.status == RunStatus.completed
def test_team_exponential_backoff():
    """Team retry delays double when exponential backoff is enabled."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    model = OpenAIChat(id="gpt-4o-mini")
    team = Team(
        members=[member],
        name="Retry Team",
        model=model,
        retries=2,
        delay_between_retries=1,
        exponential_backoff=True,
    )
    calls = {"count": 0}
    real_response = model.response

    def flaky_response(*args, **kwargs):
        calls["count"] += 1
        # Fail attempts 1 and 2; succeed on the third.
        if calls["count"] < 3:
            raise Exception("Simulated failure")
        return real_response(*args, **kwargs)

    # Patch model.response so the team's _run retry loop is exercised,
    # and spy on the sleep used between retries.
    with patch.object(model, "response", side_effect=flaky_response):
        with patch("agno.team._run.time.sleep") as sleep_spy:
            _ = team.run("Test message")
    # Two waits: 1s (2^0 * 1), then 2s (2^1 * 1).
    assert sleep_spy.call_count == 2
    assert sleep_spy.call_args_list[0][0][0] == 1
    assert sleep_spy.call_args_list[1][0][0] == 2
def test_team_keyboard_interrupt_stops_retries():
    """A KeyboardInterrupt cancels the team run without retrying."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    model = OpenAIChat(id="gpt-4o-mini")
    team = Team(
        members=[member],
        name="Retry Team",
        model=model,
        retries=5,
        delay_between_retries=0,
    )
    calls = {"count": 0}

    def interrupting_response(*args, **kwargs):
        calls["count"] += 1
        raise KeyboardInterrupt()

    # The team's _run should catch the interrupt and stop immediately.
    with patch.object(model, "response", side_effect=interrupting_response):
        run_output = team.run("Test message")
    assert calls["count"] == 1  # no retries after the interrupt
    assert run_output.status == RunStatus.cancelled
    assert run_output.content == "Operation cancelled by user"
@pytest.mark.asyncio
async def test_team_async_retry():
    """The async team run path retries failures as well."""
    member = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    team = Team(
        members=[member],
        name="Async Retry Team",
        model=OpenAIChat(id="gpt-4o-mini"),
        retries=2,
        delay_between_retries=0,
    )
    calls = {"count": 0}
    real_aresponse = team.model.aresponse  # type: ignore

    async def flaky_aresponse(*args, **kwargs):
        # First call blows up; subsequent calls delegate to the real model.
        calls["count"] += 1
        if calls["count"] < 2:
            raise Exception(f"Simulated failure on attempt {calls['count']}")
        return await real_aresponse(*args, **kwargs)

    with patch.object(team.model, "aresponse", side_effect=flaky_aresponse):
        run_output = await team.arun("Test message")
    assert calls["count"] == 2  # one failure + one success
    assert run_output is not None
    assert run_output.status == RunStatus.completed
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_retries.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/integrations/test_memori.py | """
Unit tests for Memori integration with Agno agents.
Tests the integration pattern using Memori.llm.register().
"""
import os
import tempfile
from unittest.mock import patch
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from agno.agent import Agent
from agno.models.openai import OpenAIChat
# Skip all tests if memori is not installed
pytest.importorskip("memori")
from memori import Memori
@pytest.fixture
def temp_db():
    """Yield the path of a throwaway SQLite database file; remove it on teardown."""
    handle, db_path = tempfile.mkstemp(suffix=".db")
    # Close the OS-level handle immediately; only the path is needed.
    os.close(handle)
    yield db_path
    if os.path.exists(db_path):
        os.unlink(db_path)
@pytest.fixture
def db_session(temp_db):
    """Return a sessionmaker bound to the temporary SQLite database."""
    bound_engine = create_engine(f"sqlite:///{temp_db}")
    return sessionmaker(bind=bound_engine)
@pytest.fixture
def openai_model():
    """Build an OpenAIChat model with a dummy API key and a warmed-up client."""
    with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
        chat_model = OpenAIChat(id="gpt-4o-mini")
        # Instantiate the client eagerly so later registration sees it cached.
        chat_model.get_client()
        return chat_model
@pytest.fixture
def openai_model_factory():
    """Factory fixture returning a builder for fresh OpenAI models.

    Memori's llm.register() wraps the client object, so registering the same
    cached client twice causes provider detection to fail. Use this fixture
    when a test needs multiple independent Memori registrations.
    """

    def _build_model():
        with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
            fresh_model = OpenAIChat(id="gpt-4o-mini")
            fresh_model.get_client()
            return fresh_model

    return _build_model
class TestMemoriIntegration:
    """Unit tests covering the Memori <-> Agno integration surface."""

    def test_memori_initialization(self, db_session, openai_model):
        """Registering an OpenAI client with Memori yields a configured instance."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        memori_instance.attribution(entity_id="test-entity", process_id="test-process")
        assert memori_instance is not None
        assert memori_instance.config is not None

    def test_memori_storage_build(self, db_session, openai_model):
        """Building Memori storage completes without raising."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        memori_instance.attribution(entity_id="test-entity", process_id="test-process")
        # Building storage must not raise.
        memori_instance.config.storage.build()
        assert True

    def test_agent_with_memori(self, db_session, openai_model):
        """An Agno agent can be constructed alongside a registered Memori instance."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        memori_instance.attribution(entity_id="test-agent", process_id="test-session")
        memori_instance.config.storage.build()
        agent = Agent(
            model=openai_model,
            instructions=["You are a helpful assistant."],
            markdown=True,
        )
        assert agent is not None
        assert agent.model == openai_model

    def test_multiple_memori_instances(self, db_session, openai_model_factory):
        """Two Memori instances with distinct attributions can coexist."""
        first_model = openai_model_factory()
        first_memori = Memori(conn=db_session).llm.register(first_model.get_client())
        first_memori.attribution(entity_id="entity-1", process_id="process-1")
        second_model = openai_model_factory()
        second_memori = Memori(conn=db_session).llm.register(second_model.get_client())
        second_memori.attribution(entity_id="entity-2", process_id="process-2")
        assert first_memori is not None
        assert second_memori is not None

    def test_memori_with_custom_db_path(self, openai_model):
        """Memori works against an explicitly provided SQLite database path."""
        with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp:
            db_path = tmp.name
        try:
            engine = create_engine(f"sqlite:///{db_path}")
            Session = sessionmaker(bind=engine)
            memori_instance = Memori(conn=Session).llm.register(openai_model.get_client())
            memori_instance.attribution(entity_id="test", process_id="test")
            memori_instance.config.storage.build()
            assert memori_instance is not None
            assert os.path.exists(db_path)
        finally:
            if os.path.exists(db_path):
                os.unlink(db_path)

    def test_memori_config_exists(self, db_session, openai_model):
        """The registered Memori instance exposes config and config.storage."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        assert hasattr(memori_instance, "config")
        assert hasattr(memori_instance.config, "storage")

    def test_memori_attribution_required(self, db_session, openai_model):
        """Setting attribution on a Memori instance does not raise."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        memori_instance.attribution(entity_id="test-entity", process_id="test-process")
        assert True
class TestMemoriWithAgent:
    """Agent-facing integration tests for Memori."""

    def test_agent_memory_persistence(self, db_session, openai_model):
        """An agent can be built on a model whose client is Memori-registered."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        memori_instance.attribution(entity_id="test-user", process_id="test-conversation")
        memori_instance.config.storage.build()
        agent = Agent(
            model=openai_model,
            instructions=["You are a helpful assistant."],
            markdown=True,
        )
        # Agent construction must succeed with the registered client.
        assert agent is not None

    def test_agent_with_different_entity_ids(self, db_session, openai_model_factory):
        """Separate agents can use separate Memori attributions."""
        first_model = openai_model_factory()
        first_memori = Memori(conn=db_session).llm.register(first_model.get_client())
        first_memori.attribution(entity_id="user-1", process_id="session-1")
        first_memori.config.storage.build()
        first_agent = Agent(
            model=first_model,
            instructions=["You are agent 1."],
        )
        second_model = openai_model_factory()
        second_memori = Memori(conn=db_session).llm.register(second_model.get_client())
        second_memori.attribution(entity_id="user-2", process_id="session-2")
        second_memori.config.storage.build()
        second_agent = Agent(
            model=second_model,
            instructions=["You are agent 2."],
        )
        assert first_agent is not None
        assert second_agent is not None
class TestMemoriConfiguration:
    """Tests for Memori configuration options."""

    def test_memori_with_different_db_backends(self, openai_model):
        """Memori runs against a real SQLite backend."""
        with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp:
            db_path = tmp.name
        try:
            engine = create_engine(f"sqlite:///{db_path}")
            Session = sessionmaker(bind=engine)
            memori_instance = Memori(conn=Session).llm.register(openai_model.get_client())
            memori_instance.attribution(entity_id="test", process_id="test")
            memori_instance.config.storage.build()
            assert memori_instance is not None
        finally:
            if os.path.exists(db_path):
                os.unlink(db_path)

    def test_memori_storage_build_idempotent(self, db_session, openai_model):
        """Calling storage.build() repeatedly is safe."""
        memori_instance = Memori(conn=db_session).llm.register(openai_model.get_client())
        memori_instance.attribution(entity_id="test", process_id="test")
        # A second build must not raise.
        memori_instance.config.storage.build()
        memori_instance.config.storage.build()
        assert True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/integrations/test_memori.py",
"license": "Apache License 2.0",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/redshift.py | import csv
from os import getenv
from typing import Any, Dict, List, Optional
try:
import redshift_connector
from redshift_connector import Connection
except ImportError:
raise ImportError("`redshift_connector` not installed. Please install using `pip install redshift-connector`.")
from agno.tools import Toolkit
from agno.utils.log import log_debug, log_error, log_info
class RedshiftTools(Toolkit):
    """
    A toolkit for interacting with Amazon Redshift databases.

    Supports these authentication methods:
    - Standard username and password authentication
    - IAM authentication with AWS profile
    - IAM authentication with AWS credentials

    Args:
        host (Optional[str]): Redshift cluster endpoint hostname. Falls back to REDSHIFT_HOST env var.
        port (int): Redshift cluster port number. Default is 5439.
        database (Optional[str]): Database name to connect to. Falls back to REDSHIFT_DATABASE env var.
        user (Optional[str]): Username for standard authentication.
        password (Optional[str]): Password for standard authentication.
        iam (bool): Enable IAM authentication. Default is False.
        cluster_identifier (Optional[str]): Redshift cluster identifier for IAM auth with provisioned clusters. Falls back to REDSHIFT_CLUSTER_IDENTIFIER env var.
        region (Optional[str]): AWS region for IAM credential retrieval. Falls back to AWS_REGION or AWS_DEFAULT_REGION env vars.
        db_user (Optional[str]): Database user for IAM auth with provisioned clusters. Falls back to REDSHIFT_DB_USER env var.
        access_key_id (Optional[str]): AWS access key ID for IAM auth. Falls back to AWS_ACCESS_KEY_ID env var.
        secret_access_key (Optional[str]): AWS secret access key for IAM auth. Falls back to AWS_SECRET_ACCESS_KEY env var.
        session_token (Optional[str]): AWS session token for temporary credentials. Falls back to AWS_SESSION_TOKEN env var.
        profile (Optional[str]): AWS profile name for IAM auth. Falls back to AWS_PROFILE env var.
        ssl (bool): Enable SSL connection. Default is True.
        table_schema (str): Default schema for table operations. Default is "public".
    """

    # NOTE(review): presumably consumed by the Toolkit framework to call
    # connect() before first use -- confirm against the Toolkit base class.
    _requires_connect: bool = True

    def __init__(
        self,
        # Connection parameters
        host: Optional[str] = None,
        port: int = 5439,
        database: Optional[str] = None,
        # Standard authentication (username/password)
        user: Optional[str] = None,
        password: Optional[str] = None,
        # IAM Authentication
        iam: bool = False,
        cluster_identifier: Optional[str] = None,
        region: Optional[str] = None,
        db_user: Optional[str] = None,
        # AWS Credentials (for IAM auth)
        access_key_id: Optional[str] = None,
        secret_access_key: Optional[str] = None,
        session_token: Optional[str] = None,
        profile: Optional[str] = None,
        # Connection settings
        ssl: bool = True,
        table_schema: str = "public",
        **kwargs,
    ):
        # Connection parameters (explicit args win over environment variables)
        self.host: Optional[str] = host or getenv("REDSHIFT_HOST")
        self.port: int = port
        self.database: Optional[str] = database or getenv("REDSHIFT_DATABASE")
        # Standard authentication
        self.user: Optional[str] = user
        self.password: Optional[str] = password
        # IAM authentication parameters
        self.iam: bool = iam
        self.cluster_identifier: Optional[str] = cluster_identifier or getenv("REDSHIFT_CLUSTER_IDENTIFIER")
        self.region: Optional[str] = region or getenv("AWS_REGION") or getenv("AWS_DEFAULT_REGION")
        self.db_user: Optional[str] = db_user or getenv("REDSHIFT_DB_USER")
        # AWS credentials
        self.access_key_id: Optional[str] = access_key_id or getenv("AWS_ACCESS_KEY_ID")
        self.secret_access_key: Optional[str] = secret_access_key or getenv("AWS_SECRET_ACCESS_KEY")
        self.session_token: Optional[str] = session_token or getenv("AWS_SESSION_TOKEN")
        self.profile: Optional[str] = profile or getenv("AWS_PROFILE")
        # Connection settings
        self.ssl: bool = ssl
        self.table_schema: str = table_schema
        # Connection instance (created lazily on first use)
        self._connection: Optional[Connection] = None
        tools: List[Any] = [
            self.show_tables,
            self.describe_table,
            self.summarize_table,
            self.inspect_query,
            self.run_query,
            self.export_table_to_path,
        ]
        super().__init__(name="redshift_tools", tools=tools, **kwargs)

    def connect(self) -> Connection:
        """
        Establish a connection to the Redshift database.

        Returns:
            The database connection object.

        Raises:
            redshift_connector.Error: If connection fails.
        """
        if self._connection is not None:
            log_debug("Connection already established, reusing existing connection")
            return self._connection
        log_info("Establishing connection to Redshift")
        self._connection = redshift_connector.connect(**self._get_connection_kwargs())
        return self._connection

    def close(self) -> None:
        """
        Close the database connection if it exists.
        """
        if self._connection is not None:
            log_info("Closing Redshift connection")
            try:
                self._connection.close()
            except Exception:
                pass  # Connection might already be closed
            self._connection = None

    @property
    def is_connected(self) -> bool:
        """Check if a connection is currently established."""
        return self._connection is not None

    def _ensure_connection(self) -> Connection:
        """
        Ensure a connection exists, creating one if necessary.

        Returns:
            The database connection object.
        """
        if self._connection is None:
            return self.connect()
        return self._connection

    def _get_connection_kwargs(self) -> Dict[str, Any]:
        """Build connection kwargs from instance."""
        connection_kwargs: Dict[str, Any] = {}
        # Common connection parameters
        if self.host:
            connection_kwargs["host"] = self.host
        if self.port:
            connection_kwargs["port"] = self.port
        if self.database:
            connection_kwargs["database"] = self.database
        connection_kwargs["ssl"] = self.ssl
        # IAM Authentication
        if self.iam:
            connection_kwargs["iam"] = True
            # For provisioned clusters (not serverless)
            if self.cluster_identifier:
                connection_kwargs["cluster_identifier"] = self.cluster_identifier
            # db_user required for provisioned clusters with IAM
            if self.db_user:
                connection_kwargs["db_user"] = self.db_user
            # Region for IAM credential retrieval
            if self.region:
                connection_kwargs["region"] = self.region
            # AWS credentials - either profile or explicit
            if self.profile:
                connection_kwargs["profile"] = self.profile
            else:
                # Explicit AWS credentials
                if self.access_key_id:
                    connection_kwargs["access_key_id"] = self.access_key_id
                if self.secret_access_key:
                    connection_kwargs["secret_access_key"] = self.secret_access_key
                if self.session_token:
                    connection_kwargs["session_token"] = self.session_token
        else:
            # Standard username/password authentication
            if self.user:
                connection_kwargs["user"] = self.user
            if self.password:
                connection_kwargs["password"] = self.password
        return connection_kwargs

    def _execute_query(self, query: str, params: Optional[tuple] = None) -> str:
        """
        Execute a SQL statement and format any result set as plain text.

        Args:
            query: The SQL statement to execute.
            params: Optional parameters bound by the driver, keeping
                caller-supplied values out of the SQL string itself.

        Returns:
            A comma-joined header plus one comma-joined line per row, a
            success message for statements without a result set, or an
            error string on failure (errors are returned, not raised).
        """
        try:
            connection = self._ensure_connection()
            with connection.cursor() as cursor:
                log_debug("Running Redshift query")
                cursor.execute(query, params)
                # description is None for statements without a result set.
                if cursor.description is None:
                    return "Query executed successfully."
                columns = [desc[0] for desc in cursor.description]
                rows = cursor.fetchall()
                if not rows:
                    return f"Query returned no results.\nColumns: {', '.join(columns)}"
                # NOTE(review): naive comma join -- values containing commas or
                # newlines are not escaped (unlike the csv-based export below).
                header = ",".join(columns)
                data_rows = [",".join(map(str, row)) for row in rows]
                return f"{header}\n" + "\n".join(data_rows)
        except redshift_connector.Error as e:
            log_error(f"Database error: {e}")
            # Roll back so the connection stays usable after a failed statement.
            if self._connection:
                try:
                    self._connection.rollback()
                except Exception:
                    pass  # Connection might be closed
            return f"Error executing query: {e}"
        except Exception as e:
            log_error(f"An unexpected error occurred: {e}")
            return f"An unexpected error occurred: {e}"

    def show_tables(self) -> str:
        """Lists all tables in the configured schema."""
        stmt = "SELECT table_name FROM information_schema.tables WHERE table_schema = %s;"
        return self._execute_query(stmt, (self.table_schema,))

    def describe_table(self, table: str) -> str:
        """
        Provides the schema (column name, data type, is nullable) for a given table.

        Args:
            table: The name of the table to describe.

        Returns:
            A string describing the table's columns and data types.
        """
        stmt = """
            SELECT column_name, data_type, is_nullable
            FROM information_schema.columns
            WHERE table_schema = %s AND table_name = %s;
        """
        return self._execute_query(stmt, (self.table_schema, table))

    def summarize_table(self, table: str) -> str:
        """
        Computes and returns key summary statistics for a table's columns.

        Args:
            table: The name of the table to summarize.

        Returns:
            A string containing a summary of the table.
        """
        try:
            connection = self._ensure_connection()
            with connection.cursor() as cursor:
                # First, get column information using a parameterized query
                schema_query = """
                    SELECT column_name, data_type
                    FROM information_schema.columns
                    WHERE table_schema = %s AND table_name = %s;
                """
                cursor.execute(schema_query, (self.table_schema, table))
                columns = cursor.fetchall()
                if not columns:
                    return f"Error: Table '{table}' not found in schema '{self.table_schema}'."
                summary_parts = [f"Summary for table: {table}\n"]
                # Redshift uses schema.table format for fully qualified names
                full_table_name = f'"{self.table_schema}"."{table}"'
                for col in columns:
                    col_name = col[0]
                    data_type = col[1]
                    query = None
                    # Numeric columns: report range, average, standard deviation.
                    if any(
                        t in data_type.lower()
                        for t in [
                            "integer",
                            "int",
                            "bigint",
                            "smallint",
                            "numeric",
                            "decimal",
                            "real",
                            "double precision",
                            "float",
                        ]
                    ):
                        query = f"""
                            SELECT
                                COUNT(*) AS total_rows,
                                COUNT("{col_name}") AS non_null_rows,
                                MIN("{col_name}") AS min,
                                MAX("{col_name}") AS max,
                                AVG("{col_name}") AS average,
                                STDDEV("{col_name}") AS std_deviation
                            FROM {full_table_name};
                        """
                    # Text-like columns: report cardinality and average length.
                    elif any(t in data_type.lower() for t in ["char", "varchar", "text", "uuid"]):
                        query = f"""
                            SELECT
                                COUNT(*) AS total_rows,
                                COUNT("{col_name}") AS non_null_rows,
                                COUNT(DISTINCT "{col_name}") AS unique_values,
                                AVG(LEN("{col_name}")) as avg_length
                            FROM {full_table_name};
                        """
                    if query:
                        cursor.execute(query)
                        stats = cursor.fetchone()
                        summary_parts.append(f"\n--- Column: {col_name} (Type: {data_type}) ---")
                        if stats is not None:
                            stats_dict = dict(zip([desc[0] for desc in cursor.description], stats))
                            for key, value in stats_dict.items():
                                # NOTE(review): `value is not None` is redundant after
                                # isinstance(value, float) -- None is never a float.
                                val_str = (
                                    f"{value:.2f}" if isinstance(value, float) and value is not None else str(value)
                                )
                                summary_parts.append(f"  {key}: {val_str}")
                        else:
                            summary_parts.append("  No statistics available")
                return "\n".join(summary_parts)
        except redshift_connector.Error as e:
            # NOTE(review): unlike _execute_query, no rollback is attempted here;
            # confirm whether a failed statement should reset the connection.
            return f"Error summarizing table: {e}"

    def inspect_query(self, query: str) -> str:
        """
        Shows the execution plan for a SQL query (using EXPLAIN).

        Args:
            query: The SQL query to inspect.

        Returns:
            The query's execution plan.
        """
        return self._execute_query(f"EXPLAIN {query}")

    def export_table_to_path(self, table: str, path: str) -> str:
        """
        Exports a table's data to a local CSV file.

        Args:
            table: The name of the table to export.
            path: The local file path to save the file.

        Returns:
            A confirmation message with the file path.
        """
        log_debug(f"Exporting table {table} to {path}")
        full_table_name = f'"{self.table_schema}"."{table}"'
        stmt = f"SELECT * FROM {full_table_name};"
        try:
            connection = self._ensure_connection()
            with connection.cursor() as cursor:
                cursor.execute(stmt)
                if cursor.description is None:
                    return f"Error: Query returned no description for table '{table}'."
                columns = [desc[0] for desc in cursor.description]
                with open(path, "w", newline="", encoding="utf-8") as f:
                    writer = csv.writer(f)
                    writer.writerow(columns)
                    # writerows iterates the cursor directly rather than fetchall().
                    writer.writerows(cursor)
            return f"Successfully exported table '{table}' to '{path}'."
        except (redshift_connector.Error, IOError) as e:
            # Roll back so the connection stays usable after a failed export.
            if self._connection:
                try:
                    self._connection.rollback()
                except Exception:
                    pass  # Connection might be closed
            return f"Error exporting table: {e}"

    def run_query(self, query: str) -> str:
        """
        Runs a read-only SQL query and returns the result.

        Args:
            query: The SQL query to run.

        Returns:
            The query result as a formatted string.
        """
        return self._execute_query(query)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/redshift.py",
"license": "Apache License 2.0",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_redshift.py | from unittest.mock import Mock, mock_open, patch
import pytest
try:
import redshift_connector
except ImportError:
raise ImportError("`redshift_connector` not installed. Please install using `pip install redshift-connector`.")
from agno.tools.redshift import RedshiftTools
# --- Mock Data for Tests ---
# Rows as returned by information_schema.tables (one single-element tuple per table).
MOCK_TABLES_RESULT = [("employees",), ("departments",), ("projects",)]
# Rows as returned by information_schema.columns:
# (column_name, data_type, is_nullable).
MOCK_DESCRIBE_RESULT = [
    ("id", "integer", "NO"),
    ("name", "character varying", "YES"),
    ("salary", "numeric", "YES"),
    ("department_id", "integer", "YES"),
]
# Result of a SELECT COUNT(*) query.
MOCK_COUNT_RESULT = [(3,)]
# Table rows consumed by the CSV export test.
MOCK_EXPORT_DATA = [
    (1, "Alice", 75000, 1),
    (2, "Bob", 80000, 2),
    (3, "Charlie", 65000, 1),
]
# Lines of an EXPLAIN plan, one single-element tuple per plan line.
MOCK_EXPLAIN_RESULT = [
    ("Seq Scan on employees (cost=0.00..35.50 rows=10 width=32)",),
    (" Filter: (salary > 10000)",),
]
class TestRedshiftTools:
    """Unit tests for RedshiftTools using mocking."""

    @pytest.fixture
    def mock_connection(self):
        """Create a mock connection that behaves like redshift_connector connection."""
        conn = Mock()
        return conn

    @pytest.fixture
    def mock_cursor(self):
        """Create a mock cursor that behaves like redshift_connector cursor."""
        cursor = Mock()
        cursor.description = None
        cursor.fetchall.return_value = []
        cursor.fetchone.return_value = ()
        # Dunder methods are not auto-mocked by plain Mock, so wire them up:
        # __enter__/__exit__ let the cursor act as a context manager, and
        # __iter__ supports csv.writer.writerows(cursor) in the export test.
        cursor.__enter__ = Mock(return_value=cursor)
        cursor.__exit__ = Mock(return_value=False)
        cursor.__iter__ = Mock(return_value=iter([]))
        return cursor

    @pytest.fixture
    def redshift_tools(self, mock_connection, mock_cursor):
        """Create RedshiftTools instance with mocked connection."""
        mock_connection.cursor.return_value = mock_cursor
        mock_connection.close = Mock()
        # Patch the driver's connect() so no real network connection is made.
        with patch("redshift_connector.connect", return_value=mock_connection):
            tools = RedshiftTools(
                host="localhost",
                port=5439,
                database="testdb",
                user="testuser",
                password="testpassword",
                table_schema="company_data",
            )
            yield tools

    def test_connection_properties(self, redshift_tools):
        """Test that connection properties are properly configured."""
        assert redshift_tools.database == "testdb"
        assert redshift_tools.host == "localhost"
        assert redshift_tools.port == 5439
        assert redshift_tools.table_schema == "company_data"

    def test_show_tables_success(self, redshift_tools, mock_connection, mock_cursor):
        """Test show_tables returns expected table list."""
        mock_cursor.description = [("table_name",)]
        mock_cursor.fetchall.return_value = MOCK_TABLES_RESULT
        result = redshift_tools.show_tables()
        # The schema must be passed as a bound parameter, not interpolated.
        mock_cursor.execute.assert_called_with(
            "SELECT table_name FROM information_schema.tables WHERE table_schema = %s;", ("company_data",)
        )
        assert "table_name" in result
        assert "employees" in result
        assert "departments" in result
        assert "projects" in result

    def test_describe_table_success(self, redshift_tools, mock_connection, mock_cursor):
        """Test describe_table returns expected schema information."""
        mock_cursor.description = [("column_name",), ("data_type",), ("is_nullable",)]
        mock_cursor.fetchall.return_value = MOCK_DESCRIBE_RESULT
        result = redshift_tools.describe_table("employees")
        mock_cursor.execute.assert_called()
        call_args = mock_cursor.execute.call_args
        # call_args[0][0] is the SQL text, call_args[0][1] the bound parameters.
        assert "table_schema = %s AND table_name = %s" in call_args[0][0]
        assert call_args[0][1] == ("company_data", "employees")
        assert "column_name,data_type,is_nullable" in result
        assert "salary,numeric,YES" in result

    def test_run_query_success(self, redshift_tools, mock_connection, mock_cursor):
        """Test run_query executes SQL and returns formatted results."""
        mock_cursor.description = [("count",)]
        mock_cursor.fetchall.return_value = MOCK_COUNT_RESULT
        result = redshift_tools.run_query("SELECT COUNT(*) FROM employees;")
        mock_cursor.execute.assert_called_with("SELECT COUNT(*) FROM employees;", None)
        # Output is header line followed by one comma-joined line per row.
        lines = result.strip().split("\n")
        assert lines[0] == "count"
        assert lines[1] == "3"

    def test_export_table_to_path_success(self, redshift_tools, mock_connection, mock_cursor):
        """Test export_table_to_path creates CSV file safely."""
        mock_cursor.description = [("id",), ("name",), ("salary",), ("department_id",)]
        # export iterates the cursor itself, so feed rows through __iter__.
        mock_cursor.__iter__ = Mock(return_value=iter(MOCK_EXPORT_DATA))
        mock_file = mock_open()
        export_path = "/tmp/test_export.csv"
        with patch("builtins.open", mock_file):
            result = redshift_tools.export_table_to_path("employees", export_path)
        mock_cursor.execute.assert_called_once()
        mock_file.assert_called_once_with(export_path, "w", newline="", encoding="utf-8")
        assert "Successfully exported table 'employees' to '/tmp/test_export.csv'" in result

    def test_inspect_query_success(self, redshift_tools, mock_connection, mock_cursor):
        """Test inspect_query returns execution plan."""
        mock_cursor.description = [("QUERY PLAN",)]
        mock_cursor.fetchall.return_value = MOCK_EXPLAIN_RESULT
        result = redshift_tools.inspect_query("SELECT name FROM employees WHERE salary > 10000;")
        mock_cursor.execute.assert_called_with("EXPLAIN SELECT name FROM employees WHERE salary > 10000;", None)
        assert "Seq Scan on employees" in result
        assert "Filter: (salary > 10000)" in result

    def test_database_error_handling(self, redshift_tools, mock_connection, mock_cursor):
        """Test proper error handling for database errors."""
        mock_cursor.execute.side_effect = redshift_connector.Error("Table does not exist")
        result = redshift_tools.show_tables()
        # Errors are returned as strings, not raised.
        assert "Error executing query:" in result

    def test_export_file_error_handling(self, redshift_tools, mock_connection, mock_cursor):
        """Test error handling when file operations fail."""
        mock_cursor.description = [("id",), ("name",)]
        with patch("builtins.open", side_effect=IOError("Permission denied")):
            result = redshift_tools.export_table_to_path("employees", "/invalid/path/file.csv")
        assert "Error exporting table: Permission denied" in result

    def test_sql_injection_prevention(self, redshift_tools, mock_connection, mock_cursor):
        """Test that SQL injection attempts are safely handled."""
        mock_cursor.description = [("column_name",), ("data_type",), ("is_nullable",)]
        mock_cursor.fetchall.return_value = []
        malicious_table = "users'; DROP TABLE employees; --"
        redshift_tools.describe_table(malicious_table)
        call_args = mock_cursor.execute.call_args
        # The malicious name must appear only in the bound-parameter tuple,
        # never interpolated into the SQL text itself.
        assert call_args[0][1] == ("company_data", malicious_table)
        assert "DROP TABLE" not in call_args[0][0]

    def test_iam_authentication_config(self, mock_connection, mock_cursor):
        """Test IAM authentication configuration."""
        mock_connection.cursor.return_value = mock_cursor
        mock_connection.close = Mock()
        mock_cursor.description = [("result",)]
        mock_cursor.fetchall.return_value = [(1,)]
        with patch("redshift_connector.connect", return_value=mock_connection) as mock_connect:
            tools = RedshiftTools(
                host="test-workgroup.123456.us-east-1.redshift-serverless.amazonaws.com",
                database="dev",
                iam=True,
                profile="test-profile",
                table_schema="public",
            )
            # Trigger a connection by running a query
            tools.run_query("SELECT 1")
            mock_connect.assert_called_once()
            # IAM flags must be forwarded to the driver's connect() kwargs.
            call_kwargs = mock_connect.call_args[1]
            assert call_kwargs["iam"] is True
            assert call_kwargs["profile"] == "test-profile"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_redshift.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/run/requirement.py | from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from uuid import uuid4
from agno.models.response import ToolExecution, UserFeedbackQuestion, UserInputField
@dataclass
class RunRequirement:
    """Requirement to complete a paused run (used in HITL flows)"""

    # NOTE(review): a hand-written __init__ below overrides the @dataclass-
    # generated one, so these defaults never apply at construction time; the
    # declarations mainly serve as typing/documentation. Confirm the decorator
    # is still needed.
    tool_execution: Optional[ToolExecution] = None
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    # User confirmation
    confirmation: Optional[bool] = None
    confirmation_note: Optional[str] = None
    # User input
    user_input_schema: Optional[List[UserInputField]] = None
    # User feedback (structured questions with options)
    user_feedback_schema: Optional[List[UserFeedbackQuestion]] = None
    # External execution
    external_execution_result: Optional[str] = None
    # Member context (set when requirement originates from a team member)
    member_agent_id: Optional[str] = None
    member_agent_name: Optional[str] = None
    member_run_id: Optional[str] = None
def __init__(
self,
tool_execution: ToolExecution,
id: Optional[str] = None,
created_at: Optional[datetime] = None,
):
self.id = id or str(uuid4())
self.tool_execution = tool_execution
self.user_input_schema = tool_execution.user_input_schema if tool_execution else None
self.user_feedback_schema = tool_execution.user_feedback_schema if tool_execution else None
self.created_at = created_at or datetime.now(timezone.utc)
self.confirmation = None
self.confirmation_note = None
self.external_execution_result = None
self.member_agent_id = None
self.member_agent_name = None
self.member_run_id = None
# Internal: holds a reference to the member's paused RunOutput so
# continue_run can pass it directly without a session lookup.
self._member_run_response: Any = None
@property
def needs_confirmation(self) -> bool:
if self.confirmation is not None:
return False
if not self.tool_execution:
return False
if self.tool_execution.confirmed is not None:
return False
return self.tool_execution.requires_confirmation or False
@property
def needs_user_input(self) -> bool:
if not self.tool_execution:
return False
if self.tool_execution.answered is True:
return False
if self.tool_execution.requires_user_input:
return True
if self.user_input_schema and not all(field.value is not None for field in self.user_input_schema):
return True
return False
@property
def needs_user_feedback(self) -> bool:
if not self.tool_execution:
return False
if self.tool_execution.answered is True:
return False
if self.user_feedback_schema and not all(q.selected_options is not None for q in self.user_feedback_schema):
return True
return False
@property
def needs_external_execution(self) -> bool:
if not self.tool_execution:
return False
if self.external_execution_result is not None:
return False
return self.tool_execution.external_execution_required or False
    def confirm(self):
        """Record a positive confirmation on this requirement and its tool call.

        Raises:
            ValueError: If this requirement is not awaiting confirmation.
        """
        if not self.needs_confirmation:
            raise ValueError("This requirement does not require confirmation")
        self.confirmation = True
        if self.tool_execution:
            self.tool_execution.confirmed = True
    def reject(self, note: Optional[str] = None):
        """Record a rejection on this requirement and its tool call.

        Args:
            note: Optional free-text reason for the rejection.

        Raises:
            ValueError: If this requirement is not awaiting confirmation.
        """
        if not self.needs_confirmation:
            raise ValueError("This requirement does not require confirmation")
        self.confirmation = False
        self.confirmation_note = note
        if self.tool_execution:
            self.tool_execution.confirmed = False
            self.tool_execution.confirmation_note = note
def provide_user_input(self, values: Dict[str, Any]) -> None:
"""Provide user input values for a user-input requirement.
Args:
values: A dictionary mapping field names to their values.
"""
if not self.needs_user_input:
raise ValueError("This requirement does not require user input")
if self.user_input_schema:
for input_field in self.user_input_schema:
if input_field.name in values:
input_field.value = values[input_field.name]
# Also update tool_execution's user_input_schema so handle_user_input_update can copy to tool_args
if self.tool_execution and self.tool_execution.user_input_schema:
for tool_input_field in self.tool_execution.user_input_schema:
if tool_input_field.name in values:
tool_input_field.value = values[tool_input_field.name]
# Only mark as answered when all fields have values
if all(f.value is not None for f in self.user_input_schema) and self.tool_execution:
self.tool_execution.answered = True
def provide_user_feedback(self, selections: Dict[str, List[str]]) -> None:
    """Record the user's selections for a user-feedback requirement.

    Args:
        selections: A dictionary mapping question text to lists of selected
            option labels.

    Raises:
        ValueError: If this requirement is not awaiting user feedback.
    """
    if not self.needs_user_feedback:
        raise ValueError("This requirement does not require user feedback")
    tool_call = self.tool_execution
    # Apply the selections both to our own schema and to the tool
    # execution's schema so the two stay in sync
    schemas = [self.user_feedback_schema]
    if tool_call and tool_call.user_feedback_schema:
        schemas.append(tool_call.user_feedback_schema)
    for schema in schemas:
        if not schema:
            continue
        for question in schema:
            if question.question not in selections:
                continue
            question.selected_options = selections[question.question]
            for opt in question.options or []:
                opt.selected = opt.label in question.selected_options
    # Answered only once every question has a selection
    if tool_call and all(q.selected_options is not None for q in self.user_feedback_schema):
        tool_call.answered = True
def set_external_execution_result(self, result: str):
    """Store the outcome of an externally executed tool call.

    Args:
        result: The result produced by executing the tool outside the agent.

    Raises:
        ValueError: If this requirement does not await external execution.
    """
    if not self.needs_external_execution:
        raise ValueError("This requirement does not require external execution")
    # Record on the requirement and mirror onto the tool call
    self.external_execution_result = result
    tool_call = self.tool_execution
    if tool_call:
        tool_call.result = result
def is_resolved(self) -> bool:
    """Return True once nothing is pending on this requirement.

    A requirement is resolved when no confirmation, user input, user
    feedback, or external execution is still outstanding.
    """
    pending = (
        self.needs_confirmation
        or self.needs_user_input
        or self.needs_user_feedback
        or self.needs_external_execution
    )
    return not pending
def to_dict(self) -> Dict[str, Any]:
    """Serialize to a JSON-friendly dictionary, omitting None-valued entries."""
    # datetime values are stored as ISO-8601 strings
    created_at = self.created_at
    if isinstance(created_at, datetime):
        created_at = created_at.isoformat()
    data: Dict[str, Any] = {
        "id": self.id,
        "created_at": created_at,
        "confirmation": self.confirmation,
        "confirmation_note": self.confirmation_note,
        "external_execution_result": self.external_execution_result,
        "member_agent_id": self.member_agent_id,
        "member_agent_name": self.member_agent_name,
        "member_run_id": self.member_run_id,
    }
    tool_call = self.tool_execution
    if tool_call is not None:
        data["tool_execution"] = tool_call.to_dict() if isinstance(tool_call, ToolExecution) else tool_call
    if self.user_input_schema is not None:
        data["user_input_schema"] = [
            field.to_dict() if hasattr(field, "to_dict") else field for field in self.user_input_schema
        ]
    if self.user_feedback_schema is not None:
        data["user_feedback_schema"] = [
            question.to_dict() if hasattr(question, "to_dict") else question
            for question in self.user_feedback_schema
        ]
    # Drop unset values to keep the stored payload compact
    return {key: value for key, value in data.items() if value is not None}
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "RunRequirement":
    """Reconstruct a RunRequirement from a dict produced by `to_dict`.

    Nested objects may arrive either as already-constructed instances or as
    their serialized dict form; both are accepted.

    Args:
        data: The stored dictionary.

    Raises:
        ValueError: If `data` is None.
    """
    if data is None:
        raise ValueError("RunRequirement.from_dict() requires a non-None dict")
    # Handle tool_execution: accept an instance as-is, rebuild from a dict
    tool_data = data.get("tool_execution")
    tool_execution: Optional[ToolExecution] = None
    if isinstance(tool_data, ToolExecution):
        tool_execution = tool_data
    elif isinstance(tool_data, dict):
        tool_execution = ToolExecution.from_dict(tool_data)
    # Handle created_at (ISO string or datetime)
    created_at_raw = data.get("created_at")
    created_at: Optional[datetime] = None
    if isinstance(created_at_raw, datetime):
        created_at = created_at_raw
    elif isinstance(created_at_raw, str):
        try:
            created_at = datetime.fromisoformat(created_at_raw)
        except ValueError:
            # An unparseable timestamp is dropped rather than failing the load
            created_at = None
    # Build requirement - tool_execution is required by __init__
    # For legacy data without tool_execution, create a minimal placeholder
    if tool_execution is None:
        tool_execution = ToolExecution(tool_name="unknown", tool_args={})
    requirement = cls(
        tool_execution=tool_execution,
        id=data.get("id"),
        created_at=created_at,
    )
    # Set optional fields
    requirement.confirmation = data.get("confirmation")
    requirement.confirmation_note = data.get("confirmation_note")
    requirement.external_execution_result = data.get("external_execution_result")
    requirement.member_agent_id = data.get("member_agent_id")
    requirement.member_agent_name = data.get("member_agent_name")
    requirement.member_run_id = data.get("member_run_id")
    # Handle user_input_schema: items may be instances or dicts
    schema_raw = data.get("user_input_schema")
    if schema_raw is not None:
        rebuilt_schema: List[UserInputField] = []
        for item in schema_raw:
            if isinstance(item, UserInputField):
                rebuilt_schema.append(item)
            elif isinstance(item, dict):
                rebuilt_schema.append(UserInputField.from_dict(item))
        # An empty list collapses back to None, matching to_dict's omission
        requirement.user_input_schema = rebuilt_schema if rebuilt_schema else None
    # Handle user_feedback_schema: items may be instances or dicts
    feedback_raw = data.get("user_feedback_schema")
    if feedback_raw is not None:
        rebuilt_feedback: List[UserFeedbackQuestion] = []
        for item in feedback_raw:
            if isinstance(item, UserFeedbackQuestion):
                rebuilt_feedback.append(item)
            elif isinstance(item, dict):
                rebuilt_feedback.append(UserFeedbackQuestion.from_dict(item))
        requirement.user_feedback_schema = rebuilt_feedback if rebuilt_feedback else None
    return requirement
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/run/requirement.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/agent/human_in_the_loop/test_external_execution_flows.py | import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.models.openai import OpenAIChat
from agno.tools.decorator import tool
def test_tool_call_requires_external_execution(shared_db):
    """The run pauses on an external-execution tool and resumes once a result is set."""

    @tool(external_execution=True)
    def send_email(to: str, subject: str, body: str):
        pass

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[send_email],
        telemetry=False,
        markdown=True,
    )
    run_output = agent.run("Send an email to john@doe.com with the subject 'Test' and the body 'Hello, how are you?'")
    assert run_output.is_paused and run_output.tools is not None
    paused_tool = run_output.tools[0]
    assert paused_tool.external_execution_required
    assert paused_tool.tool_name == "send_email"
    assert paused_tool.tool_args == {"to": "john@doe.com", "subject": "Test", "body": "Hello, how are you?"}
    # Provide the externally produced result, then resume the run
    paused_tool.result = "Email sent to john@doe.com with subject Test and body Hello, how are you?"
    run_output = agent.continue_run(run_output)
    assert run_output.is_paused is False
def test_tool_call_requires_external_execution_stream(shared_db):
    """Streaming run pauses on an external-execution tool and resumes after a result is set."""

    @tool(external_execution=True)
    def send_email(to: str, subject: str, body: str):
        pass

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[send_email],
        markdown=True,
        telemetry=False,
    )
    found_external_execution = False
    for response in agent.run(
        "Send an email to john@doe.com with the subject 'Test' and the body 'Hello, how are you?'", stream=True
    ):
        if response.is_paused:
            assert response.tools[0].external_execution_required  # type: ignore
            assert response.tools[0].tool_name == "send_email"  # type: ignore
            assert response.tools[0].tool_args == {  # type: ignore
                "to": "john@doe.com",
                "subject": "Test",
                "body": "Hello, how are you?",
            }
            # Provide the externally produced tool result on the paused run
            response.tools[0].result = "Email sent to john@doe.com with subject Test and body Hello, how are you?"  # type: ignore
            found_external_execution = True
    assert found_external_execution, "No tools were found to require external execution"
    found_external_execution = False
    # Resuming with the updated tools should stream to completion without pausing again
    for response in agent.continue_run(run_id=response.run_id, updated_tools=response.tools, stream=True):
        if response.is_paused:
            found_external_execution = True
    assert found_external_execution is False, "Some tools still require external execution"
@pytest.mark.asyncio
async def test_tool_call_requires_external_execution_async(shared_db):
    """Async run pauses on an external-execution tool and resumes after a result is set."""

    @tool(external_execution=True)
    async def send_email(to: str, subject: str, body: str):
        pass

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[send_email],
        db=shared_db,
        markdown=True,
        telemetry=False,
    )
    response = await agent.arun(
        "Send an email to john@doe.com with the subject 'Test' and the body 'Hello, how are you?'"
    )
    assert response.is_paused and response.tools is not None
    assert response.tools[0].external_execution_required  # type: ignore
    assert response.tools[0].tool_name == "send_email"  # type: ignore
    assert response.tools[0].tool_args == {  # type: ignore
        "to": "john@doe.com",
        "subject": "Test",
        "body": "Hello, how are you?",
    }
    # Provide the externally produced tool result on the paused run
    response.tools[0].result = "Email sent to john@doe.com with subject Test and body Hello, how are you?"  # type: ignore
    response = await agent.acontinue_run(run_id=response.run_id, updated_tools=response.tools)
    assert response.is_paused is False
def test_tool_call_requires_external_execution_error(shared_db):
    """Continuing a paused run without providing an execution result raises ValueError."""

    @tool(external_execution=True)
    def send_email(to: str, subject: str, body: str):
        pass

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[send_email],
        telemetry=False,
        markdown=True,
    )
    run_output = agent.run("Send an email to john@doe.com with the subject 'Test' and the body 'Hello, how are you?'")
    # The run is paused awaiting a result; resuming without one must fail
    with pytest.raises(ValueError):
        agent.continue_run(run_output)
@pytest.mark.asyncio
async def test_tool_call_requires_external_execution_stream_async(shared_db):
    """Async streaming run pauses on an external-execution tool and resumes after a result is set."""

    @tool(external_execution=True)
    async def send_email(to: str, subject: str, body: str):
        pass

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[send_email],
        markdown=True,
        telemetry=False,
    )
    found_external_execution = False
    async for response in agent.arun(
        "Send an email to john@doe.com with the subject 'Test' and the body 'Hello, how are you?'", stream=True
    ):
        if response.is_paused:
            assert response.tools[0].external_execution_required  # type: ignore
            assert response.tools[0].tool_name == "send_email"  # type: ignore
            assert response.tools[0].tool_args == {  # type: ignore
                "to": "john@doe.com",
                "subject": "Test",
                "body": "Hello, how are you?",
            }
            # Provide the externally produced tool result on the paused run
            response.tools[0].result = "Email sent to john@doe.com with subject Test and body Hello, how are you?"  # type: ignore
            found_external_execution = True
    assert found_external_execution, "No tools were found to require external execution"
    found_external_execution = False
    # Resuming with the updated tools should stream to completion without pausing again
    async for response in agent.acontinue_run(run_id=response.run_id, updated_tools=response.tools, stream=True):
        if response.is_paused:
            found_external_execution = True
    assert found_external_execution is False, "Some tools still require external execution"
def test_tool_call_multiple_requires_external_execution(shared_db):
    """Only the external-execution tool pauses when mixed with a regular tool."""

    @tool(external_execution=True)
    def get_the_weather(city: str):
        pass

    def get_activities(city: str):
        pass

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_the_weather, get_activities],
        telemetry=False,
        markdown=True,
    )
    run_output = agent.run("What is the weather in Tokyo and what are the activities?")
    assert run_output.is_paused and run_output.tools is not None
    paused_tools = [t for t in run_output.tools if t.external_execution_required]
    assert paused_tools, "No tool was found to require external execution"
    for paused_tool in paused_tools:
        assert paused_tool.tool_name == "get_the_weather"
        assert paused_tool.tool_args == {"city": "Tokyo"}
        # Provide the externally produced result for each paused tool
        paused_tool.result = "It is currently 70 degrees and cloudy in Tokyo"
    run_output = agent.continue_run(run_output)
    assert run_output.is_paused is False
    assert run_output.content
def test_run_requirement_external_execution(shared_db):
    """Test a HITL external execution flow using RunRequirements"""

    @tool(external_execution=True)
    def send_email(to: str, subject: str, body: str):
        pass

    session_id = "test_session_external_execution"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[send_email],
        db=shared_db,
        telemetry=False,
    )
    # Initial run that requires external execution
    response = agent.run(
        "Send an email to john@doe.com with the subject 'Test' and the body 'Hello, how are you?'",
        session_id=session_id,
    )
    # Verify the run is paused and has active requirements
    assert response.is_paused
    assert len(response.active_requirements) == 1
    # Get the requirement and verify it needs external execution
    requirement = response.active_requirements[0]
    assert requirement.needs_external_execution
    assert requirement.tool_execution and requirement.tool_execution.tool_name == "send_email"
    assert requirement.tool_execution and requirement.tool_execution.tool_args == {
        "to": "john@doe.com",
        "subject": "Test",
        "body": "Hello, how are you?",
    }
    # Use the requirement DX to set the external execution result
    tool_args = requirement.tool_execution and requirement.tool_execution.tool_args
    assert tool_args is not None
    result = f"Email sent to {tool_args['to']} with subject {tool_args['subject']}"
    requirement.set_external_execution_result(result)
    # Verify the result was mirrored onto the underlying tool execution
    assert requirement.tool_execution and requirement.tool_execution.result == result
    # Continue the run with run_id and requirements
    response = agent.continue_run(run_id=response.run_id, requirements=response.requirements, session_id=session_id)
    # Verify the run completed successfully
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == result
def test_run_requirement_external_execution_with_entrypoint(shared_db):
    """Test a HITL external execution flow by calling the tool entrypoint directly"""

    @tool(external_execution=True)
    def execute_shell_command(command: str) -> str:
        """Execute a shell command (only echo is supported for testing)"""
        # Fix: the docstring previously claimed only `ls` was supported, but the
        # implementation (and the prompt below) only exercise `echo`. The
        # docstring is the tool description sent to the model, so it must match.
        if command.startswith("echo"):
            return command.replace("echo ", "")
        else:
            return f"Executed: {command}"

    session_id = "test_session_external_entrypoint"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[execute_shell_command],
        db=shared_db,
        telemetry=False,
    )
    # Initial run that requires external execution
    response = agent.run("Run the command 'echo Hello World'", session_id=session_id)
    # Verify the run is paused
    assert response.is_paused
    assert len(response.active_requirements) == 1
    # Get the requirement
    requirement = response.active_requirements[0]
    assert requirement.needs_external_execution
    assert requirement.tool_execution and requirement.tool_execution.tool_name == "execute_shell_command"
    # Execute the tool externally using the entrypoint
    tool_args = requirement.tool_execution and requirement.tool_execution.tool_args
    assert tool_args is not None
    assert execute_shell_command.entrypoint is not None
    result = execute_shell_command.entrypoint(**tool_args)  # type: ignore
    requirement.set_external_execution_result(result)
    # Continue the run
    response = agent.continue_run(run_id=response.run_id, requirements=response.requirements, session_id=session_id)
    # Verify completion
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result is not None
    assert "Hello World" in response.tools[0].result
@pytest.mark.asyncio
async def test_async_external_execution(shared_db):
    """Test a HITL async external execution flow using RunRequirements"""

    @tool(external_execution=True)
    def send_email(to: str, subject: str, body: str):
        pass

    session_id = "test_session_async_external"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[send_email],
        db=shared_db,
        telemetry=False,
    )
    # Initial async run that requires external execution
    response = await agent.arun(
        "Send an email to john@doe.com with the subject 'Test' and the body 'Hello'", session_id=session_id
    )
    # Verify the run is paused and has active requirements
    assert response.is_paused
    assert len(response.active_requirements) == 1
    # Get the requirement and set result
    requirement = response.active_requirements[0]
    assert requirement.needs_external_execution
    # Use the requirement DX to set the external execution result
    tool_args = requirement.tool_execution and requirement.tool_execution.tool_args
    assert tool_args is not None
    result = f"Email sent to {tool_args['to']}"
    requirement.set_external_execution_result(result)
    # Continue the run with run_id and requirements
    response = await agent.acontinue_run(
        run_id=response.run_id, requirements=response.requirements, session_id=session_id
    )
    # Verify completion
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == result
def test_streaming_external_execution(shared_db):
    """Test a HITL streaming external execution flow using RunRequirements"""

    @tool(external_execution=True)
    def send_email(to: str, subject: str, body: str):
        pass

    session_id = "test_session_streaming_external"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[send_email],
        db=shared_db,
        telemetry=False,
    )
    # Stream the initial run, stopping at the first paused output
    paused_run_output = None
    for run_output in agent.run(
        "Send an email to john@doe.com with the subject 'Test' and the body 'Hello'",
        session_id=session_id,
        stream=True,
    ):
        if run_output.is_paused:  # type: ignore
            paused_run_output = run_output
            break
    # Verify we got a paused run with requirements
    assert paused_run_output is not None
    assert paused_run_output.is_paused
    # Get the requirement using new DX
    requirements = paused_run_output.requirements  # type: ignore
    assert requirements is not None
    assert len(requirements) == 1
    requirement = requirements[0]
    assert requirement.needs_external_execution
    # Set external execution result
    tool_args = requirement.tool_execution and requirement.tool_execution.tool_args
    assert tool_args is not None
    result = f"Email sent to {tool_args['to']}"
    requirement.set_external_execution_result(result)
    # Continue the run with streaming; the requirement mirrored the result onto
    # the paused run's tools, which are passed back via updated_tools
    final_output = None
    for run_output in agent.continue_run(
        run_id=paused_run_output.run_id,
        updated_tools=paused_run_output.tools,  # type: ignore
        session_id=session_id,
        stream=True,
        yield_run_output=True,
    ):
        final_output = run_output
    # Verify completion
    assert final_output is not None
    assert final_output.is_paused is False  # type: ignore
    assert final_output.tools is not None  # type: ignore
    assert final_output.tools[0].result == result  # type: ignore
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/human_in_the_loop/test_external_execution_flows.py",
"license": "Apache License 2.0",
"lines": 324,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/human_in_the_loop/test_run_requirement.py | """Tests for the RunRequirement class, used to handle HITL flows"""
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.decorator import tool
def test_run_requirement_needs_confirmation_flag(shared_db):
    """Test that needs_confirmation flag is set correctly"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_the_weather],
        telemetry=False,
    )
    run_output = agent.run("What is the weather in Tokyo?")
    # A confirmation-gated tool should flag only needs_confirmation
    pending = run_output.active_requirements[0]
    assert pending.needs_confirmation is True
    assert pending.needs_user_input is False
    assert pending.needs_external_execution is False
def test_run_requirement_needs_user_input_flag(shared_db):
    """Test that needs_user_input flag is set correctly"""

    @tool(requires_user_input=True)
    def get_user_preference(preference_type: str) -> str:
        return f"User preference for {preference_type}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_user_preference],
        telemetry=False,
    )
    run_output = agent.run("What is my food preference?")
    # A user-input-gated tool should flag only needs_user_input
    pending = run_output.active_requirements[0]
    assert pending.needs_confirmation is False
    assert pending.needs_user_input is True
    assert pending.needs_external_execution is False
def test_run_requirement_confirm_method(shared_db):
    """Test that requirement.confirm() correctly marks the tool as confirmed"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    assert response.is_paused
    assert len(response.active_requirements) == 1
    # Before confirming: pending, undecided, unresolved
    requirement = response.active_requirements[0]
    assert requirement.needs_confirmation is True
    assert requirement.confirmation is None
    assert requirement.is_resolved() is False
    requirement.confirm()
    # Verify the requirement was confirmed and the tool was updated
    assert requirement.confirmation is True
    assert requirement.tool_execution and requirement.tool_execution.confirmed is True
    assert requirement.is_resolved() is True
def test_run_requirement_reject_method(shared_db):
    """Test that requirement.reject() correctly marks the tool as rejected"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    assert response.is_paused
    assert len(response.active_requirements) == 1
    # Before rejecting: pending, undecided, unresolved
    requirement = response.active_requirements[0]
    assert requirement.needs_confirmation is True
    assert requirement.confirmation is None
    assert requirement.is_resolved() is False
    requirement.reject()
    # Verify the requirement was rejected and the tool was updated
    assert requirement.confirmation is False
    assert requirement.tool_execution and requirement.tool_execution.confirmed is False
    assert requirement.is_resolved() is True
def test_run_requirement_confirm_raises_error_when_not_needed(shared_db):
    """confirm() on a requirement awaiting user input (not confirmation) raises ValueError"""

    @tool(requires_user_input=True)
    def get_user_preference(preference_type: str) -> str:
        return f"User preference for {preference_type}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_user_preference],
        telemetry=False,
    )
    run_output = agent.run("What is my food preference?")
    assert run_output.is_paused
    assert len(run_output.active_requirements) == 1
    pending = run_output.active_requirements[0]
    assert pending.needs_confirmation is False
    assert pending.needs_user_input is True
    # Calling confirm() should raise ValueError
    with pytest.raises(ValueError, match="This requirement does not require confirmation"):
        pending.confirm()
def test_run_requirement_reject_raises_error_when_not_needed(shared_db):
    """Test that calling reject() on a requirement that doesn't need confirmation raises ValueError"""

    @tool(requires_user_input=True)
    def get_user_preference(preference_type: str) -> str:
        return f"User preference for {preference_type}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_user_preference],
        db=shared_db,
        telemetry=False,
    )
    response = agent.run("What is my food preference?")
    assert response.is_paused
    assert len(response.active_requirements) == 1
    # This requirement awaits user input, so confirmation APIs must not apply
    requirement = response.active_requirements[0]
    assert requirement.needs_confirmation is False
    # Calling reject() should raise ValueError
    with pytest.raises(ValueError, match="This requirement does not require confirmation"):
        requirement.reject()
@pytest.mark.asyncio
async def test_run_requirement_async_context(shared_db):
    """Test that RunRequirement works correctly in async context"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = await agent.arun("What is the weather in Tokyo?")
    assert response.is_paused
    assert len(response.active_requirements) == 1
    requirement = response.active_requirements[0]
    assert requirement.is_resolved() is False
    # confirm() is synchronous and should work the same from async code
    requirement.confirm()
    # Verify confirmation works in async context
    assert requirement.confirmation is True
    assert requirement.is_resolved() is True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/human_in_the_loop/test_run_requirement.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/human_in_the_loop/test_user_confirmation_flows.py | import pytest
from agno.agent import Agent, RunOutput # noqa
from agno.models.openai import OpenAIChat
from agno.tools.decorator import tool
def test_tool_call_requires_confirmation(shared_db):
    """A confirmation-gated tool pauses the run; confirming it resumes and executes the tool."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        tools=[get_the_weather],
        telemetry=False,
        markdown=True,
    )
    run_output = agent.run("What is the weather in Tokyo?")
    assert run_output.is_paused
    assert run_output.tools is not None
    paused_tool = run_output.tools[0]
    assert paused_tool.requires_confirmation
    assert paused_tool.tool_name == "get_the_weather"
    assert paused_tool.tool_args == {"city": "Tokyo"}
    # Approve the tool call and resume the run
    paused_tool.confirmed = True
    run_output = agent.continue_run(run_output)
    assert run_output.is_paused is False
    assert run_output.tools is not None
    assert run_output.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_tool_call_requires_confirmation_continue_with_run_response(shared_db):
    """Test continuing a paused run by passing the full run response object.

    NOTE(review): this body is currently identical to
    test_tool_call_requires_confirmation above — presumably this variant was
    meant to diverge (e.g. continue by run_id elsewhere); confirm the coverage
    isn't duplicated.
    """

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        markdown=True,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?")
    assert response.is_paused
    assert response.tools is not None
    assert response.tools[0].requires_confirmation
    assert response.tools[0].tool_name == "get_the_weather"
    assert response.tools[0].tool_args == {"city": "Tokyo"}
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    response = agent.continue_run(response)
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_tool_call_requires_confirmation_continue_with_run_id(shared_db):
    """A paused run can be continued by run_id from a brand-new agent instance (state lives in the shared db)."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_1"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = agent.run("What is the weather in Tokyo?", session_id=session_id)
    assert response.is_paused
    assert response.tools is not None
    assert response.tools[0].requires_confirmation
    assert response.tools[0].tool_name == "get_the_weather"
    assert response.tools[0].tool_args == {"city": "Tokyo"}
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    # Create a completely new agent instance
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = agent.continue_run(run_id=response.run_id, updated_tools=response.tools, session_id=session_id)
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_tool_call_requires_confirmation_continue_with_run_id_stream(shared_db):
    """Streaming variant: pause on confirmation, then continue by run_id from a new agent instance."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_1"
    agent = Agent(
        id="test_agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    updated_tools = None
    for response in agent.run("What is the weather in Tokyo?", session_id=session_id, stream=True, stream_events=True):
        if response.is_paused:
            assert response.tools is not None
            assert response.tools[0].requires_confirmation
            assert response.tools[0].tool_name == "get_the_weather"
            assert response.tools[0].tool_args == {"city": "Tokyo"}
            # Mark the tool as confirmed
            response.tools[0].confirmed = True
            updated_tools = response.tools
    # The paused state must have been persisted to the db
    run_response = agent.get_last_run_output(session_id=session_id)
    assert run_response and run_response.is_paused
    # Create a completely new agent instance
    agent = Agent(
        id="test_agent",
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = agent.continue_run(
        run_id=run_response.run_id, updated_tools=updated_tools, session_id=session_id, stream=True, stream_events=True
    )
    for response in response:
        if response.is_paused:
            assert False, "The run should not be paused"
    # The completed run (including the tool result) is read back from storage
    run_response = agent.get_run_output(run_id=run_response.run_id, session_id=session_id)  # type: ignore
    assert run_response and run_response.tools is not None
    assert run_response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
@pytest.mark.asyncio
async def test_tool_call_requires_confirmation_continue_with_run_id_async(shared_db):
    """Async variant: pause on confirmation, then continue by run_id from a new agent instance."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_1"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        instructions="When you have confirmation, then just use the tool",
        telemetry=False,
    )
    response = await agent.arun("What is the weather in Tokyo?", session_id=session_id)
    assert response.is_paused
    assert response.tools is not None
    assert len(response.tools) == 1
    assert response.tools[0].requires_confirmation
    assert response.tools[0].tool_name == "get_the_weather"
    assert response.tools[0].tool_args == {"city": "Tokyo"}
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    # Create a completely new agent instance
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )
    response = await agent.acontinue_run(run_id=response.run_id, updated_tools=response.tools, session_id=session_id)
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_tool_call_requires_confirmation_memory_footprint(shared_db):
    """A paused-then-continued run stays a single run in storage, with messages appended on resume."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        markdown=True,
        telemetry=False,
    )
    session_id = "test_session"
    response = agent.run("What is the weather in Tokyo?", session_id=session_id)
    session_from_db = agent.get_session(session_id=session_id)
    assert session_from_db and session_from_db.runs is not None
    assert len(session_from_db.runs) == 1, "There should be one run in the memory"
    # 3 messages at the pause point — presumably system/user/assistant tool call; verify roles on failure
    assert len(session_from_db.runs[0].messages) == 3, [m.role for m in session_from_db.runs[0].messages]  # type: ignore
    assert response.is_paused
    assert response.tools is not None
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    response = agent.continue_run(response)
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
    session_from_db = agent.get_session(session_id=session_id)
    assert session_from_db and session_from_db.runs is not None
    # Continuing must extend the same run, not create a second one
    assert len(session_from_db.runs) == 1, "There should be one run in the memory"
    assert len(session_from_db.runs[0].messages) == 5, [m.role for m in session_from_db.runs[0].messages]  # type: ignore
@pytest.mark.asyncio
async def test_tool_call_requires_confirmation_async(shared_db):
    """Async variant: a confirmation-gated async tool pauses the run and resumes after confirmation."""

    @tool(requires_confirmation=True)
    async def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        markdown=True,
        telemetry=False,
    )
    response = await agent.arun("What is the weather in Tokyo?")
    assert response.is_paused
    assert response.tools is not None
    assert response.tools[0].requires_confirmation
    assert response.tools[0].tool_name == "get_the_weather"
    assert response.tools[0].tool_args == {"city": "Tokyo"}
    # Mark the tool as confirmed
    response.tools[0].confirmed = True
    response = await agent.acontinue_run(response)
    # Fix: also assert the run resumed, matching every sync sibling test
    assert response.is_paused is False
    assert response.tools is not None
    assert response.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_tool_call_multiple_requires_confirmation(shared_db):
    """With two tools, only the confirmation-gated one should pause the run."""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    def get_activities(city: str):
        return f"The following activities are available in {city}: \n - Shopping \n - Eating \n - Drinking"

    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather, get_activities],
        db=shared_db,
        markdown=True,
        telemetry=False,
    )

    run_output = agent.run("What is the weather in Tokyo and what are the activities?")
    assert run_output.is_paused
    assert run_output.tools is not None

    # Only the weather tool is gated; collect every call awaiting confirmation.
    pending = [t for t in run_output.tools if t.requires_confirmation]
    assert pending, "No tool was found to require confirmation"
    for tool_call in pending:
        assert tool_call.tool_name == "get_the_weather"
        assert tool_call.tool_args == {"city": "Tokyo"}
        tool_call.confirmed = True

    # After approval the run should complete and produce content.
    run_output = agent.continue_run(run_output)
    assert run_output.is_paused is False
    assert run_output.content
def test_run_requirement_confirmation(shared_db):
    """Test a HITL confirmation flow using RunRequirements"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_confirmation"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )

    # The first run should pause with exactly one open requirement.
    paused = agent.run("What is the weather in Tokyo?", session_id=session_id)
    assert paused.is_paused
    assert len(paused.active_requirements) == 1

    req = paused.active_requirements[0]
    assert req.needs_confirmation
    assert req.tool_execution and req.tool_execution.tool_name == "get_the_weather"
    assert req.tool_execution and req.tool_execution.tool_args == {"city": "Tokyo"}

    # Approve the requirement and check the approval is recorded on both views.
    req.confirm()
    assert req.tool_execution and req.tool_execution.confirmed is True
    assert req.confirmation is True

    # Resuming with the updated requirements should execute the tool.
    resumed = agent.continue_run(run_id=paused.run_id, requirements=paused.requirements, session_id=session_id)
    assert resumed.is_paused is False
    assert resumed.tools is not None
    assert resumed.tools[0].confirmed is True
    assert resumed.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_run_requirement_rejection(shared_db):
    """Test a HITL rejection flow using RunRequirements"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_rejection"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )

    # The first run should pause with exactly one open requirement.
    paused = agent.run("What is the weather in Tokyo?", session_id=session_id)
    assert paused.is_paused
    assert len(paused.active_requirements) == 1

    req = paused.active_requirements[0]
    assert req.needs_confirmation
    assert req.tool_execution and req.tool_execution.tool_name == "get_the_weather"
    assert req.tool_execution and req.tool_execution.tool_args == {"city": "Tokyo"}

    # Reject the requirement and check the rejection is recorded on both views.
    req.reject()
    assert req.tool_execution and req.tool_execution.confirmed is False
    assert req.confirmation is False

    # Resuming after a rejection must not execute the tool: no result is set.
    resumed = agent.continue_run(run_id=paused.run_id, requirements=paused.requirements, session_id=session_id)
    assert resumed.tools is not None
    assert resumed.tools[0].confirmed is False
    assert resumed.tools[0].result is None
@pytest.mark.asyncio
async def test_async_confirmation(shared_db):
    """Test a HITL confirmation flow using RunRequirements and running the agent asynchronously"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_async_confirmation"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )

    # The initial async run should pause with a single open requirement.
    paused = await agent.arun("What is the weather in Tokyo?", session_id=session_id)
    assert paused.is_paused
    assert len(paused.active_requirements) == 1

    req = paused.active_requirements[0]
    assert req.needs_confirmation
    assert req.tool_execution and req.tool_execution.tool_name == "get_the_weather"
    assert req.tool_execution and req.tool_execution.tool_args == {"city": "Tokyo"}

    # Approve the requirement and resume asynchronously.
    req.confirm()
    assert req.tool_execution and req.tool_execution.confirmed is True

    resumed = await agent.acontinue_run(
        run_id=paused.run_id, requirements=paused.requirements, session_id=session_id
    )
    assert resumed.is_paused is False
    assert resumed.tools is not None
    assert resumed.tools[0].confirmed is True
    assert resumed.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
def test_streaming_confirmation(shared_db):
    """Test a HITL confirmation flow using RunRequirements and streaming the response"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_streaming_confirmation"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )

    # stream=True without stream_events yields RunOutput objects; stop at the first paused one.
    paused = None
    for chunk in agent.run("What is the weather in Tokyo?", session_id=session_id, stream=True):
        if chunk.is_paused:  # type: ignore
            paused = chunk
            break

    # The stream must surface a paused run carrying the open requirement.
    assert paused is not None
    assert paused.is_paused
    assert len(paused.requirements) == 1  # type: ignore

    req = paused.requirements[0]  # type: ignore
    assert req.needs_confirmation
    assert req.tool_execution and req.tool_execution.tool_name == "get_the_weather"
    assert req.tool_execution and req.tool_execution.tool_args == {"city": "Tokyo"}

    # Approve the requirement before resuming.
    req.confirm()
    assert req.tool_execution and req.tool_execution.confirmed is True

    # Stream the continuation; with yield_run_output=True the last item yielded is the RunOutput.
    final = None
    for chunk in agent.continue_run(
        run_id=paused.run_id,
        updated_tools=paused.tools,  # type: ignore
        session_id=session_id,
        stream=True,
        yield_run_output=True,
    ):
        final = chunk

    assert final is not None
    assert final.is_paused is False
    assert final.tools is not None
    assert final.tools[0].confirmed is True
    assert final.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
@pytest.mark.asyncio
async def test_streaming_confirmation_async(shared_db):
    """Test a HITL confirmation flow using RunRequirements and streaming the response asynchronously"""

    @tool(requires_confirmation=True)
    def get_the_weather(city: str):
        return f"It is currently 70 degrees and cloudy in {city}"

    session_id = "test_session_streaming_confirmation"
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[get_the_weather],
        db=shared_db,
        telemetry=False,
    )

    # stream=True without stream_events yields RunOutput objects; stop at the first paused one.
    paused = None
    async for chunk in agent.arun("What is the weather in Tokyo?", session_id=session_id, stream=True):
        if chunk.is_paused:  # type: ignore
            paused = chunk
            break

    # The stream must surface a paused run carrying the open requirement.
    assert paused is not None
    assert paused.is_paused
    assert len(paused.requirements) == 1  # type: ignore

    req = paused.requirements[0]  # type: ignore
    assert req.needs_confirmation
    assert req.tool_execution and req.tool_execution.tool_name == "get_the_weather"
    assert req.tool_execution and req.tool_execution.tool_args == {"city": "Tokyo"}

    # Approve the requirement before resuming.
    req.confirm()
    assert req.tool_execution and req.tool_execution.confirmed is True

    # Stream the continuation; with yield_run_output=True the last item yielded is the RunOutput.
    final = None
    async for chunk in agent.acontinue_run(
        run_id=paused.run_id,
        updated_tools=paused.tools,  # type: ignore
        session_id=session_id,
        stream=True,
        yield_run_output=True,
    ):
        final = chunk

    assert final is not None
    assert final.is_paused is False
    assert final.tools is not None
    assert final.tools[0].confirmed is True
    assert final.tools[0].result == "It is currently 70 degrees and cloudy in Tokyo"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/human_in_the_loop/test_user_confirmation_flows.py",
"license": "Apache License 2.0",
"lines": 422,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/spotify.py | """
Spotify Toolkit for Agno SDK
A toolkit for searching songs, creating playlists, and updating playlists on Spotify.
Requires a valid Spotify access token with appropriate scopes.
Required scopes:
- user-read-private (for getting user ID)
- playlist-modify-public (for public playlists)
- playlist-modify-private (for private playlists)
"""
import json
from typing import Any, List, Optional
import httpx
from agno.tools import Toolkit
from agno.utils.log import log_debug
class SpotifyTools(Toolkit):
"""
Spotify toolkit for searching songs and managing playlists.
Args:
access_token: Spotify OAuth access token with required scopes.
default_market: Default market/country code for search results (e.g., 'US', 'GB').
timeout: Request timeout in seconds.
"""
def __init__(
self,
access_token: str,
default_market: Optional[str] = "US",
timeout: int = 30,
**kwargs,
):
self.access_token = access_token
self.default_market = default_market
self.timeout = timeout
self.base_url = "https://api.spotify.com/v1"
tools: List[Any] = [
self.search_tracks,
self.search_playlists,
self.search_artists,
self.search_albums,
self.get_user_playlists,
self.get_track_recommendations,
self.get_artist_top_tracks,
self.get_album_tracks,
self.get_my_top_tracks,
self.get_my_top_artists,
self.create_playlist,
self.add_tracks_to_playlist,
self.get_playlist,
self.update_playlist_details,
self.remove_tracks_from_playlist,
self.get_current_user,
self.play_track,
self.get_currently_playing,
]
super().__init__(name="spotify", tools=tools, **kwargs)
def _make_request(
self,
endpoint: str,
method: str = "GET",
body: Optional[dict] = None,
params: Optional[dict] = None,
) -> dict:
"""Make an authenticated request to the Spotify API."""
url = f"{self.base_url}/{endpoint}"
headers = {
"Authorization": f"Bearer {self.access_token}",
"Content-Type": "application/json",
}
with httpx.Client(timeout=self.timeout) as client:
response = client.request(
method=method,
url=url,
headers=headers,
json=body,
params=params,
)
if response.status_code == 204:
return {"success": True}
try:
return response.json()
except json.JSONDecodeError:
return {"error": f"Failed to parse response: {response.text}"}
def get_current_user(self) -> str:
"""Get the current authenticated user's profile.
Returns:
JSON string containing user profile with id, display_name, and email.
"""
log_debug("Fetching current Spotify user profile")
result = self._make_request("me")
return json.dumps(result, indent=2)
def get_my_top_tracks(
self,
time_range: str = "medium_term",
limit: int = 20,
) -> str:
"""Get the current user's most played tracks.
Requires the 'user-top-read' scope.
Args:
time_range: Time period for top tracks:
- "short_term": Last 4 weeks
- "medium_term": Last 6 months (default)
- "long_term": All time (several years)
limit: Number of tracks to return (default 20, max 50).
Returns:
JSON string containing list of user's top tracks with id, name, artists, album, and uri.
"""
log_debug(f"Fetching user's top tracks: {time_range}")
params = {
"time_range": time_range,
"limit": min(limit, 50),
}
result = self._make_request("me/top/tracks", params=params)
if "error" in result:
return json.dumps(result, indent=2)
tracks = result.get("items", [])
simplified_tracks = [
{
"rank": i + 1,
"id": track["id"],
"name": track["name"],
"artists": [artist["name"] for artist in track["artists"]],
"album": track["album"]["name"],
"uri": track["uri"],
"popularity": track.get("popularity"),
}
for i, track in enumerate(tracks)
]
return json.dumps(simplified_tracks, indent=2)
def get_my_top_artists(
self,
time_range: str = "medium_term",
limit: int = 20,
) -> str:
"""Get the current user's most played artists.
Requires the 'user-top-read' scope.
Args:
time_range: Time period for top artists:
- "short_term": Last 4 weeks
- "medium_term": Last 6 months (default)
- "long_term": All time (several years)
limit: Number of artists to return (default 20, max 50).
Returns:
JSON string containing list of user's top artists with id, name, genres, and uri.
"""
log_debug(f"Fetching user's top artists: {time_range}")
params = {
"time_range": time_range,
"limit": min(limit, 50),
}
result = self._make_request("me/top/artists", params=params)
if "error" in result:
return json.dumps(result, indent=2)
artists = result.get("items", [])
simplified_artists = [
{
"rank": i + 1,
"id": artist["id"],
"name": artist["name"],
"genres": artist.get("genres", []),
"uri": artist["uri"],
"popularity": artist.get("popularity"),
"followers": artist.get("followers", {}).get("total"),
}
for i, artist in enumerate(artists)
]
return json.dumps(simplified_artists, indent=2)
def search_playlists(
self,
query: str,
max_results: int = 10,
) -> str:
"""Search for playlists on Spotify by name.
Use this to find playlists by name before updating them.
Example: "Good Vibes", "Workout Mix", "Chill Beats"
Args:
query: Search query - playlist name or keywords.
max_results: Maximum number of playlists to return (default 10, max 50).
Returns:
JSON string containing list of playlists with id, name, owner, track_count, and url.
"""
log_debug(f"Searching Spotify for playlists: {query}")
params = {
"q": query,
"type": "playlist",
"limit": min(max_results, 50),
}
result = self._make_request("search", params=params)
if "error" in result:
return json.dumps(result, indent=2)
playlists = result.get("playlists", {}).get("items", [])
simplified_playlists = [
{
"id": playlist["id"],
"name": playlist["name"],
"owner": playlist["owner"]["display_name"],
"track_count": playlist["tracks"]["total"],
"url": playlist["external_urls"]["spotify"],
"uri": playlist["uri"],
"public": playlist.get("public"),
}
for playlist in playlists
if playlist is not None
]
return json.dumps(simplified_playlists, indent=2)
def get_user_playlists(
self,
max_results: int = 20,
) -> str:
"""Get the current user's playlists.
Use this to find playlists owned by or followed by the current user.
This is more reliable than search when looking for the user's own playlists.
Args:
max_results: Maximum number of playlists to return (default 20, max 50).
Returns:
JSON string containing list of user's playlists with id, name, owner, track_count, and url.
"""
log_debug("Fetching current user's playlists")
params = {
"limit": min(max_results, 50),
}
result = self._make_request("me/playlists", params=params)
if "error" in result:
return json.dumps(result, indent=2)
playlists = result.get("items", [])
simplified_playlists = [
{
"id": playlist["id"],
"name": playlist["name"],
"owner": playlist["owner"]["display_name"],
"track_count": playlist["tracks"]["total"],
"url": playlist["external_urls"]["spotify"],
"uri": playlist["uri"],
"public": playlist.get("public"),
}
for playlist in playlists
if playlist is not None
]
return json.dumps(simplified_playlists, indent=2)
def search_artists(
self,
query: str,
max_results: int = 5,
) -> str:
"""Search for artists on Spotify.
Use this to find an artist's ID before getting their top tracks.
Args:
query: Artist name to search for.
max_results: Maximum number of artists to return (default 5, max 50).
Returns:
JSON string containing list of artists with id, name, genres, popularity, and uri.
"""
log_debug(f"Searching Spotify for artists: {query}")
params = {
"q": query,
"type": "artist",
"limit": min(max_results, 50),
}
result = self._make_request("search", params=params)
if "error" in result:
return json.dumps(result, indent=2)
artists = result.get("artists", {}).get("items", [])
simplified_artists = [
{
"id": artist["id"],
"name": artist["name"],
"genres": artist.get("genres", []),
"popularity": artist.get("popularity"),
"uri": artist["uri"],
"followers": artist.get("followers", {}).get("total"),
}
for artist in artists
]
return json.dumps(simplified_artists, indent=2)
def search_albums(
self,
query: str,
max_results: int = 10,
market: Optional[str] = None,
) -> str:
"""Search for albums on Spotify.
Use this to find an album's ID before getting its tracks.
Args:
query: Album name or artist + album name to search for.
max_results: Maximum number of albums to return (default 10, max 50).
market: Country code for market (e.g., 'US'). Uses default if not specified.
Returns:
JSON string containing list of albums with id, name, artists, release_date, total_tracks, and uri.
"""
log_debug(f"Searching Spotify for albums: {query}")
params = {
"q": query,
"type": "album",
"limit": min(max_results, 50),
"market": market or self.default_market,
}
result = self._make_request("search", params=params)
if "error" in result:
return json.dumps(result, indent=2)
albums = result.get("albums", {}).get("items", [])
simplified_albums = [
{
"id": album["id"],
"name": album["name"],
"artists": [artist["name"] for artist in album["artists"]],
"release_date": album.get("release_date"),
"total_tracks": album.get("total_tracks"),
"uri": album["uri"],
"album_type": album.get("album_type"),
}
for album in albums
]
return json.dumps(simplified_albums, indent=2)
def get_album_tracks(
self,
album_id: str,
market: Optional[str] = None,
) -> str:
"""Get all tracks from an album.
Use search_albums first to get the album_id if you don't have it.
Useful for adding entire albums to a playlist.
Args:
album_id: The Spotify ID of the album.
market: Country code for market (e.g., 'US'). Uses default if not specified.
Returns:
JSON string containing album info and list of tracks with id, name, track_number, duration, and uri.
"""
log_debug(f"Fetching tracks for album: {album_id}")
# First get album details
album_result = self._make_request(f"albums/{album_id}", params={"market": market or self.default_market})
if "error" in album_result:
return json.dumps(album_result, indent=2)
tracks = album_result.get("tracks", {}).get("items", [])
simplified_tracks = [
{
"id": track["id"],
"name": track["name"],
"track_number": track["track_number"],
"duration_ms": track["duration_ms"],
"uri": track["uri"],
"artists": [artist["name"] for artist in track["artists"]],
}
for track in tracks
]
response = {
"album": {
"id": album_result["id"],
"name": album_result["name"],
"artists": [artist["name"] for artist in album_result["artists"]],
"release_date": album_result.get("release_date"),
"total_tracks": album_result.get("total_tracks"),
"uri": album_result["uri"],
},
"tracks": simplified_tracks,
}
return json.dumps(response, indent=2)
def get_artist_top_tracks(
self,
artist_id: str,
market: Optional[str] = None,
) -> str:
"""Get an artist's top tracks on Spotify.
Use search_artists first to get the artist_id if you don't have it.
Args:
artist_id: The Spotify ID of the artist.
market: Country code for market (e.g., 'US'). Uses default if not specified.
Returns:
JSON string containing list of top tracks with id, name, album, popularity, and uri.
"""
log_debug(f"Fetching top tracks for artist: {artist_id}")
params = {
"market": market or self.default_market,
}
result = self._make_request(f"artists/{artist_id}/top-tracks", params=params)
if "error" in result:
return json.dumps(result, indent=2)
tracks = result.get("tracks", [])
simplified_tracks = [
{
"id": track["id"],
"name": track["name"],
"artists": [artist["name"] for artist in track["artists"]],
"album": track["album"]["name"],
"uri": track["uri"],
"popularity": track.get("popularity"),
"preview_url": track.get("preview_url"),
}
for track in tracks
]
return json.dumps(simplified_tracks, indent=2)
def get_track_recommendations(
self,
seed_tracks: Optional[List[str]] = None,
seed_artists: Optional[List[str]] = None,
seed_genres: Optional[List[str]] = None,
limit: int = 20,
target_energy: Optional[float] = None,
target_valence: Optional[float] = None,
target_danceability: Optional[float] = None,
target_tempo: Optional[float] = None,
) -> str:
"""Get track recommendations based on seed tracks, artists, or genres.
Must provide at least one seed (track, artist, or genre). Maximum 5 seeds total.
For mood-based playlists, use these audio features (0.0 to 1.0 scale):
- valence: happiness (0=sad, 1=happy)
- energy: intensity (0=calm, 1=energetic)
- danceability: how danceable (0=least, 1=most)
- tempo: BPM (e.g., 120 for upbeat)
Args:
seed_tracks: List of Spotify track IDs (not URIs) to use as seeds.
seed_artists: List of Spotify artist IDs to use as seeds.
seed_genres: List of genres (e.g., "pop", "hip-hop", "rock", "electronic").
limit: Number of recommendations to return (default 20, max 100).
target_energy: Target energy level 0.0-1.0 (higher = more energetic).
target_valence: Target happiness level 0.0-1.0 (higher = happier).
target_danceability: Target danceability 0.0-1.0 (higher = more danceable).
target_tempo: Target tempo in BPM (e.g., 120).
Returns:
JSON string containing list of recommended tracks.
"""
log_debug("Fetching track recommendations")
params: dict[str, Any] = {
"limit": min(limit, 100),
}
if seed_tracks:
params["seed_tracks"] = ",".join(seed_tracks[:5])
if seed_artists:
params["seed_artists"] = ",".join(seed_artists[:5])
if seed_genres:
params["seed_genres"] = ",".join(seed_genres[:5])
# Audio feature targets
if target_energy is not None:
params["target_energy"] = target_energy
if target_valence is not None:
params["target_valence"] = target_valence
if target_danceability is not None:
params["target_danceability"] = target_danceability
if target_tempo is not None:
params["target_tempo"] = target_tempo
# Validate at least one seed
if not any([seed_tracks, seed_artists, seed_genres]):
return json.dumps({"error": "At least one seed (tracks, artists, or genres) is required"}, indent=2)
result = self._make_request("recommendations", params=params)
if "error" in result:
return json.dumps(result, indent=2)
tracks = result.get("tracks", [])
simplified_tracks = [
{
"id": track["id"],
"name": track["name"],
"artists": [artist["name"] for artist in track["artists"]],
"album": track["album"]["name"],
"uri": track["uri"],
"popularity": track.get("popularity"),
"preview_url": track.get("preview_url"),
}
for track in tracks
]
return json.dumps(simplified_tracks, indent=2)
def play_track(
self,
track_uri: Optional[str] = None,
context_uri: Optional[str] = None,
device_id: Optional[str] = None,
position_ms: int = 0,
) -> str:
"""Start or resume playback on the user's Spotify.
Requires an active Spotify session (open Spotify app on any device).
Requires the 'user-modify-playback-state' scope.
Args:
track_uri: Spotify URI of track to play (e.g., "spotify:track:xxx").
If not provided, resumes current playback.
context_uri: Spotify URI of context to play (album, artist, playlist).
e.g., "spotify:playlist:xxx" or "spotify:album:xxx"
device_id: Optional device ID to play on. Uses active device if not specified.
position_ms: Position in milliseconds to start from (default 0).
Returns:
JSON string with success status or error.
"""
log_debug(f"Starting playback: track={track_uri}, context={context_uri}")
params = {}
if device_id:
params["device_id"] = device_id
body: dict[str, Any] = {}
if track_uri:
body["uris"] = [track_uri]
if context_uri:
body["context_uri"] = context_uri
if position_ms:
body["position_ms"] = position_ms
result = self._make_request(
"me/player/play", method="PUT", body=body if body else None, params=params if params else None
)
if result.get("success") or not result.get("error"):
return json.dumps({"success": True, "message": "Playback started"}, indent=2)
# Common error: no active device
if result.get("error", {}).get("reason") == "NO_ACTIVE_DEVICE":
return json.dumps(
{
"error": "No active Spotify device found. Please open Spotify on any device first.",
"reason": "NO_ACTIVE_DEVICE",
},
indent=2,
)
return json.dumps(result, indent=2)
def get_currently_playing(self) -> str:
"""Get information about the user's current playback state.
Returns:
JSON string containing current track, device, progress, and playback state.
"""
log_debug("Fetching currently playing track")
result = self._make_request("me/player/currently-playing")
if not result or result.get("success"):
return json.dumps({"message": "Nothing currently playing"}, indent=2)
if "error" in result:
return json.dumps(result, indent=2)
track = result.get("item", {})
response = {
"is_playing": result.get("is_playing"),
"progress_ms": result.get("progress_ms"),
"track": {
"id": track.get("id"),
"name": track.get("name"),
"artists": [a["name"] for a in track.get("artists", [])],
"album": track.get("album", {}).get("name"),
"uri": track.get("uri"),
"duration_ms": track.get("duration_ms"),
}
if track
else None,
"device": result.get("device", {}).get("name"),
}
return json.dumps(response, indent=2)
def search_tracks(
self,
query: str,
max_results: int = 10,
market: Optional[str] = None,
) -> str:
"""Search for tracks on Spotify.
Use this to find songs by name, artist, album, or any combination.
Examples: "happy Eminem", "Coldplay Paradise", "upbeat pop songs"
Args:
query: Search query - can include track name, artist, genre, mood, etc.
max_results: Maximum number of tracks to return (default 10, max 50).
market: Country code for market (e.g., 'US'). Uses default if not specified.
Returns:
JSON string containing list of tracks with id, name, artists, album, uri, and preview_url.
"""
log_debug(f"Searching Spotify for tracks: {query}")
params = {
"q": query,
"type": "track",
"limit": min(max_results, 50),
"market": market or self.default_market,
}
result = self._make_request("search", params=params)
if "error" in result:
return json.dumps(result, indent=2)
tracks = result.get("tracks", {}).get("items", [])
simplified_tracks = [
{
"id": track["id"],
"name": track["name"],
"artists": [artist["name"] for artist in track["artists"]],
"album": track["album"]["name"],
"uri": track["uri"],
"preview_url": track.get("preview_url"),
"popularity": track.get("popularity"),
}
for track in tracks
]
return json.dumps(simplified_tracks, indent=2)
def create_playlist(
self,
name: str,
description: Optional[str] = None,
public: bool = False,
track_uris: Optional[List[str]] = None,
) -> str:
"""Create a new playlist for the current user.
Args:
name: Name of the playlist.
description: Optional description for the playlist.
public: Whether the playlist should be public (default False).
track_uris: Optional list of Spotify track URIs to add initially.
Format: ["spotify:track:xxx", "spotify:track:yyy"]
Returns:
JSON string containing the created playlist details including id, name, and url.
"""
log_debug(f"Creating Spotify playlist: {name}")
# First get the current user's ID
user_response = self._make_request("me")
if "error" in user_response:
return json.dumps(user_response, indent=2)
user_id = user_response["id"]
# Create the playlist
body = {
"name": name,
"description": description or "",
"public": public,
}
playlist = self._make_request(f"users/{user_id}/playlists", method="POST", body=body)
if "error" in playlist:
return json.dumps(playlist, indent=2)
# Add tracks if provided
if track_uris and len(track_uris) > 0:
add_result = self._make_request(
f"playlists/{playlist['id']}/tracks",
method="POST",
body={"uris": track_uris[:100]}, # Spotify allows max 100 per request
)
if "error" in add_result:
playlist["track_add_error"] = add_result["error"]
else:
playlist["tracks_added"] = len(track_uris[:100])
result = {
"id": playlist["id"],
"name": playlist["name"],
"description": playlist.get("description"),
"url": playlist["external_urls"]["spotify"],
"uri": playlist["uri"],
"tracks_added": playlist.get("tracks_added", 0),
}
return json.dumps(result, indent=2)
def add_tracks_to_playlist(
self,
playlist_id: str,
track_uris: List[str],
position: Optional[int] = None,
) -> str:
"""Add tracks to an existing playlist.
Args:
playlist_id: The Spotify ID of the playlist.
track_uris: List of Spotify track URIs to add.
Format: ["spotify:track:xxx", "spotify:track:yyy"]
position: Optional position to insert tracks (0-indexed). Appends to end if not specified.
Returns:
JSON string with success status and snapshot_id.
"""
log_debug(f"Adding {len(track_uris)} tracks to playlist {playlist_id}")
body: dict[str, Any] = {"uris": track_uris[:100]}
if position is not None:
body["position"] = position
result = self._make_request(f"playlists/{playlist_id}/tracks", method="POST", body=body)
if "snapshot_id" in result:
return json.dumps(
{
"success": True,
"tracks_added": len(track_uris[:100]),
"snapshot_id": result["snapshot_id"],
},
indent=2,
)
return json.dumps(result, indent=2)
def remove_tracks_from_playlist(
self,
playlist_id: str,
track_uris: List[str],
) -> str:
"""Remove tracks from a playlist.
Args:
playlist_id: The Spotify ID of the playlist.
track_uris: List of Spotify track URIs to remove.
Format: ["spotify:track:xxx", "spotify:track:yyy"]
Returns:
JSON string with success status and snapshot_id.
"""
log_debug(f"Removing {len(track_uris)} tracks from playlist {playlist_id}")
body = {"tracks": [{"uri": uri} for uri in track_uris]}
result = self._make_request(f"playlists/{playlist_id}/tracks", method="DELETE", body=body)
if "snapshot_id" in result:
return json.dumps(
{
"success": True,
"tracks_removed": len(track_uris),
"snapshot_id": result["snapshot_id"],
},
indent=2,
)
return json.dumps(result, indent=2)
def get_playlist(
self,
playlist_id: str,
include_tracks: bool = True,
) -> str:
"""Get details of a playlist.
Args:
playlist_id: The Spotify ID of the playlist.
include_tracks: Whether to include track listing (default True).
Returns:
JSON string containing playlist details and optionally its tracks.
"""
log_debug(f"Fetching playlist: {playlist_id}")
fields = "id,name,description,public,owner(display_name),external_urls"
if include_tracks:
fields += ",tracks.items(track(id,name,artists(name),uri))"
result = self._make_request(f"playlists/{playlist_id}", params={"fields": fields})
if "error" in result:
return json.dumps(result, indent=2)
playlist_info = {
"id": result["id"],
"name": result["name"],
"description": result.get("description"),
"public": result.get("public"),
"owner": result.get("owner", {}).get("display_name"),
"url": result.get("external_urls", {}).get("spotify"),
}
if include_tracks and "tracks" in result:
playlist_info["tracks"] = [
{
"id": item["track"]["id"],
"name": item["track"]["name"],
"artists": [a["name"] for a in item["track"]["artists"]],
"uri": item["track"]["uri"],
}
for item in result["tracks"]["items"]
if item.get("track")
]
return json.dumps(playlist_info, indent=2)
def update_playlist_details(
        self,
        playlist_id: str,
        name: Optional[str] = None,
        description: Optional[str] = None,
        public: Optional[bool] = None,
    ) -> str:
        """Update a playlist's name, description, or visibility.

        Args:
            playlist_id: The Spotify ID of the playlist.
            name: New name for the playlist (optional).
            description: New description for the playlist (optional).
            public: New visibility setting (optional).

        Returns:
            JSON string with success status.
        """
        log_debug(f"Updating playlist details: {playlist_id}")
        # Collect only the fields that were actually provided.
        updates: dict[str, Any] = {}
        for field_name, field_value in (("name", name), ("description", description), ("public", public)):
            if field_value is not None:
                updates[field_name] = field_value
        if not updates:
            return json.dumps({"error": "No updates provided"}, indent=2)
        result = self._make_request(f"playlists/{playlist_id}", method="PUT", body=updates)
        # Only surface the raw payload when it is an error without a success flag.
        if "error" in result and not result.get("success"):
            return json.dumps(result, indent=2)
        return json.dumps({"success": True, "updated_fields": list(updates.keys())}, indent=2)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/spotify.py",
"license": "Apache License 2.0",
"lines": 748,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
import sqlite3
from contextlib import closing

# Local trace store used by Agno's tracing examples.
db_path = "tmp/traces.db"
table_name = "agno_spans"

# Fix: the original leaked the connection if execute()/commit() raised.
# closing() guarantees conn.close() runs even on error.
# NOTE: table_name is interpolated into the SQL; it is a trusted constant here —
# never substitute untrusted input this way.
with closing(sqlite3.connect(db_path)) as conn:
    conn.execute(f"DROP TABLE IF EXISTS {table_name}")
    conn.commit()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/table.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/os/routers/traces/schemas.py | from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from agno.os.utils import format_duration_ms
class TraceSearchGroupBy(str, Enum):
    """Grouping options for trace search results.

    Inherits from ``str`` so members compare equal to their literal values
    and serialize cleanly in request/response bodies.
    """

    RUN = "run"  # Each result is an individual trace (TraceDetail)
    SESSION = "session"  # Results are aggregated per-session stats (TraceSessionStats)
def _derive_span_type(span: Any) -> str:
"""
Derive the correct span type from span attributes.
OpenInference sets span_kind to:
- AGENT for both agents and teams
- CHAIN for workflows
We use additional context (agno.team.id, agno.workflow.id) to differentiate:
- WORKFLOW: CHAIN spans or spans with agno.workflow.id
- TEAM: AGENT spans with agno.team.id
- AGENT: AGENT spans without agno.team.id
- LLM, TOOL, etc.: unchanged
"""
span_kind = span.attributes.get("openinference.span.kind", "UNKNOWN")
# Check for workflow (CHAIN kind or has workflow.id)
if span_kind == "CHAIN":
return "WORKFLOW"
# Check for team vs agent
if span_kind == "AGENT":
# If it has a team.id attribute, it's a TEAM span
if span.attributes.get("agno.team.id") or span.attributes.get("team.id"):
return "TEAM"
return "AGENT"
# Return original span kind for LLM, TOOL, etc.
return span_kind
class TraceNode(BaseModel):
    """Recursive node structure for rendering trace hierarchy in the frontend"""

    id: str = Field(..., description="Span ID")
    name: str = Field(..., description="Span name (e.g., 'agent.run', 'llm.invoke')")
    type: str = Field(..., description="Span kind (AGENT, TEAM, WORKFLOW, LLM, TOOL)")
    duration: str = Field(..., description="Human-readable duration (e.g., '123ms', '1.5s')")
    start_time: datetime = Field(..., description="Start time (Pydantic auto-serializes to ISO 8601)")
    end_time: datetime = Field(..., description="End time (Pydantic auto-serializes to ISO 8601)")
    status: str = Field(..., description="Status code (OK, ERROR)")
    input: Optional[str] = Field(None, description="Input to the span")
    output: Optional[str] = Field(None, description="Output from the span")
    error: Optional[str] = Field(None, description="Error message if status is ERROR")
    spans: Optional[List["TraceNode"]] = Field(None, description="Child spans in the trace hierarchy")
    step_type: Optional[str] = Field(None, description="Workflow step type (Step, Condition, function, Agent, Team)")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional span attributes and data")
    extra_data: Optional[Dict[str, Any]] = Field(
        None, description="Flexible field for custom attributes and additional data"
    )

    @classmethod
    def from_span(cls, span: Any, spans: Optional[List["TraceNode"]] = None) -> "TraceNode":
        """Build a TraceNode from a raw Span object.

        Extracts input/output, error info (for ERROR spans the output is
        suppressed), and a per-kind metadata dict of the most useful attributes.
        """
        attrs = span.attributes
        raw_kind = attrs.get("openinference.span.kind", "UNKNOWN")

        input_value = attrs.get("input.value")
        output_value = attrs.get("output.value")

        # On error, surface the message and hide any (partial) output.
        error_message = None
        if span.status_code == "ERROR":
            error_message = span.status_message or attrs.get("exception.message")
            output_value = None

        # Per-kind mapping of metadata key -> source attribute; only truthy
        # values are copied, mirroring the attribute availability per span kind.
        kind_attr_map = {
            "AGENT": [("run_id", "agno.run.id")],
            "LLM": [
                ("model", "llm.model_name"),
                ("input_tokens", "llm.token_count.prompt"),
                ("output_tokens", "llm.token_count.completion"),
            ],
            "TOOL": [
                ("tool_name", "tool.name"),
                ("parameters", "tool.parameters"),
            ],
            "CHAIN": [
                ("description", "agno.workflow.description"),
                ("steps_count", "agno.workflow.steps_count"),
                ("steps", "agno.workflow.steps"),
                ("step_types", "agno.workflow.step_types"),
            ],
        }
        meta: Dict[str, Any] = {}
        for meta_key, attr_key in kind_attr_map.get(raw_kind, []):
            value = attrs.get(attr_key)
            if value:
                meta[meta_key] = value
        # Session/user context applies to every span kind.
        for meta_key, attr_key in (("session_id", "session.id"), ("user_id", "user.id")):
            value = attrs.get(attr_key)
            if value:
                meta[meta_key] = value

        # datetime fields are passed through; Pydantic serializes them to ISO 8601.
        return cls(
            id=span.span_id,
            name=span.name,
            type=_derive_span_type(span),
            duration=format_duration_ms(span.duration_ms),
            start_time=span.start_time,
            end_time=span.end_time,
            status=span.status_code,
            input=input_value,
            output=output_value,
            error=error_message,
            spans=spans,
            step_type=None,  # assigned later by _build_span_tree for workflow steps
            metadata=meta or None,
            extra_data=None,
        )
class TraceSummary(BaseModel):
    """Summary information for trace list view"""

    trace_id: str = Field(..., description="Unique trace identifier")
    name: str = Field(..., description="Trace name (usually root span name)")
    status: str = Field(..., description="Overall status (OK, ERROR, UNSET)")
    duration: str = Field(..., description="Human-readable total duration")
    start_time: datetime = Field(..., description="Trace start time (Pydantic auto-serializes to ISO 8601)")
    end_time: datetime = Field(..., description="Trace end time (Pydantic auto-serializes to ISO 8601)")
    total_spans: int = Field(..., description="Total number of spans in this trace")
    error_count: int = Field(..., description="Number of spans with errors")
    input: Optional[str] = Field(None, description="Input to the agent")
    run_id: Optional[str] = Field(None, description="Associated run ID")
    session_id: Optional[str] = Field(None, description="Associated session ID")
    user_id: Optional[str] = Field(None, description="Associated user ID")
    agent_id: Optional[str] = Field(None, description="Associated agent ID")
    team_id: Optional[str] = Field(None, description="Associated team ID")
    workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
    created_at: datetime = Field(..., description="Time when trace was created (Pydantic auto-serializes to ISO 8601)")

    @classmethod
    def from_trace(cls, trace: Any, input: Optional[str] = None) -> "TraceSummary":
        """Build a summary row from a Trace record.

        Most fields are copied one-to-one from the trace; only ``duration`` is
        derived (formatted from duration_ms) and ``input`` is supplied by the
        caller. Datetimes pass through and are serialized to ISO 8601 by Pydantic.
        """
        passthrough = {
            field: getattr(trace, field)
            for field in (
                "trace_id",
                "name",
                "status",
                "start_time",
                "end_time",
                "total_spans",
                "error_count",
                "run_id",
                "session_id",
                "user_id",
                "agent_id",
                "team_id",
                "workflow_id",
                "created_at",
            )
        }
        return cls(duration=format_duration_ms(trace.duration_ms), input=input, **passthrough)
class TraceSessionStats(BaseModel):
    """Aggregated trace statistics grouped by session.

    Returned by trace search when grouping by session
    (``TraceSearchGroupBy.SESSION``) instead of individual traces.
    """

    session_id: str = Field(..., description="Session identifier")
    user_id: Optional[str] = Field(None, description="User ID associated with the session")
    agent_id: Optional[str] = Field(None, description="Agent ID(s) used in the session")
    team_id: Optional[str] = Field(None, description="Team ID associated with the session")
    workflow_id: Optional[str] = Field(None, description="Workflow ID associated with the session")
    total_traces: int = Field(..., description="Total number of traces in this session")
    first_trace_at: datetime = Field(..., description="Time of first trace (Pydantic auto-serializes to ISO 8601)")
    last_trace_at: datetime = Field(..., description="Time of last trace (Pydantic auto-serializes to ISO 8601)")
class TraceDetail(BaseModel):
    """Detailed trace information with hierarchical span tree.

    Built via :meth:`from_trace_and_spans`, which extracts top-level
    input/output/error from the root span and assembles the recursive
    ``tree`` of TraceNode objects from the flat span list.
    """

    trace_id: str = Field(..., description="Unique trace identifier")
    name: str = Field(..., description="Trace name (usually root span name)")
    status: str = Field(..., description="Overall status (OK, ERROR)")
    duration: str = Field(..., description="Human-readable total duration")
    start_time: datetime = Field(..., description="Trace start time (Pydantic auto-serializes to ISO 8601)")
    end_time: datetime = Field(..., description="Trace end time (Pydantic auto-serializes to ISO 8601)")
    total_spans: int = Field(..., description="Total number of spans in this trace")
    error_count: int = Field(..., description="Number of spans with errors")
    input: Optional[str] = Field(None, description="Input to the agent/workflow")
    output: Optional[str] = Field(None, description="Output from the agent/workflow")
    error: Optional[str] = Field(None, description="Error message if status is ERROR")
    run_id: Optional[str] = Field(None, description="Associated run ID")
    session_id: Optional[str] = Field(None, description="Associated session ID")
    user_id: Optional[str] = Field(None, description="Associated user ID")
    agent_id: Optional[str] = Field(None, description="Associated agent ID")
    team_id: Optional[str] = Field(None, description="Associated team ID")
    workflow_id: Optional[str] = Field(None, description="Associated workflow ID")
    created_at: datetime = Field(..., description="Time when trace was created (Pydantic auto-serializes to ISO 8601)")
    tree: List[TraceNode] = Field(..., description="Hierarchical tree of spans (root nodes)")

    @classmethod
    def from_trace_and_spans(cls, trace: Any, spans: List[Any]) -> "TraceDetail":
        """Create TraceDetail from a Trace and its Spans, building the tree structure.

        Args:
            trace: Trace record providing aggregate metadata (status, timing, IDs).
            spans: Flat list of span objects belonging to the trace; the span
                without a parent_span_id is treated as the root.
        """
        # Find root span to extract input/output/error
        root_span = next((s for s in spans if not s.parent_span_id), None)
        trace_input = None
        trace_output = None
        trace_error = None
        if root_span:
            trace_input = root_span.attributes.get("input.value")
            output_val = root_span.attributes.get("output.value")
            # If trace status is ERROR, extract error and set output to None
            if trace.status == "ERROR" or root_span.status_code == "ERROR":
                trace_error = root_span.status_message or root_span.attributes.get("exception.message")
                trace_output = None
            else:
                trace_output = output_val
            span_kind = root_span.attributes.get("openinference.span.kind", "")
            # Workflow (CHAIN) root spans may record no output, or the literal
            # string "None"; in that case borrow the output of the last step.
            output_is_empty = not trace_output or trace_output == "None" or str(trace_output).strip() == "None"
            if span_kind == "CHAIN" and output_is_empty and trace.status != "ERROR":
                # Find direct children of root span (workflow steps)
                root_span_id = root_span.span_id
                direct_children = [s for s in spans if s.parent_span_id == root_span_id]
                if direct_children:
                    # Sort by end_time to get the last executed step
                    direct_children.sort(key=lambda s: s.end_time, reverse=True)
                    last_step = direct_children[0]
                    # Get output from the last step
                    trace_output = last_step.attributes.get("output.value")
        # Calculate total tokens from all LLM spans
        total_input_tokens = 0
        total_output_tokens = 0
        for span in spans:
            if span.attributes.get("openinference.span.kind") == "LLM":
                input_tokens = span.attributes.get("llm.token_count.prompt", 0)
                output_tokens = span.attributes.get("llm.token_count.completion", 0)
                if input_tokens:
                    total_input_tokens += input_tokens
                if output_tokens:
                    total_output_tokens += output_tokens
        # Build span tree with token totals
        span_tree = cls._build_span_tree(
            spans,
            total_input_tokens,
            total_output_tokens,
            trace_start_time=trace.start_time,
            trace_end_time=trace.end_time,
            trace_duration_ms=trace.duration_ms,
        )
        # Use datetime objects directly (Pydantic will auto-serialize to ISO 8601)
        return cls(
            trace_id=trace.trace_id,
            name=trace.name,
            status=trace.status,
            duration=format_duration_ms(trace.duration_ms),
            start_time=trace.start_time,
            end_time=trace.end_time,
            total_spans=trace.total_spans,
            error_count=trace.error_count,
            input=trace_input,
            output=trace_output,
            error=trace_error,
            run_id=trace.run_id,
            session_id=trace.session_id,
            user_id=trace.user_id,
            agent_id=trace.agent_id,
            team_id=trace.team_id,
            workflow_id=trace.workflow_id,
            created_at=trace.created_at,
            tree=span_tree,
        )

    @staticmethod
    def _build_span_tree(
        spans: List[Any],
        total_input_tokens: int,
        total_output_tokens: int,
        trace_start_time: Optional[datetime] = None,
        trace_end_time: Optional[datetime] = None,
        trace_duration_ms: Optional[int] = None,
    ) -> List[TraceNode]:
        """Build hierarchical tree from flat list of spans

        Args:
            spans: List of span objects
            total_input_tokens: Total input tokens across all spans
            total_output_tokens: Total output tokens across all spans
            trace_start_time: Corrected start time from trace aggregation
            trace_end_time: Corrected end time from trace aggregation
            trace_duration_ms: Corrected duration from trace aggregation

        Returns:
            Root TraceNode(s) with children nested recursively; roots carry the
            aggregate token totals and trace-level timing in their metadata.
        """
        if not spans:
            return []
        # Create a map of parent_id -> list of spans (roots are keyed under None)
        spans_map: Dict[Optional[str], List[Any]] = {}
        for span in spans:
            parent_id = span.parent_span_id
            if parent_id not in spans_map:
                spans_map[parent_id] = []
            spans_map[parent_id].append(span)
        # Extract step_types list from workflow root span for index-based matching
        step_types_list: List[str] = []
        root_spans = spans_map.get(None, [])
        for root_span in root_spans:
            span_kind = root_span.attributes.get("openinference.span.kind", "")
            if span_kind == "CHAIN":
                step_types = root_span.attributes.get("agno.workflow.step_types", [])
                if step_types:
                    step_types_list = list(step_types)
                break  # Use first workflow root span's step_types
        # Recursive function to build tree for a span
        # step_index is used to track position within direct children of root (workflow steps)
        def build_node(span: Any, is_root: bool = False, step_index: Optional[int] = None) -> TraceNode:
            span_id = span.span_id
            children_spans = spans_map.get(span_id, [])
            # Sort children spans by start time
            if children_spans:
                children_spans.sort(key=lambda s: s.start_time)
            # Recursively build spans
            # For root span's direct children (workflow steps), pass the index
            # so each step can be labeled with its declared step_type below.
            children_nodes: Optional[List[TraceNode]] = None
            if is_root and step_types_list:
                children_nodes = []
                for idx, child in enumerate(children_spans):
                    children_nodes.append(build_node(child, step_index=idx))
            elif children_spans:
                children_nodes = [build_node(child) for child in children_spans]
            # For root span, create custom metadata with token totals
            if is_root:
                # Build simplified metadata for root with token totals
                root_metadata: Dict[str, Any] = {}
                if total_input_tokens > 0:
                    root_metadata["total_input_tokens"] = total_input_tokens
                if total_output_tokens > 0:
                    root_metadata["total_output_tokens"] = total_output_tokens
                # Use trace-level timing if available (may differ from the root
                # span's own timing after trace aggregation corrections)
                start_time = trace_start_time if trace_start_time else span.start_time
                end_time = trace_end_time if trace_end_time else span.end_time
                duration_ms = trace_duration_ms if trace_duration_ms is not None else span.duration_ms
                # Derive the correct span type (AGENT, TEAM, WORKFLOW, etc.)
                span_type = _derive_span_type(span)
                span_kind = span.attributes.get("openinference.span.kind", "UNKNOWN")
                # Add workflow-specific metadata for CHAIN/WORKFLOW spans
                if span_kind == "CHAIN":
                    if workflow_description := span.attributes.get("agno.workflow.description"):
                        root_metadata["description"] = workflow_description
                    if steps_count := span.attributes.get("agno.workflow.steps_count"):
                        root_metadata["steps_count"] = steps_count
                    if steps := span.attributes.get("agno.workflow.steps"):
                        root_metadata["steps"] = steps
                    if step_types := span.attributes.get("agno.workflow.step_types"):
                        root_metadata["step_types"] = step_types
                # Use datetime objects directly (Pydantic will auto-serialize to ISO 8601)
                # Skip input/output/error for root span (already at top level of TraceDetail)
                return TraceNode(
                    id=span.span_id,
                    name=span.name,
                    type=span_type,
                    duration=format_duration_ms(duration_ms),
                    start_time=start_time,
                    end_time=end_time,
                    status=span.status_code,
                    input=None,  # Skip for root span (already at TraceDetail level)
                    output=None,  # Skip for root span (already at TraceDetail level)
                    error=None,  # Skip for root span (already at TraceDetail level)
                    spans=children_nodes if children_nodes else None,
                    metadata=root_metadata if root_metadata else None,
                    extra_data=None,
                )
            else:
                # Create node from span
                node = TraceNode.from_span(span, spans=children_nodes)
                # For workflow step spans (direct children of root), assign step_type by index
                if step_index is not None and step_types_list and step_index < len(step_types_list):
                    node.step_type = step_types_list[step_index]
                return node
        # Sort root spans by start time
        root_spans.sort(key=lambda s: s.start_time)
        # Build tree starting from roots
        return [build_node(root, is_root=True) for root in root_spans]
class TraceSearchRequest(BaseModel):
    """Request body for POST /traces/search with advanced filtering.

    The filter field accepts a FilterExpr DSL dict supporting composable queries
    with AND/OR/NOT logic and operators like EQ, NEQ, GT, GTE, LT, LTE, IN, CONTAINS, STARTSWITH.

    Example for run grouping (default):
        {
            "filter": {
                "op": "AND",
                "conditions": [
                    {"op": "EQ", "key": "status", "value": "OK"},
                    {"op": "CONTAINS", "key": "user_id", "value": "admin"}
                ]
            },
            "group_by": "run",
            "page": 1,
            "limit": 20
        }

    Example for session grouping:
        {
            "filter": {"op": "EQ", "key": "agent_id", "value": "my-agent"},
            "group_by": "session",
            "page": 1,
            "limit": 20
        }
    """

    # Left as None to mean "no filtering"; validated server-side against the DSL.
    filter: Optional[Dict[str, Any]] = Field(
        None,
        description="FilterExpr DSL as JSON dict. Supports operators: EQ, NEQ, GT, GTE, LT, LTE, IN, CONTAINS, STARTSWITH, AND, OR, NOT.",
    )
    group_by: TraceSearchGroupBy = Field(
        default=TraceSearchGroupBy.RUN,
        description="Grouping mode: 'run' returns individual TraceDetail, 'session' returns aggregated TraceSessionStats.",
    )
    # Pagination: 1-indexed page, limit capped at 100 per request.
    page: int = Field(default=1, ge=1, description="Page number (1-indexed)")
    limit: int = Field(default=20, ge=1, le=100, description="Number of traces per page (max 100)")
class FilterFieldSchema(BaseModel):
    """Schema describing a single filterable field for the frontend filter bar.

    Instances are declared statically in TRACE_FILTER_SCHEMA; the frontend uses
    them to build field/operator dropdowns and value inputs.
    """

    key: str = Field(..., description="Column/field name used in filter expressions")
    label: str = Field(..., description="Human-readable display label for the UI")
    type: str = Field(..., description="Field data type: string, number, datetime, enum")
    operators: List[str] = Field(..., description="List of valid filter operators for this field")
    values: Optional[List[str]] = Field(None, description="Allowed enum values (for autocomplete/dropdown)")
class FilterSchemaResponse(BaseModel):
    """Response for the filter schema endpoint. Tells the FE what fields, operators, and values are available."""

    fields: List[FilterFieldSchema] = Field(..., description="Available filterable fields")
    # AND/OR are the only combinators exposed to the filter bar UI.
    logical_operators: List[str] = Field(
        default=["AND", "OR"], description="Logical operators for combining filter clauses"
    )
# -- Trace filter schema definition --
# Most ID-style string fields share one operator set, and the timestamp fields
# share one range-operator set, so those entries are generated programmatically.
_STRING_ID_OPERATORS = ["EQ", "NEQ", "CONTAINS", "STARTSWITH", "IN"]
_RANGE_OPERATORS = ["GT", "GTE", "LT", "LTE"]

TRACE_FILTER_SCHEMA = FilterSchemaResponse(
    fields=[
        FilterFieldSchema(
            key="status",
            label="Status",
            type="enum",
            operators=["EQ", "NEQ", "IN"],
            values=["OK", "ERROR"],
        ),
        *(
            FilterFieldSchema(key=key, label=label, type="string", operators=list(_STRING_ID_OPERATORS))
            for key, label in [
                ("user_id", "User ID"),
                ("agent_id", "Agent ID"),
                ("team_id", "Team ID"),
                ("workflow_id", "Workflow ID"),
                ("session_id", "Session ID"),
                ("run_id", "Run ID"),
            ]
        ),
        # Trace name does not support IN (no multi-select on free-text names).
        FilterFieldSchema(
            key="name",
            label="Trace Name",
            type="string",
            operators=["EQ", "NEQ", "CONTAINS", "STARTSWITH"],
        ),
        FilterFieldSchema(
            key="trace_id",
            label="Trace ID",
            type="string",
            operators=list(_STRING_ID_OPERATORS),
        ),
        FilterFieldSchema(
            key="duration_ms",
            label="Duration (ms)",
            type="number",
            operators=["EQ", "NEQ", "GT", "GTE", "LT", "LTE"],
        ),
        *(
            FilterFieldSchema(key=key, label=label, type="datetime", operators=list(_RANGE_OPERATORS))
            for key, label in [
                ("start_time", "Start Time"),
                ("end_time", "End Time"),
                ("created_at", "Created At"),
            ]
        ),
    ],
    logical_operators=["AND", "OR"],
)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/traces/schemas.py",
"license": "Apache License 2.0",
"lines": 495,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/os/routers/traces/traces.py | import logging
from typing import Optional, Union
from fastapi import Depends, HTTPException, Query, Request
from fastapi.routing import APIRouter
from agno.db.base import AsyncBaseDb, BaseDb
from agno.os.auth import get_auth_token_from_request, get_authentication_dependency
from agno.os.routers.traces.schemas import (
TRACE_FILTER_SCHEMA,
FilterSchemaResponse,
TraceDetail,
TraceNode,
TraceSearchGroupBy,
TraceSearchRequest,
TraceSessionStats,
TraceSummary,
)
from agno.os.schema import (
BadRequestResponse,
InternalServerErrorResponse,
NotFoundResponse,
PaginatedResponse,
PaginationInfo,
UnauthenticatedResponse,
ValidationErrorResponse,
)
from agno.os.settings import AgnoAPISettings
from agno.os.utils import get_db, timestamp_to_datetime
from agno.remote.base import RemoteDb
from agno.utils.log import log_error
logger = logging.getLogger(__name__)
def get_traces_router(
    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]], settings: AgnoAPISettings = AgnoAPISettings(), **kwargs
) -> APIRouter:
    """Create traces router with comprehensive OpenAPI documentation for trace endpoints."""
    # Shared error responses advertised by every trace endpoint.
    error_responses = {
        400: {"description": "Bad Request", "model": BadRequestResponse},
        401: {"description": "Unauthorized", "model": UnauthenticatedResponse},
        404: {"description": "Not Found", "model": NotFoundResponse},
        422: {"description": "Validation Error", "model": ValidationErrorResponse},
        500: {"description": "Internal Server Error", "model": InternalServerErrorResponse},
    }
    traces_router = APIRouter(
        dependencies=[Depends(get_authentication_dependency(settings))],
        tags=["Traces"],
        responses=error_responses,
    )
    return attach_routes(router=traces_router, dbs=dbs)
def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBaseDb, RemoteDb]]]) -> APIRouter:
@router.get(
"/traces",
response_model=PaginatedResponse[TraceSummary],
response_model_exclude_none=True,
tags=["Traces"],
operation_id="get_traces",
summary="List Traces",
description=(
"Retrieve a paginated list of execution traces with optional filtering.\n\n"
"**Traces provide observability into:**\n"
"- Agent execution flows\n"
"- Model invocations and token usage\n"
"- Tool calls and their results\n"
"- Errors and performance bottlenecks\n\n"
"**Filtering Options:**\n"
"- By run, session, user, or agent ID\n"
"- By status (OK, ERROR)\n"
"- By time range\n\n"
"**Pagination:**\n"
"- Use `page` (1-indexed) and `limit` parameters\n"
"- Response includes pagination metadata (total_pages, total_count, etc.)\n\n"
"**Response Format:**\n"
"Returns summary information for each trace. Use GET `/traces/{trace_id}` for detailed hierarchy."
),
responses={
200: {
"description": "List of traces retrieved successfully",
"content": {
"application/json": {
"example": {
"data": [
{
"trace_id": "a1b2c3d4",
"name": "Stock_Price_Agent.run",
"status": "OK",
"duration": "1.2s",
"start_time": "2025-11-19T10:30:00.000000+00:00",
"total_spans": 4,
"error_count": 0,
"input": "What is the stock price of NVDA?",
"run_id": "run123",
"session_id": "session456",
"user_id": "user789",
"agent_id": "agent_stock",
"team_id": None,
"workflow_id": None,
"created_at": "2025-11-19T10:30:00+00:00",
}
],
"meta": {
"page": 1,
"limit": 20,
"total_pages": 5,
"total_count": 95,
},
}
}
},
}
},
)
async def get_traces(
request: Request,
run_id: Optional[str] = Query(default=None, description="Filter by run ID"),
session_id: Optional[str] = Query(default=None, description="Filter by session ID"),
user_id: Optional[str] = Query(default=None, description="Filter by user ID"),
agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
team_id: Optional[str] = Query(default=None, description="Filter by team ID"),
workflow_id: Optional[str] = Query(default=None, description="Filter by workflow ID"),
status: Optional[str] = Query(default=None, description="Filter by status (OK, ERROR)"),
start_time: Optional[str] = Query(
default=None,
description="Filter traces starting after this time (ISO 8601 format with timezone, e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30'). Times are converted to UTC for comparison.",
),
end_time: Optional[str] = Query(
default=None,
description="Filter traces ending before this time (ISO 8601 format with timezone, e.g., '2025-11-19T11:00:00Z' or '2025-11-19T16:30:00+05:30'). Times are converted to UTC for comparison.",
),
page: int = Query(default=1, description="Page number (1-indexed)", ge=0),
limit: int = Query(default=20, description="Number of traces per page", ge=1),
db_id: Optional[str] = Query(default=None, description="Database ID to query traces from"),
):
"""Get list of traces with optional filters and pagination"""
import time as time_module
# Get database using db_id or default to first available
db = await get_db(dbs, db_id)
if isinstance(db, RemoteDb):
auth_token = get_auth_token_from_request(request)
headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
return await db.get_traces(
run_id=run_id,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
status=status,
start_time=start_time,
end_time=end_time,
limit=limit,
page=page,
db_id=db_id,
headers=headers,
)
try:
start_time_ms = time_module.time() * 1000
# Convert ISO datetime strings to UTC datetime objects
start_time_dt = timestamp_to_datetime(start_time, "start_time") if start_time else None
end_time_dt = timestamp_to_datetime(end_time, "end_time") if end_time else None
if isinstance(db, AsyncBaseDb):
traces, total_count = await db.get_traces(
run_id=run_id,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
status=status,
start_time=start_time_dt,
end_time=end_time_dt,
limit=limit,
page=page,
)
else:
traces, total_count = db.get_traces(
run_id=run_id,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
workflow_id=workflow_id,
status=status,
start_time=start_time_dt,
end_time=end_time_dt,
limit=limit,
page=page,
)
end_time_ms = time_module.time() * 1000
search_time_ms = round(end_time_ms - start_time_ms, 2)
# Calculate total pages
total_pages = (total_count + limit - 1) // limit if limit > 0 else 0
trace_inputs = {}
for trace in traces:
if isinstance(db, AsyncBaseDb):
spans = await db.get_spans(trace_id=trace.trace_id)
else:
spans = db.get_spans(trace_id=trace.trace_id)
# Find root span and extract input
root_span = next((s for s in spans if not s.parent_span_id), None)
if root_span and hasattr(root_span, "attributes"):
trace_inputs[trace.trace_id] = root_span.attributes.get("input.value")
# Build response
trace_summaries = [
TraceSummary.from_trace(trace, input=trace_inputs.get(trace.trace_id)) for trace in traces
]
return PaginatedResponse(
data=trace_summaries,
meta=PaginationInfo(
page=page,
limit=limit,
total_pages=total_pages,
total_count=total_count,
search_time_ms=search_time_ms,
),
)
except Exception as e:
log_error(f"Error retrieving traces: {e}")
raise HTTPException(status_code=500, detail=f"Error retrieving traces: {str(e)}")
@router.get(
"/traces/filter-schema",
response_model=FilterSchemaResponse,
tags=["Traces"],
operation_id="get_traces_filter_schema",
summary="Get Trace Filter Schema",
description=(
"Returns the available filterable fields, their types, valid operators, and enum values.\n\n"
"The frontend uses this to dynamically build the filter bar UI:\n"
"- Field dropdown populated from `fields[].key`\n"
"- Operator dropdown changes per field type\n"
"- Value input shows autocomplete for enum fields (e.g., status)\n"
"- Logical operators (AND, OR) for combining clauses"
),
)
    async def get_traces_filter_schema():
        """Return the filter schema for traces (fields, operators, enum values)"""
        # Static module-level constant built once at import time; safe to return as-is.
        return TRACE_FILTER_SCHEMA
@router.get(
"/traces/{trace_id}",
response_model=Union[TraceDetail, TraceNode],
response_model_exclude_none=True,
tags=["Traces"],
operation_id="get_trace",
summary="Get Trace or Span Detail",
description=(
"Retrieve detailed trace information with hierarchical span tree, or a specific span within the trace.\n\n"
"**Without span_id parameter:**\n"
"Returns the full trace with hierarchical span tree:\n"
"- Trace metadata (ID, status, duration, context)\n"
"- Hierarchical tree of all spans\n"
"- Each span includes timing, status, and type-specific metadata\n\n"
"**With span_id parameter:**\n"
"Returns details for a specific span within the trace:\n"
"- Span metadata (ID, name, type, timing)\n"
"- Status and error information\n"
"- Type-specific attributes (model, tokens, tool params, etc.)\n\n"
"**Span Hierarchy (full trace):**\n"
"The `tree` field contains root spans, each with potential `children`.\n"
"This recursive structure represents the execution flow:\n"
"```\n"
"Agent.run (root)\n"
" ├─ LLM.invoke\n"
" ├─ Tool.execute\n"
" │ └─ LLM.invoke (nested)\n"
" └─ LLM.invoke\n"
"```\n\n"
"**Span Types:**\n"
"- `AGENT`: Agent execution with input/output\n"
"- `LLM`: Model invocations with tokens and prompts\n"
"- `TOOL`: Tool calls with parameters and results"
),
responses={
200: {
"description": "Trace or span detail retrieved successfully",
"content": {
"application/json": {
"examples": {
"full_trace": {
"summary": "Full trace with hierarchy (no span_id)",
"value": {
"trace_id": "a1b2c3d4",
"name": "Stock_Price_Agent.run",
"status": "OK",
"duration": "1.2s",
"start_time": "2025-11-19T10:30:00.000000+00:00",
"end_time": "2025-11-19T10:30:01.200000+00:00",
"total_spans": 4,
"error_count": 0,
"input": "What is Tesla stock price?",
"output": "The current price of Tesla (TSLA) is $245.67.",
"error": None,
"run_id": "run123",
"session_id": "session456",
"user_id": "user789",
"agent_id": "stock_agent",
"team_id": None,
"workflow_id": None,
"created_at": "2025-11-19T10:30:00+00:00",
"tree": [
{
"id": "span1",
"name": "Stock_Price_Agent.run",
"type": "AGENT",
"duration": "1.2s",
"status": "OK",
"input": None,
"output": None,
"error": None,
"spans": [],
}
],
},
},
"single_span": {
"summary": "Single span detail (with span_id)",
"value": {
"id": "span2",
"name": "gpt-4o-mini.invoke",
"type": "LLM",
"duration": "800ms",
"status": "OK",
"metadata": {"model": "gpt-4o-mini", "input_tokens": 120},
},
},
}
}
},
},
404: {"description": "Trace or span not found", "model": NotFoundResponse},
},
)
    async def get_trace(
        request: Request,
        trace_id: str,
        span_id: Optional[str] = Query(default=None, description="Optional: Span ID to retrieve specific span"),
        run_id: Optional[str] = Query(default=None, description="Optional: Run ID to retrieve trace for"),
        db_id: Optional[str] = Query(default=None, description="Database ID to query trace from"),
    ):
        """Get detailed trace with hierarchical span tree, or a specific span within the trace.

        Args:
            request: Incoming request; used to forward the auth header when the DB is remote.
            trace_id: Identifier of the trace to fetch.
            span_id: When set, only this single span is returned (as a TraceNode, no children).
            run_id: Optional run identifier passed through to the trace lookup.
            db_id: Optional database identifier; defaults to the first available database.

        Returns:
            TraceDetail (full trace with span hierarchy) or TraceNode (single span).

        Raises:
            HTTPException: 404 when the trace/span is missing or the span belongs to a
                different trace; 500 on any unexpected error.
        """
        # Get database using db_id or default to first available
        db = await get_db(dbs, db_id)
        # Remote databases proxy the whole request upstream, forwarding the caller's
        # bearer token, and return the upstream response unchanged.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.get_trace(
                trace_id=trace_id,
                span_id=span_id,
                run_id=run_id,
                db_id=db_id,
                headers=headers,
            )
        try:
            # If span_id is provided, return just that span
            if span_id:
                # Sync and async DB implementations expose the same method names,
                # so dispatch on the concrete base class.
                if isinstance(db, AsyncBaseDb):
                    span = await db.get_span(span_id)
                else:
                    span = db.get_span(span_id)

                if span is None:
                    raise HTTPException(status_code=404, detail="Span not found")

                # Verify the span belongs to the requested trace
                if span.trace_id != trace_id:
                    raise HTTPException(status_code=404, detail=f"Span {span_id} does not belong to trace {trace_id}")

                # Convert to TraceNode (without children since we're fetching a single span)
                return TraceNode.from_span(span, spans=None)

            # Otherwise, return full trace with hierarchy
            # Get trace
            if isinstance(db, AsyncBaseDb):
                trace = await db.get_trace(trace_id=trace_id, run_id=run_id)
            else:
                trace = db.get_trace(trace_id=trace_id, run_id=run_id)

            if trace is None:
                raise HTTPException(status_code=404, detail="Trace not found")

            # Get all spans for this trace
            if isinstance(db, AsyncBaseDb):
                spans = await db.get_spans(trace_id=trace_id)
            else:
                spans = db.get_spans(trace_id=trace_id)

            # Build hierarchical response
            return TraceDetail.from_trace_and_spans(trace, spans)

        except HTTPException:
            # Re-raise intentional 404s unchanged so they are not masked as 500s below.
            raise
        except Exception as e:
            log_error(f"Error retrieving trace {trace_id}: {e}")
            raise HTTPException(status_code=500, detail=f"Error retrieving trace: {str(e)}")
@router.get(
"/trace_session_stats",
response_model=PaginatedResponse[TraceSessionStats],
response_model_exclude_none=True,
tags=["Traces"],
operation_id="get_trace_stats",
summary="Get Trace Statistics by Session",
description=(
"Retrieve aggregated trace statistics grouped by session ID with pagination.\n\n"
"**Provides insights into:**\n"
"- Total traces per session\n"
"- First and last trace timestamps per session\n"
"- Associated user and agent information\n\n"
"**Filtering Options:**\n"
"- By user ID\n"
"- By agent ID\n\n"
"**Use Cases:**\n"
"- Monitor session-level activity\n"
"- Track conversation flows\n"
"- Identify high-activity sessions\n"
"- Analyze user engagement patterns"
),
responses={
200: {
"description": "Trace statistics retrieved successfully",
"content": {
"application/json": {
"example": {
"data": [
{
"session_id": "37029bc6-1794-4ba8-a629-1efedc53dcad",
"user_id": "kaustubh@agno.com",
"agent_id": "hackernews-agent",
"team_id": None,
"total_traces": 5,
"first_trace_at": "2025-11-19T10:15:16+00:00",
"last_trace_at": "2025-11-19T10:21:30+00:00",
}
],
"meta": {
"page": 1,
"limit": 20,
"total_pages": 3,
"total_count": 45,
},
}
}
},
},
500: {"description": "Failed to retrieve statistics", "model": InternalServerErrorResponse},
},
)
    async def get_trace_stats(
        request: Request,
        user_id: Optional[str] = Query(default=None, description="Filter by user ID"),
        agent_id: Optional[str] = Query(default=None, description="Filter by agent ID"),
        team_id: Optional[str] = Query(default=None, description="Filter by team ID"),
        workflow_id: Optional[str] = Query(default=None, description="Filter by workflow ID"),
        start_time: Optional[str] = Query(
            default=None,
            description="Filter sessions with traces created after this time (ISO 8601 format with timezone, e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30'). Times are converted to UTC for comparison.",
        ),
        end_time: Optional[str] = Query(
            default=None,
            description="Filter sessions with traces created before this time (ISO 8601 format with timezone, e.g., '2025-11-19T11:00:00Z' or '2025-11-19T16:30:00+05:30'). Times are converted to UTC for comparison.",
        ),
        page: int = Query(default=1, description="Page number (1-indexed)", ge=1),
        limit: int = Query(default=20, description="Number of sessions per page", ge=1),
        db_id: Optional[str] = Query(default=None, description="Database ID to query statistics from"),
    ):
        """Get trace statistics grouped by session.

        Returns a PaginatedResponse[TraceSessionStats] with one entry per session,
        optionally filtered by user/agent/team/workflow and a time window.

        Raises:
            HTTPException: 500 on any failure while querying or converting statistics.
        """
        # Local import: only needed here to measure query wall time.
        import time as time_module

        # Get database using db_id or default to first available
        db = await get_db(dbs, db_id)
        # Remote databases proxy the request upstream (forwarding auth) and
        # return the upstream response unchanged.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.get_trace_session_stats(
                user_id=user_id,
                agent_id=agent_id,
                team_id=team_id,
                workflow_id=workflow_id,
                start_time=start_time,
                end_time=end_time,
                limit=limit,
                page=page,
                db_id=db_id,
                headers=headers,
            )
        try:
            start_time_ms = time_module.time() * 1000

            # Convert ISO datetime strings to UTC datetime objects
            start_time_dt = timestamp_to_datetime(start_time, "start_time") if start_time else None
            end_time_dt = timestamp_to_datetime(end_time, "end_time") if end_time else None

            # Sync and async DBs share the same method signature; dispatch on type.
            if isinstance(db, AsyncBaseDb):
                stats_list, total_count = await db.get_trace_stats(
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    workflow_id=workflow_id,
                    start_time=start_time_dt,
                    end_time=end_time_dt,
                    limit=limit,
                    page=page,
                )
            else:
                stats_list, total_count = db.get_trace_stats(
                    user_id=user_id,
                    agent_id=agent_id,
                    team_id=team_id,
                    workflow_id=workflow_id,
                    start_time=start_time_dt,
                    end_time=end_time_dt,
                    limit=limit,
                    page=page,
                )

            end_time_ms = time_module.time() * 1000
            search_time_ms = round(end_time_ms - start_time_ms, 2)

            # Calculate total pages (ceiling division; 0 pages when limit is 0)
            total_pages = (total_count + limit - 1) // limit if limit > 0 else 0

            # Convert stats to response models (Pydantic auto-serializes datetime to ISO 8601)
            stats_response = [
                TraceSessionStats(
                    session_id=stat["session_id"],
                    user_id=stat.get("user_id"),
                    agent_id=stat.get("agent_id"),
                    team_id=stat.get("team_id"),
                    workflow_id=stat.get("workflow_id"),
                    total_traces=stat["total_traces"],
                    first_trace_at=stat["first_trace_at"],
                    last_trace_at=stat["last_trace_at"],
                )
                for stat in stats_list
            ]

            return PaginatedResponse(
                data=stats_response,
                meta=PaginationInfo(
                    page=page,
                    limit=limit,
                    total_pages=total_pages,
                    total_count=total_count,
                    search_time_ms=search_time_ms,
                ),
            )
        except Exception as e:
            log_error(f"Error retrieving trace statistics: {e}")
            raise HTTPException(status_code=500, detail=f"Error retrieving statistics: {str(e)}")
@router.post(
"/traces/search",
response_model=Union[PaginatedResponse[TraceDetail], PaginatedResponse[TraceSessionStats]],
response_model_exclude_none=True,
tags=["Traces"],
operation_id="search_traces",
summary="Search Traces with Advanced Filters",
description=(
"Search traces using the FilterExpr DSL for complex, composable queries.\n\n"
"**Group By Mode:**\n"
"- `run` (default): Returns `PaginatedResponse[TraceDetail]` with full span trees\n"
"- `session`: Returns `PaginatedResponse[TraceSessionStats]` with aggregated session stats\n\n"
"**Supported Operators:**\n"
"- Comparison: `EQ`, `NEQ`, `GT`, `GTE`, `LT`, `LTE`\n"
"- Inclusion: `IN`\n"
"- String matching: `CONTAINS` (case-insensitive substring), `STARTSWITH` (prefix)\n"
"- Logical: `AND`, `OR`, `NOT`\n\n"
"**Filterable Fields:**\n"
"trace_id, name, status, start_time, end_time, duration_ms, "
"run_id, session_id, user_id, agent_id, team_id, workflow_id, created_at\n\n"
"**Example Request Body (runs):**\n"
"```json\n"
"{\n"
' "filter": {"op": "EQ", "key": "status", "value": "OK"},\n'
' "group_by": "run",\n'
' "page": 1,\n'
' "limit": 20\n'
"}\n"
"```\n\n"
"**Example Request Body (sessions):**\n"
"```json\n"
"{\n"
' "filter": {"op": "CONTAINS", "key": "agent_id", "value": "stock"},\n'
' "group_by": "session",\n'
' "page": 1,\n'
' "limit": 20\n'
"}\n"
"```"
),
responses={
400: {"description": "Invalid filter expression", "model": BadRequestResponse},
},
)
    async def search_traces(
        request: Request,
        body: TraceSearchRequest,
        db_id: Optional[str] = Query(default=None, description="Database ID to query traces from"),
    ):
        """Search traces using advanced FilterExpr DSL queries.

        Returns TraceDetail (group_by=run) or TraceSessionStats (group_by=session).

        Raises:
            HTTPException: 400 when the filter expression fails validation,
                500 on any other failure.
        """
        # Local import: only needed here to measure query wall time.
        import time as time_module

        # Get database using db_id or default to first available
        db = await get_db(dbs, db_id)
        # Remote databases proxy the search upstream (forwarding auth) and
        # return the upstream response unchanged.
        if isinstance(db, RemoteDb):
            auth_token = get_auth_token_from_request(request)
            headers = {"Authorization": f"Bearer {auth_token}"} if auth_token else None
            return await db.search_traces(
                filter_expr=body.filter,
                group_by=body.group_by.value,
                limit=body.limit,
                page=body.page,
                db_id=db_id,
                headers=headers,
            )
        try:
            start_time_ms = time_module.time() * 1000

            # Validate filter expression if provided
            filter_expr_dict = None
            if body.filter:
                from agno.filters import from_dict

                from_dict(body.filter)  # Validate structure; raises ValueError if invalid
                filter_expr_dict = body.filter

            # Branch based on group_by mode
            if body.group_by == TraceSearchGroupBy.SESSION:
                # Session grouping - return TraceSessionStats
                if isinstance(db, AsyncBaseDb):
                    stats_list, total_count = await db.get_trace_stats(
                        filter_expr=filter_expr_dict,
                        limit=body.limit,
                        page=body.page,
                    )
                else:
                    stats_list, total_count = db.get_trace_stats(
                        filter_expr=filter_expr_dict,
                        limit=body.limit,
                        page=body.page,
                    )

                end_time_ms = time_module.time() * 1000
                search_time_ms = round(end_time_ms - start_time_ms, 2)

                # Calculate total pages (ceiling division; 0 pages when limit is 0)
                total_pages = (total_count + body.limit - 1) // body.limit if body.limit > 0 else 0

                # Convert stats to response models
                stats_response = [
                    TraceSessionStats(
                        session_id=stat["session_id"],
                        user_id=stat.get("user_id"),
                        agent_id=stat.get("agent_id"),
                        team_id=stat.get("team_id"),
                        workflow_id=stat.get("workflow_id"),
                        total_traces=stat["total_traces"],
                        first_trace_at=stat["first_trace_at"],
                        last_trace_at=stat["last_trace_at"],
                    )
                    for stat in stats_list
                ]

                return PaginatedResponse(
                    data=stats_response,
                    meta=PaginationInfo(
                        page=body.page,
                        limit=body.limit,
                        total_pages=total_pages,
                        total_count=total_count,
                        search_time_ms=search_time_ms,
                    ),
                )
            else:
                # Run grouping (default) - return TraceDetail
                if isinstance(db, AsyncBaseDb):
                    traces, total_count = await db.get_traces(
                        filter_expr=filter_expr_dict,
                        limit=body.limit,
                        page=body.page,
                    )
                else:
                    traces, total_count = db.get_traces(
                        filter_expr=filter_expr_dict,
                        limit=body.limit,
                        page=body.page,
                    )

                end_time_ms = time_module.time() * 1000
                search_time_ms = round(end_time_ms - start_time_ms, 2)

                # Calculate total pages (ceiling division; 0 pages when limit is 0)
                total_pages = (total_count + body.limit - 1) // body.limit if body.limit > 0 else 0

                # Build full TraceDetail (with span tree) for each trace.
                # NOTE(review): this issues one get_spans query per trace on the page.
                trace_details = []
                for trace in traces:
                    if isinstance(db, AsyncBaseDb):
                        spans = await db.get_spans(trace_id=trace.trace_id)
                    else:
                        spans = db.get_spans(trace_id=trace.trace_id)
                    trace_details.append(TraceDetail.from_trace_and_spans(trace, spans))

                return PaginatedResponse(
                    data=trace_details,
                    meta=PaginationInfo(
                        page=body.page,
                        limit=body.limit,
                        total_pages=total_pages,
                        total_count=total_count,
                        search_time_ms=search_time_ms,
                    ),
                )
        except ValueError as e:
            # Raised by filter validation above; surfaced as a client error.
            raise HTTPException(status_code=400, detail=f"Invalid filter expression: {str(e)}")
        except Exception as e:
            log_error(f"Error searching traces: {e}")
            raise HTTPException(status_code=500, detail=f"Error searching traces: {str(e)}")
return router
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/os/routers/traces/traces.py",
"license": "Apache License 2.0",
"lines": 686,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/tracing/exporter.py | """
Custom OpenTelemetry SpanExporter that writes traces to Agno database.
"""
import asyncio
from collections import defaultdict
from typing import Dict, List, Sequence, Union
from opentelemetry.sdk.trace import ReadableSpan # type: ignore
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult # type: ignore
from agno.db.base import AsyncBaseDb, BaseDb
from agno.remote.base import RemoteDb
from agno.tracing.schemas import Span, create_trace_from_spans
from agno.utils.log import logger
class DatabaseSpanExporter(SpanExporter):
    """Custom OpenTelemetry SpanExporter that writes to Agno database.

    Supports three database flavors:
    - Synchronous databases: written inline from export().
    - Asynchronous databases: scheduled on the running event loop when one
      exists, otherwise executed in a fresh loop via asyncio.run().
    - Remote databases: skipped entirely (they handle their own tracing).
    """

    def __init__(self, db: Union[BaseDb, AsyncBaseDb, RemoteDb]):
        """
        Initialize the DatabaseSpanExporter.

        Args:
            db: Database instance (sync or async) to store traces
        """
        self.db = db
        self._shutdown = False
        # Strong references to in-flight async export tasks. Tasks created
        # with create_task() and not referenced anywhere may be garbage
        # collected before completing, so we hold them until they finish.
        self._pending_tasks: set = set()

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        """
        Export spans to the database.

        This method:
        1. Converts OpenTelemetry spans to Span objects
        2. Groups spans by trace_id
        3. Creates Trace records (one per trace_id)
        4. Creates Span records (multiple per trace_id)

        Args:
            spans: Sequence of OpenTelemetry ReadableSpan objects

        Returns:
            SpanExportResult indicating success or failure
        """
        if self._shutdown:
            logger.warning("DatabaseSpanExporter is shutdown, cannot export spans")
            return SpanExportResult.FAILURE

        if not spans:
            return SpanExportResult.SUCCESS

        try:
            # Convert OpenTelemetry spans to Span objects; a single bad span
            # must not drop the whole batch.
            converted_spans: List[Span] = []
            for span in spans:
                try:
                    converted_spans.append(Span.from_otel_span(span))
                except Exception as e:
                    logger.error(f"Failed to convert span {span.name}: {e}")
                    # Continue processing other spans
                    continue

            if not converted_spans:
                return SpanExportResult.SUCCESS

            # Group spans by trace_id so each trace gets one aggregate record.
            spans_by_trace: Dict[str, List[Span]] = defaultdict(list)
            for converted_span in converted_spans:
                spans_by_trace[converted_span.trace_id].append(converted_span)

            if isinstance(self.db, RemoteDb):
                # Skipping remote database because it handles its own tracing
                pass
            elif isinstance(self.db, AsyncBaseDb):
                self._export_async(spans_by_trace)
            else:
                # Synchronous database
                self._export_sync(spans_by_trace)

            return SpanExportResult.SUCCESS
        except Exception as e:
            logger.error(f"Failed to export spans to database: {e}", exc_info=True)
            return SpanExportResult.FAILURE

    def _export_sync(self, spans_by_trace: Dict[str, List[Span]]) -> None:
        """Export traces and spans to synchronous database.

        Raises:
            Exception: re-raised after logging so export() can report FAILURE.
        """
        try:
            for trace_id, spans in spans_by_trace.items():
                # Create/refresh the aggregate trace record first, then the spans.
                trace = create_trace_from_spans(spans)
                if trace:
                    self.db.upsert_trace(trace)  # type: ignore
                self.db.create_spans(spans)  # type: ignore
        except Exception as e:
            logger.error(f"Failed to export sync traces: {e}", exc_info=True)
            raise

    def _export_async(self, spans_by_trace: Dict[str, List[Span]]) -> None:
        """Bridge the synchronous exporter API onto an async database.

        When called from inside a running event loop, the export coroutine is
        scheduled as a task (fire-and-forget, but with a strong reference kept
        so it is not garbage collected). When no loop is running, the export
        runs to completion in a fresh loop.
        """
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:
            # No running loop in this thread.
            loop = None

        if loop is not None:
            # We're in an async context: schedule the coroutine and keep a
            # reference until it finishes.
            task = loop.create_task(self._do_async_export(spans_by_trace))
            self._pending_tasks.add(task)
            task.add_done_callback(self._pending_tasks.discard)
        else:
            try:
                asyncio.run(self._do_async_export(spans_by_trace))
            except Exception as e:
                logger.error(f"Failed to export async traces: {e}", exc_info=True)

    async def _do_async_export(self, spans_by_trace: Dict[str, List[Span]]) -> None:
        """Actually perform the async export.

        Raises:
            Exception: re-raised after logging (surfaces in the task result).
        """
        try:
            for trace_id, spans in spans_by_trace.items():
                # Create/refresh the aggregate trace record first, then the spans.
                trace = create_trace_from_spans(spans)
                if trace:
                    # The db methods may return a coroutine (async impl) or None.
                    create_trace_result = self.db.upsert_trace(trace)  # type: ignore
                    if create_trace_result is not None:
                        await create_trace_result
                create_spans_result = self.db.create_spans(spans)  # type: ignore
                if create_spans_result is not None:
                    await create_spans_result
        except Exception as e:
            logger.error(f"Failed to do async export: {e}", exc_info=True)
            raise

    def shutdown(self) -> None:
        """Shutdown the exporter; subsequent export() calls will fail fast."""
        self._shutdown = True
        logger.debug("DatabaseSpanExporter shutdown")

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """
        Force flush any pending spans.

        Since we write immediately to the database, this is a no-op.

        Args:
            timeout_millis: Timeout in milliseconds

        Returns:
            True if flush was successful
        """
        return True
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tracing/exporter.py",
"license": "Apache License 2.0",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/tracing/schemas.py | """
Trace data models for Agno tracing.
"""
from dataclasses import asdict, dataclass
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from opentelemetry.sdk.trace import ReadableSpan # type: ignore
from opentelemetry.trace import SpanKind, StatusCode # type: ignore
@dataclass
class Trace:
    """Represents a complete trace (one record per trace_id)"""

    trace_id: str
    name: str  # Name from root span
    status: str  # Overall status: OK, ERROR, UNSET
    start_time: datetime  # Python datetime object
    end_time: datetime  # Python datetime object
    duration_ms: int
    total_spans: int
    error_count: int

    # Context from root span
    run_id: Optional[str]
    session_id: Optional[str]
    user_id: Optional[str]
    agent_id: Optional[str]
    team_id: Optional[str]
    workflow_id: Optional[str]

    created_at: datetime  # Python datetime object

    @staticmethod
    def _coerce_datetime(value: Any, int_is_nanoseconds: bool = True) -> datetime:
        """Normalize a stored timestamp into a datetime.

        Accepts ISO-8601 strings (a trailing 'Z' is normalized to '+00:00')
        and epoch integers (nanoseconds by default; seconds when
        ``int_is_nanoseconds`` is False). Any other value (e.g. an
        already-parsed datetime) is passed through unchanged.
        """
        if isinstance(value, str):
            return datetime.fromisoformat(value.replace("Z", "+00:00"))
        if isinstance(value, int):
            divisor = 1_000_000_000 if int_is_nanoseconds else 1
            return datetime.fromtimestamp(value / divisor, tz=timezone.utc)
        return value

    def to_dict(self) -> Dict[str, Any]:
        """Convert Trace to dictionary for database storage (datetime -> ISO string)"""
        data = asdict(self)
        # Convert datetime objects to ISO format strings for database storage
        data["start_time"] = self.start_time.isoformat()
        data["end_time"] = self.end_time.isoformat()
        data["created_at"] = self.created_at.isoformat()
        return data

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Trace":
        """Create Trace from dictionary (ISO string -> datetime).

        Integer start_time/end_time values are interpreted as epoch
        nanoseconds; integer created_at values as epoch seconds.
        """
        return cls(
            trace_id=data["trace_id"],
            name=data["name"],
            status=data["status"],
            start_time=cls._coerce_datetime(data["start_time"]),
            end_time=cls._coerce_datetime(data["end_time"]),
            duration_ms=data["duration_ms"],
            total_spans=data["total_spans"],
            error_count=data["error_count"],
            run_id=data.get("run_id"),
            session_id=data.get("session_id"),
            user_id=data.get("user_id"),
            agent_id=data.get("agent_id"),
            team_id=data.get("team_id"),
            workflow_id=data.get("workflow_id"),
            created_at=cls._coerce_datetime(data["created_at"], int_is_nanoseconds=False),
        )
@dataclass
class Span:
    """Represents a single span within a trace"""

    span_id: str
    trace_id: str
    parent_span_id: Optional[str]
    name: str
    span_kind: str
    status_code: str
    status_message: Optional[str]
    start_time: datetime  # Python datetime object
    end_time: datetime  # Python datetime object
    duration_ms: int
    attributes: Dict[str, Any]
    created_at: datetime  # Python datetime object

    @staticmethod
    def _coerce_datetime(value: Any, int_is_nanoseconds: bool = True) -> datetime:
        """Normalize a stored timestamp into a datetime.

        Accepts ISO-8601 strings (a trailing 'Z' is normalized to '+00:00')
        and epoch integers (nanoseconds by default; seconds when
        ``int_is_nanoseconds`` is False). Any other value (e.g. an
        already-parsed datetime) is passed through unchanged.
        """
        if isinstance(value, str):
            return datetime.fromisoformat(value.replace("Z", "+00:00"))
        if isinstance(value, int):
            divisor = 1_000_000_000 if int_is_nanoseconds else 1
            return datetime.fromtimestamp(value / divisor, tz=timezone.utc)
        return value

    def to_dict(self) -> Dict[str, Any]:
        """Convert Span to dictionary for database storage (datetime -> ISO string)"""
        data = asdict(self)
        # Convert datetime objects to ISO format strings for database storage
        data["start_time"] = self.start_time.isoformat()
        data["end_time"] = self.end_time.isoformat()
        data["created_at"] = self.created_at.isoformat()
        return data

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Span":
        """Create Span from dictionary (ISO string -> datetime).

        Integer start_time/end_time values are interpreted as epoch
        nanoseconds; integer created_at values as epoch seconds.
        """
        return cls(
            span_id=data["span_id"],
            trace_id=data["trace_id"],
            parent_span_id=data.get("parent_span_id"),
            name=data["name"],
            span_kind=data["span_kind"],
            status_code=data["status_code"],
            status_message=data.get("status_message"),
            start_time=cls._coerce_datetime(data["start_time"]),
            end_time=cls._coerce_datetime(data["end_time"]),
            duration_ms=data["duration_ms"],
            attributes=data.get("attributes", {}),
            created_at=cls._coerce_datetime(data["created_at"], int_is_nanoseconds=False),
        )

    @classmethod
    def from_otel_span(cls, otel_span: "ReadableSpan") -> "Span":
        """Convert OpenTelemetry ReadableSpan to Span.

        Note: the parameter annotation is a forward-reference string so the
        OTel type is not evaluated at class-definition time.
        """
        # Extract span context; fall back to all-zero IDs when absent.
        span_context = otel_span.context
        trace_id = format(span_context.trace_id, "032x") if span_context else "0" * 32
        span_id = format(span_context.span_id, "016x") if span_context else "0" * 16

        # Extract parent span ID if exists
        parent_span_id = None
        if otel_span.parent and otel_span.parent.span_id:
            parent_span_id = format(otel_span.parent.span_id, "016x")

        # Map the OTel span kind enum to its stable string form.
        span_kind_map = {
            SpanKind.INTERNAL: "INTERNAL",
            SpanKind.SERVER: "SERVER",
            SpanKind.CLIENT: "CLIENT",
            SpanKind.PRODUCER: "PRODUCER",
            SpanKind.CONSUMER: "CONSUMER",
        }
        span_kind = span_kind_map.get(otel_span.kind, "INTERNAL")

        # Map the OTel status enum to its stable string form.
        status_code_map = {
            StatusCode.UNSET: "UNSET",
            StatusCode.OK: "OK",
            StatusCode.ERROR: "ERROR",
        }
        status_code = status_code_map.get(otel_span.status.status_code, "UNSET")
        status_message = otel_span.status.description

        # Calculate duration in milliseconds (OTel timestamps are nanoseconds;
        # a missing end_time yields a zero duration).
        start_time_ns = otel_span.start_time or 0
        end_time_ns = otel_span.end_time or start_time_ns
        duration_ms = int((end_time_ns - start_time_ns) / 1_000_000)

        # Convert nanosecond timestamps to datetime objects
        start_time = datetime.fromtimestamp(start_time_ns / 1_000_000_000, tz=timezone.utc)
        end_time = datetime.fromtimestamp(end_time_ns / 1_000_000_000, tz=timezone.utc)

        # Convert attributes to a JSON-serializable dictionary: scalars pass
        # through, sequences become lists, everything else is stringified.
        attributes: Dict[str, Any] = {}
        if otel_span.attributes:
            for key, value in otel_span.attributes.items():
                if isinstance(value, (str, int, float, bool, type(None))):
                    attributes[key] = value
                elif isinstance(value, (list, tuple)):
                    attributes[key] = list(value)
                else:
                    attributes[key] = str(value)

        return cls(
            span_id=span_id,
            trace_id=trace_id,
            parent_span_id=parent_span_id,
            name=otel_span.name,
            span_kind=span_kind,
            status_code=status_code,
            status_message=status_message,
            start_time=start_time,
            end_time=end_time,
            duration_ms=duration_ms,
            attributes=attributes,
            created_at=datetime.now(timezone.utc),
        )
def create_trace_from_spans(spans: List[Span]) -> Optional[Trace]:
    """
    Build the aggregate Trace record for a group of spans sharing one trace_id.

    Args:
        spans: Span objects belonging to the same trace

    Returns:
        The aggregated Trace, or None when no spans were supplied
    """
    if not spans:
        return None

    # The root span (the one without a parent) names the trace and carries
    # the run/session/user context; fall back to the first span if none exists.
    root = next((s for s in spans if not s.parent_span_id), spans[0])

    earliest = min(span.start_time for span in spans)
    latest = max(span.end_time for span in spans)
    failed = sum(1 for span in spans if span.status_code == "ERROR")

    # Context attributes may appear under bare or "agno."-namespaced keys.
    ctx = root.attributes

    return Trace(
        trace_id=spans[0].trace_id,
        name=root.name,
        # A single errored span marks the whole trace as ERROR.
        status="ERROR" if failed > 0 else "OK",
        start_time=earliest,
        end_time=latest,
        duration_ms=int((latest - earliest).total_seconds() * 1000),
        total_spans=len(spans),
        error_count=failed,
        run_id=ctx.get("run_id") or ctx.get("agno.run.id"),
        session_id=ctx.get("session_id") or ctx.get("agno.session.id") or ctx.get("session.id"),
        user_id=ctx.get("user_id") or ctx.get("agno.user.id") or ctx.get("user.id"),
        agent_id=ctx.get("agent_id") or ctx.get("agno.agent.id"),
        team_id=ctx.get("team_id") or ctx.get("agno.team.id"),
        workflow_id=ctx.get("workflow_id") or ctx.get("agno.workflow.id"),
        created_at=datetime.now(timezone.utc),
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tracing/schemas.py",
"license": "Apache License 2.0",
"lines": 234,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/hooks/decorator.py | from functools import wraps
from typing import Any, Callable, TypeVar, Union, overload
# Type variable for better type hints
F = TypeVar("F", bound=Callable[..., Any])
# Attribute name used to mark hooks for background execution
HOOK_RUN_IN_BACKGROUND_ATTR = "_agno_run_in_background"
def _is_async_function(func: Callable) -> bool:
"""
Check if a function is async, even when wrapped by decorators like @staticmethod.
Traverses the full wrapper chain to find the original function.
"""
from inspect import iscoroutinefunction, unwrap
# First, try the standard inspect function on the wrapper
if iscoroutinefunction(func):
return True
# Use unwrap to traverse the full __wrapped__ chain to the original function
try:
original_func = unwrap(func)
if original_func is not func and iscoroutinefunction(original_func):
return True
except ValueError:
# unwrap raises ValueError if it hits a cycle
pass
# Check if the function has CO_COROUTINE flag in its code object
try:
if hasattr(func, "__code__") and func.__code__.co_flags & 0x80: # CO_COROUTINE flag
return True
except (AttributeError, TypeError):
pass
return False
@overload
def hook() -> Callable[[F], F]: ...


@overload
def hook(
    *,
    run_in_background: bool = False,
) -> Callable[[F], F]: ...


@overload
def hook(func: F) -> F: ...


def hook(*args, **kwargs) -> Union[F, Callable[[F], F]]:
    """Decorator that configures how a hook executes.

    Usable bare (``@hook``), with empty parens (``@hook()``), or with
    configuration (``@hook(run_in_background=True)``).

    Args:
        run_in_background: If True, this hook will be scheduled as a FastAPI
            background task when background_tasks is available, regardless of
            the agent/team's run_hooks_in_background setting. This gives
            per-hook control over background execution and is only use-able
            when running with AgentOS.

    Returns:
        Union[F, Callable[[F], F]]: The wrapped hook (bare form) or a
        decorator (configured form).

    Raises:
        ValueError: If an unknown configuration keyword is supplied.

    Examples:
        @hook
        def my_hook(run_output, agent):
            process_output(run_output.content)

        @hook(run_in_background=True)
        async def my_background_hook(run_output, agent):
            await send_async_notification(run_output.content)

        agent = Agent(
            model=OpenAIChat(id="gpt-4o"),
            post_hooks=[my_hook, my_background_hook],
        )
    """
    # Only this keyword is understood; anything else is a caller mistake.
    VALID_KWARGS = frozenset({"run_in_background"})
    invalid_kwargs = set(kwargs.keys()) - VALID_KWARGS
    if invalid_kwargs:
        raise ValueError(
            f"Invalid hook configuration arguments: {invalid_kwargs}. Valid arguments are: {sorted(VALID_KWARGS)}"
        )

    def apply(target: F) -> F:
        # OR-combine with any previously applied @hook decorators: once any
        # layer requests background execution, it stays requested.
        wants_background = kwargs.get("run_in_background", False) or should_run_in_background(target)

        # Pick a wrapper matching the hook's sync/async nature so awaiting
        # behavior is preserved.
        if _is_async_function(target):

            @wraps(target)
            async def hook_wrapper(*call_args: Any, **call_kwargs: Any) -> Any:
                return await target(*call_args, **call_kwargs)
        else:

            @wraps(target)
            def hook_wrapper(*call_args: Any, **call_kwargs: Any) -> Any:  # type: ignore[misc]
                return target(*call_args, **call_kwargs)

        setattr(hook_wrapper, HOOK_RUN_IN_BACKGROUND_ATTR, wants_background)
        return hook_wrapper  # type: ignore

    # Bare usage: @hook applied directly to the function.
    if len(args) == 1 and callable(args[0]) and not kwargs:
        return apply(args[0])
    # Configured usage: @hook(...) must return the actual decorator.
    return apply
def should_run_in_background(hook_func: Callable) -> bool:
    """
    Check if a hook function is marked to run in background.

    Walks the ``__wrapped__`` chain so the marker is found even when the
    @hook decorator is buried under other stacked decorators.

    Args:
        hook_func: The hook function to check

    Returns:
        True if the hook is decorated with @hook(run_in_background=True)
    """
    node = hook_func
    visited: set[int] = set()
    while True:
        # First node carrying the marker wins.
        if hasattr(node, HOOK_RUN_IN_BACKGROUND_ATTR):
            return getattr(node, HOOK_RUN_IN_BACKGROUND_ATTR)
        successor = getattr(node, "__wrapped__", None)
        # Stop at the end of the chain or on a wrapper cycle.
        if successor is None or id(node) in visited:
            return False
        visited.add(id(node))
        node = successor
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/hooks/decorator.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/os/test_backround_tasks.py | """Integration tests for background tasks in AgentOS.
Note on Testing Background Tasks:
When using httpx.AsyncClient with ASGITransport for testing, the ASGI transport
waits for all background tasks to complete before returning the response. This is
by design in the ASGI specification to ensure background tasks finish before app
shutdown. Therefore, we cannot test timing/non-blocking behavior directly.
Instead, we use mocking to verify that hooks are properly added to FastAPI's
BackgroundTasks when run_hooks_in_background=True, which proves they will run
in the background in production environments.
Note: run_hooks_in_background is configured at the AgentOS level (default=False)
and propagated to agents/teams, rather than being set on individual agents.
"""
import asyncio
import json
import time
from typing import Dict
from unittest.mock import patch
import httpx
import pytest
from httpx import ASGITransport
from agno.agent.agent import Agent
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.run.agent import RunOutput
@pytest.fixture
def execution_tracker() -> Dict[str, bool]:
    """Mutable flags shared between fixtures and tests to record hook activity."""
    flag_names = (
        "pre_hook_executed",
        "post_hook_executed",
        "async_post_hook_executed",
        "response_returned",
    )
    return dict.fromkeys(flag_names, False)
@pytest.fixture
def agent_with_hooks(shared_db, execution_tracker):
    """Create an agent with hooks (background mode is set by AgentOS)."""

    async def pre_hook_log(run_input, agent):
        """Pre-hook that logs request."""
        # Pre-hooks run inline (blocking), so this flag is observable as soon
        # as the HTTP response comes back.
        execution_tracker["pre_hook_executed"] = True

    async def post_hook_log(run_output: RunOutput, agent: Agent):
        """Post-hook that runs in background."""
        await asyncio.sleep(0.5)  # Simulate some work
        execution_tracker["post_hook_executed"] = True

    async def async_post_hook_log(run_output: RunOutput, agent: Agent):
        """Async post-hook that runs in background."""
        await asyncio.sleep(0.5)  # Simulate async work
        execution_tracker["async_post_hook_executed"] = True

    # Background execution is not configured here: AgentOS propagates its
    # run_hooks_in_background setting to the agent (see module docstring).
    return Agent(
        name="background-task-agent",
        id="background-task-agent-id",
        model=OpenAIChat(id="gpt-4o"),
        db=shared_db,
        pre_hooks=[pre_hook_log],
        post_hooks=[post_hook_log, async_post_hook_log],
    )
@pytest.fixture
def test_app_with_background(agent_with_hooks):
    """FastAPI app whose AgentOS schedules post-hooks as background tasks."""
    return AgentOS(agents=[agent_with_hooks], run_hooks_in_background=True).get_app()
@pytest.mark.asyncio
async def test_background_hooks_non_streaming(test_app_with_background, agent_with_hooks, execution_tracker):
    """Non-streaming runs: pre-hooks block the request, post-hooks finish in background."""
    transport = ASGITransport(app=test_app_with_background)
    async with httpx.AsyncClient(transport=transport, base_url="http://test") as client:
        response = await client.post(
            f"/agents/{agent_with_hooks.id}/runs",
            data={"message": "Hello, world!", "stream": "false"},
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )

        # The endpoint must answer immediately with run metadata for our agent.
        assert response.status_code == 200
        body = response.json()
        assert body["run_id"] is not None
        assert body["agent_id"] == agent_with_hooks.id
        execution_tracker["response_returned"] = True

        # Pre-hooks always execute inline with the request.
        assert execution_tracker["pre_hook_executed"] is True

        # The post-hooks were scheduled, not awaited: give them time to finish,
        # then confirm both (sync-style and async) background hooks ran.
        await asyncio.sleep(1.5)
        assert execution_tracker["post_hook_executed"] is True
        assert execution_tracker["async_post_hook_executed"] is True
@pytest.mark.asyncio
async def test_background_hooks_streaming(test_app_with_background, agent_with_hooks, execution_tracker):
    """Streaming runs: the SSE stream completes while post-hooks run in background."""
    transport = ASGITransport(app=test_app_with_background)
    async with httpx.AsyncClient(transport=transport, base_url="http://test") as client:
        async with client.stream(
            "POST",
            f"/agents/{agent_with_hooks.id}/runs",
            data={"message": "Hello, world!", "stream": "true"},
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        ) as response:
            assert response.status_code == 200
            assert "text/event-stream" in response.headers.get("content-type", "")

            # Parse SSE lines, collecting every JSON payload before the [DONE] marker.
            chunks = []
            async for line in response.aiter_lines():
                if not line.startswith("data: "):
                    continue
                payload = line[6:]  # strip the 'data: ' prefix
                if payload != "[DONE]":
                    chunks.append(json.loads(payload))

            # The stream must have produced at least one event.
            assert len(chunks) > 0
            execution_tracker["response_returned"] = True

            # Pre-hooks run inline before the stream starts.
            assert execution_tracker["pre_hook_executed"] is True

            # Allow the scheduled background post-hooks to finish, then verify both ran.
            await asyncio.sleep(1.5)
            assert execution_tracker["post_hook_executed"] is True
            assert execution_tracker["async_post_hook_executed"] is True
@pytest.mark.asyncio
async def test_background_hooks_are_added_as_background_tasks(agent_with_hooks):
    """Test that hooks are added to FastAPI background tasks when run_hooks_in_background=True.

    Because ASGITransport awaits background tasks before returning (see module
    docstring), we cannot observe non-blocking timing directly; instead we spy
    on BackgroundTasks.add_task to prove the hook is scheduled as a task.
    """
    tasks_added = []

    async def tracked_post_hook(run_output: RunOutput):
        """A post-hook that we can track."""
        tasks_added.append("tracked_post_hook")

    # Replace post_hooks with our tracked hook
    agent_with_hooks.post_hooks = [tracked_post_hook]

    # Mock BackgroundTasks at the FastAPI level.
    # NOTE: mock_add_task closes over this local; it is rebound to the real
    # add_task below, BEFORE any request can invoke the mock.
    original_add_task = None

    def mock_add_task(self, func, *args, **kwargs):
        """Mock add_task to track what's being added."""
        tasks_added.append(func.__name__)
        # Call the original to maintain functionality
        if original_add_task:
            return original_add_task(self, func, *args, **kwargs)

    # Patch BackgroundTasks.add_task method
    from fastapi import BackgroundTasks

    original_add_task = BackgroundTasks.add_task
    with patch.object(BackgroundTasks, "add_task", mock_add_task):
        # Create app after patching with background hooks enabled
        agent_os = AgentOS(agents=[agent_with_hooks], run_hooks_in_background=True)
        app = agent_os.get_app()

        async with httpx.AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
            response = await client.post(
                f"/agents/{agent_with_hooks.id}/runs",
                data={"message": "Hello!", "stream": "false"},
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )

            # Response should succeed
            assert response.status_code == 200

    # Verify that hooks were added as background tasks
    # The hook function should have been added to tasks
    assert len(tasks_added) > 0, "At least one background task should be added"
    assert "tracked_post_hook" in tasks_added, "Our tracked hook should be in background tasks"
@pytest.mark.asyncio
async def test_background_hooks_with_hook_parameters(test_app_with_background, agent_with_hooks):
    """Background post-hooks should receive the full injected parameter set."""
    received_params = {}

    async def param_checking_hook(run_output: RunOutput, agent: Agent, session, user_id, run_context):
        """Capture which arguments the framework passed into the hook."""
        received_params["run_output"] = run_output is not None
        received_params["agent"] = agent is not None
        received_params["session"] = session is not None
        received_params["user_id"] = user_id
        received_params["run_context"] = run_context is not None

    agent_with_hooks.post_hooks = [param_checking_hook]

    transport = ASGITransport(app=test_app_with_background)
    async with httpx.AsyncClient(transport=transport, base_url="http://test") as client:
        form_data = {
            "message": "Test parameters",
            "user_id": "test-user-123",
            "stream": "false",
        }
        response = await client.post(
            f"/agents/{agent_with_hooks.id}/runs",
            data=form_data,
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        assert response.status_code == 200

        # Give the background hook a moment to run, then inspect what it saw.
        await asyncio.sleep(0.5)
        assert received_params["run_output"] is True
        assert received_params["agent"] is True
        assert received_params["session"] is True
        assert received_params["user_id"] == "test-user-123"
        assert received_params["run_context"] is True
@pytest.mark.asyncio
async def test_agent_without_background_mode(shared_db):
    """Test that hooks execute synchronously when background mode is disabled on AgentOS.

    Uses the same add_task spy as the scheduling test above, but here we
    assert the hook is NOT handed to BackgroundTasks and still runs inline.
    """
    execution_tracker = {"hook_executed": False}
    tasks_added = []

    async def blocking_post_hook(run_output: RunOutput):
        """Post-hook that executes synchronously."""
        execution_tracker["hook_executed"] = True

    agent = Agent(
        name="blocking-agent",
        id="blocking-agent-id",
        model=OpenAIChat(id="gpt-4o"),
        db=shared_db,
        post_hooks=[blocking_post_hook],
    )

    # Mock add_task to track if hooks are added as background tasks.
    # mock_add_task closes over this local; it is rebound to the real
    # add_task below, before any request can invoke the mock.
    original_add_task = None

    def mock_add_task(self, func, *args, **kwargs):
        """Mock add_task to track what's being added."""
        tasks_added.append(func.__name__)
        if original_add_task:
            return original_add_task(self, func, *args, **kwargs)

    from fastapi import BackgroundTasks

    original_add_task = BackgroundTasks.add_task
    with patch.object(BackgroundTasks, "add_task", mock_add_task):
        # Disable background hooks at the AgentOS level
        agent_os = AgentOS(agents=[agent], run_hooks_in_background=False)
        app = agent_os.get_app()

        async with httpx.AsyncClient(transport=ASGITransport(app=app), base_url="http://test") as client:
            response = await client.post(
                f"/agents/{agent.id}/runs",
                data={"message": "Hello!", "stream": "false"},
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )

            # Response should succeed
            assert response.status_code == 200

            # Hook should have executed (synchronously, before the response returned)
            assert execution_tracker["hook_executed"] is True

    # Verify that our post hook was NOT added to background tasks
    # When run_hooks_in_background=False on AgentOS, hooks execute synchronously
    assert "blocking_post_hook" not in tasks_added, "Hook should not be added as background task when disabled"
@pytest.mark.asyncio
async def test_background_hooks_with_multiple_hooks(test_app_with_background, agent_with_hooks):
    """Every registered post-hook (sync and async) should execute in background."""
    execution_count = {"count": 0}

    def hook1(run_output: RunOutput):
        time.sleep(0.3)
        execution_count["count"] += 1

    def hook2(run_output: RunOutput):
        time.sleep(0.3)
        execution_count["count"] += 1

    async def hook3(run_output: RunOutput):
        await asyncio.sleep(0.3)
        execution_count["count"] += 1

    agent_with_hooks.post_hooks = [hook1, hook2, hook3]

    transport = ASGITransport(app=test_app_with_background)
    async with httpx.AsyncClient(transport=transport, base_url="http://test") as client:
        response = await client.post(
            f"/agents/{agent_with_hooks.id}/runs",
            data={"message": "Test multiple hooks", "stream": "false"},
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
        assert response.status_code == 200

        # Wait long enough for all three delayed hooks to finish.
        await asyncio.sleep(1.5)
        assert execution_count["count"] == 3
@pytest.mark.asyncio
async def test_agentos_propagates_background_setting_to_agents(shared_db):
    """Test that AgentOS correctly propagates run_hooks_in_background to agents."""
    agent = Agent(
        name="test-agent",
        id="test-agent-id",
        model=OpenAIChat(id="gpt-4o"),
        db=shared_db,
    )

    # Before AgentOS wiring, the agent-level flag is unset (None), not False
    assert agent._run_hooks_in_background is None

    # When AgentOS is created with run_hooks_in_background=True
    agent_os = AgentOS(agents=[agent], run_hooks_in_background=True)
    agent_os.get_app()

    # get_app() propagates the setting: the agent's flag should now be True
    assert agent._run_hooks_in_background is True
@pytest.mark.asyncio
async def test_agentos_propagates_background_setting_disabled(shared_db):
    """AgentOS(run_hooks_in_background=False) should set the agent-level flag to False."""
    agent = Agent(
        name="test-agent",
        id="test-agent-id",
        model=OpenAIChat(id="gpt-4o"),
        db=shared_db,
    )

    # get_app() pushes the disabled setting down onto the agent.
    AgentOS(agents=[agent], run_hooks_in_background=False).get_app()

    assert agent._run_hooks_in_background is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_backround_tasks.py",
"license": "Apache License 2.0",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_instructions.py | import pytest
from agno.agent import Agent
from agno.team import Team
def test_callable_instructions():
    """A plain callable may be passed as `instructions` to both Agent and Team."""

    def make_instructions(agent: Agent, team: Team) -> str:
        return "You are a helpful assistant."

    agent = Agent(instructions=make_instructions)
    team = Team(instructions=make_instructions, members=[])
    agent.run("Hello")
    team.run("Hello")
    assert True, "No Errors"
@pytest.mark.asyncio
async def test_async_callable_instructions():
    """An async callable may be passed as `instructions` to both Agent and Team."""

    async def make_instructions(agent: Agent, team: Team) -> str:
        return "You are a helpful assistant."

    agent = Agent(instructions=make_instructions)
    team = Team(instructions=make_instructions, members=[])
    await agent.arun("Hello")
    await team.arun("Hello")
def test_callable_system_message():
    """A plain callable may be passed as `system_message` to both Agent and Team."""

    def make_system_message(agent: Agent, team: Team) -> str:
        return "You are a helpful assistant."

    agent = Agent(system_message=make_system_message)
    team = Team(system_message=make_system_message, members=[])
    agent.run("Hello")
    team.run("Hello")
    assert True, "No Errors"
@pytest.mark.asyncio
async def test_async_callable_system_message():
    """An async callable may be passed as `system_message` to both Agent and Team."""

    async def make_system_message(agent: Agent, team: Team) -> str:
        return "You are a helpful assistant."

    agent = Agent(system_message=make_system_message)
    team = Team(system_message=make_system_message, members=[])
    await agent.arun("Hello")
    await team.arun("Hello")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_instructions.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/compression/manager.py | import asyncio
from dataclasses import dataclass, field
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.models.base import Model
from agno.models.message import Message
from agno.models.utils import get_model
from agno.utils.log import log_error, log_info, log_warning
if TYPE_CHECKING:
from agno.metrics import RunMetrics
# Default system prompt for the compression model. Used by CompressionManager
# whenever the user does not supply `compress_tool_call_instructions`.
DEFAULT_COMPRESSION_PROMPT = dedent("""\
You are compressing tool call results to save context space while preserving critical information.
Your goal: Extract only the essential information from the tool output.
ALWAYS PRESERVE:
• Specific facts: numbers, statistics, amounts, prices, quantities, metrics
• Temporal data: dates, times, timestamps (use short format: "Oct 21 2025")
• Entities: people, companies, products, locations, organizations
• Identifiers: URLs, IDs, codes, technical identifiers, versions
• Key quotes, citations, sources (if relevant to agent's task)
COMPRESS TO ESSENTIALS:
• Descriptions: keep only key attributes
• Explanations: distill to core insight
• Lists: focus on most relevant items based on agent context
• Background: minimal context only if critical
REMOVE ENTIRELY:
• Introductions, conclusions, transitions
• Hedging language ("might", "possibly", "appears to")
• Meta-commentary ("According to", "The results show")
• Formatting artifacts (markdown, HTML, JSON structure)
• Redundant or repetitive information
• Generic background not relevant to agent's task
• Promotional language, filler words
EXAMPLE:
Input: "According to recent market analysis and industry reports, OpenAI has made several significant announcements in the technology sector. The company revealed ChatGPT Atlas on October 21, 2025, which represents a new AI-powered browser application that has been specifically designed for macOS users. This browser is strategically positioned to compete with traditional search engines in the market. Additionally, on October 6, 2025, OpenAI launched Apps in ChatGPT, which includes a comprehensive software development kit (SDK) for developers. The company has also announced several initial strategic partners who will be integrating with this new feature, including well-known companies such as Spotify, the popular music streaming service, Zillow, which is a real estate marketplace platform, and Canva, a graphic design platform."
Output: "OpenAI - Oct 21 2025: ChatGPT Atlas (AI browser, macOS, search competitor); Oct 6 2025: Apps in ChatGPT + SDK; Partners: Spotify, Zillow, Canva"
Be concise while retaining all critical facts.
""")
@dataclass
class CompressionManager:
    """Compresses tool-result messages with an LLM to reclaim context space.

    A compression pass is triggered either by a count of uncompressed tool
    results (`compress_tool_results_limit`) or by a total token threshold
    (`compress_token_limit`). Compressed text is written to each message's
    `compressed_content` attribute; the original content is left untouched.
    The sync (`should_compress`/`compress`) and async (`ashould_compress`/
    `acompress`) entry points mirror each other; the async path compresses
    tool results concurrently.
    """

    model: Optional[Model] = None  # model used for compression
    # Master switch: when False, all compression entry points are no-ops.
    compress_tool_results: bool = True
    # Trigger when this many uncompressed tool results have accumulated.
    compress_tool_results_limit: Optional[int] = None
    # Trigger when the full message list reaches this many tokens (needs a model).
    compress_token_limit: Optional[int] = None
    # Optional custom system prompt; defaults to DEFAULT_COMPRESSION_PROMPT.
    compress_tool_call_instructions: Optional[str] = None
    # Aggregate counters: tool_results_compressed, original_size, compressed_size.
    stats: Dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        # If neither trigger is configured, fall back to a count-based limit of 3.
        if self.compress_tool_results_limit is None and self.compress_token_limit is None:
            self.compress_tool_results_limit = 3

    def _is_tool_result_message(self, msg: Message) -> bool:
        # A tool result is any message carrying the "tool" role.
        return msg.role == "tool"

    def should_compress(
        self,
        messages: List[Message],
        tools: Optional[List] = None,
        model: Optional[Model] = None,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    ) -> bool:
        """Check if tool results should be compressed.

        Args:
            messages: List of messages to check.
            tools: List of tools for token counting.
            model: The Agent / Team model.
            response_format: Output schema for accurate token counting.

        Returns:
            True when either the token threshold or the uncompressed
            tool-result count threshold has been reached.
        """
        if not self.compress_tool_results:
            return False

        # Token-based threshold check
        if self.compress_token_limit is not None and model is not None:
            tokens = model.count_tokens(messages, tools, response_format)
            if tokens >= self.compress_token_limit:
                log_info(f"Token limit hit: {tokens} >= {self.compress_token_limit}")
                return True

        # Count-based threshold check: only tool results not yet compressed count.
        if self.compress_tool_results_limit is not None:
            uncompressed_tools_count = len(
                [m for m in messages if self._is_tool_result_message(m) and m.compressed_content is None]
            )
            if uncompressed_tools_count >= self.compress_tool_results_limit:
                log_info(f"Tool count limit hit: {uncompressed_tools_count} >= {self.compress_tool_results_limit}")
                return True

        return False

    def _compress_tool_result(
        self,
        tool_result: Message,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[str]:
        """Compress a single tool result via the compression model.

        Returns the compressed text; on model failure, falls back to the
        (prefixed) original content; returns None when no message or no
        compression model is available.
        """
        if not tool_result:
            return None

        tool_content = f"Tool: {tool_result.tool_name or 'unknown'}\n{tool_result.content}"

        # Resolve the configured model spec into a Model instance (cached on self).
        self.model = get_model(self.model)
        if not self.model:
            log_warning("No compression model available")
            return None

        compression_prompt = self.compress_tool_call_instructions or DEFAULT_COMPRESSION_PROMPT
        compression_message = "Tool Results to Compress: " + tool_content + "\n"

        try:
            response = self.model.response(
                messages=[
                    Message(role="system", content=compression_prompt),
                    Message(role="user", content=compression_message),
                ]
            )

            # Accumulate compression model metrics
            if run_metrics is not None:
                from agno.metrics import ModelType, accumulate_model_metrics

                accumulate_model_metrics(response, self.model, ModelType.COMPRESSION_MODEL, run_metrics)

            return response.content
        except Exception as e:
            log_error(f"Error compressing tool result: {e}")
            # Fall back to the uncompressed content so callers still get a value.
            return tool_content

    def compress(
        self,
        messages: List[Message],
        run_metrics: Optional["RunMetrics"] = None,
    ) -> None:
        """Compress uncompressed tool results in place, sequentially."""
        if not self.compress_tool_results:
            return

        uncompressed_tools = [msg for msg in messages if msg.role == "tool" and msg.compressed_content is None]
        if not uncompressed_tools:
            return

        # Compress uncompressed tool results
        for tool_msg in uncompressed_tools:
            original_len = len(str(tool_msg.content)) if tool_msg.content else 0
            compressed = self._compress_tool_result(tool_msg, run_metrics=run_metrics)
            if compressed:
                tool_msg.compressed_content = compressed
                # Count actual tool results (Gemini combines multiple in one message)
                tool_results_count = len(tool_msg.tool_calls) if tool_msg.tool_calls else 1
                self.stats["tool_results_compressed"] = (
                    self.stats.get("tool_results_compressed", 0) + tool_results_count
                )
                self.stats["original_size"] = self.stats.get("original_size", 0) + original_len
                self.stats["compressed_size"] = self.stats.get("compressed_size", 0) + len(compressed)
            else:
                log_warning(f"Compression failed for {tool_msg.tool_name}")

    # * Async methods *#
    async def ashould_compress(
        self,
        messages: List[Message],
        tools: Optional[List] = None,
        model: Optional[Model] = None,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
    ) -> bool:
        """Async check if tool results should be compressed.

        Args:
            messages: List of messages to check.
            tools: List of tools for token counting.
            model: The Agent / Team model.
            response_format: Output schema for accurate token counting.

        Returns:
            True when either the token threshold or the uncompressed
            tool-result count threshold has been reached.
        """
        if not self.compress_tool_results:
            return False

        # Token-based threshold check
        if self.compress_token_limit is not None and model is not None:
            tokens = await model.acount_tokens(messages, tools, response_format)
            if tokens >= self.compress_token_limit:
                log_info(f"Token limit hit: {tokens} >= {self.compress_token_limit}")
                return True

        # Count-based threshold check: only tool results not yet compressed count.
        if self.compress_tool_results_limit is not None:
            uncompressed_tools_count = len(
                [m for m in messages if self._is_tool_result_message(m) and m.compressed_content is None]
            )
            if uncompressed_tools_count >= self.compress_tool_results_limit:
                log_info(f"Tool count limit hit: {uncompressed_tools_count} >= {self.compress_tool_results_limit}")
                return True

        return False

    async def _acompress_tool_result(
        self,
        tool_result: Message,
        run_metrics: Optional["RunMetrics"] = None,
    ) -> Optional[str]:
        """Async compress a single tool result.

        Mirrors _compress_tool_result: returns the compressed text, falls back
        to the (prefixed) original content on failure, or None when no
        message or no compression model is available.
        """
        if not tool_result:
            return None

        tool_content = f"Tool: {tool_result.tool_name or 'unknown'}\n{tool_result.content}"

        # Resolve the configured model spec into a Model instance (cached on self).
        self.model = get_model(self.model)
        if not self.model:
            log_warning("No compression model available")
            return None

        compression_prompt = self.compress_tool_call_instructions or DEFAULT_COMPRESSION_PROMPT
        compression_message = "Tool Results to Compress: " + tool_content + "\n"

        try:
            response = await self.model.aresponse(
                messages=[
                    Message(role="system", content=compression_prompt),
                    Message(role="user", content=compression_message),
                ]
            )

            # Accumulate compression model metrics
            if run_metrics is not None:
                from agno.metrics import ModelType, accumulate_model_metrics

                accumulate_model_metrics(response, self.model, ModelType.COMPRESSION_MODEL, run_metrics)

            return response.content
        except Exception as e:
            log_error(f"Error compressing tool result: {e}")
            # Fall back to the uncompressed content so callers still get a value.
            return tool_content

    async def acompress(
        self,
        messages: List[Message],
        run_metrics: Optional["RunMetrics"] = None,
    ) -> None:
        """Async compress uncompressed tool results in place, concurrently."""
        if not self.compress_tool_results:
            return

        uncompressed_tools = [msg for msg in messages if msg.role == "tool" and msg.compressed_content is None]
        if not uncompressed_tools:
            return

        # Track original sizes before compression
        original_sizes = [len(str(msg.content)) if msg.content else 0 for msg in uncompressed_tools]

        # Parallel compression using asyncio.gather
        tasks = [self._acompress_tool_result(msg, run_metrics=run_metrics) for msg in uncompressed_tools]
        results = await asyncio.gather(*tasks)

        # Apply results and track stats
        for msg, compressed, original_len in zip(uncompressed_tools, results, original_sizes):
            if compressed:
                msg.compressed_content = compressed
                # Count actual tool results (Gemini combines multiple in one message)
                tool_results_count = len(msg.tool_calls) if msg.tool_calls else 1
                self.stats["tool_results_compressed"] = (
                    self.stats.get("tool_results_compressed", 0) + tool_results_count
                )
                self.stats["original_size"] = self.stats.get("original_size", 0) + original_len
                self.stats["compressed_size"] = self.stats.get("compressed_size", 0) + len(compressed)
            else:
                log_warning(f"Compression failed for {msg.tool_name}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/compression/manager.py",
"license": "Apache License 2.0",
"lines": 228,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/agent/test_tool_compression.py | import pytest
from agno.agent import Agent
from agno.compression.manager import CompressionManager
from agno.models.openai import OpenAIChat
def search_tool(query: str) -> str:
    """Return an intentionally verbose search result so compression kicks in."""
    filler = "This is detailed information about the query. " * 50
    return f"Search results for '{query}': {filler}"
def get_data(item: str) -> str:
    """Return an intentionally large data payload to trigger compression."""
    payload = "Comprehensive data entry with lots of details. " * 50
    return f"Data for '{item}': {payload}"
@pytest.fixture
def compression_agent(shared_db):
    """Agent configured to compress tool results after a single tool call."""
    manager = CompressionManager(compress_tool_results_limit=1)
    return Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[search_tool, get_data],
        db=shared_db,
        compress_tool_results=True,
        compression_manager=manager,
        instructions="Use the tools as requested. Make multiple tool calls when asked.",
        telemetry=False,
    )
def test_compression_sync(compression_agent, shared_db):
    """Every tool message gets shorter compressed content that persists in the session."""
    response = compression_agent.run(
        "First search for 'Python programming' and then search for 'JavaScript frameworks'"
    )

    tool_messages = [msg for msg in response.messages if msg.role == "tool"]
    assert len(tool_messages) >= 2, "Expected at least 2 tool calls"

    for msg in tool_messages:
        assert msg.compressed_content is not None, "All tool messages should be compressed"
        assert len(str(msg.compressed_content)) < len(str(msg.content)), "Compressed content should be shorter"
        assert msg.get_content(use_compressed_content=True) == msg.compressed_content
        assert msg.get_content(use_compressed_content=False) == msg.content

    # The compressed form must survive a round-trip through the session store.
    session = compression_agent.get_session(compression_agent.session_id)
    assert session is not None, "Session should be retrievable"
    persisted = [m for run in session.runs for m in (run.messages or []) if m.role == "tool"]
    assert len(persisted) >= 2, "Persisted session should have 2+ tool messages"
    for msg in persisted:
        assert msg.compressed_content is not None, "Compressed content should persist in session"
@pytest.mark.asyncio
async def test_compression_async(shared_db):
    """Async runs should compress every tool message as well."""
    # Built inside the test (not via fixture) to avoid event loop reuse issues.
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[search_tool, get_data],
        db=shared_db,
        compress_tool_results=True,
        compression_manager=CompressionManager(compress_tool_results_limit=1),
        instructions="Use the tools as requested. Make multiple tool calls when asked.",
        telemetry=False,
    )

    response = await agent.arun("Search for 'Python async' and then search for 'asyncio patterns'")

    tool_messages = [msg for msg in response.messages if msg.role == "tool"]
    assert len(tool_messages) >= 2, "Expected at least 2 tool calls"
    for msg in tool_messages:
        assert msg.compressed_content is not None, "All tool messages should be compressed"
        assert len(str(msg.compressed_content)) < len(str(msg.content)), "Compressed content should be shorter"
def test_no_compression_when_disabled(shared_db):
    """With compression off, tool messages must keep compressed_content unset."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[search_tool],
        db=shared_db,
        compress_tool_results=False,
        instructions="Use the search tool.",
        telemetry=False,
    )

    response = agent.run("Search for 'test query'")

    for msg in response.messages:
        if msg.role == "tool":
            assert msg.compressed_content is None, "compressed_content should be None when compression is disabled"
def test_no_compression_below_threshold(shared_db):
    """A single tool call stays uncompressed when the limit is far higher."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        tools=[search_tool],
        db=shared_db,
        compress_tool_results=True,
        compression_manager=CompressionManager(compress_tool_results_limit=10),
        instructions="Use the search tool once.",
        telemetry=False,
    )

    response = agent.run("Search for 'single query'")

    for msg in response.messages:
        if msg.role == "tool":
            assert msg.compressed_content is None, "No compression should occur below threshold"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_tool_compression.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_tool_compression.py | import pytest
from agno.agent import Agent
from agno.compression.manager import CompressionManager
from agno.models.openai import OpenAIChat
from agno.team.team import Team
def search_tool(query: str) -> str:
    """Return a deliberately long search result so the team leader triggers compression."""
    filler = "This is detailed information about the query. " * 50
    return f"Search results for '{query}': {filler}"
def get_data(item: str) -> str:
    """Return a deliberately large data payload so compression is triggered."""
    payload = "Comprehensive data entry with lots of details. " * 50
    return f"Data for '{item}': {payload}"
@pytest.fixture
def dummy_member():
    """Tool-less member agent; the team leader performs all tool calls itself."""
    return Agent(
        name="Assistant",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You assist with general questions.",
        telemetry=False,
    )
@pytest.fixture
def compression_team(dummy_member, shared_db):
    """Team whose leader owns the tools and compresses after a single tool call."""
    manager = CompressionManager(compress_tool_results_limit=1)
    return Team(
        name="CompressionTeam",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[dummy_member],
        tools=[search_tool, get_data],  # Tools directly on team leader
        db=shared_db,
        compress_tool_results=True,
        compression_manager=manager,
        instructions="Use YOUR OWN search_tool and get_data tools to answer questions. Do NOT delegate to members for search tasks.",
        telemetry=False,
    )
def test_compression_sync(compression_team, shared_db):
    """The leader's tool messages are all compressed, shorter, and persisted."""
    response = compression_team.run("Search for 'Python programming' and also search for 'JavaScript frameworks'")

    tool_messages = [msg for msg in response.messages if msg.role == "tool"]
    assert len(tool_messages) >= 2, "Expected at least 2 tool calls"

    for msg in tool_messages:
        assert msg.compressed_content is not None, "All tool messages should be compressed"
        assert len(str(msg.compressed_content)) < len(str(msg.content)), "Compressed content should be shorter"
        assert msg.get_content(use_compressed_content=True) == msg.compressed_content
        assert msg.get_content(use_compressed_content=False) == msg.content

    # The compressed form must survive a round-trip through the session store.
    session = compression_team.get_session(compression_team.session_id)
    assert session is not None, "Session should be retrievable"
    persisted = [m for run in session.runs for m in (run.messages or []) if m.role == "tool"]
    assert len(persisted) >= 2, "Persisted session should have 2+ tool messages"
    for msg in persisted:
        assert msg.compressed_content is not None, "Compressed content should persist in session"
@pytest.mark.asyncio
async def test_compression_async(shared_db):
    """Async team runs should compress the leader's tool messages too."""
    # Built inside the test (not via fixtures) to avoid event loop reuse issues.
    member = Agent(
        name="Assistant",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You assist with general questions.",
        telemetry=False,
    )
    team = Team(
        name="CompressionTeam",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        tools=[search_tool, get_data],  # Tools directly on team leader
        db=shared_db,
        compress_tool_results=True,
        compression_manager=CompressionManager(compress_tool_results_limit=1),
        instructions="Use YOUR OWN search_tool and get_data tools to answer questions. Do NOT delegate to members for search tasks.",
        telemetry=False,
    )

    response = await team.arun("Search for 'Python async' and then search for 'asyncio patterns'")

    tool_messages = [msg for msg in response.messages if msg.role == "tool"]
    assert len(tool_messages) >= 2, "Expected at least 2 tool calls"
    for msg in tool_messages:
        assert msg.compressed_content is not None, "All tool messages should be compressed"
        assert len(str(msg.compressed_content)) < len(str(msg.content)), "Compressed content should be shorter"
def test_no_compression_when_disabled(shared_db):
    """With compression off, the leader's tool messages keep compressed_content unset."""
    member = Agent(
        name="Assistant",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You assist with general questions.",
        telemetry=False,
    )
    team = Team(
        name="NoCompressionTeam",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[member],
        tools=[search_tool],
        db=shared_db,
        compress_tool_results=False,
        instructions="Use YOUR OWN search_tool. Do NOT delegate.",
        telemetry=False,
    )

    response = team.run("Search for 'test query'")

    for msg in response.messages:
        if msg.role == "tool":
            assert msg.compressed_content is None, "compressed_content should be None when compression is disabled"
def test_no_compression_below_threshold(shared_db):
    """Compression should not trigger when below threshold."""
    helper_agent = Agent(
        name="Assistant",
        model=OpenAIChat(id="gpt-4o-mini"),
        instructions="You assist with general questions.",
        telemetry=False,
    )
    threshold_team = Team(
        name="ThresholdTeam",
        model=OpenAIChat(id="gpt-4o-mini"),
        members=[helper_agent],
        tools=[search_tool],
        db=shared_db,
        compress_tool_results=True,
        # A single search never reaches a limit of 10 results.
        compression_manager=CompressionManager(compress_tool_results_limit=10),
        instructions="Use YOUR OWN search_tool once. Do NOT delegate.",
        telemetry=False,
    )
    run_output = threshold_team.run("Search for 'single query'")
    tool_msgs = [msg for msg in run_output.messages if msg.role == "tool"]
    for msg in tool_msgs:
        assert msg.compressed_content is None, "No compression should occur below threshold"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_tool_compression.py",
"license": "Apache License 2.0",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/memory/strategies/base.py | from abc import ABC, abstractmethod
from typing import List
from agno.db.schemas import UserMemory
from agno.models.base import Model
from agno.utils.tokens import count_text_tokens
class MemoryOptimizationStrategy(ABC):
    """Base interface for memory optimization strategies.

    Concrete strategies must implement optimize() and aoptimize().
    The get_system_prompt() hook is optional and only relevant for
    strategies that drive an LLM.
    """

    def get_system_prompt(self) -> str:
        """Return the system prompt used by LLM-based strategies.

        Non-LLM strategies may leave this unimplemented.
        """
        raise NotImplementedError

    @abstractmethod
    def optimize(
        self,
        memories: List[UserMemory],
        model: Model,
    ) -> List[UserMemory]:
        """Synchronously optimize *memories* using *model*.

        Returns:
            The optimized list of UserMemory objects.
        """
        raise NotImplementedError

    @abstractmethod
    async def aoptimize(
        self,
        memories: List[UserMemory],
        model: Model,
    ) -> List[UserMemory]:
        """Asynchronously optimize *memories* using *model*.

        Returns:
            The optimized list of UserMemory objects.
        """
        raise NotImplementedError

    def count_tokens(self, memories: List[UserMemory]) -> int:
        """Return the total token count over the text of all memories."""
        total = 0
        for record in memories:
            # Missing memory text counts as zero tokens.
            total += count_text_tokens(record.memory or "")
        return total
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/memory/strategies/base.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/memory/strategies/summarize.py | """Summarize strategy: Combine all memories into single comprehensive summary."""
from textwrap import dedent
from typing import List
from uuid import uuid4
from agno.db.schemas import UserMemory
from agno.memory.strategies import MemoryOptimizationStrategy
from agno.models.base import Model
from agno.models.message import Message
from agno.utils.dttm import now_epoch_s
from agno.utils.log import log_debug
class SummarizeStrategy(MemoryOptimizationStrategy):
    """Combine all memories into single comprehensive summary.

    This strategy summarizes all memories into one coherent narrative,
    achieving maximum compression by eliminating redundancy. All
    metadata (topics, user_id) is preserved in the summarized memory.
    """

    def get_system_prompt(self) -> str:
        """Get system prompt for memory summarization.

        Implements the public hook declared on MemoryOptimizationStrategy
        (previously this class only exposed the private _get_system_prompt).

        Returns:
            System prompt string for LLM
        """
        return dedent("""\
            You are a memory compression assistant. Your task is to summarize multiple memories about a user
            into a single comprehensive summary while preserving all key facts.
            Requirements:
            - Combine related information from all memories
            - Preserve all factual information
            - Remove redundancy and consolidate repeated facts
            - Create a coherent narrative about the user
            - Maintain third-person perspective
            - Do not add information not present in the original memories
            Return only the summarized memory text, nothing else.\
        """)

    def _get_system_prompt(self) -> str:
        # Backwards-compatible alias for the previous private hook name.
        return self.get_system_prompt()

    @staticmethod
    def _validate_user_id(memories: List[UserMemory]) -> str:
        """Validate the input list and return the user_id of the first memory.

        Raises:
            ValueError: If memories is empty or the first memory has no user_id.
        """
        if not memories:
            raise ValueError("No Memories found")
        user_id = memories[0].user_id
        if user_id is None:
            raise ValueError("Cannot determine user_id: first memory does not have a valid user_id or is None")
        return user_id

    def _build_messages(self, memory_contents: List[str]) -> List[Message]:
        """Build the system/user message pair sent to the model."""
        combined_content = "\n\n".join([f"Memory {i + 1}: {content}" for i, content in enumerate(memory_contents)])
        return [
            Message(role="system", content=self.get_system_prompt()),
            Message(role="user", content=f"Summarize these memories into a single summary:\n\n{combined_content}"),
        ]

    def _build_summary(
        self,
        memories: List[UserMemory],
        summarized_content: str,
        user_id: str,
    ) -> List[UserMemory]:
        """Assemble the single summarized UserMemory and log the compression ratio."""
        # Combine topics - unique topics across all input memories
        all_topics: List[str] = []
        for mem in memories:
            if mem.topics:
                all_topics.extend(mem.topics)
        summarized_topics = list(set(all_topics)) if all_topics else None
        # Keep agent_id/team_id only when they are consistent across all memories
        agent_ids = {mem.agent_id for mem in memories if mem.agent_id}
        summarized_agent_id = next(iter(agent_ids)) if len(agent_ids) == 1 else None
        team_ids = {mem.team_id for mem in memories if mem.team_id}
        summarized_team_id = next(iter(team_ids)) if len(team_ids) == 1 else None
        summarized_memory = UserMemory(
            memory_id=str(uuid4()),
            memory=summarized_content.strip(),
            topics=summarized_topics,
            user_id=user_id,
            agent_id=summarized_agent_id,
            team_id=summarized_team_id,
            updated_at=now_epoch_s(),
        )
        log_debug(
            f"Summarized {len(memories)} memories into 1: {self.count_tokens(memories)} -> {self.count_tokens([summarized_memory])} tokens"
        )
        return [summarized_memory]

    def optimize(
        self,
        memories: List[UserMemory],
        model: Model,
    ) -> List[UserMemory]:
        """Summarize multiple memories into single comprehensive summary.

        Args:
            memories: List of UserMemory objects to summarize
            model: Model to use for summarization

        Returns:
            List containing single summarized UserMemory object

        Raises:
            ValueError: If memories list is empty or if user_id cannot be determined
        """
        user_id = self._validate_user_id(memories)
        memory_contents = [mem.memory for mem in memories if mem.memory]
        response = model.response(messages=self._build_messages(memory_contents))
        # Fall back to the raw concatenation if the model returns no content.
        summarized_content = response.content or " ".join(memory_contents)
        return self._build_summary(memories, summarized_content, user_id)

    async def aoptimize(
        self,
        memories: List[UserMemory],
        model: Model,
    ) -> List[UserMemory]:
        """Async version: Summarize multiple memories into single comprehensive summary.

        Args:
            memories: List of UserMemory objects to summarize
            model: Model to use for summarization

        Returns:
            List containing single summarized UserMemory object

        Raises:
            ValueError: If memories list is empty or if user_id cannot be determined
        """
        user_id = self._validate_user_id(memories)
        memory_contents = [mem.memory for mem in memories if mem.memory]
        response = await model.aresponse(messages=self._build_messages(memory_contents))
        # Fall back to the raw concatenation if the model returns no content.
        summarized_content = response.content or " ".join(memory_contents)
        return self._build_summary(memories, summarized_content, user_id)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/memory/strategies/summarize.py",
"license": "Apache License 2.0",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/memory/strategies/types.py | """Memory optimization strategy types and factory."""
from enum import Enum
from agno.memory.strategies import MemoryOptimizationStrategy
class MemoryOptimizationStrategyType(str, Enum):
    """Enumeration of available memory optimization strategies."""

    # "summarize": collapse all of a user's memories into one LLM-written summary.
    SUMMARIZE = "summarize"
class MemoryOptimizationStrategyFactory:
    """Factory for creating memory optimization strategy instances."""

    @classmethod
    def create_strategy(cls, strategy_type: MemoryOptimizationStrategyType, **kwargs) -> MemoryOptimizationStrategy:
        """Instantiate the strategy registered for *strategy_type*.

        Args:
            strategy_type: Which strategy implementation to build.
            **kwargs: Forwarded to the strategy constructor.

        Returns:
            A ready-to-use MemoryOptimizationStrategy instance.
        """
        if strategy_type is MemoryOptimizationStrategyType.SUMMARIZE:
            return cls._create_summarize_strategy(**kwargs)
        # Mirror the previous dict-lookup contract: unknown types raise KeyError.
        raise KeyError(strategy_type)

    @classmethod
    def _create_summarize_strategy(cls, **kwargs) -> MemoryOptimizationStrategy:
        # Imported lazily to avoid a circular import with the strategies package.
        from agno.memory.strategies.summarize import SummarizeStrategy

        return SummarizeStrategy(**kwargs)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/memory/strategies/types.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/utils/tokens.py | import json
import math
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union
from pydantic import BaseModel
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.tools.function import Function
from agno.utils.log import log_warning
# Default image dimensions used as fallback when actual dimensions cannot be determined.
# These values provide a more conservative estimate for high-detail image token counting.
DEFAULT_IMAGE_WIDTH = 1024
DEFAULT_IMAGE_HEIGHT = 1024
# Different models use different encodings
@lru_cache(maxsize=16)
def _get_tiktoken_encoding(model_id: str):
    """Return a tiktoken encoding for *model_id*, or None if tiktoken is missing."""
    normalized = model_id.lower()
    try:
        import tiktoken
    except ImportError:
        log_warning("tiktoken not installed. Please install it using `pip install tiktoken`.")
        return None
    try:
        # Use model-specific encoding when tiktoken knows the model.
        return tiktoken.encoding_for_model(normalized)
    except KeyError:
        # Unknown model: fall back to the modern OpenAI encoding.
        return tiktoken.get_encoding("o200k_base")
@lru_cache(maxsize=16)
def _get_hf_tokenizer(model_id: str):
    """Return a HuggingFace tokenizer for known model families, else None."""
    try:
        from tokenizers import Tokenizer
    except ImportError:
        log_warning("tokenizers not installed. Please install it using `pip install tokenizers`.")
        return None
    try:
        normalized = model_id.lower()
        # Llama-3 models use a different tokenizer than Llama-2.
        if "llama-3" in normalized or "llama3" in normalized:
            return Tokenizer.from_pretrained("Xenova/llama-3-tokenizer")
        # Llama-2 models and Replicate models (LiteLLM uses llama tokenizer for replicate).
        if "llama-2" in normalized or "llama2" in normalized or "replicate" in normalized:
            return Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
        # Cohere command-r models have their own tokenizer.
        if "command-r" in normalized:
            return Tokenizer.from_pretrained("Xenova/c4ai-command-r-v01-tokenizer")
        return None
    except Exception:
        # Any download/lookup failure silently falls back to other tokenizers.
        return None
def _select_tokenizer(model_id: str) -> Tuple[str, Any]:
    """Pick the best available tokenizer for *model_id*.

    Returns:
        A (kind, tokenizer) pair where kind is "huggingface", "tiktoken",
        or "none" (caller falls back to character-based estimation).
    """
    # HuggingFace tokenizers win for models with a dedicated tokenizer.
    hf_tok = _get_hf_tokenizer(model_id)
    if hf_tok is not None:
        return "huggingface", hf_tok
    # Next preference: tiktoken (OpenAI models).
    tt_enc = _get_tiktoken_encoding(model_id)
    if tt_enc is not None:
        return "tiktoken", tt_enc
    # Nothing available.
    return "none", None
# =============================================================================
# Tool Token Counting
# =============================================================================
# OpenAI counts tool/function tokens by converting them to a TypeScript-like
# namespace format. This approach was reverse-engineered and documented from:
# https://github.com/forestwanglin/openai-java/blob/main/jtokkit/src/main/java/xyz/felh/openai/jtokkit/utils/TikTokenUtils.java
#
# The formatted output looks like:
# namespace functions {
# // {description}
# type {name} = (_: {
# // {param_description}
# {param_name}{?}: {type},
# }) => any;
# } // namespace functions
# =============================================================================
# OpenAI internally represents function/tool definitions in a TypeScript-like format for tokenization
def _format_function_definitions(tools: List[Dict[str, Any]]) -> str:
"""
Formats tool definitions as a TypeScript namespace.
Returns:
A TypeScript namespace string representation of all tools.
Example:
Input tool: {"function": {"name": "get_weather", "parameters": {...}}}
Output: "namespace functions {\ntype get_weather = (_: {...}) => any;\n}"
"""
lines = []
lines.append("namespace functions {")
lines.append("")
for tool in tools:
# Handle both {"function": {...}} and direct function dict formats
function = tool.get("function", tool)
if function_description := function.get("description"):
lines.append(f"// {function_description}")
function_name = function.get("name", "")
parameters = function.get("parameters", {})
properties = parameters.get("properties", {})
if properties:
lines.append(f"type {function_name} = (_: {{")
lines.append(_format_object_parameters(parameters, 0))
lines.append("}) => any;")
else:
# Functions with no parameters
lines.append(f"type {function_name} = () => any;")
lines.append("")
lines.append("} // namespace functions")
return "\n".join(lines)
def _format_object_parameters(parameters: Dict[str, Any], indent: int) -> str:
"""
Format JSON Schema object properties as TypeScript object properties.
Args:
parameters: A JSON Schema object with 'properties' and optional 'required' keys.
indent: Number of spaces for indentation.
Returns:
TypeScript property definitions, one per line.
Example:
Input: {"properties": {"name": {"type": "string"}}, "required": ["name"]}
Output: "name: string,"
"""
properties = parameters.get("properties", {})
if not properties:
return ""
required_params = parameters.get("required", [])
lines = []
for key, props in properties.items():
# Add property description as a comment
description = props.get("description")
if description:
lines.append(f"// {description}")
# Required params have no "?", optional params have "?"
question = "" if required_params and key in required_params else "?"
lines.append(f"{key}{question}: {_format_type(props, indent)},")
return "\n".join([" " * max(0, indent) + line for line in lines])
def _format_type(props: Dict[str, Any], indent: int) -> str:
"""
Convert a JSON Schema type to its TypeScript equivalent.
Recursively handles nested types including arrays and objects.
Args:
props: A JSON Schema property definition containing 'type' and optionally
'enum', 'items' (for arrays), or 'properties' (for objects).
indent: The current indentation level for nested object formatting.
Returns:
A TypeScript type string.
Example:
- {"type": "string"} -> "string"
- {"type": "string", "enum": ["low", "high"]} -> '"low" | "high"'
- {"type": "array", "items": {"type": "number"}} -> "number[]"
"""
type_name = props.get("type", "any")
if type_name == "string":
if "enum" in props:
# Convert enum to TypeScript union of string literals
return " | ".join([f'"{item}"' for item in props["enum"]])
return "string"
elif type_name == "array":
# Recursively format the array item type
items = props.get("items", {})
return f"{_format_type(items, indent)}[]"
elif type_name == "object":
# Recursively format nested object properties
return f"{{\n{_format_object_parameters(props, indent + 2)}\n}}"
elif type_name in ["integer", "number"]:
if "enum" in props:
return " | ".join([f'"{item}"' for item in props["enum"]])
return "number"
elif type_name == "boolean":
return "boolean"
elif type_name == "null":
return "null"
else:
# Default to "any" for unknown types
return "any"
# =============================================================================
# Multi-modal Token Counting
# =============================================================================
# Image dimension parsing uses magic byte detection to identify file formats
# without relying on external libraries. This allows efficient header-only reads.
# =============================================================================
def _get_image_type(data: bytes) -> Optional[str]:
"""Returns the image format from magic bytes in the file header."""
if len(data) < 12:
return None
# PNG: 8-byte signature
if data[0:8] == b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a":
return "png"
# GIF: "GIF8" followed by "9a" or "7a" (we check for 'a')
if data[0:4] == b"GIF8" and data[5:6] == b"a":
return "gif"
# JPEG: SOI marker (Start of Image)
if data[0:3] == b"\xff\xd8\xff":
return "jpeg"
# HEIC/HEIF: ftyp box at offset 4
if data[4:8] == b"ftyp":
return "heic"
# WebP: RIFF container with WEBP identifier
if data[0:4] == b"RIFF" and data[8:12] == b"WEBP":
return "webp"
return None
def _parse_image_dimensions_from_bytes(data: bytes, img_type: Optional[str] = None) -> Tuple[int, int]:
    """Returns the image dimensions (width, height) from raw image bytes.

    Args:
        data: Raw image bytes. For JPEG this must include enough of the stream
            to reach an SOF marker, which can sit after JFIF/EXIF segments.
        img_type: Optional format hint ("png", "gif", "jpeg", "webp"); detected
            from magic bytes when omitted.

    Returns:
        (width, height), or (DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT) for
        formats not parsed here (including "heic") or unknown types.

    NOTE(review): truncated JPEG data makes ord(byte) raise on an empty read;
    the caller (_get_image_dimensions) catches Exception and falls back to
    the default dimensions.
    """
    import io
    import struct

    if img_type is None:
        img_type = _get_image_type(data)
    if img_type == "png":
        # PNG IHDR chunk: width at offset 16, height at offset 20 (big-endian)
        return struct.unpack(">LL", data[16:24])
    elif img_type == "gif":
        # GIF logical screen descriptor: width/height at offset 6 (little-endian)
        return struct.unpack("<HH", data[6:10])
    elif img_type == "jpeg":
        # JPEG requires scanning for SOF (Start of Frame) markers
        # SOF markers are 0xC0-0xCF, excluding 0xC4 (DHT), 0xC8 (JPG), 0xCC (DAC)
        with io.BytesIO(data) as f:
            f.seek(0)
            size = 2
            ftype = 0
            while not 0xC0 <= ftype <= 0xCF or ftype in (0xC4, 0xC8, 0xCC):
                f.seek(size, 1)
                byte = f.read(1)
                # Skip any padding 0xFF bytes
                while ord(byte) == 0xFF:
                    byte = f.read(1)
                ftype = ord(byte)
                size = struct.unpack(">H", f.read(2))[0] - 2
            f.seek(1, 1)  # Skip precision byte
            # SOF stores height before width (two big-endian u16 values)
            h, w = struct.unpack(">HH", f.read(4))
            return w, h
    elif img_type == "webp":
        # WebP has three encoding formats with different dimension locations
        if data[12:16] == b"VP8X":
            # Extended format: 24-bit dimensions stored in 3 bytes each
            w = struct.unpack("<I", data[24:27] + b"\x00")[0] + 1
            h = struct.unpack("<I", data[27:30] + b"\x00")[0] + 1
            return w, h
        elif data[12:16] == b"VP8 ":
            # Lossy format: dimensions in first frame header, 14-bit masked
            w = struct.unpack("<H", data[26:28])[0] & 0x3FFF
            h = struct.unpack("<H", data[28:30])[0] & 0x3FFF
            return w, h
        elif data[12:16] == b"VP8L":
            # Lossless format: dimensions bit-packed in 4 bytes
            bits = struct.unpack("<I", data[21:25])[0]
            w = (bits & 0x3FFF) + 1
            h = ((bits >> 14) & 0x3FFF) + 1
            return w, h
    # Unknown/unparsed format: conservative defaults.
    return DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT
def _get_image_dimensions(image: Image) -> Tuple[int, int]:
    """Returns the image dimensions (width, height) from an Image object.

    Falls back to (DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT) on any failure.
    """
    try:
        # Try to get format hint from metadata to skip magic byte detection
        img_format = image.format
        if not img_format and image.mime_type:
            img_format = image.mime_type.split("/")[-1] if "/" in image.mime_type else None
        # Get raw bytes from the appropriate source
        if image.content:
            data = image.content
        elif image.filepath:
            with open(image.filepath, "rb") as f:
                # Read a generous header chunk. PNG/GIF/WebP dimensions sit in
                # the first ~30 bytes, but JPEG stores them in an SOF marker
                # that can appear after several KB of JFIF/EXIF metadata —
                # the previous 100-byte read made JPEG parsing fail and fall
                # back to default dimensions for most files.
                data = f.read(65536)
        elif image.url:
            import httpx

            response = httpx.get(image.url, timeout=5)
            data = response.content
        else:
            return DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT
        return _parse_image_dimensions_from_bytes(data, img_format)
    except Exception:
        # Best-effort: any parse/IO failure yields the conservative defaults.
        return DEFAULT_IMAGE_WIDTH, DEFAULT_IMAGE_HEIGHT
def count_file_tokens(file: File) -> int:
    """Estimate the number of tokens in a file based on its size and type."""
    # Determine the file size from whichever source is populated.
    size = 0
    if file.content and isinstance(file.content, (str, bytes)):
        size = len(file.content)
    elif file.filepath:
        try:
            file_path = Path(file.filepath) if isinstance(file.filepath, str) else file.filepath
            if file_path.exists():
                size = file_path.stat().st_size
        except Exception:
            pass
    elif file.url:
        # HEAD request: fetch Content-Length without downloading the body.
        try:
            import urllib.request

            head_request = urllib.request.Request(file.url, method="HEAD")
            with urllib.request.urlopen(head_request, timeout=5) as resp:
                length_header = resp.headers.get("Content-Length")
                if length_header:
                    size = int(length_header)
        except Exception:
            pass
    if size == 0:
        return 0
    # Determine the file extension for type-based estimation.
    ext = None
    if file.format:
        ext = file.format.lower().lstrip(".")
    elif file.filepath:
        file_path = Path(file.filepath) if isinstance(file.filepath, str) else file.filepath
        if file_path.suffix:
            ext = file_path.suffix.lower().lstrip(".")
    elif file.url:
        base_url = file.url.split("?")[0]
        if "." in base_url:
            ext = base_url.rsplit(".", 1)[-1].lower()
    if ext in {"txt", "csv", "md", "json", "xml", "html"}:
        # Text files: ~4 characters per token (typical tiktoken ratio).
        return size // 4
    # Binary/other files: ~40 bytes per token (rough estimate).
    return size // 40
def count_tool_tokens(
    tools: Sequence[Union[Function, Dict[str, Any]]],
    model_id: str = "gpt-4o",
) -> int:
    """Count tokens consumed by tool/function definitions.

    Tools are rendered in OpenAI's TypeScript-namespace format before
    tokenization, mirroring how the API bills them.
    """
    if not tools:
        return 0
    # Normalize Function objects into plain dicts for formatting.
    tool_dicts = [tool.to_dict() if isinstance(tool, Function) else tool for tool in tools]
    return count_text_tokens(_format_function_definitions(tool_dicts), model_id)
def count_schema_tokens(
    output_schema: Optional[Union[Dict, Type["BaseModel"]]],
    model_id: str = "gpt-4o",
) -> int:
    """Estimate the token cost of a structured-output schema.

    Args:
        output_schema: A Pydantic model class or a JSON-schema dict; None yields 0.
        model_id: Model identifier used to select the tokenizer.

    Returns:
        Estimated token count, or 0 when the schema cannot be serialized.
    """
    if output_schema is None:
        return 0
    try:
        from pydantic import BaseModel

        if isinstance(output_schema, type) and issubclass(output_schema, BaseModel):
            # Convert Pydantic model to JSON schema
            schema = output_schema.model_json_schema()
        elif isinstance(output_schema, dict):
            schema = output_schema
        else:
            # Unsupported schema type: contributes no tokens.
            return 0
        schema_json = json.dumps(schema)
        return count_text_tokens(schema_json, model_id)
    except Exception:
        # Best-effort estimate: any serialization/tokenization failure counts as 0.
        return 0
def count_text_tokens(text: str, model_id: str = "gpt-4o") -> int:
    """Count tokens in *text* using the best tokenizer available for *model_id*."""
    if not text:
        return 0
    tokenizer_kind, tokenizer = _select_tokenizer(model_id)
    if tokenizer_kind == "huggingface":
        return len(tokenizer.encode(text).ids)
    if tokenizer_kind == "tiktoken":
        # disallowed_special=() allows all special tokens to be encoded.
        return len(tokenizer.encode(text, disallowed_special=()))
    # No tokenizer available: ~4 characters per token (typical for English text).
    return len(text) // 4
# =============================================================================
# Image Token Counting
# =============================================================================
# OpenAI's vision models process images by dividing them into 512x512 tiles.
# The token count depends on the image dimensions and detail level.
# OpenAI's image token formula:
# 1. If max(width, height) > 2000: scale to fit in 2000px on longest side
# 2. If min(width, height) > 768: scale so shortest side is 768px
# 3. tiles = ceil(width/512) * ceil(height/512)
# 4. tokens = 85 + (170 * tiles)
# Token constants:
# - 85: Base tokens for any image (covers metadata, low-detail representation)
# - 170: Additional tokens per 512x512 tile (high-detail tile encoding)
# Detail modes:
# - "low": Fixed 85 tokens (thumbnail/overview only)
# - "high"/"auto": Full tile-based calculation
# Example:
# 1024x1024 image with high detail:
# - No scaling needed (within limits)
# - tiles = ceil(1024/512) * ceil(1024/512) = 2 * 2 = 4
# - tokens = 85 + (170 * 4) = 765
# =============================================================================
def count_image_tokens(image: Image) -> int:
    """Estimate image tokens per the OpenAI tile formula: 85 + 170 per 512px tile."""
    width, height = _get_image_dimensions(image)
    detail = image.detail or "auto"
    if width <= 0 or height <= 0:
        return 0
    # Low detail costs a fixed 85 tokens regardless of size.
    if detail == "low":
        return 85
    # High/auto detail: first cap the longest side at 2000px...
    longest = max(width, height)
    if longest > 2000:
        factor = 2000 / longest
        width, height = int(width * factor), int(height * factor)
    # ...then cap the shortest side at 768px.
    shortest = min(width, height)
    if shortest > 768:
        factor = 768 / shortest
        width, height = int(width * factor), int(height * factor)
    # 85 base tokens plus 170 per 512x512 tile.
    tile_count = math.ceil(width / 512) * math.ceil(height / 512)
    return 85 + (170 * tile_count)
# =============================================================================
# Audio Token Counting
# =============================================================================
# This is an Agno-specific implementation using a conservative estimate of 25 tokens per second of audio.
# OpenAI's Whisper model actually uses ~50 tokens/second (20ms per token), but this estimate is more conservative for context window planning.
# Example:
# 10 seconds of audio: 10 * 25 = 250 tokens
def count_audio_tokens(audio: Audio) -> int:
    """Estimate audio tokens at a conservative 25 tokens per second of duration."""
    seconds = audio.duration or 0
    # Unknown or non-positive durations contribute nothing.
    return int(seconds * 25) if seconds > 0 else 0
# =============================================================================
# Video Token Counting
# =============================================================================
# This is an Agno-specific implementation that treats video as a sequence of
# images, applying the OpenAI image token formula to each frame.
# Example:
# 5 second video at 1 fps with 512x512 resolution:
# - tiles = 1 (512/512 = 1)
# - tokens_per_frame = 85 + 170 = 255
# - num_frames = 5
# - total = 255 * 5 = 1275 tokens
# =============================================================================
def count_video_tokens(video: Video) -> int:
    """Estimate video tokens by pricing each sampled frame like a high-detail image."""
    seconds = video.duration or 0
    if seconds <= 0:
        return 0
    # Missing metadata falls back to a 512x512 frame at 1 fps.
    frame_w = video.width or 512
    frame_h = video.height or 512
    frame_rate = video.fps or 1.0
    # Apply the same scaling rules as high-detail images:
    # cap longest side at 2000px, then shortest side at 768px.
    if max(frame_w, frame_h) > 2000:
        factor = 2000 / max(frame_w, frame_h)
        frame_w, frame_h = int(frame_w * factor), int(frame_h * factor)
    if min(frame_w, frame_h) > 768:
        factor = 768 / min(frame_w, frame_h)
        frame_w, frame_h = int(frame_w * factor), int(frame_h * factor)
    tile_count = math.ceil(frame_w / 512) * math.ceil(frame_h / 512)
    tokens_per_frame = 85 + (170 * tile_count)
    # At least one frame is always counted.
    frame_count = max(int(seconds * frame_rate), 1)
    return frame_count * tokens_per_frame
def _count_media_tokens(message: Message) -> int:
    """Sum token estimates for every media attachment on a message."""
    total = 0
    for img in message.images or []:
        total += count_image_tokens(img)
    for clip in message.audio or []:
        total += count_audio_tokens(clip)
    for vid in message.videos or []:
        total += count_video_tokens(vid)
    for attachment in message.files or []:
        total += count_file_tokens(attachment)
    return total
def _count_message_tokens(message: Message, model_id: str = "gpt-4o") -> int:
    """Estimate the token count of a single message.

    Gathers all text-bearing fields (content, multimodal text blocks,
    tool-call arguments, tool_call_id, reasoning content, name) into one
    tokenizer pass, then adds per-attachment media estimates.
    """
    tokens = 0
    text_parts: List[str] = []
    # Collect content text (compressed content preferred when available)
    content = message.get_content(use_compressed_content=True)
    if content:
        if isinstance(content, str):
            text_parts.append(content)
        elif isinstance(content, list):
            # Handle multimodal content blocks
            for item in content:
                if isinstance(item, str):
                    text_parts.append(item)
                elif isinstance(item, dict):
                    item_type = item.get("type", "")
                    if item_type == "text":
                        text_parts.append(item.get("text", ""))
                    elif item_type == "image_url":
                        # Handle OpenAI-style content lists without populating message.images
                        image_url_data = item.get("image_url", {})
                        url = image_url_data.get("url") if isinstance(image_url_data, dict) else None
                        detail = image_url_data.get("detail", "auto") if isinstance(image_url_data, dict) else "auto"
                        temp_image = Image(url=url, detail=detail)
                        tokens += count_image_tokens(temp_image)
                    else:
                        # Unknown block type: count its JSON serialization as text
                        text_parts.append(json.dumps(item))
        else:
            text_parts.append(str(content))
    # Collect tool call arguments
    if message.tool_calls:
        for tool_call in message.tool_calls:
            if isinstance(tool_call, dict) and "function" in tool_call:
                args = tool_call["function"].get("arguments", "")
                text_parts.append(str(args))
    # Collect tool response id
    if message.tool_call_id:
        text_parts.append(message.tool_call_id)
    # Collect reasoning content
    if message.reasoning_content:
        text_parts.append(message.reasoning_content)
    if message.redacted_reasoning_content:
        text_parts.append(message.redacted_reasoning_content)
    # Collect name field
    if message.name:
        text_parts.append(message.name)
    # Count all text tokens in a single call (parts joined with spaces)
    if text_parts:
        tokens += count_text_tokens(" ".join(text_parts), model_id)
    # Count all media attachments
    tokens += _count_media_tokens(message)
    return tokens
def count_tokens(
    messages: List[Message],
    tools: Optional[List[Union[Function, Dict[str, Any]]]] = None,
    model_id: str = "gpt-4o",
    output_schema: Optional[Union[Dict, Type["BaseModel"]]] = None,
) -> int:
    """Estimate the total token footprint of messages, tools, and output schema."""
    normalized_model = model_id.lower()
    # Per-message tokens (text, tool calls, media).
    total = sum(_count_message_tokens(msg, normalized_model) for msg in messages) if messages else 0
    # Tool/function definition tokens.
    if tools:
        total += count_tool_tokens(tools, normalized_model)
    # Structured-output schema tokens.
    if output_schema is not None:
        total += count_schema_tokens(output_schema, normalized_model)
    return total
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/utils/tokens.py",
"license": "Apache License 2.0",
"lines": 536,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/memory/test_memory_optimization.py | from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from agno.db.base import AsyncBaseDb, BaseDb
from agno.memory.manager import MemoryManager, UserMemory
from agno.memory.strategies import (
MemoryOptimizationStrategy,
MemoryOptimizationStrategyType,
)
@pytest.fixture
def mock_db():
    """Synchronous DB mock conforming to BaseDb; upserts succeed silently."""
    db = MagicMock(spec=BaseDb)
    # Setup default behaviors
    db.upsert_user_memory.return_value = None
    return db
@pytest.fixture
def mock_async_db():
    """Async DB mock conforming to AsyncBaseDb (all methods awaitable)."""
    db = AsyncMock(spec=AsyncBaseDb)
    return db
@pytest.fixture
def mock_model():
    """Mock model, with get_model patched so MemoryManager accepts it unvalidated."""
    model = MagicMock()
    # Patch get_model to return our mock model instead of validating it
    with patch("agno.memory.manager.get_model", return_value=model):
        yield model
@pytest.fixture
def mock_strategy():
    """Mock optimization strategy reporting a fixed token count of 100."""
    strategy = MagicMock(spec=MemoryOptimizationStrategy)
    strategy.count_tokens.return_value = 100
    return strategy
@pytest.fixture
def sample_memories():
    """Two pre-optimization memories belonging to the same user ("user-1")."""
    return [
        UserMemory(memory="Memory 1", user_id="user-1", memory_id="m1"),
        UserMemory(memory="Memory 2", user_id="user-1", memory_id="m2"),
    ]
@pytest.fixture
def optimized_memories():
return [UserMemory(memory="Optimized Memory", user_id="user-1", memory_id="opt-1")]
def test_optimize_memories_success(mock_db, mock_model, mock_strategy, sample_memories, optimized_memories):
    """Test successful synchronous memory optimization."""
    manager = MemoryManager(db=mock_db, model=mock_model)
    manager.get_user_memories = MagicMock(return_value=sample_memories)
    manager.clear_user_memories = MagicMock()

    mock_strategy.optimize.return_value = optimized_memories
    factory_path = "agno.memory.strategies.MemoryOptimizationStrategyFactory.create_strategy"
    with patch(factory_path, return_value=mock_strategy) as mock_factory:
        result = manager.optimize_memories(
            user_id="user-1", strategy=MemoryOptimizationStrategyType.SUMMARIZE, apply=True
        )

        # The factory resolved the enum into our strategy instance.
        mock_factory.assert_called_once_with(MemoryOptimizationStrategyType.SUMMARIZE)
        # The strategy ran against the existing memories with the manager's model.
        mock_strategy.optimize.assert_called_once_with(memories=sample_memories, model=mock_model)
        # apply=True: old memories cleared, each optimized memory persisted.
        manager.clear_user_memories.assert_called_once_with(user_id="user-1")
        mock_db.upsert_user_memory.assert_called()
        assert mock_db.upsert_user_memory.call_count == len(optimized_memories)
        assert result == optimized_memories
def test_optimize_memories_apply_false(mock_db, mock_model, mock_strategy, sample_memories, optimized_memories):
    """Test optimization without applying changes to DB."""
    manager = MemoryManager(db=mock_db, model=mock_model)
    manager.get_user_memories = MagicMock(return_value=sample_memories)
    manager.clear_user_memories = MagicMock()
    with patch("agno.memory.strategies.MemoryOptimizationStrategyFactory.create_strategy", return_value=mock_strategy):
        mock_strategy.optimize.return_value = optimized_memories
        result = manager.optimize_memories(user_id="user-1", apply=False)
        # Verify DB was NOT touched
        manager.clear_user_memories.assert_not_called()
        mock_db.upsert_user_memory.assert_not_called()
        # Optimized result is still returned even though nothing was persisted.
        assert result == optimized_memories


def test_optimize_memories_empty(mock_db, mock_model):
    """Test optimization with no existing memories."""
    manager = MemoryManager(db=mock_db, model=mock_model)
    manager.get_user_memories = MagicMock(return_value=[])
    result = manager.optimize_memories(user_id="user-1")
    assert result == []
    # Should return early before creating strategy
    mock_model.assert_not_called()


def test_optimize_memories_custom_strategy_instance(
    mock_db, mock_model, mock_strategy, sample_memories, optimized_memories
):
    """Test optimization passing a strategy instance directly."""
    manager = MemoryManager(db=mock_db, model=mock_model)
    manager.get_user_memories = MagicMock(return_value=sample_memories)
    manager.clear_user_memories = MagicMock()
    mock_strategy.optimize.return_value = optimized_memories
    # Pass instance directly (no factory involved for pre-built strategies)
    manager.optimize_memories(user_id="user-1", strategy=mock_strategy, apply=True)
    # Verify method called on passed instance
    mock_strategy.optimize.assert_called_once()


def test_optimize_memories_async_db_error(mock_async_db, mock_model):
    """Test that calling sync optimize with async DB raises ValueError."""
    manager = MemoryManager(db=mock_async_db, model=mock_model)
    with pytest.raises(ValueError, match="not supported with an async DB"):
        manager.optimize_memories(user_id="user-1")
@pytest.mark.asyncio
async def test_aoptimize_memories_success(
    mock_async_db, mock_model, mock_strategy, sample_memories, optimized_memories
):
    """Test successful async memory optimization."""
    manager = MemoryManager(db=mock_async_db, model=mock_model)
    manager.aget_user_memories = AsyncMock(return_value=sample_memories)
    manager.aclear_user_memories = AsyncMock()
    with patch("agno.memory.strategies.MemoryOptimizationStrategyFactory.create_strategy", return_value=mock_strategy):
        mock_strategy.aoptimize = AsyncMock(return_value=optimized_memories)
        result = await manager.aoptimize_memories(
            user_id="user-1", strategy=MemoryOptimizationStrategyType.SUMMARIZE, apply=True
        )
        # Verify async calls
        mock_strategy.aoptimize.assert_called_once_with(memories=sample_memories, model=mock_model)
        manager.aclear_user_memories.assert_called_once_with(user_id="user-1")
        # Each optimized memory was awaited into the async DB.
        assert mock_async_db.upsert_user_memory.await_count == len(optimized_memories)
        assert result == optimized_memories


@pytest.mark.asyncio
async def test_aoptimize_memories_apply_false(
    mock_async_db, mock_model, mock_strategy, sample_memories, optimized_memories
):
    """Test async optimization without applying to DB."""
    manager = MemoryManager(db=mock_async_db, model=mock_model)
    manager.aget_user_memories = AsyncMock(return_value=sample_memories)
    manager.aclear_user_memories = AsyncMock()
    with patch("agno.memory.strategies.MemoryOptimizationStrategyFactory.create_strategy", return_value=mock_strategy):
        mock_strategy.aoptimize = AsyncMock(return_value=optimized_memories)
        result = await manager.aoptimize_memories(user_id="user-1", apply=False)
        # apply=False: DB must remain untouched.
        manager.aclear_user_memories.assert_not_called()
        mock_async_db.upsert_user_memory.assert_not_called()
        assert result == optimized_memories


@pytest.mark.asyncio
async def test_aoptimize_memories_empty(mock_async_db, mock_model):
    """Test async optimization with empty memories."""
    manager = MemoryManager(db=mock_async_db, model=mock_model)
    manager.aget_user_memories = AsyncMock(return_value=[])
    result = await manager.aoptimize_memories(user_id="user-1")
    assert result == []


@pytest.mark.asyncio
async def test_aoptimize_memories_sync_db_compatibility(
    mock_db, mock_model, mock_strategy, sample_memories, optimized_memories
):
    """Test that async optimize works even with a sync DB (hybrid usage)."""
    manager = MemoryManager(db=mock_db, model=mock_model)
    # Note: With sync DB, it calls get_user_memories (sync) not aget
    manager.get_user_memories = MagicMock(return_value=sample_memories)
    manager.clear_user_memories = MagicMock()
    # But upsert/clear might be handled differently depending on implementation
    # The code handles `isinstance(self.db, AsyncBaseDb)` checks.
    # Since we mocked db as BaseDb, manager treats it as sync
    with patch("agno.memory.strategies.MemoryOptimizationStrategyFactory.create_strategy", return_value=mock_strategy):
        mock_strategy.aoptimize = AsyncMock(return_value=optimized_memories)
        # It should use the async strategy method, but sync DB methods
        result = await manager.aoptimize_memories(user_id="user-1", apply=True)
        mock_strategy.aoptimize.assert_called_once()
        # Should use sync upsert since DB is sync
        mock_db.upsert_user_memory.assert_called()
        assert result == optimized_memories
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/memory/test_memory_optimization.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/utils/test_tokens.py | import pytest
from agno.media import Audio, File, Image, Video
from agno.models.message import Message
from agno.utils.tokens import (
count_audio_tokens,
count_file_tokens,
count_image_tokens,
count_schema_tokens,
count_text_tokens,
count_tokens,
count_video_tokens,
)
def test_count_text_tokens_basic():
    """Two-word input tokenizes to exactly two tokens."""
    result = count_text_tokens("Hello world")
    assert isinstance(result, int)
    assert result > 0
    assert result == 2


def test_count_text_tokens_empty_string():
    """Empty input costs zero tokens."""
    result = count_text_tokens("")
    assert result == 0


def test_count_text_tokens_multiple_words():
    """A short sentence produces a positive integer count."""
    text = "The quick brown fox jumps over the lazy dog"
    result = count_text_tokens(text)
    assert isinstance(result, int)
    assert result > 0


def test_count_text_tokens_long_text():
    """Long repeated text still counts cleanly."""
    text = " ".join(["word"] * 100)
    result = count_text_tokens(text)
    assert isinstance(result, int)
    assert result > 0


def test_count_text_tokens_special_characters():
    """Punctuation does not break counting."""
    text = "Hello! How are you? I'm fine, thanks."
    result = count_text_tokens(text)
    assert isinstance(result, int)
    assert result > 0


def test_count_text_tokens_unicode():
    """Non-ASCII (CJK) input is counted without error."""
    text = "Hello 世界"
    result = count_text_tokens(text)
    assert isinstance(result, int)
    assert result > 0


def test_count_text_tokens_different_lengths():
    """Longer text never yields fewer tokens than shorter text."""
    short_text = "Hello"
    long_text = "Hello " * 10
    short_count = count_text_tokens(short_text)
    long_count = count_text_tokens(long_text)
    assert long_count >= short_count
def test_count_image_tokens_low_detail():
    """Low-detail images have a flat cost."""
    image = Image(url="https://example.com/image.jpg", detail="low")
    result = count_image_tokens(image)
    assert result == 85  # Low detail is always 85 tokens


def test_count_image_tokens_high_detail_default():
    """High detail with default dimensions uses the tiled formula."""
    image = Image(url="https://example.com/image.jpg", detail="high")
    result = count_image_tokens(image)
    # Default 1024x1024 = 2x2 tiles = 4 tiles
    # 85 + (170 * 4) = 765
    assert result == 765


def test_count_image_tokens_auto_detail():
    """Auto detail falls back to the high-detail computation."""
    image = Image(url="https://example.com/image.jpg", detail="auto")
    result = count_image_tokens(image)
    assert result == 765  # Same as high detail with default dimensions


def test_count_image_tokens_no_detail():
    """Omitting detail behaves like auto/high with default dimensions."""
    image = Image(url="https://example.com/image.jpg")
    result = count_image_tokens(image)
    assert result == 765


def test_count_audio_tokens_basic():
    """Audio cost scales linearly with duration."""
    audio = Audio(url="https://example.com/audio.mp3", duration=10.0)
    result = count_audio_tokens(audio)
    # 10 seconds * 25 tokens/second = 250 tokens
    assert result == 250


def test_count_audio_tokens_zero_duration():
    """Zero-length audio costs nothing."""
    audio = Audio(url="https://example.com/audio.mp3", duration=0)
    result = count_audio_tokens(audio)
    assert result == 0


def test_count_audio_tokens_long_audio():
    """A full minute of audio at 25 tokens/second."""
    audio = Audio(url="https://example.com/audio.mp3", duration=60.0)
    result = count_audio_tokens(audio)
    # 60 seconds * 25 tokens/second = 1500 tokens
    assert result == 1500
# --- Video Token Tests ---
def test_count_video_tokens_basic():
    """Video cost = frames (duration * fps) times per-frame tile cost."""
    video = Video(url="https://example.com/video.mp4", duration=5.0, fps=1.0)
    result = count_video_tokens(video)
    # Default 512x512 = 1x1 tile = 1 tile per frame
    # tokens_per_frame = 85 + (170 * 1) = 255
    # 5 frames * 255 = 1275 tokens
    assert result == 1275


def test_count_video_tokens_no_duration():
    """Without a duration there are no frames to count."""
    video = Video(url="https://example.com/video.mp4")
    result = count_video_tokens(video)
    assert result == 0


def test_count_video_tokens_with_dimensions():
    """Larger frames (1024x1024 = 4 tiles) raise the per-frame cost."""
    video = Video(
        url="https://example.com/video.mp4",
        duration=2.0,
        fps=1.0,
        width=1024,
        height=1024,
    )
    result = count_video_tokens(video)
    # 2 frames * (85 + 170 * 4) = 2 * 765 = 1530
    assert result == 1530


# --- File Token Tests ---
def test_count_file_tokens_text_file():
    """Text files are estimated at content_size // 4 tokens."""
    file = File(content="Hello world! " * 100, format="txt")
    result = count_file_tokens(file)
    content_size = len("Hello world! " * 100)
    expected = content_size // 4
    assert result == expected


def test_count_file_tokens_binary_file():
    """Binary files use a sparser content_size // 40 estimate."""
    file = File(content=b"binary content " * 100, format="pdf")
    result = count_file_tokens(file)
    content_size = len(b"binary content " * 100)
    expected = content_size // 40
    assert result == expected


def test_count_file_tokens_url_without_size():
    """A URL-only file with unknown size counts as zero (no fetch)."""
    file = File(url="https://example.com/nonexistent.txt", format="txt")
    result = count_file_tokens(file)
    assert result == 0
def test_count_tokens_simple_message():
    """A single text message yields a positive count."""
    messages = [Message(role="user", content="Hello world")]
    result = count_tokens(messages)
    assert isinstance(result, int)
    assert result > 0


def test_count_tokens_with_images():
    """Attached images add to the text cost."""
    image = Image(url="https://example.com/image.jpg", detail="low")
    messages = [Message(role="user", content="What is in this image?", images=[image])]
    result = count_tokens(messages)
    # Should include text tokens + 85 for low detail image
    assert result > 85


def test_count_tokens_with_content_list_image_url_low():
    """OpenAI-style content parts: image_url entries are counted too."""
    messages = [
        Message(
            role="user",
            content=[
                {"type": "text", "text": "What is in this image?"},
                {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg", "detail": "low"}},
            ],
        )
    ]
    result = count_tokens(messages)
    assert result >= 85
    # Ensure image tokens are counted in addition to text
    assert result > count_tokens([Message(role="user", content="What is in this image?")])


def test_count_tokens_with_audio():
    """Attached audio adds its duration-based cost."""
    audio = Audio(url="https://example.com/audio.mp3", duration=10.0)
    messages = [Message(role="user", content="Transcribe this audio", audio=[audio])]
    result = count_tokens(messages)
    # Should include text tokens + 250 for 10s audio
    assert result > 250


def test_count_tokens_with_content_list_image_url_high_detail_default_dims():
    """Content-part image with no detail uses the high-detail default cost."""
    messages = [
        Message(
            role="user",
            content=[
                {"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}},
            ],
        )
    ]
    result = count_tokens(messages)
    # Default dimensions (1024x1024) with auto/high detail -> 765 tokens for the image
    assert result >= 765


def test_count_tokens_multiple_messages():
    """Counts accumulate across all messages in a conversation."""
    messages = [
        Message(role="system", content="You are a helpful assistant."),
        Message(role="user", content="Hello!"),
        Message(role="assistant", content="Hi there! How can I help you?"),
        Message(role="user", content="What is 2 + 2?"),
    ]
    result = count_tokens(messages)
    assert isinstance(result, int)
    assert result > 10  # Multiple messages should have meaningful token count


def test_count_tokens_multimodal_message():
    """All media kinds on one message contribute to the total."""
    image1 = Image(url="https://example.com/img1.jpg", detail="low")
    image2 = Image(url="https://example.com/img2.jpg", detail="low")
    audio = Audio(url="https://example.com/audio.mp3", duration=10.0)
    video = Video(url="https://example.com/video.mp4", duration=2.0, fps=1.0)
    file = File(content="x" * 400, format="txt")
    # Long text content
    long_text = "This is a detailed description. " * 50
    messages = [
        Message(
            role="user",
            content=long_text,
            images=[image1, image2],
            audio=[audio],
            videos=[video],
            files=[file],
        )
    ]
    result = count_tokens(messages)
    # 2 low-detail images (85*2) + 10s audio (250) + 2 frames * 255 (510) + 400 chars // 4 (100)
    expected_media_tokens = 170 + 250 + 510 + 100
    assert result > expected_media_tokens
    assert result > 1000


def test_count_tokens_conversation_with_media():
    """Mixed text/image/audio conversation exceeds the raw media cost."""
    image = Image(url="https://example.com/photo.jpg", detail="low")
    audio = Audio(url="https://example.com/voice.mp3", duration=5.0)
    messages = [
        Message(role="system", content="You are a helpful assistant that can analyze images and audio."),
        Message(role="user", content="What do you see in this image?", images=[image]),
        Message(role="assistant", content="I can see a beautiful landscape with mountains."),
        Message(role="user", content="Now listen to this audio and describe it.", audio=[audio]),
        Message(role="assistant", content="The audio contains background music with nature sounds."),
    ]
    result = count_tokens(messages)
    # NOTE(review): the first assert (> 210) is implied by the second (> 250).
    assert result > 210
    assert result > 250
@pytest.mark.asyncio
async def test_model_acount_tokens():
    """Test async token counting on Model base class."""
    from agno.models.openai import OpenAIChat

    model = OpenAIChat(id="gpt-4o")
    messages = [Message(role="user", content="Hello world")]
    # Async counting must agree with the sync path.
    sync_count = model.count_tokens(messages)
    async_count = await model.acount_tokens(messages)
    assert sync_count == async_count
    assert sync_count > 0


@pytest.mark.asyncio
async def test_model_acount_tokens_with_tools():
    """Test async token counting with tools."""
    from agno.models.openai import OpenAIChat
    from agno.tools.function import Function

    model = OpenAIChat(id="gpt-4o")
    messages = [Message(role="user", content="What is the weather?")]

    def get_weather(location: str) -> str:
        """Get weather for a location."""
        return f"Weather in {location}"

    # Tool definitions are included in both sync and async counts.
    tools = [Function.from_callable(get_weather)]
    sync_count = model.count_tokens(messages, tools)
    async_count = await model.acount_tokens(messages, tools)
    assert sync_count == async_count
    assert sync_count > 0
def test_count_schema_tokens_pydantic():
    """Test schema token counting with Pydantic model."""
    from pydantic import BaseModel

    class SimpleSchema(BaseModel):
        answer: str
        score: float

    tokens = count_schema_tokens(SimpleSchema, "gpt-4o-mini")
    assert isinstance(tokens, int)
    assert tokens > 0
    assert tokens < 100  # Reasonable range for simple schema


def test_count_schema_tokens_complex():
    """Test schema token counting with more complex schema."""
    from typing import List

    from pydantic import BaseModel, Field

    class ComplexSchema(BaseModel):
        title: str = Field(..., description="Title of the item")
        description: str = Field(..., description="Detailed description")
        score: int = Field(..., description="Score from 1-10")
        tags: List[str] = Field(default_factory=list, description="List of tags")

    tokens = count_schema_tokens(ComplexSchema, "gpt-4o-mini")
    assert isinstance(tokens, int)
    assert tokens > 0
    assert tokens > 50  # Complex schema should have more tokens


def test_count_schema_tokens_dict():
    """Test schema token counting with dict schema."""
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
        },
        "required": ["name", "age"],
    }
    tokens = count_schema_tokens(schema, "gpt-4o-mini")
    assert isinstance(tokens, int)
    assert tokens > 0


def test_count_schema_tokens_none():
    """Test schema token counting with None."""
    tokens = count_schema_tokens(None, "gpt-4o-mini")
    assert tokens == 0


def test_count_tokens_with_schema():
    """Test count_tokens includes schema tokens."""
    from pydantic import BaseModel

    class SimpleSchema(BaseModel):
        answer: str

    messages = [Message(role="user", content="Hello")]
    # Count without schema
    tokens_no_schema = count_tokens(messages, model_id="gpt-4o-mini")
    # Count with schema
    tokens_with_schema = count_tokens(messages, model_id="gpt-4o-mini", output_schema=SimpleSchema)
    # Schema should add tokens
    assert tokens_with_schema > tokens_no_schema


def test_model_count_tokens_with_schema():
    """Test model.count_tokens includes schema tokens."""
    from pydantic import BaseModel

    from agno.models.openai import OpenAIChat

    class SimpleSchema(BaseModel):
        answer: str

    model = OpenAIChat(id="gpt-4o-mini")
    messages = [Message(role="user", content="Hello")]
    # Count without schema
    tokens_no_schema = model.count_tokens(messages)
    # Count with schema
    tokens_with_schema = model.count_tokens(messages, output_schema=SimpleSchema)
    # Schema should add tokens
    assert tokens_with_schema > tokens_no_schema


@pytest.mark.asyncio
async def test_model_acount_tokens_with_schema():
    """Test model.acount_tokens includes schema tokens."""
    from pydantic import BaseModel

    from agno.models.openai import OpenAIChat

    class SimpleSchema(BaseModel):
        answer: str

    model = OpenAIChat(id="gpt-4o-mini")
    messages = [Message(role="user", content="Hello")]
    # Count without schema
    tokens_no_schema = await model.acount_tokens(messages)
    # Count with schema
    tokens_with_schema = await model.acount_tokens(messages, output_schema=SimpleSchema)
    # Schema should add tokens
    assert tokens_with_schema > tokens_no_schema
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_tokens.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/agent/test_output_schema_override.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent, RunOutput
from agno.models.openai import OpenAIChat
# Default output schema used by most tests; overridden per-run with BookSchema.
# (Intentionally commented rather than docstring'd: a class docstring would be
# emitted as the schema "description" in the generated JSON schema.)
class PersonSchema(BaseModel):
    name: str = Field(..., description="Person's name")
    age: int = Field(..., description="Person's age")


# Alternate schema passed as the per-run output_schema override.
class BookSchema(BaseModel):
    title: str = Field(..., description="Book title")
    author: str = Field(..., description="Book author")
    year: int = Field(..., description="Publication year")
# Raw OpenAI response_format-style JSON schemas mirroring the Pydantic models
# above. NOTE(review): neither dict is referenced by the tests visible in this
# chunk — presumably used by tests further down the file; verify before removal.
person_json_schema = {
    "type": "json_schema",
    "json_schema": {
        "name": "PersonInfo",
        "schema": {
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "Person's full name"},
                "age": {"type": "integer", "description": "Person's age"},
            },
            "required": ["name", "age"],
            "additionalProperties": False,
        },
    },
}

book_json_schema = {
    "type": "json_schema",
    "json_schema": {
        "name": "BookInfo",
        "schema": {
            "type": "object",
            "properties": {
                "title": {"type": "string", "description": "Book title"},
                "author": {"type": "string", "description": "Author name"},
                "year": {"type": "integer", "description": "Publication year"},
            },
            "required": ["title", "author", "year"],
            "additionalProperties": False,
        },
    },
}
def test_run_with_output_schema():
    """Test that output_schema can be overridden in run() and is restored after."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    response: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    # Per-run override takes effect for this call only.
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    # The agent's default schema is restored afterwards.
    assert agent.output_schema == PersonSchema


def test_run_streaming_with_output_schema():
    """Test that output_schema override works with streaming."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    final_response = None
    # Keep the last event carrying content; that holds the parsed output.
    for event in agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert agent.output_schema == PersonSchema


@pytest.mark.asyncio
async def test_arun_with_output_schema():
    """Test that output_schema can be overridden in arun() and is restored after."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    response: RunOutput = await agent.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert agent.output_schema == PersonSchema


@pytest.mark.asyncio
async def test_arun_streaming_with_output_schema():
    """Test that output_schema override works with async streaming."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    final_response = None
    async for event in agent.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert agent.output_schema == PersonSchema
def test_run_without_default_schema():
    """Test output_schema override when agent has no default schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=False,
    )
    assert agent.output_schema is None
    response: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    # Agent stays schema-less after the per-run override.
    assert agent.output_schema is None


@pytest.mark.asyncio
async def test_arun_without_default_schema():
    """Test output_schema override in arun() when agent has no default schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=False,
    )
    assert agent.output_schema is None
    response: RunOutput = await agent.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert agent.output_schema is None


def test_multiple_calls_in_sequence():
    """Test multiple sequential calls with different schema overrides."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    # Override -> BookSchema for this run only.
    response1: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response1.content, BookSchema)
    assert agent.output_schema == PersonSchema
    # No override -> the default PersonSchema applies again.
    response2: RunOutput = agent.run(
        "Tell me about a person named John who is 30 years old",
        stream=False,
    )
    assert isinstance(response2.content, PersonSchema)
    assert agent.output_schema == PersonSchema
    # Override again -> no state leaks between runs.
    response3: RunOutput = agent.run(
        "Tell me about 'To Kill a Mockingbird' by Harper Lee published in 1960",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response3.content, BookSchema)
    assert agent.output_schema == PersonSchema


@pytest.mark.asyncio
async def test_multiple_async_calls_in_sequence():
    """Test multiple sequential async calls with different schema overrides."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    response1: RunOutput = await agent.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response1.content, BookSchema)
    assert agent.output_schema == PersonSchema
    response2: RunOutput = await agent.arun(
        "Tell me about a person named John who is 30 years old",
        stream=False,
    )
    assert isinstance(response2.content, PersonSchema)
    assert agent.output_schema == PersonSchema
    response3: RunOutput = await agent.arun(
        "Tell me about 'To Kill a Mockingbird' by Harper Lee published in 1960",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response3.content, BookSchema)
    assert agent.output_schema == PersonSchema
def test_run_with_parser_model():
    """Test that output_schema override works with parser model."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    response: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    # The parser model must honor the per-run schema, not the default.
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert agent.output_schema == PersonSchema


def test_run_streaming_with_parser_model():
    """Test that output_schema override works with parser model streaming."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    final_response = None
    for event in agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert agent.output_schema == PersonSchema


@pytest.mark.asyncio
async def test_arun_with_parser_model():
    """Test that output_schema override works with parser model in arun()."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    response: RunOutput = await agent.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert agent.output_schema == PersonSchema


@pytest.mark.asyncio
async def test_arun_streaming_with_parser_model():
    """Test that output_schema override works with parser model async streaming."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    final_response = None
    async for event in agent.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert agent.output_schema == PersonSchema
def test_run_with_structured_outputs():
    """Test that output_schema override works with structured outputs."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        structured_outputs=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    response: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert agent.output_schema == PersonSchema


def test_run_with_json_mode():
    """Test that output_schema override works with JSON mode."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    response: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert agent.output_schema == PersonSchema


def test_run_with_default():
    """Test that passing output_schema=None uses the default schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    # Explicit None must NOT disable structured output; default still applies.
    response: RunOutput = agent.run(
        "Tell me about a person named John who is 30 years old",
        output_schema=None,
        stream=False,
    )
    assert isinstance(response.content, PersonSchema)
    assert response.content.name is not None
    assert response.content.age is not None
    assert agent.output_schema == PersonSchema


def test_run_streaming_without_default_schema():
    """Test streaming run without default schema, with override."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=False,
    )
    assert agent.output_schema is None
    final_response = None
    for event in agent.run(
        "Tell me about 'The Catcher in the Rye' by J.D. Salinger published in 1951",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert agent.output_schema is None
@pytest.mark.asyncio
async def test_arun_streaming_without_default_schema():
"""Test async streaming run without default schema, with override."""
agent = Agent(
model=OpenAIChat(id="gpt-4o-mini"),
markdown=False,
)
assert agent.output_schema is None
final_response = None
async for event in agent.arun(
"Tell me about 'War and Peace' by Leo Tolstoy published in 1869",
output_schema=BookSchema,
stream=True,
):
if hasattr(event, "content"):
final_response = event
assert final_response is not None
assert isinstance(final_response.content, BookSchema)
assert final_response.content.title is not None
assert final_response.content.author is not None
assert final_response.content.year is not None
assert agent.output_schema is None
def test_run_streaming_with_json_mode():
    """Streaming + JSON mode: runtime override wins, agent default is restored."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    captured = [
        event
        for event in agent.run(
            "Tell me about 'Slaughterhouse-Five' by Kurt Vonnegut published in 1969",
            output_schema=BookSchema,
            stream=True,
        )
        if hasattr(event, "content")
    ]
    final_event = captured[-1] if captured else None
    assert final_event is not None
    # The override schema, not the default, should shape the content.
    assert isinstance(final_event.content, BookSchema)
    assert final_event.content.title is not None
    assert final_event.content.author is not None
    assert final_event.content.year is not None
    assert agent.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_arun_with_json_mode():
    """Async run + JSON mode: runtime override wins, agent default is restored."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    result = await agent.arun(
        "Tell me about 'The Grapes of Wrath' by John Steinbeck published in 1939",
        output_schema=BookSchema,
        stream=False,
    )
    # The override schema, not the default, should shape the content.
    assert isinstance(result.content, BookSchema)
    assert result.content.title is not None
    assert result.content.author is not None
    assert result.content.year is not None
    assert agent.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_arun_streaming_with_json_mode():
    """Async streaming + JSON mode: runtime override wins, default restored."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    last_event = None
    async for chunk in agent.arun(
        "Tell me about 'Of Mice and Men' by John Steinbeck published in 1937",
        output_schema=BookSchema,
        stream=True,
    ):
        if not hasattr(chunk, "content"):
            continue
        last_event = chunk
    assert last_event is not None
    assert isinstance(last_event.content, BookSchema)
    assert last_event.content.title is not None
    assert last_event.content.author is not None
    assert last_event.content.year is not None
    assert agent.output_schema == PersonSchema
def test_run_streaming_with_structured_outputs():
    """Streaming + structured outputs: runtime override wins, default restored."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        structured_outputs=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    captured = [
        event
        for event in agent.run(
            "Tell me about 'A Tale of Two Cities' by Charles Dickens published in 1859",
            output_schema=BookSchema,
            stream=True,
        )
        if hasattr(event, "content")
    ]
    final_event = captured[-1] if captured else None
    assert final_event is not None
    assert isinstance(final_event.content, BookSchema)
    assert final_event.content.title is not None
    assert final_event.content.author is not None
    assert final_event.content.year is not None
    assert agent.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_arun_with_structured_outputs():
    """Async run + structured outputs: runtime override wins, default restored."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        structured_outputs=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    result = await agent.arun(
        "Tell me about 'Jane Eyre' by Charlotte Bronte published in 1847",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(result.content, BookSchema)
    assert result.content.title is not None
    assert result.content.author is not None
    assert result.content.year is not None
    assert agent.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_arun_streaming_with_structured_outputs():
    """Async streaming + structured outputs: runtime override wins, default restored."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        structured_outputs=True,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    last_event = None
    async for chunk in agent.arun(
        "Tell me about 'Wuthering Heights' by Emily Bronte published in 1847",
        output_schema=BookSchema,
        stream=True,
    ):
        if not hasattr(chunk, "content"):
            continue
        last_event = chunk
    assert last_event is not None
    assert isinstance(last_event.content, BookSchema)
    assert last_event.content.title is not None
    assert last_event.content.author is not None
    assert last_event.content.year is not None
    assert agent.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_arun_with_default():
    """Async: output_schema=None must fall back to the agent's default schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert agent.output_schema == PersonSchema
    result = await agent.arun(
        "Tell me about a person named Carol who is 28 years old",
        output_schema=None,
        stream=False,
    )
    assert isinstance(result.content, PersonSchema)
    assert result.content.name is not None
    assert result.content.age is not None
    assert agent.output_schema == PersonSchema
def test_run_with_json_schema():
    """A plain JSON-schema dict works as the agent's default output_schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        markdown=False,
    )
    result: RunOutput = agent.run(
        "Tell me about Albert Einstein who was 76 years old",
        stream=False,
    )
    # JSON-schema outputs come back as plain dicts, not Pydantic models.
    assert isinstance(result.content, dict)
    assert "name" in result.content
    assert "age" in result.content
    assert result.content_type == "dict"
@pytest.mark.asyncio
async def test_arun_with_json_schema():
    """A plain JSON-schema dict works as output_schema in async runs."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        markdown=False,
    )
    result: RunOutput = await agent.arun(
        "Tell me about Isaac Newton who was 84 years old",
        stream=False,
    )
    assert isinstance(result.content, dict)
    assert "name" in result.content
    assert "age" in result.content
    assert result.content_type == "dict"
def test_run_with_json_schema_override():
    """A JSON-schema default can be overridden per run by another JSON schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        markdown=False,
    )
    assert agent.output_schema == person_json_schema
    result: RunOutput = agent.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=book_json_schema,
        stream=False,
    )
    assert isinstance(result.content, dict)
    # The book schema, not the person schema, should shape the keys.
    assert "title" in result.content
    assert "author" in result.content
    assert "year" in result.content
    assert agent.output_schema == person_json_schema
@pytest.mark.asyncio
async def test_arun_with_json_schema_override():
    """Async: a JSON-schema default can be overridden per run."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        markdown=False,
    )
    result: RunOutput = await agent.arun(
        "Tell me about 'The Great Gatsby' by F. Scott Fitzgerald published in 1925",
        output_schema=book_json_schema,
        stream=False,
    )
    assert isinstance(result.content, dict)
    assert "title" in result.content
    assert "author" in result.content
    assert "year" in result.content
    assert agent.output_schema == person_json_schema
def test_run_streaming_with_json_schema():
    """Streaming honours a JSON-schema dict as the default output schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        markdown=False,
    )
    captured = [
        event
        for event in agent.run(
            "Tell me about Marie Curie who was 66 years old",
            stream=True,
        )
        if hasattr(event, "content")
    ]
    final_event = captured[-1] if captured else None
    assert final_event is not None
    assert isinstance(final_event.content, dict)
    assert "name" in final_event.content
    assert "age" in final_event.content
@pytest.mark.asyncio
async def test_arun_streaming_with_json_schema():
    """Async streaming honours a JSON-schema dict as the default output schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        markdown=False,
    )
    last_event = None
    async for chunk in agent.arun(
        "Tell me about Nikola Tesla who was 86 years old",
        stream=True,
    ):
        if not hasattr(chunk, "content"):
            continue
        last_event = chunk
    assert last_event is not None
    assert isinstance(last_event.content, dict)
    assert "name" in last_event.content
    assert "age" in last_event.content
def test_run_json_schema_with_structured_outputs():
    """JSON-schema dict output combined with structured_outputs=True."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        structured_outputs=True,
        markdown=False,
    )
    result: RunOutput = agent.run(
        "Tell me about Charles Darwin who was 73 years old",
        stream=False,
    )
    assert isinstance(result.content, dict)
    assert "name" in result.content
    assert "age" in result.content
    assert result.content_type == "dict"
@pytest.mark.asyncio
async def test_arun_json_schema_with_structured_outputs():
    """Async: JSON-schema dict output combined with structured_outputs=True."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=person_json_schema,
        structured_outputs=True,
        markdown=False,
    )
    result: RunOutput = await agent.arun(
        "Tell me about Galileo Galilei who was 77 years old",
        stream=False,
    )
    assert isinstance(result.content, dict)
    assert "name" in result.content
    assert "age" in result.content
    assert result.content_type == "dict"
def test_run_json_schema_without_default():
    """A JSON-schema override on an agent that has no default schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=False,
    )
    assert agent.output_schema is None
    result: RunOutput = agent.run(
        "Tell me about Ada Lovelace who was 36 years old",
        output_schema=person_json_schema,
        stream=False,
    )
    assert isinstance(result.content, dict)
    assert "name" in result.content
    assert "age" in result.content
    # The per-run override must not stick to the agent.
    assert agent.output_schema is None
@pytest.mark.asyncio
async def test_arun_json_schema_without_default():
    """Async: a JSON-schema override on an agent with no default schema."""
    agent = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        markdown=False,
    )
    assert agent.output_schema is None
    result: RunOutput = await agent.arun(
        "Tell me about Alan Turing who was 41 years old",
        output_schema=person_json_schema,
        stream=False,
    )
    assert isinstance(result.content, dict)
    assert "name" in result.content
    assert "age" in result.content
    assert agent.output_schema is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/agent/test_output_schema_override.py",
"license": "Apache License 2.0",
"lines": 720,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/os/test_output_schema_override.py | """
Integration tests for AgentOS dynamic output_schema.
Tests passing output_schema as JSON schema string via AgentOS API endpoints.
"""
import json
import pytest
from fastapi.testclient import TestClient
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.team import Team
@pytest.fixture(autouse=True)
def reset_async_client():
    """Clear the module-level async HTTP client around every test.

    A cached client bound to a previous test's (closed) event loop would
    break subsequent async requests, so it is dropped both before and after.
    """
    import agno.utils.http as _http

    _http._global_async_client = None
    yield
    _http._global_async_client = None
# Minimal structured-output model used by the pre-configured-schema tests.
# NOTE: kept docstring-free on purpose — a class docstring could surface as
# the model's schema description and alter what the API sends.
class MovieScript(BaseModel):
    title: str = Field(..., description="Movie title")
    genre: str = Field(..., description="Movie genre")
def test_agent_with_output_schema(test_os_client: TestClient, test_agent: Agent):
    """A flat JSON schema sent as a form field yields a typed dict response."""
    schema = {
        "title": "MovieScript",
        "type": "object",
        "properties": {
            "title": {"type": "string"},
            "genre": {"type": "string"},
        },
        "required": ["title", "genre"],
    }
    payload = {
        "message": "Write a movie about AI",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    content = body["content"]
    assert isinstance(content, dict)
    assert "title" in content
    assert "genre" in content
    assert isinstance(content["title"], str)
    assert isinstance(content["genre"], str)
    # content_type is derived from the schema's "title" field.
    assert body["content_type"] == "MovieScript"
def test_agent_with_nested_schema(test_os_client: TestClient, test_agent: Agent):
    """Nested object properties in the schema survive the round trip."""
    schema = {
        "title": "Product",
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "price": {"type": "number"},
            "in_stock": {"type": "boolean"},
            "supplier": {
                "type": "object",
                "title": "Supplier",
                "properties": {
                    "name": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["name", "country"],
            },
        },
        "required": ["name", "price", "in_stock", "supplier"],
    }
    payload = {
        "message": "Create a product: laptop from a tech supplier in USA",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    # The nested supplier object must itself be a dict with its required keys.
    supplier = body["content"].get("supplier")
    assert "supplier" in body["content"]
    assert isinstance(supplier, dict)
    assert "name" in supplier
    assert "country" in supplier
def test_agent_with_array_schema(test_os_client: TestClient, test_agent: Agent):
    """Array-typed fields come back as non-empty JSON lists."""
    schema = {
        "title": "Recipe",
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "ingredients": {
                "type": "array",
                "items": {"type": "string"},
            },
            "prep_time": {"type": "integer"},
        },
        "required": ["name", "ingredients"],
    }
    payload = {
        "message": "Give me a simple pasta recipe",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "ingredients" in body["content"]
    ingredients = body["content"]["ingredients"]
    assert isinstance(ingredients, list)
    assert len(ingredients) > 0
def test_agent_with_optional_fields(test_os_client: TestClient, test_agent: Agent):
    """Only the required fields of the schema are asserted; optionals may be absent."""
    schema = {
        "title": "Config",
        "type": "object",
        "properties": {
            "host": {"type": "string"},
            "port": {"type": "integer"},
            "username": {"type": "string"},
            "password": {"type": "string"},
        },
        "required": ["host", "port"],
    }
    payload = {
        "message": "Create a server config for localhost:8080",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "host" in body["content"]
    assert "port" in body["content"]
def test_agent_streaming_with_schema(test_os_client: TestClient, test_agent: Agent):
    """Streaming with a schema responds 200 with an SSE content type."""
    schema = {
        "title": "Answer",
        "type": "object",
        "properties": {
            "answer": {"type": "string"},
            "confidence": {"type": "number"},
        },
        "required": ["answer"],
    }
    payload = {
        "message": "What is 2+2?",
        "output_schema": json.dumps(schema),
        "stream": "true",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    # Streaming runs are delivered as server-sent events.
    assert "text/event-stream" in resp.headers.get("content-type", "")
def test_agent_with_invalid_schema(test_os_client: TestClient, test_agent: Agent):
    """Malformed schema JSON is ignored gracefully: plain-text content, no error."""
    payload = {
        "message": "Write a story",
        "output_schema": "not valid json{",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    # Without a usable schema the run falls back to a string response.
    assert isinstance(body["content"], str)
def test_agent_with_array_of_objects(test_os_client: TestClient, test_agent: Agent):
    """Arrays whose items are objects are returned as lists of dicts."""
    schema = {
        "title": "MovieCast",
        "type": "object",
        "properties": {
            "movie": {"type": "string"},
            "actors": {
                "type": "array",
                "items": {
                    "type": "object",
                    "title": "Actor",
                    "properties": {
                        "name": {"type": "string"},
                        "role": {"type": "string"},
                    },
                    "required": ["name", "role"],
                },
            },
        },
        "required": ["movie", "actors"],
    }
    payload = {
        "message": "Create a cast for a space movie with 2 actors",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "actors" in body["content"]
    actors = body["content"]["actors"]
    assert isinstance(actors, list)
    # Item shape is only checked when the model actually produced actors.
    if len(actors) > 0:
        assert "name" in actors[0]
        assert "role" in actors[0]
def test_agent_preconfigured_vs_dynamic_schema(test_os_client: TestClient, test_agent: Agent):
    """A pre-configured Pydantic schema and a dynamic JSON schema must agree.

    Both routes should produce dict content with identical keys and the same
    content_type derived from the schema title.
    """
    # Route 1: agent constructed with a Pydantic output schema.
    agent_with_schema = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=MovieScript,
        telemetry=False,
        markdown=False,
    )
    agent_os1 = AgentOS(agents=[agent_with_schema])
    app1 = agent_os1.get_app()
    with TestClient(app1) as client1:
        response1 = client1.post(
            f"/agents/{agent_with_schema.id}/runs",
            data={"message": "Write a sci-fi movie about AI", "stream": "false"},
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
    # Route 2: same schema expressed as JSON and passed per-request.
    schema = {
        "title": "MovieScript",
        "type": "object",
        "properties": {
            "title": {"type": "string"},
            "genre": {"type": "string"},
        },
        "required": ["title", "genre"],
    }
    response2 = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data={
            "message": "Write a sci-fi movie about AI",
            "output_schema": json.dumps(schema),
            "stream": "false",
        },
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response1.status_code == 200
    assert response2.status_code == 200
    data1 = response1.json()
    data2 = response2.json()
    assert isinstance(data1["content"], dict)
    assert isinstance(data2["content"], dict)
    assert set(data1["content"].keys()) == set(data2["content"].keys())
    assert data1["content_type"] == data2["content_type"] == "MovieScript"
def test_team_with_output_schema(test_os_client: TestClient, test_team: Team):
    """A flat JSON schema on a team run yields a typed dict response."""
    schema = {
        "title": "Report",
        "type": "object",
        "properties": {
            "summary": {"type": "string"},
            "recommendation": {"type": "string"},
        },
        "required": ["summary", "recommendation"],
    }
    payload = {
        "message": "Analyze the benefits of remote work",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "summary" in body["content"]
    assert "recommendation" in body["content"]
    # content_type is derived from the schema's "title" field.
    assert body["content_type"] == "Report"
def test_team_with_nested_schema(test_os_client: TestClient, test_team: Team):
    """Nested objects with array leaves survive a team run round trip."""
    schema = {
        "title": "Analysis",
        "type": "object",
        "properties": {
            "topic": {"type": "string"},
            "findings": {
                "type": "object",
                "title": "Findings",
                "properties": {
                    "pros": {
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "cons": {
                        "type": "array",
                        "items": {"type": "string"},
                    },
                },
                "required": ["pros", "cons"],
            },
        },
        "required": ["topic", "findings"],
    }
    payload = {
        "message": "Analyze electric vehicles",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "findings" in body["content"]
    findings = body["content"]["findings"]
    assert isinstance(findings, dict)
    assert "pros" in findings
    assert "cons" in findings
def test_team_streaming_with_schema(test_os_client: TestClient, test_team: Team):
    """Streaming a team run with a schema responds 200 with an SSE content type."""
    schema = {
        "title": "Result",
        "type": "object",
        "properties": {
            "output": {"type": "string"},
            "status": {"type": "string"},
        },
        "required": ["output"],
    }
    payload = {
        "message": "Write a tagline for a tech startup",
        "output_schema": json.dumps(schema),
        "stream": "true",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    assert "text/event-stream" in resp.headers.get("content-type", "")
def test_team_without_schema(test_os_client: TestClient, test_team: Team):
    """Without any output schema a team run returns plain string content."""
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data={"message": "Hello", "stream": "false"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], str)
def test_team_with_array_schema(test_os_client: TestClient, test_team: Team):
    """Array-typed fields on a team run come back as non-empty JSON lists."""
    schema = {
        "title": "Recipe",
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "ingredients": {
                "type": "array",
                "items": {"type": "string"},
            },
            "prep_time": {"type": "integer"},
        },
        "required": ["name", "ingredients"],
    }
    payload = {
        "message": "Give me a simple pasta recipe",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "ingredients" in body["content"]
    ingredients = body["content"]["ingredients"]
    assert isinstance(ingredients, list)
    assert len(ingredients) > 0
def test_team_with_optional_fields(test_os_client: TestClient, test_team: Team):
    """Only required fields are asserted for team runs; optionals may be absent."""
    schema = {
        "title": "Config",
        "type": "object",
        "properties": {
            "host": {"type": "string"},
            "port": {"type": "integer"},
            "username": {"type": "string"},
            "password": {"type": "string"},
        },
        "required": ["host", "port"],
    }
    payload = {
        "message": "Create a server config for localhost:8080",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "host" in body["content"]
    assert "port" in body["content"]
def test_team_with_invalid_schema(test_os_client: TestClient, test_team: Team):
    """Malformed schema JSON on a team run falls back to string content."""
    payload = {
        "message": "Write a story",
        "output_schema": "not valid json{",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], str)
def test_team_with_array_of_objects(test_os_client: TestClient, test_team: Team):
    """Arrays of objects on a team run come back as lists of dicts."""
    schema = {
        "title": "MovieCast",
        "type": "object",
        "properties": {
            "movie": {"type": "string"},
            "actors": {
                "type": "array",
                "items": {
                    "type": "object",
                    "title": "Actor",
                    "properties": {
                        "name": {"type": "string"},
                        "role": {"type": "string"},
                    },
                    "required": ["name", "role"],
                },
            },
        },
        "required": ["movie", "actors"],
    }
    payload = {
        "message": "Create a cast for a space movie with 2 actors",
        "output_schema": json.dumps(schema),
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert "content" in body
    assert isinstance(body["content"], dict)
    assert "actors" in body["content"]
    actors = body["content"]["actors"]
    assert isinstance(actors, list)
    # Item shape is only checked when the model actually produced actors.
    if len(actors) > 0:
        assert "name" in actors[0]
        assert "role" in actors[0]
def test_team_preconfigured_vs_dynamic_schema(test_os_client: TestClient, test_team: Team):
    """A pre-configured Pydantic schema and a dynamic JSON schema must agree for teams."""
    # Route 1: team constructed with a Pydantic output schema.
    team_with_schema = Team(
        name="Writing Team",
        members=[
            Agent(
                name="Writer",
                model=OpenAIChat(id="gpt-4o-mini"),
                telemetry=False,
            )
        ],
        output_schema=MovieScript,
        telemetry=False,
        markdown=False,
    )
    agent_os1 = AgentOS(teams=[team_with_schema])
    app1 = agent_os1.get_app()
    with TestClient(app1) as client1:
        response1 = client1.post(
            f"/teams/{team_with_schema.id}/runs",
            data={"message": "Write a sci-fi movie about AI", "stream": "false"},
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )
    # Route 2: same schema expressed as JSON and passed per-request.
    schema = {
        "title": "MovieScript",
        "type": "object",
        "properties": {
            "title": {"type": "string"},
            "genre": {"type": "string"},
        },
        "required": ["title", "genre"],
    }
    response2 = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data={
            "message": "Write a sci-fi movie about AI",
            "output_schema": json.dumps(schema),
            "stream": "false",
        },
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response1.status_code == 200
    assert response2.status_code == 200
    data1 = response1.json()
    data2 = response2.json()
    assert isinstance(data1["content"], dict)
    assert isinstance(data2["content"], dict)
    assert set(data1["content"].keys()) == set(data2["content"].keys())
    assert data1["content_type"] == data2["content_type"] == "MovieScript"
def test_agent_use_json_schema_true_keeps_dict(test_os_client: TestClient, test_agent: Agent):
    """use_json_schema=true forwards the schema dict untouched to the model."""
    schema = {
        "type": "json_schema",
        "json_schema": {
            "name": "Person",
            "schema": {
                "type": "object",
                "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
                "required": ["name", "age"],
                "additionalProperties": False,
            },
        },
    }
    payload = {
        "message": "Person named Alice age 30",
        "output_schema": json.dumps(schema),
        "use_json_schema": "true",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert isinstance(body.get("content"), dict)
    assert "name" in body["content"]
    assert "age" in body["content"]
def test_agent_use_json_schema_false_converts_to_pydantic(test_os_client: TestClient, test_agent: Agent):
    """use_json_schema=false converts the dict schema into a Pydantic model first."""
    schema = {
        "type": "object",
        "properties": {"city": {"type": "string"}, "country": {"type": "string"}},
        "required": ["city", "country"],
    }
    payload = {
        "message": "City: Paris, Country: France",
        "output_schema": json.dumps(schema),
        "use_json_schema": "false",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert isinstance(body.get("content"), dict)
    assert "city" in body["content"]
def test_agent_json_object_format_passthrough(test_os_client: TestClient, test_agent: Agent):
    """A bare json_object response format passes through with use_json_schema=true."""
    schema = {"type": "json_object"}
    payload = {
        "message": "Return JSON with name=Charlie and age=35",
        "output_schema": json.dumps(schema),
        "use_json_schema": "true",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/agents/{test_agent.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    # json_object mode guarantees dict-shaped content but no fixed keys.
    assert isinstance(body.get("content"), dict)
def test_team_use_json_schema_true_keeps_dict(test_os_client: TestClient, test_team: Team):
    """Team runs with use_json_schema=true forward the schema dict untouched."""
    schema = {
        "type": "json_schema",
        "json_schema": {
            "name": "Report",
            "schema": {
                "type": "object",
                "properties": {"summary": {"type": "string"}, "score": {"type": "integer"}},
                "required": ["summary", "score"],
                "additionalProperties": False,
            },
        },
    }
    payload = {
        "message": "Write a report summary with score 85",
        "output_schema": json.dumps(schema),
        "use_json_schema": "true",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert isinstance(body.get("content"), dict)
    assert "summary" in body["content"]
    assert "score" in body["content"]
def test_team_use_json_schema_false_converts_to_pydantic(test_os_client: TestClient, test_team: Team):
    """Team runs with use_json_schema=false convert the schema to a Pydantic model."""
    schema = {
        "type": "object",
        "properties": {"city": {"type": "string"}, "country": {"type": "string"}},
        "required": ["city", "country"],
    }
    payload = {
        "message": "City: Tokyo, Country: Japan",
        "output_schema": json.dumps(schema),
        "use_json_schema": "false",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    assert isinstance(body.get("content"), dict)
    assert "city" in body["content"]
def test_team_json_object_format_passthrough(test_os_client: TestClient, test_team: Team):
    """Team runs: bare json_object format passes through with use_json_schema=true."""
    schema = {"type": "json_object"}
    payload = {
        "message": "Return JSON with product=phone and price=999",
        "output_schema": json.dumps(schema),
        "use_json_schema": "true",
        "stream": "false",
    }
    resp = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=payload,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
    body = resp.json()
    # json_object mode guarantees dict-shaped content but no fixed keys.
    assert isinstance(body.get("content"), dict)
def test_agent_streaming_use_json_schema_true(test_os_client: TestClient, test_agent: Agent):
    """Streaming with use_json_schema=true: parse SSE chunks and check the final dict."""
    schema = {
        "type": "json_schema",
        "json_schema": {
            "name": "Answer",
            "schema": {
                "type": "object",
                "properties": {"answer": {"type": "string"}},
                "required": ["answer"],
            },
        },
    }
    with test_os_client.stream(
        "POST",
        f"/agents/{test_agent.id}/runs",
        data={
            "message": "What is 2+2? Answer in JSON.",
            "output_schema": json.dumps(schema),
            "use_json_schema": "true",
            "stream": "true",
        },
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    ) as response:
        assert response.status_code == 200
        assert "text/event-stream" in response.headers.get("content-type", "")
        # Parse every SSE data line into a JSON event, skipping the terminator.
        events = []
        for raw in response.iter_lines():
            if not raw.startswith("data: "):
                continue
            chunk_body = raw[len("data: "):]
            if chunk_body != "[DONE]":
                events.append(json.loads(chunk_body))
        assert len(events) > 0
        # The opening event identifies the run and the agent.
        head = events[0]
        assert head.get("run_id") is not None
        assert head.get("agent_id") == test_agent.id
        # At least one event must carry content.
        assert len([e.get("content") for e in events if e.get("content")]) > 0
        # The closing event, when it has content, must match the schema keys.
        tail = events[-1]
        if tail.get("content"):
            assert isinstance(tail["content"], dict)
            assert "answer" in tail["content"]
def test_agent_streaming_json_object_passthrough(test_os_client: TestClient, test_agent: Agent):
"""Test agent streaming with json_object format passes through."""
schema = {"type": "json_object"}
with test_os_client.stream(
"POST",
f"/agents/{test_agent.id}/runs",
data={
"message": "Return JSON with x=1 and y=2",
"output_schema": json.dumps(schema),
"use_json_schema": "true",
"stream": "true",
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
) as response:
assert response.status_code == 200
assert "text/event-stream" in response.headers.get("content-type", "")
# Collect streaming chunks
chunks = []
for line in response.iter_lines():
if line.startswith("data: "):
data = line[6:]
if data != "[DONE]":
chunks.append(json.loads(data))
# Verify we received data
assert len(chunks) > 0
# Check first chunk has expected fields
first_chunk = chunks[0]
assert first_chunk.get("run_id") is not None
assert first_chunk.get("agent_id") == test_agent.id
# Verify content across chunks
content_chunks = [chunk.get("content") for chunk in chunks if chunk.get("content")]
assert len(content_chunks) > 0
# Verify final content is dict
last_chunk = chunks[-1]
if last_chunk.get("content"):
assert isinstance(last_chunk["content"], dict)
def test_team_streaming_use_json_schema_true(test_os_client: TestClient, test_team: Team):
"""Test team streaming with use_json_schema=true passes through dict schema."""
schema = {
"type": "json_schema",
"json_schema": {
"name": "Result",
"schema": {
"type": "object",
"properties": {"result": {"type": "string"}},
"required": ["result"],
},
},
}
with test_os_client.stream(
"POST",
f"/teams/{test_team.id}/runs",
data={
"message": "Give me a result in JSON format.",
"output_schema": json.dumps(schema),
"use_json_schema": "true",
"stream": "true",
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
) as response:
assert response.status_code == 200
assert "text/event-stream" in response.headers.get("content-type", "")
# Collect streaming chunks
chunks = []
for line in response.iter_lines():
if line.startswith("data: "):
data = line[6:]
if data != "[DONE]":
chunks.append(json.loads(data))
# Verify we received data
assert len(chunks) > 0
# Check first chunk has expected fields
first_chunk = chunks[0]
assert first_chunk.get("run_id") is not None
assert first_chunk.get("team_id") == test_team.id
# Verify content across chunks
content_chunks = [chunk.get("content") for chunk in chunks if chunk.get("content")]
assert len(content_chunks) > 0
# Verify final content is dict with expected keys
last_chunk = chunks[-1]
if last_chunk.get("content"):
assert isinstance(last_chunk["content"], dict)
assert "result" in last_chunk["content"]
def test_team_streaming_json_object_passthrough(test_os_client: TestClient, test_team: Team):
"""Test team streaming with json_object format passes through."""
schema = {"type": "json_object"}
with test_os_client.stream(
"POST",
f"/teams/{test_team.id}/runs",
data={
"message": "Return JSON with a=1 and b=2",
"output_schema": json.dumps(schema),
"use_json_schema": "true",
"stream": "true",
},
headers={"Content-Type": "application/x-www-form-urlencoded"},
) as response:
assert response.status_code == 200
assert "text/event-stream" in response.headers.get("content-type", "")
# Collect streaming chunks
chunks = []
for line in response.iter_lines():
if line.startswith("data: "):
data = line[6:]
if data != "[DONE]":
chunks.append(json.loads(data))
# Verify we received data
assert len(chunks) > 0
# Check first chunk has expected fields
first_chunk = chunks[0]
assert first_chunk.get("run_id") is not None
assert first_chunk.get("team_id") == test_team.id
# Verify content across chunks
content_chunks = [chunk.get("content") for chunk in chunks if chunk.get("content")]
assert len(content_chunks) > 0
# Verify final content is dict
last_chunk = chunks[-1]
if last_chunk.get("content"):
assert isinstance(last_chunk["content"], dict)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_output_schema_override.py",
"license": "Apache License 2.0",
"lines": 832,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/teams/test_output_schema_override.py | import pytest
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.team import Team
@pytest.fixture(autouse=True)
def reset_async_client():
    """Reset global async HTTP client between tests to avoid event loop conflicts."""
    import agno.utils.http as http_utils
    # NOTE(review): reaches into a private module attribute; if agno renames
    # _global_async_client this fixture silently becomes a no-op — verify on upgrade.
    # Reset before test
    http_utils._global_async_client = None
    yield
    # Reset after test
    http_utils._global_async_client = None
# Structured-output models: PersonSchema is the teams' configured default in
# these tests; BookSchema is the per-run override target.
class PersonSchema(BaseModel):
    name: str = Field(..., description="Person's name")
    age: int = Field(..., description="Person's age")
class BookSchema(BaseModel):
    title: str = Field(..., description="Book title")
    author: str = Field(..., description="Book author")
    year: int = Field(..., description="Publication year")
# Dict-form JSON Schemas mirroring PersonSchema/BookSchema above, used to
# exercise dict-based output_schema support ("json_schema" response format).
person_json_schema = {
    "type": "json_schema",
    "json_schema": {
        "name": "PersonInfo",
        "schema": {
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "Person's full name"},
                "age": {"type": "integer", "description": "Person's age"},
            },
            "required": ["name", "age"],
            "additionalProperties": False,
        },
    },
}
book_json_schema = {
    "type": "json_schema",
    "json_schema": {
        "name": "BookInfo",
        "schema": {
            "type": "object",
            "properties": {
                "title": {"type": "string", "description": "Book title"},
                "author": {"type": "string", "description": "Author name"},
                "year": {"type": "integer", "description": "Publication year"},
            },
            "required": ["title", "author", "year"],
            "additionalProperties": False,
        },
    },
}
def test_team_run_with_output_schema():
    """Test that output_schema can be overridden in team.run() and is restored after."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    # Integration test: drives a real team run against the configured model.
    response = team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    # The per-run override shapes this response...
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    # ...but must not mutate the team's configured default.
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_with_output_schema():
    """Test that output_schema can be overridden in team.arun() and is restored after."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    # Async path: the same restore-after-run guarantee as the sync variant.
    response = await team.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert team.output_schema == PersonSchema
def test_team_run_without_default_schema():
    """Test output_schema override when team has no default schema."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        markdown=False,
    )
    assert team.output_schema is None
    response = team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    # A run-level override must leave the (absent) default untouched.
    assert team.output_schema is None
@pytest.mark.asyncio
async def test_team_arun_without_default_schema():
    """Test output_schema override in team.arun() when team has no default schema."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        markdown=False,
    )
    assert team.output_schema is None
    response = await team.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert team.output_schema is None
def test_team_multiple_calls_in_sequence():
    """Test multiple sequential calls with different schema overrides."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    # Run 1: explicit override to BookSchema.
    response1 = team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response1.content, BookSchema)
    assert team.output_schema == PersonSchema
    # Run 2: no override — the configured PersonSchema default must apply again.
    response2 = team.run(
        "Tell me about a person named John who is 30 years old",
        stream=False,
    )
    assert isinstance(response2.content, PersonSchema)
    assert team.output_schema == PersonSchema
    # Run 3: override again; earlier runs must not have leaked state.
    response3 = team.run(
        "Tell me about 'To Kill a Mockingbird' by Harper Lee published in 1960",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response3.content, BookSchema)
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_multiple_async_calls_in_sequence():
    """Test multiple sequential async calls with different schema overrides."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    # Same override/default/override sequence as the sync test, via arun().
    response1 = await team.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response1.content, BookSchema)
    assert team.output_schema == PersonSchema
    response2 = await team.arun(
        "Tell me about a person named John who is 30 years old",
        stream=False,
    )
    assert isinstance(response2.content, PersonSchema)
    assert team.output_schema == PersonSchema
    response3 = await team.arun(
        "Tell me about 'To Kill a Mockingbird' by Harper Lee published in 1960",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response3.content, BookSchema)
    assert team.output_schema == PersonSchema
def test_team_run_streaming_with_output_schema():
    """Test that output_schema override works with team streaming."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    # Keep the last streamed event that exposes .content; that carries the
    # parsed structured output.
    final_response = None
    for event in team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_streaming_with_output_schema():
    """Test that output_schema override works with team async streaming."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    final_response = None
    # Async streaming mirror of the sync test above.
    async for event in team.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema == PersonSchema
def test_team_run_with_parser_model():
    """Test that output_schema override works with team parser model."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    # parser_model routes structured-output parsing through a second model;
    # the override must still apply on that path.
    team = Team(
        name="TestTeam",
        members=[agent1],
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    response = team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert team.output_schema == PersonSchema
def test_team_run_streaming_with_parser_model():
    """Test that output_schema override works with team parser model streaming."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    # Track the last event exposing .content — it carries the parsed output.
    final_response = None
    for event in team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_with_parser_model():
    """Test that output_schema override works with team parser model in arun()."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    response = await team.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_streaming_with_parser_model():
    """Test that output_schema override works with team parser model async streaming."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        parser_model=OpenAIChat(id="gpt-4o-mini"),
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    final_response = None
    async for event in team.arun(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema == PersonSchema
def test_team_run_with_json_mode():
    """Test that output_schema override works with team JSON mode."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    # use_json_mode=True switches structured output to JSON-mode prompting;
    # the run-level override must still take effect on that path.
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    response = team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert team.output_schema == PersonSchema
def test_team_run_with_default():
    """Test that passing output_schema=None uses the default schema for team."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    # Explicit None must mean "use the configured default", not "no schema".
    response = team.run(
        "Tell me about a person named John who is 30 years old",
        output_schema=None,
        stream=False,
    )
    assert isinstance(response.content, PersonSchema)
    assert response.content.name is not None
    assert response.content.age is not None
    assert team.output_schema == PersonSchema
def test_team_run_streaming_without_default_schema():
    """Test team streaming run without default schema, with override."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        markdown=False,
    )
    assert team.output_schema is None
    # Keep the last streamed event exposing .content (the parsed output).
    final_response = None
    for event in team.run(
        "Tell me about 'The Catcher in the Rye' by J.D. Salinger published in 1951",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    # Override must not install itself as a new default.
    assert team.output_schema is None
@pytest.mark.asyncio
async def test_team_arun_streaming_without_default_schema():
    """Test team async streaming run without default schema, with override."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        markdown=False,
    )
    assert team.output_schema is None
    final_response = None
    async for event in team.arun(
        "Tell me about 'War and Peace' by Leo Tolstoy published in 1869",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema is None
def test_team_run_streaming_with_json_mode():
    """Test team streaming run with JSON mode and override."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    # Combines use_json_mode with streaming + run-level schema override.
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    final_response = None
    for event in team.run(
        "Tell me about 'Slaughterhouse-Five' by Kurt Vonnegut published in 1969",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_with_json_mode():
    """Test team async run with JSON mode and override."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    response = await team.arun(
        "Tell me about 'The Grapes of Wrath' by John Steinbeck published in 1939",
        output_schema=BookSchema,
        stream=False,
    )
    assert isinstance(response.content, BookSchema)
    assert response.content.title is not None
    assert response.content.author is not None
    assert response.content.year is not None
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_streaming_with_json_mode():
    """Test team async streaming run with JSON mode and override."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=PersonSchema,
        use_json_mode=True,
        markdown=False,
    )
    assert team.output_schema == PersonSchema
    final_response = None
    async for event in team.arun(
        "Tell me about 'Of Mice and Men' by John Steinbeck published in 1937",
        output_schema=BookSchema,
        stream=True,
    ):
        if hasattr(event, "content"):
            final_response = event
    assert final_response is not None
    assert isinstance(final_response.content, BookSchema)
    assert final_response.content.title is not None
    assert final_response.content.author is not None
    assert final_response.content.year is not None
    assert team.output_schema == PersonSchema
@pytest.mark.asyncio
async def test_team_arun_with_default():
    """Test that passing output_schema=None uses default schema for team in async."""
    member = Agent(name="Agent1", role="Information provider", model=OpenAIChat(id="gpt-4o-mini"))
    team = Team(name="TestTeam", members=[member], output_schema=PersonSchema, markdown=False)
    assert team.output_schema == PersonSchema
    # An explicit None override must fall back to the team's configured schema.
    result = await team.arun(
        "Tell me about a person named Carol who is 28 years old",
        output_schema=None,
        stream=False,
    )
    assert isinstance(result.content, PersonSchema)
    assert result.content.name is not None
    assert result.content.age is not None
    # The configured default remains untouched after the run.
    assert team.output_schema == PersonSchema
def test_team_run_with_json_schema():
    """Test that JSON schema works as team output_schema."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    # Dict-based schema: content should come back as a plain dict, not a model.
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=person_json_schema,
        markdown=False,
    )
    response = team.run(
        "Tell me about Albert Einstein who was 76 years old",
        stream=False,
    )
    assert isinstance(response.content, dict)
    assert "name" in response.content
    assert "age" in response.content
    assert response.content_type == "dict"
@pytest.mark.asyncio
async def test_team_arun_with_json_schema():
    """Test that JSON schema works with team async run."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=person_json_schema,
        markdown=False,
    )
    response = await team.arun(
        "Tell me about Isaac Newton who was 84 years old",
        stream=False,
    )
    assert isinstance(response.content, dict)
    assert "name" in response.content
    assert "age" in response.content
    assert response.content_type == "dict"
def test_team_run_with_json_schema_override():
    """Test that JSON schema can be overridden at runtime in team."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=person_json_schema,
        markdown=False,
    )
    assert team.output_schema == person_json_schema
    # Override one dict schema with another; book keys should win for this run.
    response = team.run(
        "Tell me about '1984' by George Orwell published in 1949",
        output_schema=book_json_schema,
        stream=False,
    )
    assert isinstance(response.content, dict)
    assert "title" in response.content
    assert "author" in response.content
    assert "year" in response.content
    # The configured dict default must survive the override.
    assert team.output_schema == person_json_schema
@pytest.mark.asyncio
async def test_team_arun_with_json_schema_override():
    """Test that JSON schema override works with team async."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        output_schema=person_json_schema,
        markdown=False,
    )
    response = await team.arun(
        "Tell me about 'The Great Gatsby' by F. Scott Fitzgerald published in 1925",
        output_schema=book_json_schema,
        stream=False,
    )
    assert isinstance(response.content, dict)
    assert "title" in response.content
    assert "author" in response.content
    assert "year" in response.content
    assert team.output_schema == person_json_schema
def test_team_run_json_schema_without_default():
    """Test JSON schema override when team has no default schema."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        markdown=False,
    )
    assert team.output_schema is None
    # Dict schema supplied only at run time; must not become the default.
    response = team.run(
        "Tell me about Ada Lovelace who was 36 years old",
        output_schema=person_json_schema,
        stream=False,
    )
    assert isinstance(response.content, dict)
    assert "name" in response.content
    assert "age" in response.content
    assert team.output_schema is None
@pytest.mark.asyncio
async def test_team_arun_json_schema_without_default():
    """Test JSON schema override in async when team has no default schema."""
    agent1 = Agent(
        name="Agent1",
        role="Information provider",
        model=OpenAIChat(id="gpt-4o-mini"),
    )
    team = Team(
        name="TestTeam",
        members=[agent1],
        markdown=False,
    )
    assert team.output_schema is None
    response = await team.arun(
        "Tell me about Alan Turing who was 41 years old",
        output_schema=person_json_schema,
        stream=False,
    )
    assert isinstance(response.content, dict)
    assert "name" in response.content
    assert "age" in response.content
    assert team.output_schema is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/teams/test_output_schema_override.py",
"license": "Apache License 2.0",
"lines": 730,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/knowledge/test_async_knowledge_retriever.py | """
Test for issue #5490: Async knowledge_retriever not properly awaited
when add_knowledge_to_context is enabled.
This test verifies that async knowledge retrievers are properly awaited
when using add_knowledge_to_context=True with async execution paths.
"""
import asyncio
from typing import Optional
import pytest
from agno.agent import Agent
from agno.models.openai import OpenAIChat
# Stub async retriever shared by the tests below.
async def knowledge_retriever(
    query: str, agent: Optional[Agent] = None, num_documents: int = 5, **kwargs
) -> Optional[list[dict]]:
    """Async stand-in for a custom knowledge retriever.

    Sleeps briefly to force a real suspension point, then returns one
    synthetic document echoing the query.

    Args:
        query (str): The search query string
        agent (Agent): The agent instance making the query
        num_documents (int): Number of documents to retrieve (default: 5)
        **kwargs: Additional keyword arguments

    Returns:
        Optional[list[dict]]: A single synthetic document wrapping the query
    """
    # The sleep guarantees the coroutine actually suspends, so a call that
    # is not awaited surfaces downstream as a coroutine object (issue #5490).
    await asyncio.sleep(0.1)
    document = {"content": f"Retrieved doc for: {query}"}
    return [document]
@pytest.mark.asyncio
async def test_async_retriever_with_add_knowledge_to_context():
    """
    Test that async knowledge_retriever is properly awaited when add_knowledge_to_context=True.
    This test reproduces issue #5490 where async knowledge retrievers were not
    properly awaited in the async execution path, causing a coroutine object to be
    returned instead of actual results.
    """
    # Initialize model
    model = OpenAIChat(id="gpt-4o")
    # Initialize agent with async knowledge retriever and add_knowledge_to_context=True
    # This is the key setting that triggers the bug without the fix
    agent = Agent(
        model=model,
        add_knowledge_to_context=True,
        knowledge_retriever=knowledge_retriever,
        instructions="You are a helpful assistant that uses knowledge from the knowledge base.",
    )
    # Execute async query - this should work correctly with the fix
    query = "Retrieve all documents from the knowledge base"
    response = await agent.arun(query)
    # Verify that the response was generated successfully
    assert response is not None
    assert response.content is not None
    # Verify that references were added to the response (indicating knowledge retrieval worked)
    # If the async retriever wasn't awaited, references would be None or contain a coroutine object
    # NOTE(review): everything below is conditional — if references is empty the
    # regression check silently does nothing; confirm references are always set here.
    if response.references:
        # Check that references contain actual document data, not coroutine objects
        for ref in response.references:
            assert ref.references is not None
            assert isinstance(ref.references, list)
            # Verify references are actual dicts, not coroutine objects
            for doc in ref.references:
                assert isinstance(doc, dict)
                assert "content" in doc
@pytest.mark.asyncio
async def test_async_retriever_with_search_knowledge():
    """
    Test that async knowledge_retriever works correctly with search_knowledge=True.
    This test verifies the alternative code path that may already handle
    async retrievers properly.
    """
    # search_knowledge=True exerces the tool-based retrieval path.
    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),
        search_knowledge=True,
        knowledge_retriever=knowledge_retriever,
        instructions="You are a helpful assistant that searches the knowledge base.",
    )
    response = await agent.arun("Search for information about documents")
    # A populated response is enough to prove the async retriever path completed.
    assert response is not None
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/knowledge/test_async_knowledge_retriever.py",
"license": "Apache License 2.0",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/sqlite/test_memory.py | """Integration tests for the Memory related methods of the SqliteDb class"""
from typing import cast
import pytest
from agno.db.schemas.memory import UserMemory
from agno.db.sqlite.sqlite import SqliteDb
@pytest.fixture(autouse=True)
def cleanup_memories(sqlite_db_real: SqliteDb):
    """Fixture to clean-up session rows after each test"""
    yield
    # Best-effort teardown: wipe the memories table so tests stay isolated.
    with sqlite_db_real.Session() as session:
        try:
            # _get_table may return None if the table was never created.
            memories_table = sqlite_db_real._get_table("memories")
            if memories_table is not None:
                session.execute(memories_table.delete())
                session.commit()
        except Exception:
            # Teardown must never fail the test; roll back and move on.
            session.rollback()
@pytest.fixture
def sample_memory() -> UserMemory:
    """Provide a canned UserMemory row for the insert/read tests."""
    return UserMemory(
        memory_id="1",
        memory="User likes surfing",
        user_id="1",
        topics=["sports", "water"],
    )
def test_insert_memory(sqlite_db_real: SqliteDb, sample_memory: UserMemory):
    """Ensure upsert_user_memory works as expected when inserting a new UserMemory"""
    result = sqlite_db_real.upsert_user_memory(sample_memory, deserialize=True)
    assert result is not None
    # deserialize=True should hand back a UserMemory, not a raw row.
    memory = cast(UserMemory, result)
    assert memory.memory_id == sample_memory.memory_id
    assert memory.memory == sample_memory.memory
    assert memory.user_id == sample_memory.user_id
    assert memory.topics == sample_memory.topics
def test_get_memories_by_topics(sqlite_db_real: SqliteDb):
    """Test getting memories by topics."""
    # Seed two memories with disjoint topic sets.
    sqlite_db_real.upsert_user_memory(
        UserMemory(memory_id="1", memory="User likes surfing", user_id="1", topics=["sports", "water"]),
        deserialize=True,
    )
    sqlite_db_real.upsert_user_memory(
        UserMemory(memory_id="2", memory="User likes sushi", user_id="1", topics=["food", "japanese"]), deserialize=True
    )
    # The topic filter should match only the surfing memory.
    memories = sqlite_db_real.get_user_memories(topics=["sports"])
    assert len(memories) == 1
    assert memories[0].memory_id == "1"
    assert memories[0].memory == "User likes surfing"
    assert memories[0].user_id == "1"
    assert memories[0].topics == ["sports", "water"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/sqlite/test_memory.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/os/test_memory.py | """Integration tests for the Memory routes in AgentOS."""
def test_create_memory(test_os_client):
    """Test creating a new memory."""
    payload = {
        "memory": "User prefers technical explanations with code examples",
        "user_id": "test-user-123",
        "topics": ["preferences", "communication_style"],
    }
    response = test_os_client.post("/memories", json=payload)
    assert response.status_code == 200
    body = response.json()
    # A server-generated id and timestamp prove the row was persisted.
    assert body["memory_id"] is not None
    assert body["memory"] == "User prefers technical explanations with code examples"
    assert body["user_id"] == "test-user-123"
    assert set(body["topics"]) == {"preferences", "communication_style"}
    assert body["updated_at"] is not None
def test_update_memory(test_os_client):
    """Test updating an existing memory."""
    # Seed a memory so there is something to patch.
    created = test_os_client.post(
        "/memories",
        json={"memory": "Original memory content", "user_id": "test-user-update", "topics": ["original"]},
    )
    memory_id = created.json()["memory_id"]
    # Patch it with new content and topics.
    updated = test_os_client.patch(
        f"/memories/{memory_id}",
        json={"memory": "Updated memory content", "user_id": "test-user-update", "topics": ["updated", "modified"]},
    )
    assert updated.status_code == 200
    body = updated.json()
    assert body["memory_id"] == memory_id
    assert body["memory"] == "Updated memory content"
    assert set(body["topics"]) == {"updated", "modified"}
def test_create_memory_without_user_id_returns_400(test_os_client):
    """Test that creating a memory without user_id returns 400 error."""
    # user_id is mandatory; the server rejects its absence with a 400.
    response = test_os_client.post("/memories", json={"memory": "Some memory content"})
    assert response.status_code == 400
    assert response.json()["detail"] == "User ID is required"
def test_get_memory_by_id(test_os_client):
    """Test retrieving a specific memory by ID."""
    payload = {
        "memory": "User is a Python developer",
        "user_id": "test-user-789",
        "topics": ["technical", "skills"],
    }
    memory_id = test_os_client.post("/memories", json=payload).json()["memory_id"]
    # Fetch it back by id and compare with what was stored.
    response = test_os_client.get(f"/memories/{memory_id}")
    assert response.status_code == 200
    body = response.json()
    assert body["memory_id"] == memory_id
    assert body["memory"] == payload["memory"]
    assert body["user_id"] == payload["user_id"]
    assert set(body["topics"]) == {"technical", "skills"}
def test_get_memory_with_invalid_id_returns_404(test_os_client):
    """Test retrieving a non-existent memory returns 404."""
    resp = test_os_client.get("/memories/invalid-memory-id")
    assert resp.status_code == 404
    detail = resp.json()["detail"]
    assert "not found" in detail.lower()
def test_list_memories(test_os_client):
    """Test listing memories with pagination."""
    # Seed a couple of memories for the same user.
    for text, topic in [("Memory 1", "topic1"), ("Memory 2", "topic2")]:
        test_os_client.post(
            "/memories",
            json={"memory": text, "user_id": "test-user-list", "topics": [topic]},
        )
    # Default listing: first page, default limit of 20.
    response = test_os_client.get("/memories")
    assert response.status_code == 200
    body = response.json()
    assert "data" in body
    assert "meta" in body
    assert len(body["data"]) >= 2
    assert body["meta"]["page"] == 1
    assert body["meta"]["limit"] == 20
def test_list_memories_with_pagination(test_os_client):
    """Test listing memories with custom pagination parameters."""
    # Seed enough memories to span more than one page at limit=2.
    for idx in range(5):
        test_os_client.post(
            "/memories",
            json={"memory": f"Memory {idx}", "user_id": "test-user-pagination"},
        )
    response = test_os_client.get("/memories?limit=2&page=1")
    assert response.status_code == 200
    body = response.json()
    # The page can never exceed the requested limit.
    assert len(body["data"]) <= 2
    assert body["meta"]["limit"] == 2
    assert body["meta"]["page"] == 1
def test_list_memories_filtered_by_user(test_os_client):
    """Test filtering memories by user_id."""
    # Seed one memory per user so the filter has something to exclude.
    for owner, text in [("user-a", "User A memory"), ("user-b", "User B memory")]:
        test_os_client.post("/memories", json={"memory": text, "user_id": owner})
    response = test_os_client.get("/memories?user_id=user-a")
    assert response.status_code == 200
    rows = response.json()["data"]
    # Every returned row must belong to the filtered user.
    assert all(row["user_id"] == "user-a" for row in rows)
def test_list_memories_filtered_by_topics(test_os_client):
    """Test filtering memories by topics."""
    seeds = [
        {"memory": "Technical memory", "user_id": "test-user-topics", "topics": ["technical", "python"]},
        {"memory": "Personal memory", "user_id": "test-user-topics", "topics": ["personal", "hobbies"]},
    ]
    for seed in seeds:
        test_os_client.post("/memories", json=seed)
    response = test_os_client.get("/memories?topics=technical")
    assert response.status_code == 200
    # Note: The filtering should work, but the exact matching behavior depends on implementation
    assert len(response.json()["data"]) >= 1
def test_list_memories_with_search_content(test_os_client):
    """Test searching memories by content."""
    # Seed two memories with distinct content so the search can discriminate.
    for text in (
        "User loves programming in Python",
        "User prefers Java for enterprise applications",
    ):
        test_os_client.post("/memories", json={"memory": text, "user_id": "test-user-search"})
    response = test_os_client.get("/memories?search_content=Python")
    assert response.status_code == 200
    # At least one result should match
    assert len(response.json()["data"]) >= 1
def test_update_memory_without_user_id_returns_400(test_os_client):
    """Test that updating a memory without user_id returns 400 error."""
    # The id does not need to exist: the user_id check fires first.
    response = test_os_client.patch("/memories/some-id", json={"memory": "Updated content"})
    assert response.status_code == 400
    assert response.json()["detail"] == "User ID is required"
def test_delete_memory(test_os_client):
    """Test deleting a specific memory."""
    created = test_os_client.post(
        "/memories",
        json={"memory": "Memory to be deleted", "user_id": "test-user-delete"},
    )
    memory_id = created.json()["memory_id"]
    # Delete returns no content on success.
    delete_response = test_os_client.delete(f"/memories/{memory_id}")
    assert delete_response.status_code == 204
    # A follow-up fetch proves the row is gone.
    assert test_os_client.get(f"/memories/{memory_id}").status_code == 404
def test_delete_multiple_memories(test_os_client):
    """Test deleting multiple memories at once."""
    # Seed three memories owned by the same user.
    memory_ids = [
        test_os_client.post(
            "/memories",
            json={"memory": f"Memory to delete {i}", "user_id": "test-user-bulk-delete"},
        ).json()["memory_id"]
        for i in range(3)
    ]
    # DELETE with a JSON body requires the generic request() helper.
    response = test_os_client.request(
        "DELETE",
        "/memories",
        json={"memory_ids": memory_ids, "user_id": "test-user-bulk-delete"},
    )
    assert response.status_code == 204
    # Every id must now be gone.
    for memory_id in memory_ids:
        assert test_os_client.get(f"/memories/{memory_id}").status_code == 404
def test_delete_memories_with_empty_list_returns_422(test_os_client):
    """Test that deleting with empty memory_ids list returns validation error."""
    # An empty id list fails request validation before any deletion happens.
    response = test_os_client.request(
        "DELETE",
        "/memories",
        json={"memory_ids": []},
    )
    assert response.status_code == 422
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_memory.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/os/test_team_runs.py | """Integration tests for running Teams in AgentOS."""
import json
from unittest.mock import AsyncMock, patch
from agno.run import RunContext
from agno.team.team import Team
def test_create_team_run(test_os_client, test_team: Team):
    """Test creating a team run using form input."""
    form = {"message": "Hello, world!", "stream": "false"}
    response = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=form,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 200
    body = response.json()
    assert body["run_id"] is not None
    assert body["team_id"] == test_team.id
    assert body["content"] is not None
def test_create_team_run_streaming(test_os_client, test_team: Team):
    """Test creating a team run with streaming enabled."""
    with test_os_client.stream(
        "POST",
        f"/teams/{test_team.id}/runs",
        data={"message": "Hello, world!", "stream": "true"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    ) as response:
        assert response.status_code == 200
        assert "text/event-stream" in response.headers.get("content-type", "")
        # Parse the SSE stream: each event line looks like "data: <json>".
        events = []
        for raw_line in response.iter_lines():
            if not raw_line.startswith("data: "):
                continue
            payload = raw_line[len("data: "):]
            if payload != "[DONE]":
                events.append(json.loads(payload))
        assert len(events) > 0
        # The opening event identifies the run and the team.
        head = events[0]
        assert head.get("run_id") is not None
        assert head.get("team_id") == test_team.id
        # At least one event must carry actual content.
        assert any(event.get("content") for event in events)
def test_running_unknown_team_returns_404(test_os_client):
    """Test running an unknown team returns a 404 error."""
    response = test_os_client.post(
        "/teams/unknown-team/runs",
        data={"message": "Hello, world!", "stream": "false"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 404
    assert response.json()["detail"] == "Team not found"
def test_create_team_run_without_message_returns_422(test_os_client, test_team: Team):
    """Test that missing required message field returns validation error."""
    # An empty form body is missing the mandatory `message` field.
    response = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data={},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 422
def test_passing_kwargs_to_team_run(test_os_client, test_team: Team):
    """Test passing kwargs to a team run."""

    def assert_run_context(run_context: RunContext):
        # Runs inside the team as a pre-hook, so a failing assert here
        # surfaces as a failed request.
        assert run_context.user_id == "test-user-123"
        assert run_context.session_id == "test-session-123"
        assert run_context.session_state == {"test_session_state": "test-session-state"}
        assert run_context.dependencies == {"test_dependencies": "test-dependencies"}
        assert run_context.metadata == {"test_metadata": "test-metadata"}
        assert run_context.knowledge_filters == {"test_knowledge_filters": "test-knowledge-filters"}

    test_team.pre_hooks = [assert_run_context]
    form_fields = {
        "message": "Hello, world!",
        "user_id": "test-user-123",
        "session_id": "test-session-123",
        "session_state": {"test_session_state": "test-session-state"},
        "dependencies": {"test_dependencies": "test-dependencies"},
        "metadata": {"test_metadata": "test-metadata"},
        "knowledge_filters": {"test_knowledge_filters": "test-knowledge-filters"},
        "stream": "false",
        "add_dependencies_to_context": True,
        "add_session_state_to_context": True,
        "add_history_to_context": False,
    }
    response = test_os_client.post(
        f"/teams/{test_team.id}/runs",
        data=form_fields,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 200
    body = response.json()
    assert body["run_id"] is not None
    assert body["team_id"] == test_team.id
    assert body["content"] is not None
def test_create_team_run_with_kwargs(test_os_client, test_team: Team):
    """Test that the create_team_run endpoint accepts kwargs."""

    class MockRunOutput:
        def to_dict(self):
            return {}

    # Patch deep_copy to return the same instance so our mock works
    # (AgentOS uses create_fresh=True which calls deep_copy)
    with (
        patch.object(test_team, "deep_copy", return_value=test_team),
        patch.object(test_team, "arun", new_callable=AsyncMock) as mock_arun,
    ):
        mock_arun.return_value = MockRunOutput()
        response = test_os_client.post(
            f"/teams/{test_team.id}/runs",
            data={
                "message": "Hello, world!",
                "stream": "false",
                # Passing some extra fields to the run endpoint
                "extra_field": "foo",
                "extra_field_two": "bar",
            },
        )
        assert response.status_code == 200
        # Unknown form fields must be forwarded to Team.arun as kwargs.
        forwarded = mock_arun.call_args.kwargs
        assert forwarded["extra_field"] == "foo"
        assert forwarded["extra_field_two"] == "bar"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_team_runs.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/os/test_workflow_runs.py | """Integration tests for running Workflows in AgentOS."""
import json
from unittest.mock import AsyncMock, patch
from agno.workflow.workflow import Workflow
def test_create_workflow_run(test_os_client, test_workflow: Workflow):
    """Test creating a workflow run using form input."""
    form = {"message": "Hello, world!", "stream": "false"}
    response = test_os_client.post(
        f"/workflows/{test_workflow.id}/runs",
        data=form,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 200
    body = response.json()
    assert body["run_id"] is not None
    assert body["workflow_id"] == test_workflow.id
    assert body["content"] is not None
def test_create_workflow_run_streaming(test_os_client, test_workflow: Workflow):
    """Test creating a workflow run with streaming enabled."""
    with test_os_client.stream(
        "POST",
        f"/workflows/{test_workflow.id}/runs",
        data={"message": "Hello, world!", "stream": "true"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    ) as response:
        assert response.status_code == 200
        assert "text/event-stream" in response.headers.get("content-type", "")
        # Parse the SSE stream: each event line looks like "data: <json>".
        events = []
        for raw_line in response.iter_lines():
            if not raw_line.startswith("data: "):
                continue
            payload = raw_line[len("data: "):]
            if payload != "[DONE]":
                events.append(json.loads(payload))
        assert len(events) > 0
        # The opening event identifies the run and the workflow.
        head = events[0]
        assert head.get("run_id") is not None
        assert head.get("workflow_id") == test_workflow.id
        # At least one event must carry actual content.
        assert any(event.get("content") for event in events)
def test_running_unknown_workflow_returns_404(test_os_client):
    """Test running an unknown workflow returns a 404 error."""
    response = test_os_client.post(
        "/workflows/unknown-workflow/runs",
        data={"message": "Hello, world!", "stream": "false"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 404
    assert response.json()["detail"] == "Workflow not found"
def test_create_workflow_run_without_message_returns_422(test_os_client, test_workflow: Workflow):
    """Test that missing required message field returns validation error."""
    # An empty form body is missing the mandatory `message` field.
    response = test_os_client.post(
        f"/workflows/{test_workflow.id}/runs",
        data={},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert response.status_code == 422
def test_create_workflow_run_with_kwargs(test_os_client, test_workflow: Workflow):
    """Test that the create_workflow_run endpoint accepts kwargs."""

    class MockRunOutput:
        def to_dict(self):
            return {}

    # Patch deep_copy to return the same instance so our mock works
    # (AgentOS uses create_fresh=True which calls deep_copy)
    with (
        patch.object(test_workflow, "deep_copy", return_value=test_workflow),
        patch.object(test_workflow, "arun", new_callable=AsyncMock) as mock_arun,
    ):
        mock_arun.return_value = MockRunOutput()
        response = test_os_client.post(
            f"/workflows/{test_workflow.id}/runs",
            data={
                "message": "Hello, world!",
                "stream": "false",
                # Passing some extra fields to the run endpoint
                "extra_field": "foo",
                "extra_field_two": "bar",
            },
        )
        assert response.status_code == 200
        # Unknown form fields must be forwarded to Workflow.arun as kwargs.
        forwarded = mock_arun.call_args.kwargs
        assert forwarded["extra_field"] == "foo"
        assert forwarded["extra_field_two"] == "bar"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_workflow_runs.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/nano_banana.py | from __future__ import annotations
import os
from io import BytesIO
from typing import Any, List, Optional
from uuid import uuid4
from agno.media import Image
from agno.tools import Toolkit
from agno.tools.function import ToolResult
from agno.utils.log import log_debug, logger
try:
    from google import genai
    from google.genai import types
    from PIL import Image as PILImage
except ImportError as exc:
    # Re-probe each dependency individually so the error names exactly the
    # package(s) that are missing.
    missing = []
    try:
        from google.genai import types
    except ImportError:
        missing.append("google-genai")
    try:
        from PIL import Image as PILImage
    except ImportError:
        missing.append("Pillow")
    if not missing:
        # Both re-probes succeeded, so the original failure came from
        # `from google import genai` itself. Point at google-genai rather
        # than raising with an empty package list.
        missing.append("google-genai")
    raise ImportError(
        f"Missing required package(s): {', '.join(missing)}. Install using: pip install {' '.join(missing)}"
    ) from exc
# Note: Expand this list as new models become supported by the Google Content Generation API.
ALLOWED_MODELS = ["gemini-2.5-flash-image"]
# Aspect ratios accepted for generated images; validated in NanoBananaTools.__init__.
ALLOWED_RATIOS = ["1:1", "2:3", "3:2", "3:4", "4:3", "4:5", "5:4", "9:16", "16:9", "21:9"]
class NanoBananaTools(Toolkit):
    """Toolkit that generates images through Google's Gemini image model
    ("Nano Banana") via the google-genai client."""

    def __init__(
        self,
        model: str = "gemini-2.5-flash-image",
        aspect_ratio: str = "1:1",
        api_key: Optional[str] = None,
        enable_create_image: bool = True,
        **kwargs,
    ):
        """Initialize the toolkit.

        Args:
            model: Image model id; must be one of ALLOWED_MODELS.
            aspect_ratio: Output aspect ratio; must be one of ALLOWED_RATIOS.
            api_key: Google API key; falls back to the GOOGLE_API_KEY env var.
            enable_create_image: Whether to register the create_image tool.
            **kwargs: Forwarded to the Toolkit base class.

        Raises:
            ValueError: If model or aspect_ratio is unsupported, or no API key
                is available.
        """
        self.model = model
        self.aspect_ratio = aspect_ratio
        self.api_key = api_key or os.getenv("GOOGLE_API_KEY")
        # Validate model
        if model not in ALLOWED_MODELS:
            raise ValueError(f"Invalid model '{model}'. Supported: {', '.join(ALLOWED_MODELS)}")
        if self.aspect_ratio not in ALLOWED_RATIOS:
            raise ValueError(f"Invalid aspect_ratio '{self.aspect_ratio}'. Supported: {', '.join(ALLOWED_RATIOS)}")
        if not self.api_key:
            raise ValueError("GOOGLE_API_KEY not set. Export it: `export GOOGLE_API_KEY=<your-key>`")
        tools: List[Any] = []
        if enable_create_image:
            tools.append(self.create_image)
        super().__init__(name="nano_banana", tools=tools, **kwargs)

    def create_image(self, prompt: str) -> ToolResult:
        """Generate an image from a text prompt.

        Args:
            prompt: Text description of the image to generate.

        Returns:
            ToolResult with any generated images attached; on failure, a
            ToolResult whose content describes the error (never raises).
        """
        try:
            client = genai.Client(api_key=self.api_key)
            log_debug(f"NanoBanana generating image with prompt: {prompt}")
            # Request image-only output at the configured aspect ratio.
            cfg = types.GenerateContentConfig(
                response_modalities=["IMAGE"],
                image_config=types.ImageConfig(aspect_ratio=self.aspect_ratio),
            )
            response = client.models.generate_content(
                model=self.model,
                contents=[prompt],  # type: ignore
                config=cfg,
            )
            generated_images: List[Image] = []
            response_str = ""
            if not hasattr(response, "candidates") or not response.candidates:
                logger.warning("No candidates in response")
                return ToolResult(content="No images were generated in the response")
            # Process each candidate
            for candidate in response.candidates:
                if not hasattr(candidate, "content") or not candidate.content or not candidate.content.parts:
                    continue
                for part in candidate.content.parts:
                    # Text parts are accumulated into the human-readable result.
                    if hasattr(part, "text") and part.text:
                        response_str += part.text + "\n"
                    if hasattr(part, "inline_data") and part.inline_data:
                        try:
                            # Extract image data from the blob
                            image_data = part.inline_data.data
                            mime_type = getattr(part.inline_data, "mime_type", "image/png")
                            if image_data:
                                # Round-trip through PIL to normalize the bytes
                                # into the format implied by the mime type.
                                pil_img = PILImage.open(BytesIO(image_data))
                                # Save to buffer with proper format
                                buffer = BytesIO()
                                image_format = "PNG" if "png" in mime_type.lower() else "JPEG"
                                pil_img.save(buffer, format=image_format)
                                buffer.seek(0)
                                agno_img = Image(
                                    id=str(uuid4()),
                                    content=buffer.getvalue(),
                                    original_prompt=prompt,
                                )
                                generated_images.append(agno_img)
                                log_debug(f"Successfully processed image with ID: {agno_img.id}")
                                response_str += f"Image generated successfully (ID: {agno_img.id}).\n"
                        except Exception as img_exc:
                            # One bad image should not abort the remaining parts.
                            logger.error(f"Failed to process image data: {img_exc}")
                            response_str += f"Failed to process image: {img_exc}\n"
            if hasattr(response, "usage_metadata") and response.usage_metadata:
                log_debug(
                    f"Token usage - Prompt: {response.usage_metadata.prompt_token_count}, "
                    f"Response: {response.usage_metadata.candidates_token_count}, "
                    f"Total: {response.usage_metadata.total_token_count}"
                )
            if generated_images:
                return ToolResult(
                    content=response_str.strip() or "Image(s) generated successfully",
                    images=generated_images,
                )
            else:
                return ToolResult(
                    content=response_str.strip() or "No images were generated",
                    images=None,
                )
        except Exception as exc:
            logger.error(f"NanoBanana image generation failed: {exc}")
            return ToolResult(content=f"Error generating image: {str(exc)}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/nano_banana.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/integration/workflows/test_convenience_functions.py | import uuid
import pytest
from agno.agent.agent import Agent
from agno.db.sqlite.async_sqlite import AsyncSqliteDb
from agno.db.sqlite.sqlite import SqliteDb
from agno.team.team import Team
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# -- 1. Workflow class convenience functions --
def test_get_session(simple_workflow: Workflow):
    """Test get_session returns the correct session."""
    session_id = str(uuid.uuid4())
    # A single run persists the session in the workflow's db.
    run_output = simple_workflow.run("Hello", session_id=session_id)
    assert run_output is not None
    stored = simple_workflow.get_session(session_id=session_id)
    assert stored is not None
    assert stored.session_id == session_id
    assert len(stored.runs or []) == 1
@pytest.mark.asyncio
async def test_aget_session(simple_workflow_with_async_db: Workflow):
    """Test aget_session returns the correct session."""
    session_id = str(uuid.uuid4())
    # A single run persists the session in the workflow's async db.
    run_output = await simple_workflow_with_async_db.arun("Hello", session_id=session_id)
    assert run_output is not None
    stored = await simple_workflow_with_async_db.aget_session(session_id=session_id)
    assert stored is not None
    assert stored.session_id == session_id
    assert len(stored.runs or []) == 1
def test_get_session_nonexistent(simple_workflow):
    """Test get_session returns None for non-existent session."""
    missing = simple_workflow.get_session(session_id="nonexistent")
    assert missing is None
def test_get_chat_history(simple_workflow: Workflow):
    """Test get_chat_history returns the correct chat history."""
    session_id = str(uuid.uuid4())
    run_output = simple_workflow.run("Hello", session_id=session_id)
    assert run_output is not None
    # One run yields exactly one history entry pairing input and output.
    history = simple_workflow.get_chat_history(session_id=session_id)
    assert len(history) == 1
    entry = history[0]
    assert entry.input == "Hello"
    assert entry.output == run_output.content
def test_get_chat_history_with_default_session_id(simple_workflow: Workflow):
    """Test get_chat_history uses workflow's session_id if not provided."""
    simple_workflow.session_id = str(uuid.uuid4())
    run_output = simple_workflow.run("Hello")
    assert run_output is not None
    # No session_id argument: the workflow's own session_id is used.
    history = simple_workflow.get_chat_history()
    assert len(history) == 1
    entry = history[0]
    assert entry.input == "Hello"
    assert entry.output == run_output.content
# -- 2. Step class convenience functions --
def test_step_get_chat_history_for_agent(agent_with_db: Agent):
    """Test step.get_chat_history returns the correct chat history."""
    session_id = str(uuid.uuid4())
    step = Step(name="Test Step", agent=agent_with_db)
    workflow = Workflow(name="Test Workflow", db=agent_with_db.db, steps=[step])
    run_output = workflow.run("Hello", session_id=session_id)
    assert run_output is not None
    history = step.get_chat_history(session_id=session_id)
    assert history is not None
    # Expect the full conversation: system prompt, user input, assistant reply.
    assert len(history) == 3
    system_msg, user_msg, assistant_msg = history
    assert system_msg.role == "system"
    assert user_msg.role == "user"
    assert user_msg.content == "Hello"
    assert assistant_msg.role == "assistant"
    assert assistant_msg.content == run_output.content
@pytest.mark.asyncio
async def test_step_aget_chat_history_for_agent(mock_agent: Agent, async_shared_db: AsyncSqliteDb):
    """Test step.aget_chat_history returns the correct chat history."""
    session_id = str(uuid.uuid4())
    mock_agent.db = async_shared_db
    step = Step(name="Test Step", agent=mock_agent)
    workflow = Workflow(name="Test Workflow", db=async_shared_db, steps=[step])
    run_output = await workflow.arun("Hello", session_id=session_id)
    assert run_output is not None
    history = await step.aget_chat_history(session_id=session_id)
    assert history is not None
    # Expect the full conversation: system prompt, user input, assistant reply.
    assert len(history) == 3
    system_msg, user_msg, assistant_msg = history
    assert system_msg.role == "system"
    assert user_msg.role == "user"
    assert user_msg.content == "Hello"
    assert assistant_msg.role == "assistant"
    assert assistant_msg.content == run_output.content
def test_step_get_chat_history_for_team(mock_team: Team, shared_db: SqliteDb):
    """Test step.get_chat_history returns the correct chat history."""
    session_id = str(uuid.uuid4())
    mock_team.db = shared_db
    step = Step(name="Test Step", team=mock_team)
    workflow = Workflow(name="Test Workflow", db=shared_db, steps=[step])
    run_output = workflow.run("Hello", session_id=session_id)
    assert run_output is not None
    history = step.get_chat_history(session_id=session_id)
    assert history is not None
    # Expect the full conversation: system prompt, user input, assistant reply.
    assert len(history) == 3
    system_msg, user_msg, assistant_msg = history
    assert system_msg.role == "system"
    assert user_msg.role == "user"
    assert user_msg.content == "Hello"
    assert assistant_msg.role == "assistant"
    assert assistant_msg.content == run_output.content
@pytest.mark.asyncio
async def test_step_aget_chat_history_for_team(mock_team: Team, async_shared_db: AsyncSqliteDb):
    """Test step.aget_chat_history returns the correct chat history."""
    session_id = str(uuid.uuid4())
    mock_team.db = async_shared_db
    step = Step(name="Test Step", team=mock_team)
    workflow = Workflow(name="Test Workflow", db=async_shared_db, steps=[step])
    run_output = await workflow.arun("Hello", session_id=session_id)
    assert run_output is not None
    history = await step.aget_chat_history(session_id=session_id)
    assert history is not None
    # Expect the full conversation: system prompt, user input, assistant reply.
    assert len(history) == 3
    system_msg, user_msg, assistant_msg = history
    assert system_msg.role == "system"
    assert user_msg.role == "user"
    assert user_msg.content == "Hello"
    assert assistant_msg.role == "assistant"
    assert assistant_msg.content == run_output.content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_convenience_functions.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/test_client_caching.py | """
Tests for httpx client caching and resource leak prevention.
This test suite verifies that:
1. Global httpx clients are singletons and reused across models
2. OpenAI clients are cached per model instance
3. No new httpx clients are created on every request
"""
import os
import httpx
import pytest
# Set test API key to avoid env var lookup errors
os.environ.setdefault("OPENAI_API_KEY", "test-key-for-testing")
from agno.models.openai.chat import OpenAIChat
from agno.models.openai.responses import OpenAIResponses
from agno.utils.http import (
aclose_default_clients,
close_sync_client,
get_default_async_client,
get_default_sync_client,
set_default_async_client,
set_default_sync_client,
)
class TestGlobalHttpxClients:
    """Test suite for global httpx client singleton pattern."""

    def teardown_method(self):
        """Clean up global clients after each test."""
        close_sync_client()

    @pytest.mark.asyncio
    async def test_sync_client_is_singleton(self):
        """Verify that the global sync httpx client is a singleton."""
        first = get_default_sync_client()
        second = get_default_sync_client()
        assert first is second, "Sync clients should be the same instance"
        assert isinstance(first, httpx.Client)

    @pytest.mark.asyncio
    async def test_async_client_is_singleton(self):
        """Verify that the global async httpx client is a singleton."""
        first = get_default_async_client()
        second = get_default_async_client()
        assert first is second, "Async clients should be the same instance"
        assert isinstance(first, httpx.AsyncClient)

    def test_sync_and_async_clients_are_different(self):
        """Verify that sync and async clients are different instances."""
        blocking_client = get_default_sync_client()
        awaitable_client = get_default_async_client()
        assert blocking_client is not awaitable_client, "Sync and async clients should be different"

    def test_closed_sync_client_gets_recreated(self):
        """Verify that closed sync client gets recreated."""
        stale = get_default_sync_client()
        stale.close()
        # A closed client must be replaced with a fresh one on the next lookup.
        fresh = get_default_sync_client()
        assert stale is not fresh
        assert isinstance(fresh, httpx.Client)

    @pytest.mark.asyncio
    async def test_closed_async_client_gets_recreated(self):
        """Verify that closed async client gets recreated."""
        stale = get_default_async_client()
        await stale.aclose()
        # A closed client must be replaced with a fresh one on the next lookup.
        fresh = get_default_async_client()
        assert stale is not fresh
        assert isinstance(fresh, httpx.AsyncClient)
class TestOpenAIChatClientCaching:
    """Test suite for OpenAIChat client caching."""

    def teardown_method(self):
        """Clean up global clients after each test."""
        close_sync_client()

    def test_sync_client_is_cached(self):
        """Verify that OpenAIChat caches the sync client."""
        model = OpenAIChat(id="gpt-4o")
        first = model.get_client()
        second = model.get_client()
        assert first is second, "OpenAI sync clients should be cached"
        # The cached client is stored on the model itself.
        assert model.client is not None
        assert model.client is first

    def test_async_client_is_cached(self):
        """Verify that OpenAIChat caches the async client."""
        model = OpenAIChat(id="gpt-4o")
        first = model.get_async_client()
        second = model.get_async_client()
        assert first is second, "OpenAI async clients should be cached"
        # The cached client is stored on the model itself.
        assert model.async_client is not None
        assert model.async_client is first

    def test_multiple_models_share_global_httpx_client(self):
        """Verify that multiple models can share the same global httpx client."""
        for model_id in ("gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"):
            OpenAIChat(id=model_id).get_client()
        # All models should use the same global httpx client internally
        # We verify this by checking that only one global client exists
        shared = get_default_sync_client()
        assert isinstance(shared, httpx.Client)

    def test_sync_client_uses_global_httpx_client(self):
        """Verify that OpenAIChat uses the global httpx client for sync operations."""
        shared = get_default_sync_client()
        openai_client = OpenAIChat(id="gpt-4o").get_client()
        # The OpenAI client should have the global httpx client
        assert openai_client._client is shared

    def test_async_client_uses_global_httpx_client(self):
        """Verify that OpenAIChat uses the global httpx client for async operations."""
        shared = get_default_async_client()
        openai_client = OpenAIChat(id="gpt-4o").get_async_client()
        # The OpenAI client should have the global httpx client
        assert openai_client._client is shared
class TestOpenAIResponsesClientCaching:
    """Test suite for OpenAIResponses client caching."""

    def teardown_method(self):
        """Clean up global clients after each test."""
        close_sync_client()

    def test_sync_client_is_cached(self):
        """Verify that OpenAIResponses caches the sync client."""
        model = OpenAIResponses(id="gpt-4o")
        first = model.get_client()
        second = model.get_client()
        assert first is second, "OpenAI sync clients should be cached"
        # The cached client is stored on the model itself.
        assert model.client is not None
        assert model.client is first

    def test_async_client_is_cached(self):
        """Verify that OpenAIResponses caches the async client."""
        model = OpenAIResponses(id="gpt-4o")
        first = model.get_async_client()
        second = model.get_async_client()
        assert first is second, "OpenAI async clients should be cached"
        # The cached client is stored on the model itself.
        assert model.async_client is not None
        assert model.async_client is first

    def test_uses_global_httpx_client(self):
        """Verify that OpenAIResponses uses the global httpx client."""
        shared_sync = get_default_sync_client()
        shared_async = get_default_async_client()
        model = OpenAIResponses(id="gpt-4o")
        # Both should use global clients
        assert model.get_client()._client is shared_sync
        assert model.get_async_client()._client is shared_async
class TestCustomHttpClient:
    """Test suite for custom httpx client support.

    Verifies that a user-supplied ``http_client`` takes precedence over the
    shared global client.
    """

    def teardown_method(self):
        """Clean up global clients after each test."""
        close_sync_client()

    def test_custom_sync_client_is_respected(self):
        """Verify that a custom sync httpx client is used when provided."""
        custom_client = httpx.Client()
        try:
            model = OpenAIChat(id="gpt-4o", http_client=custom_client)
            # The model must wrap the caller's client, not the global one.
            assert model.get_client()._client is custom_client
        finally:
            # Fixed: close in a finally so a failing assert cannot leak the pool.
            custom_client.close()

    def test_custom_async_client_is_respected(self):
        """Verify that a custom async httpx client is used when provided."""
        custom_client = httpx.AsyncClient()
        try:
            model = OpenAIChat(id="gpt-4o", http_client=custom_client)
            assert model.get_async_client()._client is custom_client
        finally:
            # Fixed: this async client was previously never closed, leaking its
            # connection pool. AsyncClient.aclose() is a coroutine, so run it
            # on a short-lived event loop (this test itself is synchronous).
            import asyncio

            asyncio.run(custom_client.aclose())
class TestAsyncCleanup:
    """Async shutdown behaviour of the shared default clients."""

    @pytest.mark.asyncio
    async def test_aclose_default_clients_closes_both(self):
        """aclose_default_clients() must close the sync and async globals."""
        clients = (get_default_sync_client(), get_default_async_client())
        for client in clients:
            assert not client.is_closed
        await aclose_default_clients()
        # Both transports must now report closed.
        for client in clients:
            assert client.is_closed

    @pytest.mark.asyncio
    async def test_clients_recreated_after_async_close(self):
        """Fresh globals must be handed out after an async close."""
        old_sync = get_default_sync_client()
        old_async = get_default_async_client()
        await aclose_default_clients()
        # The accessors should now lazily build brand-new clients.
        assert get_default_sync_client() is not old_sync
        assert get_default_async_client() is not old_async
class TestSetGlobalClients:
    """Test suite for installing custom global httpx clients."""

    def teardown_method(self):
        """Clean up global clients after each test."""
        close_sync_client()

    def test_set_custom_sync_client_affects_all_models(self):
        """Verify that setting a custom sync client affects all models."""
        custom_client = httpx.Client(limits=httpx.Limits(max_connections=100, max_keepalive_connections=50))
        try:
            set_default_sync_client(custom_client)
            # Models created after the override must pick it up.
            model1 = OpenAIChat(id="gpt-4o")
            model2 = OpenAIResponses(id="gpt-4o")
            assert model1.get_client()._client is custom_client
            assert model2.get_client()._client is custom_client
        finally:
            # Fixed: close in a finally so a failing assert cannot leak the pool.
            custom_client.close()

    def test_set_custom_async_client_affects_all_models(self):
        """Verify that setting a custom async client affects all models."""
        custom_client = httpx.AsyncClient(limits=httpx.Limits(max_connections=100, max_keepalive_connections=50))
        try:
            set_default_async_client(custom_client)
            model1 = OpenAIChat(id="gpt-4o")
            model2 = OpenAIResponses(id="gpt-4o")
            assert model1.get_async_client()._client is custom_client
            assert model2.get_async_client()._client is custom_client
        finally:
            # Fixed: this async client was previously never closed, leaking its
            # connection pool across the test session. aclose() is a coroutine,
            # so run it on a short-lived loop (this test is synchronous).
            import asyncio

            asyncio.run(custom_client.aclose())

    def test_custom_client_persists_across_multiple_calls(self):
        """Verify that a custom client persists across repeated get_client calls."""
        custom_client = httpx.Client(limits=httpx.Limits(max_connections=250))
        try:
            set_default_sync_client(custom_client)
            model = OpenAIChat(id="gpt-4o")
            # Multiple calls should all resolve to the same custom client.
            for _ in range(5):
                assert model.get_client()._client is custom_client
        finally:
            custom_client.close()

    def test_set_client_overrides_previous_default(self):
        """Verify that setting a new client replaces the previous default."""
        default_client = get_default_sync_client()
        custom_client = httpx.Client(limits=httpx.Limits(max_connections=100))
        try:
            set_default_sync_client(custom_client)
            # New calls should get the custom client, not the old default.
            new_client = get_default_sync_client()
            assert new_client is custom_client
            assert new_client is not default_client
        finally:
            custom_client.close()
class TestResourceLeakPrevention:
    """Guards against httpx client proliferation across repeated use."""

    def teardown_method(self):
        """Reset global clients between tests."""
        close_sync_client()

    def test_no_new_httpx_clients_created_per_request(self):
        """Repeated get_client() calls must not spawn new httpx clients."""
        model = OpenAIChat(id="gpt-4o")
        shared = get_default_sync_client()
        # Ten simulated requests: every one must reuse the shared transport.
        for _ in range(10):
            assert model.get_client()._client is shared
        # The accessor itself must still return the original singleton.
        assert get_default_sync_client() is shared

    def test_multiple_models_share_single_global_client(self):
        """Distinct model instances must share one global httpx client."""
        shared = get_default_sync_client()
        chat_ids = ["gpt-4o", "gpt-4-turbo", "gpt-3.5-turbo"]
        models = [OpenAIChat(id=model_id) for model_id in chat_ids]
        models.append(OpenAIResponses(id="gpt-4o"))
        # Every model, regardless of type, wraps the same transport.
        for model in models:
            assert model.get_client()._client is shared
# Allow running this test module directly (outside the pytest CLI).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/test_client_caching.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/async_mongo/test_db.py | """Integration tests for the setup and main methods of the AsyncMongoDb class
Required to have a running MongoDB instance to run these tests.
These tests assume:
- Username=mongoadmin
- Password=secret
"""
from datetime import datetime, timezone
import pytest
try:
from agno.db.mongo import AsyncMongoDb
except ImportError:
pytest.skip(
"Neither motor nor pymongo async installed, skipping AsyncMongoDb integration tests", allow_module_level=True
)
@pytest.mark.asyncio
async def test_init_with_db_url():
    """Test initialization with actual database URL format"""
    db_url = "mongodb://mongoadmin:secret@localhost:27017"
    db = AsyncMongoDb(db_url=db_url, db_name="test_init_db", session_collection="test_async_mongo_sessions")
    assert db.db_url == db_url
    assert db.session_table_name == "test_async_mongo_sessions"
    assert db.db_name == "test_init_db"
    # Test connection by round-tripping to the server.
    collection_names = await db.database.list_collection_names()
    assert isinstance(collection_names, list)
    # Cleanup
    await db.database.client.drop_database("test_init_db")
    # NOTE(review): with PyMongo's AsyncMongoClient, close() is a coroutine —
    # calling it without awaiting would leave it unawaited. Motor's close() is
    # synchronous. Confirm which driver backs _client here.
    db._client.close()  # type: ignore
@pytest.mark.asyncio
async def test_table_exists(async_mongo_db_real):
    """A collection with data must be reported as existing; others must not."""
    # Inserting a document implicitly creates the collection in MongoDB.
    await async_mongo_db_real.database["test_collection"].insert_one({"test": "data"})
    assert await async_mongo_db_real.table_exists("test_collection") is True
    # A name that was never written to must not exist.
    assert await async_mongo_db_real.table_exists("nonexistent_collection") is False
@pytest.mark.asyncio
async def test_create_session_collection_integration(async_mongo_db_real):
    """Creating the sessions collection must register it and allow round-trips."""
    db = async_mongo_db_real
    collection = await db._get_collection("sessions", create_collection_if_not_found=True)
    assert collection is not None
    # The collection name must now appear in the database listing.
    assert db.session_table_name in await db.database.list_collection_names()
    # Smoke-test a write followed by a read.
    await collection.insert_one({"session_id": "test-123", "test": "data"})
    doc = await collection.find_one({"session_id": "test-123"})
    assert doc is not None
    assert doc["test"] == "data"
@pytest.mark.asyncio
async def test_create_collection_with_indexes(async_mongo_db_real):
    """Index creation must accompany sessions-collection creation."""
    await async_mongo_db_real._get_collection("sessions", create_collection_if_not_found=True)
    sessions = async_mongo_db_real.database[async_mongo_db_real.session_table_name]
    index_info = await sessions.index_information()
    # MongoDB always provides the default _id index; more may exist.
    assert len(index_info) > 0
    assert "_id_" in index_info
@pytest.mark.asyncio
async def test_get_collection_caching(async_mongo_db_real):
    """The second lookup must reuse the collection cached by the first."""
    first = await async_mongo_db_real._get_collection("sessions", create_collection_if_not_found=True)
    # The initial call stores the collection on the instance.
    assert hasattr(async_mongo_db_real, "session_collection")
    second = await async_mongo_db_real._get_collection("sessions", create_collection_if_not_found=True)
    # Both lookups must resolve to the same underlying collection.
    assert first.name == second.name
@pytest.mark.asyncio
async def test_multiple_collections(async_mongo_db_real):
    """Sessions, memories and metrics collections can coexist independently."""
    db = async_mongo_db_real
    created = {}
    for table in ("sessions", "memories", "metrics"):
        created[table] = await db._get_collection(table, create_collection_if_not_found=True)
        assert created[table] is not None
    # Each collection is cached on the instance under its own attribute.
    for attr in ("session_collection", "memory_collection", "metrics_collection"):
        assert hasattr(db, attr)
    # And they are genuinely distinct collections.
    assert created["sessions"].name != created["memories"].name
    assert created["sessions"].name != created["metrics"].name
@pytest.mark.asyncio
async def test_full_workflow(async_mongo_db_real):
    """End-to-end: create collections, then write and read a session."""
    db = async_mongo_db_real
    session_collection = await db._get_collection("sessions", create_collection_if_not_found=True)
    await db._get_collection("memories", create_collection_if_not_found=True)
    # Both collections must now be cached on the instance.
    assert hasattr(db, "session_collection")
    assert hasattr(db, "memory_collection")
    # Insert a representative session document (basic smoke test).
    record = {
        "session_id": "test-session-123",
        "session_type": "agent",
        "created_at": int(datetime.now(timezone.utc).timestamp() * 1000),
        "session_data": {"test": "data"},
    }
    await session_collection.insert_one(record)
    # Read it back and verify the stored fields survived the round-trip.
    stored = await session_collection.find_one({"session_id": "test-session-123"})
    assert stored is not None
    assert stored["session_type"] == "agent"
    assert stored["session_data"]["test"] == "data"
@pytest.mark.asyncio
async def test_event_loop_handling_in_integration(async_mongo_db_real):
    """Regression test for the 'Event loop is closed' bug.

    When the db detects a new event loop, cached collections AND the
    *_initialized flags must both be dropped so everything is rebuilt.
    """
    db = async_mongo_db_real

    def attrs_ending_with(suffix):
        # Snapshot of instance attribute names matching a suffix.
        return [name for name in vars(db) if name.endswith(suffix)]

    # Create collections in the current event loop.
    await db._get_collection("sessions", create_collection_if_not_found=True)
    await db._get_collection("memories", create_collection_if_not_found=True)
    assert hasattr(db, "session_collection")
    assert hasattr(db, "memory_collection")
    assert len(attrs_ending_with("_initialized")) >= 2

    # Simulate the event loop changing out from under the client.
    db._event_loop = None
    _ = db.db_client

    # Collections and initialization flags should all be cleared (the fix!).
    assert not hasattr(db, "session_collection")
    assert not hasattr(db, "memory_collection")
    leftover = attrs_ending_with("_initialized")
    assert len(leftover) == 0, f"Expected 0 initialized flags, found: {leftover}"

    # Collections must be recreatable on the new loop without errors.
    assert await db._get_collection("sessions", create_collection_if_not_found=True) is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/async_mongo/test_db.py",
"license": "Apache License 2.0",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_async_mongo.py | import asyncio
from unittest.mock import Mock, patch
import pytest
from pymongo import AsyncMongoClient
from agno.db.mongo import AsyncMongoDb
def test_id_is_deterministic():
    """Identical configuration must always produce the same db ID."""
    first = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_db")
    second = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_db")
    # The ID is derived from the config, so the two instances must match.
    assert first.id == second.id
def test_id_different_for_different_configs():
    """Distinct configurations must map to distinct db IDs."""
    first = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_db1")
    second = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_db2")
    # Only the db_name differs, which is enough to change the derived ID.
    assert first.id != second.id
def test_init_with_url():
    """Constructor must store the URL, db name and collection names."""
    db = AsyncMongoDb(
        db_url="mongodb://localhost:27017",
        db_name="test_db",
        session_collection="sessions",
        memory_collection="memories",
    )
    expected = {
        "db_url": "mongodb://localhost:27017",
        "db_name": "test_db",
        "session_table_name": "sessions",
        "memory_table_name": "memories",
    }
    # Every constructor argument must land on its corresponding attribute.
    for attr, value in expected.items():
        assert getattr(db, attr) == value
def test_init_with_client():
    """Constructor must keep a reference to a caller-provided client."""
    test_client = AsyncMongoClient()
    db = AsyncMongoDb(db_client=test_client, db_name="test_db", session_collection="sessions")
    # Fixed: use identity, not equality. PyMongo clients compare equal when
    # they point at the same address, so `==` could pass even if the db built
    # its own client; `is` proves the exact object was stored.
    assert db._provided_client is test_client
    assert db.db_name == "test_db"
    assert db.session_table_name == "sessions"
def test_init_no_url_or_client():
    """Constructing without a URL or client must fail loudly."""
    with pytest.raises(ValueError) as exc_info:
        AsyncMongoDb(db_name="test_db")
    # The error must name the missing arguments explicitly.
    assert "One of db_url or db_client must be provided" in str(exc_info.value)
def test_init_defaults():
    """Defaults: db name 'agno', plus lazily-initialised client/loop state."""
    db = AsyncMongoDb(db_url="mongodb://localhost:27017")
    assert db.db_name == "agno"
    # Nothing should be connected or bound to a loop until first use.
    for lazy_attr in ("_client", "_database", "_event_loop"):
        assert getattr(db, lazy_attr) is None
def test_init_with_all_collections():
    """Every *_collection kwarg must map onto its *_table_name attribute."""
    names = {
        "session": "sessions",
        "memory": "memories",
        "metrics": "metrics",
        "eval": "evals",
        "knowledge": "knowledge",
        "culture": "culture",
    }
    db = AsyncMongoDb(
        db_url="mongodb://localhost:27017",
        db_name="test_db",
        **{f"{kind}_collection": table for kind, table in names.items()},
    )
    # Each collection name must be stored under the matching attribute.
    for kind, table in names.items():
        assert getattr(db, f"{kind}_table_name") == table
@pytest.mark.asyncio
async def test_initialization_flags_cleared_on_event_loop_change():
    """Test that _initialized flags are cleared when the event loop changes.

    This is the primary fix for the bug: when the event loop changes, both
    the collection caches AND the initialization flags must be cleared.

    Bug context: after upgrading from 2.2.7 to 2.2.9, users hit
    "Event loop is closed" errors from aupsert_session in custom setups
    with subagents, particularly across multiple asyncio.run() calls.
    """
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_event_loop_fix")

    def initialized_flags():
        # All instance attributes marking a collection as initialized.
        return [name for name in vars(db) if name.endswith("_initialized")]

    # First operation: create and cache two collections.
    await db._get_collection("sessions", create_collection_if_not_found=True)
    await db._get_collection("memories", create_collection_if_not_found=True)
    assert hasattr(db, "session_collection")
    assert hasattr(db, "memory_collection")
    assert len(initialized_flags()) >= 2, "Should have initialized flags"

    # Manually simulate an event loop change (what _ensure_client detects).
    db._event_loop = None
    _ = db.db_client  # triggers the cleanup path

    assert not hasattr(db, "session_collection"), "session_collection should be cleared"
    assert not hasattr(db, "memory_collection"), "memory_collection should be cleared"
    remaining = initialized_flags()
    assert len(remaining) == 0, (
        f"All _initialized flags should be cleared, but found: {remaining}"
    )
@pytest.mark.asyncio
async def test_indexes_awaited_properly():
    """Index creation must be awaited, leaving no dangling futures.

    Second part of the fix: create_collection_indexes_async must await the
    driver's async create_index() calls so no unawaited futures remain on
    the event loop.
    """
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_await_indexes")
    # Must complete without hanging or leaving pending futures.
    assert await db._get_collection("sessions", create_collection_if_not_found=True) is not None
    # Everything except this test's own task should now be finished.
    me = asyncio.current_task()
    lingering = [task for task in asyncio.all_tasks() if not task.done() and task is not me]
    assert len(lingering) == 0, f"Should have no pending tasks, but found: {lingering}"
@pytest.mark.asyncio
async def test_collection_cache_reset_on_event_loop_change():
    """Every cached *_collection attribute must vanish on a loop change."""
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_cache_reset")

    def cached_collections():
        return [name for name in vars(db) if name.endswith("_collection")]

    for table in ("sessions", "memories", "metrics"):
        await db._get_collection(table, create_collection_if_not_found=True)
    assert len(cached_collections()) >= 3

    # Force the event-loop-changed code path.
    db._event_loop = None
    _ = db.db_client

    leftovers = cached_collections()
    assert len(leftovers) == 0, f"All collections should be cleared, found: {leftovers}"
@pytest.mark.asyncio
async def test_get_collection_invalid_type():
    """An unrecognised table type must raise a descriptive ValueError."""
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_invalid_type")
    with pytest.raises(ValueError) as exc_info:
        await db._get_collection("invalid_type")
    assert "Unknown table type" in str(exc_info.value)
@pytest.mark.asyncio
async def test_get_collection_without_table_name():
    """Missing collection configuration must raise, not silently default."""
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_no_table_name")
    # Simulate a db configured without a sessions collection name.
    db.session_table_name = None  # type: ignore
    with pytest.raises(ValueError, match="Session collection was not provided"):
        await db._get_collection("sessions", create_collection_if_not_found=True)
def test_db_client_property():
    """The db_client property must delegate to _ensure_client exactly once."""
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_client_property")
    with patch.object(db, "_ensure_client", return_value=Mock()) as mocked_ensure:
        # Accessing the property must produce the stubbed client...
        assert db.db_client is not None
        # ...via a single delegation to _ensure_client.
        mocked_ensure.assert_called_once()
def test_should_reset_collection_cache():
    """Test _should_reset_collection_cache method"""
    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_should_reset")
    # Before any event loop is set
    assert db._should_reset_collection_cache() is False
    # After setting event loop
    # NOTE(review): asyncio.get_event_loop() outside a running loop is
    # deprecated (DeprecationWarning since Python 3.10, stricter in 3.12).
    # Consider asyncio.new_event_loop() instead — but first confirm that
    # _should_reset_collection_cache compares against the same loop source,
    # otherwise the assertion below could flip.
    db._event_loop = asyncio.get_event_loop()
    # Should return False when same loop
    assert db._should_reset_collection_cache() is False
def test_client_type_constants():
    """The driver-detection constants must keep their published values."""
    expected = {
        "CLIENT_TYPE_MOTOR": "motor",
        "CLIENT_TYPE_PYMONGO_ASYNC": "pymongo_async",
        "CLIENT_TYPE_UNKNOWN": "unknown",
    }
    for constant, value in expected.items():
        assert getattr(AsyncMongoDb, constant) == value
def test_detect_motor_client_type():
    """A Motor client instance must be detected as CLIENT_TYPE_MOTOR."""
    try:
        from motor.motor_asyncio import AsyncIOMotorClient
    except ImportError:
        pytest.skip("Motor not available")
    # A spec'd Mock passes isinstance() checks against the Motor client type.
    fake_motor_client = Mock(spec=AsyncIOMotorClient)
    db = AsyncMongoDb(db_client=fake_motor_client, db_name="test_db")
    assert db._client_type == AsyncMongoDb.CLIENT_TYPE_MOTOR
def test_detect_pymongo_async_client_type():
    """A PyMongo async client must be detected via name-based fallback."""
    try:
        import pymongo  # noqa: F401
    except ImportError:
        pytest.skip("PyMongo async not available")
    if not hasattr(pymongo, "AsyncMongoClient"):
        pytest.skip("PyMongo async not available")
    # A bare Mock cannot pass an isinstance() check against AsyncMongoClient,
    # so exercise the fallback detection that inspects the class name and
    # module instead.
    fake_client = Mock()
    fake_client.__class__.__name__ = "AsyncMongoClient"
    fake_client.__class__.__module__ = "pymongo"
    db = AsyncMongoDb(db_client=fake_client, db_name="test_db")
    assert db._client_type == AsyncMongoDb.CLIENT_TYPE_PYMONGO_ASYNC
def test_auto_select_preferred_client_from_url():
    """URL-based construction must auto-pick the preferred available driver."""
    # Availability flags live alongside the implementation.
    from agno.db.mongo.async_mongo import MOTOR_AVAILABLE, PYMONGO_ASYNC_AVAILABLE

    db = AsyncMongoDb(db_url="mongodb://localhost:27017", db_name="test_db")
    if PYMONGO_ASYNC_AVAILABLE:
        # PyMongo's native async client wins when both drivers are installed.
        expected = AsyncMongoDb.CLIENT_TYPE_PYMONGO_ASYNC
    elif MOTOR_AVAILABLE:
        expected = AsyncMongoDb.CLIENT_TYPE_MOTOR
    else:
        pytest.fail("Neither client type available")
    assert db._client_type == expected
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_async_mongo.py",
"license": "Apache License 2.0",
"lines": 202,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_custom_executor_events.py | """Integration tests for workflow streaming events."""
from pydantic import BaseModel
from agno.agent.agent import Agent
from agno.workflow import Step, Workflow
from agno.workflow.types import StepInput, StepOutput
def test_agent_stream_basic_events():
    """A custom executor that re-streams agent chunks must surface workflow,
    step and run-content events."""

    # Generator executor: forward every chunk the inner agent streams.
    # (No return annotation — the original's `-> StepOutput` was inaccurate
    # for a generator.)
    def test_step_one_executor(step_input: StepInput):
        agent = Agent(
            name="custom_agent",
            instructions="You are a custom agent that can perform a custom task.",
        )
        yield from agent.run(input=step_input.input, stream=True)

    test_step_one = Step(
        name="test_step_one",
        executor=test_step_one_executor,
    )
    workflow = Workflow(
        name="test_workflow",
        steps=[test_step_one],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    event_types = [type(event).__name__ for event in events]
    # Fixed: removed a stray debug print and a duplicated assertion.
    expected_events = {
        "WorkflowStartedEvent",
        "WorkflowCompletedEvent",
        "StepStartedEvent",
        "StepCompletedEvent",
        "StepOutputEvent",
        "RunContentEvent",
    }
    missing = expected_events - set(event_types)
    assert not missing, f"Missing expected events: {missing}"
def test_agent_stream_all_events():
    """With stream_events=True on the inner agent, run lifecycle events must
    appear alongside the workflow/step events."""

    # Generator executor: forward every event the inner agent streams.
    def test_step_one_executor(step_input: StepInput):
        agent = Agent(
            name="custom_agent",
            instructions="You are a custom agent that can perform a custom task.",
        )
        yield from agent.run(input=step_input.input, stream=True, stream_events=True)

    test_step_one = Step(
        name="test_step_one",
        executor=test_step_one_executor,
    )
    workflow = Workflow(
        name="test_workflow",
        steps=[test_step_one],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    event_types = [type(event).__name__ for event in events]
    # Fixed: removed a duplicated "StepOutputEvent" assertion.
    expected_events = {
        "WorkflowStartedEvent",
        "WorkflowCompletedEvent",
        "StepStartedEvent",
        "StepCompletedEvent",
        "StepOutputEvent",
        "RunContentEvent",
        "RunStartedEvent",
        "RunContentCompletedEvent",
        "RunCompletedEvent",
    }
    missing = expected_events - set(event_types)
    assert not missing, f"Missing expected events: {missing}"
def test_agent_stream_with_output_schema():
    """Streaming through a custom executor must still work when the inner
    agent has a structured output schema."""

    class TestClass(BaseModel):
        name: str
        age: int
        location: str

    # Generator executor: forward every chunk the inner agent streams.
    def test_step_one_executor(step_input: StepInput):
        agent = Agent(
            name="custom_agent",
            output_schema=TestClass,
            instructions="You are a custom agent that can perform a custom task.",
        )
        yield from agent.run(input=step_input.input, stream=True)

    test_step_one = Step(
        name="test_step_one",
        executor=test_step_one_executor,
    )
    workflow = Workflow(
        name="test_workflow",
        steps=[test_step_one],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    event_types = [type(event).__name__ for event in events]
    # Fixed: removed a duplicated "StepOutputEvent" assertion.
    expected_events = {
        "WorkflowStartedEvent",
        "WorkflowCompletedEvent",
        "StepStartedEvent",
        "StepCompletedEvent",
        "StepOutputEvent",
        "RunContentEvent",
    }
    missing = expected_events - set(event_types)
    assert not missing, f"Missing expected events: {missing}"
def test_agent_stream_with_yield_run_output():
    """Streaming with yield_run_output=True must still emit the standard
    workflow and step events."""

    # Generator executor: forward every chunk, including the final run output.
    def test_step_one_executor(step_input: StepInput):
        agent = Agent(
            name="custom_agent",
            instructions="You are a custom agent that can perform a custom task.",
        )
        yield from agent.run(input=step_input.input, stream=True, yield_run_output=True)

    test_step_one = Step(
        name="test_step_one",
        executor=test_step_one_executor,
    )
    workflow = Workflow(
        name="test_workflow",
        steps=[test_step_one],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    event_types = [type(event).__name__ for event in events]
    # Fixed: removed a duplicated "StepOutputEvent" assertion.
    expected_events = {
        "WorkflowStartedEvent",
        "WorkflowCompletedEvent",
        "StepStartedEvent",
        "StepCompletedEvent",
        "StepOutputEvent",
        "RunContentEvent",
    }
    missing = expected_events - set(event_types)
    assert not missing, f"Missing expected events: {missing}"
def test_agent_stream_with_yield_step_output():
    """An executor may emit an explicit StepOutput after streaming chunks."""

    def test_step_one_executor(step_input: StepInput) -> StepOutput:
        agent = Agent(
            name="custom_agent",
            instructions="You are a custom agent that can perform a custom task.",
        )
        # Forward the agent's streamed chunks, then append a hand-built output.
        yield from agent.run(input=step_input.input, stream=True, yield_run_output=True)
        yield StepOutput(content="Hello, world!")

    workflow = Workflow(
        name="test_workflow",
        steps=[Step(name="test_step_one", executor=test_step_one_executor)],
    )
    events = list(workflow.run(input="test", stream=True, stream_events=True))
    event_types = [type(event).__name__ for event in events]
    for expected in (
        "WorkflowStartedEvent",
        "WorkflowCompletedEvent",
        "StepStartedEvent",
        "StepCompletedEvent",
        "StepOutputEvent",
        "RunContentEvent",
    ):
        assert expected in event_types
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_custom_executor_events.py",
"license": "Apache License 2.0",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/filters.py | """Search filter expressions for filtering knowledge base documents and search results.
This module provides a set of filter operators for constructing complex search queries
that can be applied to knowledge bases, vector databases, and other searchable content.
Filter Types:
- Comparison: EQ (equals), NEQ (not equals), GT (greater than), GTE (greater than or equal),
LT (less than), LTE (less than or equal)
- Inclusion: IN (value in list)
- String: CONTAINS (substring match), STARTSWITH (prefix match)
- Logical: AND, OR, NOT
Example:
>>> from agno.filters import EQ, GT, IN, AND, OR, NOT
>>>
>>> # Simple equality filter
>>> filter = EQ("category", "technology")
>>>
>>> # Complex filter with multiple conditions
>>> filter = AND(
... EQ("status", "published"),
... GT("views", 1000),
... IN("category", ["tech", "science"])
... )
>>>
>>> # Using OR logic
>>> filter = OR(EQ("priority", "high"), EQ("urgent", True))
>>>
>>> # Negating conditions
>>> filter = NOT(EQ("status", "archived"))
>>>
>>> # Complex nested logic
>>> filter = OR(
... AND(EQ("type", "article"), GT("word_count", 500)),
... AND(EQ("type", "tutorial"), NOT(EQ("difficulty", "beginner")))
... )
"""
from __future__ import annotations
from typing import Any, List
# Maximum recursion depth for nested filter expressions (prevents stack overflow attacks)
MAX_FILTER_DEPTH: int = 10
# ============================================================
# Base Expression
# ============================================================
class FilterExpr:
    """Abstract base for all filter expressions.

    Concrete filters combine through the logical wrapper classes:

    - AND: both sub-expressions must hold
    - OR: at least one sub-expression must hold
    - NOT: inverts a sub-expression

    Example:
        >>> # Create complex filters using AND, OR, NOT
        >>> filter = OR(AND(EQ("status", "active"), GT("age", 18)), EQ("role", "admin"))
        >>> # Equivalent to: (status == "active" AND age > 18) OR role == "admin"
    """

    # Operator overloads provide `a | b`, `a & b` and `~a` shorthand.
    def __or__(self, other: FilterExpr) -> OR:
        """Return OR(self, other)."""
        return OR(self, other)

    def __and__(self, other: FilterExpr) -> AND:
        """Return AND(self, other)."""
        return AND(self, other)

    def __invert__(self) -> NOT:
        """Return NOT(self)."""
        return NOT(self)

    def to_dict(self) -> dict:
        """Serialize the expression; concrete subclasses must override this."""
        raise NotImplementedError("Subclasses must implement to_dict()")

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.__dict__})"
# ============================================================
# Comparison & Inclusion Filters
# ============================================================
class EQ(FilterExpr):
    """Equality filter: matches documents whose field equals the given value.

    Args:
        key: The field name to compare
        value: The value to match against

    Example:
        >>> # Match documents where status is "published"
        >>> filter = EQ("status", "published")
        >>>
        >>> # Match documents where author_id is 123
        >>> filter = EQ("author_id", 123)
    """

    def __init__(self, key: str, value: Any):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        # Key order matters for serialized output, so keep op first.
        return {"op": "EQ", "key": self.key, "value": self.value}
class IN(FilterExpr):
    """Inclusion filter: matches documents whose ``key`` field is one of ``values``.

    Args:
        key: Name of the field to check.
        values: List of values considered a match.

    Example:
        >>> IN("category", ["tech", "science", "engineering"])
        >>> IN("status", ["draft", "published"])
    """

    def __init__(self, key: str, values: List[Any]):
        self.key, self.values = key, values

    def to_dict(self) -> dict:
        return dict(op="IN", key=self.key, values=self.values)
class GT(FilterExpr):
    """Greater-than filter: matches documents whose ``key`` field exceeds ``value``.

    Args:
        key: Name of the field to compare.
        value: Exclusive lower bound.

    Example:
        >>> GT("age", 18)                # age > 18
        >>> GT("price", 100.0)           # price > 100.0
        >>> GT("created_at", 1234567890) # created after a timestamp
    """

    def __init__(self, key: str, value: Any):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="GT", key=self.key, value=self.value)
class LT(FilterExpr):
    """Less-than filter: matches documents whose ``key`` field is below ``value``.

    Args:
        key: Name of the field to compare.
        value: Exclusive upper bound.

    Example:
        >>> LT("age", 65)                # age < 65
        >>> LT("price", 50.0)            # price < 50.0
        >>> LT("created_at", 1234567890) # created before a timestamp
    """

    def __init__(self, key: str, value: Any):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="LT", key=self.key, value=self.value)
class NEQ(FilterExpr):
    """Not-equal filter: matches documents whose ``key`` field differs from ``value``.

    Args:
        key: Name of the field to compare.
        value: Value the field must NOT equal.

    Example:
        >>> NEQ("status", "archived")   # status != "archived"
    """

    def __init__(self, key: str, value: Any):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="NEQ", key=self.key, value=self.value)
class GTE(FilterExpr):
    """Greater-than-or-equal filter: matches where ``key`` >= ``value``.

    Args:
        key: Name of the field to compare.
        value: Inclusive lower bound.

    Example:
        >>> GTE("age", 18)                              # age >= 18
        >>> GTE("created_at", "2025-01-01T00:00:00Z")   # on or after timestamp
    """

    def __init__(self, key: str, value: Any):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="GTE", key=self.key, value=self.value)
class LTE(FilterExpr):
    """Less-than-or-equal filter: matches where ``key`` <= ``value``.

    Args:
        key: Name of the field to compare.
        value: Inclusive upper bound.

    Example:
        >>> LTE("age", 65)                              # age <= 65
        >>> LTE("created_at", "2025-12-31T23:59:59Z")   # on or before timestamp
    """

    def __init__(self, key: str, value: Any):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="LTE", key=self.key, value=self.value)
# ============================================================
# String Matching Filters
# ============================================================
class CONTAINS(FilterExpr):
    """Substring filter: matches where ``key`` contains ``value`` (case-insensitive).

    The value is stored verbatim; case handling is applied by the backend
    converter that interprets the serialized expression.

    Args:
        key: Name of the field to search.
        value: Substring to look for.

    Example:
        >>> CONTAINS("user_id", "admin")
        >>> CONTAINS("name", "john")
    """

    def __init__(self, key: str, value: str):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="CONTAINS", key=self.key, value=self.value)
class STARTSWITH(FilterExpr):
    """Prefix filter: matches where ``key`` begins with ``value``.

    Args:
        key: Name of the field to search.
        value: Prefix the field value must start with.

    Example:
        >>> STARTSWITH("name", "Agent")
        >>> STARTSWITH("session_id", "sess_")
    """

    def __init__(self, key: str, value: str):
        self.key, self.value = key, value

    def to_dict(self) -> dict:
        return dict(op="STARTSWITH", key=self.key, value=self.value)
# ============================================================
# Logical Operators
# ============================================================
class AND(FilterExpr):
    """Logical conjunction: matches only when ALL child expressions match.

    Args:
        *expressions: Any number of FilterExpr children; every one must be
            satisfied for a document to match.

    Example:
        >>> AND(EQ("status", "published"), GT("age", 18))
        >>> AND(
        ...     EQ("status", "active"),
        ...     GT("score", 80),
        ...     IN("category", ["tech", "science"]),
        ... )
    """

    def __init__(self, *expressions: FilterExpr):
        self.expressions = list(expressions)

    def to_dict(self) -> dict:
        children = [child.to_dict() for child in self.expressions]
        return {"op": "AND", "conditions": children}
class OR(FilterExpr):
    """Logical disjunction: matches when ANY child expression matches.

    Args:
        *expressions: Any number of FilterExpr children; at least one must be
            satisfied for a document to match.

    Example:
        >>> OR(EQ("status", "published"), EQ("status", "archived"))
        >>> OR(EQ("membership", "VIP"), GT("score", 1000))
    """

    def __init__(self, *expressions: FilterExpr):
        self.expressions = list(expressions)

    def to_dict(self) -> dict:
        children = [child.to_dict() for child in self.expressions]
        return {"op": "OR", "conditions": children}
class NOT(FilterExpr):
    """Logical negation: matches when the wrapped expression does NOT match.

    Args:
        expression: The FilterExpr to negate.

    Example:
        >>> NOT(EQ("status", "draft"))
        >>> NOT(AND(EQ("status", "inactive"), LT("score", 10)))
        >>> NOT(IN("user_id", [101, 102, 103]))
    """

    def __init__(self, expression: FilterExpr):
        self.expression = expression

    def to_dict(self) -> dict:
        return dict(op="NOT", condition=self.expression.to_dict())
# ============================================================
# Deserialization
# ============================================================
def from_dict(filter_dict: dict, _depth: int = 0) -> FilterExpr:
    """Reconstruct a FilterExpr object from its dictionary representation.

    This is the inverse of FilterExpr.to_dict(): it lets filters travel
    through JSON APIs as plain dictionaries and be rebuilt server-side.

    Args:
        filter_dict: Dictionary representation of a filter expression with an
            "op" key identifying the operator.
        _depth: Internal recursion-depth counter. Do not pass manually.

    Returns:
        FilterExpr: The reconstructed filter expression object.

    Raises:
        ValueError: If the dictionary is malformed, uses an unknown operator,
            or nests deeper than MAX_FILTER_DEPTH.

    Example:
        >>> reconstructed = from_dict(EQ("status", "published").to_dict())
        >>>
        >>> import json
        >>> json_str = '{"op": "AND", "conditions": [{"op": "EQ", "key": "status", "value": "active"}, {"op": "GT", "key": "age", "value": 18}]}'
        >>> filter_expr = from_dict(json.loads(json_str))
    """
    # Depth cap: filter payloads may be attacker-supplied, so bound recursion.
    if _depth > MAX_FILTER_DEPTH:
        raise ValueError(f"Filter expression exceeds maximum nesting depth of {MAX_FILTER_DEPTH}")
    if not isinstance(filter_dict, dict) or "op" not in filter_dict:
        raise ValueError(f"Invalid filter dictionary: must contain 'op' key. Got: {filter_dict}")
    op = filter_dict["op"]

    # All single key/value operators share one wire shape; dispatch via a table
    # instead of a long if/elif ladder (same validation, same error strings).
    single_value_ops = {
        "EQ": EQ,
        "GT": GT,
        "LT": LT,
        "NEQ": NEQ,
        "GTE": GTE,
        "LTE": LTE,
        "CONTAINS": CONTAINS,
        "STARTSWITH": STARTSWITH,
    }
    if op in single_value_ops:
        if "key" not in filter_dict or "value" not in filter_dict:
            raise ValueError(f"{op} filter requires 'key' and 'value' fields. Got: {filter_dict}")
        return single_value_ops[op](filter_dict["key"], filter_dict["value"])

    # IN is the one comparison operator with a plural "values" field.
    if op == "IN":
        if "key" not in filter_dict or "values" not in filter_dict:
            raise ValueError(f"IN filter requires 'key' and 'values' fields. Got: {filter_dict}")
        return IN(filter_dict["key"], filter_dict["values"])

    # Variadic logical operators: recurse into each child condition.
    if op in ("AND", "OR"):
        if "conditions" not in filter_dict:
            raise ValueError(f"{op} filter requires 'conditions' field. Got: {filter_dict}")
        # Guard against e.g. a string here, which would otherwise be iterated
        # character-by-character and fail with a confusing nested error.
        if not isinstance(filter_dict["conditions"], list):
            raise ValueError(f"{op} filter 'conditions' must be a list. Got: {filter_dict}")
        children = [from_dict(cond, _depth + 1) for cond in filter_dict["conditions"]]
        return AND(*children) if op == "AND" else OR(*children)

    if op == "NOT":
        if "condition" not in filter_dict:
            raise ValueError(f"NOT filter requires 'condition' field. Got: {filter_dict}")
        return NOT(from_dict(filter_dict["condition"], _depth + 1))

    raise ValueError(f"Unknown filter operator: {op}")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/filters.py",
"license": "Apache License 2.0",
"lines": 386,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/unit/test_filters.py | """Unit tests for search filter expressions.
Tests cover:
- Basic filter operators (EQ, IN, GT, LT, NEQ, GTE, LTE)
- String matching operators (CONTAINS, STARTSWITH)
- Logical operators (AND, OR, NOT)
- Operator overloading (&, |, ~)
- Serialization (to_dict)
- Deserialization (from_dict)
- Complex nested expressions
- Edge cases
"""
import pytest
from agno.filters import (
AND,
CONTAINS,
EQ,
GT,
GTE,
IN,
LT,
LTE,
NEQ,
NOT,
OR,
STARTSWITH,
FilterExpr,
from_dict,
)
class TestBasicOperators:
    """Tests for the core comparison/inclusion operators (EQ, IN, GT, LT)."""

    def test_eq_with_string(self):
        """EQ should hold a string value and serialize it verbatim."""
        expr = EQ("status", "published")
        assert expr.key == "status"
        assert expr.value == "published"
        expected = {"op": "EQ", "key": "status", "value": "published"}
        assert expr.to_dict() == expected

    def test_eq_with_int(self):
        """EQ should hold an integer value and serialize it verbatim."""
        expr = EQ("age", 25)
        assert (expr.key, expr.value) == ("age", 25)
        assert expr.to_dict() == {"op": "EQ", "key": "age", "value": 25}

    def test_eq_with_float(self):
        """EQ should hold a float value and serialize it verbatim."""
        expr = EQ("price", 19.99)
        assert (expr.key, expr.value) == ("price", 19.99)
        assert expr.to_dict() == {"op": "EQ", "key": "price", "value": 19.99}

    def test_eq_with_bool(self):
        """EQ should preserve a boolean value exactly (identity, not 1/0)."""
        expr = EQ("is_active", True)
        assert expr.key == "is_active"
        assert expr.value is True
        assert expr.to_dict() == {"op": "EQ", "key": "is_active", "value": True}

    def test_eq_with_none(self):
        """EQ should accept None as a value."""
        expr = EQ("deleted_at", None)
        assert expr.key == "deleted_at"
        assert expr.value is None
        assert expr.to_dict() == {"op": "EQ", "key": "deleted_at", "value": None}

    def test_in_with_strings(self):
        """IN should carry a list of string options."""
        options = ["tech", "science", "engineering"]
        expr = IN("category", options)
        assert expr.key == "category"
        assert expr.values == options
        assert expr.to_dict() == {"op": "IN", "key": "category", "values": options}

    def test_in_with_ints(self):
        """IN should carry a list of integer options."""
        ids = [1, 2, 3, 100]
        expr = IN("user_id", ids)
        assert expr.key == "user_id"
        assert expr.values == ids
        assert expr.to_dict() == {"op": "IN", "key": "user_id", "values": ids}

    def test_in_with_empty_list(self):
        """IN should accept an empty option list."""
        expr = IN("tags", [])
        assert expr.key == "tags"
        assert expr.values == []
        assert expr.to_dict() == {"op": "IN", "key": "tags", "values": []}

    def test_in_with_single_item(self):
        """IN should accept a single-element option list."""
        assert IN("status", ["published"]).values == ["published"]

    def test_gt_with_int(self):
        """GT should hold an integer threshold."""
        expr = GT("age", 18)
        assert (expr.key, expr.value) == ("age", 18)
        assert expr.to_dict() == {"op": "GT", "key": "age", "value": 18}

    def test_gt_with_float(self):
        """GT should hold a float threshold."""
        expr = GT("score", 85.5)
        assert (expr.key, expr.value) == ("score", 85.5)
        assert expr.to_dict() == {"op": "GT", "key": "score", "value": 85.5}

    def test_gt_with_negative(self):
        """GT should accept a negative threshold."""
        assert GT("temperature", -10).value == -10

    def test_lt_with_int(self):
        """LT should hold an integer threshold."""
        expr = LT("age", 65)
        assert (expr.key, expr.value) == ("age", 65)
        assert expr.to_dict() == {"op": "LT", "key": "age", "value": 65}

    def test_lt_with_float(self):
        """LT should hold a float threshold."""
        expr = LT("price", 100.50)
        assert (expr.key, expr.value) == ("price", 100.50)
        assert expr.to_dict() == {"op": "LT", "key": "price", "value": 100.50}

    def test_lt_with_zero(self):
        """LT should accept zero as a threshold."""
        assert LT("balance", 0).value == 0
class TestNewComparisonOperators:
    """Tests for the NEQ, GTE and LTE comparison operators."""

    def test_neq_with_string(self):
        """NEQ should hold a string value and serialize it verbatim."""
        expr = NEQ("status", "archived")
        assert (expr.key, expr.value) == ("status", "archived")
        expected = {"op": "NEQ", "key": "status", "value": "archived"}
        assert expr.to_dict() == expected

    def test_neq_with_int(self):
        """NEQ should hold an integer value."""
        expr = NEQ("priority", 0)
        assert expr.key == "priority"
        assert expr.value == 0
        assert expr.to_dict() == {"op": "NEQ", "key": "priority", "value": 0}

    def test_neq_with_bool(self):
        """NEQ should preserve a boolean value exactly (identity, not 1/0)."""
        expr = NEQ("is_deleted", True)
        assert expr.key == "is_deleted"
        assert expr.value is True
        assert expr.to_dict() == {"op": "NEQ", "key": "is_deleted", "value": True}

    def test_neq_with_none(self):
        """NEQ should accept None as a value."""
        expr = NEQ("error", None)
        assert expr.key == "error"
        assert expr.value is None

    def test_gte_with_int(self):
        """GTE should hold an integer bound."""
        expr = GTE("age", 18)
        assert (expr.key, expr.value) == ("age", 18)
        assert expr.to_dict() == {"op": "GTE", "key": "age", "value": 18}

    def test_gte_with_float(self):
        """GTE should hold a float bound."""
        expr = GTE("score", 85.5)
        assert (expr.key, expr.value) == ("score", 85.5)
        assert expr.to_dict() == {"op": "GTE", "key": "score", "value": 85.5}

    def test_gte_with_timestamp(self):
        """GTE should accept an ISO timestamp string as the bound."""
        ts = "2025-01-01T00:00:00Z"
        expr = GTE("created_at", ts)
        assert expr.value == ts
        assert expr.to_dict() == {"op": "GTE", "key": "created_at", "value": ts}

    def test_gte_with_zero(self):
        """GTE should accept zero (boundary value)."""
        assert GTE("duration_ms", 0).value == 0

    def test_lte_with_int(self):
        """LTE should hold an integer bound."""
        expr = LTE("age", 65)
        assert (expr.key, expr.value) == ("age", 65)
        assert expr.to_dict() == {"op": "LTE", "key": "age", "value": 65}

    def test_lte_with_float(self):
        """LTE should hold a float bound."""
        expr = LTE("price", 99.99)
        assert (expr.key, expr.value) == ("price", 99.99)
        assert expr.to_dict() == {"op": "LTE", "key": "price", "value": 99.99}

    def test_lte_with_timestamp(self):
        """LTE should accept an ISO timestamp string as the bound."""
        ts = "2025-12-31T23:59:59Z"
        assert LTE("created_at", ts).value == ts

    def test_lte_with_negative(self):
        """LTE should accept a negative bound."""
        assert LTE("temperature", -5).value == -5

    def test_range_with_gte_lte(self):
        """GTE and LTE should combine under AND to express a closed range."""
        serialized = AND(GTE("age", 18), LTE("age", 65)).to_dict()
        assert serialized["op"] == "AND"
        assert serialized["conditions"] == [
            {"op": "GTE", "key": "age", "value": 18},
            {"op": "LTE", "key": "age", "value": 65},
        ]

    def test_neq_combined_with_and(self):
        """Multiple NEQ conditions should nest cleanly inside AND."""
        serialized = AND(NEQ("status", "archived"), NEQ("status", "deleted")).to_dict()
        assert len(serialized["conditions"]) == 2
        assert {c["op"] for c in serialized["conditions"]} == {"NEQ"}
class TestStringMatchingOperators:
    """Tests for the CONTAINS and STARTSWITH string operators."""

    def test_contains_basic(self):
        """CONTAINS should hold a plain substring and serialize it."""
        expr = CONTAINS("user_id", "admin")
        assert expr.key == "user_id"
        assert expr.value == "admin"
        expected = {"op": "CONTAINS", "key": "user_id", "value": "admin"}
        assert expr.to_dict() == expected

    def test_contains_with_spaces(self):
        """CONTAINS should preserve embedded spaces."""
        assert CONTAINS("name", "John Doe").value == "John Doe"

    def test_contains_with_empty_string(self):
        """CONTAINS should accept an empty substring."""
        assert CONTAINS("description", "").value == ""

    def test_contains_case_sensitivity_note(self):
        """CONTAINS stores the value as-is (case handling lives in the converter)."""
        expr = CONTAINS("name", "ADMIN")
        assert expr.value == "ADMIN"
        assert expr.to_dict()["value"] == "ADMIN"

    def test_contains_with_special_characters(self):
        """CONTAINS should preserve special characters like path separators."""
        assert CONTAINS("path", "/usr/local").value == "/usr/local"

    def test_startswith_basic(self):
        """STARTSWITH should hold a plain prefix and serialize it."""
        expr = STARTSWITH("name", "Agent")
        assert expr.key == "name"
        assert expr.value == "Agent"
        expected = {"op": "STARTSWITH", "key": "name", "value": "Agent"}
        assert expr.to_dict() == expected

    def test_startswith_with_prefix(self):
        """STARTSWITH should work with common id-prefix patterns."""
        assert STARTSWITH("session_id", "sess_").value == "sess_"

    def test_startswith_with_empty_string(self):
        """STARTSWITH should accept an empty prefix."""
        assert STARTSWITH("name", "").value == ""

    def test_startswith_with_unicode(self):
        """STARTSWITH should preserve non-ASCII prefixes."""
        assert STARTSWITH("name", "日本").value == "日本"

    def test_contains_in_and_expression(self):
        """CONTAINS should nest inside AND alongside other operators."""
        serialized = AND(CONTAINS("user_id", "admin"), EQ("status", "OK")).to_dict()
        assert serialized["op"] == "AND"
        assert [c["op"] for c in serialized["conditions"]] == ["CONTAINS", "EQ"]

    def test_startswith_in_or_expression(self):
        """STARTSWITH should nest inside OR."""
        serialized = OR(STARTSWITH("name", "Agent"), STARTSWITH("name", "Team")).to_dict()
        assert serialized["op"] == "OR"
        assert {c["op"] for c in serialized["conditions"]} == {"STARTSWITH"}

    def test_contains_and_startswith_combined(self):
        """CONTAINS and STARTSWITH should combine with EQ in one AND."""
        serialized = AND(
            CONTAINS("user_id", "user"),
            STARTSWITH("agent_id", "stock_"),
            EQ("status", "OK"),
        ).to_dict()
        assert len(serialized["conditions"]) == 3

    def test_not_contains(self):
        """NOT(CONTAINS(...)) should serialize as a negated substring match."""
        serialized = NOT(CONTAINS("name", "test")).to_dict()
        assert serialized["op"] == "NOT"
        inner = serialized["condition"]
        assert inner["op"] == "CONTAINS"
        assert inner["value"] == "test"
class TestLogicalOperators:
    """Tests for the AND, OR and NOT combinators."""

    def test_and_with_two_conditions(self):
        """AND of two expressions should serialize both in order."""
        combined = AND(EQ("status", "published"), GT("views", 1000))
        assert len(combined.expressions) == 2
        expected = {
            "op": "AND",
            "conditions": [
                {"op": "EQ", "key": "status", "value": "published"},
                {"op": "GT", "key": "views", "value": 1000},
            ],
        }
        assert combined.to_dict() == expected

    def test_and_with_multiple_conditions(self):
        """AND should accept an arbitrary number of child expressions."""
        combined = AND(
            EQ("status", "active"),
            GT("age", 18),
            LT("age", 65),
            IN("role", ["user", "admin"]),
        )
        assert len(combined.expressions) == 4

    def test_or_with_two_conditions(self):
        """OR of two expressions should serialize both in order."""
        combined = OR(EQ("priority", "high"), EQ("urgent", True))
        assert len(combined.expressions) == 2
        expected = {
            "op": "OR",
            "conditions": [
                {"op": "EQ", "key": "priority", "value": "high"},
                {"op": "EQ", "key": "urgent", "value": True},
            ],
        }
        assert combined.to_dict() == expected

    def test_or_with_multiple_conditions(self):
        """OR should accept an arbitrary number of child expressions."""
        combined = OR(
            EQ("status", "draft"),
            EQ("status", "published"),
            EQ("status", "archived"),
        )
        assert len(combined.expressions) == 3

    def test_not_with_eq(self):
        """NOT should wrap an EQ and serialize it under 'condition'."""
        negated = NOT(EQ("status", "archived"))
        assert isinstance(negated.expression, EQ)
        expected = {
            "op": "NOT",
            "condition": {"op": "EQ", "key": "status", "value": "archived"},
        }
        assert negated.to_dict() == expected

    def test_not_with_in(self):
        """NOT should wrap an IN expression."""
        negated = NOT(IN("user_id", [101, 102, 103]))
        assert negated.to_dict() == {
            "op": "NOT",
            "condition": {"op": "IN", "key": "user_id", "values": [101, 102, 103]},
        }

    def test_not_with_complex_expression(self):
        """NOT should wrap a composite AND expression."""
        negated = NOT(AND(EQ("status", "inactive"), LT("score", 10)))
        assert isinstance(negated.expression, AND)
        expected = {
            "op": "NOT",
            "condition": {
                "op": "AND",
                "conditions": [
                    {"op": "EQ", "key": "status", "value": "inactive"},
                    {"op": "LT", "key": "score", "value": 10},
                ],
            },
        }
        assert negated.to_dict() == expected
class TestOperatorOverloading:
    """Tests for the &, | and ~ operator shorthands."""

    def test_and_operator_overload(self):
        """The & operator should build an AND node."""
        combined = EQ("status", "published") & GT("views", 1000)
        assert isinstance(combined, AND)
        assert len(combined.expressions) == 2

    def test_or_operator_overload(self):
        """The | operator should build an OR node."""
        combined = EQ("priority", "high") | EQ("urgent", True)
        assert isinstance(combined, OR)
        assert len(combined.expressions) == 2

    def test_not_operator_overload(self):
        """The ~ operator should build a NOT node."""
        negated = ~EQ("status", "archived")
        assert isinstance(negated, NOT)
        assert isinstance(negated.expression, EQ)

    def test_chained_and_operators(self):
        """Chained & operators should still yield an AND node."""
        combined = EQ("status", "active") & GT("age", 18) & LT("age", 65)
        assert isinstance(combined, AND)

    def test_chained_or_operators(self):
        """Chained | operators should still yield an OR node."""
        combined = EQ("status", "draft") | EQ("status", "published") | EQ("status", "archived")
        assert isinstance(combined, OR)

    def test_mixed_operators(self):
        """Mixing & and | should respect the explicit grouping."""
        combined = (EQ("status", "active") & GT("age", 18)) | EQ("role", "admin")
        assert isinstance(combined, OR)

    def test_not_with_and(self):
        """~ applied to an & expression should wrap an AND node."""
        negated = ~(EQ("status", "inactive") & LT("score", 10))
        assert isinstance(negated, NOT)
        assert isinstance(negated.expression, AND)

    def test_not_with_or(self):
        """~ applied to a | expression should wrap an OR node."""
        negated = ~(EQ("role", "guest") | EQ("role", "banned"))
        assert isinstance(negated, NOT)
        assert isinstance(negated.expression, OR)
class TestComplexNesting:
    """Tests for deeply nested combinations of AND, OR and NOT."""

    def test_nested_and_or(self):
        """Two AND branches should nest inside an OR."""
        expr = OR(
            AND(EQ("type", "article"), GT("word_count", 500)),
            AND(EQ("type", "tutorial"), LT("difficulty", 5)),
        )
        assert isinstance(expr, OR)
        assert len(expr.expressions) == 2
        for branch in expr.expressions:
            assert isinstance(branch, AND)

    def test_nested_or_and(self):
        """An OR branch should nest inside an AND."""
        expr = AND(
            EQ("status", "published"),
            OR(EQ("category", "tech"), EQ("category", "science")),
        )
        assert isinstance(expr, AND)
        assert len(expr.expressions) == 2

    def test_deeply_nested_expression(self):
        """Multi-level nesting should serialize with the right inner ops."""
        expr = AND(
            EQ("is_active", True),
            OR(
                AND(EQ("tier", "premium"), GT("credits", 100)),
                AND(EQ("tier", "enterprise"), NOT(EQ("suspended", True))),
            ),
        )
        serialized = expr.to_dict()
        assert serialized["op"] == "AND"
        assert serialized["conditions"][1]["op"] == "OR"

    def test_complex_with_not(self):
        """NOT should be usable at multiple levels of a composite filter."""
        expr = AND(
            NOT(EQ("status", "deleted")),
            OR(GT("score", 80), AND(EQ("tier", "gold"), NOT(LT("age", 18)))),
        )
        assert isinstance(expr, AND)
        assert isinstance(expr.expressions[0], NOT)

    def test_triple_nested_and_or_not(self):
        """AND/OR/NOT three levels deep should serialize cleanly."""
        expr = OR(
            AND(EQ("region", "US"), NOT(IN("state", ["AK", "HI"]))),
            AND(EQ("region", "EU"), IN("country", ["UK", "FR", "DE"])),
        )
        serialized = expr.to_dict()
        assert serialized["op"] == "OR"
        assert len(serialized["conditions"]) == 2
class TestSerialization:
    """Tests that to_dict produces the expected wire format for every operator."""

    def test_eq_serialization(self):
        """EQ serialization should expose op/key/value fields."""
        serialized = EQ("key", "value").to_dict()
        for field in ("op", "key", "value"):
            assert field in serialized
        assert serialized["op"] == "EQ"

    def test_in_serialization(self):
        """IN serialization should keep the values as a real list."""
        serialized = IN("tags", ["python", "javascript"]).to_dict()
        assert isinstance(serialized["values"], list)
        assert serialized["values"] == ["python", "javascript"]

    def test_and_serialization_nested(self):
        """Nested OR inside AND should serialize recursively."""
        serialized = AND(EQ("a", 1), OR(EQ("b", 2), EQ("c", 3))).to_dict()
        inner = serialized["conditions"][1]
        assert inner["op"] == "OR"
        assert len(inner["conditions"]) == 2

    def test_neq_serialization(self):
        """NEQ serialization should match the expected wire shape."""
        serialized = NEQ("status", "archived").to_dict()
        assert serialized == {"op": "NEQ", "key": "status", "value": "archived"}

    def test_gte_serialization(self):
        """GTE serialization should match the expected wire shape."""
        serialized = GTE("age", 18).to_dict()
        assert serialized == {"op": "GTE", "key": "age", "value": 18}

    def test_lte_serialization(self):
        """LTE serialization should match the expected wire shape."""
        serialized = LTE("price", 100.0).to_dict()
        assert serialized == {"op": "LTE", "key": "price", "value": 100.0}

    def test_contains_serialization(self):
        """CONTAINS serialization should match the expected wire shape."""
        serialized = CONTAINS("name", "admin").to_dict()
        assert serialized == {"op": "CONTAINS", "key": "name", "value": "admin"}

    def test_startswith_serialization(self):
        """STARTSWITH serialization should match the expected wire shape."""
        serialized = STARTSWITH("name", "Agent").to_dict()
        assert serialized == {"op": "STARTSWITH", "key": "name", "value": "Agent"}

    def test_complex_with_new_operators(self):
        """An AND over all newer operators should serialize each in order."""
        serialized = AND(
            NEQ("status", "archived"),
            GTE("duration_ms", 100),
            LTE("duration_ms", 5000),
            CONTAINS("user_id", "admin"),
            STARTSWITH("name", "Agent"),
        ).to_dict()
        assert serialized["op"] == "AND"
        ops = [c["op"] for c in serialized["conditions"]]
        assert ops == ["NEQ", "GTE", "LTE", "CONTAINS", "STARTSWITH"]

    def test_complex_serialization_roundtrip(self):
        """A composite OR expression should serialize to a valid nested dict."""
        serialized = OR(
            AND(EQ("status", "published"), GT("views", 1000)),
            NOT(IN("category", ["draft", "archived"])),
        ).to_dict()
        assert isinstance(serialized, dict)
        assert serialized["op"] == "OR"
        assert isinstance(serialized["conditions"], list)
        assert [c["op"] for c in serialized["conditions"]] == ["AND", "NOT"]
class TestDeserialization:
    """Verify that from_dict rebuilds FilterExpr objects from their serialized dicts."""

    def test_eq_deserialization(self):
        """EQ survives a to_dict/from_dict roundtrip."""
        restored = from_dict(EQ("status", "published").to_dict())
        assert isinstance(restored, EQ)
        assert restored.key == "status"
        assert restored.value == "published"

    def test_in_deserialization(self):
        """IN survives a to_dict/from_dict roundtrip."""
        restored = from_dict(IN("category", ["tech", "science", "engineering"]).to_dict())
        assert isinstance(restored, IN)
        assert restored.key == "category"
        assert restored.values == ["tech", "science", "engineering"]

    def test_gt_deserialization(self):
        """GT survives a to_dict/from_dict roundtrip."""
        restored = from_dict(GT("age", 18).to_dict())
        assert isinstance(restored, GT)
        assert restored.key == "age"
        assert restored.value == 18

    def test_lt_deserialization(self):
        """LT survives a to_dict/from_dict roundtrip."""
        restored = from_dict(LT("price", 100.0).to_dict())
        assert isinstance(restored, LT)
        assert restored.key == "price"
        assert restored.value == 100.0

    def test_neq_deserialization(self):
        """NEQ survives a to_dict/from_dict roundtrip."""
        restored = from_dict(NEQ("status", "archived").to_dict())
        assert isinstance(restored, NEQ)
        assert restored.key == "status"
        assert restored.value == "archived"

    def test_gte_deserialization(self):
        """GTE survives a to_dict/from_dict roundtrip."""
        restored = from_dict(GTE("age", 18).to_dict())
        assert isinstance(restored, GTE)
        assert restored.key == "age"
        assert restored.value == 18

    def test_lte_deserialization(self):
        """LTE survives a to_dict/from_dict roundtrip."""
        restored = from_dict(LTE("price", 99.99).to_dict())
        assert isinstance(restored, LTE)
        assert restored.key == "price"
        assert restored.value == 99.99

    def test_contains_deserialization(self):
        """CONTAINS survives a to_dict/from_dict roundtrip."""
        restored = from_dict(CONTAINS("user_id", "admin").to_dict())
        assert isinstance(restored, CONTAINS)
        assert restored.key == "user_id"
        assert restored.value == "admin"

    def test_startswith_deserialization(self):
        """STARTSWITH survives a to_dict/from_dict roundtrip."""
        restored = from_dict(STARTSWITH("name", "Agent").to_dict())
        assert isinstance(restored, STARTSWITH)
        assert restored.key == "name"
        assert restored.value == "Agent"

    def test_invalid_neq_missing_fields(self):
        """An NEQ payload missing its value is rejected."""
        with pytest.raises(ValueError, match="NEQ filter requires"):
            from_dict({"op": "NEQ", "key": "status"})

    def test_invalid_gte_missing_fields(self):
        """A GTE payload missing its value is rejected."""
        with pytest.raises(ValueError, match="GTE filter requires"):
            from_dict({"op": "GTE", "key": "age"})

    def test_invalid_lte_missing_fields(self):
        """An LTE payload missing its key is rejected."""
        with pytest.raises(ValueError, match="LTE filter requires"):
            from_dict({"op": "LTE", "value": 100})

    def test_invalid_contains_missing_fields(self):
        """A CONTAINS payload missing its value is rejected."""
        with pytest.raises(ValueError, match="CONTAINS filter requires"):
            from_dict({"op": "CONTAINS", "key": "name"})

    def test_invalid_startswith_missing_fields(self):
        """A STARTSWITH payload missing its key is rejected."""
        with pytest.raises(ValueError, match="STARTSWITH filter requires"):
            from_dict({"op": "STARTSWITH", "value": "Agent"})

    def test_and_deserialization(self):
        """AND roundtrips and preserves the types of its operands."""
        restored = from_dict(AND(EQ("status", "published"), GT("views", 1000)).to_dict())
        assert isinstance(restored, AND)
        assert len(restored.expressions) == 2
        assert isinstance(restored.expressions[0], EQ)
        assert isinstance(restored.expressions[1], GT)

    def test_or_deserialization(self):
        """OR roundtrips with its operand count intact."""
        restored = from_dict(OR(EQ("priority", "high"), EQ("urgent", True)).to_dict())
        assert isinstance(restored, OR)
        assert len(restored.expressions) == 2

    def test_not_deserialization(self):
        """NOT roundtrips and keeps the wrapped expression's type."""
        restored = from_dict(NOT(EQ("status", "archived")).to_dict())
        assert isinstance(restored, NOT)
        assert isinstance(restored.expression, EQ)

    def test_complex_nested_deserialization(self):
        """Deeply nested operator-built expressions roundtrip correctly."""
        source = (EQ("type", "article") & GT("word_count", 500)) | (
            EQ("type", "tutorial") & ~EQ("difficulty", "beginner")
        )
        restored = from_dict(source.to_dict())
        assert isinstance(restored, OR)
        assert len(restored.expressions) == 2
        assert isinstance(restored.expressions[0], AND)
        assert isinstance(restored.expressions[1], AND)

    def test_operator_overload_deserialization(self):
        """Filters built via &, | and ~ roundtrip to AND, OR and NOT respectively."""
        conjunction = EQ("status", "published") & GT("views", 1000)
        assert isinstance(from_dict(conjunction.to_dict()), AND)

        disjunction = EQ("priority", "high") | EQ("urgent", True)
        assert isinstance(from_dict(disjunction.to_dict()), OR)

        negation = ~EQ("status", "draft")
        assert isinstance(from_dict(negation.to_dict()), NOT)

    def test_invalid_dict_missing_op(self):
        """A payload with no 'op' key is rejected."""
        with pytest.raises(ValueError, match="must contain 'op' key"):
            from_dict({"key": "status", "value": "published"})

    def test_invalid_dict_unknown_op(self):
        """A payload with an unrecognized operator is rejected."""
        with pytest.raises(ValueError, match="Unknown filter operator"):
            from_dict({"op": "UNKNOWN", "key": "status", "value": "published"})

    def test_invalid_eq_missing_fields(self):
        """An EQ payload missing its value is rejected."""
        with pytest.raises(ValueError, match="EQ filter requires"):
            from_dict({"op": "EQ", "key": "status"})

    def test_invalid_in_missing_fields(self):
        """An IN payload missing its values is rejected."""
        with pytest.raises(ValueError, match="IN filter requires"):
            from_dict({"op": "IN", "key": "category"})

    def test_invalid_and_missing_conditions(self):
        """An AND payload without conditions is rejected."""
        with pytest.raises(ValueError, match="AND filter requires 'conditions' field"):
            from_dict({"op": "AND"})

    def test_invalid_or_missing_conditions(self):
        """An OR payload without conditions is rejected."""
        with pytest.raises(ValueError, match="OR filter requires 'conditions' field"):
            from_dict({"op": "OR"})

    def test_invalid_not_missing_condition(self):
        """A NOT payload without a condition is rejected."""
        with pytest.raises(ValueError, match="NOT filter requires 'condition' field"):
            from_dict({"op": "NOT"})

    def test_complex_nested_with_new_operators(self):
        """Nested deserialization preserves NEQ/GTE/LTE/CONTAINS/STARTSWITH types."""
        source = AND(
            NEQ("status", "archived"),
            OR(
                CONTAINS("user_id", "admin"),
                STARTSWITH("agent_id", "stock_"),
            ),
            GTE("duration_ms", 100),
            LTE("duration_ms", 5000),
        )
        restored = from_dict(source.to_dict())
        assert isinstance(restored, AND)
        assert len(restored.expressions) == 4
        # Operand types come back in construction order.
        for expr, expected_cls in zip(restored.expressions, (NEQ, OR, GTE, LTE)):
            assert isinstance(expr, expected_cls)
        # The nested OR keeps its own operands' types too.
        nested_or = restored.expressions[1]
        assert isinstance(nested_or.expressions[0], CONTAINS)
        assert isinstance(nested_or.expressions[1], STARTSWITH)

    def test_roundtrip_preserves_semantics(self):
        """serialize -> deserialize -> serialize yields an identical dict for each operator."""
        cases = [
            EQ("status", "published"),
            IN("category", ["tech", "science"]),
            GT("views", 1000),
            LT("age", 65),
            NEQ("status", "archived"),
            GTE("age", 18),
            LTE("price", 100.0),
            CONTAINS("name", "admin"),
            STARTSWITH("session_id", "sess_"),
            EQ("active", True) & GT("score", 80),
            EQ("priority", "high") | EQ("urgent", True),
            ~EQ("status", "archived"),
            (EQ("type", "article") & GT("word_count", 500)) | (EQ("type", "tutorial")),
            AND(NEQ("status", "deleted"), GTE("duration_ms", 0), CONTAINS("user_id", "test")),
        ]
        for case in cases:
            first = case.to_dict()
            second = from_dict(first).to_dict()
            assert first == second, f"Roundtrip failed for {case}"
class TestEdgeCases:
    """Edge cases around the values stored inside filter expressions."""

    def test_special_characters_in_strings(self):
        """Apostrophes and path separators are stored verbatim."""
        assert EQ("name", "O'Brien").value == "O'Brien"
        assert EQ("path", "/usr/local/bin").value == "/usr/local/bin"

    def test_unicode_characters(self):
        """Non-ASCII values are preserved unchanged."""
        assert EQ("name", "François").value == "François"
        assert "中文" in IN("languages", ["中文", "日本語", "한국어"]).values

    def test_very_large_numbers(self):
        """Integers beyond 32-bit range are stored without truncation."""
        assert GT("timestamp", 1234567890123456).value == 1234567890123456

    def test_floating_point_precision(self):
        """Float values are kept exactly as given."""
        assert EQ("price", 19.99999).value == 19.99999

    def test_empty_string(self):
        """An empty string is a legal EQ value."""
        assert EQ("description", "").value == ""

    def test_whitespace_string(self):
        """A whitespace-only string is a legal EQ value."""
        assert EQ("name", "   ").value == "   "

    def test_in_with_mixed_types(self):
        """IN accepts heterogeneous value lists."""
        assert IN("value", [1, "two", 3.0, True]).values == [1, "two", 3.0, True]

    def test_multiple_ands_same_key(self):
        """Two AND conditions on the same key express a range query."""
        payload = AND(GT("age", 18), LT("age", 65)).to_dict()
        assert len(payload["conditions"]) == 2
class TestRepr:
    """__repr__ output of filter expressions should name the operator and key."""

    def test_eq_repr(self):
        """EQ repr mentions the operator and the key."""
        text = repr(EQ("status", "published"))
        assert "EQ" in text
        assert "status" in text

    def test_and_repr(self):
        """AND repr mentions the operator."""
        text = repr(AND(EQ("a", 1), EQ("b", 2)))
        assert "AND" in text

    def test_neq_repr(self):
        """NEQ repr mentions the operator and the key."""
        text = repr(NEQ("status", "archived"))
        assert "NEQ" in text
        assert "status" in text

    def test_gte_repr(self):
        """GTE repr mentions the operator."""
        assert "GTE" in repr(GTE("age", 18))

    def test_lte_repr(self):
        """LTE repr mentions the operator."""
        assert "LTE" in repr(LTE("price", 99.99))

    def test_contains_repr(self):
        """CONTAINS repr mentions the operator."""
        assert "CONTAINS" in repr(CONTAINS("name", "admin"))

    def test_startswith_repr(self):
        """STARTSWITH repr mentions the operator."""
        assert "STARTSWITH" in repr(STARTSWITH("name", "Agent"))

    def test_complex_repr(self):
        """A deeply nested expression still yields a non-empty string repr."""
        text = repr(OR(AND(EQ("a", 1), GT("b", 2)), NOT(EQ("c", 3))))
        assert isinstance(text, str)
        assert len(text) > 0
class TestRealWorldScenarios:
    """Usage scenarios lifted from the cookbook examples."""

    def test_sales_data_filtering(self):
        """Restricting sales data to one region via IN (cookbook example)."""
        assert IN("region", ["north_america"]).to_dict() == {
            "op": "IN",
            "key": "region",
            "values": ["north_america"],
        }

    def test_exclude_region(self):
        """Excluding a region by wrapping IN in NOT."""
        payload = NOT(IN("region", ["north_america"])).to_dict()
        assert payload["op"] == "NOT"
        assert payload["condition"]["op"] == "IN"

    def test_sales_and_not_region(self):
        """Combining a data_type check with a region exclusion."""
        payload = AND(EQ("data_type", "sales"), NOT(EQ("region", "north_america"))).to_dict()
        assert payload["op"] == "AND"
        assert payload["conditions"][0]["value"] == "sales"
        assert payload["conditions"][1]["op"] == "NOT"

    def test_cv_filtering_by_users(self):
        """Filtering CVs down to a known set of user_ids (team cookbook example)."""
        candidates = [
            "jordan_mitchell",
            "taylor_brooks",
            "morgan_lee",
            "casey_jordan",
            "alex_rivera",
        ]
        assert len(IN("user_id", candidates).values) == 5

    def test_cv_complex_filter(self):
        """Including some users while excluding others via AND/NOT."""
        payload = AND(
            IN("user_id", ["jordan_mitchell", "taylor_brooks"]),
            NOT(IN("user_id", ["morgan_lee", "casey_jordan", "alex_rivera"])),
        ).to_dict()
        assert payload["op"] == "AND"
        assert payload["conditions"][1]["op"] == "NOT"

    def test_or_with_nonexistent_fallback(self):
        """OR keeps both branches even when one value cannot match."""
        payload = OR(EQ("user_id", "this candidate does not exist"), EQ("year", 2020)).to_dict()
        assert payload["op"] == "OR"
        assert len(payload["conditions"]) == 2

    def test_multiple_metadata_fields(self):
        """AND over several independent metadata fields."""
        combined = AND(
            EQ("data_type", "sales"),
            EQ("year", 2024),
            IN("currency", ["USD", "EUR"]),
            NOT(EQ("archived", True)),
        )
        assert len(combined.expressions) == 4
class TestTypeValidation:
    """Operators should tolerate the Python value types callers are expected to pass."""

    def test_eq_accepts_any_type(self):
        """EQ can be constructed with any common Python value type without raising."""
        samples = [
            ("str_field", "value"),
            ("int_field", 42),
            ("float_field", 3.14),
            ("bool_field", True),
            ("none_field", None),
            ("list_field", [1, 2, 3]),
            ("dict_field", {"key": "value"}),
        ]
        for key, value in samples:
            EQ(key, value)

    def test_in_requires_list(self):
        """IN stores its values as a list."""
        assert isinstance(IN("field", [1, 2, 3]).values, list)

    def test_comparison_operators_with_strings(self):
        """GT/LT accept strings (lexicographic semantics are backend-dependent)."""
        GT("name", "A")
        LT("name", "Z")

    def test_and_or_require_filter_expressions(self):
        """AND/OR hold FilterExpr instances as their operands."""
        for combined in (AND(EQ("a", 1), EQ("b", 2)), OR(EQ("a", 1), EQ("b", 2))):
            assert all(isinstance(e, FilterExpr) for e in combined.expressions)
class TestUsagePatterns:
    """Correct calling conventions when passing filters to list-expecting APIs."""

    def test_single_filter_should_be_wrapped_in_list(self):
        """A single filter must be wrapped in a list."""
        wrapped = [EQ("status", "active")]
        assert isinstance(wrapped, list)
        assert len(wrapped) == 1
        assert isinstance(wrapped[0], FilterExpr)

    def test_multiple_filters_in_list(self):
        """Multiple independent filters all go into one list."""
        wrapped = [
            EQ("status", "active"),
            GT("age", 18),
            IN("category", ["tech", "science"]),
        ]
        assert isinstance(wrapped, list)
        assert len(wrapped) == 3
        assert all(isinstance(f, FilterExpr) for f in wrapped)

    def test_list_with_single_complex_expression(self):
        """A single composite expression is still wrapped in a list."""
        wrapped = [AND(EQ("status", "active"), GT("score", 80))]
        assert isinstance(wrapped, list)
        assert len(wrapped) == 1
        assert isinstance(wrapped[0], AND)

    def test_list_with_multiple_complex_expressions(self):
        """Several composite expressions can share one list."""
        wrapped = [
            AND(EQ("type", "article"), GT("views", 1000)),
            OR(EQ("featured", True), GT("score", 90)),
        ]
        assert isinstance(wrapped, list)
        assert len(wrapped) == 2

    def test_filter_expr_is_not_iterable(self):
        """A bare FilterExpr cannot be iterated; wrap it in a list first."""
        bare = EQ("status", "active")
        with pytest.raises(TypeError):
            list(bare)

    def test_correct_way_to_pass_single_filter(self):
        """knowledge_filters-style parameters expect a list of FilterExpr."""

        def check(filters):
            """Mimic validation done by a list-expecting filters parameter."""
            if not isinstance(filters, list):
                raise TypeError("filters must be a list")
            for item in filters:
                if not isinstance(item, FilterExpr):
                    raise ValueError(f"Expected FilterExpr, got {type(item)}")
            return True

        # Correct: the single filter is wrapped in a list.
        assert check([EQ("user_id", "123")])
        # Incorrect: a bare filter is rejected.
        with pytest.raises(TypeError):
            check(EQ("user_id", "123"))

    def test_empty_filter_list(self):
        """An empty list of filters is a valid value."""
        empty = []
        assert isinstance(empty, list)
        assert len(empty) == 0
class TestTraceFilterScenarios:
    """Trace-filtering scenarios mirroring the FE advanced filter bar."""

    def test_status_equals_ok(self):
        """status = OK."""
        assert EQ("status", "OK").to_dict() == {"op": "EQ", "key": "status", "value": "OK"}

    def test_status_not_error(self):
        """status != ERROR."""
        assert NEQ("status", "ERROR").to_dict() == {"op": "NEQ", "key": "status", "value": "ERROR"}

    def test_user_id_contains(self):
        """user_id contains 'admin'."""
        assert CONTAINS("user_id", "admin").to_dict() == {
            "op": "CONTAINS",
            "key": "user_id",
            "value": "admin",
        }

    def test_status_ok_and_user_contains(self):
        """Composite: status = OK AND user_id contains 'user'."""
        payload = AND(
            EQ("status", "OK"),
            CONTAINS("user_id", "user"),
        ).to_dict()
        assert payload["op"] == "AND"
        assert len(payload["conditions"]) == 2
        assert payload["conditions"][0] == {"op": "EQ", "key": "status", "value": "OK"}
        assert payload["conditions"][1] == {"op": "CONTAINS", "key": "user_id", "value": "user"}

    def test_duration_range_filter(self):
        """Duration bounded between 100ms and 5000ms."""
        payload = AND(
            GTE("duration_ms", 100),
            LTE("duration_ms", 5000),
        ).to_dict()
        assert payload["conditions"][0] == {"op": "GTE", "key": "duration_ms", "value": 100}
        assert payload["conditions"][1] == {"op": "LTE", "key": "duration_ms", "value": 5000}

    def test_agent_id_startswith(self):
        """agent_id starts with 'stock_'."""
        assert STARTSWITH("agent_id", "stock_").to_dict() == {
            "op": "STARTSWITH",
            "key": "agent_id",
            "value": "stock_",
        }

    def test_multiple_agent_ids(self):
        """Matching any of several agent IDs via IN."""
        payload = IN("agent_id", ["stock_agent", "weather_agent", "news_agent"]).to_dict()
        assert payload["op"] == "IN"
        assert len(payload["values"]) == 3

    def test_complex_trace_search(self):
        """(status=OK AND agent_id starts with 'stock') OR (status=ERROR AND duration > 5000)."""
        payload = OR(
            AND(EQ("status", "OK"), STARTSWITH("agent_id", "stock")),
            AND(EQ("status", "ERROR"), GT("duration_ms", 5000)),
        ).to_dict()
        assert payload["op"] == "OR"
        assert len(payload["conditions"]) == 2
        assert payload["conditions"][0]["op"] == "AND"
        assert payload["conditions"][1]["op"] == "AND"

    def test_time_range_filter(self):
        """Traces bounded by a start/end timestamp window."""
        payload = AND(
            GTE("start_time", "2025-01-01T00:00:00Z"),
            LTE("end_time", "2025-12-31T23:59:59Z"),
        ).to_dict()
        assert payload["op"] == "AND"
        assert payload["conditions"][0]["op"] == "GTE"
        assert payload["conditions"][1]["op"] == "LTE"

    def test_exclude_specific_sessions(self):
        """Successful traces excluding a known set of sessions."""
        payload = AND(
            EQ("status", "OK"),
            NOT(IN("session_id", ["test-session-1", "test-session-2"])),
        ).to_dict()
        assert payload["op"] == "AND"
        assert payload["conditions"][1]["op"] == "NOT"
        assert payload["conditions"][1]["condition"]["op"] == "IN"

    def test_workflow_traces_with_duration(self):
        """Workflow traces (workflow_id set) with a minimum duration."""
        payload = AND(
            NEQ("workflow_id", None),
            GTE("duration_ms", 1000),
        ).to_dict()
        assert payload["op"] == "AND"
        assert payload["conditions"][0]["op"] == "NEQ"

    def test_search_request_body_structure(self):
        """Shape of the search request body as the FE would send it."""
        filter_dict = AND(
            EQ("status", "OK"),
            CONTAINS("user_id", "admin"),
        ).to_dict()
        request_body = {
            "filter": filter_dict,
            "page": 1,
            "limit": 20,
        }
        assert "filter" in request_body
        assert request_body["filter"]["op"] == "AND"
        assert request_body["page"] == 1
        assert request_body["limit"] == 20

    def test_all_trace_filter_operators_roundtrip(self):
        """Every operator usable for trace filtering roundtrips through from_dict."""
        cases = [
            EQ("status", "OK"),
            NEQ("status", "ERROR"),
            GT("duration_ms", 100),
            GTE("duration_ms", 100),
            LT("duration_ms", 5000),
            LTE("duration_ms", 5000),
            IN("status", ["OK", "ERROR"]),
            CONTAINS("user_id", "admin"),
            STARTSWITH("name", "Agent"),
            AND(EQ("status", "OK"), GT("duration_ms", 0)),
            OR(EQ("agent_id", "a1"), EQ("agent_id", "a2")),
            NOT(EQ("status", "ERROR")),
        ]
        for case in cases:
            first = case.to_dict()
            second = from_dict(first).to_dict()
            assert first == second, f"Roundtrip failed for {case}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/test_filters.py",
"license": "Apache License 2.0",
"lines": 1122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/tools/parallel.py | import json
from os import getenv
from typing import Any, Dict, List, Optional
from agno.tools import Toolkit
from agno.utils.log import log_error
try:
from parallel import Parallel as ParallelClient
except ImportError:
raise ImportError("`parallel-web` not installed. Please install using `pip install parallel-web`")
class CustomJSONEncoder(json.JSONEncoder):
    """JSON encoder that falls back to ``str()`` for values json cannot serialize."""

    def default(self, obj):
        # Let the base encoder try first; anything it rejects (TypeError)
        # is rendered as its string representation instead of failing the dump.
        try:
            encoded = super().default(obj)
        except TypeError:
            encoded = str(obj)
        return encoded
class ParallelTools(Toolkit):
    """
    ParallelTools provides access to Parallel's web search and extraction APIs.

    Parallel offers powerful APIs optimized for AI agents:
    - Search API: AI-optimized web search that returns relevant excerpts tailored for LLMs
    - Extract API: Extract content from specific URLs in clean markdown format, handling JavaScript-heavy pages and PDFs

    Args:
        api_key (Optional[str]): Parallel API key. If not provided, will use PARALLEL_API_KEY environment variable.
        enable_search (bool): Enable Search API functionality. Default is True.
        enable_extract (bool): Enable Extract API functionality. Default is True.
        all (bool): Enable all tools. Overrides individual flags when True. Default is False.
        max_results (int): Default maximum number of results for search operations. Default is 10.
        max_chars_per_result (int): Default maximum characters per result for search operations. Default is 10000.
        beta_version (str): Beta API version header. Default is "search-extract-2025-10-10".
        mode (Optional[str]): Default search mode. Options: "one-shot" or "agentic". Default is None.
        include_domains (Optional[List[str]]): Default domains to restrict results to. Default is None.
        exclude_domains (Optional[List[str]]): Default domains to exclude from results. Default is None.
        max_age_seconds (Optional[int]): Default cache age threshold (minimum 600). Default is None.
        disable_cache_fallback (Optional[bool]): Default cache fallback behavior. Default is None.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        enable_search: bool = True,
        enable_extract: bool = True,
        all: bool = False,
        max_results: int = 10,
        max_chars_per_result: int = 10000,
        beta_version: str = "search-extract-2025-10-10",
        mode: Optional[str] = None,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        max_age_seconds: Optional[int] = None,
        disable_cache_fallback: Optional[bool] = None,
        **kwargs,
    ):
        # Resolve the API key from the argument or the environment.
        self.api_key: Optional[str] = api_key or getenv("PARALLEL_API_KEY")
        if not self.api_key:
            # Fail soft: only log instead of raising, so the toolkit can still be
            # constructed; requests made without a key will fail at call time.
            log_error("PARALLEL_API_KEY not set. Please set the PARALLEL_API_KEY environment variable.")

        # Per-instance defaults that every search/extract call falls back to.
        self.max_results = max_results
        self.max_chars_per_result = max_chars_per_result
        self.beta_version = beta_version
        self.mode = mode
        self.include_domains = include_domains
        self.exclude_domains = exclude_domains
        self.max_age_seconds = max_age_seconds
        self.disable_cache_fallback = disable_cache_fallback

        # The "parallel-beta" header opts the client into the beta search/extract API surface.
        self.parallel_client = ParallelClient(
            api_key=self.api_key, default_headers={"parallel-beta": self.beta_version}
        )

        # Register only the tools that were enabled (`all` overrides individual flags).
        tools: List[Any] = []
        if all or enable_search:
            tools.append(self.parallel_search)
        if all or enable_extract:
            tools.append(self.parallel_extract)

        super().__init__(name="parallel_tools", tools=tools, **kwargs)

    def parallel_search(
        self,
        objective: Optional[str] = None,
        search_queries: Optional[List[str]] = None,
        max_results: Optional[int] = None,
        max_chars_per_result: Optional[int] = None,
    ) -> str:
        """Use this function to search the web using Parallel's Search API with a natural language objective.

        You must provide at least one of objective or search_queries.

        Args:
            objective (Optional[str]): Natural-language description of what the web search is trying to find.
            search_queries (Optional[List[str]]): Traditional keyword queries with optional search operators.
            max_results (Optional[int]): Upper bound on results returned. Overrides constructor default.
            max_chars_per_result (Optional[int]): Upper bound on total characters per url for excerpts.

        Returns:
            str: A JSON formatted string containing the search results with URLs, titles, publish dates, and relevant excerpts.
        """
        try:
            # The API requires at least one search input; report the problem as a
            # JSON error payload rather than raising, matching the tool contract.
            if not objective and not search_queries:
                return json.dumps({"error": "Please provide at least one of: objective or search_queries"}, indent=2)

            # Use instance defaults if not provided
            final_max_results = max_results if max_results is not None else self.max_results

            # Build the request body incrementally: optional keys are only added
            # when set, so the wire payload never carries explicit nulls.
            search_params: Dict[str, Any] = {
                "max_results": final_max_results,
            }

            # Add objective if provided
            if objective:
                search_params["objective"] = objective

            # Add search_queries if provided
            if search_queries:
                search_params["search_queries"] = search_queries

            # Add mode from constructor default
            if self.mode:
                search_params["mode"] = self.mode

            # Add excerpts configuration
            excerpts_config: Dict[str, Any] = {}
            final_max_chars = max_chars_per_result if max_chars_per_result is not None else self.max_chars_per_result
            if final_max_chars is not None:
                excerpts_config["max_chars_per_result"] = final_max_chars
            if excerpts_config:
                search_params["excerpts"] = excerpts_config

            # Add source_policy from constructor defaults
            source_policy: Dict[str, Any] = {}
            if self.include_domains:
                source_policy["include_domains"] = self.include_domains
            if self.exclude_domains:
                source_policy["exclude_domains"] = self.exclude_domains
            if source_policy:
                search_params["source_policy"] = source_policy

            # Add fetch_policy from constructor defaults
            fetch_policy: Dict[str, Any] = {}
            if self.max_age_seconds is not None:
                fetch_policy["max_age_seconds"] = self.max_age_seconds
            if self.disable_cache_fallback is not None:
                fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
            if fetch_policy:
                search_params["fetch_policy"] = fetch_policy

            search_result = self.parallel_client.beta.search(**search_params)

            # Use model_dump() if available, otherwise convert to dict
            try:
                if hasattr(search_result, "model_dump"):
                    return json.dumps(search_result.model_dump(), cls=CustomJSONEncoder)
            except Exception:
                # model_dump() failed; fall through to the manual formatting below.
                pass

            # Manually format the results
            formatted_results: Dict[str, Any] = {
                "search_id": getattr(search_result, "search_id", ""),
                "results": [],
            }

            if hasattr(search_result, "results") and search_result.results:
                results_list: List[Dict[str, Any]] = []
                for result in search_result.results:
                    formatted_result: Dict[str, Any] = {
                        "title": getattr(result, "title", ""),
                        "url": getattr(result, "url", ""),
                        "publish_date": getattr(result, "publish_date", ""),
                        "excerpt": getattr(result, "excerpt", ""),
                    }
                    results_list.append(formatted_result)
                formatted_results["results"] = results_list

            # Pass through optional response metadata when present.
            if hasattr(search_result, "warnings"):
                formatted_results["warnings"] = search_result.warnings
            if hasattr(search_result, "usage"):
                formatted_results["usage"] = search_result.usage

            return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
        except Exception as e:
            # Any failure (network, SDK, serialization) is logged and returned
            # as a JSON error payload so the calling agent can read it.
            log_error(f"Error searching Parallel for objective '{objective}': {e}")
            return json.dumps({"error": f"Search failed: {str(e)}"}, indent=2)

    def parallel_extract(
        self,
        urls: List[str],
        objective: Optional[str] = None,
        search_queries: Optional[List[str]] = None,
        excerpts: bool = True,
        max_chars_per_excerpt: Optional[int] = None,
        full_content: bool = False,
        max_chars_for_full_content: Optional[int] = None,
    ) -> str:
        """Use this function to extract content from specific URLs using Parallel's Extract API.

        Args:
            urls (List[str]): List of public URLs to extract content from.
            objective (Optional[str]): Search focus to guide content extraction.
            search_queries (Optional[List[str]]): Keywords for targeting relevant content.
            excerpts (bool): Include relevant text snippets.
            max_chars_per_excerpt (Optional[int]): Upper bound on total characters per url. Only used when excerpts is True.
            full_content (bool): Include complete page text.
            max_chars_for_full_content (Optional[int]): Limit on characters per url. Only used when full_content is True.

        Returns:
            str: A JSON formatted string containing extracted content with titles, publish dates, excerpts and/or full content.
        """
        try:
            if not urls:
                return json.dumps({"error": "Please provide at least one URL to extract"}, indent=2)

            # Build the request body incrementally; optional keys are only added when set.
            extract_params: Dict[str, Any] = {
                "urls": urls,
            }

            # Add objective if provided
            if objective:
                extract_params["objective"] = objective

            # Add search_queries if provided
            if search_queries:
                extract_params["search_queries"] = search_queries

            # Add excerpts configuration: a dict with a char limit when one was
            # given, otherwise the plain boolean flag.
            if excerpts and max_chars_per_excerpt is not None:
                extract_params["excerpts"] = {"max_chars_per_result": max_chars_per_excerpt}
            else:
                extract_params["excerpts"] = excerpts

            # Add full_content configuration (same dict-or-boolean shape as excerpts).
            if full_content and max_chars_for_full_content is not None:
                extract_params["full_content"] = {"max_chars_per_result": max_chars_for_full_content}
            else:
                extract_params["full_content"] = full_content

            # Add fetch_policy from constructor defaults
            fetch_policy: Dict[str, Any] = {}
            if self.max_age_seconds is not None:
                fetch_policy["max_age_seconds"] = self.max_age_seconds
            if self.disable_cache_fallback is not None:
                fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
            if fetch_policy:
                extract_params["fetch_policy"] = fetch_policy

            extract_result = self.parallel_client.beta.extract(**extract_params)

            # Use model_dump() if available, otherwise convert to dict
            try:
                if hasattr(extract_result, "model_dump"):
                    return json.dumps(extract_result.model_dump(), cls=CustomJSONEncoder)
            except Exception:
                # model_dump() failed; fall through to the manual formatting below.
                pass

            # Manually format the results
            formatted_results: Dict[str, Any] = {
                "extract_id": getattr(extract_result, "extract_id", ""),
                "results": [],
                "errors": [],
            }

            if hasattr(extract_result, "results") and extract_result.results:
                results_list: List[Dict[str, Any]] = []
                for result in extract_result.results:
                    formatted_result: Dict[str, Any] = {
                        "url": getattr(result, "url", ""),
                        "title": getattr(result, "title", ""),
                        "publish_date": getattr(result, "publish_date", ""),
                    }
                    # Only include the content fields the caller asked for.
                    if excerpts and hasattr(result, "excerpts"):
                        formatted_result["excerpts"] = result.excerpts
                    if full_content and hasattr(result, "full_content"):
                        formatted_result["full_content"] = result.full_content
                    results_list.append(formatted_result)
                formatted_results["results"] = results_list

            # Pass through per-URL errors and optional response metadata when present.
            if hasattr(extract_result, "errors") and extract_result.errors:
                formatted_results["errors"] = extract_result.errors
            if hasattr(extract_result, "warnings"):
                formatted_results["warnings"] = extract_result.warnings
            if hasattr(extract_result, "usage"):
                formatted_results["usage"] = extract_result.usage

            return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
        except Exception as e:
            # Mirror parallel_search: log and return a JSON error payload.
            log_error(f"Error extracting from Parallel: {e}")
            return json.dumps({"error": f"Extract failed: {str(e)}"}, indent=2)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/parallel.py",
"license": "Apache License 2.0",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_parallel_tools.py | """Unit tests for ParallelTools"""
import json
from unittest.mock import Mock, patch
import pytest
from agno.tools.parallel import ParallelTools
@pytest.fixture
def mock_parallel_client():
    """Patch the Parallel SDK client class so no real API client is built."""
    with patch("agno.tools.parallel.ParallelClient") as patched_client:
        yield patched_client
@pytest.fixture
def parallel_tools(mock_parallel_client):
    """Build a ParallelTools instance backed by the patched SDK client."""
    # Set the env var as well so either credential path is satisfied.
    with patch.dict("os.environ", {"PARALLEL_API_KEY": "test-api-key"}):
        return ParallelTools(api_key="test-api-key")
def test_parallel_search(parallel_tools):
    """parallel_search should return the SDK payload serialized as JSON."""
    payload = {
        "search_id": "test-search-id",
        "results": [
            {
                "title": "Test Title",
                "url": "https://example.com",
                "publish_date": "2025-01-01",
                "excerpt": "Test excerpt content",
            }
        ],
    }
    fake_response = Mock()
    fake_response.model_dump = Mock(return_value=payload)
    parallel_tools.parallel_client.beta.search = Mock(return_value=fake_response)

    parsed = json.loads(parallel_tools.parallel_search(objective="Test objective"))

    assert parsed["search_id"] == "test-search-id"
    assert len(parsed["results"]) == 1
    assert parsed["results"][0]["title"] == "Test Title"
def test_parallel_search_with_queries(parallel_tools):
    """Explicit search_queries must be forwarded to the SDK call."""
    fake_response = Mock()
    fake_response.model_dump = Mock(return_value={"search_id": "test-id", "results": []})
    search_mock = Mock(return_value=fake_response)
    parallel_tools.parallel_client.beta.search = search_mock

    parallel_tools.parallel_search(objective="Test", search_queries=["query1", "query2"])

    # kwargs of the single recorded call must carry the queries verbatim.
    assert search_mock.call_args.kwargs["search_queries"] == ["query1", "query2"]
def test_parallel_search_error(parallel_tools):
    """SDK failures are reported as a JSON error payload, not raised."""
    parallel_tools.parallel_client.beta.search = Mock(side_effect=Exception("API Error"))

    parsed = json.loads(parallel_tools.parallel_search(objective="Test"))

    assert "error" in parsed
    assert "Search failed" in parsed["error"]
def test_parallel_extract(parallel_tools):
    """parallel_extract should return the SDK payload serialized as JSON."""
    payload = {
        "extract_id": "test-extract-id",
        "results": [
            {
                "url": "https://example.com",
                "title": "Test Title",
                "excerpts": ["Excerpt 1", "Excerpt 2"],
            }
        ],
        "errors": [],
    }
    fake_response = Mock()
    fake_response.model_dump = Mock(return_value=payload)
    parallel_tools.parallel_client.beta.extract = Mock(return_value=fake_response)

    parsed = json.loads(parallel_tools.parallel_extract(urls=["https://example.com"]))

    assert parsed["extract_id"] == "test-extract-id"
    assert len(parsed["results"]) == 1
    assert parsed["results"][0]["url"] == "https://example.com"
def test_parallel_extract_with_full_content(parallel_tools):
    """excerpts/full_content flags must be passed through to the SDK."""
    fake_response = Mock()
    fake_response.model_dump = Mock(return_value={"extract_id": "test-id", "results": [], "errors": []})
    extract_mock = Mock(return_value=fake_response)
    parallel_tools.parallel_client.beta.extract = extract_mock

    parallel_tools.parallel_extract(urls=["https://example.com"], excerpts=False, full_content=True)

    sent_kwargs = extract_mock.call_args.kwargs
    assert sent_kwargs["excerpts"] is False
    assert sent_kwargs["full_content"] is True
def test_parallel_extract_error(parallel_tools):
    """SDK failures are reported as a JSON error payload, not raised."""
    parallel_tools.parallel_client.beta.extract = Mock(side_effect=Exception("API Error"))

    parsed = json.loads(parallel_tools.parallel_extract(urls=["https://example.com"]))

    assert "error" in parsed
    assert "Extract failed" in parsed["error"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_parallel_tools.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/anthropic/test_betas.py | """Tests for Anthropic beta features support."""
import os
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from agno.agent import Agent
from agno.models.anthropic import Claude
def _create_mock_response():
"""Create a properly structured mock response."""
mock_content_block = MagicMock()
mock_content_block.type = "text"
mock_content_block.text = "Test response"
mock_content_block.citations = None
# Create a proper usage mock with all required attributes
mock_usage = MagicMock()
mock_usage.input_tokens = 10
mock_usage.output_tokens = 20
mock_usage.cache_creation_input_tokens = None
mock_usage.cache_read_input_tokens = None
mock_response = MagicMock()
mock_response.id = "msg_test123"
mock_response.model = "claude-sonnet-4-5-20250929"
mock_response.role = "assistant"
mock_response.stop_reason = "end_turn"
mock_response.content = [mock_content_block]
mock_response.usage = mock_usage
return mock_response
@pytest.fixture
def mock_anthropic_client():
    """Replace AnthropicClient with a mock exposing both message endpoints."""
    with patch("agno.models.anthropic.claude.AnthropicClient") as client_cls:
        client = MagicMock()
        client.is_closed.return_value = False
        # Both the beta and the regular endpoint return a canned response;
        # individual tests assert which one was actually invoked.
        client.beta.messages = MagicMock()
        client.messages = MagicMock()
        client.beta.messages.create.return_value = _create_mock_response()
        client.messages.create.return_value = _create_mock_response()
        client_cls.return_value = client
        yield client
@pytest.fixture(scope="module")
def claude_model():
"""Fixture that provides a Claude model and reuses it across all tests in the module."""
return Claude(id="claude-sonnet-4-20250514", betas=["context-1m-2025-08-07"])
def test_betas_parameter_in_request_params():
    """Explicitly supplied betas must appear verbatim in request params."""
    requested_betas = ["context-1m-2025-08-07", "custom-beta-feature"]
    model = Claude(id="claude-sonnet-4-20250514", betas=requested_betas)

    params = model.get_request_params()

    assert "betas" in params
    assert params["betas"] == requested_betas
def test_no_betas_parameter_when_not_provided():
    """Without betas, the request params must omit the key entirely."""
    params = Claude(id="claude-sonnet-4-20250514").get_request_params()
    assert "betas" not in params
def test_has_beta_features_with_betas():
    """_has_beta_features reports True once any beta flag is configured."""
    model = Claude(id="claude-sonnet-4-20250514", betas=["context-1m-2025-08-07"])
    assert model._has_beta_features() is True
def test_has_beta_features_without_betas():
    """_has_beta_features reports False for a plain model configuration."""
    assert Claude(id="claude-sonnet-4-20250514")._has_beta_features() is False
def test_beta_client_used_when_betas_provided(mock_anthropic_client):
    """With betas configured, requests must go through the beta endpoint only."""
    requested_betas = ["context-1m-2025-08-07"]
    agent = Agent(model=Claude(id="claude-sonnet-4-20250514", betas=requested_betas), telemetry=False)

    agent.run("Test message")

    # Beta endpoint hit exactly once; regular endpoint untouched.
    mock_anthropic_client.beta.messages.create.assert_called_once()
    mock_anthropic_client.messages.create.assert_not_called()
    # The betas kwarg must have been forwarded verbatim.
    sent_kwargs = mock_anthropic_client.beta.messages.create.call_args.kwargs
    assert "betas" in sent_kwargs
    assert sent_kwargs["betas"] == requested_betas
def test_regular_client_used_without_betas(mock_anthropic_client):
    """Without betas, only the standard messages endpoint is used."""
    agent = Agent(model=Claude(id="claude-sonnet-4-20250514"), telemetry=False)

    agent.run("Test message")

    # Regular endpoint hit exactly once; beta endpoint untouched.
    mock_anthropic_client.messages.create.assert_called_once()
    mock_anthropic_client.beta.messages.create.assert_not_called()
    # And no betas kwarg should have been sent at all.
    assert "betas" not in mock_anthropic_client.messages.create.call_args.kwargs
def test_multiple_betas():
    """All configured beta flags are forwarded, preserving count and order."""
    requested_betas = ["context-1m-2025-08-07", "feature-a", "feature-b"]
    params = Claude(id="claude-sonnet-4-20250514", betas=requested_betas).get_request_params()

    assert params["betas"] == requested_betas
    assert len(params["betas"]) == 3
def test_betas_with_skills():
    """Skills add their required betas on top of user-supplied ones."""
    model = Claude(
        id="claude-sonnet-4-20250514",
        betas=["custom-beta"],
        skills=[{"type": "anthropic", "skill_id": "pptx", "version": "latest"}],
    )

    # Skills alone are enough to flip the beta-features switch.
    assert model._has_beta_features() is True

    params = model.get_request_params()
    assert "betas" in params
    # The user beta plus the two betas the skills machinery injects.
    for expected_beta in ("custom-beta", "code-execution-2025-08-25", "skills-2025-10-02"):
        assert expected_beta in params["betas"]
@pytest.mark.integration
def test_betas_with_real_client(claude_model):
    """Smoke test: a betas-enabled model completes a real API run."""
    agent = Agent(model=claude_model, telemetry=False)

    # The fixture must have configured betas before we make the call.
    assert agent.model.betas is not None  # type: ignore

    result = agent.run("What is 2+2? Answer in one sentence.")

    assert result is not None, "Response should not be None"
    assert result.content is not None, "Response content should not be None"
@pytest.mark.asyncio
async def test_async_beta_client_used_when_betas_provided():
    """Async runs with betas must go through the async beta endpoint only."""
    with patch("agno.models.anthropic.claude.AsyncAnthropicClient") as async_client_cls:
        async_client = MagicMock()
        async_client.is_closed.return_value = False
        async_client.beta.messages = MagicMock()
        async_client.messages = MagicMock()
        # create() is awaited by the model, so both endpoints need AsyncMock.
        async_client.beta.messages.create = AsyncMock(return_value=_create_mock_response())
        async_client.messages.create = AsyncMock(return_value=_create_mock_response())
        async_client_cls.return_value = async_client

        requested_betas = ["context-1m-2025-08-07"]
        agent = Agent(model=Claude(id="claude-sonnet-4-20250514", betas=requested_betas), telemetry=False)

        await agent.arun("Test message")

        # Beta endpoint hit exactly once; regular endpoint untouched.
        async_client.beta.messages.create.assert_called_once()
        async_client.messages.create.assert_not_called()
        # The betas kwarg must have been forwarded verbatim.
        sent_kwargs = async_client.beta.messages.create.call_args.kwargs
        assert "betas" in sent_kwargs
        assert sent_kwargs["betas"] == requested_betas
@pytest.mark.integration
@pytest.mark.asyncio
@pytest.mark.skipif(
    # os is stdlib and always importable — a plain os.getenv is the right tool
    # here (the previous pytest.importorskip("os") misused an API meant for
    # optional third-party dependencies).
    not os.getenv("ANTHROPIC_API_KEY"),
    reason="ANTHROPIC_API_KEY not set - skipping real API test",
)
async def test_betas_with_real_client_async(claude_model):
    """Test that betas work with a real async client.

    This integration test makes a real async API call to Anthropic to verify that:
    1. The beta API endpoint is successfully invoked asynchronously
    2. The response is properly formatted
    3. No errors occur when using beta features with async operations

    Note: Requires ANTHROPIC_API_KEY to be set in environment.
    """
    agent = Agent(model=claude_model, telemetry=False)
    # Use a simple message to minimize token usage
    response = await agent.arun("What is 2+2? Answer in one sentence.")
    # Verify response structure
    assert response is not None, "Response should not be None"
    assert response.content is not None, "Response content should not be None"
    assert len(response.content) > 0, "Response content should not be empty"
    # Verify we got a meaningful response
    assert isinstance(response.content, str), "Response content should be a string"
    assert len(response.content.strip()) > 0, "Response should contain non-empty content"
    # Verify the model was set correctly
    assert response.model is not None, "Response model should not be None"
    assert response.model == "claude-sonnet-4-20250514" or response.model.startswith("claude-"), (
        f"Expected Claude model, got {response.model}"
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/anthropic/test_betas.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/workflows/test_workflow_metrics.py | """Integration tests for Workflow metrics including duration tracking."""
import asyncio
import pytest
from agno.models.metrics import RunMetrics
from agno.run.workflow import WorkflowCompletedEvent
from agno.workflow import Condition, Parallel, Step, StepInput, StepOutput, Workflow
from agno.workflow.types import WorkflowMetrics
# Helper functions
def research_step(step_input: StepInput) -> StepOutput:
    """Deterministic research step: echo the workflow input back as content."""
    content = f"Research: {step_input.input}"
    return StepOutput(content=content)
def analysis_step(step_input: StepInput) -> StepOutput:
    """Summarize the previous step's output (empty string when there is none)."""
    previous = step_input.previous_step_content or ""
    return StepOutput(content=f"Analysis of: {previous}")
def error_step(step_input: StepInput) -> StepOutput:
    """Always fail, so tests can observe metrics for an errored step."""
    raise ValueError("Intentional test error")
async def async_research_step(step_input: StepInput) -> StepOutput:
    """Async variant of the research step; yields control to the loop briefly."""
    await asyncio.sleep(0.001)  # minimal delay, keeps the test fast
    return StepOutput(content=f"Async Research: {step_input.input}")
# Condition evaluators
def condition_true(step_input: StepInput) -> bool:
    """Evaluator that always routes execution into the conditional branch."""
    return True
def condition_false(step_input: StepInput) -> bool:
    """Evaluator that always skips the conditional branch."""
    return False
def test_workflow_duration_with_agent(shared_db, test_agent):
    """Workflow-level duration must be recorded for an agent-backed step."""
    test_agent.instructions = "Respond with 'Hello World'"
    workflow = Workflow(
        name="Agent Duration Test",
        db=shared_db,
        steps=[Step(name="agent_step", agent=test_agent)],
    )

    metrics = workflow.run(input="test").metrics

    # Workflow-level metrics carry a positive float duration.
    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert isinstance(metrics.duration, float)
    assert metrics.duration > 0

    # The agent step (when reported) carries its own RunMetrics duration.
    assert len(metrics.steps) > 0
    if "agent_step" in metrics.steps:
        step_metrics = metrics.steps["agent_step"].metrics
        assert step_metrics is not None
        assert isinstance(step_metrics, RunMetrics)
        assert step_metrics.duration is not None
        assert step_metrics.duration > 0
def test_workflow_duration_agent_and_function(shared_db, test_agent):
    """Duration is tracked for a pipeline mixing function and agent steps."""
    test_agent.instructions = "Analyze the input"
    workflow = Workflow(
        name="Mixed Steps Duration Test",
        db=shared_db,
        steps=[
            Step(name="research", executor=research_step),
            Step(name="agent_analysis", agent=test_agent),
            Step(name="final", executor=analysis_step),
        ],
    )

    metrics = workflow.run(input="test topic").metrics

    # Workflow-level duration is present and positive.
    assert metrics is not None
    assert metrics.duration is not None
    assert metrics.duration > 0

    # The agent step (when reported) has its own positive duration.
    if "agent_analysis" in metrics.steps:
        agent_metrics = metrics.steps["agent_analysis"].metrics
        assert agent_metrics.duration is not None
        assert agent_metrics.duration > 0
def test_workflow_duration_with_team(shared_db, test_team):
    """Workflow-level duration must be recorded for a team-backed step."""
    test_team.members[0].instructions = "Respond with team analysis"
    workflow = Workflow(
        name="Team Duration Test",
        db=shared_db,
        steps=[Step(name="team_step", team=test_team)],
    )

    metrics = workflow.run(input="test").metrics

    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert metrics.duration > 0

    # The team step (when reported) carries its own positive duration.
    if "team_step" in metrics.steps:
        team_metrics = metrics.steps["team_step"].metrics
        assert team_metrics is not None
        assert team_metrics.duration is not None
        assert team_metrics.duration > 0
def test_workflow_duration_on_error(shared_db):
    """Duration is still recorded when the only step raises."""
    workflow = Workflow(
        name="Error Duration Test",
        db=shared_db,
        steps=[Step(name="error_step", executor=error_step)],
    )

    # The workflow swallows step errors internally (with retries) instead of raising.
    workflow.run(input="test")

    session = workflow.get_session()
    assert session is not None
    assert len(session.runs) > 0
    last_run = session.runs[-1]

    # The failed step is recorded with success=False.
    assert last_run.step_results is not None
    assert len(last_run.step_results) > 0
    assert last_run.step_results[-1].success is False

    # Metrics (including a float duration) exist despite the failure.
    assert last_run.metrics is not None
    assert isinstance(last_run.metrics, WorkflowMetrics)
    assert last_run.metrics.duration is not None
    assert isinstance(last_run.metrics.duration, float)
    assert last_run.metrics.duration >= 0
def test_workflow_duration_partial_error(shared_db, test_agent):
    """Duration is recorded when a later step fails after a successful one."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Partial Error Test",
        db=shared_db,
        steps=[
            Step(name="agent_step", agent=test_agent),
            Step(name="error_step", executor=error_step),
        ],
    )

    # Errors are handled internally (with retries); run() does not raise.
    workflow.run(input="test")

    last_run = workflow.get_session().runs[-1]

    assert last_run.step_results is not None
    assert len(last_run.step_results) >= 2

    # The error step specifically must be marked as failed.
    failed_results = [s for s in last_run.step_results if s.step_name == "error_step"]
    assert len(failed_results) > 0
    assert failed_results[-1].success is False

    # Duration is still tracked for the run as a whole.
    assert last_run.metrics is not None
    assert isinstance(last_run.metrics, WorkflowMetrics)
    assert last_run.metrics.duration is not None
    assert last_run.metrics.duration > 0
def test_workflow_duration_streaming_with_agent(shared_db, test_agent):
    """Streaming runs must also record workflow duration."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Streaming Duration Test",
        db=shared_db,
        steps=[Step(name="agent_step", agent=test_agent)],
    )

    events = list(workflow.run(input="test", stream=True))

    # Exactly one completion event must have been emitted.
    completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completed) == 1

    # Metrics come from the persisted run, since streaming yields events.
    last_run = workflow.get_session().runs[-1]
    assert last_run.metrics is not None
    assert isinstance(last_run.metrics, WorkflowMetrics)
    assert last_run.metrics.duration is not None
    assert last_run.metrics.duration > 0
@pytest.mark.asyncio
async def test_workflow_duration_async_with_agent(shared_db, test_agent):
    """Async runs record workflow- and step-level durations."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Async Agent Duration Test",
        db=shared_db,
        steps=[Step(name="agent_step", agent=test_agent)],
    )

    run_output = await workflow.arun(input="test")
    metrics = run_output.metrics

    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert metrics.duration > 0

    # The agent step (when reported) carries its own positive duration.
    if "agent_step" in metrics.steps:
        step_metrics = metrics.steps["agent_step"].metrics
        assert step_metrics.duration is not None
        assert step_metrics.duration > 0
@pytest.mark.asyncio
async def test_workflow_duration_async_streaming(shared_db, test_agent):
    """Async streaming runs must also record workflow duration."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Async Streaming Duration Test",
        db=shared_db,
        steps=[Step(name="agent_step", agent=test_agent)],
    )

    events = [event async for event in workflow.arun(input="test", stream=True)]

    # Exactly one completion event must have been emitted.
    completed = [e for e in events if isinstance(e, WorkflowCompletedEvent)]
    assert len(completed) == 1

    # Metrics come from the persisted run, since streaming yields events.
    last_run = workflow.get_session().runs[-1]
    assert last_run.metrics is not None
    assert isinstance(last_run.metrics, WorkflowMetrics)
    assert last_run.metrics.duration is not None
    assert last_run.metrics.duration > 0
@pytest.mark.asyncio
async def test_workflow_duration_async_function(shared_db):
    """Duration is tracked for a purely async-function step."""
    workflow = Workflow(
        name="Async Function Duration Test",
        db=shared_db,
        steps=[Step(name="async_research", executor=async_research_step)],
    )

    run_output = await workflow.arun(input="test")
    metrics = run_output.metrics

    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert metrics.duration >= 0
def test_workflow_duration_parallel_with_agent(shared_db, test_agent):
    """Duration is tracked when steps run inside a Parallel group."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Parallel Agent Duration Test",
        db=shared_db,
        steps=[
            Parallel(
                Step(name="agent_step", agent=test_agent),
                Step(name="research_step", executor=research_step),
            )
        ],
    )

    metrics = workflow.run(input="test").metrics

    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert metrics.duration > 0

    # The agent step (when reported) carries its own positive duration.
    if "agent_step" in metrics.steps:
        step_metrics = metrics.steps["agent_step"].metrics
        assert step_metrics.duration is not None
        assert step_metrics.duration > 0
def test_workflow_duration_condition_true_with_agent(shared_db, test_agent):
    """Duration is tracked when a conditional branch actually executes."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Condition True Duration Test",
        db=shared_db,
        steps=[
            Condition(
                evaluator=condition_true,
                steps=[Step(name="agent_step", agent=test_agent)],
            )
        ],
    )

    metrics = workflow.run(input="test").metrics

    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert metrics.duration > 0

    # The agent step (when reported) carries its own positive duration.
    if "agent_step" in metrics.steps:
        step_metrics = metrics.steps["agent_step"].metrics
        assert step_metrics.duration is not None
        assert step_metrics.duration > 0
def test_workflow_duration_condition_false_with_agent(shared_db, test_agent):
    """A skipped conditional branch still yields a workflow duration."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Condition False Duration Test",
        db=shared_db,
        steps=[
            Condition(
                evaluator=condition_false,
                steps=[Step(name="skipped_agent_step", agent=test_agent)],
            )
        ],
    )

    metrics = workflow.run(input="test").metrics

    assert metrics is not None
    assert isinstance(metrics, WorkflowMetrics)
    assert metrics.duration is not None
    assert metrics.duration >= 0
    # The branch never ran, so its step must not appear in the metrics.
    assert "skipped_agent_step" not in metrics.steps
def test_workflow_metrics_serialization(shared_db, test_agent):
    """WorkflowMetrics must survive a to_dict/from_dict round trip."""
    test_agent.instructions = "Respond with analysis"
    workflow = Workflow(
        name="Serialization Test",
        db=shared_db,
        steps=[Step(name="agent_step", agent=test_agent)],
    )

    original_metrics = workflow.run(input="test").metrics
    serialized = original_metrics.to_dict()

    # Duration and per-step metrics both appear in the dict form.
    assert "duration" in serialized
    assert isinstance(serialized["duration"], float)
    assert serialized["duration"] > 0
    assert "steps" in serialized
    assert isinstance(serialized["steps"], dict)

    restored = WorkflowMetrics.from_dict(serialized)

    # The round trip preserves the duration exactly.
    assert restored.duration == original_metrics.duration
    assert isinstance(restored.duration, float)

    # Step metrics (when present) survive the round trip too.
    if "agent_step" in original_metrics.steps:
        assert "agent_step" in restored.steps
        assert restored.steps["agent_step"].metrics is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/workflows/test_workflow_metrics.py",
"license": "Apache License 2.0",
"lines": 331,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/aimlapi/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.aimlapi import AIMLAPI
# Structured-output schema for the tests below. The Field descriptions are
# runtime behavior: they are sent to the model as JSON-schema guidance.
# NOTE(review): deliberately no class docstring — pydantic would surface it as
# the schema "description", changing the payload sent to the provider.
class MovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
    """The agent should return a fully-populated MovieScript instance."""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )

    script = agent.run("New York").content

    assert script is not None
    # Every string field must deserialize as str, characters as a list.
    for text_field in ("setting", "ending", "genre", "name", "storyline"):
        assert isinstance(getattr(script, text_field), str)
    assert isinstance(script.characters, List)
def test_structured_response_with_enum_fields():
    """Enum-typed schema fields must deserialize into enum members."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )

    recipe = agent.run("Generate a recipe name and rating.").content

    assert recipe is not None
    assert isinstance(recipe.rating, Grade)
    assert isinstance(recipe.recipe_name, str)
def test_structured_response_strict_output_false():
    """Test structured response with strict_output=False (guided mode)"""
    agent = Agent(
        model=AIMLAPI(id="gpt-4o-mini", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    assert agent.run("Create a short action movie").content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/aimlapi/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/azure/ai_foundry/test_structured_response.py | import enum
from typing import Dict, List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.azure import AzureAIFoundry
# Structured-output schema for the tests below; includes a dict-valued field
# to exercise nested-object deserialization. Field descriptions are runtime
# behavior: they are sent to the model as JSON-schema guidance.
# NOTE(review): deliberately no class docstring — pydantic would surface it as
# the schema "description", changing the payload sent to the provider.
class MovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
    rating: Dict[str, int] = Field(
        ...,
        description="Your own rating of the movie. 1-10. Return a dictionary with the keys 'story' and 'acting'.",
    )
def test_structured_response_with_dict_fields():
    """All fields — including the dict-valued rating — must round-trip."""
    agent = Agent(
        model=AzureAIFoundry(id="gpt-4o-mini"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )

    script = agent.run("New York").content

    assert script is not None
    assert isinstance(script.rating, Dict)
    for text_field in ("setting", "ending", "genre", "name", "storyline"):
        assert isinstance(getattr(script, text_field), str)
    assert isinstance(script.characters, List)
def test_structured_response_with_enum_fields():
    """Enum-typed schema fields must deserialize into enum members."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=AzureAIFoundry(id="gpt-4o"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )

    recipe = agent.run("Generate a recipe name and rating.").content

    assert recipe is not None
    assert isinstance(recipe.rating, Grade)
    assert isinstance(recipe.recipe_name, str)
def test_structured_response_strict_output_false():
    """Test structured response with strict_output=False (guided mode)"""
    agent = Agent(
        model=AzureAIFoundry(id="gpt-4o-mini", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    assert agent.run("Create a short action movie").content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/azure/ai_foundry/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/azure/openai/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.azure import AzureOpenAI
# Structured-output schema for the tests below. The Field descriptions are
# runtime behavior: they are sent to the model as JSON-schema guidance.
# NOTE(review): deliberately no class docstring — pydantic would surface it as
# the schema "description", changing the payload sent to the provider.
class MovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=AzureOpenAI(id="gpt-4o-mini"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=AzureOpenAI(id="gpt-4o"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=AzureOpenAI(id="gpt-4o-mini", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/azure/openai/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/cerebras/cerebras_openai/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.cerebras import CerebrasOpenAI
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=CerebrasOpenAI(id="gpt-oss-120b"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=CerebrasOpenAI(id="gpt-oss-120b"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=CerebrasOpenAI(id="gpt-oss-120b", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cerebras/cerebras_openai/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/cerebras/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.cerebras import Cerebras
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=Cerebras(id="qwen-3-32b"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=Cerebras(id="qwen-3-32b"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
class MovieScriptWithDict(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
genre: str = Field(..., description="Genre of the movie.")
name: str = Field(..., description="Give a name to this movie")
guided_output_agent = Agent(
model=Cerebras(id="qwen-3-32b", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScriptWithDict,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
def test_structured_response_with_nested_objects():
"""Test structured response with nested objects - validates additionalProperties: false fix.
This test ensures that the _ensure_additional_properties_false method correctly
handles nested object schemas, which is required by the Cerebras API.
"""
class Address(BaseModel):
city: str = Field(..., description="City name")
country: str = Field(..., description="Country name")
class Person(BaseModel):
name: str = Field(..., description="Person's full name")
age: int = Field(..., description="Person's age")
address: Address = Field(..., description="Person's address")
agent = Agent(
model=Cerebras(id="zai-glm-4.7"),
description="You generate person profiles.",
output_schema=Person,
)
response = agent.run("Create a profile for a 30-year-old software engineer living in San Francisco")
assert response.content is not None
assert isinstance(response.content.name, str)
assert isinstance(response.content.age, int)
assert isinstance(response.content.address, Address)
assert isinstance(response.content.address.city, str)
assert isinstance(response.content.address.country, str)
def test_structured_response_with_list_of_objects():
"""Test structured response with a list of objects - validates additionalProperties: false fix.
This test ensures that array items with object types also get additionalProperties: false.
"""
class Task(BaseModel):
title: str = Field(..., description="Task title")
completed: bool = Field(..., description="Whether the task is completed")
class TodoList(BaseModel):
name: str = Field(..., description="Name of the todo list")
tasks: List[Task] = Field(..., description="List of tasks")
agent = Agent(
model=Cerebras(id="zai-glm-4.7"),
description="You create todo lists.",
output_schema=TodoList,
)
response = agent.run("Create a todo list for a weekend trip with 3 tasks")
assert response.content is not None
assert isinstance(response.content.name, str)
assert isinstance(response.content.tasks, list)
assert len(response.content.tasks) > 0
for task in response.content.tasks:
assert isinstance(task, Task)
assert isinstance(task.title, str)
assert isinstance(task.completed, bool)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cerebras/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/cometapi/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.cometapi import CometAPI
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=CometAPI(id="gpt-5-mini"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=CometAPI(id="gpt-5-mini"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=CometAPI(id="gpt-5-mini", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/cometapi/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/dashscope/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.dashscope import DashScope
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=DashScope(id="qwen-plus"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=DashScope(id="qwen-plus"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=DashScope(id="qwen-plus", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/dashscope/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/deepinfra/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.deepinfra import DeepInfra
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=DeepInfra(id="meta-llama/Llama-2-70b-chat-hf"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=DeepInfra(id="meta-llama/Llama-2-70b-chat-hf"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=DeepInfra(id="meta-llama/Llama-2-70b-chat-hf", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/deepinfra/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/deepseek/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.deepseek import DeepSeek
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=DeepSeek(id="deepseek-chat"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=DeepSeek(id="deepseek-chat"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=DeepSeek(id="deepseek-chat", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/deepseek/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/fireworks/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.fireworks import Fireworks
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=Fireworks(id="accounts/fireworks/models/llama-v3p1-405b-instruct"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=Fireworks(id="accounts/fireworks/models/llama-v3p1-405b-instruct"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=Fireworks(id="accounts/fireworks/models/llama-v3p1-8b-instruct", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/fireworks/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/langdb/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.langdb import LangDB
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=LangDB(id="gemini-1.5-pro-latest"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=LangDB(id="gemini-1.5-pro-latest"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=LangDB(id="gemini-1.5-pro-latest", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/langdb/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/lmstudio/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.lmstudio import LMStudio
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=LMStudio(id="qwen2.5-7b-instruct-1m"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=LMStudio(id="qwen2.5-7b-instruct-1m"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=LMStudio(id="qwen2.5-7b-instruct-1m", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/lmstudio/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/meta/llama_openai/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.meta import LlamaOpenAI
class MovieScript(BaseModel):
setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")
def test_structured_response():
structured_output_agent = Agent(
model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
response = structured_output_agent.run("New York")
assert response.content is not None
assert isinstance(response.content.setting, str)
assert isinstance(response.content.ending, str)
assert isinstance(response.content.genre, str)
assert isinstance(response.content.name, str)
assert isinstance(response.content.characters, List)
assert isinstance(response.content.storyline, str)
def test_structured_response_with_enum_fields():
class Grade(enum.Enum):
A_PLUS = "a+"
A = "a"
B = "b"
C = "c"
D = "d"
F = "f"
class Recipe(BaseModel):
recipe_name: str
rating: Grade
structured_output_agent = Agent(
model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
description="You help generate recipe names and ratings.",
output_schema=Recipe,
)
response = structured_output_agent.run("Generate a recipe name and rating.")
assert response.content is not None
assert isinstance(response.content.rating, Grade)
assert isinstance(response.content.recipe_name, str)
def test_structured_response_strict_output_false():
"""Test structured response with strict_output=False (guided mode)"""
guided_output_agent = Agent(
model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8", strict_output=False),
description="You write movie scripts.",
output_schema=MovieScript,
)
response = guided_output_agent.run("Create a short action movie")
assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/meta/llama_openai/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/nebius/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.nebius import Nebius
NEBIUS_MODEL_ID = "Qwen/Qwen3-4B-fast"
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=Nebius(id=NEBIUS_MODEL_ID, strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/nebius/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/nvidia/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.nvidia import Nvidia
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=Nvidia(id="meta/llama-3.3-70b-instruct"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=Nvidia(id="meta/llama-3.3-70b-instruct"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=Nvidia(id="meta/llama-3.1-8b-instruct", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/nvidia/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/openrouter/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.openrouter import OpenRouter
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=OpenRouter(id="gpt-4o"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=OpenRouter(id="gpt-4o"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=OpenRouter(id="gpt-4o", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/openrouter/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/perplexity/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.perplexity import Perplexity
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=Perplexity(id="sonar"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=Perplexity(id="sonar"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=Perplexity(id="sonar", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/perplexity/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/portkey/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.portkey import Portkey
PORTKEY_MODEL_ID = "gpt-4o-mini"
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=Portkey(id=PORTKEY_MODEL_ID, strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/portkey/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/sambanova/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.sambanova import Sambanova
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=Sambanova(id="Meta-Llama-3.3-70B-Instruct"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=Sambanova(id="Meta-Llama-3.3-70B-Instruct"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=Sambanova(id="Meta-Llama-3.1-8B-Instruct", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/sambanova/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/together/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.together import Together
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=Together(id="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=Together(id="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=Together(id="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/together/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/vercel/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.vercel import V0
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=V0(id="v0-1.0-md"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=V0(id="v0-1.0-md"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=V0(id="v0-1.0-md", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vercel/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/vllm/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.vllm import VLLM
VLLM_MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=VLLM(id=VLLM_MODEL_ID, strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/vllm/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/models/xai/test_structured_response.py | import enum
from typing import List
from pydantic import BaseModel, Field
from agno.agent import Agent
from agno.models.xai import xAI
class MovieScript(BaseModel):
    """Structured schema the model is asked to fill in for a movie pitch."""

    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(..., description="Genre of the movie. If not available, select action, thriller or romantic comedy.")
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")


def test_structured_response():
    """A plain run should come back as a fully-typed MovieScript."""
    agent = Agent(
        model=xAI(id="grok-2-latest"),
        description="You help people write movie scripts.",
        output_schema=MovieScript,
    )
    script = agent.run("New York").content
    assert script is not None
    expected_types = {
        "setting": str,
        "ending": str,
        "genre": str,
        "name": str,
        "characters": List,
        "storyline": str,
    }
    for attr, attr_type in expected_types.items():
        assert isinstance(getattr(script, attr), attr_type)


def test_structured_response_with_enum_fields():
    """Enum-typed fields must be parsed back into the enum member."""

    class Grade(enum.Enum):
        A_PLUS = "a+"
        A = "a"
        B = "b"
        C = "c"
        D = "d"
        F = "f"

    class Recipe(BaseModel):
        recipe_name: str
        rating: Grade

    agent = Agent(
        model=xAI(id="grok-2-latest"),
        description="You help generate recipe names and ratings.",
        output_schema=Recipe,
    )
    recipe = agent.run("Generate a recipe name and rating.").content
    assert recipe is not None
    assert isinstance(recipe.recipe_name, str)
    assert isinstance(recipe.rating, Grade)


def test_structured_response_strict_output_false():
    """Structured output should still parse in guided mode (strict_output=False)."""
    agent = Agent(
        model=xAI(id="grok-3-mini-fast", strict_output=False),
        description="You write movie scripts.",
        output_schema=MovieScript,
    )
    result = agent.run("Create a short action movie")
    assert result.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/models/xai/test_structured_response.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/test_basic.py | import pytest
from agno.agent.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.run.agent import RunOutput
from agno.team.team import Team
from agno.vectordb.chroma import ChromaDb
@pytest.fixture
def vector_db():
    """Provide a temporary Chroma vector store for a single test."""
    db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    yield db
    # Teardown: drop the collection created for this test.
    db.drop()


def test_basic_with_no_import_errors(shared_db, vector_db):
    """Smoke test: wire up an agent and a team and run one prompt without import errors."""
    knowledge = Knowledge(vector_db=vector_db, contents_db=shared_db)
    member = Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        knowledge=knowledge,
        db=shared_db,
        markdown=True,
        telemetry=False,
    )
    team = Team(
        members=[member],
        model=OpenAIChat(id="gpt-4o-mini"),
        db=shared_db,
        markdown=True,
        telemetry=False,
    )
    response: RunOutput = team.run("Share a 2 sentence horror story")
    assert response.content is not None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/test_basic.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/test_os_basic.py | import pytest
from fastapi.testclient import TestClient
from agno.agent.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.vectordb.chroma import ChromaDb
@pytest.fixture
def vector_db():
    """Provide a temporary Chroma vector store for a single test."""
    db = ChromaDb(collection="vectors", path="tmp/chromadb", persistent_client=True)
    yield db
    # Teardown: drop the collection created for this test.
    db.drop()


@pytest.fixture
def agent(shared_db, vector_db):
    """Build an OpenAI-backed agent wired to the shared DB and vector store."""
    kb = Knowledge(vector_db=vector_db, contents_db=shared_db)
    return Agent(
        model=OpenAIChat(id="gpt-4o-mini"),
        knowledge=kb,
        db=shared_db,
        markdown=True,
        telemetry=False,
    )


@pytest.fixture
def test_os_client(agent: Agent):
    """Expose an AgentOS app wrapping the agent through a FastAPI test client."""
    os_app = AgentOS(agents=[agent]).get_app()
    return TestClient(os_app)


def test_basic(test_os_client, agent):
    """Minimal end-to-end check: posting a message to the run endpoint succeeds."""
    resp = test_os_client.post(
        f"/agents/{agent.id}/runs",
        data={"message": "Hello, world!"},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    assert resp.status_code == 200
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/test_os_basic.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/knowledge/embedder/vllm.py | import asyncio
from dataclasses import dataclass
from os import getenv
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from agno.knowledge.embedder.base import Embedder
from agno.utils.log import logger
try:
from vllm import LLM # type: ignore
from vllm.outputs import EmbeddingRequestOutput # type: ignore
except ImportError:
raise ImportError("`vllm` not installed. Please install using `pip install vllm`.")
if TYPE_CHECKING:
from openai import AsyncOpenAI
from openai import OpenAI as OpenAIClient
from openai.types.create_embedding_response import CreateEmbeddingResponse
@dataclass
class VLLMEmbedder(Embedder):
"""
VLLM Embedder supporting both local and remote deployment modes.
Local Mode (default):
- Loads model locally and runs inference on your GPU/CPU
- No API key required
- Example: VLLMEmbedder(id="intfloat/e5-mistral-7b-instruct")
Remote Mode:
- Connects to a remote vLLM server via OpenAI-compatible API
- Uses OpenAI SDK to communicate with vLLM's OpenAI-compatible endpoint
- Requires base_url and optionally api_key
- Example: VLLMEmbedder(base_url="http://localhost:8000/v1", api_key="your-key")
- Ref: https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
"""
id: str = "sentence-transformers/all-MiniLM-L6-v2"
dimensions: int = 4096
# Local mode parameters
enforce_eager: bool = True
vllm_kwargs: Optional[Dict[str, Any]] = None
vllm_client: Optional[LLM] = None
# Remote mode parameters
api_key: Optional[str] = getenv("VLLM_API_KEY")
base_url: Optional[str] = None
request_params: Optional[Dict[str, Any]] = None
client_params: Optional[Dict[str, Any]] = None
remote_client: Optional["OpenAIClient"] = None # OpenAI-compatible client for vLLM server
async_remote_client: Optional["AsyncOpenAI"] = None # Async OpenAI-compatible client for vLLM server
@property
def is_remote(self) -> bool:
"""Determine if we should use remote mode."""
return self.base_url is not None
def _get_vllm_client(self) -> LLM:
"""Get local VLLM client."""
if self.vllm_client:
return self.vllm_client
_vllm_params: Dict[str, Any] = {
"model": self.id,
"task": "embed",
"enforce_eager": self.enforce_eager,
}
if self.vllm_kwargs:
_vllm_params.update(self.vllm_kwargs)
self.vllm_client = LLM(**_vllm_params)
return self.vllm_client
def _get_remote_client(self) -> "OpenAIClient":
"""Get OpenAI-compatible client for remote vLLM server."""
if self.remote_client:
return self.remote_client
try:
from openai import OpenAI as OpenAIClient
except ImportError:
raise ImportError("`openai` package required for remote vLLM mode. ")
_client_params: Dict[str, Any] = {
"api_key": self.api_key or "EMPTY", # VLLM can run without API key
"base_url": self.base_url,
}
if self.client_params:
_client_params.update(self.client_params)
self.remote_client = OpenAIClient(**_client_params)
return self.remote_client
def _get_async_remote_client(self) -> "AsyncOpenAI":
"""Get async OpenAI-compatible client for remote vLLM server."""
if self.async_remote_client:
return self.async_remote_client
try:
from openai import AsyncOpenAI
except ImportError:
raise ImportError("`openai` package required for remote vLLM mode. ")
_client_params: Dict[str, Any] = {
"api_key": self.api_key or "EMPTY",
"base_url": self.base_url,
}
if self.client_params:
_client_params.update(self.client_params)
self.async_remote_client = AsyncOpenAI(**_client_params)
return self.async_remote_client
def _create_embedding_local(self, text: str) -> Optional[EmbeddingRequestOutput]:
"""Create embedding using local VLLM."""
try:
outputs = self._get_vllm_client().embed([text])
return outputs[0] if outputs else None
except Exception as e:
logger.warning(f"Error creating local embedding: {e}")
return None
def _create_embedding_remote(self, text: str) -> "CreateEmbeddingResponse":
"""Create embedding using remote vLLM server."""
_request_params: Dict[str, Any] = {
"input": text,
"model": self.id,
}
if self.request_params:
_request_params.update(self.request_params)
return self._get_remote_client().embeddings.create(**_request_params)
def get_embedding(self, text: str) -> List[float]:
    """Return the embedding vector for `text`; an empty list on any failure."""
    try:
        if self.is_remote:
            # Remote mode: OpenAI-compatible API
            return self._create_embedding_remote(text=text).data[0].embedding
        # Local mode: Direct VLLM
        output = self._create_embedding_local(text=text)
        if output and hasattr(output, "outputs") and hasattr(output.outputs, "embedding"):
            embedding = output.outputs.embedding
            # Warn (but still return) when the engine's vector size disagrees
            # with the configured dimensions.
            if len(embedding) != self.dimensions:
                logger.warning(f"Expected embedding dimension {self.dimensions}, but got {len(embedding)}")
            return embedding
        return []
    except Exception as e:
        logger.warning(f"Error extracting embedding: {e}")
        return []
def get_embedding_and_usage(self, text: str) -> Tuple[List[float], Optional[Dict]]:
    """Return (embedding, usage-dict) for `text`; usage is always None in local mode."""
    if not self.is_remote:
        # Local VLLM doesn't provide usage information
        return self.get_embedding(text=text), None
    try:
        response = self._create_embedding_remote(text=text)
        vector = response.data[0].embedding
        usage = response.usage
        if usage:
            return vector, usage.model_dump()
        return vector, None
    except Exception as e:
        logger.warning(f"Error in remote embedding: {e}")
        return [], None
async def async_get_embedding(self, text: str) -> List[float]:
    """Async version of get_embedding.

    Remote mode awaits the async OpenAI-compatible client; local mode runs the
    blocking embed call in the default thread executor so the event loop is not
    blocked.

    Returns an empty list on failure.
    """
    if self.is_remote:
        # Remote mode: async client for vLLM server
        try:
            req: Dict[str, Any] = {
                "input": text,
                "model": self.id,
            }
            if self.request_params:
                req.update(self.request_params)
            response: "CreateEmbeddingResponse" = await self._get_async_remote_client().embeddings.create(**req)
            return response.data[0].embedding
        except Exception as e:
            logger.warning(f"Error in async remote embedding: {e}")
            return []
    else:
        # Local mode: use thread executor for CPU-bound operations.
        # get_running_loop() is the supported API inside a coroutine;
        # get_event_loop() is deprecated in this context.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.get_embedding, text)
async def async_get_embedding_and_usage(self, text: str) -> Tuple[List[float], Optional[Dict]]:
    """Async version of get_embedding_and_usage.

    Remote mode awaits the async client and returns the server-reported usage;
    local mode off-loads to a thread executor and reports usage as None.
    Returns ([], None) on failure.
    """
    if self.is_remote:
        try:
            req: Dict[str, Any] = {
                "input": text,
                "model": self.id,
            }
            if self.request_params:
                req.update(self.request_params)
            response: "CreateEmbeddingResponse" = await self._get_async_remote_client().embeddings.create(**req)
            embedding = response.data[0].embedding
            usage = response.usage
            return embedding, usage.model_dump() if usage else None
        except Exception as e:
            logger.warning(f"Error in async remote embedding: {e}")
            return [], None
    else:
        # Local mode: use thread executor for CPU-bound operations.
        try:
            # get_running_loop() replaces the deprecated get_event_loop()
            # inside a coroutine.
            loop = asyncio.get_running_loop()
            return await loop.run_in_executor(None, self.get_embedding_and_usage, text)
        except Exception as e:
            logger.warning(f"Error in async local embedding: {e}")
            return [], None
async def async_get_embeddings_batch_and_usage(
    self, texts: List[str]
) -> Tuple[List[List[float]], List[Optional[Dict]]]:
    """
    Get embeddings and usage for multiple texts in batches (async version).

    Args:
        texts: List of text strings to embed

    Returns:
        Tuple of (List of embedding vectors, List of usage dictionaries)
    """
    embeddings: List[List[float]] = []
    usages: List[Optional[Dict]] = []
    logger.info(f"Getting embeddings for {len(texts)} texts in batches of {self.batch_size} (async)")
    for start in range(0, len(texts), self.batch_size):
        batch = texts[start : start + self.batch_size]
        try:
            if self.is_remote:
                # Remote mode: the server accepts the whole batch in one request.
                req: Dict[str, Any] = {
                    "input": batch,
                    "model": self.id,
                }
                if self.request_params:
                    req.update(self.request_params)
                response: "CreateEmbeddingResponse" = await self._get_async_remote_client().embeddings.create(**req)
                vectors = [item.embedding for item in response.data]
                embeddings.extend(vectors)
                # Usage is reported per request, so every vector in this batch
                # shares the same usage payload.
                usage_dict = response.usage.model_dump() if response.usage else None
                usages.extend([usage_dict] * len(vectors))
            else:
                # Local mode: no batch API — embed one text at a time via the
                # thread-executor path.
                for text in batch:
                    vector, usage = await self.async_get_embedding_and_usage(text)
                    embeddings.append(vector)
                    usages.append(usage)
        except Exception as e:
            logger.warning(f"Error in async batch embedding: {e}")
            # Keep the outputs aligned with the inputs: empty results for the
            # whole failed batch.
            for _ in batch:
                embeddings.append([])
                usages.append(None)
    return embeddings, usages
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/embedder/vllm.py",
"license": "Apache License 2.0",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/vectordb/redis/redisdb.py | import asyncio
from typing import Any, Dict, List, Optional, Union
try:
from redis import Redis
from redis.asyncio import Redis as AsyncRedis
from redisvl.index import AsyncSearchIndex, SearchIndex
from redisvl.query import FilterQuery, HybridQuery, TextQuery, VectorQuery
from redisvl.query.filter import Tag
from redisvl.redis.utils import array_to_buffer, convert_bytes
from redisvl.schema import IndexSchema
except ImportError:
raise ImportError("`redis` and `redisvl` not installed. Please install using `pip install redis redisvl`")
from agno.filters import FilterExpr
from agno.knowledge.document import Document
from agno.knowledge.embedder import Embedder
from agno.knowledge.reranker.base import Reranker
from agno.utils.log import log_debug, log_error, log_warning
from agno.utils.string import hash_string_sha256
from agno.vectordb.base import VectorDb
from agno.vectordb.distance import Distance
from agno.vectordb.search import SearchType
class RedisDB(VectorDb):
    """
    Redis class for managing vector operations with Redis and RedisVL.

    This class provides methods for creating, inserting, searching, and managing
    vector data in a Redis database using the RedisVL library.
    """

    def __init__(
        self,
        index_name: str,
        redis_url: Optional[str] = None,
        redis_client: Optional[Redis] = None,
        embedder: Optional[Embedder] = None,
        search_type: SearchType = SearchType.vector,
        distance: Distance = Distance.cosine,
        vector_score_weight: float = 0.7,
        reranker: Optional[Reranker] = None,
        **redis_kwargs,
    ):
        """
        Initialize the Redis instance.

        Args:
            index_name (str): Name of the Redis index to store vector data.
            redis_url (Optional[str]): Redis connection URL.
            redis_client (Optional[redis.Redis]): Redis client instance.
            embedder (Optional[Embedder]): Embedder instance for creating embeddings.
            search_type (SearchType): Type of search to perform.
            distance (Distance): Distance metric for vector comparisons.
            vector_score_weight (float): Weight for vector similarity in hybrid search.
            reranker (Optional[Reranker]): Reranker instance.
            **redis_kwargs: Additional Redis connection parameters.

        Raises:
            ValueError: If index_name is empty, neither connection argument is
                provided, or the embedder has no dimensions set.
        """
        if not index_name:
            raise ValueError("Index name must be provided.")
        if redis_client is None and redis_url is None:
            raise ValueError("Either 'redis_url' or 'redis_client' must be provided.")
        self.redis_url = redis_url
        # Initialize Redis client (an explicit client takes precedence over a URL)
        if redis_client is None:
            assert redis_url is not None
            self.redis_client = Redis.from_url(redis_url, **redis_kwargs)
        else:
            self.redis_client = redis_client
        # Index settings
        self.index_name: str = index_name
        # Embedder for embedding the document contents
        if embedder is None:
            from agno.knowledge.embedder.openai import OpenAIEmbedder

            embedder = OpenAIEmbedder()
            log_debug("Embedder not provided, using OpenAIEmbedder as default.")
        self.embedder: Embedder = embedder
        self.dimensions: Optional[int] = self.embedder.dimensions
        if self.dimensions is None:
            raise ValueError("Embedder.dimensions must be set.")
        # Search type and distance metric
        self.search_type: SearchType = search_type
        self.distance: Distance = distance
        self.vector_score_weight: float = vector_score_weight
        # Reranker instance
        self.reranker: Optional[Reranker] = reranker
        # Create index schema
        self.schema = self._get_schema()
        self.index = self._create_index()
        self.meta_data_fields: set[str] = set()
        # Async components - created lazily when needed
        self._async_redis_client: Optional[AsyncRedis] = None
        self._async_index: Optional[AsyncSearchIndex] = None
        log_debug(f"Initialized Redis with index '{self.index_name}'")

    async def _get_async_index(self) -> AsyncSearchIndex:
        """Get or create the async index and client.

        Raises:
            ValueError: If the instance was built from a client only, since the
                async client can only be constructed from a URL.
        """
        if self._async_index is None:
            if self.redis_url is None:
                raise ValueError("redis_url must be provided for async operations")
            url: str = self.redis_url
            self._async_redis_client = AsyncRedis.from_url(url)
            self._async_index = AsyncSearchIndex(schema=self.schema, redis_client=self._async_redis_client)
        return self._async_index

    def _get_schema(self):
        """Build the default RedisVL index schema (hash storage, flat vector index)."""
        distance_mapping = {
            Distance.cosine: "cosine",
            Distance.l2: "l2",
            Distance.max_inner_product: "ip",
        }
        return IndexSchema.from_dict(
            {
                "index": {
                    "name": self.index_name,
                    "prefix": f"{self.index_name}:",
                    "storage_type": "hash",
                },
                "fields": [
                    {"name": "id", "type": "tag"},
                    {"name": "name", "type": "tag"},
                    {"name": "content", "type": "text"},
                    {"name": "content_hash", "type": "tag"},
                    {"name": "content_id", "type": "tag"},
                    # Common metadata fields used in operations/tests
                    {"name": "status", "type": "tag"},
                    {"name": "category", "type": "tag"},
                    {"name": "tag", "type": "tag"},
                    {"name": "source", "type": "tag"},
                    {"name": "mode", "type": "tag"},
                    {
                        "name": "embedding",
                        "type": "vector",
                        "attrs": {
                            "dims": self.dimensions,
                            "distance_metric": distance_mapping[self.distance],
                            "algorithm": "flat",
                        },
                    },
                ],
            }
        )

    def _create_index(self) -> SearchIndex:
        """Create the RedisVL index object for this schema.

        NOTE(review): this always passes redis_url, which is None when only a
        redis_client was supplied — confirm SearchIndex handles that case.
        """
        return SearchIndex(self.schema, redis_url=self.redis_url)

    def create(self) -> None:
        """Create the Redis index if it does not exist."""
        try:
            if not self.exists():
                self.index.create()
                log_debug(f"Created Redis index: {self.index_name}")
            else:
                log_debug(f"Redis index already exists: {self.index_name}")
        except Exception as e:
            log_error(f"Error creating Redis index: {e}")
            raise

    async def async_create(self) -> None:
        """Async version of create method."""
        try:
            async_index = await self._get_async_index()
            await async_index.create(overwrite=False, drop=False)
            log_debug(f"Created Redis index: {self.index_name}")
        except Exception as e:
            # RedisVL raises when the index already exists; treat that as success.
            if "already exists" in str(e).lower():
                log_debug(f"Redis index already exists: {self.index_name}")
            else:
                log_error(f"Error creating Redis index: {e}")
                raise

    def name_exists(self, name: str) -> bool:
        """Check if a document with the given name exists."""
        try:
            name_filter = Tag("name") == name
            query = FilterQuery(
                filter_expression=name_filter,
                return_fields=["id"],
                num_results=1,
            )
            results = self.index.query(query)
            return len(results) > 0
        except Exception as e:
            log_error(f"Error checking if name exists: {e}")
            return False

    async def async_name_exists(self, name: str) -> bool:  # type: ignore[override]
        """Async version of name_exists method."""
        try:
            async_index = await self._get_async_index()
            name_filter = Tag("name") == name
            query = FilterQuery(
                filter_expression=name_filter,
                return_fields=["id"],
                num_results=1,
            )
            results = await async_index.query(query)
            return len(results) > 0
        except Exception as e:
            log_error(f"Error checking if name exists: {e}")
            return False

    def id_exists(self, id: str) -> bool:
        """Check if a document with the given ID exists."""
        try:
            id_filter = Tag("id") == id
            query = FilterQuery(
                filter_expression=id_filter,
                return_fields=["id"],
                num_results=1,
            )
            results = self.index.query(query)
            return len(results) > 0
        except Exception as e:
            log_error(f"Error checking if ID exists: {e}")
            return False

    def content_hash_exists(self, content_hash: str) -> bool:
        """Check if a document with the given content hash exists."""
        try:
            content_hash_filter = Tag("content_hash") == content_hash
            query = FilterQuery(
                filter_expression=content_hash_filter,
                return_fields=["id"],
                num_results=1,
            )
            results = self.index.query(query)
            return len(results) > 0
        except Exception as e:
            log_error(f"Error checking if content hash exists: {e}")
            return False

    def _parse_redis_hash(self, doc: Document):
        """
        Create object serializable into Redis HASH structure.

        Ensures the document has an id and an embedding, flattens meta_data
        into top-level fields, and converts the embedding to a byte buffer.
        """
        doc_dict = doc.to_dict()
        # Ensure an ID is present; derive a deterministic one from content when missing
        doc_id = doc.id or hash_string_sha256(doc.content)
        doc_dict["id"] = doc_id
        if not doc.embedding:
            doc.embed(self.embedder)
        # TODO: determine how we want to handle dtypes
        doc_dict["embedding"] = array_to_buffer(doc.embedding, "float32")
        # Add content_id if available
        if hasattr(doc, "content_id") and doc.content_id:
            doc_dict["content_id"] = doc.content_id
        if "meta_data" in doc_dict:
            # Flatten metadata into top-level hash fields; remember the field
            # names so they can be referenced later.
            meta_data = doc_dict.pop("meta_data", {})
            for md in meta_data:
                self.meta_data_fields.add(md)
            doc_dict.update(meta_data)
        return doc_dict

    def insert(
        self,
        content_hash: str,
        documents: List[Document],
        filters: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Insert documents into the Redis index, tagging each with content_hash."""
        try:
            parsed_documents = []
            for doc in documents:
                parsed_doc = self._parse_redis_hash(doc)
                parsed_doc["content_hash"] = content_hash
                parsed_documents.append(parsed_doc)
            self.index.load(parsed_documents, id_field="id")
            log_debug(f"Inserted {len(documents)} documents with content_hash: {content_hash}")
        except Exception as e:
            log_error(f"Error inserting documents: {e}")
            raise

    async def async_insert(
        self,
        content_hash: str,
        documents: List[Document],
        filters: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Async version of insert method."""
        try:
            async_index = await self._get_async_index()
            parsed_documents = []
            for doc in documents:
                parsed_doc = self._parse_redis_hash(doc)
                parsed_doc["content_hash"] = content_hash
                parsed_documents.append(parsed_doc)
            await async_index.load(parsed_documents, id_field="id")
            log_debug(f"Inserted {len(documents)} documents with content_hash: {content_hash}")
        except Exception as e:
            log_error(f"Error inserting documents: {e}")
            raise

    def upsert_available(self) -> bool:
        """Check if upsert is available (always True for Redis)."""
        return True

    def upsert(
        self,
        content_hash: str,
        documents: List[Document],
        filters: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Upsert documents into the Redis index.

        Strategy: delete existing docs with the same content_hash, then insert new docs.
        """
        try:
            # Find existing docs for this content_hash and delete them
            ch_filter = Tag("content_hash") == content_hash
            query = FilterQuery(
                filter_expression=ch_filter,
                return_fields=["id"],
                num_results=1000,
            )
            existing = self.index.query(query)
            parsed = convert_bytes(existing)
            for r in parsed:
                key = r.get("id")
                if key:
                    self.index.drop_keys(key)
            # Insert new docs
            self.insert(content_hash, documents, filters)
        except Exception as e:
            log_error(f"Error upserting documents: {e}")
            raise

    async def async_upsert(
        self,
        content_hash: str,
        documents: List[Document],
        filters: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Async version of upsert method.

        Strategy: delete existing docs with the same content_hash, then insert new docs.
        """
        try:
            async_index = await self._get_async_index()
            # Find existing docs for this content_hash and delete them
            ch_filter = Tag("content_hash") == content_hash
            query = FilterQuery(
                filter_expression=ch_filter,
                return_fields=["id"],
                num_results=1000,
            )
            existing = await async_index.query(query)
            parsed = convert_bytes(existing)
            for r in parsed:
                key = r.get("id")
                if key:
                    await async_index.drop_keys(key)
            # Insert new docs
            await self.async_insert(content_hash, documents, filters)
        except Exception as e:
            log_error(f"Error upserting documents: {e}")
            raise

    def search(
        self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
    ) -> List[Document]:
        """Search for documents using the configured search type."""
        if filters and isinstance(filters, list):
            log_warning("Filters Expressions are not supported in Redis. No filters will be applied.")
            filters = None
        try:
            if self.search_type == SearchType.vector:
                return self.vector_search(query, limit)
            elif self.search_type == SearchType.keyword:
                return self.keyword_search(query, limit)
            elif self.search_type == SearchType.hybrid:
                return self.hybrid_search(query, limit)
            else:
                raise ValueError(f"Unsupported search type: {self.search_type}")
        except Exception as e:
            log_error(f"Error in search: {e}")
            return []

    async def async_search(
        self, query: str, limit: int = 5, filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
    ) -> List[Document]:
        """Async version of search method (runs the sync search in a thread)."""
        return await asyncio.to_thread(self.search, query, limit, filters)

    def vector_search(self, query: str, limit: int = 5) -> List[Document]:
        """Perform vector similarity search."""
        try:
            # Get query embedding
            query_embedding = array_to_buffer(self.embedder.get_embedding(query), "float32")
            # TODO: do we want to pass back the embedding?
            # Create vector query
            vector_query = VectorQuery(
                vector=query_embedding,
                vector_field_name="embedding",
                return_fields=["id", "name", "content"],
                return_score=False,
                num_results=limit,
            )
            # Execute search
            results = self.index.query(vector_query)
            # NOTE(review): keyword/hybrid search pass results through
            # convert_bytes before Document.from_dict; confirm whether that is
            # also needed here.
            documents = [Document.from_dict(r) for r in results]
            # Apply reranking if reranker is available
            if self.reranker:
                documents = self.reranker.rerank(query=query, documents=documents)
            return documents
        except Exception as e:
            log_error(f"Error in vector search: {e}")
            return []

    def keyword_search(self, query: str, limit: int = 5) -> List[Document]:
        """Perform keyword search using Redis text search."""
        try:
            # Create text query. num_results was previously omitted, which made
            # the `limit` argument a silent no-op; pass it through explicitly.
            text_query = TextQuery(
                text=query,
                text_field_name="content",
                num_results=limit,
            )
            # Execute search
            results = self.index.query(text_query)
            # Normalize byte payloads before building documents
            parsed = convert_bytes(results)
            # Convert results to documents
            documents = [Document.from_dict(p) for p in parsed]
            # Apply reranking if reranker is available
            if self.reranker:
                documents = self.reranker.rerank(query=query, documents=documents)
            return documents
        except Exception as e:
            log_error(f"Error in keyword search: {e}")
            return []

    def hybrid_search(self, query: str, limit: int = 5) -> List[Document]:
        """Perform hybrid search combining vector and keyword search."""
        try:
            # Get query embedding
            query_embedding = array_to_buffer(self.embedder.get_embedding(query), "float32")
            # Create hybrid query weighting vector similarity by vector_score_weight
            vector_query = HybridQuery(
                vector=query_embedding,
                vector_field_name="embedding",
                text=query,
                text_field_name="content",
                linear_alpha=self.vector_score_weight,
                return_fields=["id", "name", "content"],
                num_results=limit,
            )
            # Execute search
            results = self.index.query(vector_query)
            parsed = convert_bytes(results)
            # Convert results to documents
            documents = [Document.from_dict(p) for p in parsed]
            # Apply reranking if reranker is available
            if self.reranker:
                documents = self.reranker.rerank(query=query, documents=documents)
            return documents
        except Exception as e:
            log_error(f"Error in hybrid search: {e}")
            return []

    def drop(self) -> bool:  # type: ignore[override]
        """Drop the Redis index (index definition and documents)."""
        try:
            self.index.delete(drop=True)
            log_debug(f"Deleted Redis index: {self.index_name}")
            return True
        except Exception as e:
            log_error(f"Error dropping Redis index: {e}")
            return False

    async def async_drop(self) -> None:
        """Async version of drop method."""
        try:
            async_index = await self._get_async_index()
            await async_index.delete(drop=True)
            log_debug(f"Deleted Redis index: {self.index_name}")
        except Exception as e:
            log_error(f"Error dropping Redis index: {e}")
            raise

    def exists(self) -> bool:
        """Check if the Redis index exists."""
        try:
            return self.index.exists()
        except Exception as e:
            log_error(f"Error checking if index exists: {e}")
            return False

    async def async_exists(self) -> bool:
        """Async version of exists method."""
        try:
            async_index = await self._get_async_index()
            return await async_index.exists()
        except Exception as e:
            log_error(f"Error checking if index exists: {e}")
            return False

    def optimize(self) -> None:
        """Optimize the Redis index (no-op for Redis)."""
        log_debug("Redis optimization not required")
        pass

    def delete(self) -> bool:
        """Clear all documents from the index.

        The index definition itself is kept; use drop() to remove the index.
        """
        try:
            self.index.clear()
            return True
        except Exception as e:
            log_error(f"Error deleting Redis index: {e}")
            return False

    def delete_by_id(self, id: str) -> bool:
        """Delete documents by ID."""
        try:
            # Use RedisVL to drop documents by document ID
            result = self.index.drop_documents(id)
            log_debug(f"Deleted document with id '{id}' from Redis index")
            return result > 0
        except Exception as e:
            log_error(f"Error deleting document by ID: {e}")
            return False

    def delete_by_name(self, name: str) -> bool:
        """Delete documents by name."""
        try:
            # First find documents with the given name
            name_filter = Tag("name") == name
            query = FilterQuery(
                filter_expression=name_filter,
                return_fields=["id"],
                num_results=1000,  # Get all matching documents
            )
            results = self.index.query(query)
            parsed = convert_bytes(results)
            # Delete each found document by key (result['id'] is the Redis key)
            deleted_count = 0
            for result in parsed:
                key = result.get("id")
                if key:
                    deleted_count += self.index.drop_keys(key)
            log_debug(f"Deleted {deleted_count} documents with name '{name}'")
            return deleted_count > 0
        except Exception as e:
            log_error(f"Error deleting documents by name: {e}")
            return False

    def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
        """Delete documents matching ALL of the given metadata key/value pairs."""
        try:
            # Build filter expression for metadata using Tag filters
            filters = []
            for key, value in metadata.items():
                filters.append(Tag(key) == str(value))
            # Combine filters with AND logic
            if len(filters) == 1:
                combined_filter = filters[0]
            else:
                combined_filter = filters[0]
                for f in filters[1:]:
                    combined_filter = combined_filter & f
            # Find documents with the given metadata
            query = FilterQuery(
                filter_expression=combined_filter,
                return_fields=["id"],
                num_results=1000,  # Get all matching documents
            )
            results = self.index.query(query)
            parsed = convert_bytes(results)
            # Delete each found document by key (result['id'] is the Redis key)
            deleted_count = 0
            for result in parsed:
                key = result.get("id")
                if key:
                    deleted_count += self.index.drop_keys(key)
            log_debug(f"Deleted {deleted_count} documents with metadata {metadata}")
            return deleted_count > 0
        except Exception as e:
            log_error(f"Error deleting documents by metadata: {e}")
            return False

    def delete_by_content_id(self, content_id: str) -> bool:
        """Delete documents by content ID."""
        try:
            # Find documents with the given content_id
            content_id_filter = Tag("content_id") == content_id
            query = FilterQuery(
                filter_expression=content_id_filter,
                return_fields=["id"],
                num_results=1000,  # Get all matching documents
            )
            results = self.index.query(query)
            parsed = convert_bytes(results)
            # Delete each found document by key (result['id'] is the Redis key)
            deleted_count = 0
            for result in parsed:
                key = result.get("id")
                if key:
                    deleted_count += self.index.drop_keys(key)
            log_debug(f"Deleted {deleted_count} documents with content_id '{content_id}'")
            return deleted_count > 0
        except Exception as e:
            log_error(f"Error deleting documents by content_id: {e}")
            return False

    def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
        """Update metadata for documents with the given content ID."""
        try:
            # Find documents with the given content_id
            content_id_filter = Tag("content_id") == content_id
            query = FilterQuery(
                filter_expression=content_id_filter,
                return_fields=["id"],
                num_results=1000,  # Get all matching documents
            )
            results = self.index.query(query)
            # Update metadata for each found document; result['id'] is the Redis key
            for result in results:
                key = result.get("id")
                if key:
                    # Merge the new metadata fields into the existing hash
                    self.redis_client.hset(key, mapping=metadata)
            log_debug(f"Updated metadata for documents with content_id '{content_id}'")
        except Exception as e:
            log_error(f"Error updating metadata: {e}")
            raise

    def get_supported_search_types(self) -> List[str]:
        """Get list of supported search types."""
        return ["vector", "keyword", "hybrid"]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/vectordb/redis/redisdb.py",
"license": "Apache License 2.0",
"lines": 595,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/vectordb/test_redisdb.py | import sys
import types
from typing import Any, Dict, List
from unittest.mock import MagicMock
import pytest
from agno.knowledge.document import Document
from agno.vectordb.search import SearchType
@pytest.fixture()
def stub_redisvl(monkeypatch):
    """Patch installed redis/redisvl to avoid real network and return MagicMock indexes.

    Yields a tuple (SearchIndex_mock, AsyncSearchIndex_mock).
    """
    # Patch redis client constructors to return MagicMocks
    try:
        import redis
        import redis.asyncio as redis_async
    except Exception:
        # redis is not installed at all: register empty stand-in modules so
        # later `import redis` statements resolve to these shells.
        redis = types.ModuleType("redis")
        redis_async = types.ModuleType("redis.asyncio")
        sys.modules["redis"] = redis
        sys.modules["redis.asyncio"] = redis_async

    def _redis_from_url(url: str, **kwargs):
        # Stand-in for redis.Redis.from_url — never opens a connection.
        return MagicMock(name="RedisClient")

    def _async_redis_from_url(url: str, **kwargs):
        # Stand-in for redis.asyncio.Redis.from_url.
        return MagicMock(name="AsyncRedisClient")

    # If attributes don't exist, create shells
    if not hasattr(redis, "Redis"):

        class _Redis:  # type: ignore
            @classmethod
            def from_url(cls, url: str, **kwargs):
                return _redis_from_url(url, **kwargs)

        redis.Redis = _Redis  # type: ignore
    else:
        monkeypatch.setattr(redis.Redis, "from_url", staticmethod(_redis_from_url))
    if not hasattr(redis_async, "Redis"):

        class _AsyncRedis:  # type: ignore
            @classmethod
            def from_url(cls, url: str, **kwargs):
                return _async_redis_from_url(url, **kwargs)

        redis_async.Redis = _AsyncRedis  # type: ignore
    else:
        monkeypatch.setattr(redis_async.Redis, "from_url", staticmethod(_async_redis_from_url))
    # Patch redisvl SearchIndex/AsyncSearchIndex to return MagicMocks
    import importlib

    try:
        rvl_index = importlib.import_module("redisvl.index")
    except ModuleNotFoundError:
        rvl_index = types.ModuleType("redisvl.index")
        sys.modules["redisvl.index"] = rvl_index
    search_index_mock = MagicMock(name="SearchIndex")
    async_search_index_mock = MagicMock(name="AsyncSearchIndex")

    def _SearchIndex(*args, **kwargs):
        # Factory: every SearchIndex(...) call yields the shared mock.
        return search_index_mock

    def _AsyncSearchIndex(*args, **kwargs):
        return async_search_index_mock

    monkeypatch.setattr(rvl_index, "SearchIndex", _SearchIndex, raising=False)
    monkeypatch.setattr(rvl_index, "AsyncSearchIndex", _AsyncSearchIndex, raising=False)
    # Optionally patch utils to simple pass-throughs (safe even if module missing)
    try:
        rvl_utils = importlib.import_module("redisvl.redis.utils")
        monkeypatch.setattr(rvl_utils, "convert_bytes", lambda x: x, raising=False)
        monkeypatch.setattr(rvl_utils, "array_to_buffer", lambda a, dt: a, raising=False)
        monkeypatch.setattr(rvl_utils, "buffer_to_array", lambda b, dt: b, raising=False)
    except ModuleNotFoundError:
        pass
    # Also ensure schema.from_dict is available (no-op)
    try:
        rvl_schema = importlib.import_module("redisvl.schema")

        class _IndexSchema:
            @classmethod
            def from_dict(cls, d: Dict[str, Any]):
                # Identity: the schema dict itself acts as the "schema".
                return d

        monkeypatch.setattr(rvl_schema, "IndexSchema", _IndexSchema, raising=False)
    except ModuleNotFoundError:
        pass
    yield search_index_mock, async_search_index_mock
@pytest.fixture()
def import_redisdb(stub_redisvl):
    """Import RedisVectorDb after stubbing dependencies.

    Returns (RedisVectorDb, search_idx_mock, async_idx_mock).
    """
    # Delayed import to ensure patches are in place
    # NOTE(review): the implementation module defines class RedisDB; confirm
    # that agno.vectordb.redis exports RedisVectorDb (e.g. as an alias).
    from agno.vectordb.redis import RedisVectorDb  # type: ignore

    search_idx_mock, async_idx_mock = stub_redisvl
    return RedisVectorDb, search_idx_mock, async_idx_mock


@pytest.fixture()
def sample_documents() -> List[Document]:
    """Three small documents; two share category "A" so metadata-based
    operations can match more than one record."""
    return [
        Document(content="Doc A", meta_data={"category": "A"}, name="doc_a"),
        Document(content="Doc B", meta_data={"category": "B"}, name="doc_b"),
        Document(content="Doc C", meta_data={"category": "A"}, name="doc_c"),
    ]
@pytest.fixture()
def redis_db(import_redisdb, mock_embedder):
    """Build a RedisVectorDb wired to a fully controlled MagicMock index.

    Returns (db, idx) where idx is the mock standing in for db.index.
    NOTE(review): `mock_embedder` is not defined in this module — presumably a
    conftest.py fixture; verify it provides `dimensions`.
    """
    RedisVectorDb, _search_idx_mock, _ = import_redisdb
    db = RedisVectorDb(
        index_name="test_index",
        redis_url="redis://localhost:6379/0",
        embedder=mock_embedder,
    )
    # Replace the internal index with our own MagicMock instance so we fully control it
    idx = MagicMock(name="SearchIndexInstance")
    idx.exists.return_value = False
    idx.create.return_value = None
    idx.delete.return_value = None
    idx.load.return_value = None
    idx.clear.return_value = None
    idx.drop_keys.return_value = 1  # each drop_keys call reports one key removed
    idx.drop_documents.return_value = 1
    db.index = idx
    return db, idx
@pytest.fixture()
def import_knowledge():
    """Return the Knowledge class (imported lazily, after stubs are applied)."""
    from agno.knowledge.knowledge import Knowledge

    return Knowledge


@pytest.fixture()
def create_knowledge(import_knowledge, redis_db):
    """Build a Knowledge instance backed by the mocked Redis vector db."""
    db, idx = redis_db
    Knowledge = import_knowledge
    knowledge = Knowledge(
        name="My Redis Vector Knowledge Base",
        description="This knowledge base uses Redis + RedisVL as the vector store",
        vector_db=db,
    )
    return knowledge
def test_knowlwedge_insert(create_knowledge):
    # Smoke test: Knowledge.insert against the mocked vector db must not raise.
    # NOTE(review): function name has a typo ("knowlwedge"); renaming would be
    # safe for pytest discovery but the name is left unchanged here.
    # NOTE(review): the URL points at a remote PDF — the mocked vector db avoids
    # Redis, but the download itself may hit the network; confirm this is
    # acceptable for a unit test.
    knowledge = create_knowledge
    try:
        result = knowledge.insert(
            name="Recipes",
            url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf",
            metadata={"doc_type": "recipe_book"},
            skip_if_exists=True,
        )
        assert result is None or result is not False
    except Exception as e:
        pytest.fail(f"insert raised an unexpected exception: {e}")
def test_create_and_exists(redis_db):
    """create() builds the index only when missing; exists() mirrors the backend."""
    db, idx = redis_db
    # Index absent -> create() must call the underlying index.create().
    idx.exists.return_value = False
    db.create()
    idx.create.assert_called_once()
    # exists() simply surfaces whatever the underlying index reports.
    for backend_state in (True, False):
        idx.exists.return_value = backend_state
        assert db.exists() is backend_state
def test_drop(redis_db):
    """drop() deletes the underlying index and reports success."""
    db, idx = redis_db
    dropped = db.drop()
    assert dropped is True
    idx.delete.assert_called_once()
def test_insert_loads_documents(redis_db, sample_documents):
    """insert() forwards parsed docs to index.load with id_field and content_hash set."""
    db, idx = redis_db
    db.insert(content_hash="chash1", documents=sample_documents)
    # Exactly one load call carrying all three parsed documents.
    assert idx.load.call_count == 1
    call_args, call_kwargs = idx.load.call_args
    payload = call_args[0]
    assert isinstance(payload, list)
    assert len(payload) == 3
    assert call_kwargs.get("id_field") == "id"
    # Every stored hash must carry the originating content_hash.
    assert all("content_hash" in entry for entry in payload)
def test_upsert_deletes_existing_then_inserts(redis_db, sample_documents):
    """upsert() first drops keys matching the content_hash, then loads new docs."""
    db, idx = redis_db
    # Pretend two records already exist under the same content_hash.
    idx.query.return_value = [{"id": "test_index:key1"}, {"id": "test_index:key2"}]
    db.upsert(content_hash="same_hash", documents=sample_documents)
    assert idx.drop_keys.call_count == 2  # one drop per pre-existing key
    assert idx.load.call_count >= 1  # replacement documents were loaded
def test_existence_checks(redis_db):
    """name/id/content-hash existence helpers reflect whether the query matched."""
    db, idx = redis_db
    checks = [
        (db.name_exists, "doc_a"),
        (db.id_exists, "someid"),
        (db.content_hash_exists, "hash"),
    ]
    for check, argument in checks:
        # Non-empty query result -> True
        idx.query.return_value = [{"id": "k1"}]
        assert check(argument) is True
        # Empty query result -> False
        idx.query.return_value = []
        assert check(argument) is False
def test_search_vector_keyword_hybrid(redis_db):
    """search() dispatches per search_type and maps result rows to Documents."""
    db, idx = redis_db
    # Vector search: two rows -> two Documents.
    idx.query.return_value = [
        {"id": "1", "name": "doc_a", "content": "Doc A"},
        {"id": "2", "name": "doc_b", "content": "Doc B"},
    ]
    db.search_type = SearchType.vector
    hits = db.search("q", limit=2)
    assert len(hits) == 2
    assert all(isinstance(hit, Document) for hit in hits)
    # Keyword search (convert_bytes is stubbed to a passthrough).
    idx.query.return_value = [{"id": "3", "name": "doc_c", "content": "Doc C"}]
    db.search_type = SearchType.keyword
    hits = db.search("curry", limit=1)
    assert len(hits) == 1
    assert hits[0].name == "doc_c"
    # Hybrid search.
    idx.query.return_value = [{"id": "4", "name": "doc_a", "content": "Doc A"}]
    db.search_type = SearchType.hybrid
    hits = db.search("thai curry", limit=1)
    assert len(hits) == 1
    assert hits[0].name == "doc_a"
def test_delete_by_name_and_metadata_and_content_id(redis_db):
    """All three delete-by-* helpers drop every key the lookup query returns."""
    db, idx = redis_db
    # Each lookup reports two matching keys; each drop removes one key.
    idx.query.return_value = [{"id": "test_index:k1"}, {"id": "test_index:k2"}]
    idx.drop_keys.return_value = 1
    scenarios = [
        (lambda: db.delete_by_name("doc_a"), 2),
        (lambda: db.delete_by_metadata({"category": "A"}), 1),
        (lambda: db.delete_by_content_id("content-123"), 1),
    ]
    for action, min_drops in scenarios:
        idx.drop_keys.reset_mock()
        assert action() is True
        assert idx.drop_keys.call_count >= min_drops
def test_update_metadata_writes_to_hash(redis_db):
    """update_metadata() issues one HSET per matching key with the new mapping."""
    db, idx = redis_db
    # Two documents match the content_id lookup.
    idx.query.return_value = [{"id": "test_index:k1"}, {"id": "test_index:k2"}]
    client = db.redis_client  # MagicMock installed by the stub fixture
    db.update_metadata("content-xyz", {"status": "updated"})
    assert client.hset.call_count == 2
    for hset_call in client.hset.call_args_list:
        mapping = hset_call.kwargs.get("mapping")
        assert mapping is not None
        assert mapping["status"] == "updated"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/vectordb/test_redisdb.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.